From 57df571e60b8f1a7cb0a0141a2129bedcbf3fae8 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 1 Jun 2020 01:40:41 +0300 Subject: [PATCH 01/52] Remove trailing whitespaces from formatted queries in some cases --- src/Parsers/ASTExpressionList.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Parsers/ASTExpressionList.cpp b/src/Parsers/ASTExpressionList.cpp index 4f0b2d4cd6b..1395d8b15fe 100644 --- a/src/Parsers/ASTExpressionList.cpp +++ b/src/Parsers/ASTExpressionList.cpp @@ -37,10 +37,8 @@ void ASTExpressionList::formatImplMultiline(const FormatSettings & settings, For { if (separator) settings.ostr << separator; - settings.ostr << ' '; } - if (children.size() > 1) settings.ostr << indent_str; From 6eb6d8f3fd8beacbcb8ea536366e6c53ea833ff5 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 2 Jun 2020 00:11:08 +0300 Subject: [PATCH 02/52] Get rid of annoying trailing whitespaces in CREATE query --- src/DataTypes/DataTypeTuple.cpp | 1 + src/Parsers/ASTColumnDeclaration.cpp | 3 -- src/Parsers/ASTConstraintDeclaration.cpp | 4 --- src/Parsers/ASTCreateQuery.cpp | 26 ++++++++--------- src/Parsers/ASTIndexDeclaration.cpp | 35 ++++++++++++++++++++++ src/Parsers/ASTIndexDeclaration.h | 37 ++---------------------- src/Parsers/ASTNameTypePair.cpp | 35 ++++++++++++++++++++++ src/Parsers/ASTNameTypePair.h | 24 ++------------- src/Parsers/ya.make | 2 ++ 9 files changed, 89 insertions(+), 78 deletions(-) create mode 100644 src/Parsers/ASTIndexDeclaration.cpp create mode 100644 src/Parsers/ASTNameTypePair.cpp diff --git a/src/DataTypes/DataTypeTuple.cpp b/src/DataTypes/DataTypeTuple.cpp index 29db2a49b99..b69c4c31ca4 100644 --- a/src/DataTypes/DataTypeTuple.cpp +++ b/src/DataTypes/DataTypeTuple.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Parsers/ASTColumnDeclaration.cpp b/src/Parsers/ASTColumnDeclaration.cpp index b281315f555..15bf1d59574 100644 --- a/src/Parsers/ASTColumnDeclaration.cpp +++ b/src/Parsers/ASTColumnDeclaration.cpp @@ -47,9 +47,6 @@ void ASTColumnDeclaration::formatImpl(const FormatSettings & settings, FormatSta { frame.need_parens = false; - if (!settings.one_line) - settings.ostr << settings.nl_or_ws << std::string(4 * frame.indent, ' '); - /// We have to always backquote column names to avoid ambiguouty with INDEX and other declarations in CREATE query. settings.ostr << backQuote(name); diff --git a/src/Parsers/ASTConstraintDeclaration.cpp b/src/Parsers/ASTConstraintDeclaration.cpp index f268141f619..371bfa40f54 100644 --- a/src/Parsers/ASTConstraintDeclaration.cpp +++ b/src/Parsers/ASTConstraintDeclaration.cpp @@ -19,10 +19,6 @@ ASTPtr ASTConstraintDeclaration::clone() const void ASTConstraintDeclaration::formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const { - frame.need_parens = false; - std::string indent_str = s.one_line ? "" : std::string(4 * frame.indent, ' '); - - s.ostr << s.nl_or_ws << indent_str; s.ostr << backQuoteIfNeed(name); s.ostr << (s.hilite ? hilite_keyword : "") << " CHECK " << (s.hilite ? hilite_none : ""); expr->formatImpl(s, state, frame); diff --git a/src/Parsers/ASTCreateQuery.cpp b/src/Parsers/ASTCreateQuery.cpp index d5942753f78..f7481ac3c09 100644 --- a/src/Parsers/ASTCreateQuery.cpp +++ b/src/Parsers/ASTCreateQuery.cpp @@ -108,17 +108,9 @@ void ASTColumnsElement::formatImpl(const FormatSettings & s, FormatState & state return; } - frame.need_parens = false; - std::string indent_str = s.one_line ? 
"" : std::string(4 * frame.indent, ' '); - - s.ostr << s.nl_or_ws << indent_str; s.ostr << (s.hilite ? hilite_keyword : "") << prefix << (s.hilite ? hilite_none : ""); - - FormatSettings nested_settings = s; - nested_settings.one_line = true; - nested_settings.nl_or_ws = ' '; - - elem->formatImpl(nested_settings, state, frame); + s.ostr << ' '; + elem->formatImpl(s, state, frame); } @@ -172,7 +164,12 @@ void ASTColumns::formatImpl(const FormatSettings & s, FormatState & state, Forma } if (!list.children.empty()) - list.formatImpl(s, state, frame); + { + if (s.one_line) + list.formatImpl(s, state, frame); + else + list.formatImplMultiline(s, state, frame); + } } @@ -277,7 +274,6 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat { settings.ostr << (settings.one_line ? " (" : "\n("); FormatStateStacked frame_nested = frame; - ++frame_nested.indent; columns_list->formatImpl(settings, state, frame_nested); settings.ostr << (settings.one_line ? ")" : "\n)"); } @@ -286,8 +282,10 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat { settings.ostr << (settings.one_line ? " (" : "\n("); FormatStateStacked frame_nested = frame; - ++frame_nested.indent; - dictionary_attributes_list->formatImpl(settings, state, frame_nested); + if (settings.one_line) + dictionary_attributes_list->formatImpl(settings, state, frame_nested); + else + dictionary_attributes_list->formatImplMultiline(settings, state, frame_nested); settings.ostr << (settings.one_line ? ")" : "\n)"); } diff --git a/src/Parsers/ASTIndexDeclaration.cpp b/src/Parsers/ASTIndexDeclaration.cpp new file mode 100644 index 00000000000..e89f9bf26ed --- /dev/null +++ b/src/Parsers/ASTIndexDeclaration.cpp @@ -0,0 +1,35 @@ +#include +#include + + +namespace DB +{ + +ASTPtr ASTIndexDeclaration::clone() const +{ + auto res = std::make_shared(); + + res->name = name; + res->granularity = granularity; + + if (expr) + res->set(res->expr, expr->clone()); + if (type) + res->set(res->type, type->clone()); + return res; +} + + +void ASTIndexDeclaration::formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const +{ + s.ostr << backQuoteIfNeed(name); + s.ostr << " "; + expr->formatImpl(s, state, frame); + s.ostr << (s.hilite ? hilite_keyword : "") << " TYPE " << (s.hilite ? hilite_none : ""); + type->formatImpl(s, state, frame); + s.ostr << (s.hilite ? hilite_keyword : "") << " GRANULARITY " << (s.hilite ? hilite_none : ""); + s.ostr << granularity; +} + +} + diff --git a/src/Parsers/ASTIndexDeclaration.h b/src/Parsers/ASTIndexDeclaration.h index c71ab21cf57..64ef6eb2db1 100644 --- a/src/Parsers/ASTIndexDeclaration.h +++ b/src/Parsers/ASTIndexDeclaration.h @@ -1,15 +1,8 @@ #pragma once -#include -#include -#include -#include -#include #include #include -#include - namespace DB { @@ -27,34 +20,8 @@ public: /** Get the text that identifies this element. */ String getID(char) const override { return "Index"; } - ASTPtr clone() const override - { - auto res = std::make_shared(); - - res->name = name; - res->granularity = granularity; - - if (expr) - res->set(res->expr, expr->clone()); - if (type) - res->set(res->type, type->clone()); - return res; - } - - void formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const override - { - frame.need_parens = false; - std::string indent_str = s.one_line ? 
"" : std::string(4 * frame.indent, ' '); - - s.ostr << s.nl_or_ws << indent_str; - s.ostr << backQuoteIfNeed(name); - s.ostr << " "; - expr->formatImpl(s, state, frame); - s.ostr << (s.hilite ? hilite_keyword : "") << " TYPE " << (s.hilite ? hilite_none : ""); - type->formatImpl(s, state, frame); - s.ostr << (s.hilite ? hilite_keyword : "") << " GRANULARITY " << (s.hilite ? hilite_none : ""); - s.ostr << granularity; - } + ASTPtr clone() const override; + void formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const override; }; } diff --git a/src/Parsers/ASTNameTypePair.cpp b/src/Parsers/ASTNameTypePair.cpp new file mode 100644 index 00000000000..6c41d35315c --- /dev/null +++ b/src/Parsers/ASTNameTypePair.cpp @@ -0,0 +1,35 @@ +#include +#include + + +namespace DB +{ + +ASTPtr ASTNameTypePair::clone() const +{ + auto res = std::make_shared(*this); + res->children.clear(); + + if (type) + { + res->type = type; + res->children.push_back(res->type); + } + + return res; +} + + +void ASTNameTypePair::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const +{ + std::string indent_str = settings.one_line ? "" : std::string(4 * frame.indent, ' '); + + settings.ostr << '#'; + settings.ostr << indent_str << backQuoteIfNeed(name) << ' '; + type->formatImpl(settings, state, frame); + settings.ostr << '#'; +} + +} + + diff --git a/src/Parsers/ASTNameTypePair.h b/src/Parsers/ASTNameTypePair.h index 48dd7ae1ac9..638e980cbdc 100644 --- a/src/Parsers/ASTNameTypePair.h +++ b/src/Parsers/ASTNameTypePair.h @@ -1,7 +1,6 @@ #pragma once #include -#include namespace DB @@ -19,29 +18,10 @@ public: /** Get the text that identifies this element. */ String getID(char delim) const override { return "NameTypePair" + (delim + name); } - - ASTPtr clone() const override - { - auto res = std::make_shared(*this); - res->children.clear(); - - if (type) - { - res->type = type; - res->children.push_back(res->type); - } - - return res; - } + ASTPtr clone() const override; protected: - void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override - { - std::string indent_str = settings.one_line ? 
"" : std::string(4 * frame.indent, ' '); - - settings.ostr << settings.nl_or_ws << indent_str << backQuoteIfNeed(name) << " "; - type->formatImpl(settings, state, frame); - } + void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override; }; diff --git a/src/Parsers/ya.make b/src/Parsers/ya.make index 8c7e4ff68af..c1cca094518 100644 --- a/src/Parsers/ya.make +++ b/src/Parsers/ya.make @@ -26,9 +26,11 @@ SRCS( ASTFunctionWithKeyValueArguments.cpp ASTGrantQuery.cpp ASTIdentifier.cpp + ASTIndexDeclaration.cpp ASTInsertQuery.cpp ASTKillQueryQuery.cpp ASTLiteral.cpp + ASTNameTypePair.cpp ASTOptimizeQuery.cpp ASTOrderByElement.cpp ASTPartition.cpp From 2a0da608fd34ad5d35e4ed14a6797451e548c718 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 2 Jun 2020 02:31:50 +0300 Subject: [PATCH 03/52] Update tests --- .../00597_push_down_predicate.reference | 50 +++++++++---------- ...51_default_databasename_for_view.reference | 8 +-- .../00826_cross_to_inner_join.reference | 16 +++--- .../00849_multiple_comma_join.reference | 24 ++++----- .../00849_multiple_comma_join_2.reference | 18 +++---- .../0_stateless/00908_analyze_query.reference | 2 +- ...0957_format_with_clashed_aliases.reference | 2 +- ...58_format_of_tuple_array_element.reference | 14 +++--- .../01056_predicate_optimizer_bugs.reference | 10 ++-- ...76_predicate_optimizer_with_view.reference | 8 +-- .../01083_cross_to_inner_with_like.reference | 6 +-- .../01278_format_multiple_queries.reference | 6 +-- 12 files changed, 82 insertions(+), 82 deletions(-) diff --git a/tests/queries/0_stateless/00597_push_down_predicate.reference b/tests/queries/0_stateless/00597_push_down_predicate.reference index 480b1c4525c..829c5a1577e 100644 --- a/tests/queries/0_stateless/00597_push_down_predicate.reference +++ b/tests/queries/0_stateless/00597_push_down_predicate.reference @@ -4,12 +4,12 @@ 1 2000-01-01 1 test string 1 1 -------Forbid push down------- -SELECT count()\nFROM \n(\n SELECT \n [number] AS a, \n [number * 2] AS b\n FROM system.numbers\n LIMIT 1\n) AS t\nARRAY JOIN \n a, \n b\nWHERE NOT ignore(a + b) +SELECT count()\nFROM \n(\n SELECT \n [number] AS a,\n [number * 2] AS b\n FROM system.numbers\n LIMIT 1\n) AS t\nARRAY JOIN \n a,\n b\nWHERE NOT ignore(a + b) 1 -SELECT \n a, \n b\nFROM \n(\n SELECT 1 AS a\n)\nANY LEFT JOIN \n(\n SELECT \n 1 AS a, \n 1 AS b\n) USING (a)\nWHERE b = 0 -SELECT \n a, \n b\nFROM \n(\n SELECT \n 1 AS a, \n 1 AS b\n)\nANY RIGHT JOIN \n(\n SELECT 1 AS a\n) USING (a)\nWHERE b = 0 -SELECT \n a, \n b\nFROM \n(\n SELECT 1 AS a\n)\nANY FULL OUTER JOIN \n(\n SELECT \n 1 AS a, \n 1 AS b\n) USING (a)\nWHERE b = 0 -SELECT \n a, \n b\nFROM \n(\n SELECT \n 1 AS a, \n 1 AS b\n)\nANY FULL OUTER JOIN \n(\n SELECT 1 AS a\n) USING (a)\nWHERE b = 0 +SELECT \n a,\n b\nFROM \n(\n SELECT 1 AS a\n)\nANY LEFT JOIN \n(\n SELECT \n 1 AS a,\n 1 AS b\n) USING (a)\nWHERE b = 0 +SELECT \n a,\n b\nFROM \n(\n SELECT \n 1 AS a,\n 1 AS b\n)\nANY RIGHT JOIN \n(\n SELECT 1 AS a\n) USING (a)\nWHERE b = 0 +SELECT \n a,\n b\nFROM \n(\n SELECT 1 AS a\n)\nANY FULL OUTER JOIN \n(\n SELECT \n 1 AS a,\n 1 AS b\n) USING (a)\nWHERE b = 0 +SELECT \n a,\n b\nFROM \n(\n SELECT \n 1 AS a,\n 1 AS b\n)\nANY FULL OUTER JOIN \n(\n SELECT 1 AS a\n) USING (a)\nWHERE b = 0 -------Need push down------- SELECT toString(value) AS value\nFROM \n(\n SELECT 1 AS value\n) 1 @@ -19,46 +19,46 @@ SELECT id\nFROM \n(\n SELECT arrayJoin([1, 2, 3]) AS id\n WHERE id = 1\n)\ 1 SELECT id\nFROM \n(\n SELECT arrayJoin([1, 2, 3]) AS id\n 
WHERE id = 1\n)\nWHERE id = 1 1 -SELECT \n id, \n subquery\nFROM \n(\n SELECT \n 1 AS id, \n CAST(1, \'UInt8\') AS subquery\n) +SELECT \n id,\n subquery\nFROM \n(\n SELECT \n 1 AS id,\n CAST(1, \'UInt8\') AS subquery\n) 1 1 -SELECT \n a, \n b\nFROM \n(\n SELECT \n toUInt64(sum(id) AS b) AS a, \n b\n FROM test_00597\n HAVING a = 3\n)\nWHERE a = 3 +SELECT \n a,\n b\nFROM \n(\n SELECT \n toUInt64(sum(id) AS b) AS a,\n b\n FROM test_00597\n HAVING a = 3\n)\nWHERE a = 3 3 3 -SELECT \n date, \n id, \n name, \n value\nFROM \n(\n SELECT \n date, \n name, \n value, \n min(id) AS id\n FROM test_00597\n GROUP BY \n date, \n name, \n value\n HAVING id = 1\n)\nWHERE id = 1 +SELECT \n date,\n id,\n name,\n value\nFROM \n(\n SELECT \n date,\n name,\n value,\n min(id) AS id\n FROM test_00597\n GROUP BY \n date,\n name,\n value\n HAVING id = 1\n)\nWHERE id = 1 2000-01-01 1 test string 1 1 -SELECT \n a, \n b\nFROM \n(\n SELECT \n toUInt64(sum(id) AS b) AS a, \n b\n FROM test_00597 AS table_alias\n HAVING b = 3\n) AS outer_table_alias\nWHERE b = 3 +SELECT \n a,\n b\nFROM \n(\n SELECT \n toUInt64(sum(id) AS b) AS a,\n b\n FROM test_00597 AS table_alias\n HAVING b = 3\n) AS outer_table_alias\nWHERE b = 3 3 3 -SELECT \n date, \n id, \n name, \n value\nFROM \n(\n SELECT \n date, \n id, \n name, \n value\n FROM test_00597\n WHERE id = 1\n)\nWHERE id = 1 +SELECT \n date,\n id,\n name,\n value\nFROM \n(\n SELECT \n date,\n id,\n name,\n value\n FROM test_00597\n WHERE id = 1\n)\nWHERE id = 1 2000-01-01 1 test string 1 1 -SELECT \n date, \n id, \n name, \n value\nFROM \n(\n SELECT \n date, \n id, \n name, \n value\n FROM \n (\n SELECT \n date, \n id, \n name, \n value\n FROM test_00597\n WHERE id = 1\n )\n WHERE id = 1\n)\nWHERE id = 1 +SELECT \n date,\n id,\n name,\n value\nFROM \n(\n SELECT \n date,\n id,\n name,\n value\n FROM \n (\n SELECT \n date,\n id,\n name,\n value\n FROM test_00597\n WHERE id = 1\n )\n WHERE id = 1\n)\nWHERE id = 1 2000-01-01 1 test string 1 1 -SELECT \n date, \n id, \n name, \n value\nFROM \n(\n SELECT \n date, \n id, \n name, \n value\n FROM \n (\n SELECT \n date, \n id, \n name, \n value\n FROM test_00597\n WHERE id = 1\n ) AS b\n WHERE id = 1\n)\nWHERE id = 1 +SELECT \n date,\n id,\n name,\n value\nFROM \n(\n SELECT \n date,\n id,\n name,\n value\n FROM \n (\n SELECT \n date,\n id,\n name,\n value\n FROM test_00597\n WHERE id = 1\n ) AS b\n WHERE id = 1\n)\nWHERE id = 1 2000-01-01 1 test string 1 1 -SELECT \n date, \n id, \n name, \n value\nFROM \n(\n SELECT \n date, \n id, \n name, \n value\n FROM test_00597\n WHERE id = 1\n)\nWHERE id = 1 +SELECT \n date,\n id,\n name,\n value\nFROM \n(\n SELECT \n date,\n id,\n name,\n value\n FROM test_00597\n WHERE id = 1\n)\nWHERE id = 1 2000-01-01 1 test string 1 1 -SELECT \n date, \n id, \n name, \n value\nFROM \n(\n SELECT \n date, \n id, \n name, \n value\n FROM \n (\n SELECT \n date, \n id, \n name, \n value\n FROM test_00597\n WHERE id = 1\n )\n WHERE id = 1\n)\nWHERE id = 1 +SELECT \n date,\n id,\n name,\n value\nFROM \n(\n SELECT \n date,\n id,\n name,\n value\n FROM \n (\n SELECT \n date,\n id,\n name,\n value\n FROM test_00597\n WHERE id = 1\n )\n WHERE id = 1\n)\nWHERE id = 1 2000-01-01 1 test string 1 1 -SELECT \n date, \n id, \n name, \n value\nFROM \n(\n SELECT \n date, \n id, \n name, \n value\n FROM test_00597\n WHERE id = 1\n) AS b\nWHERE id = 1 +SELECT \n date,\n id,\n name,\n value\nFROM \n(\n SELECT \n date,\n id,\n name,\n value\n FROM test_00597\n WHERE id = 1\n) AS b\nWHERE id = 1 2000-01-01 1 test string 1 1 -SELECT \n 
date, \n id, \n name, \n value\nFROM \n(\n SELECT \n date, \n id, \n name, \n value\n FROM \n (\n SELECT \n date, \n id, \n name, \n value\n FROM test_00597\n WHERE id = 1\n ) AS a\n WHERE id = 1\n) AS b\nWHERE id = 1 +SELECT \n date,\n id,\n name,\n value\nFROM \n(\n SELECT \n date,\n id,\n name,\n value\n FROM \n (\n SELECT \n date,\n id,\n name,\n value\n FROM test_00597\n WHERE id = 1\n ) AS a\n WHERE id = 1\n) AS b\nWHERE id = 1 2000-01-01 1 test string 1 1 -SELECT \n id, \n date, \n value\nFROM \n(\n SELECT \n id, \n date, \n min(value) AS value\n FROM test_00597\n WHERE id = 1\n GROUP BY \n id, \n date\n)\nWHERE id = 1 +SELECT \n id,\n date,\n value\nFROM \n(\n SELECT \n id,\n date,\n min(value) AS value\n FROM test_00597\n WHERE id = 1\n GROUP BY \n id,\n date\n)\nWHERE id = 1 1 2000-01-01 1 -SELECT \n date, \n id, \n name, \n value\nFROM \n(\n SELECT \n date, \n id, \n name, \n value\n FROM test_00597\n WHERE id = 1\n UNION ALL\n SELECT \n date, \n id, \n name, \n value\n FROM test_00597\n WHERE id = 1\n)\nWHERE id = 1 +SELECT \n date,\n id,\n name,\n value\nFROM \n(\n SELECT \n date,\n id,\n name,\n value\n FROM test_00597\n WHERE id = 1\n UNION ALL\n SELECT \n date,\n id,\n name,\n value\n FROM test_00597\n WHERE id = 1\n)\nWHERE id = 1 2000-01-01 1 test string 1 1 2000-01-01 1 test string 1 1 -SELECT \n date, \n id, \n name, \n value, \n date, \n name, \n value\nFROM \n(\n SELECT \n date, \n id, \n name, \n value\n FROM test_00597\n WHERE id = 1\n)\nANY LEFT JOIN \n(\n SELECT id\n FROM test_00597\n) USING (id)\nWHERE id = 1 +SELECT \n date,\n id,\n name,\n value,\n date,\n name,\n value\nFROM \n(\n SELECT \n date,\n id,\n name,\n value\n FROM test_00597\n WHERE id = 1\n)\nANY LEFT JOIN \n(\n SELECT id\n FROM test_00597\n) USING (id)\nWHERE id = 1 2000-01-01 1 test string 1 1 2000-01-01 test string 1 1 -SELECT \n id, \n date, \n name, \n value\nFROM \n(\n SELECT toInt8(1) AS id\n)\nANY LEFT JOIN \n(\n SELECT \n date, \n id, \n name, \n value\n FROM test_00597\n) AS test_00597 USING (id)\nWHERE value = 1 +SELECT \n id,\n date,\n name,\n value\nFROM \n(\n SELECT toInt8(1) AS id\n)\nANY LEFT JOIN \n(\n SELECT \n date,\n id,\n name,\n value\n FROM test_00597\n) AS test_00597 USING (id)\nWHERE value = 1 1 2000-01-01 test string 1 1 SELECT value\nFROM \n(\n SELECT toInt8(1) AS id\n)\nANY LEFT JOIN test_00597 AS b USING (id)\nWHERE value = 1 1 -SELECT \n date, \n id, \n name, \n value\nFROM \n(\n SELECT \n date, \n id, \n name, \n value, \n date, \n name, \n value\n FROM \n (\n SELECT \n date, \n id, \n name, \n value\n FROM test_00597\n WHERE id = 1\n )\n ANY LEFT JOIN \n (\n SELECT id\n FROM test_00597\n ) USING (id)\n WHERE id = 1\n)\nWHERE id = 1 +SELECT \n date,\n id,\n name,\n value\nFROM \n(\n SELECT \n date,\n id,\n name,\n value,\n date,\n name,\n value\n FROM \n (\n SELECT \n date,\n id,\n name,\n value\n FROM test_00597\n WHERE id = 1\n )\n ANY LEFT JOIN \n (\n SELECT id\n FROM test_00597\n ) USING (id)\n WHERE id = 1\n)\nWHERE id = 1 2000-01-01 1 test string 1 1 -SELECT \n date, \n id, \n name, \n value, \n b.date, \n b.name, \n b.value\nFROM \n(\n SELECT \n date, \n id, \n name, \n value\n FROM test_00597\n)\nANY LEFT JOIN \n(\n SELECT \n date, \n id, \n name, \n value\n FROM test_00597\n) AS b USING (id)\nWHERE b.id = 1 +SELECT \n date,\n id,\n name,\n value,\n b.date,\n b.name,\n b.value\nFROM \n(\n SELECT \n date,\n id,\n name,\n value\n FROM test_00597\n)\nANY LEFT JOIN \n(\n SELECT \n date,\n id,\n name,\n value\n FROM test_00597\n) AS b USING (id)\nWHERE b.id = 1 
2000-01-01 1 test string 1 1 2000-01-01 test string 1 1 -SELECT \n id, \n date, \n name, \n value\nFROM \n(\n SELECT \n toInt8(1) AS id, \n toDate(\'2000-01-01\') AS date\n FROM system.numbers\n LIMIT 1\n)\nANY LEFT JOIN \n(\n SELECT \n date, \n id, \n name, \n value\n FROM test_00597\n) AS b USING (date, id)\nWHERE b.date = toDate(\'2000-01-01\') +SELECT \n id,\n date,\n name,\n value\nFROM \n(\n SELECT \n toInt8(1) AS id,\n toDate(\'2000-01-01\') AS date\n FROM system.numbers\n LIMIT 1\n)\nANY LEFT JOIN \n(\n SELECT \n date,\n id,\n name,\n value\n FROM test_00597\n) AS b USING (date, id)\nWHERE b.date = toDate(\'2000-01-01\') 1 2000-01-01 test string 1 1 -SELECT \n date, \n id, \n name, \n value, \n `b.date`, \n `b.id`, \n `b.name`, \n `b.value`\nFROM \n(\n SELECT \n date, \n id, \n name, \n value, \n b.date, \n b.id, \n b.name, \n b.value\n FROM \n (\n SELECT \n date, \n id, \n name, \n value\n FROM test_00597\n WHERE id = 1\n ) AS a\n ANY LEFT JOIN \n (\n SELECT \n date, \n id, \n name, \n value\n FROM test_00597\n ) AS b ON id = b.id\n WHERE id = 1\n)\nWHERE id = 1 +SELECT \n date,\n id,\n name,\n value,\n `b.date`,\n `b.id`,\n `b.name`,\n `b.value`\nFROM \n(\n SELECT \n date,\n id,\n name,\n value,\n b.date,\n b.id,\n b.name,\n b.value\n FROM \n (\n SELECT \n date,\n id,\n name,\n value\n FROM test_00597\n WHERE id = 1\n ) AS a\n ANY LEFT JOIN \n (\n SELECT \n date,\n id,\n name,\n value\n FROM test_00597\n ) AS b ON id = b.id\n WHERE id = 1\n)\nWHERE id = 1 2000-01-01 1 test string 1 1 2000-01-01 1 test string 1 1 -SELECT \n date, \n id, \n name, \n value, \n r.date, \n r.name, \n r.value\nFROM \n(\n SELECT \n date, \n id, \n name, \n value\n FROM test_00597\n)\nSEMI LEFT JOIN \n(\n SELECT \n date, \n id, \n name, \n value\n FROM \n (\n SELECT \n date, \n id, \n name, \n value\n FROM test_00597\n WHERE id = 1\n )\n WHERE id = 1\n) AS r USING (id)\nWHERE r.id = 1 +SELECT \n date,\n id,\n name,\n value,\n r.date,\n r.name,\n r.value\nFROM \n(\n SELECT \n date,\n id,\n name,\n value\n FROM test_00597\n)\nSEMI LEFT JOIN \n(\n SELECT \n date,\n id,\n name,\n value\n FROM \n (\n SELECT \n date,\n id,\n name,\n value\n FROM test_00597\n WHERE id = 1\n )\n WHERE id = 1\n) AS r USING (id)\nWHERE r.id = 1 2000-01-01 1 test string 1 1 2000-01-01 test string 1 1 diff --git a/tests/queries/0_stateless/00751_default_databasename_for_view.reference b/tests/queries/0_stateless/00751_default_databasename_for_view.reference index 5ba1861e3ef..4814cc77b37 100644 --- a/tests/queries/0_stateless/00751_default_databasename_for_view.reference +++ b/tests/queries/0_stateless/00751_default_databasename_for_view.reference @@ -1,15 +1,15 @@ CREATE MATERIALIZED VIEW test_00751.t_mv_00751 ( - `date` Date, - `platform` Enum8('a' = 0, 'b' = 1), + `date` Date, + `platform` Enum8('a' = 0, 'b' = 1), `app` Enum8('a' = 0, 'b' = 1) ) ENGINE = MergeTree ORDER BY date SETTINGS index_granularity = 8192 AS SELECT - date, - platform, + date, + platform, app FROM test_00751.t_00751 WHERE (app = diff --git a/tests/queries/0_stateless/00826_cross_to_inner_join.reference b/tests/queries/0_stateless/00826_cross_to_inner_join.reference index 32b1c42ca2c..2a4b1487f20 100644 --- a/tests/queries/0_stateless/00826_cross_to_inner_join.reference +++ b/tests/queries/0_stateless/00826_cross_to_inner_join.reference @@ -35,18 +35,18 @@ comma nullable 1 1 1 1 2 2 1 2 cross -SELECT \n a, \n b, \n t2_00826.a, \n t2_00826.b\nFROM t1_00826\nALL INNER JOIN t2_00826 ON a = t2_00826.a\nWHERE a = t2_00826.a +SELECT \n a,\n b,\n t2_00826.a,\n 
t2_00826.b\nFROM t1_00826\nALL INNER JOIN t2_00826 ON a = t2_00826.a\nWHERE a = t2_00826.a cross nullable -SELECT \n a, \n b, \n t2_00826.a, \n t2_00826.b\nFROM t1_00826\nALL INNER JOIN t2_00826 ON a = t2_00826.a\nWHERE a = t2_00826.a +SELECT \n a,\n b,\n t2_00826.a,\n t2_00826.b\nFROM t1_00826\nALL INNER JOIN t2_00826 ON a = t2_00826.a\nWHERE a = t2_00826.a cross nullable vs not nullable -SELECT \n a, \n b, \n t2_00826.a, \n t2_00826.b\nFROM t1_00826\nALL INNER JOIN t2_00826 ON a = t2_00826.b\nWHERE a = t2_00826.b +SELECT \n a,\n b,\n t2_00826.a,\n t2_00826.b\nFROM t1_00826\nALL INNER JOIN t2_00826 ON a = t2_00826.b\nWHERE a = t2_00826.b cross self -SELECT \n a, \n b, \n y.a, \n y.b\nFROM t1_00826 AS x\nALL INNER JOIN t1_00826 AS y ON (a = y.a) AND (b = y.b)\nWHERE (a = y.a) AND (b = y.b) +SELECT \n a,\n b,\n y.a,\n y.b\nFROM t1_00826 AS x\nALL INNER JOIN t1_00826 AS y ON (a = y.a) AND (b = y.b)\nWHERE (a = y.a) AND (b = y.b) cross one table expr -SELECT \n a, \n b, \n t2_00826.a, \n t2_00826.b\nFROM t1_00826\nCROSS JOIN t2_00826\nWHERE a = b +SELECT \n a,\n b,\n t2_00826.a,\n t2_00826.b\nFROM t1_00826\nCROSS JOIN t2_00826\nWHERE a = b cross multiple ands -SELECT \n a, \n b, \n t2_00826.a, \n t2_00826.b\nFROM t1_00826\nALL INNER JOIN t2_00826 ON (a = t2_00826.a) AND (b = t2_00826.b)\nWHERE (a = t2_00826.a) AND (b = t2_00826.b) +SELECT \n a,\n b,\n t2_00826.a,\n t2_00826.b\nFROM t1_00826\nALL INNER JOIN t2_00826 ON (a = t2_00826.a) AND (b = t2_00826.b)\nWHERE (a = t2_00826.a) AND (b = t2_00826.b) cross and inside and -SELECT \n a, \n b, \n t2_00826.a, \n t2_00826.b\nFROM t1_00826\nALL INNER JOIN t2_00826 ON (a = t2_00826.a) AND (a = t2_00826.a) AND (a = t2_00826.a) AND (b = t2_00826.b)\nWHERE (a = t2_00826.a) AND ((a = t2_00826.a) AND ((a = t2_00826.a) AND (b = t2_00826.b))) +SELECT \n a,\n b,\n t2_00826.a,\n t2_00826.b\nFROM t1_00826\nALL INNER JOIN t2_00826 ON (a = t2_00826.a) AND (a = t2_00826.a) AND (a = t2_00826.a) AND (b = t2_00826.b)\nWHERE (a = t2_00826.a) AND ((a = t2_00826.a) AND ((a = t2_00826.a) AND (b = t2_00826.b))) cross split conjunction -SELECT \n a, \n b, \n t2_00826.a, \n t2_00826.b\nFROM t1_00826\nALL INNER JOIN t2_00826 ON (a = t2_00826.a) AND (b = t2_00826.b)\nWHERE (a = t2_00826.a) AND (b = t2_00826.b) AND (a >= 1) AND (t2_00826.b > 0) +SELECT \n a,\n b,\n t2_00826.a,\n t2_00826.b\nFROM t1_00826\nALL INNER JOIN t2_00826 ON (a = t2_00826.a) AND (b = t2_00826.b)\nWHERE (a = t2_00826.a) AND (b = t2_00826.b) AND (a >= 1) AND (t2_00826.b > 0) diff --git a/tests/queries/0_stateless/00849_multiple_comma_join.reference b/tests/queries/0_stateless/00849_multiple_comma_join.reference index 829a5d25e54..0f7d28b65a0 100644 --- a/tests/queries/0_stateless/00849_multiple_comma_join.reference +++ b/tests/queries/0_stateless/00849_multiple_comma_join.reference @@ -1,18 +1,18 @@ SELECT a\nFROM t1_00849\nCROSS JOIN t2_00849 SELECT a\nFROM t1_00849\nALL INNER JOIN t2_00849 ON a = t2_00849.a\nWHERE a = t2_00849.a SELECT a\nFROM t1_00849\nALL INNER JOIN t2_00849 ON b = t2_00849.b\nWHERE b = t2_00849.b -SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n a AS `--t1_00849.a`, \n b, \n t2_00849.a AS `--t2_00849.a`, \n t2_00849.b\n FROM t1_00849\n ALL INNER JOIN t2_00849 ON `--t1_00849.a` = `--t2_00849.a`\n) AS `--.s`\nALL INNER JOIN t3_00849 ON `--t1_00849.a` = a\nWHERE (`--t1_00849.a` = `--t2_00849.a`) AND (`--t1_00849.a` = a) -SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n a AS `--t1_00849.a`, \n b AS `--t1_00849.b`, \n t2_00849.a, \n t2_00849.b AS `--t2_00849.b`\n 
FROM t1_00849\n ALL INNER JOIN t2_00849 ON `--t1_00849.b` = `--t2_00849.b`\n) AS `--.s`\nALL INNER JOIN t3_00849 ON `--t1_00849.b` = b\nWHERE (`--t1_00849.b` = `--t2_00849.b`) AND (`--t1_00849.b` = b) -SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n `--t1_00849.a`, \n b, \n `--t2_00849.a`, \n `t2_00849.b`, \n a AS `--t3_00849.a`, \n t3_00849.b\n FROM \n (\n SELECT \n a AS `--t1_00849.a`, \n b, \n t2_00849.a AS `--t2_00849.a`, \n t2_00849.b\n FROM t1_00849\n ALL INNER JOIN t2_00849 ON `--t1_00849.a` = `--t2_00849.a`\n ) AS `--.s`\n ALL INNER JOIN t3_00849 ON `--t1_00849.a` = `--t3_00849.a`\n) AS `--.s`\nALL INNER JOIN t4_00849 ON `--t1_00849.a` = a\nWHERE (`--t1_00849.a` = `--t2_00849.a`) AND (`--t1_00849.a` = `--t3_00849.a`) AND (`--t1_00849.a` = a) -SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n `--t1_00849.a`, \n `--t1_00849.b`, \n `t2_00849.a`, \n `--t2_00849.b`, \n a, \n b AS `--t3_00849.b`\n FROM \n (\n SELECT \n a AS `--t1_00849.a`, \n b AS `--t1_00849.b`, \n t2_00849.a, \n t2_00849.b AS `--t2_00849.b`\n FROM t1_00849\n ALL INNER JOIN t2_00849 ON `--t1_00849.b` = `--t2_00849.b`\n ) AS `--.s`\n ALL INNER JOIN t3_00849 ON `--t1_00849.b` = `--t3_00849.b`\n) AS `--.s`\nALL INNER JOIN t4_00849 ON `--t1_00849.b` = b\nWHERE (`--t1_00849.b` = `--t2_00849.b`) AND (`--t1_00849.b` = `--t3_00849.b`) AND (`--t1_00849.b` = b) -SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n `--t1_00849.a`, \n b, \n `--t2_00849.a`, \n `t2_00849.b`, \n a AS `--t3_00849.a`, \n t3_00849.b\n FROM \n (\n SELECT \n a AS `--t1_00849.a`, \n b, \n t2_00849.a AS `--t2_00849.a`, \n t2_00849.b\n FROM t1_00849\n ALL INNER JOIN t2_00849 ON `--t2_00849.a` = `--t1_00849.a`\n ) AS `--.s`\n ALL INNER JOIN t3_00849 ON `--t2_00849.a` = `--t3_00849.a`\n) AS `--.s`\nALL INNER JOIN t4_00849 ON `--t2_00849.a` = a\nWHERE (`--t2_00849.a` = `--t1_00849.a`) AND (`--t2_00849.a` = `--t3_00849.a`) AND (`--t2_00849.a` = a) -SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n `--t1_00849.a`, \n b, \n `--t2_00849.a`, \n `t2_00849.b`, \n a AS `--t3_00849.a`, \n t3_00849.b\n FROM \n (\n SELECT \n a AS `--t1_00849.a`, \n b, \n t2_00849.a AS `--t2_00849.a`, \n t2_00849.b\n FROM t1_00849\n CROSS JOIN t2_00849\n ) AS `--.s`\n ALL INNER JOIN t3_00849 ON (`--t3_00849.a` = `--t1_00849.a`) AND (`--t3_00849.a` = `--t2_00849.a`)\n) AS `--.s`\nALL INNER JOIN t4_00849 ON `--t3_00849.a` = a\nWHERE (`--t3_00849.a` = `--t1_00849.a`) AND (`--t3_00849.a` = `--t2_00849.a`) AND (`--t3_00849.a` = a) -SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n `--t1_00849.a`, \n b, \n `--t2_00849.a`, \n `t2_00849.b`, \n a AS `--t3_00849.a`, \n t3_00849.b\n FROM \n (\n SELECT \n a AS `--t1_00849.a`, \n b, \n t2_00849.a AS `--t2_00849.a`, \n t2_00849.b\n FROM t1_00849\n CROSS JOIN t2_00849\n ) AS `--.s`\n CROSS JOIN t3_00849\n) AS `--.s`\nALL INNER JOIN t4_00849 ON (a = `--t1_00849.a`) AND (a = `--t2_00849.a`) AND (a = `--t3_00849.a`)\nWHERE (a = `--t1_00849.a`) AND (a = `--t2_00849.a`) AND (a = `--t3_00849.a`) -SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n `--t1_00849.a`, \n b, \n `--t2_00849.a`, \n `t2_00849.b`, \n a AS `--t3_00849.a`, \n t3_00849.b\n FROM \n (\n SELECT \n a AS `--t1_00849.a`, \n b, \n t2_00849.a AS `--t2_00849.a`, \n t2_00849.b\n FROM t1_00849\n ALL INNER JOIN t2_00849 ON `--t1_00849.a` = `--t2_00849.a`\n ) AS `--.s`\n ALL INNER JOIN t3_00849 ON `--t2_00849.a` = `--t3_00849.a`\n) AS `--.s`\nALL INNER JOIN t4_00849 ON `--t3_00849.a` = a\nWHERE (`--t1_00849.a` = `--t2_00849.a`) AND (`--t2_00849.a` 
= `--t3_00849.a`) AND (`--t3_00849.a` = a) -SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n `--t1_00849.a`, \n b, \n `t2_00849.a`, \n `t2_00849.b`, \n a, \n t3_00849.b\n FROM \n (\n SELECT \n a AS `--t1_00849.a`, \n b, \n t2_00849.a, \n t2_00849.b\n FROM t1_00849\n CROSS JOIN t2_00849\n ) AS `--.s`\n CROSS JOIN t3_00849\n) AS `--.s`\nCROSS JOIN t4_00849 -SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n `--t1_00849.a`, \n b, \n `t2_00849.a`, \n `t2_00849.b`, \n a, \n t3_00849.b\n FROM \n (\n SELECT \n a AS `--t1_00849.a`, \n b, \n t2_00849.a, \n t2_00849.b\n FROM t1_00849\n CROSS JOIN t2_00849\n ) AS `--.s`\n CROSS JOIN t3_00849\n) AS `--.s`\nCROSS JOIN t4_00849 -SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n a AS `--t1_00849.a`, \n b, \n t2_00849.a, \n t2_00849.b\n FROM t1_00849\n CROSS JOIN t2_00849\n) AS `--.s`\nCROSS JOIN t3_00849 -SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n a AS `--t1_00849.a`, \n b, \n t2_00849.a AS `--t2_00849.a`, \n t2_00849.b\n FROM t1_00849\n ALL INNER JOIN t2_00849 ON `--t1_00849.a` = `--t2_00849.a`\n) AS `--.s`\nCROSS JOIN t3_00849 +SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n a AS `--t1_00849.a`,\n b,\n t2_00849.a AS `--t2_00849.a`,\n t2_00849.b\n FROM t1_00849\n ALL INNER JOIN t2_00849 ON `--t1_00849.a` = `--t2_00849.a`\n) AS `--.s`\nALL INNER JOIN t3_00849 ON `--t1_00849.a` = a\nWHERE (`--t1_00849.a` = `--t2_00849.a`) AND (`--t1_00849.a` = a) +SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n a AS `--t1_00849.a`,\n b AS `--t1_00849.b`,\n t2_00849.a,\n t2_00849.b AS `--t2_00849.b`\n FROM t1_00849\n ALL INNER JOIN t2_00849 ON `--t1_00849.b` = `--t2_00849.b`\n) AS `--.s`\nALL INNER JOIN t3_00849 ON `--t1_00849.b` = b\nWHERE (`--t1_00849.b` = `--t2_00849.b`) AND (`--t1_00849.b` = b) +SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n `--t1_00849.a`,\n b,\n `--t2_00849.a`,\n `t2_00849.b`,\n a AS `--t3_00849.a`,\n t3_00849.b\n FROM \n (\n SELECT \n a AS `--t1_00849.a`,\n b,\n t2_00849.a AS `--t2_00849.a`,\n t2_00849.b\n FROM t1_00849\n ALL INNER JOIN t2_00849 ON `--t1_00849.a` = `--t2_00849.a`\n ) AS `--.s`\n ALL INNER JOIN t3_00849 ON `--t1_00849.a` = `--t3_00849.a`\n) AS `--.s`\nALL INNER JOIN t4_00849 ON `--t1_00849.a` = a\nWHERE (`--t1_00849.a` = `--t2_00849.a`) AND (`--t1_00849.a` = `--t3_00849.a`) AND (`--t1_00849.a` = a) +SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n `--t1_00849.a`,\n `--t1_00849.b`,\n `t2_00849.a`,\n `--t2_00849.b`,\n a,\n b AS `--t3_00849.b`\n FROM \n (\n SELECT \n a AS `--t1_00849.a`,\n b AS `--t1_00849.b`,\n t2_00849.a,\n t2_00849.b AS `--t2_00849.b`\n FROM t1_00849\n ALL INNER JOIN t2_00849 ON `--t1_00849.b` = `--t2_00849.b`\n ) AS `--.s`\n ALL INNER JOIN t3_00849 ON `--t1_00849.b` = `--t3_00849.b`\n) AS `--.s`\nALL INNER JOIN t4_00849 ON `--t1_00849.b` = b\nWHERE (`--t1_00849.b` = `--t2_00849.b`) AND (`--t1_00849.b` = `--t3_00849.b`) AND (`--t1_00849.b` = b) +SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n `--t1_00849.a`,\n b,\n `--t2_00849.a`,\n `t2_00849.b`,\n a AS `--t3_00849.a`,\n t3_00849.b\n FROM \n (\n SELECT \n a AS `--t1_00849.a`,\n b,\n t2_00849.a AS `--t2_00849.a`,\n t2_00849.b\n FROM t1_00849\n ALL INNER JOIN t2_00849 ON `--t2_00849.a` = `--t1_00849.a`\n ) AS `--.s`\n ALL INNER JOIN t3_00849 ON `--t2_00849.a` = `--t3_00849.a`\n) AS `--.s`\nALL INNER JOIN t4_00849 ON `--t2_00849.a` = a\nWHERE (`--t2_00849.a` = `--t1_00849.a`) AND (`--t2_00849.a` = `--t3_00849.a`) AND (`--t2_00849.a` = a) +SELECT `--t1_00849.a` AS 
`t1_00849.a`\nFROM \n(\n SELECT \n `--t1_00849.a`,\n b,\n `--t2_00849.a`,\n `t2_00849.b`,\n a AS `--t3_00849.a`,\n t3_00849.b\n FROM \n (\n SELECT \n a AS `--t1_00849.a`,\n b,\n t2_00849.a AS `--t2_00849.a`,\n t2_00849.b\n FROM t1_00849\n CROSS JOIN t2_00849\n ) AS `--.s`\n ALL INNER JOIN t3_00849 ON (`--t3_00849.a` = `--t1_00849.a`) AND (`--t3_00849.a` = `--t2_00849.a`)\n) AS `--.s`\nALL INNER JOIN t4_00849 ON `--t3_00849.a` = a\nWHERE (`--t3_00849.a` = `--t1_00849.a`) AND (`--t3_00849.a` = `--t2_00849.a`) AND (`--t3_00849.a` = a) +SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n `--t1_00849.a`,\n b,\n `--t2_00849.a`,\n `t2_00849.b`,\n a AS `--t3_00849.a`,\n t3_00849.b\n FROM \n (\n SELECT \n a AS `--t1_00849.a`,\n b,\n t2_00849.a AS `--t2_00849.a`,\n t2_00849.b\n FROM t1_00849\n CROSS JOIN t2_00849\n ) AS `--.s`\n CROSS JOIN t3_00849\n) AS `--.s`\nALL INNER JOIN t4_00849 ON (a = `--t1_00849.a`) AND (a = `--t2_00849.a`) AND (a = `--t3_00849.a`)\nWHERE (a = `--t1_00849.a`) AND (a = `--t2_00849.a`) AND (a = `--t3_00849.a`) +SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n `--t1_00849.a`,\n b,\n `--t2_00849.a`,\n `t2_00849.b`,\n a AS `--t3_00849.a`,\n t3_00849.b\n FROM \n (\n SELECT \n a AS `--t1_00849.a`,\n b,\n t2_00849.a AS `--t2_00849.a`,\n t2_00849.b\n FROM t1_00849\n ALL INNER JOIN t2_00849 ON `--t1_00849.a` = `--t2_00849.a`\n ) AS `--.s`\n ALL INNER JOIN t3_00849 ON `--t2_00849.a` = `--t3_00849.a`\n) AS `--.s`\nALL INNER JOIN t4_00849 ON `--t3_00849.a` = a\nWHERE (`--t1_00849.a` = `--t2_00849.a`) AND (`--t2_00849.a` = `--t3_00849.a`) AND (`--t3_00849.a` = a) +SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n `--t1_00849.a`,\n b,\n `t2_00849.a`,\n `t2_00849.b`,\n a,\n t3_00849.b\n FROM \n (\n SELECT \n a AS `--t1_00849.a`,\n b,\n t2_00849.a,\n t2_00849.b\n FROM t1_00849\n CROSS JOIN t2_00849\n ) AS `--.s`\n CROSS JOIN t3_00849\n) AS `--.s`\nCROSS JOIN t4_00849 +SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n `--t1_00849.a`,\n b,\n `t2_00849.a`,\n `t2_00849.b`,\n a,\n t3_00849.b\n FROM \n (\n SELECT \n a AS `--t1_00849.a`,\n b,\n t2_00849.a,\n t2_00849.b\n FROM t1_00849\n CROSS JOIN t2_00849\n ) AS `--.s`\n CROSS JOIN t3_00849\n) AS `--.s`\nCROSS JOIN t4_00849 +SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n a AS `--t1_00849.a`,\n b,\n t2_00849.a,\n t2_00849.b\n FROM t1_00849\n CROSS JOIN t2_00849\n) AS `--.s`\nCROSS JOIN t3_00849 +SELECT `--t1_00849.a` AS `t1_00849.a`\nFROM \n(\n SELECT \n a AS `--t1_00849.a`,\n b,\n t2_00849.a AS `--t2_00849.a`,\n t2_00849.b\n FROM t1_00849\n ALL INNER JOIN t2_00849 ON `--t1_00849.a` = `--t2_00849.a`\n) AS `--.s`\nCROSS JOIN t3_00849 SELECT * FROM t1, t2 1 1 1 1 1 1 1 \N diff --git a/tests/queries/0_stateless/00849_multiple_comma_join_2.reference b/tests/queries/0_stateless/00849_multiple_comma_join_2.reference index 7875c1e9e86..f2e832123e0 100644 --- a/tests/queries/0_stateless/00849_multiple_comma_join_2.reference +++ b/tests/queries/0_stateless/00849_multiple_comma_join_2.reference @@ -1,18 +1,18 @@ SELECT a\nFROM t1\nCROSS JOIN t2 SELECT a\nFROM t1\nALL INNER JOIN t2 ON a = t2.a\nWHERE a = t2.a SELECT a\nFROM t1\nALL INNER JOIN t2 ON b = t2.b\nWHERE b = t2.b -SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT \n a AS `--t1.a`, \n t2.a AS `--t2.a`\n FROM t1\n ALL INNER JOIN t2 ON `--t1.a` = `--t2.a`\n) AS `--.s`\nALL INNER JOIN t3 ON `--t1.a` = a\nWHERE (`--t1.a` = `--t2.a`) AND (`--t1.a` = a) -SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT \n b AS `--t1.b`, \n a AS `--t1.a`, \n t2.b AS `--t2.b`\n 
FROM t1\n ALL INNER JOIN t2 ON `--t1.b` = `--t2.b`\n) AS `--.s`\nALL INNER JOIN t3 ON `--t1.b` = b\nWHERE (`--t1.b` = `--t2.b`) AND (`--t1.b` = b) -SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT \n `--t1.a`, \n `--t2.a`, \n a AS `--t3.a`\n FROM \n (\n SELECT \n a AS `--t1.a`, \n t2.a AS `--t2.a`\n FROM t1\n ALL INNER JOIN t2 ON `--t1.a` = `--t2.a`\n ) AS `--.s`\n ALL INNER JOIN t3 ON `--t1.a` = `--t3.a`\n) AS `--.s`\nALL INNER JOIN t4 ON `--t1.a` = a\nWHERE (`--t1.a` = `--t2.a`) AND (`--t1.a` = `--t3.a`) AND (`--t1.a` = a) -SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT \n `--t1.b`, \n `--t1.a`, \n `--t2.b`, \n b AS `--t3.b`\n FROM \n (\n SELECT \n b AS `--t1.b`, \n a AS `--t1.a`, \n t2.b AS `--t2.b`\n FROM t1\n ALL INNER JOIN t2 ON `--t1.b` = `--t2.b`\n ) AS `--.s`\n ALL INNER JOIN t3 ON `--t1.b` = `--t3.b`\n) AS `--.s`\nALL INNER JOIN t4 ON `--t1.b` = b\nWHERE (`--t1.b` = `--t2.b`) AND (`--t1.b` = `--t3.b`) AND (`--t1.b` = b) -SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT \n `--t1.a`, \n `--t2.a`, \n a AS `--t3.a`\n FROM \n (\n SELECT \n a AS `--t1.a`, \n t2.a AS `--t2.a`\n FROM t1\n ALL INNER JOIN t2 ON `--t2.a` = `--t1.a`\n ) AS `--.s`\n ALL INNER JOIN t3 ON `--t2.a` = `--t3.a`\n) AS `--.s`\nALL INNER JOIN t4 ON `--t2.a` = a\nWHERE (`--t2.a` = `--t1.a`) AND (`--t2.a` = `--t3.a`) AND (`--t2.a` = a) -SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT \n `--t1.a`, \n `--t2.a`, \n a AS `--t3.a`\n FROM \n (\n SELECT \n a AS `--t1.a`, \n t2.a AS `--t2.a`\n FROM t1\n CROSS JOIN t2\n ) AS `--.s`\n ALL INNER JOIN t3 ON (`--t3.a` = `--t1.a`) AND (`--t3.a` = `--t2.a`)\n) AS `--.s`\nALL INNER JOIN t4 ON `--t3.a` = a\nWHERE (`--t3.a` = `--t1.a`) AND (`--t3.a` = `--t2.a`) AND (`--t3.a` = a) -SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT \n `--t1.a`, \n `--t2.a`, \n a AS `--t3.a`\n FROM \n (\n SELECT \n a AS `--t1.a`, \n t2.a AS `--t2.a`\n FROM t1\n CROSS JOIN t2\n ) AS `--.s`\n CROSS JOIN t3\n) AS `--.s`\nALL INNER JOIN t4 ON (a = `--t1.a`) AND (a = `--t2.a`) AND (a = `--t3.a`)\nWHERE (a = `--t1.a`) AND (a = `--t2.a`) AND (a = `--t3.a`) -SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT \n `--t1.a`, \n `--t2.a`, \n a AS `--t3.a`\n FROM \n (\n SELECT \n a AS `--t1.a`, \n t2.a AS `--t2.a`\n FROM t1\n ALL INNER JOIN t2 ON `--t1.a` = `--t2.a`\n ) AS `--.s`\n ALL INNER JOIN t3 ON `--t2.a` = `--t3.a`\n) AS `--.s`\nALL INNER JOIN t4 ON `--t3.a` = a\nWHERE (`--t1.a` = `--t2.a`) AND (`--t2.a` = `--t3.a`) AND (`--t3.a` = a) +SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT \n a AS `--t1.a`,\n t2.a AS `--t2.a`\n FROM t1\n ALL INNER JOIN t2 ON `--t1.a` = `--t2.a`\n) AS `--.s`\nALL INNER JOIN t3 ON `--t1.a` = a\nWHERE (`--t1.a` = `--t2.a`) AND (`--t1.a` = a) +SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT \n b AS `--t1.b`,\n a AS `--t1.a`,\n t2.b AS `--t2.b`\n FROM t1\n ALL INNER JOIN t2 ON `--t1.b` = `--t2.b`\n) AS `--.s`\nALL INNER JOIN t3 ON `--t1.b` = b\nWHERE (`--t1.b` = `--t2.b`) AND (`--t1.b` = b) +SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT \n `--t1.a`,\n `--t2.a`,\n a AS `--t3.a`\n FROM \n (\n SELECT \n a AS `--t1.a`,\n t2.a AS `--t2.a`\n FROM t1\n ALL INNER JOIN t2 ON `--t1.a` = `--t2.a`\n ) AS `--.s`\n ALL INNER JOIN t3 ON `--t1.a` = `--t3.a`\n) AS `--.s`\nALL INNER JOIN t4 ON `--t1.a` = a\nWHERE (`--t1.a` = `--t2.a`) AND (`--t1.a` = `--t3.a`) AND (`--t1.a` = a) +SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT \n `--t1.b`,\n `--t1.a`,\n `--t2.b`,\n b AS `--t3.b`\n FROM \n (\n SELECT \n b AS `--t1.b`,\n a AS `--t1.a`,\n t2.b AS `--t2.b`\n FROM t1\n ALL INNER JOIN t2 ON `--t1.b` = `--t2.b`\n ) AS `--.s`\n ALL INNER JOIN t3 ON 
`--t1.b` = `--t3.b`\n) AS `--.s`\nALL INNER JOIN t4 ON `--t1.b` = b\nWHERE (`--t1.b` = `--t2.b`) AND (`--t1.b` = `--t3.b`) AND (`--t1.b` = b) +SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT \n `--t1.a`,\n `--t2.a`,\n a AS `--t3.a`\n FROM \n (\n SELECT \n a AS `--t1.a`,\n t2.a AS `--t2.a`\n FROM t1\n ALL INNER JOIN t2 ON `--t2.a` = `--t1.a`\n ) AS `--.s`\n ALL INNER JOIN t3 ON `--t2.a` = `--t3.a`\n) AS `--.s`\nALL INNER JOIN t4 ON `--t2.a` = a\nWHERE (`--t2.a` = `--t1.a`) AND (`--t2.a` = `--t3.a`) AND (`--t2.a` = a) +SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT \n `--t1.a`,\n `--t2.a`,\n a AS `--t3.a`\n FROM \n (\n SELECT \n a AS `--t1.a`,\n t2.a AS `--t2.a`\n FROM t1\n CROSS JOIN t2\n ) AS `--.s`\n ALL INNER JOIN t3 ON (`--t3.a` = `--t1.a`) AND (`--t3.a` = `--t2.a`)\n) AS `--.s`\nALL INNER JOIN t4 ON `--t3.a` = a\nWHERE (`--t3.a` = `--t1.a`) AND (`--t3.a` = `--t2.a`) AND (`--t3.a` = a) +SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT \n `--t1.a`,\n `--t2.a`,\n a AS `--t3.a`\n FROM \n (\n SELECT \n a AS `--t1.a`,\n t2.a AS `--t2.a`\n FROM t1\n CROSS JOIN t2\n ) AS `--.s`\n CROSS JOIN t3\n) AS `--.s`\nALL INNER JOIN t4 ON (a = `--t1.a`) AND (a = `--t2.a`) AND (a = `--t3.a`)\nWHERE (a = `--t1.a`) AND (a = `--t2.a`) AND (a = `--t3.a`) +SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT \n `--t1.a`,\n `--t2.a`,\n a AS `--t3.a`\n FROM \n (\n SELECT \n a AS `--t1.a`,\n t2.a AS `--t2.a`\n FROM t1\n ALL INNER JOIN t2 ON `--t1.a` = `--t2.a`\n ) AS `--.s`\n ALL INNER JOIN t3 ON `--t2.a` = `--t3.a`\n) AS `--.s`\nALL INNER JOIN t4 ON `--t3.a` = a\nWHERE (`--t1.a` = `--t2.a`) AND (`--t2.a` = `--t3.a`) AND (`--t3.a` = a) SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT `--t1.a`\n FROM \n (\n SELECT a AS `--t1.a`\n FROM t1\n CROSS JOIN t2\n ) AS `--.s`\n CROSS JOIN t3\n) AS `--.s`\nCROSS JOIN t4 SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT `--t1.a`\n FROM \n (\n SELECT a AS `--t1.a`\n FROM t1\n CROSS JOIN t2\n ) AS `--.s`\n CROSS JOIN t3\n) AS `--.s`\nCROSS JOIN t4 SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT a AS `--t1.a`\n FROM t1\n CROSS JOIN t2\n) AS `--.s`\nCROSS JOIN t3 -SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT \n a AS `--t1.a`, \n t2.a AS `--t2.a`\n FROM t1\n ALL INNER JOIN t2 ON `--t1.a` = `--t2.a`\n) AS `--.s`\nCROSS JOIN t3 +SELECT `--t1.a` AS `t1.a`\nFROM \n(\n SELECT \n a AS `--t1.a`,\n t2.a AS `--t2.a`\n FROM t1\n ALL INNER JOIN t2 ON `--t1.a` = `--t2.a`\n) AS `--.s`\nCROSS JOIN t3 SELECT * FROM t1, t2 1 1 1 1 1 1 1 \N diff --git a/tests/queries/0_stateless/00908_analyze_query.reference b/tests/queries/0_stateless/00908_analyze_query.reference index a8619cfcd4b..66db6f5a2e4 100644 --- a/tests/queries/0_stateless/00908_analyze_query.reference +++ b/tests/queries/0_stateless/00908_analyze_query.reference @@ -1 +1 @@ -SELECT \n a, \n b\nFROM a +SELECT \n a,\n b\nFROM a diff --git a/tests/queries/0_stateless/00957_format_with_clashed_aliases.reference b/tests/queries/0_stateless/00957_format_with_clashed_aliases.reference index d3f7a9aa18b..d1c8033b363 100644 --- a/tests/queries/0_stateless/00957_format_with_clashed_aliases.reference +++ b/tests/queries/0_stateless/00957_format_with_clashed_aliases.reference @@ -1,5 +1,5 @@ SELECT - 1 AS x, + 1 AS x, x.y FROM ( diff --git a/tests/queries/0_stateless/00958_format_of_tuple_array_element.reference b/tests/queries/0_stateless/00958_format_of_tuple_array_element.reference index 7265311960f..eaea02ba40b 100644 --- a/tests/queries/0_stateless/00958_format_of_tuple_array_element.reference +++ b/tests/queries/0_stateless/00958_format_of_tuple_array_element.reference 
@@ -1,9 +1,9 @@ SELECT - (x.1)[1], - (((x[1]).1)[1]).1, - (NOT x)[1], - -(x[1]), - (-x)[1], - (NOT x).1, - -(x.1), + (x.1)[1], + (((x[1]).1)[1]).1, + (NOT x)[1], + -(x[1]), + (-x)[1], + (NOT x).1, + -(x.1), (-x).1 diff --git a/tests/queries/0_stateless/01056_predicate_optimizer_bugs.reference b/tests/queries/0_stateless/01056_predicate_optimizer_bugs.reference index bd132202979..c797226d832 100644 --- a/tests/queries/0_stateless/01056_predicate_optimizer_bugs.reference +++ b/tests/queries/0_stateless/01056_predicate_optimizer_bugs.reference @@ -1,10 +1,10 @@ -SELECT \n k, \n v, \n d, \n i\nFROM \n(\n SELECT \n t.1 AS k, \n t.2 AS v, \n runningDifference(v) AS d, \n runningDifference(cityHash64(t.1)) AS i\n FROM \n (\n SELECT arrayJoin([(\'a\', 1), (\'a\', 2), (\'a\', 3), (\'b\', 11), (\'b\', 13), (\'b\', 15)]) AS t\n )\n)\nWHERE i = 0 +SELECT \n k,\n v,\n d,\n i\nFROM \n(\n SELECT \n t.1 AS k,\n t.2 AS v,\n runningDifference(v) AS d,\n runningDifference(cityHash64(t.1)) AS i\n FROM \n (\n SELECT arrayJoin([(\'a\', 1), (\'a\', 2), (\'a\', 3), (\'b\', 11), (\'b\', 13), (\'b\', 15)]) AS t\n )\n)\nWHERE i = 0 a 1 0 0 a 2 1 0 a 3 1 0 b 13 2 0 b 15 2 0 -SELECT \n co, \n co2, \n co3, \n num\nFROM \n(\n SELECT \n co, \n co2, \n co3, \n count() AS num\n FROM \n (\n SELECT \n 1 AS co, \n 2 AS co2, \n 3 AS co3\n )\n GROUP BY \n co, \n co2, \n co3\n WITH CUBE\n HAVING (co2 != 2) AND (co != 0)\n)\nWHERE (co != 0) AND (co2 != 2) +SELECT \n co,\n co2,\n co3,\n num\nFROM \n(\n SELECT \n co,\n co2,\n co3,\n count() AS num\n FROM \n (\n SELECT \n 1 AS co,\n 2 AS co2,\n 3 AS co3\n )\n GROUP BY \n co,\n co2,\n co3\n WITH CUBE\n HAVING (co2 != 2) AND (co != 0)\n)\nWHERE (co != 0) AND (co2 != 2) 1 0 3 1 1 0 0 1 SELECT alias AS name\nFROM \n(\n SELECT name AS alias\n FROM system.settings\n WHERE alias = \'enable_optimize_predicate_expression\'\n)\nANY INNER JOIN \n(\n SELECT name\n FROM system.settings\n) USING (name)\nWHERE name = \'enable_optimize_predicate_expression\' @@ -12,8 +12,8 @@ enable_optimize_predicate_expression 1 val11 val21 val31 SELECT ccc\nFROM \n(\n SELECT 1 AS ccc\n WHERE 0\n UNION ALL\n SELECT ccc\n FROM \n (\n SELECT 2 AS ccc\n )\n ANY INNER JOIN \n (\n SELECT 2 AS ccc\n ) USING (ccc)\n WHERE ccc > 1\n)\nWHERE ccc > 1 2 -SELECT \n ts, \n id, \n id_b, \n b.ts, \n b.id, \n id_c\nFROM \n(\n SELECT \n ts, \n id, \n id_b\n FROM A\n WHERE ts <= toDateTime(\'1970-01-01 03:00:00\')\n) AS a\nALL LEFT JOIN B AS b ON b.id = id_b\nWHERE ts <= toDateTime(\'1970-01-01 03:00:00\') -SELECT \n ts AS `--a.ts`, \n id AS `--a.id`, \n id_b AS `--a.id_b`, \n b.ts AS `--b.ts`, \n b.id AS `--b.id`, \n id_c AS `--b.id_c`\nFROM \n(\n SELECT \n ts, \n id, \n id_b\n FROM A\n WHERE ts <= toDateTime(\'1970-01-01 03:00:00\')\n) AS a\nALL LEFT JOIN B AS b ON `--b.id` = `--a.id_b`\nWHERE `--a.ts` <= toDateTime(\'1970-01-01 03:00:00\') +SELECT \n ts,\n id,\n id_b,\n b.ts,\n b.id,\n id_c\nFROM \n(\n SELECT \n ts,\n id,\n id_b\n FROM A\n WHERE ts <= toDateTime(\'1970-01-01 03:00:00\')\n) AS a\nALL LEFT JOIN B AS b ON b.id = id_b\nWHERE ts <= toDateTime(\'1970-01-01 03:00:00\') +SELECT \n ts AS `--a.ts`,\n id AS `--a.id`,\n id_b AS `--a.id_b`,\n b.ts AS `--b.ts`,\n b.id AS `--b.id`,\n id_c AS `--b.id_c`\nFROM \n(\n SELECT \n ts,\n id,\n id_b\n FROM A\n WHERE ts <= toDateTime(\'1970-01-01 03:00:00\')\n) AS a\nALL LEFT JOIN B AS b ON `--b.id` = `--a.id_b`\nWHERE `--a.ts` <= toDateTime(\'1970-01-01 03:00:00\') 2 3 3 4 4 5 @@ -24,6 +24,6 @@ SELECT \n ts AS `--a.ts`, \n id AS `--a.id`, \n id_b AS `--a.id_b`, \n 4 5 SELECT 
dummy\nFROM \n(\n SELECT dummy\n FROM system.one\n WHERE arrayMap(x -> (x + 1), [dummy]) = [1]\n)\nWHERE arrayMap(x -> (x + 1), [dummy]) = [1] 0 -SELECT \n id, \n value, \n value_1\nFROM \n(\n SELECT \n 1 AS id, \n 2 AS value\n)\nALL INNER JOIN \n(\n SELECT \n 1 AS id, \n 3 AS value_1\n) USING (id)\nWHERE arrayMap(x -> ((x + value) + value_1), [1]) = [6] +SELECT \n id,\n value,\n value_1\nFROM \n(\n SELECT \n 1 AS id,\n 2 AS value\n)\nALL INNER JOIN \n(\n SELECT \n 1 AS id,\n 3 AS value_1\n) USING (id)\nWHERE arrayMap(x -> ((x + value) + value_1), [1]) = [6] 1 2 3 SELECT dummy\nFROM system.one\nWHERE (dummy > 0) AND (dummy < 0) diff --git a/tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference b/tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference index 1e92e7b8596..e2c3b5dab4a 100644 --- a/tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference +++ b/tests/queries/0_stateless/01076_predicate_optimizer_with_view.reference @@ -1,4 +1,4 @@ -SELECT \n date, \n id, \n name, \n value\nFROM \n(\n SELECT \n date, \n id, \n name, \n value\n FROM default.test\n WHERE id = 1\n)\nWHERE id = 1 -SELECT \n date, \n id, \n name, \n value\nFROM \n(\n SELECT \n date, \n id, \n name, \n value\n FROM default.test\n WHERE id = 2\n)\nWHERE id = 2 -SELECT id\nFROM \n(\n SELECT \n date, \n id, \n name, \n value\n FROM default.test\n WHERE id = 1\n)\nWHERE id = 1 -SELECT id\nFROM \n(\n SELECT \n date, \n id, \n name, \n value\n FROM default.test\n WHERE id = 1\n) AS s\nWHERE id = 1 +SELECT \n date,\n id,\n name,\n value\nFROM \n(\n SELECT \n date,\n id,\n name,\n value\n FROM default.test\n WHERE id = 1\n)\nWHERE id = 1 +SELECT \n date,\n id,\n name,\n value\nFROM \n(\n SELECT \n date,\n id,\n name,\n value\n FROM default.test\n WHERE id = 2\n)\nWHERE id = 2 +SELECT id\nFROM \n(\n SELECT \n date,\n id,\n name,\n value\n FROM default.test\n WHERE id = 1\n)\nWHERE id = 1 +SELECT id\nFROM \n(\n SELECT \n date,\n id,\n name,\n value\n FROM default.test\n WHERE id = 1\n) AS s\nWHERE id = 1 diff --git a/tests/queries/0_stateless/01083_cross_to_inner_with_like.reference b/tests/queries/0_stateless/01083_cross_to_inner_with_like.reference index 92b51afb544..e6ebffcae9c 100644 --- a/tests/queries/0_stateless/01083_cross_to_inner_with_like.reference +++ b/tests/queries/0_stateless/01083_cross_to_inner_with_like.reference @@ -1,3 +1,3 @@ -SELECT \n k, \n r.k, \n name\nFROM n\nALL INNER JOIN r ON k = r.k\nWHERE (k = r.k) AND (name = \'A\') -SELECT \n k, \n r.k, \n name\nFROM n\nALL INNER JOIN r ON k = r.k\nWHERE (k = r.k) AND (name LIKE \'A%\') -SELECT \n k, \n r.k, \n name\nFROM n\nALL INNER JOIN r ON k = r.k\nWHERE (k = r.k) AND (name NOT LIKE \'A%\') +SELECT \n k,\n r.k,\n name\nFROM n\nALL INNER JOIN r ON k = r.k\nWHERE (k = r.k) AND (name = \'A\') +SELECT \n k,\n r.k,\n name\nFROM n\nALL INNER JOIN r ON k = r.k\nWHERE (k = r.k) AND (name LIKE \'A%\') +SELECT \n k,\n r.k,\n name\nFROM n\nALL INNER JOIN r ON k = r.k\nWHERE (k = r.k) AND (name NOT LIKE \'A%\') diff --git a/tests/queries/0_stateless/01278_format_multiple_queries.reference b/tests/queries/0_stateless/01278_format_multiple_queries.reference index cba2cc7b320..b12e3b30f0c 100644 --- a/tests/queries/0_stateless/01278_format_multiple_queries.reference +++ b/tests/queries/0_stateless/01278_format_multiple_queries.reference @@ -1,5 +1,5 @@ SELECT - a, + a, b AS x FROM table AS t INNER JOIN table2 AS t2 ON t.id = t2.t_id @@ -7,8 +7,8 @@ WHERE 1 = 1 ; SELECT - a, - b AS x, + a, + b AS x, if(x = 0, a, b) FROM 
table2 AS t WHERE t.id != 0 From 9f8c156fd23132d419618bbd8668f83245dc49fb Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 2 Jun 2020 02:35:44 +0300 Subject: [PATCH 04/52] Remove debug output --- src/Parsers/ASTNameTypePair.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Parsers/ASTNameTypePair.cpp b/src/Parsers/ASTNameTypePair.cpp index 6c41d35315c..35493eb77d1 100644 --- a/src/Parsers/ASTNameTypePair.cpp +++ b/src/Parsers/ASTNameTypePair.cpp @@ -24,10 +24,8 @@ void ASTNameTypePair::formatImpl(const FormatSettings & settings, FormatState & { std::string indent_str = settings.one_line ? "" : std::string(4 * frame.indent, ' '); - settings.ostr << '#'; settings.ostr << indent_str << backQuoteIfNeed(name) << ' '; type->formatImpl(settings, state, frame); - settings.ostr << '#'; } } From 36c23e240df0c70917abfe38f1d9b910e8ed64c1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 2 Jun 2020 02:41:41 +0300 Subject: [PATCH 05/52] Update some tests --- .../00061_merge_tree_alter.reference | 20 ++++++------ .../queries/0_stateless/00642_cast.reference | 2 +- .../00643_cast_zookeeper.reference | 2 +- .../00725_comment_columns.reference | 12 +++---- .../00725_ipv4_ipv6_domains.reference | 4 +-- .../00753_comment_columns_zookeeper.reference | 4 +-- ...4_alter_modify_column_partitions.reference | 4 +-- .../00754_alter_modify_order_by.reference | 2 +- ...fy_order_by_replicated_zookeeper.reference | 4 +-- ...4_test_custom_compression_codecs.reference | 4 +-- ...m_compression_codes_log_storages.reference | 8 ++--- .../0_stateless/00836_indices_alter.reference | 10 +++--- ...dices_alter_replicated_zookeeper.reference | 24 +++++++------- .../0_stateless/00933_alter_ttl.reference | 2 +- .../0_stateless/00933_ttl_simple.reference | 8 ++--- .../00980_merge_alter_settings.reference | 10 +++--- ...keeper_merge_tree_alter_settings.reference | 12 +++---- .../00998_constraints_all_tables.reference | 4 +-- ...age_odbc_parsing_exception_check.reference | 2 +- .../01055_compact_parts_1.reference | 4 +-- .../01069_database_memory.reference | 2 +- .../01070_alter_with_ttl.reference | 4 +-- .../01079_alter_default_zookeeper.reference | 16 +++++----- .../01079_bad_alters_zookeeper.reference | 4 +-- ..._expressions_in_engine_arguments.reference | 14 ++++---- ...1135_default_and_alter_zookeeper.reference | 2 +- ...13_alter_rename_column_zookeeper.reference | 4 +-- .../01213_alter_rename_nested.reference | 6 ++-- ...er_rename_with_default_zookeeper.reference | 10 +++--- .../01213_alter_table_rename_nested.reference | 4 +-- ...01224_no_superfluous_dict_reload.reference | 2 +- ...how_create_table_from_dictionary.reference | 2 +- ...9_bad_arguments_for_bloom_filter.reference | 6 ++-- .../01272_suspicious_codecs.reference | 32 +++++++++---------- ...alter_rename_column_default_expr.reference | 4 +-- ..._rename_column_materialized_expr.reference | 4 +-- ...7_alter_rename_column_constraint.reference | 4 +-- ...name_column_constraint_zookeeper.reference | 4 +-- .../01278_alter_rename_combination.reference | 8 ++--- ...1_alter_rename_and_other_renames.reference | 12 +++---- 40 files changed, 143 insertions(+), 143 deletions(-) diff --git a/tests/queries/0_stateless/00061_merge_tree_alter.reference b/tests/queries/0_stateless/00061_merge_tree_alter.reference index b609bc257f1..dcc44b9bd81 100644 --- a/tests/queries/0_stateless/00061_merge_tree_alter.reference +++ b/tests/queries/0_stateless/00061_merge_tree_alter.reference @@ -1,14 +1,14 @@ d Date k UInt64 i32 Int32 -CREATE TABLE default.alter_00061\n(\n 
`d` Date, \n `k` UInt64, \n `i32` Int32\n)\nENGINE = MergeTree(d, k, 8192) +CREATE TABLE default.alter_00061\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 10 42 d Date k UInt64 i32 Int32 n.ui8 Array(UInt8) n.s Array(String) -CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `n.ui8` Array(UInt8), \n `n.s` Array(String)\n)\nENGINE = MergeTree(d, k, 8192) +CREATE TABLE default.alter_00061\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `n.ui8` Array(UInt8),\n `n.s` Array(String)\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 8 40 [1,2,3] ['12','13','14'] 2015-01-01 10 42 [] [] d Date @@ -17,7 +17,7 @@ i32 Int32 n.ui8 Array(UInt8) n.s Array(String) n.d Array(Date) -CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `n.d` Array(Date)\n)\nENGINE = MergeTree(d, k, 8192) +CREATE TABLE default.alter_00061\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `n.d` Array(Date)\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 7 39 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] 2015-01-01 8 40 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00'] 2015-01-01 10 42 [] [] [] @@ -28,7 +28,7 @@ n.ui8 Array(UInt8) n.s Array(String) n.d Array(Date) s String DEFAULT \'0\' -CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `n.d` Array(Date), \n `s` String DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192) +CREATE TABLE default.alter_00061\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `n.d` Array(Date),\n `s` String DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] ['2000-01-01','2000-01-01','2000-01-03'] 100500 2015-01-01 7 39 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] 0 2015-01-01 8 40 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00'] 0 @@ -39,7 +39,7 @@ i32 Int32 n.ui8 Array(UInt8) n.s Array(String) s Int64 DEFAULT \'0\' -CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `s` Int64 DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192) +CREATE TABLE default.alter_00061\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `s` Int64 DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] 100500 2015-01-01 7 39 [10,20,30] ['120','130','140'] 0 2015-01-01 8 40 [1,2,3] ['12','13','14'] 0 @@ -51,7 +51,7 @@ n.ui8 Array(UInt8) n.s Array(String) s UInt32 DEFAULT \'0\' n.d Array(Date) -CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `s` UInt32 DEFAULT \'0\', \n `n.d` Array(Date)\n)\nENGINE = MergeTree(d, k, 8192) +CREATE TABLE default.alter_00061\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `s` UInt32 DEFAULT \'0\',\n `n.d` Array(Date)\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 6 38 [10,20,30] ['asd','qwe','qwe'] 100500 ['0000-00-00','0000-00-00','0000-00-00'] 2015-01-01 7 39 [10,20,30] ['120','130','140'] 0 ['0000-00-00','0000-00-00','0000-00-00'] 2015-01-01 8 40 [1,2,3] ['12','13','14'] 0 ['0000-00-00','0000-00-00','0000-00-00'] @@ -65,7 +65,7 @@ k UInt64 i32 Int32 n.s Array(String) s UInt32 DEFAULT 
\'0\' -CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `n.s` Array(String), \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192) +CREATE TABLE default.alter_00061\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `n.s` Array(String),\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 6 38 ['asd','qwe','qwe'] 100500 2015-01-01 7 39 ['120','130','140'] 0 2015-01-01 8 40 ['12','13','14'] 0 @@ -74,7 +74,7 @@ d Date k UInt64 i32 Int32 s UInt32 DEFAULT \'0\' -CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192) +CREATE TABLE default.alter_00061\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 6 38 100500 2015-01-01 7 39 0 2015-01-01 8 40 0 @@ -85,7 +85,7 @@ i32 Int32 s UInt32 DEFAULT \'0\' n.s Array(String) n.d Array(Date) -CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `s` UInt32 DEFAULT \'0\', \n `n.s` Array(String), \n `n.d` Array(Date)\n)\nENGINE = MergeTree(d, k, 8192) +CREATE TABLE default.alter_00061\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `s` UInt32 DEFAULT \'0\',\n `n.s` Array(String),\n `n.d` Array(Date)\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 6 38 100500 [] [] 2015-01-01 7 39 0 [] [] 2015-01-01 8 40 0 [] [] @@ -94,7 +94,7 @@ d Date k UInt64 i32 Int32 s UInt32 DEFAULT \'0\' -CREATE TABLE default.alter_00061\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192) +CREATE TABLE default.alter_00061\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = MergeTree(d, k, 8192) 2015-01-01 6 38 100500 2015-01-01 7 39 0 2015-01-01 8 40 0 diff --git a/tests/queries/0_stateless/00642_cast.reference b/tests/queries/0_stateless/00642_cast.reference index 907861c1784..7f5333f590e 100644 --- a/tests/queries/0_stateless/00642_cast.reference +++ b/tests/queries/0_stateless/00642_cast.reference @@ -9,7 +9,7 @@ hello 1970-01-01 00:00:01 CREATE TABLE default.cast ( - `x` UInt8, + `x` UInt8, `e` Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)') ) ENGINE = MergeTree diff --git a/tests/queries/0_stateless/00643_cast_zookeeper.reference b/tests/queries/0_stateless/00643_cast_zookeeper.reference index b79eb07aee3..226390d8510 100644 --- a/tests/queries/0_stateless/00643_cast_zookeeper.reference +++ b/tests/queries/0_stateless/00643_cast_zookeeper.reference @@ -1,6 +1,6 @@ CREATE TABLE test.cast1 ( - `x` UInt8, + `x` UInt8, `e` Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)') ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_cast', 'r1') diff --git a/tests/queries/0_stateless/00725_comment_columns.reference b/tests/queries/0_stateless/00725_comment_columns.reference index 86794581daf..7543f5854d7 100644 --- a/tests/queries/0_stateless/00725_comment_columns.reference +++ b/tests/queries/0_stateless/00725_comment_columns.reference @@ -1,4 +1,4 @@ -CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 DEFAULT 1 COMMENT \'comment 1\', \n `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2\', \n `third_column` UInt8 ALIAS second_column COMMENT \'comment 3\', \n `fourth_column` UInt8 COMMENT \'comment 4\', \n `fifth_column` UInt8\n)\nENGINE = TinyLog +CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 DEFAULT 1 COMMENT \'comment 1\',\n 
`second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2\',\n `third_column` UInt8 ALIAS second_column COMMENT \'comment 3\',\n `fourth_column` UInt8 COMMENT \'comment 4\',\n `fifth_column` UInt8\n)\nENGINE = TinyLog first_column UInt8 DEFAULT 1 comment 1 second_column UInt8 MATERIALIZED first_column comment 2 third_column UInt8 ALIAS second_column comment 3 @@ -11,7 +11,7 @@ fifth_column UInt8 │ check_query_comment_column │ fourth_column │ comment 4 │ │ check_query_comment_column │ fifth_column │ │ └────────────────────────────┴───────────────┴───────────┘ -CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 DEFAULT 1 COMMENT \'comment 1_1\', \n `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2_1\', \n `third_column` UInt8 ALIAS second_column COMMENT \'comment 3_1\', \n `fourth_column` UInt8 COMMENT \'comment 4_1\', \n `fifth_column` UInt8 COMMENT \'comment 5_1\'\n)\nENGINE = TinyLog +CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 DEFAULT 1 COMMENT \'comment 1_1\',\n `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2_1\',\n `third_column` UInt8 ALIAS second_column COMMENT \'comment 3_1\',\n `fourth_column` UInt8 COMMENT \'comment 4_1\',\n `fifth_column` UInt8 COMMENT \'comment 5_1\'\n)\nENGINE = TinyLog ┌─table──────────────────────┬─name──────────┬─comment─────┐ │ check_query_comment_column │ first_column │ comment 1_2 │ │ check_query_comment_column │ second_column │ comment 2_2 │ @@ -19,8 +19,8 @@ CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 DEF │ check_query_comment_column │ fourth_column │ comment 4_2 │ │ check_query_comment_column │ fifth_column │ comment 5_2 │ └────────────────────────────┴───────────────┴─────────────┘ -CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 DEFAULT 1 COMMENT \'comment 1_2\', \n `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2_2\', \n `third_column` UInt8 ALIAS second_column COMMENT \'comment 3_2\', \n `fourth_column` UInt8 COMMENT \'comment 4_2\', \n `fifth_column` UInt8 COMMENT \'comment 5_2\'\n)\nENGINE = TinyLog -CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 COMMENT \'comment 1\', \n `second_column` UInt8 COMMENT \'comment 2\', \n `third_column` UInt8 COMMENT \'comment 3\'\n)\nENGINE = MergeTree()\nPARTITION BY second_column\nORDER BY first_column\nSAMPLE BY first_column\nSETTINGS index_granularity = 8192 +CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 DEFAULT 1 COMMENT \'comment 1_2\',\n `second_column` UInt8 MATERIALIZED first_column COMMENT \'comment 2_2\',\n `third_column` UInt8 ALIAS second_column COMMENT \'comment 3_2\',\n `fourth_column` UInt8 COMMENT \'comment 4_2\',\n `fifth_column` UInt8 COMMENT \'comment 5_2\'\n)\nENGINE = TinyLog +CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 COMMENT \'comment 1\',\n `second_column` UInt8 COMMENT \'comment 2\',\n `third_column` UInt8 COMMENT \'comment 3\'\n)\nENGINE = MergeTree()\nPARTITION BY second_column\nORDER BY first_column\nSAMPLE BY first_column\nSETTINGS index_granularity = 8192 first_column UInt8 comment 1 second_column UInt8 comment 2 third_column UInt8 comment 3 @@ -29,8 +29,8 @@ third_column UInt8 comment 3 │ check_query_comment_column │ second_column │ comment 2 │ │ check_query_comment_column │ third_column │ comment 3 │ └────────────────────────────┴───────────────┴───────────┘ -CREATE TABLE default.check_query_comment_column\n(\n `first_column` 
UInt8 COMMENT \'comment 1_2\', \n `second_column` UInt8 COMMENT \'comment 2_2\', \n `third_column` UInt8 COMMENT \'comment 3_2\'\n)\nENGINE = MergeTree()\nPARTITION BY second_column\nORDER BY first_column\nSAMPLE BY first_column\nSETTINGS index_granularity = 8192 -CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 COMMENT \'comment 1_3\', \n `second_column` UInt8 COMMENT \'comment 2_3\', \n `third_column` UInt8 COMMENT \'comment 3_3\'\n)\nENGINE = MergeTree()\nPARTITION BY second_column\nORDER BY first_column\nSAMPLE BY first_column\nSETTINGS index_granularity = 8192 +CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 COMMENT \'comment 1_2\',\n `second_column` UInt8 COMMENT \'comment 2_2\',\n `third_column` UInt8 COMMENT \'comment 3_2\'\n)\nENGINE = MergeTree()\nPARTITION BY second_column\nORDER BY first_column\nSAMPLE BY first_column\nSETTINGS index_granularity = 8192 +CREATE TABLE default.check_query_comment_column\n(\n `first_column` UInt8 COMMENT \'comment 1_3\',\n `second_column` UInt8 COMMENT \'comment 2_3\',\n `third_column` UInt8 COMMENT \'comment 3_3\'\n)\nENGINE = MergeTree()\nPARTITION BY second_column\nORDER BY first_column\nSAMPLE BY first_column\nSETTINGS index_granularity = 8192 ┌─table──────────────────────┬─name──────────┬─comment─────┐ │ check_query_comment_column │ first_column │ comment 1_3 │ │ check_query_comment_column │ second_column │ comment 2_3 │ diff --git a/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference b/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference index 28051d15f65..69804e6cd24 100644 --- a/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference +++ b/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference @@ -1,4 +1,4 @@ -CREATE TABLE default.ipv4_test\n(\n `ipv4_` IPv4\n)\nENGINE = Memory +CREATE TABLE default.ipv4_test\n(`ipv4_` IPv4\n)\nENGINE = Memory 0.0.0.0 00 8.8.8.8 08080808 127.0.0.1 7F000001 @@ -10,7 +10,7 @@ CREATE TABLE default.ipv4_test\n(\n `ipv4_` IPv4\n)\nENGINE = Memory > 127.0.0.1 255.255.255.255 = 127.0.0.1 127.0.0.1 euqality of IPv4-mapped IPv6 value and IPv4 promoted to IPv6 with function: 1 -CREATE TABLE default.ipv6_test\n(\n `ipv6_` IPv6\n)\nENGINE = Memory +CREATE TABLE default.ipv6_test\n(`ipv6_` IPv6\n)\nENGINE = Memory :: 00000000000000000000000000000000 :: 00000000000000000000000000000000 ::ffff:8.8.8.8 00000000000000000000FFFF08080808 diff --git a/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference b/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference index d2705135440..5d8c5dc9f72 100644 --- a/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference +++ b/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference @@ -1,6 +1,6 @@ -CREATE TABLE default.check_comments\n(\n `column_name1` UInt8 DEFAULT 1 COMMENT \'comment\', \n `column_name2` UInt8 COMMENT \'non default comment\'\n)\nENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\')\nORDER BY column_name1\nSETTINGS index_granularity = 8192 +CREATE TABLE default.check_comments\n(\n `column_name1` UInt8 DEFAULT 1 COMMENT \'comment\',\n `column_name2` UInt8 COMMENT \'non default comment\'\n)\nENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\')\nORDER BY column_name1\nSETTINGS index_granularity = 8192 column_name1 UInt8 DEFAULT 1 comment column_name2 UInt8 non default comment -CREATE TABLE default.check_comments\n(\n `column_name1` UInt8 DEFAULT 1 COMMENT \'another comment\', \n `column_name2` UInt8 COMMENT 
\'non default comment\'\n)\nENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\')\nORDER BY column_name1\nSETTINGS index_granularity = 8192 +CREATE TABLE default.check_comments\n(\n `column_name1` UInt8 DEFAULT 1 COMMENT \'another comment\',\n `column_name2` UInt8 COMMENT \'non default comment\'\n)\nENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_comments\', \'r1\')\nORDER BY column_name1\nSETTINGS index_granularity = 8192 column_name1 UInt8 DEFAULT 1 another comment column_name2 UInt8 non default comment diff --git a/tests/queries/0_stateless/00754_alter_modify_column_partitions.reference b/tests/queries/0_stateless/00754_alter_modify_column_partitions.reference index a1493508b61..900a3200467 100644 --- a/tests/queries/0_stateless/00754_alter_modify_column_partitions.reference +++ b/tests/queries/0_stateless/00754_alter_modify_column_partitions.reference @@ -1,5 +1,5 @@ *** Check SHOW CREATE TABLE *** -CREATE TABLE default.alter_column\n(\n `x` UInt32, \n `y` Int32\n)\nENGINE = MergeTree\nPARTITION BY x\nORDER BY x\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_column\n(\n `x` UInt32,\n `y` Int32\n)\nENGINE = MergeTree\nPARTITION BY x\nORDER BY x\nSETTINGS index_granularity = 8192 *** Check parts *** 0 0 10 -10 @@ -52,7 +52,7 @@ CREATE TABLE default.alter_column\n(\n `x` UInt32, \n `y` Int32\n)\nENGINE 8 -8 9 -9 *** Check SHOW CREATE TABLE after ALTER MODIFY *** -CREATE TABLE default.alter_column\n(\n `x` UInt32, \n `y` Int64\n)\nENGINE = MergeTree\nPARTITION BY x\nORDER BY x\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_column\n(\n `x` UInt32,\n `y` Int64\n)\nENGINE = MergeTree\nPARTITION BY x\nORDER BY x\nSETTINGS index_granularity = 8192 *** Check parts after ALTER MODIFY *** 0 0 10 -10 diff --git a/tests/queries/0_stateless/00754_alter_modify_order_by.reference b/tests/queries/0_stateless/00754_alter_modify_order_by.reference index f0dc413a186..0279e5ca11b 100644 --- a/tests/queries/0_stateless/00754_alter_modify_order_by.reference +++ b/tests/queries/0_stateless/00754_alter_modify_order_by.reference @@ -9,4 +9,4 @@ 1 2 1 30 1 2 4 90 *** Check SHOW CREATE TABLE *** -CREATE TABLE default.summing\n(\n `x` UInt32, \n `y` UInt32, \n `z` UInt32, \n `val` UInt32\n)\nENGINE = SummingMergeTree\nPRIMARY KEY (x, y)\nORDER BY (x, y, -z)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.summing\n(\n `x` UInt32,\n `y` UInt32,\n `z` UInt32,\n `val` UInt32\n)\nENGINE = SummingMergeTree\nPRIMARY KEY (x, y)\nORDER BY (x, y, -z)\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference b/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference index 938a90a27b4..9303d45ea7d 100644 --- a/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference +++ b/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference @@ -9,6 +9,6 @@ 1 2 1 30 1 2 4 90 *** Check SHOW CREATE TABLE *** -CREATE TABLE test.summing_r2\n(\n `x` UInt32, \n `y` UInt32, \n `z` UInt32, \n `val` UInt32\n)\nENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\')\nPRIMARY KEY (x, y)\nORDER BY (x, y, -z)\nSETTINGS index_granularity = 8192 +CREATE TABLE test.summing_r2\n(\n `x` UInt32,\n `y` UInt32,\n `z` UInt32,\n `val` UInt32\n)\nENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\')\nPRIMARY KEY (x, y)\nORDER BY (x, y, -z)\nSETTINGS index_granularity = 8192 *** Check SHOW 
CREATE TABLE after offline ALTER *** -CREATE TABLE test.summing_r2\n(\n `x` UInt32, \n `y` UInt32, \n `z` UInt32, \n `t` UInt32, \n `val` UInt32\n)\nENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\')\nPRIMARY KEY (x, y)\nORDER BY (x, y, t * t)\nSETTINGS index_granularity = 8192 +CREATE TABLE test.summing_r2\n(\n `x` UInt32,\n `y` UInt32,\n `z` UInt32,\n `t` UInt32,\n `val` UInt32\n)\nENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test/summing\', \'r2\')\nPRIMARY KEY (x, y)\nORDER BY (x, y, t * t)\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference b/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference index f778c4f5d90..00556b0f8c9 100644 --- a/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference +++ b/tests/queries/0_stateless/00804_test_custom_compression_codecs.reference @@ -9,10 +9,10 @@ 10003 274972506.6 9175437371954010821 -CREATE TABLE default.compression_codec_multiple_more_types\n(\n `id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)), \n `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)), \n `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)), \n `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))\n)\nENGINE = MergeTree()\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.compression_codec_multiple_more_types\n(\n `id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)),\n `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)),\n `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)),\n `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))\n)\nENGINE = MergeTree()\nORDER BY tuple()\nSETTINGS index_granularity = 8192 1.5555555555555 hello world! [77] ['John'] 7.1000000000000 xxxxxxxxxxxx [127] ['Henry'] ! 
222 !ZSTD -CREATE TABLE default.test_default_delta\n(\n `id` UInt64 CODEC(Delta(8)), \n `data` String CODEC(Delta(1)), \n `somedate` Date CODEC(Delta(2)), \n `somenum` Float64 CODEC(Delta(8)), \n `somestr` FixedString(3) CODEC(Delta(1)), \n `othernum` Int64 CODEC(Delta(8)), \n `yetothernum` Float32 CODEC(Delta(4)), \n `ddd.age` Array(UInt8) CODEC(Delta(1)), \n `ddd.Name` Array(String) CODEC(Delta(1)), \n `ddd.OName` Array(String) CODEC(Delta(1)), \n `ddd.BName` Array(String) CODEC(Delta(1))\n)\nENGINE = MergeTree()\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.test_default_delta\n(\n `id` UInt64 CODEC(Delta(8)),\n `data` String CODEC(Delta(1)),\n `somedate` Date CODEC(Delta(2)),\n `somenum` Float64 CODEC(Delta(8)),\n `somestr` FixedString(3) CODEC(Delta(1)),\n `othernum` Int64 CODEC(Delta(8)),\n `yetothernum` Float32 CODEC(Delta(4)),\n `ddd.age` Array(UInt8) CODEC(Delta(1)),\n `ddd.Name` Array(String) CODEC(Delta(1)),\n `ddd.OName` Array(String) CODEC(Delta(1)),\n `ddd.BName` Array(String) CODEC(Delta(1))\n)\nENGINE = MergeTree()\nORDER BY tuple()\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference index b33535364e5..113e413bfac 100644 --- a/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference +++ b/tests/queries/0_stateless/00804_test_custom_compression_codes_log_storages.reference @@ -1,9 +1,9 @@ -CREATE TABLE default.compression_codec_log\n(\n `id` UInt64 CODEC(LZ4), \n `data` String CODEC(ZSTD(1)), \n `ddd` Date CODEC(NONE), \n `somenum` Float64 CODEC(ZSTD(2)), \n `somestr` FixedString(3) CODEC(LZ4HC(7)), \n `othernum` Int64 CODEC(Delta(8))\n)\nENGINE = Log() +CREATE TABLE default.compression_codec_log\n(\n `id` UInt64 CODEC(LZ4),\n `data` String CODEC(ZSTD(1)),\n `ddd` Date CODEC(NONE),\n `somenum` Float64 CODEC(ZSTD(2)),\n `somestr` FixedString(3) CODEC(LZ4HC(7)),\n `othernum` Int64 CODEC(Delta(8))\n)\nENGINE = Log() 1 hello 2018-12-14 1.1 aaa 5 2 world 2018-12-15 2.2 bbb 6 3 ! 
2018-12-16 3.3 ccc 7 2 -CREATE TABLE default.compression_codec_multiple_log\n(\n `id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)), \n `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)), \n `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)), \n `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))\n)\nENGINE = Log() +CREATE TABLE default.compression_codec_multiple_log\n(\n `id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)),\n `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)),\n `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)),\n `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))\n)\nENGINE = Log() 1 world 2018-10-05 1.1 2 hello 2018-10-01 2.2 3 buy 2018-10-11 3.3 @@ -11,12 +11,12 @@ CREATE TABLE default.compression_codec_multiple_log\n(\n `id` UInt64 CODEC(LZ 10003 274972506.6 9175437371954010821 -CREATE TABLE default.compression_codec_tiny_log\n(\n `id` UInt64 CODEC(LZ4), \n `data` String CODEC(ZSTD(1)), \n `ddd` Date CODEC(NONE), \n `somenum` Float64 CODEC(ZSTD(2)), \n `somestr` FixedString(3) CODEC(LZ4HC(7)), \n `othernum` Int64 CODEC(Delta(8))\n)\nENGINE = TinyLog() +CREATE TABLE default.compression_codec_tiny_log\n(\n `id` UInt64 CODEC(LZ4),\n `data` String CODEC(ZSTD(1)),\n `ddd` Date CODEC(NONE),\n `somenum` Float64 CODEC(ZSTD(2)),\n `somestr` FixedString(3) CODEC(LZ4HC(7)),\n `othernum` Int64 CODEC(Delta(8))\n)\nENGINE = TinyLog() 1 hello 2018-12-14 1.1 aaa 5 2 world 2018-12-15 2.2 bbb 6 3 ! 2018-12-16 3.3 ccc 7 2 -CREATE TABLE default.compression_codec_multiple_tiny_log\n(\n `id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)), \n `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)), \n `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)), \n `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))\n)\nENGINE = TinyLog() +CREATE TABLE default.compression_codec_multiple_tiny_log\n(\n `id` UInt64 CODEC(LZ4, ZSTD(1), NONE, LZ4HC(0), Delta(4)),\n `data` String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC(0), LZ4, LZ4, Delta(8)),\n `ddd` Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD(1), LZ4HC(0), LZ4HC(0)),\n `somenum` Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD(1))\n)\nENGINE = TinyLog() 1 world 2018-10-05 1.1 2 hello 2018-10-01 2.2 3 buy 2018-10-11 3.3 diff --git a/tests/queries/0_stateless/00836_indices_alter.reference b/tests/queries/0_stateless/00836_indices_alter.reference index 6efa25f47b7..7fd63a45d31 100644 --- a/tests/queries/0_stateless/00836_indices_alter.reference +++ b/tests/queries/0_stateless/00836_indices_alter.reference @@ -1,4 +1,4 @@ -CREATE TABLE default.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, \n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.minmax_idx\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10,\n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 1 2 1 2 1 2 @@ -6,15 +6,15 @@ CREATE TABLE default.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n I 1 2 1 2 1 2 -CREATE TABLE default.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n 
INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.minmax_idx\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 1 2 1 2 1 2 1 2 1 2 1 2 -CREATE TABLE default.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 -CREATE TABLE default.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.minmax_idx\n(\n `u64` UInt64,\n `i32` Int32\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.minmax_idx\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 1 2 1 2 1 2 @@ -23,6 +23,6 @@ CREATE TABLE default.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n I 1 2 1 2 1 2 -CREATE TABLE default.minmax_idx2\n(\n `u64` UInt64, \n `i32` Int32\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.minmax_idx2\n(\n `u64` UInt64,\n `i32` Int32\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 1 2 1 2 diff --git a/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference b/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference index ec9de160fcc..ce03d1e7de6 100644 --- a/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference +++ b/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference @@ -1,5 +1,5 @@ -CREATE TABLE test.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, \n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 -CREATE TABLE test.minmax_idx_r\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10, \n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE test.minmax_idx\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10,\n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE test.minmax_idx_r\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10,\n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 1 2 1 2 1 2 @@ -14,8 +14,8 @@ CREATE TABLE test.minmax_idx_r\n(\n `u64` UInt64, \n `i32` Int32, \n IN 3 2 19 9 65 75 -CREATE TABLE test.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, \n INDEX 
idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 -CREATE TABLE test.minmax_idx_r\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE test.minmax_idx\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE test.minmax_idx_r\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 1 2 1 4 1 5 @@ -28,10 +28,10 @@ CREATE TABLE test.minmax_idx_r\n(\n `u64` UInt64, \n `i32` Int32, \n IN 3 2 19 9 65 75 -CREATE TABLE test.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 -CREATE TABLE test.minmax_idx_r\n(\n `u64` UInt64, \n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 -CREATE TABLE test.minmax_idx\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 -CREATE TABLE test.minmax_idx_r\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE test.minmax_idx\n(\n `u64` UInt64,\n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE test.minmax_idx_r\n(\n `u64` UInt64,\n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE test.minmax_idx\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE test.minmax_idx_r\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 1 2 1 4 1 5 @@ -44,14 +44,14 @@ CREATE TABLE test.minmax_idx_r\n(\n `u64` UInt64, \n `i32` Int32, \n IN 3 2 19 9 65 75 -CREATE TABLE test.minmax_idx2\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 -CREATE TABLE test.minmax_idx2_r\n(\n `u64` UInt64, \n `i32` Int32, \n INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, \n INDEX idx2 u64 * 
i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE test.minmax_idx2\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE test.minmax_idx2_r\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 1 2 1 3 1 2 1 3 -CREATE TABLE test.minmax_idx2\n(\n `u64` UInt64, \n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 -CREATE TABLE test.minmax_idx2_r\n(\n `u64` UInt64, \n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE test.minmax_idx2\n(\n `u64` UInt64,\n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE test.minmax_idx2_r\n(\n `u64` UInt64,\n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/indices_alter2\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 1 2 1 3 1 2 diff --git a/tests/queries/0_stateless/00933_alter_ttl.reference b/tests/queries/0_stateless/00933_alter_ttl.reference index 9b5cec0f773..545f5644e94 100644 --- a/tests/queries/0_stateless/00933_alter_ttl.reference +++ b/tests/queries/0_stateless/00933_alter_ttl.reference @@ -1,4 +1,4 @@ -CREATE TABLE default.ttl\n(\n `d` Date, \n `a` Int32\n)\nENGINE = MergeTree\nPARTITION BY toDayOfMonth(d)\nORDER BY a\nTTL d + toIntervalDay(1)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.ttl\n(\n `d` Date,\n `a` Int32\n)\nENGINE = MergeTree\nPARTITION BY toDayOfMonth(d)\nORDER BY a\nTTL d + toIntervalDay(1)\nSETTINGS index_granularity = 8192 2100-10-10 3 2100-10-10 4 d Date diff --git a/tests/queries/0_stateless/00933_ttl_simple.reference b/tests/queries/0_stateless/00933_ttl_simple.reference index 102639947a3..a4ef8033328 100644 --- a/tests/queries/0_stateless/00933_ttl_simple.reference +++ b/tests/queries/0_stateless/00933_ttl_simple.reference @@ -6,11 +6,11 @@ 2000-10-10 00:00:00 0 2100-10-10 00:00:00 3 2100-10-10 2 -CREATE TABLE default.ttl_00933_1\n(\n `b` Int32, \n `a` Int32 TTL now() - 1000\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL now() - 1000\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192 1 0 -CREATE TABLE default.ttl_00933_1\n(\n `b` Int32, \n `a` Int32 TTL now() + 1000\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL now() + 1000\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192 1 1 -CREATE TABLE default.ttl_00933_1\n(\n `b` Int32, \n `a` Int32 TTL today() - 1\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE 
default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL today() - 1\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192 1 0 -CREATE TABLE default.ttl_00933_1\n(\n `b` Int32, \n `a` Int32 TTL today() + 1\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL today() + 1\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192 1 1 diff --git a/tests/queries/0_stateless/00980_merge_alter_settings.reference b/tests/queries/0_stateless/00980_merge_alter_settings.reference index 340cf29ce89..7f8aa23b722 100644 --- a/tests/queries/0_stateless/00980_merge_alter_settings.reference +++ b/tests/queries/0_stateless/00980_merge_alter_settings.reference @@ -1,6 +1,6 @@ -CREATE TABLE default.table_for_alter\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096 -CREATE TABLE default.table_for_alter\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096, parts_to_throw_insert = 1, parts_to_delay_insert = 1 -CREATE TABLE default.table_for_alter\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100 +CREATE TABLE default.table_for_alter\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096 +CREATE TABLE default.table_for_alter\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096, parts_to_throw_insert = 1, parts_to_delay_insert = 1 +CREATE TABLE default.table_for_alter\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100 2 -CREATE TABLE default.table_for_alter\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100, check_delay_period = 30 -CREATE TABLE default.table_for_alter\n(\n `id` UInt64, \n `Data` String, \n `Data2` UInt64\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100, check_delay_period = 15 +CREATE TABLE default.table_for_alter\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100, check_delay_period = 30 +CREATE TABLE default.table_for_alter\n(\n `id` UInt64,\n `Data` String,\n `Data2` UInt64\n)\nENGINE = MergeTree()\nORDER BY id\nSETTINGS index_granularity = 4096, parts_to_throw_insert = 100, parts_to_delay_insert = 100, check_delay_period = 15 diff --git a/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference b/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference index ab006ea6931..2682051751b 100644 --- a/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference +++ b/tests/queries/0_stateless/00980_zookeeper_merge_tree_alter_settings.reference @@ -1,12 +1,12 @@ -CREATE TABLE default.replicated_table_for_alter1\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192 -CREATE TABLE 
default.replicated_table_for_alter1\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192 +CREATE TABLE default.replicated_table_for_alter1\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192 +CREATE TABLE default.replicated_table_for_alter1\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192 4 4 4 4 6 6 -CREATE TABLE default.replicated_table_for_alter1\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192, use_minimalistic_part_header_in_zookeeper = 1 -CREATE TABLE default.replicated_table_for_alter2\n(\n `id` UInt64, \n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'2\')\nORDER BY id\nSETTINGS index_granularity = 8192, parts_to_throw_insert = 1, parts_to_delay_insert = 1 -CREATE TABLE default.replicated_table_for_alter1\n(\n `id` UInt64, \n `Data` String, \n `Data2` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192, use_minimalistic_part_header_in_zookeeper = 1, check_delay_period = 15 -CREATE TABLE default.replicated_table_for_alter2\n(\n `id` UInt64, \n `Data` String, \n `Data2` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'2\')\nORDER BY id\nSETTINGS index_granularity = 8192, parts_to_throw_insert = 1, parts_to_delay_insert = 1 +CREATE TABLE default.replicated_table_for_alter1\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192, use_minimalistic_part_header_in_zookeeper = 1 +CREATE TABLE default.replicated_table_for_alter2\n(\n `id` UInt64,\n `Data` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'2\')\nORDER BY id\nSETTINGS index_granularity = 8192, parts_to_throw_insert = 1, parts_to_delay_insert = 1 +CREATE TABLE default.replicated_table_for_alter1\n(\n `id` UInt64,\n `Data` String,\n `Data2` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'1\')\nORDER BY id\nSETTINGS index_granularity = 8192, use_minimalistic_part_header_in_zookeeper = 1, check_delay_period = 15 +CREATE TABLE default.replicated_table_for_alter2\n(\n `id` UInt64,\n `Data` String,\n `Data2` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/replicated_table_for_alter\', \'2\')\nORDER BY id\nSETTINGS index_granularity = 8192, parts_to_throw_insert = 1, parts_to_delay_insert = 1 diff --git a/tests/queries/0_stateless/00998_constraints_all_tables.reference b/tests/queries/0_stateless/00998_constraints_all_tables.reference index 3de251daa71..0ec8b004ae4 100644 --- a/tests/queries/0_stateless/00998_constraints_all_tables.reference +++ b/tests/queries/0_stateless/00998_constraints_all_tables.reference @@ -10,5 +10,5 @@ 0 0 3 -CREATE TABLE default.constrained\n(\n `URL` String, \n CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = \'yandex.ru\', \n CONSTRAINT is_utf8 CHECK isValidUTF8(URL)\n)\nENGINE = Log -CREATE TABLE 
default.constrained2\n(\n `URL` String, \n CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = \'yandex.ru\', \n CONSTRAINT is_utf8 CHECK isValidUTF8(URL)\n)\nENGINE = Log +CREATE TABLE default.constrained\n(\n `URL` String,\n CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = \'yandex.ru\',\n CONSTRAINT is_utf8 CHECK isValidUTF8(URL)\n)\nENGINE = Log +CREATE TABLE default.constrained2\n(\n `URL` String,\n CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = \'yandex.ru\',\n CONSTRAINT is_utf8 CHECK isValidUTF8(URL)\n)\nENGINE = Log diff --git a/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.reference b/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.reference index c2d7d849fae..548952c3a6a 100644 --- a/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.reference +++ b/tests/queries/0_stateless/01033_storage_odbc_parsing_exception_check.reference @@ -1 +1 @@ -CREATE TABLE default.BannerDict\n(\n `BannerID` UInt64, \n `CompaignID` UInt64\n)\nENGINE = ODBC(\'DSN=pgconn;Database=postgres\', \'somedb\', \'bannerdict\') +CREATE TABLE default.BannerDict\n(\n `BannerID` UInt64,\n `CompaignID` UInt64\n)\nENGINE = ODBC(\'DSN=pgconn;Database=postgres\', \'somedb\', \'bannerdict\') diff --git a/tests/queries/0_stateless/01055_compact_parts_1.reference b/tests/queries/0_stateless/01055_compact_parts_1.reference index b99f336d3b0..c5311a0b479 100644 --- a/tests/queries/0_stateless/01055_compact_parts_1.reference +++ b/tests/queries/0_stateless/01055_compact_parts_1.reference @@ -1,2 +1,2 @@ -CREATE TABLE default.mt_compact\n(\n `a` Int32, \n `s` String\n)\nENGINE = MergeTree\nPARTITION BY a\nORDER BY a\nSETTINGS index_granularity_bytes = 0, index_granularity = 8192 -CREATE TABLE default.mt_compact\n(\n `a` Int32, \n `s` String\n)\nENGINE = MergeTree\nPARTITION BY a\nORDER BY a\nSETTINGS index_granularity_bytes = 0, min_rows_for_wide_part = 0, index_granularity = 8192, parts_to_delay_insert = 300 +CREATE TABLE default.mt_compact\n(\n `a` Int32,\n `s` String\n)\nENGINE = MergeTree\nPARTITION BY a\nORDER BY a\nSETTINGS index_granularity_bytes = 0, index_granularity = 8192 +CREATE TABLE default.mt_compact\n(\n `a` Int32,\n `s` String\n)\nENGINE = MergeTree\nPARTITION BY a\nORDER BY a\nSETTINGS index_granularity_bytes = 0, min_rows_for_wide_part = 0, index_granularity = 8192, parts_to_delay_insert = 300 diff --git a/tests/queries/0_stateless/01069_database_memory.reference b/tests/queries/0_stateless/01069_database_memory.reference index e7486d57276..cfccf5b1757 100644 --- a/tests/queries/0_stateless/01069_database_memory.reference +++ b/tests/queries/0_stateless/01069_database_memory.reference @@ -5,4 +5,4 @@ CREATE DATABASE memory_01069\nENGINE = Memory() 4 3 4 -CREATE TABLE memory_01069.file\n(\n `n` UInt8\n)\nENGINE = File(\'CSV\') +CREATE TABLE memory_01069.file\n(`n` UInt8\n)\nENGINE = File(\'CSV\') diff --git a/tests/queries/0_stateless/01070_alter_with_ttl.reference b/tests/queries/0_stateless/01070_alter_with_ttl.reference index de7833472a1..202ac2ac10f 100644 --- a/tests/queries/0_stateless/01070_alter_with_ttl.reference +++ b/tests/queries/0_stateless/01070_alter_with_ttl.reference @@ -1,2 +1,2 @@ -CREATE TABLE default.alter_ttl\n(\n `i` Int32, \n `s` String TTL toDate(\'2020-01-01\')\n)\nENGINE = MergeTree\nORDER BY i\nTTL toDate(\'2020-05-05\')\nSETTINGS index_granularity = 8192 -CREATE TABLE default.alter_ttl\n(\n `d` Date, \n `s` String TTL d + toIntervalDay(1)\n)\nENGINE = MergeTree\nORDER BY d\nTTL d + toIntervalMonth(1)\nSETTINGS 
index_granularity = 8192 +CREATE TABLE default.alter_ttl\n(\n `i` Int32,\n `s` String TTL toDate(\'2020-01-01\')\n)\nENGINE = MergeTree\nORDER BY i\nTTL toDate(\'2020-05-05\')\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_ttl\n(\n `d` Date,\n `s` String TTL d + toIntervalDay(1)\n)\nENGINE = MergeTree\nORDER BY d\nTTL d + toIntervalMonth(1)\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01079_alter_default_zookeeper.reference b/tests/queries/0_stateless/01079_alter_default_zookeeper.reference index 62d26bc9b4b..758150a7799 100644 --- a/tests/queries/0_stateless/01079_alter_default_zookeeper.reference +++ b/tests/queries/0_stateless/01079_alter_default_zookeeper.reference @@ -1,11 +1,11 @@ -CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` String DEFAULT \'10\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` String DEFAULT \'10\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 1000 -CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt64 DEFAULT \'10\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 -CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt64 DEFAULT 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt64 DEFAULT \'10\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt64 DEFAULT 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 1000 -CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt64 DEFAULT 100\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 -CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt16 DEFAULT 100\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt64 DEFAULT 100\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt16 DEFAULT 100\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 10000 -CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt8 DEFAULT 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 -CREATE TABLE default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt8 DEFAULT 10, \n `better_column` UInt8 DEFAULT \'1\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 -CREATE TABLE 
default.alter_default\n(\n `date` Date, \n `key` UInt64, \n `value` UInt8 DEFAULT 10, \n `better_column` UInt8 DEFAULT \'1\', \n `other_date` String DEFAULT 1\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt8 DEFAULT 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt8 DEFAULT 10,\n `better_column` UInt8 DEFAULT \'1\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt8 DEFAULT 10,\n `better_column` UInt8 DEFAULT \'1\',\n `other_date` String DEFAULT 1\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference b/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference index ea3fbec34a8..deb26676f39 100644 --- a/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference +++ b/tests/queries/0_stateless/01079_bad_alters_zookeeper.reference @@ -1,6 +1,6 @@ Wrong column name. -CREATE TABLE default.table_for_bad_alters\n(\n `key` UInt64, \n `value1` UInt8, \n `value2` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_bad_alters\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 -CREATE TABLE default.table_for_bad_alters\n(\n `key` UInt64, \n `value1` UInt8, \n `value2` UInt32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_bad_alters\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_bad_alters\n(\n `key` UInt64,\n `value1` UInt8,\n `value2` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_bad_alters\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_bad_alters\n(\n `key` UInt64,\n `value1` UInt8,\n `value2` UInt32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_bad_alters\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 syntax error at begin of string. 
7 Hello diff --git a/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference b/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference index 2007eda0f07..138f09f2634 100644 --- a/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference +++ b/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference @@ -1,11 +1,11 @@ -CREATE TABLE test_01083.file\n(\n `n` Int8\n)\nENGINE = File(\'TSVWithNamesAndTypes\') -CREATE TABLE test_01083.buffer\n(\n `n` Int8\n)\nENGINE = Buffer(\'test_01083\', \'file\', 16, 10, 200, 10000, 1000000, 10000000, 1000000000) -CREATE TABLE test_01083.merge\n(\n `n` Int8\n)\nENGINE = Merge(\'test_01083\', \'distributed\') +CREATE TABLE test_01083.file\n(`n` Int8\n)\nENGINE = File(\'TSVWithNamesAndTypes\') +CREATE TABLE test_01083.buffer\n(`n` Int8\n)\nENGINE = Buffer(\'test_01083\', \'file\', 16, 10, 200, 10000, 1000000, 10000000, 1000000000) +CREATE TABLE test_01083.merge\n(`n` Int8\n)\nENGINE = Merge(\'test_01083\', \'distributed\') CREATE TABLE test_01083.merge_tf AS merge(\'test_01083\', \'.*\') -CREATE TABLE test_01083.distributed\n(\n `n` Int8\n)\nENGINE = Distributed(\'test_shard_localhost\', \'test_01083\', \'file\') +CREATE TABLE test_01083.distributed\n(`n` Int8\n)\nENGINE = Distributed(\'test_shard_localhost\', \'test_01083\', \'file\') CREATE TABLE test_01083.distributed_tf AS cluster(\'test_shard_localhost\', \'test_01083\', \'buffer\') -CREATE TABLE test_01083.url\n(\n `n` UInt64, \n `col` String\n)\nENGINE = URL(\'https://localhost:8443/?query=select+n,+_table+from+test_01083.merge+format+CSV\', \'CSV\') +CREATE TABLE test_01083.url\n(\n `n` UInt64,\n `col` String\n)\nENGINE = URL(\'https://localhost:8443/?query=select+n,+_table+from+test_01083.merge+format+CSV\', \'CSV\') CREATE TABLE test_01083.rich_syntax AS remote(\'localhos{x|y|t}\', cluster(\'test_shard_localhost\', remote(\'127.0.0.{1..4}\', \'test_01083\', \'view\'))) -CREATE VIEW test_01083.view\n(\n `n` Int64\n) AS\nSELECT toInt64(n) AS n\nFROM \n(\n SELECT toString(n) AS n\n FROM test_01083.merge\n WHERE _table != \'qwerty\'\n ORDER BY _table ASC\n)\nUNION ALL\nSELECT *\nFROM test_01083.file -CREATE DICTIONARY test_01083.dict\n(\n `n` UInt64, \n `col` String DEFAULT \'42\'\n)\nPRIMARY KEY n\nSOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9440 SECURE 1 USER \'default\' TABLE \'url\' DB \'test_01083\'))\nLIFETIME(MIN 0 MAX 1)\nLAYOUT(CACHE(SIZE_IN_CELLS 1)) +CREATE VIEW test_01083.view\n(`n` Int64\n) AS\nSELECT toInt64(n) AS n\nFROM \n(\n SELECT toString(n) AS n\n FROM test_01083.merge\n WHERE _table != \'qwerty\'\n ORDER BY _table ASC\n)\nUNION ALL\nSELECT *\nFROM test_01083.file +CREATE DICTIONARY test_01083.dict\n(\n \n `n` UInt64,\n \n `col` String DEFAULT \'42\'\n)\nPRIMARY KEY n\nSOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9440 SECURE 1 USER \'default\' TABLE \'url\' DB \'test_01083\'))\nLIFETIME(MIN 0 MAX 1)\nLAYOUT(CACHE(SIZE_IN_CELLS 1)) 16 diff --git a/tests/queries/0_stateless/01135_default_and_alter_zookeeper.reference b/tests/queries/0_stateless/01135_default_and_alter_zookeeper.reference index f7c4a48b4bc..6a5dd7223bd 100644 --- a/tests/queries/0_stateless/01135_default_and_alter_zookeeper.reference +++ b/tests/queries/0_stateless/01135_default_and_alter_zookeeper.reference @@ -1,2 +1,2 @@ 4 -CREATE TABLE default.default_table\n(\n `id` UInt64, \n `enum_column` Enum8(\'undefined\' = 0, \'fox\' = 1, \'index\' = 2) DEFAULT \'fox\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test/default_table\', \'1\')\nORDER BY 
tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.default_table\n(\n `id` UInt64,\n `enum_column` Enum8(\'undefined\' = 0, \'fox\' = 1, \'index\' = 2) DEFAULT \'fox\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test/default_table\', \'1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference index e2d6007c57f..5457becfeda 100644 --- a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference +++ b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.reference @@ -1,6 +1,6 @@ 1 -CREATE TABLE default.table_for_rename_replicated\n(\n `date` Date, \n `key` UInt64, \n `value1` String, \n `value2` String, \n `value3` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicated\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 -CREATE TABLE default.table_for_rename_replicated\n(\n `date` Date, \n `key` UInt64, \n `renamed_value1` String, \n `value2` String, \n `value3` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicated\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_replicated\n(\n `date` Date,\n `key` UInt64,\n `value1` String,\n `value2` String,\n `value3` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicated\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_replicated\n(\n `date` Date,\n `key` UInt64,\n `renamed_value1` String,\n `value2` String,\n `value3` String\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicated\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 1 date key renamed_value1 value2 value3 2019-10-02 1 1 1 1 diff --git a/tests/queries/0_stateless/01213_alter_rename_nested.reference b/tests/queries/0_stateless/01213_alter_rename_nested.reference index 2641df46aeb..403e87256fe 100644 --- a/tests/queries/0_stateless/01213_alter_rename_nested.reference +++ b/tests/queries/0_stateless/01213_alter_rename_nested.reference @@ -1,10 +1,10 @@ [8,9,10] ['a','b','c'] -CREATE TABLE default.table_for_rename_nested\n(\n `date` Date, \n `key` UInt64, \n `n.x` Array(UInt32), \n `n.y` Array(String), \n `value1` Array(Array(LowCardinality(String)))\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 -CREATE TABLE default.table_for_rename_nested\n(\n `date` Date, \n `key` UInt64, \n `n.renamed_x` Array(UInt32), \n `n.renamed_y` Array(String), \n `value1` Array(Array(LowCardinality(String)))\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_nested\n(\n `date` Date,\n `key` UInt64,\n `n.x` Array(UInt32),\n `n.y` Array(String),\n `value1` Array(Array(LowCardinality(String)))\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_nested\n(\n `date` Date,\n `key` UInt64,\n `n.renamed_x` Array(UInt32),\n `n.renamed_y` Array(String),\n `value1` Array(Array(LowCardinality(String)))\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 7 [8,9,10] 7 ['a','b','c'] [['7']] -CREATE TABLE default.table_for_rename_nested\n(\n `date` Date, \n `key` UInt64, \n 
`n.renamed_x` Array(UInt32), \n `n.renamed_y` Array(String), \n `renamed_value1` Array(Array(LowCardinality(String)))\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_nested\n(\n `date` Date,\n `key` UInt64,\n `n.renamed_x` Array(UInt32),\n `n.renamed_y` Array(String),\n `renamed_value1` Array(Array(LowCardinality(String)))\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 date key n.renamed_x n.renamed_y renamed_value1 2019-10-01 7 [8,9,10] ['a','b','c'] [['7']] diff --git a/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.reference b/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.reference index 251e664b522..a4759ecb0f7 100644 --- a/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.reference +++ b/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.reference @@ -1,17 +1,17 @@ date key value1 value2 2019-10-02 1 1 Hello 1 -CREATE TABLE default.table_rename_with_default\n(\n `date` Date, \n `key` UInt64, \n `value1` String, \n `value2` String DEFAULT concat(\'Hello \', value1), \n `value3` String ALIAS concat(\'Word \', value1)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_rename_with_default\n(\n `date` Date,\n `key` UInt64,\n `value1` String,\n `value2` String DEFAULT concat(\'Hello \', value1),\n `value3` String ALIAS concat(\'Word \', value1)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 date key renamed_value1 value2 2019-10-02 1 1 Hello 1 -CREATE TABLE default.table_rename_with_default\n(\n `date` Date, \n `key` UInt64, \n `renamed_value1` String, \n `value2` String DEFAULT concat(\'Hello \', renamed_value1), \n `value3` String ALIAS concat(\'Word \', renamed_value1)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_rename_with_default\n(\n `date` Date,\n `key` UInt64,\n `renamed_value1` String,\n `value2` String DEFAULT concat(\'Hello \', renamed_value1),\n `value3` String ALIAS concat(\'Word \', renamed_value1)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 Hello 1 Word 1 date1 date2 value1 value2 2019-10-02 2018-10-02 1 1 -CREATE TABLE default.table_rename_with_ttl\n(\n `date1` Date, \n `date2` Date, \n `value1` String, \n `value2` String TTL date1 + toIntervalMonth(10000)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\')\nORDER BY tuple()\nTTL date2 + toIntervalMonth(10000)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_rename_with_ttl\n(\n `date1` Date,\n `date2` Date,\n `value1` String,\n `value2` String TTL date1 + toIntervalMonth(10000)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\')\nORDER BY tuple()\nTTL date2 + toIntervalMonth(10000)\nSETTINGS index_granularity = 8192 renamed_date1 date2 value1 value2 2019-10-02 2018-10-02 1 1 -CREATE TABLE default.table_rename_with_ttl\n(\n `renamed_date1` Date, \n `date2` Date, \n `value1` String, \n `value2` String TTL renamed_date1 + toIntervalMonth(10000)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\')\nORDER BY tuple()\nTTL date2 + toIntervalMonth(10000)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_rename_with_ttl\n(\n `renamed_date1` Date,\n `date2` Date,\n 
`value1` String,\n `value2` String TTL renamed_date1 + toIntervalMonth(10000)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\')\nORDER BY tuple()\nTTL date2 + toIntervalMonth(10000)\nSETTINGS index_granularity = 8192 renamed_date1 renamed_date2 value1 value2 2019-10-02 2018-10-02 1 1 -CREATE TABLE default.table_rename_with_ttl\n(\n `renamed_date1` Date, \n `renamed_date2` Date, \n `value1` String, \n `value2` String TTL renamed_date1 + toIntervalMonth(10000)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\')\nORDER BY tuple()\nTTL renamed_date2 + toIntervalMonth(10000)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_rename_with_ttl\n(\n `renamed_date1` Date,\n `renamed_date2` Date,\n `value1` String,\n `value2` String TTL renamed_date1 + toIntervalMonth(10000)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\')\nORDER BY tuple()\nTTL renamed_date2 + toIntervalMonth(10000)\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01213_alter_table_rename_nested.reference b/tests/queries/0_stateless/01213_alter_table_rename_nested.reference index 8e6d93dbcce..1b89cf8f461 100644 --- a/tests/queries/0_stateless/01213_alter_table_rename_nested.reference +++ b/tests/queries/0_stateless/01213_alter_table_rename_nested.reference @@ -1,6 +1,6 @@ [8,9,10] ['a','b','c'] -CREATE TABLE default.table_for_rename_nested\n(\n `date` Date, \n `key` UInt64, \n `n.x` Array(UInt32), \n `n.y` Array(String), \n `value1` String\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 -CREATE TABLE default.table_for_rename_nested\n(\n `date` Date, \n `key` UInt64, \n `n.renamed_x` Array(UInt32), \n `n.renamed_y` Array(String), \n `value1` String\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_nested\n(\n `date` Date,\n `key` UInt64,\n `n.x` Array(UInt32),\n `n.y` Array(String),\n `value1` String\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename_nested\n(\n `date` Date,\n `key` UInt64,\n `n.renamed_x` Array(UInt32),\n `n.renamed_y` Array(String),\n `value1` String\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 7 [8,9,10] 7 ['a','b','c'] diff --git a/tests/queries/0_stateless/01224_no_superfluous_dict_reload.reference b/tests/queries/0_stateless/01224_no_superfluous_dict_reload.reference index 524fbdd26fc..96d4393e06b 100644 --- a/tests/queries/0_stateless/01224_no_superfluous_dict_reload.reference +++ b/tests/queries/0_stateless/01224_no_superfluous_dict_reload.reference @@ -12,7 +12,7 @@ LAYOUT(FLAT()) NOT_LOADED CREATE TABLE dict_db_01224_dictionary.`dict_db_01224.dict` ( - `key` UInt64, + `key` UInt64, `val` UInt64 ) ENGINE = Dictionary(`dict_db_01224.dict`) diff --git a/tests/queries/0_stateless/01225_show_create_table_from_dictionary.reference b/tests/queries/0_stateless/01225_show_create_table_from_dictionary.reference index 14ddc093143..3363df5fb98 100644 --- a/tests/queries/0_stateless/01225_show_create_table_from_dictionary.reference +++ b/tests/queries/0_stateless/01225_show_create_table_from_dictionary.reference @@ -1,6 +1,6 @@ CREATE TABLE dict_db_01225_dictionary.`dict_db_01225.dict` ( - `key` UInt64, + `key` UInt64, `val` UInt64 ) ENGINE = Dictionary(`dict_db_01225.dict`) diff --git 
a/tests/queries/0_stateless/01249_bad_arguments_for_bloom_filter.reference b/tests/queries/0_stateless/01249_bad_arguments_for_bloom_filter.reference index 04ae001675f..dfff8c7be00 100644 --- a/tests/queries/0_stateless/01249_bad_arguments_for_bloom_filter.reference +++ b/tests/queries/0_stateless/01249_bad_arguments_for_bloom_filter.reference @@ -1,3 +1,3 @@ -CREATE TABLE default.bloom_filter_idx_good\n(\n `u64` UInt64, \n `i32` Int32, \n `f64` Float64, \n `d` Decimal(10, 2), \n `s` String, \n `e` Enum8(\'a\' = 1, \'b\' = 2, \'c\' = 3), \n `dt` Date, \n INDEX bloom_filter_a i32 TYPE bloom_filter(0.) GRANULARITY 1\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 -CREATE TABLE default.bloom_filter_idx_good\n(\n `u64` UInt64, \n `i32` Int32, \n `f64` Float64, \n `d` Decimal(10, 2), \n `s` String, \n `e` Enum8(\'a\' = 1, \'b\' = 2, \'c\' = 3), \n `dt` Date, \n INDEX bloom_filter_a i32 TYPE bloom_filter(0.) GRANULARITY 1\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 -CREATE TABLE default.bloom_filter_idx_good\n(\n `u64` UInt64, \n `i32` Int32, \n `f64` Float64, \n `d` Decimal(10, 2), \n `s` String, \n `e` Enum8(\'a\' = 1, \'b\' = 2, \'c\' = 3), \n `dt` Date, \n INDEX bloom_filter_a i32 TYPE bloom_filter(1.) GRANULARITY 1\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.bloom_filter_idx_good\n(\n `u64` UInt64,\n `i32` Int32,\n `f64` Float64,\n `d` Decimal(10, 2),\n `s` String,\n `e` Enum8(\'a\' = 1, \'b\' = 2, \'c\' = 3),\n `dt` Date,\n INDEX bloom_filter_a i32 TYPE bloom_filter(0.) GRANULARITY 1\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.bloom_filter_idx_good\n(\n `u64` UInt64,\n `i32` Int32,\n `f64` Float64,\n `d` Decimal(10, 2),\n `s` String,\n `e` Enum8(\'a\' = 1, \'b\' = 2, \'c\' = 3),\n `dt` Date,\n INDEX bloom_filter_a i32 TYPE bloom_filter(0.) GRANULARITY 1\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.bloom_filter_idx_good\n(\n `u64` UInt64,\n `i32` Int32,\n `f64` Float64,\n `d` Decimal(10, 2),\n `s` String,\n `e` Enum8(\'a\' = 1, \'b\' = 2, \'c\' = 3),\n `dt` Date,\n INDEX bloom_filter_a i32 TYPE bloom_filter(1.) 
GRANULARITY 1\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01272_suspicious_codecs.reference b/tests/queries/0_stateless/01272_suspicious_codecs.reference index 559b6df2693..de91a1ddb25 100644 --- a/tests/queries/0_stateless/01272_suspicious_codecs.reference +++ b/tests/queries/0_stateless/01272_suspicious_codecs.reference @@ -1,16 +1,16 @@ -CREATE TABLE default.codecs1\n(\n `a` UInt8 CODEC(NONE, NONE)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs2\n(\n `a` UInt8 CODEC(NONE, LZ4)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs3\n(\n `a` UInt8 CODEC(LZ4, NONE)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs4\n(\n `a` UInt8 CODEC(LZ4, LZ4)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs5\n(\n `a` UInt8 CODEC(LZ4, ZSTD(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs6\n(\n `a` UInt8 CODEC(Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs7\n(\n `a` UInt8 CODEC(Delta(1), Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs8\n(\n `a` UInt8 CODEC(LZ4, Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs1\n(\n `a` UInt8 CODEC(NONE, NONE)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs2\n(\n `a` UInt8 CODEC(NONE, LZ4)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs3\n(\n `a` UInt8 CODEC(LZ4, NONE)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs4\n(\n `a` UInt8 CODEC(LZ4, LZ4)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs5\n(\n `a` UInt8 CODEC(LZ4, ZSTD(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs6\n(\n `a` UInt8 CODEC(Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs7\n(\n `a` UInt8 CODEC(Delta(1), Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs8\n(\n `a` UInt8 CODEC(LZ4, Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs1\n(`a` UInt8 CODEC(NONE, NONE)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs2\n(`a` UInt8 CODEC(NONE, LZ4)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs3\n(`a` UInt8 CODEC(LZ4, NONE)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs4\n(`a` UInt8 CODEC(LZ4, LZ4)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs5\n(`a` UInt8 CODEC(LZ4, ZSTD(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs6\n(`a` UInt8 CODEC(Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs7\n(`a` UInt8 CODEC(Delta(1), Delta(1))\n)\nENGINE = 
MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs8\n(`a` UInt8 CODEC(LZ4, Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs1\n(`a` UInt8 CODEC(NONE, NONE)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs2\n(`a` UInt8 CODEC(NONE, LZ4)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs3\n(`a` UInt8 CODEC(LZ4, NONE)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs4\n(`a` UInt8 CODEC(LZ4, LZ4)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs5\n(`a` UInt8 CODEC(LZ4, ZSTD(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs6\n(`a` UInt8 CODEC(Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs7\n(`a` UInt8 CODEC(Delta(1), Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs8\n(`a` UInt8 CODEC(LZ4, Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01275_alter_rename_column_default_expr.reference b/tests/queries/0_stateless/01275_alter_rename_column_default_expr.reference index d81601b92c5..e1ea5a778da 100644 --- a/tests/queries/0_stateless/01275_alter_rename_column_default_expr.reference +++ b/tests/queries/0_stateless/01275_alter_rename_column_default_expr.reference @@ -7,7 +7,7 @@ 2019-10-01 6 6 7 6 + 7 2019-10-02 7 7 8 7 + 8 2019-10-03 8 8 9 8 + 9 -CREATE TABLE default.table_for_rename\n(\n `date` Date, \n `key` UInt64, \n `value4` String, \n `value5` String, \n `value3` String DEFAULT concat(value4, \' + \', value5)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename\n(\n `date` Date,\n `key` UInt64,\n `value4` String,\n `value5` String,\n `value3` String DEFAULT concat(value4, \' + \', value5)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 2019-10-01 0 0 1 0 + 1 2019-10-02 1 1 2 1 + 2 2019-10-03 2 2 3 2 + 3 @@ -36,7 +36,7 @@ CREATE TABLE default.table_for_rename\n(\n `date` Date, \n `key` UInt64, \ 2019-10-03 17 17 18 17 + 18 2019-10-01 18 18 19 18 + 19 2019-10-02 19 19 20 19 + 20 -CREATE TABLE default.table_for_rename\n(\n `date` Date, \n `key` UInt64, \n `value1` String, \n `value2` String, \n `value3` String DEFAULT concat(value1, \' + \', value2)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename\n(\n `date` Date,\n `key` UInt64,\n `value1` String,\n `value2` String,\n `value3` String DEFAULT concat(value1, \' + \', value2)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 2019-10-01 0 0 1 0 + 1 2019-10-02 1 1 2 1 + 2 2019-10-03 2 2 3 2 + 3 diff --git a/tests/queries/0_stateless/01276_alter_rename_column_materialized_expr.reference b/tests/queries/0_stateless/01276_alter_rename_column_materialized_expr.reference index 5d721230db3..c430b6a28af 100644 --- a/tests/queries/0_stateless/01276_alter_rename_column_materialized_expr.reference +++ b/tests/queries/0_stateless/01276_alter_rename_column_materialized_expr.reference @@ -7,7 +7,7 @@ 2019-10-01 6 6 7 2019-10-02 7 7 8 
2019-10-03 8 8 9 -CREATE TABLE default.table_for_rename\n(\n `date` Date, \n `key` UInt64, \n `value4` String, \n `value5` String, \n `value3` String MATERIALIZED concat(value4, \' + \', value5)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename\n(\n `date` Date,\n `key` UInt64,\n `value4` String,\n `value5` String,\n `value3` String MATERIALIZED concat(value4, \' + \', value5)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 2019-10-01 0 0 1 2019-10-02 1 1 2 2019-10-03 2 2 3 @@ -38,7 +38,7 @@ CREATE TABLE default.table_for_rename\n(\n `date` Date, \n `key` UInt64, \ 2019-10-01 18 18 19 2019-10-02 19 19 20 -- rename columns back -- -CREATE TABLE default.table_for_rename\n(\n `date` Date, \n `key` UInt64, \n `value1` String, \n `value2` String, \n `value3` String MATERIALIZED concat(value1, \' + \', value2)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename\n(\n `date` Date,\n `key` UInt64,\n `value1` String,\n `value2` String,\n `value3` String MATERIALIZED concat(value1, \' + \', value2)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 2019-10-01 0 0 1 2019-10-02 1 1 2 2019-10-03 2 2 3 diff --git a/tests/queries/0_stateless/01277_alter_rename_column_constraint.reference b/tests/queries/0_stateless/01277_alter_rename_column_constraint.reference index cb1842f95da..4316c7fa1b9 100644 --- a/tests/queries/0_stateless/01277_alter_rename_column_constraint.reference +++ b/tests/queries/0_stateless/01277_alter_rename_column_constraint.reference @@ -7,7 +7,7 @@ 2019-10-01 6 6 7 8 2019-10-02 7 7 8 9 2019-10-03 8 8 9 10 -CREATE TABLE default.table_for_rename\n(\n `date` Date, \n `key` UInt64, \n `value4` String, \n `value5` String, \n `value3` String, \n CONSTRAINT cs_value1 CHECK toInt64(value4) < toInt64(value5), \n CONSTRAINT cs_value2 CHECK toInt64(value5) < toInt64(value3)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename\n(\n `date` Date,\n `key` UInt64,\n `value4` String,\n `value5` String,\n `value3` String,\n CONSTRAINT cs_value1 CHECK toInt64(value4) < toInt64(value5),\n CONSTRAINT cs_value2 CHECK toInt64(value5) < toInt64(value3)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 2019-10-01 0 0 1 2 2019-10-02 1 1 2 3 2019-10-03 2 2 3 4 @@ -38,7 +38,7 @@ CREATE TABLE default.table_for_rename\n(\n `date` Date, \n `key` UInt64, \ 2019-10-01 18 18 19 20 2019-10-02 19 19 20 21 -- rename columns back -- -CREATE TABLE default.table_for_rename\n(\n `date` Date, \n `key` UInt64, \n `value1` String, \n `value2` String, \n `value3` String, \n CONSTRAINT cs_value1 CHECK toInt64(value1) < toInt64(value2), \n CONSTRAINT cs_value2 CHECK toInt64(value2) < toInt64(value3)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename\n(\n `date` Date,\n `key` UInt64,\n `value1` String,\n `value2` String,\n `value3` String,\n CONSTRAINT cs_value1 CHECK toInt64(value1) < toInt64(value2),\n CONSTRAINT cs_value2 CHECK toInt64(value2) < toInt64(value3)\n)\nENGINE = MergeTree()\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 2019-10-01 0 0 1 2 2019-10-02 1 1 2 3 2019-10-03 2 2 3 4 diff --git 
a/tests/queries/0_stateless/01277_alter_rename_column_constraint_zookeeper.reference b/tests/queries/0_stateless/01277_alter_rename_column_constraint_zookeeper.reference index 9ca17dbbc0a..6a9d5a3bdcf 100644 --- a/tests/queries/0_stateless/01277_alter_rename_column_constraint_zookeeper.reference +++ b/tests/queries/0_stateless/01277_alter_rename_column_constraint_zookeeper.reference @@ -7,7 +7,7 @@ 2019-10-01 6 6 7 8 2019-10-02 7 7 8 9 2019-10-03 8 8 9 10 -CREATE TABLE default.table_for_rename1\n(\n `date` Date, \n `key` UInt64, \n `value4` String, \n `value5` String, \n `value3` String, \n CONSTRAINT cs_value1 CHECK toInt64(value4) < toInt64(value5), \n CONSTRAINT cs_value2 CHECK toInt64(value5) < toInt64(value3)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_for_rename\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename1\n(\n `date` Date,\n `key` UInt64,\n `value4` String,\n `value5` String,\n `value3` String,\n CONSTRAINT cs_value1 CHECK toInt64(value4) < toInt64(value5),\n CONSTRAINT cs_value2 CHECK toInt64(value5) < toInt64(value3)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_for_rename\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 2019-10-01 0 0 1 2 2019-10-02 1 1 2 3 2019-10-03 2 2 3 4 @@ -38,7 +38,7 @@ CREATE TABLE default.table_for_rename1\n(\n `date` Date, \n `key` UInt64, 2019-10-01 18 18 19 20 2019-10-02 19 19 20 21 -- rename columns back -- -CREATE TABLE default.table_for_rename1\n(\n `date` Date, \n `key` UInt64, \n `value1` String, \n `value2` String, \n `value3` String, \n CONSTRAINT cs_value1 CHECK toInt64(value1) < toInt64(value2), \n CONSTRAINT cs_value2 CHECK toInt64(value2) < toInt64(value3)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_for_rename\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename1\n(\n `date` Date,\n `key` UInt64,\n `value1` String,\n `value2` String,\n `value3` String,\n CONSTRAINT cs_value1 CHECK toInt64(value1) < toInt64(value2),\n CONSTRAINT cs_value2 CHECK toInt64(value2) < toInt64(value3)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_for_rename\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 2019-10-01 0 0 1 2 2019-10-02 1 1 2 3 2019-10-03 2 2 3 4 diff --git a/tests/queries/0_stateless/01278_alter_rename_combination.reference b/tests/queries/0_stateless/01278_alter_rename_combination.reference index 3f00378b4b7..cc912e9b265 100644 --- a/tests/queries/0_stateless/01278_alter_rename_combination.reference +++ b/tests/queries/0_stateless/01278_alter_rename_combination.reference @@ -1,15 +1,15 @@ -CREATE TABLE default.rename_table\n(\n `key` Int32, \n `old_value1` Int32, \n `value1` Int32\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.rename_table\n(\n `key` Int32,\n `old_value1` Int32,\n `value1` Int32\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 key old_value1 value1 1 2 3 -CREATE TABLE default.rename_table\n(\n `k` Int32, \n `v1` Int32, \n `v2` Int32\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.rename_table\n(\n `k` Int32,\n `v1` Int32,\n `v2` Int32\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 k v1 v2 1 2 3 4 5 6 ---polymorphic--- -CREATE TABLE default.rename_table_polymorphic\n(\n `key` Int32, \n `old_value1` Int32, \n `value1` 
Int32\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 10000, index_granularity = 8192 +CREATE TABLE default.rename_table_polymorphic\n(\n `key` Int32,\n `old_value1` Int32,\n `value1` Int32\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 10000, index_granularity = 8192 key old_value1 value1 1 2 3 -CREATE TABLE default.rename_table_polymorphic\n(\n `k` Int32, \n `v1` Int32, \n `v2` Int32\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 10000, index_granularity = 8192 +CREATE TABLE default.rename_table_polymorphic\n(\n `k` Int32,\n `v1` Int32,\n `v2` Int32\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 10000, index_granularity = 8192 k v1 v2 1 2 3 4 5 6 diff --git a/tests/queries/0_stateless/01281_alter_rename_and_other_renames.reference b/tests/queries/0_stateless/01281_alter_rename_and_other_renames.reference index f0a906147ac..bf3358aea60 100644 --- a/tests/queries/0_stateless/01281_alter_rename_and_other_renames.reference +++ b/tests/queries/0_stateless/01281_alter_rename_and_other_renames.reference @@ -1,23 +1,23 @@ -CREATE TABLE default.rename_table_multiple\n(\n `key` Int32, \n `value1_string` String, \n `value2` Int32\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.rename_table_multiple\n(\n `key` Int32,\n `value1_string` String,\n `value2` Int32\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 key value1_string value2 1 2 3 -CREATE TABLE default.rename_table_multiple\n(\n `key` Int32, \n `value1_string` String, \n `value2_old` Int32, \n `value2` Int64 DEFAULT 7\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.rename_table_multiple\n(\n `key` Int32,\n `value1_string` String,\n `value2_old` Int32,\n `value2` Int64 DEFAULT 7\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 key value1_string value2_old value2 1 2 3 7 4 5 6 7 -CREATE TABLE default.rename_table_multiple\n(\n `key` Int32, \n `value1_string` String, \n `value2_old` Int64 DEFAULT 7\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.rename_table_multiple\n(\n `key` Int32,\n `value1_string` String,\n `value2_old` Int64 DEFAULT 7\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 key value1_string value2_old 1 2 7 4 5 7 7 8 10 -CREATE TABLE default.rename_table_multiple_compact\n(\n `key` Int32, \n `value1_string` String, \n `value2` Int32\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 100000, index_granularity = 8192 +CREATE TABLE default.rename_table_multiple_compact\n(\n `key` Int32,\n `value1_string` String,\n `value2` Int32\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 100000, index_granularity = 8192 key value1_string value2 1 2 3 -CREATE TABLE default.rename_table_multiple_compact\n(\n `key` Int32, \n `value1_string` String, \n `value2_old` Int32, \n `value2` Int64 DEFAULT 7\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 100000, index_granularity = 8192 +CREATE TABLE default.rename_table_multiple_compact\n(\n `key` Int32,\n `value1_string` String,\n `value2_old` Int32,\n `value2` Int64 DEFAULT 7\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 100000, index_granularity = 8192 key value1_string value2_old value2 1 2 3 7 4 5 6 7 -CREATE TABLE 
default.rename_table_multiple_compact\n(\n `key` Int32, \n `value1_string` String, \n `value2_old` Int64 DEFAULT 7\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 100000, index_granularity = 8192 +CREATE TABLE default.rename_table_multiple_compact\n(\n `key` Int32,\n `value1_string` String,\n `value2_old` Int64 DEFAULT 7\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 100000, index_granularity = 8192 key value1_string value2_old 1 2 7 4 5 7 From 80b765542ff1b97165e5426235d72fb3bcd042d7 Mon Sep 17 00:00:00 2001 From: zhang2014 Date: Fri, 12 Jun 2020 18:04:42 +0800 Subject: [PATCH 06/52] ISSUES-7572 support config default http handler --- src/Server/HTTPHandlerFactory.cpp | 72 ++++++++++--------------- src/Server/HTTPHandlerFactory.h | 10 ++++ src/Server/PrometheusRequestHandler.cpp | 14 +++++ src/Server/ReplicasStatusHandler.cpp | 8 +++ src/Server/StaticRequestHandler.cpp | 16 ++++++ 5 files changed, 77 insertions(+), 43 deletions(-) diff --git a/src/Server/HTTPHandlerFactory.cpp b/src/Server/HTTPHandlerFactory.cpp index 2f00aa0aa72..ec75656a9a8 100644 --- a/src/Server/HTTPHandlerFactory.cpp +++ b/src/Server/HTTPHandlerFactory.cpp @@ -1,9 +1,5 @@ #include "HTTPHandlerFactory.h" -#include -#include -#include -#include #include #include "HTTPHandler.h" @@ -68,7 +64,8 @@ HTTPRequestHandlerFactoryMain::TThis * HTTPRequestHandlerFactoryMain::addHandler return this; } -static inline auto createHandlersFactoryFromConfig(IServer & server, const std::string & name, const String & prefix) +static inline auto createHandlersFactoryFromConfig( + IServer & server, const std::string & name, const String & prefix, AsynchronousMetrics & async_metrics) { auto main_handler_factory = std::make_unique(name); @@ -82,7 +79,17 @@ static inline auto createHandlersFactoryFromConfig(IServer & server, const std:: const auto & handler_type = server.config().getString(prefix + "." + key + ".handler.type", ""); - if (handler_type == "static") + if (handler_type == "root") + addRootHandlerFactory(*main_handler_factory, server); + else if (handler_type == "ping") + addPingHandlerFactory(*main_handler_factory, server); + else if (handler_type == "defaults") + addDefaultHandlersFactory(*main_handler_factory, server, async_metrics); + else if (handler_type == "prometheus") + addPrometheusHandlerFactory(*main_handler_factory, server, async_metrics); + else if (handler_type == "replicas_status") + addReplicasStatusHandlerFactory(*main_handler_factory, server); + else if (handler_type == "static") main_handler_factory->addHandler(createStaticHandlerFactory(server, prefix + "." + key)); else if (handler_type == "dynamic_query_handler") main_handler_factory->addHandler(createDynamicHandlerFactory(server, prefix + "." 
+ key)); @@ -99,44 +106,23 @@ static inline auto createHandlersFactoryFromConfig(IServer & server, const std:: return main_handler_factory.release(); } -static const auto ping_response_expression = "Ok.\n"; -static const auto root_response_expression = "config://http_server_default_response"; - static inline Poco::Net::HTTPRequestHandlerFactory * createHTTPHandlerFactory( IServer & server, const std::string & name, AsynchronousMetrics & async_metrics) { if (server.config().has("http_handlers")) - return createHandlersFactoryFromConfig(server, name, "http_handlers"); + return createHandlersFactoryFromConfig(server, name, "http_handlers", async_metrics); else { auto factory = std::make_unique(name); - auto root_handler = std::make_unique>(server, root_response_expression); - root_handler->attachStrictPath("/")->allowGetAndHeadRequest(); - factory->addHandler(root_handler.release()); - - auto ping_handler = std::make_unique>(server, ping_response_expression); - ping_handler->attachStrictPath("/ping")->allowGetAndHeadRequest(); - factory->addHandler(ping_handler.release()); - - auto replicas_status_handler = std::make_unique>(server); - replicas_status_handler->attachNonStrictPath("/replicas_status")->allowGetAndHeadRequest(); - factory->addHandler(replicas_status_handler.release()); + addRootHandlerFactory(*factory, server); + addPingHandlerFactory(*factory, server); + addReplicasStatusHandlerFactory(*factory, server); + addPrometheusHandlerFactory(*factory, server, async_metrics); auto query_handler = std::make_unique>(server, "query"); query_handler->allowPostAndGetParamsRequest(); factory->addHandler(query_handler.release()); - - /// We check that prometheus handler will be served on current (default) port. - /// Otherwise it will be created separately, see below. 
- if (server.config().has("prometheus") && server.config().getInt("prometheus.port", 0) == 0) - { - auto prometheus_handler = std::make_unique>( - server, PrometheusMetricsWriter(server.config(), "prometheus", async_metrics)); - prometheus_handler->attachStrictPath(server.config().getString("prometheus.endpoint", "/metrics"))->allowGetAndHeadRequest(); - factory->addHandler(prometheus_handler.release()); - } - return factory.release(); } } @@ -145,17 +131,9 @@ static inline Poco::Net::HTTPRequestHandlerFactory * createInterserverHTTPHandle { auto factory = std::make_unique(name); - auto root_handler = std::make_unique>(server, root_response_expression); - root_handler->attachStrictPath("/")->allowGetAndHeadRequest(); - factory->addHandler(root_handler.release()); - - auto ping_handler = std::make_unique>(server, ping_response_expression); - ping_handler->attachStrictPath("/ping")->allowGetAndHeadRequest(); - factory->addHandler(ping_handler.release()); - - auto replicas_status_handler = std::make_unique>(server); - replicas_status_handler->attachNonStrictPath("/replicas_status")->allowGetAndHeadRequest(); - factory->addHandler(replicas_status_handler.release()); + addRootHandlerFactory(*factory, server); + addPingHandlerFactory(*factory, server); + addReplicasStatusHandlerFactory(*factory, server); auto main_handler = std::make_unique>(server); main_handler->allowPostAndGetParamsRequest(); @@ -183,4 +161,12 @@ Poco::Net::HTTPRequestHandlerFactory * createHandlerFactory(IServer & server, As throw Exception("LOGICAL ERROR: Unknown HTTP handler factory name.", ErrorCodes::LOGICAL_ERROR); } +void addDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server, AsynchronousMetrics & async_metrics) +{ + addRootHandlerFactory(factory, server); + addPingHandlerFactory(factory, server); + addReplicasStatusHandlerFactory(factory, server); + addPrometheusHandlerFactory(factory, server, async_metrics); +} + } diff --git a/src/Server/HTTPHandlerFactory.h b/src/Server/HTTPHandlerFactory.h index 273e337813e..ac3a7451338 100644 --- a/src/Server/HTTPHandlerFactory.h +++ b/src/Server/HTTPHandlerFactory.h @@ -103,6 +103,16 @@ private: std::function creator; }; +void addRootHandlerFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server); + +void addPingHandlerFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server); + +void addReplicasStatusHandlerFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server); + +void addDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server, AsynchronousMetrics & async_metrics); + +void addPrometheusHandlerFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server, AsynchronousMetrics & async_metrics); + Poco::Net::HTTPRequestHandlerFactory * createStaticHandlerFactory(IServer & server, const std::string & config_prefix); Poco::Net::HTTPRequestHandlerFactory * createDynamicHandlerFactory(IServer & server, const std::string & config_prefix); diff --git a/src/Server/PrometheusRequestHandler.cpp b/src/Server/PrometheusRequestHandler.cpp index 43f39e36de8..0f5df54b002 100644 --- a/src/Server/PrometheusRequestHandler.cpp +++ b/src/Server/PrometheusRequestHandler.cpp @@ -12,6 +12,7 @@ #include #include +#include namespace DB @@ -40,4 +41,17 @@ void PrometheusRequestHandler::handleRequest( } } +void addPrometheusHandlerFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server, AsynchronousMetrics & async_metrics) +{ + /// We check that prometheus handler will be served on current (default) port. 
+ /// Otherwise it will be created separately, see below. + if (server.config().has("prometheus") && server.config().getInt("prometheus.port", 0) == 0) + { + auto prometheus_handler = std::make_unique>( + server, PrometheusMetricsWriter(server.config(), "prometheus", async_metrics)); + prometheus_handler->attachStrictPath(server.config().getString("prometheus.endpoint", "/metrics"))->allowGetAndHeadRequest(); + factory.addHandler(prometheus_handler.release()); + } +} + } diff --git a/src/Server/ReplicasStatusHandler.cpp b/src/Server/ReplicasStatusHandler.cpp index 57c97b0e4e0..9b3e00cc069 100644 --- a/src/Server/ReplicasStatusHandler.cpp +++ b/src/Server/ReplicasStatusHandler.cpp @@ -7,8 +7,10 @@ #include #include +#include #include #include +#include namespace DB @@ -104,5 +106,11 @@ void ReplicasStatusHandler::handleRequest(Poco::Net::HTTPServerRequest & request } } +void addReplicasStatusHandlerFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server) +{ + auto replicas_status_handler = std::make_unique>(server); + replicas_status_handler->attachNonStrictPath("/replicas_status")->allowGetAndHeadRequest(); + factory->addHandler(replicas_status_handler.release()); +} } diff --git a/src/Server/StaticRequestHandler.cpp b/src/Server/StaticRequestHandler.cpp index 22f32e6a0e7..255e3cab5af 100644 --- a/src/Server/StaticRequestHandler.cpp +++ b/src/Server/StaticRequestHandler.cpp @@ -155,6 +155,22 @@ StaticRequestHandler::StaticRequestHandler(IServer & server_, const String & exp { } +void addRootHandlerFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server) +{ + static const auto root_response_expression = "config://http_server_default_response"; + + auto root_handler = std::make_unique>(server, root_response_expression); + root_handler->attachStrictPath("/")->allowGetAndHeadRequest(); + factory.addHandler(root_handler.release()); +} + +void addPingHandlerFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server) +{ + auto ping_handler = std::make_unique>(server, "Ok.\n"); + ping_handler->attachStrictPath("/ping")->allowGetAndHeadRequest(); + factory.addHandler(ping_handler.release()); +} + Poco::Net::HTTPRequestHandlerFactory * createStaticHandlerFactory(IServer & server, const std::string & config_prefix) { int status = server.config().getInt(config_prefix + ".handler.status", 200); From 1c55aa03334f4c011c5c587468641b873ffcd83d Mon Sep 17 00:00:00 2001 From: zhang2014 Date: Fri, 12 Jun 2020 18:19:03 +0800 Subject: [PATCH 07/52] ISSUES-7572 add integration test --- .../test_http_handlers_config/test.py | 20 +++++++++++++++++++ .../test_custom_defaults_handlers/config.xml | 10 ++++++++++ .../test_defaults_handlers/config.xml | 9 +++++++++ 3 files changed, 39 insertions(+) create mode 100644 tests/integration/test_http_handlers_config/test_custom_defaults_handlers/config.xml create mode 100644 tests/integration/test_http_handlers_config/test_defaults_handlers/config.xml diff --git a/tests/integration/test_http_handlers_config/test.py b/tests/integration/test_http_handlers_config/test.py index 31d40bd8a1d..a38bd3ff343 100644 --- a/tests/integration/test_http_handlers_config/test.py +++ b/tests/integration/test_http_handlers_config/test.py @@ -113,3 +113,23 @@ def test_relative_path_static_handler(): assert 'text/html; charset=UTF-8' == cluster.instance.http_request('test_get_relative_path_static_handler', method='GET', headers={'XXX': 'xxx'}).headers['Content-Type'] assert 'Relative Path File\n' == cluster.instance.http_request('test_get_relative_path_static_handler', 
method='GET', headers={'XXX': 'xxx'}).content +def test_defaults_http_handlers(): + with contextlib.closing(SimpleCluster(ClickHouseCluster(__file__), "defaults_handlers", "test_defaults_handlers")) as cluster: + assert 200 == cluster.instance.http_request('', method='GET').status_code + assert 'Default server response' == cluster.instance.http_request('', method='GET').content + + assert 200 == cluster.instance.http_request('ping', method='GET').status_code + assert 'Ok\n' == cluster.instance.http_request('ping', method='GET').content + + assert 200 == cluster.instance.http_request('replicas_status', method='GET').status_code + assert 'Ok\n' == cluster.instance.http_request('replicas_status', method='GET').content + +def test_custom_defaults_http_handlers(): + with contextlib.closing(SimpleCluster(ClickHouseCluster(__file__), "custom_defaults_handlers", "test_custom_defaults_handlers")) as cluster: + assert 200 == cluster.instance.http_request('', method='GET').status_code + assert 'Default server response' == cluster.instance.http_request('', method='GET').content + + assert 200 == cluster.instance.http_request('ping', method='GET').status_code + assert 'Ok\n' == cluster.instance.http_request('ping', method='GET').content + + assert 404 == cluster.instance.http_request('replicas_status', method='GET').status_code diff --git a/tests/integration/test_http_handlers_config/test_custom_defaults_handlers/config.xml b/tests/integration/test_http_handlers_config/test_custom_defaults_handlers/config.xml new file mode 100644 index 00000000000..54008c2c4b8 --- /dev/null +++ b/tests/integration/test_http_handlers_config/test_custom_defaults_handlers/config.xml @@ -0,0 +1,10 @@ + + + + Default server response + + + + + + diff --git a/tests/integration/test_http_handlers_config/test_defaults_handlers/config.xml b/tests/integration/test_http_handlers_config/test_defaults_handlers/config.xml new file mode 100644 index 00000000000..fd280e05cf4 --- /dev/null +++ b/tests/integration/test_http_handlers_config/test_defaults_handlers/config.xml @@ -0,0 +1,9 @@ + + + + Default server response + + + + + From 2c439afc015b15d9babb632b423991aeea0f397f Mon Sep 17 00:00:00 2001 From: zhang2014 Date: Fri, 12 Jun 2020 19:17:34 +0800 Subject: [PATCH 08/52] ISSUES-7572 fix build failure --- src/Server/ReplicasStatusHandler.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Server/ReplicasStatusHandler.cpp b/src/Server/ReplicasStatusHandler.cpp index 9b3e00cc069..d6bbfdbd090 100644 --- a/src/Server/ReplicasStatusHandler.cpp +++ b/src/Server/ReplicasStatusHandler.cpp @@ -110,7 +110,7 @@ void addReplicasStatusHandlerFactory(HTTPRequestHandlerFactoryMain & factory, IS { auto replicas_status_handler = std::make_unique>(server); replicas_status_handler->attachNonStrictPath("/replicas_status")->allowGetAndHeadRequest(); - factory->addHandler(replicas_status_handler.release()); + factory.addHandler(replicas_status_handler.release()); } } From 8d9b770da4a74a16c405f90df669bf1d34135fc5 Mon Sep 17 00:00:00 2001 From: zhang2014 Date: Sat, 13 Jun 2020 00:15:02 +0800 Subject: [PATCH 09/52] ISSUES-7572 fix defaults config level & add replicas_status and prometheus handler --- src/Server/HTTPHandlerFactory.cpp | 93 +++++++++++-------- src/Server/HTTPHandlerFactory.h | 14 +-- src/Server/PrometheusRequestHandler.cpp | 15 +-- src/Server/ReplicasStatusHandler.cpp | 6 +- src/Server/StaticRequestHandler.cpp | 16 ---- .../test_http_handlers_config/test.py | 26 ++++-- .../test_custom_defaults_handlers/config.xml | 10 -- 
.../test_prometheus_handler/config.xml | 17 ++++ .../test_replicas_status_handler/config.xml | 12 +++ 9 files changed, 112 insertions(+), 97 deletions(-) delete mode 100644 tests/integration/test_http_handlers_config/test_custom_defaults_handlers/config.xml create mode 100644 tests/integration/test_http_handlers_config/test_prometheus_handler/config.xml create mode 100644 tests/integration/test_http_handlers_config/test_replicas_status_handler/config.xml diff --git a/src/Server/HTTPHandlerFactory.cpp b/src/Server/HTTPHandlerFactory.cpp index ec75656a9a8..6459b0aab3b 100644 --- a/src/Server/HTTPHandlerFactory.cpp +++ b/src/Server/HTTPHandlerFactory.cpp @@ -74,55 +74,51 @@ static inline auto createHandlersFactoryFromConfig( for (const auto & key : keys) { - if (!startsWith(key, "rule")) - throw Exception("Unknown element in config: " + prefix + "." + key + ", must be 'rule'", ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG); + if (key == "defaults") + addDefaultHandlersFactory(*main_handler_factory, server, &async_metrics); + else if (startsWith(key, "rule")) + { + const auto & handler_type = server.config().getString(prefix + "." + key + ".handler.type", ""); - const auto & handler_type = server.config().getString(prefix + "." + key + ".handler.type", ""); + if (handler_type.empty()) + throw Exception("Handler type in config is not specified here: " + prefix + "." + key + ".handler.type", + ErrorCodes::INVALID_CONFIG_PARAMETER); - if (handler_type == "root") - addRootHandlerFactory(*main_handler_factory, server); - else if (handler_type == "ping") - addPingHandlerFactory(*main_handler_factory, server); - else if (handler_type == "defaults") - addDefaultHandlersFactory(*main_handler_factory, server, async_metrics); - else if (handler_type == "prometheus") - addPrometheusHandlerFactory(*main_handler_factory, server, async_metrics); - else if (handler_type == "replicas_status") - addReplicasStatusHandlerFactory(*main_handler_factory, server); - else if (handler_type == "static") - main_handler_factory->addHandler(createStaticHandlerFactory(server, prefix + "." + key)); - else if (handler_type == "dynamic_query_handler") - main_handler_factory->addHandler(createDynamicHandlerFactory(server, prefix + "." + key)); - else if (handler_type == "predefined_query_handler") - main_handler_factory->addHandler(createPredefinedHandlerFactory(server, prefix + "." + key)); - else if (handler_type.empty()) - throw Exception("Handler type in config is not specified here: " + - prefix + "." + key + ".handler.type", ErrorCodes::INVALID_CONFIG_PARAMETER); + if (handler_type == "static") + main_handler_factory->addHandler(createStaticHandlerFactory(server, prefix + "." + key)); + else if (handler_type == "dynamic_query_handler") + main_handler_factory->addHandler(createDynamicHandlerFactory(server, prefix + "." + key)); + else if (handler_type == "predefined_query_handler") + main_handler_factory->addHandler(createPredefinedHandlerFactory(server, prefix + "." + key)); + else if (handler_type == "prometheus") + main_handler_factory->addHandler(createPrometheusHandlerFactory(server, async_metrics, prefix + "." + key)); + else if (handler_type == "replicas_status") + main_handler_factory->addHandler(createReplicasStatusHandlerFactory(server, prefix + "." + key)); + else + throw Exception("Unknown handler type '" + handler_type + "' in config here: " + prefix + "." 
+ key + ".handler.type", + ErrorCodes::INVALID_CONFIG_PARAMETER); + } else - throw Exception("Unknown handler type '" + handler_type +"' in config here: " + - prefix + "." + key + ".handler.type",ErrorCodes::INVALID_CONFIG_PARAMETER); + throw Exception("Unknown element in config: " + prefix + "." + key + ", must be 'rule' or 'defaults'", + ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG); } return main_handler_factory.release(); } -static inline Poco::Net::HTTPRequestHandlerFactory * createHTTPHandlerFactory( - IServer & server, const std::string & name, AsynchronousMetrics & async_metrics) +static inline Poco::Net::HTTPRequestHandlerFactory * createHTTPHandlerFactory(IServer & server, const std::string & name, AsynchronousMetrics & async_metrics) { if (server.config().has("http_handlers")) return createHandlersFactoryFromConfig(server, name, "http_handlers", async_metrics); else { auto factory = std::make_unique(name); - - addRootHandlerFactory(*factory, server); - addPingHandlerFactory(*factory, server); - addReplicasStatusHandlerFactory(*factory, server); - addPrometheusHandlerFactory(*factory, server, async_metrics); + addDefaultHandlersFactory(*factory, server, &async_metrics); auto query_handler = std::make_unique>(server, "query"); query_handler->allowPostAndGetParamsRequest(); factory->addHandler(query_handler.release()); + return factory.release(); } } @@ -130,10 +126,7 @@ static inline Poco::Net::HTTPRequestHandlerFactory * createHTTPHandlerFactory( static inline Poco::Net::HTTPRequestHandlerFactory * createInterserverHTTPHandlerFactory(IServer & server, const std::string & name) { auto factory = std::make_unique(name); - - addRootHandlerFactory(*factory, server); - addPingHandlerFactory(*factory, server); - addReplicasStatusHandlerFactory(*factory, server); + addDefaultHandlersFactory(*factory, server, nullptr); auto main_handler = std::make_unique>(server); main_handler->allowPostAndGetParamsRequest(); @@ -161,12 +154,32 @@ Poco::Net::HTTPRequestHandlerFactory * createHandlerFactory(IServer & server, As throw Exception("LOGICAL ERROR: Unknown HTTP handler factory name.", ErrorCodes::LOGICAL_ERROR); } -void addDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server, AsynchronousMetrics & async_metrics) +static const auto ping_response_expression = "Ok.\n"; +static const auto root_response_expression = "config://http_server_default_response"; + +void addDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server, AsynchronousMetrics * async_metrics) { - addRootHandlerFactory(factory, server); - addPingHandlerFactory(factory, server); - addReplicasStatusHandlerFactory(factory, server); - addPrometheusHandlerFactory(factory, server, async_metrics); + auto root_handler = std::make_unique>(server, root_response_expression); + root_handler->attachStrictPath("/")->allowGetAndHeadRequest(); + factory.addHandler(root_handler.release()); + + auto ping_handler = std::make_unique>(server, ping_response_expression); + ping_handler->attachStrictPath("/ping")->allowGetAndHeadRequest(); + factory.addHandler(ping_handler.release()); + + auto replicas_status_handler = std::make_unique>(server); + replicas_status_handler->attachNonStrictPath("/replicas_status")->allowGetAndHeadRequest(); + factory.addHandler(replicas_status_handler.release()); + + /// We check that prometheus handler will be served on current (default) port. + /// Otherwise it will be created separately, see below. 
+ if (async_metrics && server.config().has("prometheus") && server.config().getInt("prometheus.port", 0) == 0) + { + auto prometheus_handler = std::make_unique>( + server, PrometheusMetricsWriter(server.config(), "prometheus", *async_metrics)); + prometheus_handler->attachStrictPath(server.config().getString("prometheus.endpoint", "/metrics"))->allowGetAndHeadRequest(); + factory.addHandler(prometheus_handler.release()); + } } } diff --git a/src/Server/HTTPHandlerFactory.h b/src/Server/HTTPHandlerFactory.h index ac3a7451338..8e21a13ba18 100644 --- a/src/Server/HTTPHandlerFactory.h +++ b/src/Server/HTTPHandlerFactory.h @@ -103,15 +103,7 @@ private: std::function creator; }; -void addRootHandlerFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server); - -void addPingHandlerFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server); - -void addReplicasStatusHandlerFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server); - -void addDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server, AsynchronousMetrics & async_metrics); - -void addPrometheusHandlerFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server, AsynchronousMetrics & async_metrics); +void addDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server, AsynchronousMetrics * async_metrics); Poco::Net::HTTPRequestHandlerFactory * createStaticHandlerFactory(IServer & server, const std::string & config_prefix); @@ -119,6 +111,10 @@ Poco::Net::HTTPRequestHandlerFactory * createDynamicHandlerFactory(IServer & ser Poco::Net::HTTPRequestHandlerFactory * createPredefinedHandlerFactory(IServer & server, const std::string & config_prefix); +Poco::Net::HTTPRequestHandlerFactory * createReplicasStatusHandlerFactory(IServer & server, const std::string & config_prefix); + +Poco::Net::HTTPRequestHandlerFactory * createPrometheusHandlerFactory(IServer & server, AsynchronousMetrics & async_metrics, const std::string & config_prefix); + Poco::Net::HTTPRequestHandlerFactory * createHandlerFactory(IServer & server, AsynchronousMetrics & async_metrics, const std::string & name); } diff --git a/src/Server/PrometheusRequestHandler.cpp b/src/Server/PrometheusRequestHandler.cpp index 0f5df54b002..60deec9b289 100644 --- a/src/Server/PrometheusRequestHandler.cpp +++ b/src/Server/PrometheusRequestHandler.cpp @@ -12,7 +12,7 @@ #include #include -#include +#include namespace DB @@ -41,17 +41,10 @@ void PrometheusRequestHandler::handleRequest( } } -void addPrometheusHandlerFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server, AsynchronousMetrics & async_metrics) +Poco::Net::HTTPRequestHandlerFactory * createPrometheusHandlerFactory(IServer & server, AsynchronousMetrics & async_metrics, const std::string & config_prefix) { - /// We check that prometheus handler will be served on current (default) port. - /// Otherwise it will be created separately, see below. 
- if (server.config().has("prometheus") && server.config().getInt("prometheus.port", 0) == 0) - { - auto prometheus_handler = std::make_unique>( - server, PrometheusMetricsWriter(server.config(), "prometheus", async_metrics)); - prometheus_handler->attachStrictPath(server.config().getString("prometheus.endpoint", "/metrics"))->allowGetAndHeadRequest(); - factory.addHandler(prometheus_handler.release()); - } + return addFiltersFromConfig(new HandlingRuleHTTPHandlerFactory( + server, PrometheusMetricsWriter(server.config(), config_prefix + ".handler", async_metrics)), server.config(), config_prefix); } } diff --git a/src/Server/ReplicasStatusHandler.cpp b/src/Server/ReplicasStatusHandler.cpp index d6bbfdbd090..3606da23ab5 100644 --- a/src/Server/ReplicasStatusHandler.cpp +++ b/src/Server/ReplicasStatusHandler.cpp @@ -106,11 +106,9 @@ void ReplicasStatusHandler::handleRequest(Poco::Net::HTTPServerRequest & request } } -void addReplicasStatusHandlerFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server) +Poco::Net::HTTPRequestHandlerFactory * createReplicasStatusHandlerFactory(IServer & server, const std::string & config_prefix) { - auto replicas_status_handler = std::make_unique>(server); - replicas_status_handler->attachNonStrictPath("/replicas_status")->allowGetAndHeadRequest(); - factory.addHandler(replicas_status_handler.release()); + return addFiltersFromConfig(new HandlingRuleHTTPHandlerFactory(server), server.config(), config_prefix); } } diff --git a/src/Server/StaticRequestHandler.cpp b/src/Server/StaticRequestHandler.cpp index 255e3cab5af..22f32e6a0e7 100644 --- a/src/Server/StaticRequestHandler.cpp +++ b/src/Server/StaticRequestHandler.cpp @@ -155,22 +155,6 @@ StaticRequestHandler::StaticRequestHandler(IServer & server_, const String & exp { } -void addRootHandlerFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server) -{ - static const auto root_response_expression = "config://http_server_default_response"; - - auto root_handler = std::make_unique>(server, root_response_expression); - root_handler->attachStrictPath("/")->allowGetAndHeadRequest(); - factory.addHandler(root_handler.release()); -} - -void addPingHandlerFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server) -{ - auto ping_handler = std::make_unique>(server, "Ok.\n"); - ping_handler->attachStrictPath("/ping")->allowGetAndHeadRequest(); - factory.addHandler(ping_handler.release()); -} - Poco::Net::HTTPRequestHandlerFactory * createStaticHandlerFactory(IServer & server, const std::string & config_prefix) { int status = server.config().getInt(config_prefix + ".handler.status", 200); diff --git a/tests/integration/test_http_handlers_config/test.py b/tests/integration/test_http_handlers_config/test.py index a38bd3ff343..c18c22acbb2 100644 --- a/tests/integration/test_http_handlers_config/test.py +++ b/tests/integration/test_http_handlers_config/test.py @@ -124,12 +124,24 @@ def test_defaults_http_handlers(): assert 200 == cluster.instance.http_request('replicas_status', method='GET').status_code assert 'Ok\n' == cluster.instance.http_request('replicas_status', method='GET').content -def test_custom_defaults_http_handlers(): - with contextlib.closing(SimpleCluster(ClickHouseCluster(__file__), "custom_defaults_handlers", "test_custom_defaults_handlers")) as cluster: - assert 200 == cluster.instance.http_request('', method='GET').status_code - assert 'Default server response' == cluster.instance.http_request('', method='GET').content +def test_prometheus_handler(): + with 
contextlib.closing(SimpleCluster(ClickHouseCluster(__file__), "prometheus_handler", "test_prometheus_handler")) as cluster: + assert 404 == cluster.instance.http_request('', method='GET', headers={'XXX': 'xxx'}).status_code - assert 200 == cluster.instance.http_request('ping', method='GET').status_code - assert 'Ok\n' == cluster.instance.http_request('ping', method='GET').content + assert 404 == cluster.instance.http_request('test_prometheus', method='GET', headers={'XXX': 'bad'}).status_code - assert 404 == cluster.instance.http_request('replicas_status', method='GET').status_code + assert 404 == cluster.instance.http_request('test_prometheus', method='POST', headers={'XXX': 'xxx'}).status_code + + assert 200 == cluster.instance.http_request('test_prometheus', method='GET', headers={'XXX': 'xxx'}).status_code + assert 'ClickHouseProfileEvents_Query' in cluster.instance.http_request('test_prometheus', method='GET', headers={'XXX': 'xxx'}).content + +def test_replicas_status_handler(): + with contextlib.closing(SimpleCluster(ClickHouseCluster(__file__), "replicas_status_handler", "test_replicas_status_handler")) as cluster: + assert 404 == cluster.instance.http_request('', method='GET', headers={'XXX': 'xxx'}).status_code + + assert 404 == cluster.instance.http_request('test_replicas_status', method='GET', headers={'XXX': 'bad'}).status_code + + assert 404 == cluster.instance.http_request('test_replicas_status', method='POST', headers={'XXX': 'xxx'}).status_code + + assert 200 == cluster.instance.http_request('test_replicas_status', method='GET', headers={'XXX': 'xxx'}).status_code + assert 'Ok\n' == cluster.instance.http_request('test_replicas_status', method='GET', headers={'XXX': 'xxx'}).content diff --git a/tests/integration/test_http_handlers_config/test_custom_defaults_handlers/config.xml b/tests/integration/test_http_handlers_config/test_custom_defaults_handlers/config.xml deleted file mode 100644 index 54008c2c4b8..00000000000 --- a/tests/integration/test_http_handlers_config/test_custom_defaults_handlers/config.xml +++ /dev/null @@ -1,10 +0,0 @@ - - - - Default server response - - - - - - diff --git a/tests/integration/test_http_handlers_config/test_prometheus_handler/config.xml b/tests/integration/test_http_handlers_config/test_prometheus_handler/config.xml new file mode 100644 index 00000000000..7c80649cee2 --- /dev/null +++ b/tests/integration/test_http_handlers_config/test_prometheus_handler/config.xml @@ -0,0 +1,17 @@ + + + + + + GET + xxx + /test_prometheus + + replicas_status + true + true + true + + + + diff --git a/tests/integration/test_http_handlers_config/test_replicas_status_handler/config.xml b/tests/integration/test_http_handlers_config/test_replicas_status_handler/config.xml new file mode 100644 index 00000000000..21f7d3b0fc8 --- /dev/null +++ b/tests/integration/test_http_handlers_config/test_replicas_status_handler/config.xml @@ -0,0 +1,12 @@ + + + + + + GET + xxx + /test_replicas_status + replicas_status + + + From 7f52b615e061d6cbd3c493bc599913541875e397 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Fri, 12 Jun 2020 21:17:06 +0300 Subject: [PATCH 10/52] Fix bloom filters for String (data skipping indices) bloom filter was broken for the first element, if all of the following conditions satisfied: - they are created on INSERT (in thie case bloom filter hashing uses offsets, in case of OPTIMIZE it does not, since it already has granulars). 
- the record is not the first in the block - the record is the first per index_granularity (do not confuse this with data skipping index GRANULARITY). - type of the field for indexing is "String" (not FixedString). Because in this case there was incorrect length and *data* for that string. --- src/Interpreters/BloomFilterHash.h | 7 +++---- .../0_stateless/01307_data_skip_bloom_filter.reference | 4 ++++ .../queries/0_stateless/01307_data_skip_bloom_filter.sql | 8 ++++++++ 3 files changed, 15 insertions(+), 4 deletions(-) create mode 100644 tests/queries/0_stateless/01307_data_skip_bloom_filter.reference create mode 100644 tests/queries/0_stateless/01307_data_skip_bloom_filter.sql diff --git a/src/Interpreters/BloomFilterHash.h b/src/Interpreters/BloomFilterHash.h index e7411433781..43f5d7b5e87 100644 --- a/src/Interpreters/BloomFilterHash.h +++ b/src/Interpreters/BloomFilterHash.h @@ -196,18 +196,17 @@ struct BloomFilterHash const ColumnString::Chars & data = index_column->getChars(); const ColumnString::Offsets & offsets = index_column->getOffsets(); - ColumnString::Offset current_offset = pos; for (size_t index = 0, size = vec.size(); index < size; ++index) { + ColumnString::Offset current_offset = offsets[index + pos - 1]; + size_t length = offsets[index + pos] - current_offset - 1 /* terminating zero */; UInt64 city_hash = CityHash_v1_0_2::CityHash64( - reinterpret_cast(&data[current_offset]), offsets[index + pos] - current_offset - 1); + reinterpret_cast(&data[current_offset]), length); if constexpr (is_first) vec[index] = city_hash; else vec[index] = CityHash_v1_0_2::Hash128to64(CityHash_v1_0_2::uint128(vec[index], city_hash)); - - current_offset = offsets[index + pos]; } } else if (const auto * fixed_string_index_column = typeid_cast(column)) diff --git a/tests/queries/0_stateless/01307_data_skip_bloom_filter.reference b/tests/queries/0_stateless/01307_data_skip_bloom_filter.reference new file mode 100644 index 00000000000..98fb6a68656 --- /dev/null +++ b/tests/queries/0_stateless/01307_data_skip_bloom_filter.reference @@ -0,0 +1,4 @@ +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/01307_data_skip_bloom_filter.sql b/tests/queries/0_stateless/01307_data_skip_bloom_filter.sql new file mode 100644 index 00000000000..832f7140af2 --- /dev/null +++ b/tests/queries/0_stateless/01307_data_skip_bloom_filter.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS test_01307; +CREATE TABLE test_01307 (id UInt64, val String, INDEX ind val TYPE bloom_filter() GRANULARITY 1) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity = 2; +INSERT INTO test_01307 (id, val) select number as id, toString(number) as val from numbers(4); +SELECT count() FROM test_01307 WHERE identity(val) = '2'; +SELECT count() FROM test_01307 WHERE val = '2'; +OPTIMIZE TABLE test_01307 FINAL; +SELECT count() FROM test_01307 WHERE identity(val) = '2'; +SELECT count() FROM test_01307 WHERE val = '2'; From 0b1ff4f9cce72ab24e6b9bb909874f81789423ba Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 13 Jun 2020 00:48:10 +0300 Subject: [PATCH 11/52] Update max_rows_to_read in 00945_bloom_filter_index test The expected values were incorrect, since for strings we have 1 and 10 and there will be at least two index granules, hence 12 rows.
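To spell out the arithmetic (a rough sketch; the 6-row granule size is inferred from the neighbouring expectations in this test, not stated explicitly here): once two granules have to be read, the limit must admit 2 * 6 = 12 rows, e.g.

    SELECT COUNT() FROM bloom_filter_types_test WHERE str = '1' SETTINGS max_rows_to_read = 12; -- two 6-row granules fit under the limit
    SELECT COUNT() FROM bloom_filter_types_test WHERE str = '1' SETTINGS max_rows_to_read = 6;  -- would exceed the limit and abort the query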
--- tests/queries/0_stateless/00945_bloom_filter_index.sql | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/00945_bloom_filter_index.sql b/tests/queries/0_stateless/00945_bloom_filter_index.sql index 6f93ae89a42..d509b99229a 100755 --- a/tests/queries/0_stateless/00945_bloom_filter_index.sql +++ b/tests/queries/0_stateless/00945_bloom_filter_index.sql @@ -43,7 +43,7 @@ SELECT COUNT() FROM bloom_filter_types_test WHERE f32 = 1 SETTINGS max_rows_to_r SELECT COUNT() FROM bloom_filter_types_test WHERE f64 = 1 SETTINGS max_rows_to_read = 6; SELECT COUNT() FROM bloom_filter_types_test WHERE date = '1970-01-02' SETTINGS max_rows_to_read = 6; SELECT COUNT() FROM bloom_filter_types_test WHERE date_time = toDateTime('1970-01-01 03:00:01', 'Europe/Moscow') SETTINGS max_rows_to_read = 6; -SELECT COUNT() FROM bloom_filter_types_test WHERE str = '1' SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM bloom_filter_types_test WHERE str = '1' SETTINGS max_rows_to_read = 12; SELECT COUNT() FROM bloom_filter_types_test WHERE fixed_string = toFixedString('1', 5) SETTINGS max_rows_to_read = 12; SELECT COUNT() FROM bloom_filter_types_test WHERE str IN ( SELECT str FROM bloom_filter_types_test); @@ -122,7 +122,7 @@ SELECT COUNT() FROM bloom_filter_null_types_test WHERE f32 = 1 SETTINGS max_rows SELECT COUNT() FROM bloom_filter_null_types_test WHERE f64 = 1 SETTINGS max_rows_to_read = 6; SELECT COUNT() FROM bloom_filter_null_types_test WHERE date = '1970-01-02' SETTINGS max_rows_to_read = 6; SELECT COUNT() FROM bloom_filter_null_types_test WHERE date_time = toDateTime('1970-01-01 03:00:01', 'Europe/Moscow') SETTINGS max_rows_to_read = 6; -SELECT COUNT() FROM bloom_filter_null_types_test WHERE str = '1' SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM bloom_filter_null_types_test WHERE str = '1' SETTINGS max_rows_to_read = 12; SELECT COUNT() FROM bloom_filter_null_types_test WHERE fixed_string = toFixedString('1', 5) SETTINGS max_rows_to_read = 12; SELECT COUNT() FROM bloom_filter_null_types_test WHERE isNull(i8); @@ -150,7 +150,7 @@ CREATE TABLE bloom_filter_lc_null_types_test (order_key UInt64, str LowCardinali INSERT INTO bloom_filter_lc_null_types_test SELECT number AS order_key, toString(number) AS str, toFixedString(toString(number), 5) AS fixed_string FROM system.numbers LIMIT 100; INSERT INTO bloom_filter_lc_null_types_test SELECT 0 AS order_key, NULL AS str, NULL AS fixed_string; -SELECT COUNT() FROM bloom_filter_lc_null_types_test WHERE str = '1' SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM bloom_filter_lc_null_types_test WHERE str = '1' SETTINGS max_rows_to_read = 12; SELECT COUNT() FROM bloom_filter_lc_null_types_test WHERE fixed_string = toFixedString('1', 5) SETTINGS max_rows_to_read = 12; SELECT COUNT() FROM bloom_filter_lc_null_types_test WHERE isNull(str); From de96296e019887f925c998bef97e42bafaa964e8 Mon Sep 17 00:00:00 2001 From: zhang2014 Date: Sat, 13 Jun 2020 10:17:02 +0800 Subject: [PATCH 12/52] ISSUES-7572 fix build failure --- src/Server/ReplicasStatusHandler.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Server/ReplicasStatusHandler.cpp b/src/Server/ReplicasStatusHandler.cpp index 3606da23ab5..5ead756ee1e 100644 --- a/src/Server/ReplicasStatusHandler.cpp +++ b/src/Server/ReplicasStatusHandler.cpp @@ -11,6 +11,7 @@ #include #include #include +#include namespace DB From 901a657417beb4d262a86c61b6935210eb9e7893 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 13 Jun 2020 14:20:24 +0300 Subject: [PATCH 13/52] Rename 
01307_data_skip_bloom_filter to 01307_bloom_filter_index_string_multi_granulas This better reflects the covered case. --- ...e => 01307_bloom_filter_index_string_multi_granulas.reference} | 0 ...ter.sql => 01307_bloom_filter_index_string_multi_granulas.sql} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename tests/queries/0_stateless/{01307_data_skip_bloom_filter.reference => 01307_bloom_filter_index_string_multi_granulas.reference} (100%) rename tests/queries/0_stateless/{01307_data_skip_bloom_filter.sql => 01307_bloom_filter_index_string_multi_granulas.sql} (100%) diff --git a/tests/queries/0_stateless/01307_data_skip_bloom_filter.reference b/tests/queries/0_stateless/01307_bloom_filter_index_string_multi_granulas.reference similarity index 100% rename from tests/queries/0_stateless/01307_data_skip_bloom_filter.reference rename to tests/queries/0_stateless/01307_bloom_filter_index_string_multi_granulas.reference diff --git a/tests/queries/0_stateless/01307_data_skip_bloom_filter.sql b/tests/queries/0_stateless/01307_bloom_filter_index_string_multi_granulas.sql similarity index 100% rename from tests/queries/0_stateless/01307_data_skip_bloom_filter.sql rename to tests/queries/0_stateless/01307_bloom_filter_index_string_multi_granulas.sql From 713f8f0b2246be3381f2e7d1967b10173e24523c Mon Sep 17 00:00:00 2001 From: Volodymyr Kuznetsov Date: Sat, 13 Jun 2020 17:21:33 -0700 Subject: [PATCH 14/52] Added groupArrayArray and groupUniqArrayArray to SimpleAggregateFunction --- .../DataTypeCustomSimpleAggregateFunction.cpp | 2 +- .../00915_simple_aggregate_function.reference | 6 +++--- .../00915_simple_aggregate_function.sql | 14 ++++++++------ 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/src/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp b/src/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp index 2ddce184cce..8b31a93dfe9 100644 --- a/src/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp +++ b/src/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp @@ -30,7 +30,7 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -static const std::vector supported_functions{"any", "anyLast", "min", "max", "sum", "groupBitAnd", "groupBitOr", "groupBitXor", "sumMap"}; +static const std::vector supported_functions{"any", "anyLast", "min", "max", "sum", "groupBitAnd", "groupBitOr", "groupBitXor", "sumMap", "groupArrayArray", "groupUniqArrayArray"}; String DataTypeCustomSimpleAggregateFunction::getName() const diff --git a/tests/queries/0_stateless/00915_simple_aggregate_function.reference b/tests/queries/0_stateless/00915_simple_aggregate_function.reference index d9e0a92cb01..771c19f2227 100644 --- a/tests/queries/0_stateless/00915_simple_aggregate_function.reference +++ b/tests/queries/0_stateless/00915_simple_aggregate_function.reference @@ -39,6 +39,6 @@ SimpleAggregateFunction(sum, Float64) 7 14 8 16 9 18 -1 1 2 2.2.2.2 3 ([1,2,3],[2,1,1]) -10 2222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222 20 20.20.20.20 5 ([2,3,4],[2,1,1]) -SimpleAggregateFunction(anyLast, Nullable(String)) SimpleAggregateFunction(anyLast, LowCardinality(Nullable(String))) SimpleAggregateFunction(anyLast, IPv4) SimpleAggregateFunction(groupBitOr, UInt32) SimpleAggregateFunction(sumMap, Tuple(Array(Int32), Array(Int64))) +1 1 2 2.2.2.2 3 ([1,2,3],[2,1,1]) [1,2,2,3,4] [4,2,1,3] +10 2222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222 20 20.20.20.20 5 ([2,3,4],[2,1,1]) [] [] 
+SimpleAggregateFunction(anyLast, Nullable(String)) SimpleAggregateFunction(anyLast, LowCardinality(Nullable(String))) SimpleAggregateFunction(anyLast, IPv4) SimpleAggregateFunction(groupBitOr, UInt32) SimpleAggregateFunction(sumMap, Tuple(Array(Int32), Array(Int64))) SimpleAggregateFunction(groupArrayArray, Array(Int32)) SimpleAggregateFunction(groupUniqArrayArray, Array(Int32)) diff --git a/tests/queries/0_stateless/00915_simple_aggregate_function.sql b/tests/queries/0_stateless/00915_simple_aggregate_function.sql index ba4935a6518..8cf0e032702 100644 --- a/tests/queries/0_stateless/00915_simple_aggregate_function.sql +++ b/tests/queries/0_stateless/00915_simple_aggregate_function.sql @@ -25,16 +25,18 @@ create table simple ( low_str SimpleAggregateFunction(anyLast,LowCardinality(Nullable(String))), ip SimpleAggregateFunction(anyLast,IPv4), status SimpleAggregateFunction(groupBitOr, UInt32), - tup SimpleAggregateFunction(sumMap, Tuple(Array(Int32), Array(Int64))) + tup SimpleAggregateFunction(sumMap, Tuple(Array(Int32), Array(Int64))), + arr SimpleAggregateFunction(groupArrayArray, Array(Int32)), + uniq_arr SimpleAggregateFunction(groupUniqArrayArray, Array(Int32)) ) engine=AggregatingMergeTree order by id; -insert into simple values(1,'1','1','1.1.1.1', 1, ([1,2], [1,1])); -insert into simple values(1,null,'2','2.2.2.2', 2, ([1,3], [1,1])); +insert into simple values(1,'1','1','1.1.1.1', 1, ([1,2], [1,1]), [1,2], [1,2]); +insert into simple values(1,null,'2','2.2.2.2', 2, ([1,3], [1,1]), [2,3,4], [2,3,4]); -- String longer then MAX_SMALL_STRING_SIZE (actual string length is 100) -insert into simple values(10,'10','10','10.10.10.10', 4, ([2,3], [1,1])); -insert into simple values(10,'2222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222','20','20.20.20.20', 1, ([2, 4], [1,1])); +insert into simple values(10,'10','10','10.10.10.10', 4, ([2,3], [1,1]), [], []); +insert into simple values(10,'2222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222','20','20.20.20.20', 1, ([2, 4], [1,1]), [], []); select * from simple final order by id; -select toTypeName(nullable_str),toTypeName(low_str),toTypeName(ip),toTypeName(status), toTypeName(tup) from simple limit 1; +select toTypeName(nullable_str),toTypeName(low_str),toTypeName(ip),toTypeName(status), toTypeName(tup), toTypeName(arr), toTypeName(uniq_arr) from simple limit 1; optimize table simple final; From e1317ef8ae6acc7ac40c3b6985bbd97828880dd2 Mon Sep 17 00:00:00 2001 From: zhang2014 Date: Sun, 14 Jun 2020 09:44:05 +0800 Subject: [PATCH 15/52] ISSUES-7572 fix test failure --- tests/integration/test_http_handlers_config/test.py | 6 +++--- .../test_prometheus_handler/config.xml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_http_handlers_config/test.py b/tests/integration/test_http_handlers_config/test.py index c18c22acbb2..b31913ba962 100644 --- a/tests/integration/test_http_handlers_config/test.py +++ b/tests/integration/test_http_handlers_config/test.py @@ -119,10 +119,10 @@ def test_defaults_http_handlers(): assert 'Default server response' == cluster.instance.http_request('', method='GET').content assert 200 == cluster.instance.http_request('ping', method='GET').status_code - assert 'Ok\n' == cluster.instance.http_request('ping', method='GET').content + assert 'Ok.\n' == cluster.instance.http_request('ping', method='GET').content assert 200 == cluster.instance.http_request('replicas_status', 
method='GET').status_code - assert 'Ok\n' == cluster.instance.http_request('replicas_status', method='GET').content + assert 'Ok.\n' == cluster.instance.http_request('replicas_status', method='GET').content def test_prometheus_handler(): with contextlib.closing(SimpleCluster(ClickHouseCluster(__file__), "prometheus_handler", "test_prometheus_handler")) as cluster: @@ -144,4 +144,4 @@ def test_replicas_status_handler(): assert 404 == cluster.instance.http_request('test_replicas_status', method='POST', headers={'XXX': 'xxx'}).status_code assert 200 == cluster.instance.http_request('test_replicas_status', method='GET', headers={'XXX': 'xxx'}).status_code - assert 'Ok\n' == cluster.instance.http_request('test_replicas_status', method='GET', headers={'XXX': 'xxx'}).content + assert 'Ok.\n' == cluster.instance.http_request('test_replicas_status', method='GET', headers={'XXX': 'xxx'}).content diff --git a/tests/integration/test_http_handlers_config/test_prometheus_handler/config.xml b/tests/integration/test_http_handlers_config/test_prometheus_handler/config.xml index 7c80649cee2..8ace97a66dc 100644 --- a/tests/integration/test_http_handlers_config/test_prometheus_handler/config.xml +++ b/tests/integration/test_http_handlers_config/test_prometheus_handler/config.xml @@ -7,7 +7,7 @@ xxx /test_prometheus - replicas_status + prometheus true true true From ceaaf67d3fb6ae409a3515eac4ab0d63ae22303d Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Fri, 5 Jun 2020 23:44:10 +0300 Subject: [PATCH 16/52] Fix parsing CREATE SETTINGS PROFILE with WRITABLE keyword. --- src/Parsers/ParserSettingsProfileElement.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Parsers/ParserSettingsProfileElement.cpp b/src/Parsers/ParserSettingsProfileElement.cpp index 37044e8ccbe..1dccae50cf5 100644 --- a/src/Parsers/ParserSettingsProfileElement.cpp +++ b/src/Parsers/ParserSettingsProfileElement.cpp @@ -87,7 +87,7 @@ namespace readonly = true; return true; } - else if (ParserKeyword{"READONLY"}.ignore(pos, expected)) + else if (ParserKeyword{"WRITABLE"}.ignore(pos, expected)) { readonly = false; return true; From ca2fb5932126175fed213a6f040fd34ff7b2d908 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Sat, 6 Jun 2020 05:25:27 +0300 Subject: [PATCH 17/52] Fix calculating full names of row policies. --- src/Access/RowPolicy.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Access/RowPolicy.cpp b/src/Access/RowPolicy.cpp index 4249f351eae..acacaf01c6c 100644 --- a/src/Access/RowPolicy.cpp +++ b/src/Access/RowPolicy.cpp @@ -17,7 +17,7 @@ String RowPolicy::NameParts::getName() const name.reserve(database.length() + table_name.length() + short_name.length() + 6); name += backQuoteIfNeed(short_name); name += " ON "; - if (!name.empty()) + if (!database.empty()) { name += backQuoteIfNeed(database); name += '.'; From 3ffcb8e790434245cfeea7aceb9dbd8daf6a003b Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Mon, 15 Jun 2020 00:00:57 +0300 Subject: [PATCH 18/52] Fix casting values of settings while reading profiles from users.xml. 
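As a rough illustration of the casting issue this commit addresses (the SettingValue/SettingType model below is a simplified assumption, not the real Settings API): everything read from users.xml arrives as text, so a numeric profile setting or constraint bound has to be converted to the setting's declared type before it is stored, instead of being kept as a string.

#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>
#include <variant>

/// Simplified stand-in for a typed setting value; the real code goes through Field and
/// Settings::valueToCorrespondingType, this only sketches the idea.
using SettingValue = std::variant<uint64_t, bool, std::string>;

enum class SettingType { UInt64, Bool, String };

SettingValue valueToCorrespondingType(SettingType type, const std::string & text)
{
    switch (type)
    {
        case SettingType::UInt64: return static_cast<uint64_t>(std::stoull(text));
        case SettingType::Bool:   return text == "1" || text == "true";
        case SettingType::String: return text;
    }
    throw std::logic_error("unknown setting type");
}

int main()
{
    /// A max constraint bound read from users.xml is the string "20000000000"; it is converted
    /// here to the numeric type the setting actually has, so later constraint checks compare
    /// numbers with numbers rather than with a leftover string.
    SettingValue max_bound = valueToCorrespondingType(SettingType::UInt64, "20000000000");
    std::cout << std::get<uint64_t>(max_bound) << '\n';
}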
--- src/Access/UsersConfigAccessStorage.cpp | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/Access/UsersConfigAccessStorage.cpp b/src/Access/UsersConfigAccessStorage.cpp index f5f48a2390e..4d7d1b4cdfe 100644 --- a/src/Access/UsersConfigAccessStorage.cpp +++ b/src/Access/UsersConfigAccessStorage.cpp @@ -353,16 +353,17 @@ namespace for (const String & name : names) { SettingsProfileElement profile_element; - profile_element.setting_index = Settings::findIndexStrict(name); + size_t setting_index = Settings::findIndexStrict(name); + profile_element.setting_index = setting_index; Poco::Util::AbstractConfiguration::Keys constraint_types; String path_to_name = path_to_constraints + "." + name; config.keys(path_to_name, constraint_types); for (const String & constraint_type : constraint_types) { if (constraint_type == "min") - profile_element.min_value = config.getString(path_to_name + "." + constraint_type); + profile_element.min_value = Settings::valueToCorrespondingType(setting_index, config.getString(path_to_name + "." + constraint_type)); else if (constraint_type == "max") - profile_element.max_value = config.getString(path_to_name + "." + constraint_type); + profile_element.max_value = Settings::valueToCorrespondingType(setting_index, config.getString(path_to_name + "." + constraint_type)); else if (constraint_type == "readonly") profile_element.readonly = true; else @@ -402,8 +403,9 @@ namespace } SettingsProfileElement profile_element; - profile_element.setting_index = Settings::findIndexStrict(key); - profile_element.value = config.getString(profile_config + "." + key); + size_t setting_index = Settings::findIndexStrict(key); + profile_element.setting_index = setting_index; + profile_element.value = Settings::valueToCorrespondingType(setting_index, config.getString(profile_config + "." + key)); profile->elements.emplace_back(std::move(profile_element)); } From 0c1b2d48a30d4006271638de056070ac9cc5a4ee Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Jun 2020 06:58:07 +0300 Subject: [PATCH 19/52] Update test --- .../01249_bad_arguments_for_bloom_filter.reference | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01249_bad_arguments_for_bloom_filter.reference b/tests/queries/0_stateless/01249_bad_arguments_for_bloom_filter.reference index 70d176d9b7a..e3f4955d4cf 100644 --- a/tests/queries/0_stateless/01249_bad_arguments_for_bloom_filter.reference +++ b/tests/queries/0_stateless/01249_bad_arguments_for_bloom_filter.reference @@ -1,3 +1,3 @@ -CREATE TABLE default.bloom_filter_idx_good\n(\n `u64` UInt64, \n `i32` Int32, \n `f64` Float64, \n `d` Decimal(10, 2), \n `s` String, \n `e` Enum8(\'a\' = 1, \'b\' = 2, \'c\' = 3), \n `dt` Date, \n INDEX bloom_filter_a i32 TYPE bloom_filter(0., 1.) 
GRANULARITY 1\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 -CREATE TABLE default.bloom_filter_idx_good\n(\n `u64` UInt64, \n `i32` Int32, \n `f64` Float64, \n `d` Decimal(10, 2), \n `s` String, \n `e` Enum8(\'a\' = 1, \'b\' = 2, \'c\' = 3), \n `dt` Date, \n INDEX bloom_filter_a i32 TYPE bloom_filter(-0.1) GRANULARITY 1\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 -CREATE TABLE default.bloom_filter_idx_good\n(\n `u64` UInt64, \n `i32` Int32, \n `f64` Float64, \n `d` Decimal(10, 2), \n `s` String, \n `e` Enum8(\'a\' = 1, \'b\' = 2, \'c\' = 3), \n `dt` Date, \n INDEX bloom_filter_a i32 TYPE bloom_filter(1.01) GRANULARITY 1\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.bloom_filter_idx_good\n(\n `u64` UInt64,\n `i32` Int32,\n `f64` Float64,\n `d` Decimal(10, 2),\n `s` String,\n `e` Enum8(\'a\' = 1, \'b\' = 2, \'c\' = 3),\n `dt` Date,\n INDEX bloom_filter_a i32 TYPE bloom_filter(0., 1.) GRANULARITY 1\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.bloom_filter_idx_good\n(\n `u64` UInt64,\n `i32` Int32,\n `f64` Float64,\n `d` Decimal(10, 2),\n `s` String,\n `e` Enum8(\'a\' = 1, \'b\' = 2, \'c\' = 3),\n `dt` Date,\n INDEX bloom_filter_a i32 TYPE bloom_filter(-0.1) GRANULARITY 1\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.bloom_filter_idx_good\n(\n `u64` UInt64,\n `i32` Int32,\n `f64` Float64,\n `d` Decimal(10, 2),\n `s` String,\n `e` Enum8(\'a\' = 1, \'b\' = 2, \'c\' = 3),\n `dt` Date,\n INDEX bloom_filter_a i32 TYPE bloom_filter(1.01) GRANULARITY 1\n)\nENGINE = MergeTree()\nORDER BY u64\nSETTINGS index_granularity = 8192 From 1c5c2f8c690a714e4f53b7809813364989789ef9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Jun 2020 07:08:20 +0300 Subject: [PATCH 20/52] Fix formatting of CREATE DICTIONARY --- src/Parsers/ASTDictionaryAttributeDeclaration.cpp | 3 --- src/Parsers/ParserCreateQuery.cpp | 1 - 2 files changed, 4 deletions(-) diff --git a/src/Parsers/ASTDictionaryAttributeDeclaration.cpp b/src/Parsers/ASTDictionaryAttributeDeclaration.cpp index 2b056cb3743..05ba48ace7b 100644 --- a/src/Parsers/ASTDictionaryAttributeDeclaration.cpp +++ b/src/Parsers/ASTDictionaryAttributeDeclaration.cpp @@ -34,9 +34,6 @@ void ASTDictionaryAttributeDeclaration::formatImpl(const FormatSettings & settin { frame.need_parens = false; - if (!settings.one_line) - settings.ostr << settings.nl_or_ws << std::string(4 * frame.indent, ' '); - settings.ostr << backQuote(name); if (type) diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index c54033bd27d..f8c137fb679 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -796,7 +796,6 @@ bool ParserCreateDictionaryQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, E ParserDictionaryAttributeDeclarationList attributes_p; ParserDictionary dictionary_p; - bool if_not_exists = false; ASTPtr database; From e9eb722d4ac539539fd8a1b803b3b203fa42d91b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Jun 2020 07:27:33 +0300 Subject: [PATCH 21/52] Better formatting of CREATE queries --- src/Parsers/ASTCreateQuery.cpp | 1 + src/Parsers/ASTExpressionList.cpp | 6 ++++-- src/Parsers/IAST.h | 1 + 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/Parsers/ASTCreateQuery.cpp b/src/Parsers/ASTCreateQuery.cpp index f7481ac3c09..fb6bbaeafb0 100644 --- a/src/Parsers/ASTCreateQuery.cpp 
+++ b/src/Parsers/ASTCreateQuery.cpp @@ -197,6 +197,7 @@ ASTPtr ASTCreateQuery::clone() const void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const { frame.need_parens = false; + frame.expression_list_always_start_on_new_line = true; if (!database.empty() && table.empty()) { diff --git a/src/Parsers/ASTExpressionList.cpp b/src/Parsers/ASTExpressionList.cpp index 1395d8b15fe..abab1e895cf 100644 --- a/src/Parsers/ASTExpressionList.cpp +++ b/src/Parsers/ASTExpressionList.cpp @@ -39,10 +39,12 @@ void ASTExpressionList::formatImplMultiline(const FormatSettings & settings, For settings.ostr << separator; } - if (children.size() > 1) + if (children.size() > 1 || frame.expression_list_always_start_on_new_line) settings.ostr << indent_str; - (*it)->formatImpl(settings, state, frame); + FormatStateStacked frame_nested = frame; + frame_nested.expression_list_always_start_on_new_line = false; + (*it)->formatImpl(settings, state, frame_nested); } } diff --git a/src/Parsers/IAST.h b/src/Parsers/IAST.h index 88dedc54d3f..c0c286ac0d2 100644 --- a/src/Parsers/IAST.h +++ b/src/Parsers/IAST.h @@ -202,6 +202,7 @@ public: { UInt8 indent = 0; bool need_parens = false; + bool expression_list_always_start_on_new_line = false; /// Line feed and indent before expression list even if it's of single element. const IAST * current_select = nullptr; }; From e2607f005c334ee8e35b107308b016bd98db7412 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Jun 2020 07:36:20 +0300 Subject: [PATCH 22/52] Fix error with ALTER CONSTRAINT formatting; added a test --- src/Parsers/ASTAlterQuery.cpp | 2 +- .../01318_alter_add_constraint_format.reference | 1 + .../0_stateless/01318_alter_add_constraint_format.sh | 8 ++++++++ 3 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/01318_alter_add_constraint_format.reference create mode 100755 tests/queries/0_stateless/01318_alter_add_constraint_format.sh diff --git a/src/Parsers/ASTAlterQuery.cpp b/src/Parsers/ASTAlterQuery.cpp index f323f66be17..1309037ec01 100644 --- a/src/Parsers/ASTAlterQuery.cpp +++ b/src/Parsers/ASTAlterQuery.cpp @@ -146,7 +146,7 @@ void ASTAlterCommand::formatImpl( } else if (type == ASTAlterCommand::ADD_CONSTRAINT) { - settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "ADD CONSTRAINT" << (if_not_exists ? "IF NOT EXISTS " : "") << (settings.hilite ? hilite_none : ""); + settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "ADD CONSTRAINT " << (if_not_exists ? "IF NOT EXISTS " : "") << (settings.hilite ? hilite_none : ""); constraint_decl->formatImpl(settings, state, frame); } else if (type == ASTAlterCommand::DROP_CONSTRAINT) diff --git a/tests/queries/0_stateless/01318_alter_add_constraint_format.reference b/tests/queries/0_stateless/01318_alter_add_constraint_format.reference new file mode 100644 index 00000000000..4283da7b3af --- /dev/null +++ b/tests/queries/0_stateless/01318_alter_add_constraint_format.reference @@ -0,0 +1 @@ +ALTER TABLE replicated_constraints1 ADD CONSTRAINT IF NOT EXISTS b_constraint CHECK b > 10 diff --git a/tests/queries/0_stateless/01318_alter_add_constraint_format.sh b/tests/queries/0_stateless/01318_alter_add_constraint_format.sh new file mode 100755 index 00000000000..f8eb655a766 --- /dev/null +++ b/tests/queries/0_stateless/01318_alter_add_constraint_format.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. 
$CURDIR/../shell_config.sh + +set -e + +$CLICKHOUSE_FORMAT --oneline <<<"ALTER TABLE replicated_constraints1 ADD CONSTRAINT IF NOT EXISTS b_constraint CHECK b > 10" From d5e3e7ff761d9fa56b5b6ad75dd81516e45a043f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Jun 2020 07:36:55 +0300 Subject: [PATCH 23/52] Update tests --- .../queries/0_stateless/01018_ddl_dictionaries_create.reference | 2 +- .../01110_dictionary_layout_without_arguments.reference | 2 +- .../0_stateless/01224_no_superfluous_dict_reload.reference | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01018_ddl_dictionaries_create.reference b/tests/queries/0_stateless/01018_ddl_dictionaries_create.reference index ad16e8ae7f2..7c2eca9cedf 100644 --- a/tests/queries/0_stateless/01018_ddl_dictionaries_create.reference +++ b/tests/queries/0_stateless/01018_ddl_dictionaries_create.reference @@ -1,5 +1,5 @@ =DICTIONARY in Ordinary DB -CREATE DICTIONARY ordinary_db.dict1\n(\n `key_column` UInt64 DEFAULT 0, \n `second_column` UInt8 DEFAULT 1, \n `third_column` String DEFAULT \'qqq\'\n)\nPRIMARY KEY key_column\nSOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9000 USER \'default\' TABLE \'table_for_dict\' PASSWORD \'\' DB \'database_for_dict\'))\nLIFETIME(MIN 1 MAX 10)\nLAYOUT(FLAT()) +CREATE DICTIONARY ordinary_db.dict1\n(\n `key_column` UInt64 DEFAULT 0,\n `second_column` UInt8 DEFAULT 1,\n `third_column` String DEFAULT \'qqq\'\n)\nPRIMARY KEY key_column\nSOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9000 USER \'default\' TABLE \'table_for_dict\' PASSWORD \'\' DB \'database_for_dict\'))\nLIFETIME(MIN 1 MAX 10)\nLAYOUT(FLAT()) dict1 1 ordinary_db dict1 diff --git a/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.reference b/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.reference index 852abeea187..69018bef2ef 100644 --- a/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.reference +++ b/tests/queries/0_stateless/01110_dictionary_layout_without_arguments.reference @@ -1,3 +1,3 @@ World -CREATE DICTIONARY db_for_dict.dict_with_hashed_layout\n(\n `key1` UInt64, \n `value` String\n)\nPRIMARY KEY key1\nSOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9000 USER \'default\' TABLE \'table_for_dict\' DB \'db_for_dict\'))\nLIFETIME(MIN 1 MAX 10)\nLAYOUT(HASHED) +CREATE DICTIONARY db_for_dict.dict_with_hashed_layout\n(\n `key1` UInt64,\n `value` String\n)\nPRIMARY KEY key1\nSOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9000 USER \'default\' TABLE \'table_for_dict\' DB \'db_for_dict\'))\nLIFETIME(MIN 1 MAX 10)\nLAYOUT(HASHED) Hello diff --git a/tests/queries/0_stateless/01224_no_superfluous_dict_reload.reference b/tests/queries/0_stateless/01224_no_superfluous_dict_reload.reference index 96d4393e06b..d80501b3f4d 100644 --- a/tests/queries/0_stateless/01224_no_superfluous_dict_reload.reference +++ b/tests/queries/0_stateless/01224_no_superfluous_dict_reload.reference @@ -2,7 +2,7 @@ NOT_LOADED NOT_LOADED CREATE DICTIONARY dict_db_01224.dict ( - `key` UInt64 DEFAULT 0, + `key` UInt64 DEFAULT 0, `val` UInt64 DEFAULT 10 ) PRIMARY KEY key From 94d55abfd13d0a3c0e4299f96e80fabb75b35a01 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Jun 2020 07:40:03 +0300 Subject: [PATCH 24/52] Update tests --- src/Parsers/ASTCreateQuery.cpp | 5 ++++- .../0_stateless/00933_ttl_replicated_zookeeper.reference | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/Parsers/ASTCreateQuery.cpp b/src/Parsers/ASTCreateQuery.cpp index 
fb6bbaeafb0..201e2e45528 100644 --- a/src/Parsers/ASTCreateQuery.cpp +++ b/src/Parsers/ASTCreateQuery.cpp @@ -197,7 +197,6 @@ ASTPtr ASTCreateQuery::clone() const void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const { frame.need_parens = false; - frame.expression_list_always_start_on_new_line = true; if (!database.empty() && table.empty()) { @@ -271,6 +270,8 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat << (!as_database.empty() ? backQuoteIfNeed(as_database) + "." : "") << backQuoteIfNeed(as_table); } + frame.expression_list_always_start_on_new_line = true; + if (columns_list) { settings.ostr << (settings.one_line ? " (" : "\n("); @@ -290,6 +291,8 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat settings.ostr << (settings.one_line ? ")" : "\n)"); } + frame.expression_list_always_start_on_new_line = false; + if (storage) storage->formatImpl(settings, state, frame); diff --git a/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference b/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference index 629fbf2a4a3..c727c24707d 100644 --- a/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference +++ b/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference @@ -1,3 +1,3 @@ 200 400 -CREATE TABLE test.ttl_repl2\n(\n `d` Date, \n `x` UInt32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/ttl_repl\', \'2\')\nPARTITION BY toDayOfMonth(d)\nORDER BY x\nTTL d + toIntervalDay(1)\nSETTINGS index_granularity = 8192 +CREATE TABLE test.ttl_repl2\n(\n `d` Date,\n `x` UInt32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/ttl_repl\', \'2\')\nPARTITION BY toDayOfMonth(d)\nORDER BY x\nTTL d + toIntervalDay(1)\nSETTINGS index_granularity = 8192 From e99c6d9143163bcc5104d391d620e18b4aaf83a0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Jun 2020 07:42:29 +0300 Subject: [PATCH 25/52] Update tests --- ...cated_merge_tree_alter_zookeeper.reference | 48 +++++++++---------- ...om_compression_codecs_replicated.reference | 2 +- 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference b/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference index fa5e65d2d60..ac0e0d557cb 100644 --- a/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference +++ b/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference @@ -1,22 +1,22 @@ d Date k UInt64 i32 Int32 -CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 -CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 10 42 d Date k UInt64 i32 Int32 dt DateTime -CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', 
\'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 dt DateTime -CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 9 41 1992-01-01 08:00:00 2015-01-01 10 42 0000-00-00 00:00:00 d Date @@ -25,14 +25,14 @@ i32 Int32 dt DateTime n.ui8 Array(UInt8) n.s Array(String) -CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime,\n `n.ui8` Array(UInt8),\n `n.s` Array(String)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 dt DateTime n.ui8 Array(UInt8) n.s Array(String) -CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime,\n `n.ui8` Array(UInt8),\n `n.s` Array(String)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 2015-01-01 9 41 1992-01-01 08:00:00 [] [] 2015-01-01 10 42 0000-00-00 00:00:00 [] [] @@ -43,7 +43,7 @@ dt DateTime n.ui8 Array(UInt8) n.s Array(String) n.d Array(Date) -CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime,\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 @@ -51,7 +51,7 @@ dt DateTime n.ui8 Array(UInt8) n.s Array(String) n.d Array(Date) -CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime,\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00'] 2015-01-01 9 41 1992-01-01 08:00:00 [] [] [] @@ -64,7 +64,7 @@ n.ui8 Array(UInt8) n.s Array(String) n.d Array(Date) s String DEFAULT \'0\' -CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n 
`k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `n.d` Array(Date), \n `s` String DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime,\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `n.d` Array(Date),\n `s` String DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 @@ -73,7 +73,7 @@ n.ui8 Array(UInt8) n.s Array(String) n.d Array(Date) s String DEFAULT \'0\' -CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `n.d` Array(Date), \n `s` String DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime,\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `n.d` Array(Date),\n `s` String DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] ['2000-01-01','2000-01-01','2000-01-03'] 100500 2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] 0 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00'] 0 @@ -86,7 +86,7 @@ dt DateTime n.ui8 Array(UInt8) n.s Array(String) s Int64 DEFAULT \'0\' -CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `s` Int64 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime,\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `s` Int64 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 @@ -94,7 +94,7 @@ dt DateTime n.ui8 Array(UInt8) n.s Array(String) s Int64 DEFAULT \'0\' -CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `s` Int64 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime,\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `s` Int64 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] 100500 2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] 0 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 0 @@ -108,7 +108,7 @@ n.ui8 Array(UInt8) n.s Array(String) s UInt32 DEFAULT \'0\' n.d Array(Date) -CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `s` UInt32 DEFAULT \'0\', \n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime,\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `s` UInt32 DEFAULT \'0\',\n `n.d` 
Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 @@ -117,7 +117,7 @@ n.ui8 Array(UInt8) n.s Array(String) s UInt32 DEFAULT \'0\' n.d Array(Date) -CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.ui8` Array(UInt8), \n `n.s` Array(String), \n `s` UInt32 DEFAULT \'0\', \n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime,\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `s` UInt32 DEFAULT \'0\',\n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] 100500 ['0000-00-00','0000-00-00','0000-00-00'] 2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] 0 ['0000-00-00','0000-00-00','0000-00-00'] 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 0 ['0000-00-00','0000-00-00','0000-00-00'] @@ -129,14 +129,14 @@ i32 Int32 dt DateTime n.s Array(String) s UInt32 DEFAULT \'0\' -CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.s` Array(String), \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime,\n `n.s` Array(String),\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 dt DateTime n.s Array(String) s UInt32 DEFAULT \'0\' -CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `n.s` Array(String), \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime,\n `n.s` Array(String),\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 13:26:50 ['asd','qwe','qwe'] 100500 2015-01-01 7 39 2014-07-14 13:26:50 ['120','130','140'] 0 2015-01-01 8 40 2012-12-12 12:12:12 ['12','13','14'] 0 @@ -147,13 +147,13 @@ k UInt64 i32 Int32 dt DateTime s UInt32 DEFAULT \'0\' -CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime,\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 dt DateTime s UInt32 DEFAULT \'0\' -CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime,\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 13:26:50 100500 2015-01-01 7 39 2014-07-14 13:26:50 0 2015-01-01 8 40 2012-12-12 12:12:12 0 @@ -166,7 +166,7 @@ dt 
DateTime s UInt32 DEFAULT \'0\' n.s Array(String) n.d Array(Date) -CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `s` UInt32 DEFAULT \'0\', \n `n.s` Array(String), \n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime,\n `s` UInt32 DEFAULT \'0\',\n `n.s` Array(String),\n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 @@ -174,7 +174,7 @@ dt DateTime s UInt32 DEFAULT \'0\' n.s Array(String) n.d Array(Date) -CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `s` UInt32 DEFAULT \'0\', \n `n.s` Array(String), \n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime,\n `s` UInt32 DEFAULT \'0\',\n `n.s` Array(String),\n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 13:26:50 100500 [] [] 2015-01-01 7 39 2014-07-14 13:26:50 0 [] [] 2015-01-01 8 40 2012-12-12 12:12:12 0 [] [] @@ -185,13 +185,13 @@ k UInt64 i32 Int32 dt DateTime s UInt32 DEFAULT \'0\' -CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime,\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 dt DateTime s UInt32 DEFAULT \'0\' -CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` DateTime, \n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime,\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 13:26:50 100500 2015-01-01 7 39 2014-07-14 13:26:50 0 2015-01-01 8 40 2012-12-12 12:12:12 0 @@ -202,13 +202,13 @@ k UInt64 i32 Int32 dt Date s DateTime DEFAULT \'0000-00-00 00:00:00\' -CREATE TABLE test.replicated_alter1\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` Date, \n `s` DateTime DEFAULT \'0000-00-00 00:00:00\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) +CREATE TABLE test.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` Date,\n `s` DateTime DEFAULT \'0000-00-00 00:00:00\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 dt Date s DateTime DEFAULT \'0000-00-00 00:00:00\' -CREATE TABLE test.replicated_alter2\n(\n `d` Date, \n `k` UInt64, \n `i32` Int32, \n `dt` Date, \n `s` DateTime DEFAULT \'0000-00-00 00:00:00\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192) +CREATE TABLE test.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` Date,\n `s` DateTime DEFAULT \'0000-00-00 00:00:00\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', 
d, k, 8192) 2015-01-01 6 38 2014-07-15 1970-01-02 06:55:00 2015-01-01 7 39 2014-07-14 0000-00-00 00:00:00 2015-01-01 8 40 2012-12-12 0000-00-00 00:00:00 diff --git a/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference b/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference index ee481c88d89..62cea01089a 100644 --- a/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference +++ b/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference @@ -20,7 +20,7 @@ 274972506.6 9175437371954010821 9175437371954010821 -CREATE TABLE test.compression_codec_multiple_more_types_replicated\n(\n `id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)), \n `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)), \n `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)), \n `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/compression_codec_multiple_more_types_replicated\', \'1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE test.compression_codec_multiple_more_types_replicated\n(\n `id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)),\n `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)),\n `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)),\n `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/compression_codec_multiple_more_types_replicated\', \'1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 1.5555555555555 hello world! [77] ['John'] 7.1000000000000 xxxxxxxxxxxx [127] ['Henry'] ! 
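The expected-output updates above and in the following commit all follow from the formatter changes earlier in the series: a multi-line expression list now writes the separator immediately after the previous element (so ", \n" becomes ",\n") and indents the list even when it has a single element. A toy sketch of that behaviour, assuming a much simplified renderer rather than the real ASTExpressionList::formatImplMultiline:

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

/// Toy renderer approximating the multi-line expression list formatting; only a sketch.
std::string formatColumns(const std::vector<std::string> & columns, bool always_indent)
{
    std::ostringstream out;
    for (size_t i = 0; i < columns.size(); ++i)
    {
        if (i)
            out << ",";    /// separator goes right after the previous element: no trailing space
        out << "\n";
        if (columns.size() > 1 || always_indent)
            out << "    "; /// indent even a single-element list when requested
        out << columns[i];
    }
    return out.str();
}

int main()
{
    /// Reproduces the ",\n    " style and the indented single-column case seen in the
    /// updated .reference files.
    std::cout << "CREATE TABLE t\n(" << formatColumns({"`d` Date", "`k` UInt64"}, true) << "\n)\n";
    std::cout << "CREATE TABLE u\n(" << formatColumns({"`n` UInt8"}, true) << "\n)\n";
}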
From 65a8fe7cf053b9209c591c249971c8a8b9e4a102 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Jun 2020 09:14:58 +0300 Subject: [PATCH 26/52] Update tests --- .../00725_ipv4_ipv6_domains.reference | 4 +-- .../01069_database_memory.reference | 2 +- ..._expressions_in_engine_arguments.reference | 12 +++---- .../01272_suspicious_codecs.reference | 32 +++++++++---------- .../01297_alter_distributed.reference | 4 +-- .../0_stateless/01298_alter_merge.reference | 4 +-- 6 files changed, 29 insertions(+), 29 deletions(-) diff --git a/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference b/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference index 69804e6cd24..28051d15f65 100644 --- a/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference +++ b/tests/queries/0_stateless/00725_ipv4_ipv6_domains.reference @@ -1,4 +1,4 @@ -CREATE TABLE default.ipv4_test\n(`ipv4_` IPv4\n)\nENGINE = Memory +CREATE TABLE default.ipv4_test\n(\n `ipv4_` IPv4\n)\nENGINE = Memory 0.0.0.0 00 8.8.8.8 08080808 127.0.0.1 7F000001 @@ -10,7 +10,7 @@ CREATE TABLE default.ipv4_test\n(`ipv4_` IPv4\n)\nENGINE = Memory > 127.0.0.1 255.255.255.255 = 127.0.0.1 127.0.0.1 euqality of IPv4-mapped IPv6 value and IPv4 promoted to IPv6 with function: 1 -CREATE TABLE default.ipv6_test\n(`ipv6_` IPv6\n)\nENGINE = Memory +CREATE TABLE default.ipv6_test\n(\n `ipv6_` IPv6\n)\nENGINE = Memory :: 00000000000000000000000000000000 :: 00000000000000000000000000000000 ::ffff:8.8.8.8 00000000000000000000FFFF08080808 diff --git a/tests/queries/0_stateless/01069_database_memory.reference b/tests/queries/0_stateless/01069_database_memory.reference index cfccf5b1757..e7486d57276 100644 --- a/tests/queries/0_stateless/01069_database_memory.reference +++ b/tests/queries/0_stateless/01069_database_memory.reference @@ -5,4 +5,4 @@ CREATE DATABASE memory_01069\nENGINE = Memory() 4 3 4 -CREATE TABLE memory_01069.file\n(`n` UInt8\n)\nENGINE = File(\'CSV\') +CREATE TABLE memory_01069.file\n(\n `n` UInt8\n)\nENGINE = File(\'CSV\') diff --git a/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference b/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference index 138f09f2634..d360a046958 100644 --- a/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference +++ b/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference @@ -1,11 +1,11 @@ -CREATE TABLE test_01083.file\n(`n` Int8\n)\nENGINE = File(\'TSVWithNamesAndTypes\') -CREATE TABLE test_01083.buffer\n(`n` Int8\n)\nENGINE = Buffer(\'test_01083\', \'file\', 16, 10, 200, 10000, 1000000, 10000000, 1000000000) -CREATE TABLE test_01083.merge\n(`n` Int8\n)\nENGINE = Merge(\'test_01083\', \'distributed\') +CREATE TABLE test_01083.file\n(\n `n` Int8\n)\nENGINE = File(\'TSVWithNamesAndTypes\') +CREATE TABLE test_01083.buffer\n(\n `n` Int8\n)\nENGINE = Buffer(\'test_01083\', \'file\', 16, 10, 200, 10000, 1000000, 10000000, 1000000000) +CREATE TABLE test_01083.merge\n(\n `n` Int8\n)\nENGINE = Merge(\'test_01083\', \'distributed\') CREATE TABLE test_01083.merge_tf AS merge(\'test_01083\', \'.*\') -CREATE TABLE test_01083.distributed\n(`n` Int8\n)\nENGINE = Distributed(\'test_shard_localhost\', \'test_01083\', \'file\') +CREATE TABLE test_01083.distributed\n(\n `n` Int8\n)\nENGINE = Distributed(\'test_shard_localhost\', \'test_01083\', \'file\') CREATE TABLE test_01083.distributed_tf AS cluster(\'test_shard_localhost\', \'test_01083\', \'buffer\') CREATE TABLE test_01083.url\n(\n `n` UInt64,\n `col` String\n)\nENGINE = 
URL(\'https://localhost:8443/?query=select+n,+_table+from+test_01083.merge+format+CSV\', \'CSV\') CREATE TABLE test_01083.rich_syntax AS remote(\'localhos{x|y|t}\', cluster(\'test_shard_localhost\', remote(\'127.0.0.{1..4}\', \'test_01083\', \'view\'))) -CREATE VIEW test_01083.view\n(`n` Int64\n) AS\nSELECT toInt64(n) AS n\nFROM \n(\n SELECT toString(n) AS n\n FROM test_01083.merge\n WHERE _table != \'qwerty\'\n ORDER BY _table ASC\n)\nUNION ALL\nSELECT *\nFROM test_01083.file -CREATE DICTIONARY test_01083.dict\n(\n \n `n` UInt64,\n \n `col` String DEFAULT \'42\'\n)\nPRIMARY KEY n\nSOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9440 SECURE 1 USER \'default\' TABLE \'url\' DB \'test_01083\'))\nLIFETIME(MIN 0 MAX 1)\nLAYOUT(CACHE(SIZE_IN_CELLS 1)) +CREATE VIEW test_01083.view\n(\n `n` Int64\n) AS\nSELECT toInt64(n) AS n\nFROM \n(\n SELECT toString(n) AS n\n FROM test_01083.merge\n WHERE _table != \'qwerty\'\n ORDER BY _table ASC\n)\nUNION ALL\nSELECT *\nFROM test_01083.file +CREATE DICTIONARY test_01083.dict\n(\n `n` UInt64,\n `col` String DEFAULT \'42\'\n)\nPRIMARY KEY n\nSOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9440 SECURE 1 USER \'default\' TABLE \'url\' DB \'test_01083\'))\nLIFETIME(MIN 0 MAX 1)\nLAYOUT(CACHE(SIZE_IN_CELLS 1)) 16 diff --git a/tests/queries/0_stateless/01272_suspicious_codecs.reference b/tests/queries/0_stateless/01272_suspicious_codecs.reference index de91a1ddb25..559b6df2693 100644 --- a/tests/queries/0_stateless/01272_suspicious_codecs.reference +++ b/tests/queries/0_stateless/01272_suspicious_codecs.reference @@ -1,16 +1,16 @@ -CREATE TABLE default.codecs1\n(`a` UInt8 CODEC(NONE, NONE)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs2\n(`a` UInt8 CODEC(NONE, LZ4)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs3\n(`a` UInt8 CODEC(LZ4, NONE)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs4\n(`a` UInt8 CODEC(LZ4, LZ4)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs5\n(`a` UInt8 CODEC(LZ4, ZSTD(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs6\n(`a` UInt8 CODEC(Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs7\n(`a` UInt8 CODEC(Delta(1), Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs8\n(`a` UInt8 CODEC(LZ4, Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs1\n(`a` UInt8 CODEC(NONE, NONE)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs2\n(`a` UInt8 CODEC(NONE, LZ4)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs3\n(`a` UInt8 CODEC(LZ4, NONE)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs4\n(`a` UInt8 CODEC(LZ4, LZ4)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs5\n(`a` UInt8 CODEC(LZ4, ZSTD(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs6\n(`a` UInt8 CODEC(Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs7\n(`a` UInt8 CODEC(Delta(1), 
Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.codecs8\n(`a` UInt8 CODEC(LZ4, Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs1\n(\n `a` UInt8 CODEC(NONE, NONE)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs2\n(\n `a` UInt8 CODEC(NONE, LZ4)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs3\n(\n `a` UInt8 CODEC(LZ4, NONE)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs4\n(\n `a` UInt8 CODEC(LZ4, LZ4)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs5\n(\n `a` UInt8 CODEC(LZ4, ZSTD(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs6\n(\n `a` UInt8 CODEC(Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs7\n(\n `a` UInt8 CODEC(Delta(1), Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs8\n(\n `a` UInt8 CODEC(LZ4, Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs1\n(\n `a` UInt8 CODEC(NONE, NONE)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs2\n(\n `a` UInt8 CODEC(NONE, LZ4)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs3\n(\n `a` UInt8 CODEC(LZ4, NONE)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs4\n(\n `a` UInt8 CODEC(LZ4, LZ4)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs5\n(\n `a` UInt8 CODEC(LZ4, ZSTD(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs6\n(\n `a` UInt8 CODEC(Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs7\n(\n `a` UInt8 CODEC(Delta(1), Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.codecs8\n(\n `a` UInt8 CODEC(LZ4, Delta(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01297_alter_distributed.reference b/tests/queries/0_stateless/01297_alter_distributed.reference index bd269322884..8fd8bc7ab72 100644 --- a/tests/queries/0_stateless/01297_alter_distributed.reference +++ b/tests/queries/0_stateless/01297_alter_distributed.reference @@ -6,7 +6,7 @@ VisitID UInt64 UserID UInt64 StartTime DateTime ClickLogID UInt64 -CREATE TABLE default.merge_distributed\n(\n `CounterID` UInt32, \n `dummy` String, \n `StartDate` Date, \n `Sign` Int8, \n `VisitID` UInt64, \n `UserID` UInt64, \n `StartTime` DateTime, \n `ClickLogID` UInt64\n)\nENGINE = Distributed(\'test_shard_localhost\', \'default\', \'merge_distributed1\') +CREATE TABLE default.merge_distributed\n(\n `CounterID` UInt32,\n `dummy` String,\n `StartDate` Date,\n `Sign` Int8,\n `VisitID` UInt64,\n `UserID` UInt64,\n `StartTime` DateTime,\n `ClickLogID` UInt64\n)\nENGINE = Distributed(\'test_shard_localhost\', \'default\', \'merge_distributed1\') 1 Hello, Alter Table! 
CounterID UInt32 StartDate Date @@ -15,4 +15,4 @@ VisitID UInt64 UserID UInt64 StartTime DateTime ClickLogID UInt64 -CREATE TABLE default.merge_distributed\n(\n `CounterID` UInt32, \n `StartDate` Date, \n `Sign` Int8, \n `VisitID` UInt64, \n `UserID` UInt64, \n `StartTime` DateTime, \n `ClickLogID` UInt64\n)\nENGINE = Distributed(\'test_shard_localhost\', \'default\', \'merge_distributed1\') +CREATE TABLE default.merge_distributed\n(\n `CounterID` UInt32,\n `StartDate` Date,\n `Sign` Int8,\n `VisitID` UInt64,\n `UserID` UInt64,\n `StartTime` DateTime,\n `ClickLogID` UInt64\n)\nENGINE = Distributed(\'test_shard_localhost\', \'default\', \'merge_distributed1\') diff --git a/tests/queries/0_stateless/01298_alter_merge.reference b/tests/queries/0_stateless/01298_alter_merge.reference index 393c0a600ff..a012900f978 100644 --- a/tests/queries/0_stateless/01298_alter_merge.reference +++ b/tests/queries/0_stateless/01298_alter_merge.reference @@ -6,7 +6,7 @@ VisitID UInt64 UserID UInt64 StartTime DateTime ClickLogID UInt64 -CREATE TABLE default.merge\n(\n `CounterID` UInt32, \n `dummy` String, \n `StartDate` Date, \n `Sign` Int8, \n `VisitID` UInt64, \n `UserID` UInt64, \n `StartTime` DateTime, \n `ClickLogID` UInt64\n)\nENGINE = Merge(\'default\', \'merge\\\\[0-9\\\\]\') +CREATE TABLE default.merge\n(\n `CounterID` UInt32,\n `dummy` String,\n `StartDate` Date,\n `Sign` Int8,\n `VisitID` UInt64,\n `UserID` UInt64,\n `StartTime` DateTime,\n `ClickLogID` UInt64\n)\nENGINE = Merge(\'default\', \'merge\\\\[0-9\\\\]\') CounterID UInt32 StartDate Date Sign Int8 @@ -14,4 +14,4 @@ VisitID UInt64 UserID UInt64 StartTime DateTime ClickLogID UInt64 -CREATE TABLE default.merge\n(\n `CounterID` UInt32, \n `StartDate` Date, \n `Sign` Int8, \n `VisitID` UInt64, \n `UserID` UInt64, \n `StartTime` DateTime, \n `ClickLogID` UInt64\n)\nENGINE = Merge(\'default\', \'merge\\\\[0-9\\\\]\') +CREATE TABLE default.merge\n(\n `CounterID` UInt32,\n `StartDate` Date,\n `Sign` Int8,\n `VisitID` UInt64,\n `UserID` UInt64,\n `StartTime` DateTime,\n `ClickLogID` UInt64\n)\nENGINE = Merge(\'default\', \'merge\\\\[0-9\\\\]\') From def0158638b82c6d2d38ceb80daec6f74b992a15 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Mon, 15 Jun 2020 14:33:44 +0300 Subject: [PATCH 27/52] configure query handler as default --- src/Server/HTTPHandlerFactory.cpp | 31 ++++++++++++------- src/Server/HTTPHandlerFactory.h | 2 -- .../test_http_handlers_config/test.py | 3 ++ 3 files changed, 22 insertions(+), 14 deletions(-) diff --git a/src/Server/HTTPHandlerFactory.cpp b/src/Server/HTTPHandlerFactory.cpp index 6459b0aab3b..f34852054d1 100644 --- a/src/Server/HTTPHandlerFactory.cpp +++ b/src/Server/HTTPHandlerFactory.cpp @@ -20,6 +20,9 @@ namespace ErrorCodes extern const int INVALID_CONFIG_PARAMETER; } +static void addCommonDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server); +static void addDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server, AsynchronousMetrics & async_metrics); + HTTPRequestHandlerFactoryMain::HTTPRequestHandlerFactoryMain(const std::string & name_) : log(&Poco::Logger::get(name_)), name(name_) { @@ -75,7 +78,7 @@ static inline auto createHandlersFactoryFromConfig( for (const auto & key : keys) { if (key == "defaults") - addDefaultHandlersFactory(*main_handler_factory, server, &async_metrics); + addDefaultHandlersFactory(*main_handler_factory, server, async_metrics); else if (startsWith(key, "rule")) { const auto & handler_type = server.config().getString(prefix + "." 
+ key + ".handler.type", ""); @@ -113,12 +116,7 @@ static inline Poco::Net::HTTPRequestHandlerFactory * createHTTPHandlerFactory(IS else { auto factory = std::make_unique(name); - addDefaultHandlersFactory(*factory, server, &async_metrics); - - auto query_handler = std::make_unique>(server, "query"); - query_handler->allowPostAndGetParamsRequest(); - factory->addHandler(query_handler.release()); - + addDefaultHandlersFactory(*factory, server, async_metrics); return factory.release(); } } @@ -126,7 +124,7 @@ static inline Poco::Net::HTTPRequestHandlerFactory * createHTTPHandlerFactory(IS static inline Poco::Net::HTTPRequestHandlerFactory * createInterserverHTTPHandlerFactory(IServer & server, const std::string & name) { auto factory = std::make_unique(name); - addDefaultHandlersFactory(*factory, server, nullptr); + addCommonDefaultHandlersFactory(*factory, server); auto main_handler = std::make_unique>(server); main_handler->allowPostAndGetParamsRequest(); @@ -157,7 +155,7 @@ Poco::Net::HTTPRequestHandlerFactory * createHandlerFactory(IServer & server, As static const auto ping_response_expression = "Ok.\n"; static const auto root_response_expression = "config://http_server_default_response"; -void addDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server, AsynchronousMetrics * async_metrics) +void addCommonDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server) { auto root_handler = std::make_unique>(server, root_response_expression); root_handler->attachStrictPath("/")->allowGetAndHeadRequest(); @@ -170,13 +168,22 @@ void addDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IServer auto replicas_status_handler = std::make_unique>(server); replicas_status_handler->attachNonStrictPath("/replicas_status")->allowGetAndHeadRequest(); factory.addHandler(replicas_status_handler.release()); +} + +void addDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server, AsynchronousMetrics & async_metrics) +{ + addCommonDefaultHandlersFactory(factory, server); + + auto query_handler = std::make_unique>(server, "query"); + query_handler->allowPostAndGetParamsRequest(); + factory.addHandler(query_handler.release()); /// We check that prometheus handler will be served on current (default) port. - /// Otherwise it will be created separately, see below. - if (async_metrics && server.config().has("prometheus") && server.config().getInt("prometheus.port", 0) == 0) + /// Otherwise it will be created separately, see createHandlerFactory(...). 
+ if (server.config().has("prometheus") && server.config().getInt("prometheus.port", 0) == 0) { auto prometheus_handler = std::make_unique>( - server, PrometheusMetricsWriter(server.config(), "prometheus", *async_metrics)); + server, PrometheusMetricsWriter(server.config(), "prometheus", async_metrics)); prometheus_handler->attachStrictPath(server.config().getString("prometheus.endpoint", "/metrics"))->allowGetAndHeadRequest(); factory.addHandler(prometheus_handler.release()); } diff --git a/src/Server/HTTPHandlerFactory.h b/src/Server/HTTPHandlerFactory.h index 8e21a13ba18..3e8313172eb 100644 --- a/src/Server/HTTPHandlerFactory.h +++ b/src/Server/HTTPHandlerFactory.h @@ -103,8 +103,6 @@ private: std::function creator; }; -void addDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server, AsynchronousMetrics * async_metrics); - Poco::Net::HTTPRequestHandlerFactory * createStaticHandlerFactory(IServer & server, const std::string & config_prefix); Poco::Net::HTTPRequestHandlerFactory * createDynamicHandlerFactory(IServer & server, const std::string & config_prefix); diff --git a/tests/integration/test_http_handlers_config/test.py b/tests/integration/test_http_handlers_config/test.py index b31913ba962..b15cd1fdb89 100644 --- a/tests/integration/test_http_handlers_config/test.py +++ b/tests/integration/test_http_handlers_config/test.py @@ -124,6 +124,9 @@ def test_defaults_http_handlers(): assert 200 == cluster.instance.http_request('replicas_status', method='GET').status_code assert 'Ok.\n' == cluster.instance.http_request('replicas_status', method='GET').content + assert 200 == cluster.instance.http_request('?query=SELECT+1', method='GET').status_code + assert '1\n' == cluster.instance.http_request('?query=SELECT+1', method='GET').content + def test_prometheus_handler(): with contextlib.closing(SimpleCluster(ClickHouseCluster(__file__), "prometheus_handler", "test_prometheus_handler")) as cluster: assert 404 == cluster.instance.http_request('', method='GET', headers={'XXX': 'xxx'}).status_code From c7140724a8c2abbd7793744904cf475841d927fa Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Mon, 15 Jun 2020 16:25:27 +0300 Subject: [PATCH 28/52] Fix that ALTER USER RENAME could change allowed hosts. 
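[Editorial note] In user-facing terms this fix means that renaming a user no longer implicitly rewrites its HOST clause. A hedged sketch of the intended behaviour (the user names below are invented; the real coverage is in `01075_allowed_client_hosts`):

``` sql
-- Hypothetical user, restricted to local connections.
CREATE USER u_demo HOST LOCAL;

-- Before this fix, renaming to a name containing '@host' could silently replace
-- the HOST clause with a LIKE pattern derived from the new name.
ALTER USER u_demo RENAME TO 'u_demo@192.168.23.15';

-- Expected after the fix: the original restriction is preserved, e.g.
--   CREATE USER `u_demo@192.168.23.15` HOST LOCAL
SHOW CREATE USER 'u_demo@192.168.23.15';
```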
--- src/Parsers/ParserCreateUserQuery.cpp | 16 +++++----------- .../01075_allowed_client_hosts.reference | 4 ++-- 2 files changed, 7 insertions(+), 13 deletions(-) diff --git a/src/Parsers/ParserCreateUserQuery.cpp b/src/Parsers/ParserCreateUserQuery.cpp index 3bf7e508220..e03f8334d42 100644 --- a/src/Parsers/ParserCreateUserQuery.cpp +++ b/src/Parsers/ParserCreateUserQuery.cpp @@ -23,14 +23,14 @@ namespace ErrorCodes namespace { - bool parseRenameTo(IParserBase::Pos & pos, Expected & expected, String & new_name, std::optional & new_host_pattern) + bool parseRenameTo(IParserBase::Pos & pos, Expected & expected, String & new_name) { return IParserBase::wrapParseImpl(pos, [&] { if (!ParserKeyword{"RENAME TO"}.ignore(pos, expected)) return false; - return parseUserName(pos, expected, new_name, new_host_pattern); + return parseUserName(pos, expected, new_name); }); } @@ -274,7 +274,6 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec return false; String new_name; - std::optional new_host_pattern; std::optional authentication; std::optional hosts; std::optional add_hosts; @@ -302,7 +301,7 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (alter) { - if (new_name.empty() && parseRenameTo(pos, expected, new_name, new_host_pattern)) + if (new_name.empty() && parseRenameTo(pos, expected, new_name)) continue; if (parseHosts(pos, expected, "ADD", add_hosts) || parseHosts(pos, expected, "DROP", remove_hosts)) @@ -312,13 +311,8 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec break; } - if (!hosts) - { - if (!alter && host_pattern) - hosts.emplace().addLikePattern(*host_pattern); - else if (alter && new_host_pattern) - hosts.emplace().addLikePattern(*new_host_pattern); - } + if (!alter && !hosts && host_pattern) + hosts.emplace().addLikePattern(*host_pattern); auto query = std::make_shared(); node = query; diff --git a/tests/queries/0_stateless/01075_allowed_client_hosts.reference b/tests/queries/0_stateless/01075_allowed_client_hosts.reference index 3fdea9d1cda..5fb11bae65e 100644 --- a/tests/queries/0_stateless/01075_allowed_client_hosts.reference +++ b/tests/queries/0_stateless/01075_allowed_client_hosts.reference @@ -13,5 +13,5 @@ CREATE USER test_user_01075 HOST REGEXP \'.*\\\\.anothersite\\\\.com\', \'.*\\\\ CREATE USER test_user_01075 HOST REGEXP \'.*\\\\.anothersite2\\\\.com\', \'.*\\\\.anothersite2\\\\.org\' CREATE USER test_user_01075 HOST REGEXP \'.*\\\\.anothersite3\\\\.com\', \'.*\\\\.anothersite3\\\\.org\' CREATE USER `test_user_01075_x@localhost` HOST LOCAL -CREATE USER test_user_01075_x -CREATE USER `test_user_01075_x@192.168.23.15` HOST LIKE \'192.168.23.15\' +CREATE USER test_user_01075_x HOST LOCAL +CREATE USER `test_user_01075_x@192.168.23.15` HOST LOCAL From 6af27d6c324222eec5c51284391518741d2a31b1 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Mon, 15 Jun 2020 18:22:11 +0300 Subject: [PATCH 29/52] Update simpleaggregatefunction.md --- docs/en/sql-reference/data-types/simpleaggregatefunction.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/en/sql-reference/data-types/simpleaggregatefunction.md b/docs/en/sql-reference/data-types/simpleaggregatefunction.md index 5f4c408f939..8b7e498e535 100644 --- a/docs/en/sql-reference/data-types/simpleaggregatefunction.md +++ b/docs/en/sql-reference/data-types/simpleaggregatefunction.md @@ -12,6 +12,8 @@ The following aggregate functions are supported: - 
[`groupBitAnd`](../../sql-reference/aggregate-functions/reference.md#groupbitand) - [`groupBitOr`](../../sql-reference/aggregate-functions/reference.md#groupbitor) - [`groupBitXor`](../../sql-reference/aggregate-functions/reference.md#groupbitxor) +- [`groupArrayArray`](../../sql-reference/aggregate-functions/reference.md#agg_function-grouparray) +- [`groupUniqArrayArray`](../../sql-reference/aggregate-functions/reference.md#groupuniqarrayx-groupuniqarraymax-sizex) Values of the `SimpleAggregateFunction(func, Type)` look and stored the same way as `Type`, so you do not need to apply functions with `-Merge`/`-State` suffixes. `SimpleAggregateFunction` has better performance than `AggregateFunction` with same aggregation function. From 6e18001bd81809e4b8d85c5b388e2111f685cb6f Mon Sep 17 00:00:00 2001 From: long2ice Date: Mon, 15 Jun 2020 23:25:13 +0800 Subject: [PATCH 30/52] add mysql2ch in docs (#11680) * add mysql2ch add mysql2ch * Update integrations.md * Update integrations.md * Update integrations.md * Update integrations.md * Update integrations.md * Update integrations.md * Update integrations.md --- docs/en/interfaces/third-party/integrations.md | 1 + docs/es/interfaces/third-party/integrations.md | 1 + docs/fa/interfaces/third-party/integrations.md | 1 + docs/fr/interfaces/third-party/integrations.md | 1 + docs/ja/interfaces/third-party/integrations.md | 1 + docs/ru/interfaces/third-party/integrations.md | 1 + docs/tr/interfaces/third-party/integrations.md | 1 + docs/zh/interfaces/third-party/integrations.md | 1 + 8 files changed, 8 insertions(+) diff --git a/docs/en/interfaces/third-party/integrations.md b/docs/en/interfaces/third-party/integrations.md index 716e774871b..17e7f1f18cc 100644 --- a/docs/en/interfaces/third-party/integrations.md +++ b/docs/en/interfaces/third-party/integrations.md @@ -12,6 +12,7 @@ toc_title: Integrations - Relational database management systems - [MySQL](https://www.mysql.com) + - [mysql2ch](https://github.com/long2ice/mysql2ch) - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support) - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader) - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator) diff --git a/docs/es/interfaces/third-party/integrations.md b/docs/es/interfaces/third-party/integrations.md index 716e774871b..17e7f1f18cc 100644 --- a/docs/es/interfaces/third-party/integrations.md +++ b/docs/es/interfaces/third-party/integrations.md @@ -12,6 +12,7 @@ toc_title: Integrations - Relational database management systems - [MySQL](https://www.mysql.com) + - [mysql2ch](https://github.com/long2ice/mysql2ch) - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support) - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader) - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator) diff --git a/docs/fa/interfaces/third-party/integrations.md b/docs/fa/interfaces/third-party/integrations.md index 657432c7958..df864ef71e6 100644 --- a/docs/fa/interfaces/third-party/integrations.md +++ b/docs/fa/interfaces/third-party/integrations.md @@ -14,6 +14,7 @@ toc_title: "\u06CC\u06A9\u067E\u0627\u0631\u0686\u06AF\u06CC" - سیستم های مدیریت پایگاه داده رابطه ای - [MySQL](https://www.mysql.com) + - [mysql2ch](https://github.com/long2ice/mysql2ch) - [در حال بارگذاری](https://github.com/sysown/proxysql/wiki/ClickHouse-Support) - [تاتر-خروجی زیر-داده خوان](https://github.com/Altinity/clickhouse-mysql-data-reader) - 
[horgh-replicator](https://github.com/larsnovikov/horgh-replicator) diff --git a/docs/fr/interfaces/third-party/integrations.md b/docs/fr/interfaces/third-party/integrations.md index f252fd6229b..8332ffe5e59 100644 --- a/docs/fr/interfaces/third-party/integrations.md +++ b/docs/fr/interfaces/third-party/integrations.md @@ -14,6 +14,7 @@ toc_title: "Int\xE9gration" - Systèmes de gestion de bases de données relationnelles - [MySQL](https://www.mysql.com) + - [mysql2ch](https://github.com/long2ice/mysql2ch) - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support) - [clickhouse-mysql-lecteur de données](https://github.com/Altinity/clickhouse-mysql-data-reader) - [horgh-réplicateur](https://github.com/larsnovikov/horgh-replicator) diff --git a/docs/ja/interfaces/third-party/integrations.md b/docs/ja/interfaces/third-party/integrations.md index 3e38d578093..2ac2ad24410 100644 --- a/docs/ja/interfaces/third-party/integrations.md +++ b/docs/ja/interfaces/third-party/integrations.md @@ -14,6 +14,7 @@ toc_title: "\u7D71\u5408" - リレーショナルデータベース管理システム - [MySQL](https://www.mysql.com) + - [mysql2ch](https://github.com/long2ice/mysql2ch) - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support) - [clickhouse-mysql-データリーダー](https://github.com/Altinity/clickhouse-mysql-data-reader) - [horgh-レプリケーター](https://github.com/larsnovikov/horgh-replicator) diff --git a/docs/ru/interfaces/third-party/integrations.md b/docs/ru/interfaces/third-party/integrations.md index 39449b54df8..19a72edc4d3 100644 --- a/docs/ru/interfaces/third-party/integrations.md +++ b/docs/ru/interfaces/third-party/integrations.md @@ -7,6 +7,7 @@ - Реляционные системы управления базами данных - [MySQL](https://www.mysql.com) + - [mysql2ch](https://github.com/long2ice/mysql2ch) - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support) - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader) - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator) diff --git a/docs/tr/interfaces/third-party/integrations.md b/docs/tr/interfaces/third-party/integrations.md index 8a1d5c239f6..a5e5a60c72f 100644 --- a/docs/tr/interfaces/third-party/integrations.md +++ b/docs/tr/interfaces/third-party/integrations.md @@ -14,6 +14,7 @@ toc_title: Entegrasyonlar - İlişkisel veritabanı yönetim sistemleri - [MySQL](https://www.mysql.com) + - [mysql2ch](https://github.com/long2ice/mysql2ch) - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support) - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader) - [horgh-çoğaltıcı](https://github.com/larsnovikov/horgh-replicator) diff --git a/docs/zh/interfaces/third-party/integrations.md b/docs/zh/interfaces/third-party/integrations.md index 014fdc88304..e0f308fecde 100644 --- a/docs/zh/interfaces/third-party/integrations.md +++ b/docs/zh/interfaces/third-party/integrations.md @@ -7,6 +7,7 @@ - 关系数据库管理系统 - [MySQL](https://www.mysql.com) + - [mysql2ch](https://github.com/long2ice/mysql2ch) - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support) - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader) - [horgh-复制器](https://github.com/larsnovikov/horgh-replicator) From a9599d0a37c4e28ef853963901c6d09c8e6d52e1 Mon Sep 17 00:00:00 2001 From: Tom Bombadil <565258751@qq.com> Date: Tue, 16 Jun 2020 02:40:20 +0800 Subject: [PATCH 31/52] Update index.md (#11674) * Update index.md optimize chinese-doc translation * Update index.md * Update index.md 
Co-authored-by: Ivan Blinkov --- docs/zh/sql-reference/index.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/docs/zh/sql-reference/index.md b/docs/zh/sql-reference/index.md index aed96c4b34f..c47c20b9cf9 100644 --- a/docs/zh/sql-reference/index.md +++ b/docs/zh/sql-reference/index.md @@ -1,15 +1,13 @@ --- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_folder_title: "SQL\u53C2\u8003" +toc_folder_title: SQL参考 toc_hidden: true toc_priority: 28 -toc_title: "\u9690\u85CF" +toc_title: hidden --- # SQL参考 {#sql-reference} -ClickHouse支持以下类型的查询: +ClickHouse支持以下形式的查询: - [SELECT](statements/select/index.md) - [INSERT INTO](statements/insert-into.md) @@ -17,4 +15,4 @@ ClickHouse支持以下类型的查询: - [ALTER](statements/alter.md#query_language_queries_alter) - [其他类型的查询](statements/misc.md) -[原始文章](https://clickhouse.tech/docs/en/sql-reference/) +[原始文档](https://clickhouse.tech/docs/zh/sql-reference/) From 3f8d72c3724b2d80d698aaef643ae036831118a7 Mon Sep 17 00:00:00 2001 From: Yuntao Wu Date: Tue, 16 Jun 2020 02:41:31 +0800 Subject: [PATCH 32/52] =?UTF-8?q?merge=20translates=20into=20"=E5=90=88?= =?UTF-8?q?=E5=B9=B6"=20better=20(#11659)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * merge translates into "合并" better * Update index.md Co-authored-by: Ivan Blinkov --- docs/zh/engines/table-engines/mergetree-family/index.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/zh/engines/table-engines/mergetree-family/index.md b/docs/zh/engines/table-engines/mergetree-family/index.md index 746d9f03281..c24dd02bb72 100644 --- a/docs/zh/engines/table-engines/mergetree-family/index.md +++ b/docs/zh/engines/table-engines/mergetree-family/index.md @@ -1,7 +1,5 @@ --- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_folder_title: "\u6885\u6811\u5BB6\u65CF" +toc_folder_title: "合并树家族" toc_priority: 28 --- From 4f8c7bcf78483f86c088185147391686c61c30f7 Mon Sep 17 00:00:00 2001 From: Yuntao Wu Date: Tue, 16 Jun 2020 02:42:35 +0800 Subject: [PATCH 33/52] update some errors (#11656) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit collapse means “折叠” in Chinese engine means “引擎” in Chinese when we are developing --- .../mergetree-family/versionedcollapsingmergetree.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md b/docs/zh/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md index 19caae5e1a1..3ee35e7cdf7 100644 --- a/docs/zh/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md +++ b/docs/zh/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md @@ -33,7 +33,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] 有关查询参数的说明,请参阅 [查询说明](../../../sql-reference/statements/create.md). 
-**发动机参数** +**引擎参数** ``` sql VersionedCollapsingMergeTree(sign, version) @@ -79,7 +79,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] -## 崩溃 {#table_engines_versionedcollapsingmergetree} +## 折叠 {#table_engines_versionedcollapsingmergetree} ### 数据 {#data} From 42ff73eb00401ee1609ca65108582dd28f91686c Mon Sep 17 00:00:00 2001 From: bluebirddm Date: Tue, 16 Jun 2020 02:43:06 +0800 Subject: [PATCH 34/52] Update versionedcollapsingmergetree.md (#11654) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update versionedcollapsingmergetree.md 简单翻译 * Update versionedcollapsingmergetree.md Co-authored-by: Ivan Blinkov --- .../versionedcollapsingmergetree.md | 24 +++++++++---------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/docs/zh/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md b/docs/zh/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md index 3ee35e7cdf7..257bc2ad203 100644 --- a/docs/zh/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md +++ b/docs/zh/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md @@ -1,6 +1,4 @@ --- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd toc_priority: 37 toc_title: "\u7248\u672C\u96C6\u5408\u5728\u65B0\u6811" --- @@ -39,17 +37,17 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] VersionedCollapsingMergeTree(sign, version) ``` -- `sign` — Name of the column with the type of row: `1` 是一个 “state” 行, `-1` 是一个 “cancel” 划 +- `sign` — 指定行类型的列名: `1` 是一个 “state” 行, `-1` 是一个 “cancel” 划 列数据类型应为 `Int8`. -- `version` — Name of the column with the version of the object state. +- `version` — 指定对象状态版本的列名。 列数据类型应为 `UInt*`. -**查询子句** +**查询 Clauses** -当创建一个 `VersionedCollapsingMergeTree` 表,相同 [条款](mergetree.md) 需要创建一个时 `MergeTree` 桌子 +当创建一个 `VersionedCollapsingMergeTree` 表时,跟创建一个 `MergeTree`表的时候需要相同 [Clause](mergetree.md)
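[Editorial note] Since the surrounding hunks only show the engine template, a minimal end-to-end sketch may help here; the table and column names are invented for illustration and the values are hypothetical:

``` sql
-- The Sign/Version columns play the roles described above.
CREATE TABLE uact_demo
(
    `UserID` UInt64,
    `PageViews` UInt8,
    `Sign` Int8,
    `Version` UInt8
)
ENGINE = VersionedCollapsingMergeTree(Sign, Version)
ORDER BY UserID;

-- A "state" row, its matching "cancel" row (same Version, opposite Sign), and a new state row:
INSERT INTO uact_demo VALUES (4324182021466249494, 5, 1, 1);
INSERT INTO uact_demo VALUES (4324182021466249494, 5, -1, 1), (4324182021466249494, 6, 1, 2);

-- Aggregating with the sign returns the collapsed result without waiting for background merges:
SELECT UserID, sum(PageViews * Sign) AS PageViews
FROM uact_demo
GROUP BY UserID
HAVING sum(Sign) > 0;
```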
@@ -69,11 +67,11 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] 所有的参数,除了 `sign` 和 `version` 具有相同的含义 `MergeTree`. -- `sign` — Name of the column with the type of row: `1` 是一个 “state” 行, `-1` 是一个 “cancel” 划 +- `sign` — 指定行类型的列名: `1` 是一个 “state” 行, `-1` 是一个 “cancel” 划 Column Data Type — `Int8`. -- `version` — Name of the column with the version of the object state. +- `version` — 指定对象状态版本的列名。 列数据类型应为 `UInt*`. @@ -125,23 +123,23 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] 1. 写入数据的程序应该记住对象的状态以取消它。 该 “cancel” 字符串应该是 “state” 与相反的字符串 `Sign`. 这增加了存储的初始大小,但允许快速写入数据。 2. 列中长时间增长的数组由于写入负载而降低了引擎的效率。 数据越简单,效率就越高。 -3. `SELECT` 结果很大程度上取决于对象变化历史的一致性。 准备插入数据时要准确。 您可以通过不一致的数据获得不可预测的结果,例如会话深度等非负指标的负值。 +3. `SELECT` 结果很大程度上取决于对象变化历史的一致性。 准备插入数据时要准确。 不一致的数据将导致不可预测的结果,例如会话深度等非负指标的负值。 ### 算法 {#table_engines-versionedcollapsingmergetree-algorithm} -当ClickHouse合并数据部分时,它会删除具有相同主键和版本且不同主键和版本的每对行 `Sign`. 行的顺序并不重要。 +当ClickHouse合并数据部分时,它会删除具有相同主键和版本但 `Sign`值不同的一对行. 行的顺序并不重要。 当ClickHouse插入数据时,它会按主键对行进行排序。 如果 `Version` 列不在主键中,ClickHouse将其隐式添加到主键作为最后一个字段并使用它进行排序。 ## 选择数据 {#selecting-data} -ClickHouse不保证具有相同主键的所有行都将位于相同的结果数据部分中,甚至位于相同的物理服务器上。 对于写入数据和随后合并数据部分都是如此。 此外,ClickHouse流程 `SELECT` 具有多个线程的查询,并且无法预测结果中的行顺序。 这意味着聚合是必需的,如果有必要得到完全 “collapsed” 从数据 `VersionedCollapsingMergeTree` 桌子 +ClickHouse不保证具有相同主键的所有行都将位于相同的结果数据部分中,甚至位于相同的物理服务器上。 对于写入数据和随后合并数据部分都是如此。 此外,ClickHouse流程 `SELECT` 具有多个线程的查询,并且无法预测结果中的行顺序。 这意味着,如果有必要从`VersionedCollapsingMergeTree` 表中得到完全 “collapsed” 的数据,聚合是必需的。 要完成折叠,请使用 `GROUP BY` 考虑符号的子句和聚合函数。 例如,要计算数量,请使用 `sum(Sign)` 而不是 `count()`. 要计算的东西的总和,使用 `sum(Sign * x)` 而不是 `sum(x)`,并添加 `HAVING sum(Sign) > 0`. 聚合 `count`, `sum` 和 `avg` 可以这样计算。 聚合 `uniq` 如果对象至少具有一个非折叠状态,则可以计算。 聚合 `min` 和 `max` 无法计算是因为 `VersionedCollapsingMergeTree` 不保存折叠状态值的历史记录。 -如果您需要提取数据 “collapsing” 但是,如果没有聚合(例如,要检查是否存在其最新值与某些条件匹配的行),则可以使用 `FINAL` 修饰符 `FROM` 条款 这种方法效率低下,不应与大型表一起使用。 +如果您需要提取数据 “collapsing” 但是,如果没有聚合(例如,要检查是否存在其最新值与某些条件匹配的行),则可以使用 `FINAL` 修饰 `FROM` 条件这种方法效率低下,不应与大型表一起使用。 ## 使用示例 {#example-of-use} @@ -233,6 +231,6 @@ SELECT * FROM UAct FINAL └─────────────────────┴───────────┴──────────┴──────┴─────────┘ ``` -这是一个非常低效的方式来选择数据。 不要把它用于大桌子。 +这是一个非常低效的方式来选择数据。 不要把它用于数据量大的表。 [原始文章](https://clickhouse.tech/docs/en/operations/table_engines/versionedcollapsingmergetree/) From 1474dd6d690c97a185dab462689e28ad0fb0ff93 Mon Sep 17 00:00:00 2001 From: BayoNet Date: Mon, 15 Jun 2020 21:44:05 +0300 Subject: [PATCH 35/52] DOCS-646: randomString (#11610) * [CLICKHOUSEDOCS] Document the "randomString" function (#121) * Add description of randomString function * Add description for randomString * Update docs/en/sql-reference/functions/other-functions.md Co-authored-by: BayoNet * Update docs/en/sql-reference/functions/other-functions.md Co-authored-by: BayoNet * Changed example * Add russian version * Fixed links * Fixed links Co-authored-by: Anna Devyatova Co-authored-by: BayoNet * CLICKHOUSEDOCS-646: Updated text. Fixed links. * CLICKHOUSEDOCS-646: Fixed more links. 
Co-authored-by: Sergei Shtykov Co-authored-by: Anna <42538400+adevyatova@users.noreply.github.com> Co-authored-by: Anna Devyatova --- .../functions/other-functions.md | 48 +++++++++++++++++++ .../settings.md | 4 +- .../functions/other-functions.md | 48 +++++++++++++++++++ docs/ru/sql-reference/statements/system.md | 2 +- .../sql-reference/table-functions/generate.md | 2 +- 5 files changed, 100 insertions(+), 4 deletions(-) diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 9aa26f32b18..18641614bef 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -1200,4 +1200,52 @@ SELECT number, randomPrintableASCII(30) as str, length(str) FROM system.numbers └────────┴────────────────────────────────┴──────────────────────────────────┘ ``` +## randomString {#randomstring} + +Generates a binary string of the specified length filled with random bytes (including zero bytes). + +**Syntax** + +``` sql +randomString(length) +``` + +**Parameters** + +- `length` — String length. Positive integer. + +**Returned value** + +- String filled with random bytes. + +Type: [String](../../sql-reference/data-types/string.md). + +**Example** + +Query: + +``` sql +SELECT randomString(30) AS str, length(str) AS len FROM numbers(2) FORMAT Vertical; +``` + +Result: + +``` text +Row 1: +────── +str: 3 G : pT ?w тi k aV f6 +len: 30 + +Row 2: +────── +str: 9 ,] ^ ) ]?? 8 +len: 30 +``` + +**See Also** + +- [generateRandom](../../sql-reference/table-functions/generate.md#generaterandom) +- [randomPrintableASCII](../../sql-reference/functions/other-functions.md#randomascii) + + [Original article](https://clickhouse.tech/docs/en/query_language/functions/other_functions/) diff --git a/docs/ru/operations/server-configuration-parameters/settings.md b/docs/ru/operations/server-configuration-parameters/settings.md index e3c1629a46a..5bfedf4c520 100644 --- a/docs/ru/operations/server-configuration-parameters/settings.md +++ b/docs/ru/operations/server-configuration-parameters/settings.md @@ -78,7 +78,7 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat default ``` -## dictionaries\_config {#dictionaries-config} +## dictionaries\_config {#server_configuration_parameters-dictionaries_config} Путь к конфигурации внешних словарей. @@ -95,7 +95,7 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat *_dictionary.xml ``` -## dictionaries\_lazy\_load {#dictionaries-lazy-load} +## dictionaries\_lazy\_load {#server_configuration_parameters-dictionaries_lazy_load} Отложенная загрузка словарей. diff --git a/docs/ru/sql-reference/functions/other-functions.md b/docs/ru/sql-reference/functions/other-functions.md index 2c715cd15a5..7161b1a2468 100644 --- a/docs/ru/sql-reference/functions/other-functions.md +++ b/docs/ru/sql-reference/functions/other-functions.md @@ -1153,4 +1153,52 @@ SELECT number, randomPrintableASCII(30) as str, length(str) FROM system.numbers └────────┴────────────────────────────────┴──────────────────────────────────┘ ``` +## randomString {#randomstring} + +Генерирует бинарную строку заданной длины, заполненную случайными байтами (в том числе нулевыми). + +**Синтаксис** + +``` sql +randomString(length) +``` + +**Параметры** + +- `length` — длина строки. Положительное целое число. + +**Возвращаемое значение** + +- Строка, заполненная случайными байтами. + +Type: [String](../../sql-reference/data-types/string.md). 
+ +**Пример** + +Запрос: + +``` sql +SELECT randomString(30) AS str, length(str) AS len FROM numbers(2) FORMAT Vertical; +``` + +Ответ: + +``` text +Row 1: +────── +str: 3 G : pT ?w тi k aV f6 +len: 30 + +Row 2: +────── +str: 9 ,] ^ ) ]?? 8 +len: 30 +``` + +**Смотрите также** + +- [generateRandom](../../sql-reference/table-functions/generate.md#generaterandom) +- [randomPrintableASCII](../../sql-reference/functions/other-functions.md#randomascii) + + [Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/other_functions/) diff --git a/docs/ru/sql-reference/statements/system.md b/docs/ru/sql-reference/statements/system.md index 1b66fa039d9..9a6dccd7d89 100644 --- a/docs/ru/sql-reference/statements/system.md +++ b/docs/ru/sql-reference/statements/system.md @@ -38,7 +38,7 @@ ## RELOAD DICTIONARIES {#query_language-system-reload-dictionaries} Перегружает все словари, которые были успешно загружены до этого. -По умолчанию включена ленивая загрузка [dictionaries\_lazy\_load](../../sql-reference/statements/system.md#dictionaries-lazy-load), поэтому словари не загружаются автоматически при старте, а только при первом обращении через dictGet или SELECT к ENGINE=Dictionary. После этого такие словари (LOADED) будут перегружаться командой `system reload dictionaries`. +По умолчанию включена ленивая загрузка [dictionaries\_lazy\_load](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load), поэтому словари не загружаются автоматически при старте, а только при первом обращении через dictGet или SELECT к ENGINE=Dictionary. После этого такие словари (LOADED) будут перегружаться командой `system reload dictionaries`. Всегда возвращает `Ok.`, вне зависимости от результата обновления словарей. ## RELOAD DICTIONARY Dictionary\_name {#query_language-system-reload-dictionary} diff --git a/docs/ru/sql-reference/table-functions/generate.md b/docs/ru/sql-reference/table-functions/generate.md index b1abdbf1d63..9e6d36b2a4b 100644 --- a/docs/ru/sql-reference/table-functions/generate.md +++ b/docs/ru/sql-reference/table-functions/generate.md @@ -1,4 +1,4 @@ -# generateRandom {#generateRandom} +# generateRandom {#generaterandom} Генерирует случайные данные с заданной схемой. Позволяет заполнять тестовые таблицы данными. From f0eafed5206e0cbe8f89fd078c733a5889f6b0af Mon Sep 17 00:00:00 2001 From: BayoNet Date: Mon, 15 Jun 2020 21:55:04 +0300 Subject: [PATCH 36/52] DOCS-678: Updated ASOF Join description (#11676) * CLICKHOUSEDOCS-678: Updated ASOF Join Usage. * CLICKHOUSEDOCS-678: Updated templates. * Update docs/ru/sql-reference/statements/select/join.md Co-authored-by: Ivan Blinkov * CLICKHOUSEDOCS-678: Update by comments. Co-authored-by: Sergei Shtykov Co-authored-by: emironyuk Co-authored-by: Ivan Blinkov --- docs/_description_templates/template-function.md | 2 +- docs/_description_templates/template-setting.md | 2 +- docs/en/sql-reference/statements/select/join.md | 6 +++++- docs/ru/sql-reference/statements/select/join.md | 6 +++++- 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/docs/_description_templates/template-function.md b/docs/_description_templates/template-function.md index 1acf92cb501..b69d7ed5309 100644 --- a/docs/_description_templates/template-function.md +++ b/docs/_description_templates/template-function.md @@ -1,4 +1,4 @@ -## function-name {#function-name-in-lower-case} +## functionName {#functionname-in-lower-case} Short description. 
diff --git a/docs/_description_templates/template-setting.md b/docs/_description_templates/template-setting.md index 5a33716f899..fc912aba3e1 100644 --- a/docs/_description_templates/template-setting.md +++ b/docs/_description_templates/template-setting.md @@ -1,4 +1,4 @@ -## setting-name {#setting-name-in-lower-case} +## setting_name {#setting_name} Description. diff --git a/docs/en/sql-reference/statements/select/join.md b/docs/en/sql-reference/statements/select/join.md index 5ac3f4a0e25..87bc542dbdc 100644 --- a/docs/en/sql-reference/statements/select/join.md +++ b/docs/en/sql-reference/statements/select/join.md @@ -51,7 +51,11 @@ Modifies how matching by "join keys" is performed `ASOF JOIN` is useful when you need to join records that have no exact match. -Tables for `ASOF JOIN` must have an ordered sequence column. This column cannot be alone in a table, and should be one of the data types: `UInt32`, `UInt64`, `Float32`, `Float64`, `Date`, and `DateTime`. +Algorithm requires the special column in tables. This column: + +- Must contain an ordered sequence. +- Can be one of the following types: [Int*, UInt*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Date](../../data-types/date.md), [DateTime](../../data-types/datetime.md), [Decimal*](../../data-types/decimal.md). +- Can't be the only column in the `JOIN` clause. Syntax `ASOF JOIN ... ON`: diff --git a/docs/ru/sql-reference/statements/select/join.md b/docs/ru/sql-reference/statements/select/join.md index 60f391d888b..26e7ae8257e 100644 --- a/docs/ru/sql-reference/statements/select/join.md +++ b/docs/ru/sql-reference/statements/select/join.md @@ -45,7 +45,11 @@ FROM `ASOF JOIN` применим в том случае, когда необходимо объединять записи, которые не имеют точного совпадения. -Таблицы для `ASOF JOIN` должны иметь столбец с отсортированной последовательностью. Этот столбец не может быть единственным в таблице и должен быть одного из типов: `UInt32`, `UInt64`, `Float32`, `Float64`, `Date` и `DateTime`. +Для работы алгоритма необходим специальный столбец в таблицах. Этот столбец: + +- Должен содержать упорядоченную последовательность. +- Может быть одного из следующих типов: [Int*, UInt*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Date](../../data-types/date.md), [DateTime](../../data-types/datetime.md), [Decimal*](../../data-types/decimal.md). +- Не может быть единственным столбцом в секции `JOIN`. Синтаксис `ASOF JOIN ... 
ON`: From d6e69211b1ba074f5e51fd467e51b892a7e091a6 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Mon, 15 Jun 2020 22:05:36 +0300 Subject: [PATCH 37/52] fix test --- tests/queries/0_stateless/01269_create_with_null.reference | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01269_create_with_null.reference b/tests/queries/0_stateless/01269_create_with_null.reference index 739063af67f..e4945eed114 100644 --- a/tests/queries/0_stateless/01269_create_with_null.reference +++ b/tests/queries/0_stateless/01269_create_with_null.reference @@ -1,4 +1,4 @@ Nullable(Int32) Int32 Nullable(Int32) Int32 -CREATE TABLE default.data_null\n(\n `a` Nullable(Int32), \n `b` Int32, \n `c` Nullable(Int32), \n `d` Int32\n)\nENGINE = Memory() +CREATE TABLE default.data_null\n(\n `a` Nullable(Int32),\n `b` Int32,\n `c` Nullable(Int32),\n `d` Int32\n)\nENGINE = Memory() Nullable(Int32) Int32 Nullable(Int32) Nullable(Int32) -CREATE TABLE default.set_null\n(\n `a` Nullable(Int32), \n `b` Int32, \n `c` Nullable(Int32), \n `d` Nullable(Int32)\n)\nENGINE = Memory() +CREATE TABLE default.set_null\n(\n `a` Nullable(Int32),\n `b` Int32,\n `c` Nullable(Int32),\n `d` Nullable(Int32)\n)\nENGINE = Memory() From d10109dc38d6f2caa7b1e1a897e0a73082412bde Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Jun 2020 22:18:33 +0300 Subject: [PATCH 38/52] Remove excessive statement #11131 --- src/Storages/MergeTree/MergeTreeDataWriter.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index 52eace30657..f3da98f0ba3 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -139,7 +139,6 @@ BlocksWithPartition MergeTreeDataWriter::splitBlockIntoParts(const Block & block return result; data.check(block, true); - block.checkNumberOfRows(); if (!data.hasPartitionKey()) /// Table is not partitioned. { From bb6c0743fc512a817b4ee53dc47c53822083b10d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 15 Jun 2020 23:30:36 +0300 Subject: [PATCH 39/52] Change the level of log message about failure to listen, to warning #4406 --- programs/server/Server.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 9734bafe30e..25d8e5595b7 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -869,7 +869,7 @@ int Server::main(const std::vector & /*args*/) if (listen_try) { - LOG_ERROR(log, "{}. If it is an IPv6 or IPv4 address and your host has disabled IPv6 or IPv4, then consider to " + LOG_WARNING(log, "{}. If it is an IPv6 or IPv4 address and your host has disabled IPv6 or IPv4, then consider to " "specify not disabled IPv4 or IPv6 address to listen in element of configuration " "file. Example for disabled IPv6: 0.0.0.0 ." 
" Example for disabled IPv4: ::", @@ -1013,7 +1013,8 @@ int Server::main(const std::vector & /*args*/) } if (servers.empty()) - throw Exception("No servers started (add valid listen_host and 'tcp_port' or 'http_port' to configuration file.)", ErrorCodes::NO_ELEMENTS_IN_CONFIG); + throw Exception("No servers started (add valid listen_host and 'tcp_port' or 'http_port' to configuration file.)", + ErrorCodes::NO_ELEMENTS_IN_CONFIG); global_context->enableNamedSessions(); From bc58e22c5bcf85dc42b97af40357483f62a4ecf6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Jun 2020 01:23:13 +0300 Subject: [PATCH 40/52] Whitespace --- src/Common/XDBCBridgeHelper.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Common/XDBCBridgeHelper.h b/src/Common/XDBCBridgeHelper.h index 1609737107e..233c5c83df4 100644 --- a/src/Common/XDBCBridgeHelper.h +++ b/src/Common/XDBCBridgeHelper.h @@ -272,7 +272,8 @@ struct ODBCBridgeMixin return AccessType::ODBC; } - static std::unique_ptr startBridge(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log, const Poco::Timespan & http_timeout) + static std::unique_ptr startBridge( + const Poco::Util::AbstractConfiguration & config, Poco::Logger * log, const Poco::Timespan & http_timeout) { /// Path to executable folder Poco::Path path{config.getString("application.dir", "/usr/bin")}; From 1e73a56a778d2f2e864f8e932d18e1b44f2beba5 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Jun 2020 01:23:56 +0300 Subject: [PATCH 41/52] Whitespace --- src/Common/XDBCBridgeHelper.h | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Common/XDBCBridgeHelper.h b/src/Common/XDBCBridgeHelper.h index 233c5c83df4..9320122d2e5 100644 --- a/src/Common/XDBCBridgeHelper.h +++ b/src/Common/XDBCBridgeHelper.h @@ -278,7 +278,6 @@ struct ODBCBridgeMixin /// Path to executable folder Poco::Path path{config.getString("application.dir", "/usr/bin")}; - std::vector cmd_args; path.setFileName("clickhouse-odbc-bridge"); From eabbabed04aba337e94d32b4a869bc84e883fbec Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Tue, 16 Jun 2020 01:24:00 +0300 Subject: [PATCH 42/52] fix 'LIMIT WITH TIES' with aliases --- src/Interpreters/InterpreterSelectQuery.cpp | 8 ++++++ .../01142_with_ties_and_aliases.reference | 25 +++++++++++++++++++ .../01142_with_ties_and_aliases.sql | 12 +++++++++ 3 files changed, 45 insertions(+) create mode 100644 tests/queries/0_stateless/01142_with_ties_and_aliases.reference create mode 100644 tests/queries/0_stateless/01142_with_ties_and_aliases.sql diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index ac17a3042d8..523e467261b 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -973,6 +973,14 @@ void InterpreterSelectQuery::executeImpl(QueryPipeline & pipeline, const BlockIn executeWithFill(pipeline); + /// If we have 'WITH TIES', we need execute limit before projection, + /// because in that case columns from 'ORDER BY' are used. + if (query.limit_with_ties) + { + executeLimit(pipeline); + has_prelimit = true; + } + /** We must do projection after DISTINCT because projection may remove some columns. 
*/ executeProjection(pipeline, expressions.final_projection); diff --git a/tests/queries/0_stateless/01142_with_ties_and_aliases.reference b/tests/queries/0_stateless/01142_with_ties_and_aliases.reference new file mode 100644 index 00000000000..1846e07a908 --- /dev/null +++ b/tests/queries/0_stateless/01142_with_ties_and_aliases.reference @@ -0,0 +1,25 @@ +0 0 +1 0 +2 0 +3 0 +4 0 +1 +1 +1 +1 +1 +0 +1 +2 +3 +4 +0 0 +0 1 +0 2 +0 3 +0 4 +0 0 +0 1 +0 2 +0 3 +0 4 diff --git a/tests/queries/0_stateless/01142_with_ties_and_aliases.sql b/tests/queries/0_stateless/01142_with_ties_and_aliases.sql new file mode 100644 index 00000000000..f086cb9d907 --- /dev/null +++ b/tests/queries/0_stateless/01142_with_ties_and_aliases.sql @@ -0,0 +1,12 @@ +select number, intDiv(number,5) value from numbers(20) order by value limit 3 with ties; + +drop table if exists wt; +create table wt (a Int, b Int) engine = Memory; +insert into wt select 0, number from numbers(5); + +select 1 from wt order by a limit 3 with ties; +select b from wt order by a limit 3 with ties; +with a * 2 as c select a, b from wt order by c limit 3 with ties; +select a * 2 as c, b from wt order by c limit 3 with ties; + +drop table if exists wt; From 53d985909e21ce536a900e5c53944bd4d706315d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Jun 2020 01:54:19 +0300 Subject: [PATCH 43/52] Fix race condition in SYSTEM SYNC REPLICA --- src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp | 4 ++-- src/Storages/StorageReplicatedMergeTree.cpp | 13 ++++++++++--- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index 8a9dbceba04..6325b1adca4 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -646,7 +646,7 @@ void ReplicatedMergeTreeQueue::updateMutations(zkutil::ZooKeeperPtr zookeeper, C } } - if (some_active_mutations_were_killed) + if (some_active_mutations_were_killed && storage.queue_task_handle) storage.queue_task_handle->signalReadyToRun(); if (!entries_to_load.empty()) @@ -759,7 +759,7 @@ ReplicatedMergeTreeMutationEntryPtr ReplicatedMergeTreeQueue::removeMutation( LOG_DEBUG(log, "Removed mutation {} from local state.", entry->znode_name); } - if (mutation_was_active) + if (mutation_was_active && storage.queue_task_handle) storage.queue_task_handle->signalReadyToRun(); return entry; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 5931bca17ea..885db89e5b0 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5643,9 +5643,16 @@ bool StorageReplicatedMergeTree::waitForShrinkingQueueSize(size_t queue_size, UI /// Let's fetch new log entries firstly queue.pullLogsToQueue(getZooKeeper()); - /// This is significant, because the execution of this task could be delayed at BackgroundPool. - /// And we force it to be executed. - queue_task_handle->signalReadyToRun(); + + { + auto lock = queue.lockQueue(); + if (!queue_task_handle) + return false; + + /// This is significant, because the execution of this task could be delayed at BackgroundPool. + /// And we force it to be executed. 
+ queue_task_handle->signalReadyToRun(); + } Poco::Event target_size_event; auto callback = [&target_size_event, queue_size] (size_t new_queue_size) From 00224ee94f482573ca996cecc11a8110aba8dc15 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Jun 2020 02:04:12 +0300 Subject: [PATCH 44/52] Added a test --- ...01320_create_sync_race_condition.reference | 0 .../01320_create_sync_race_condition.sh | 28 +++++++++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 tests/queries/0_stateless/01320_create_sync_race_condition.reference create mode 100755 tests/queries/0_stateless/01320_create_sync_race_condition.sh diff --git a/tests/queries/0_stateless/01320_create_sync_race_condition.reference b/tests/queries/0_stateless/01320_create_sync_race_condition.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01320_create_sync_race_condition.sh b/tests/queries/0_stateless/01320_create_sync_race_condition.sh new file mode 100755 index 00000000000..2e42033644a --- /dev/null +++ b/tests/queries/0_stateless/01320_create_sync_race_condition.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. $CURDIR/../shell_config.sh + +set -e + +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS r;" + +function thread1() +{ + while true; do $CLICKHOUSE_CLIENT -n --query "CREATE TABLE r (x UInt64) ENGINE = ReplicatedMergeTree('/test/table', 'r') ORDER BY x; DROP TABLE r;"; done +} + +function thread2() +{ + while true; do $CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA r" 2>/dev/null; done +} + +export -f thread1 +export -f thread2 + +timeout 10 bash -c thread1 & +timeout 10 bash -c thread2 & + +wait + +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS r;" From 22a92faab649479c69bcc3eab7a7f6bfaef30c45 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 16 Jun 2020 05:14:53 +0300 Subject: [PATCH 45/52] Avoid connection to replica when fetches are cancelled --- src/Storages/MergeTree/DataPartsExchange.cpp | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index acc3bf38461..6796e630ff2 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -63,8 +63,10 @@ void Service::processQuery(const Poco::Net::HTMLForm & params, ReadBuffer & /*bo static std::atomic_uint total_sends {0}; - if ((data_settings->replicated_max_parallel_sends && total_sends >= data_settings->replicated_max_parallel_sends) - || (data_settings->replicated_max_parallel_sends_for_table && data.current_table_sends >= data_settings->replicated_max_parallel_sends_for_table)) + if ((data_settings->replicated_max_parallel_sends + && total_sends >= data_settings->replicated_max_parallel_sends) + || (data_settings->replicated_max_parallel_sends_for_table + && data.current_table_sends >= data_settings->replicated_max_parallel_sends_for_table)) { response.setStatus(std::to_string(HTTP_TOO_MANY_REQUESTS)); response.setReason("Too many concurrent fetches, try again later"); @@ -182,6 +184,9 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( bool to_detached, const String & tmp_prefix_) { + if (blocker.isCancelled()) + throw Exception("Fetching of part was cancelled", ErrorCodes::ABORTED); + /// Validation of the input that may come from malicious replica. 
MergeTreePartInfo::fromPartName(part_name, data.format_version); const auto data_settings = data.getSettings(); @@ -294,7 +299,8 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPart( if (blocker.isCancelled()) { - /// NOTE The is_cancelled flag also makes sense to check every time you read over the network, performing a poll with a not very large timeout. + /// NOTE The is_cancelled flag also makes sense to check every time you read over the network, + /// performing a poll with a not very large timeout. /// And now we check it only between read chunks (in the `copyData` function). disk->removeRecursive(part_download_path); throw Exception("Fetching of part was cancelled", ErrorCodes::ABORTED); From 9a26d48ad0e595468225566aafb8c78d41a20ae0 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Tue, 16 Jun 2020 09:31:00 +0300 Subject: [PATCH 46/52] Basic blog similar to docs (#11609) * Basic blog similar to docs * rename post * no post content in post_meta * update readme and template * more "en" content * complete "en" content * build blog redirects * redirects for migration * link sitemaps * update po * add "ru" content * ru redirects * remove old domain mentions * adjust styles * content improvements * +1 alt * use main images from CDN * use re-hosted in-content images * extra vertical margin around embedded youtube * minor improvements * adjust post page * adjust html meta * adjust post page * improve blog rendering --- docs/en/introduction/adopters.md | 2 +- .../sql-reference/data-types/domains/ipv4.md | 2 +- .../sql-reference/data-types/domains/ipv6.md | 2 +- .../functions/array-functions.md | 6 +- docs/en/whats-new/changelog/2017.md | 2 +- docs/es/introduction/adopters.md | 2 +- .../sql-reference/data-types/domains/ipv4.md | 2 +- .../sql-reference/data-types/domains/ipv6.md | 2 +- .../functions/array-functions.md | 6 +- docs/es/whats-new/changelog/2017.md | 2 +- docs/fa/introduction/adopters.md | 2 +- .../sql-reference/data-types/domains/ipv4.md | 2 +- .../sql-reference/data-types/domains/ipv6.md | 2 +- .../functions/array-functions.md | 6 +- docs/fa/whats-new/changelog/2017.md | 2 +- docs/fr/introduction/adopters.md | 2 +- .../sql-reference/data-types/domains/ipv4.md | 2 +- .../sql-reference/data-types/domains/ipv6.md | 2 +- .../functions/array-functions.md | 6 +- docs/fr/whats-new/changelog/2017.md | 2 +- docs/ja/introduction/adopters.md | 2 +- docs/ja/introduction/distinctive-features.md | 2 +- docs/ja/introduction/history.md | 2 +- docs/ja/introduction/performance.md | 6 +- .../sql-reference/data-types/domains/ipv4.md | 2 +- .../sql-reference/data-types/domains/ipv6.md | 2 +- .../functions/array-functions.md | 6 +- docs/ja/whats-new/changelog/2017.md | 2 +- .../sql-reference/data-types/domains/ipv4.md | 2 +- .../sql-reference/data-types/domains/ipv6.md | 2 +- .../functions/array-functions.md | 4 +- docs/tools/blog.py | 107 +++++++++++ docs/tools/build.py | 38 ++-- docs/tools/mdx_clickhouse.py | 28 ++- docs/tools/nav.py | 48 ++++- docs/tools/redirects.py | 38 ++-- docs/tools/website.py | 39 ++++ docs/tr/introduction/adopters.md | 2 +- .../sql-reference/data-types/domains/ipv4.md | 2 +- .../sql-reference/data-types/domains/ipv6.md | 2 +- .../functions/array-functions.md | 6 +- docs/tr/whats-new/changelog/2017.md | 2 +- docs/zh/introduction/adopters.md | 2 +- .../sql-reference/data-types/domains/ipv4.md | 2 +- .../sql-reference/data-types/domains/ipv6.md | 2 +- docs/zh/whats-new/changelog/2017.md | 2 +- website/blog/README.md | 47 +++++ ...on-of-data-structures-in-yandex-metrica.md 
| 108 +++++++++++ .../2016/how-to-update-data-in-clickhouse.md | 169 ++++++++++++++++++ .../en/2016/yandex-opensources-clickhouse.md | 12 ++ .../en/2017/clickhouse-at-data-scale-2017.md | 10 ++ .../2017/clickhouse-at-percona-live-2017.md | 22 +++ ...ckhouse-meetup-in-berlin-october-5-2017.md | 10 ++ ...khouse-meetup-in-santa-clara-may-4-2017.md | 8 + .../join-the-clickhouse-meetup-in-berlin.md | 13 ++ ...ouse-meetup-in-amsterdam-on-november-15.md | 8 + .../2018/clickhouse-at-analysys-a10-2018.md | 27 +++ .../clickhouse-at-percona-live-europe-2018.md | 25 +++ ...ty-meetup-in-beijing-on-january-27-2018.md | 68 +++++++ ...ty-meetup-in-beijing-on-october-28-2018.md | 54 ++++++ ...mmunity-meetup-in-berlin-on-july-3-2018.md | 39 ++++ ...se-community-meetup-in-berlin-on-july-3.md | 8 + ...unity-meetup-in-paris-on-october-2-2018.md | 20 +++ ...meetup-in-amsterdam-on-november-15-2018.md | 27 +++ .../en/2018/concept-cloud-mergetree-tables.md | 120 +++++++++++++ .../2019/clickhouse-at-percona-live-2019.md | 38 ++++ ...nese-academy-of-science-on-june-11-2019.md | 17 ++ ...khouse-meetup-in-beijing-on-june-8-2019.md | 35 ++++ ...khouse-meetup-in-limassol-on-may-7-2019.md | 41 +++++ ...khouse-meetup-in-madrid-on-april-2-2019.md | 28 +++ ...-meetup-in-san-francisco-on-june-4-2019.md | 10 ++ ...peed-up-lz4-decompression-in-clickhouse.md | 12 ++ ...of-clickhouse-meetups-in-china-for-2019.md | 14 ++ .../five-methods-for-database-obfuscation.md | 10 ++ website/blog/en/index.md | 3 + website/blog/en/redirects.txt | 32 ++++ ...khouse-meetup-v-moskve-21-noyabrya-2016.md | 8 + .../ru/2016/clickhouse-na-highload-2016.md | 14 ++ ...raneniya-i-obrabotki-dannykh-v-yandekse.md | 10 ++ .../ru/2016/yandeks-otkryvaet-clickhouse.md | 10 ++ .../ru/2017/clickhouse-meetup-edet-v-minsk.md | 14 ++ ...use-meetup-v-ekaterinburge-16-maya-2017.md | 8 + .../2017/clickhouse-meetup-v-minske-itogi.md | 16 ++ ...se-meetup-v-novosibirske-3-aprelya-2017.md | 10 ++ ...tup-v-sankt-peterburge-28-fevralya-2017.md | 8 + .../blog/ru/2017/clickhouse-na-uwdc-2017.md | 10 ++ ...ickhouse-meetup-v-limassole-7-maya-2019.md | 38 ++++ ...house-meetup-v-moskve-5-sentyabrya-2019.md | 10 ++ ...se-meetup-v-novosibirske-26-iyunya-2019.md | 12 ++ ...eetup-v-sankt-peterburge-27-iyulya-2019.md | 10 ++ ...ickrouse-meetup-v-minske-11-iyulya-2019.md | 12 ++ website/blog/ru/index.md | 3 + website/blog/ru/redirects.txt | 15 ++ website/css/blog.css | 8 + website/locale/en/LC_MESSAGES/messages.po | 22 ++- website/locale/es/LC_MESSAGES/messages.mo | Bin 6641 -> 6739 bytes website/locale/es/LC_MESSAGES/messages.po | 22 ++- website/locale/fa/LC_MESSAGES/messages.mo | Bin 7453 -> 7553 bytes website/locale/fa/LC_MESSAGES/messages.po | 22 ++- website/locale/fr/LC_MESSAGES/messages.mo | Bin 6490 -> 6585 bytes website/locale/fr/LC_MESSAGES/messages.po | 22 ++- website/locale/ja/LC_MESSAGES/messages.mo | Bin 6613 -> 6691 bytes website/locale/ja/LC_MESSAGES/messages.po | 22 ++- website/locale/messages.pot | 22 ++- website/locale/ru/LC_MESSAGES/messages.mo | Bin 8556 -> 8672 bytes website/locale/ru/LC_MESSAGES/messages.po | 22 ++- website/locale/tr/LC_MESSAGES/messages.mo | Bin 6378 -> 6473 bytes website/locale/tr/LC_MESSAGES/messages.po | 22 ++- website/locale/zh/LC_MESSAGES/messages.mo | Bin 5926 -> 6007 bytes website/locale/zh/LC_MESSAGES/messages.po | 22 ++- website/main.html | 41 +++-- website/sitemap-index.xml | 6 + website/templates/blog/content.html | 43 +++++ website/templates/blog/footer.html | 9 + website/templates/blog/nav.html | 45 +++++ 
website/templates/common_meta.html | 11 +- website/templates/docs/ld_json.html | 13 +- website/templates/index/community.html | 4 +- website/templates/index/nav.html | 3 +- 119 files changed, 1848 insertions(+), 184 deletions(-) create mode 100644 docs/tools/blog.py create mode 100644 website/blog/README.md create mode 100644 website/blog/en/2016/evolution-of-data-structures-in-yandex-metrica.md create mode 100644 website/blog/en/2016/how-to-update-data-in-clickhouse.md create mode 100644 website/blog/en/2016/yandex-opensources-clickhouse.md create mode 100644 website/blog/en/2017/clickhouse-at-data-scale-2017.md create mode 100644 website/blog/en/2017/clickhouse-at-percona-live-2017.md create mode 100644 website/blog/en/2017/clickhouse-meetup-in-berlin-october-5-2017.md create mode 100644 website/blog/en/2017/clickhouse-meetup-in-santa-clara-may-4-2017.md create mode 100644 website/blog/en/2017/join-the-clickhouse-meetup-in-berlin.md create mode 100644 website/blog/en/2018/announcing-clickhouse-meetup-in-amsterdam-on-november-15.md create mode 100644 website/blog/en/2018/clickhouse-at-analysys-a10-2018.md create mode 100644 website/blog/en/2018/clickhouse-at-percona-live-europe-2018.md create mode 100644 website/blog/en/2018/clickhouse-community-meetup-in-beijing-on-january-27-2018.md create mode 100644 website/blog/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018.md create mode 100644 website/blog/en/2018/clickhouse-community-meetup-in-berlin-on-july-3-2018.md create mode 100644 website/blog/en/2018/clickhouse-community-meetup-in-berlin-on-july-3.md create mode 100644 website/blog/en/2018/clickhouse-community-meetup-in-paris-on-october-2-2018.md create mode 100644 website/blog/en/2018/clickhouse-meetup-in-amsterdam-on-november-15-2018.md create mode 100644 website/blog/en/2018/concept-cloud-mergetree-tables.md create mode 100644 website/blog/en/2019/clickhouse-at-percona-live-2019.md create mode 100644 website/blog/en/2019/clickhouse-lecture-at-institute-of-computing-technology-chinese-academy-of-science-on-june-11-2019.md create mode 100644 website/blog/en/2019/clickhouse-meetup-in-beijing-on-june-8-2019.md create mode 100644 website/blog/en/2019/clickhouse-meetup-in-limassol-on-may-7-2019.md create mode 100644 website/blog/en/2019/clickhouse-meetup-in-madrid-on-april-2-2019.md create mode 100644 website/blog/en/2019/clickhouse-meetup-in-san-francisco-on-june-4-2019.md create mode 100644 website/blog/en/2019/how-to-speed-up-lz4-decompression-in-clickhouse.md create mode 100644 website/blog/en/2019/schedule-of-clickhouse-meetups-in-china-for-2019.md create mode 100644 website/blog/en/2020/five-methods-for-database-obfuscation.md create mode 100644 website/blog/en/index.md create mode 100644 website/blog/en/redirects.txt create mode 100644 website/blog/ru/2016/clickhouse-meetup-v-moskve-21-noyabrya-2016.md create mode 100644 website/blog/ru/2016/clickhouse-na-highload-2016.md create mode 100644 website/blog/ru/2016/clickhouse-na-vstreche-pro-infrastrukturu-khraneniya-i-obrabotki-dannykh-v-yandekse.md create mode 100644 website/blog/ru/2016/yandeks-otkryvaet-clickhouse.md create mode 100644 website/blog/ru/2017/clickhouse-meetup-edet-v-minsk.md create mode 100644 website/blog/ru/2017/clickhouse-meetup-v-ekaterinburge-16-maya-2017.md create mode 100644 website/blog/ru/2017/clickhouse-meetup-v-minske-itogi.md create mode 100644 website/blog/ru/2017/clickhouse-meetup-v-novosibirske-3-aprelya-2017.md create mode 100644 
website/blog/ru/2017/clickhouse-meetup-v-sankt-peterburge-28-fevralya-2017.md create mode 100644 website/blog/ru/2017/clickhouse-na-uwdc-2017.md create mode 100644 website/blog/ru/2019/clickhouse-meetup-v-limassole-7-maya-2019.md create mode 100644 website/blog/ru/2019/clickhouse-meetup-v-moskve-5-sentyabrya-2019.md create mode 100644 website/blog/ru/2019/clickhouse-meetup-v-novosibirske-26-iyunya-2019.md create mode 100644 website/blog/ru/2019/clickhouse-meetup-v-sankt-peterburge-27-iyulya-2019.md create mode 100644 website/blog/ru/2019/clickrouse-meetup-v-minske-11-iyulya-2019.md create mode 100644 website/blog/ru/index.md create mode 100644 website/blog/ru/redirects.txt create mode 100644 website/css/blog.css create mode 100644 website/templates/blog/content.html create mode 100644 website/templates/blog/footer.html create mode 100644 website/templates/blog/nav.html diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md index 081f963f74f..df9cdfa6430 100644 --- a/docs/en/introduction/adopters.md +++ b/docs/en/introduction/adopters.md @@ -35,7 +35,7 @@ toc_title: Adopters | [Exness](https://www.exness.com){.favicon} | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) | | [Geniee](https://geniee.co.jp){.favicon} | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) | | [HUYA](https://www.huya.com/){.favicon} | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | -| [Idealista](https://www.idealista.com){.favicon} | Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | +| [Idealista](https://www.idealista.com){.favicon} | Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | | [Infovista](https://www.infovista.com/){.favicon} | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | | [InnoGames](https://www.innogames.com){.favicon} | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | | [Integros](https://integros.com){.favicon} | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | diff --git a/docs/en/sql-reference/data-types/domains/ipv4.md b/docs/en/sql-reference/data-types/domains/ipv4.md index d8735d70b29..1237514b9e7 100644 --- a/docs/en/sql-reference/data-types/domains/ipv4.md +++ b/docs/en/sql-reference/data-types/domains/ipv4.md @@ -31,7 +31,7 @@ CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from; `IPv4` domain supports custom input format as IPv4-strings: ``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242'); +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.tech/docs/en/', '116.106.34.242'); SELECT * FROM 
hits; ``` diff --git a/docs/en/sql-reference/data-types/domains/ipv6.md b/docs/en/sql-reference/data-types/domains/ipv6.md index 7fd88887acc..bc57202bf66 100644 --- a/docs/en/sql-reference/data-types/domains/ipv6.md +++ b/docs/en/sql-reference/data-types/domains/ipv6.md @@ -31,7 +31,7 @@ CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from; `IPv6` domain supports custom input as IPv6-strings: ``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1'); +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.tech/docs/en/', '2a02:e980:1e::1'); SELECT * FROM hits; ``` diff --git a/docs/en/sql-reference/functions/array-functions.md b/docs/en/sql-reference/functions/array-functions.md index 4f449eea516..1468b48695b 100644 --- a/docs/en/sql-reference/functions/array-functions.md +++ b/docs/en/sql-reference/functions/array-functions.md @@ -701,13 +701,13 @@ arrayDifference(array) **Parameters** -- `array` – [Array](https://clickhouse.yandex/docs/en/data_types/array/). +- `array` – [Array](https://clickhouse.tech/docs/en/data_types/array/). **Returned values** Returns an array of differences between adjacent elements. -Type: [UInt\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#int-ranges), [Float\*](https://clickhouse.yandex/docs/en/data_types/float/). +Type: [UInt\*](https://clickhouse.tech/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.tech/docs/en/data_types/int_uint/#int-ranges), [Float\*](https://clickhouse.tech/docs/en/data_types/float/). **Example** @@ -753,7 +753,7 @@ arrayDistinct(array) **Parameters** -- `array` – [Array](https://clickhouse.yandex/docs/en/data_types/array/). +- `array` – [Array](https://clickhouse.tech/docs/en/data_types/array/). **Returned values** diff --git a/docs/en/whats-new/changelog/2017.md b/docs/en/whats-new/changelog/2017.md index d819324b07a..3b48e23233f 100644 --- a/docs/en/whats-new/changelog/2017.md +++ b/docs/en/whats-new/changelog/2017.md @@ -24,7 +24,7 @@ This release contains bug fixes for the previous release 1.1.54310: #### New Features: {#new-features} - Custom partitioning key for the MergeTree family of table engines. -- [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) table engine. +- [Kafka](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) table engine. - Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse. - Added support for time zones with non-integer offsets from UTC. - Added support for arithmetic operations with time intervals. diff --git a/docs/es/introduction/adopters.md b/docs/es/introduction/adopters.md index e41e8005cc7..4c0aa78d57b 100644 --- a/docs/es/introduction/adopters.md +++ b/docs/es/introduction/adopters.md @@ -37,7 +37,7 @@ toc_title: Adoptante | Exness | Comercio | Métricas, Registro | — | — | [Charla en ruso, mayo 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) | | Sistema abierto. 
| Red Ad | Producto principal | — | — | [Publicación de blog en japonés, julio 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) | | HUYA | Video Streaming | Analítica | — | — | [Diapositivas en chino, octubre 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | -| Idealista | Inmobiliario | Analítica | — | — | [Blog Post en Inglés, Abril 2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | +| Idealista | Inmobiliario | Analítica | — | — | [Blog Post en Inglés, Abril 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | | Infovista | Red | Analítica | — | — | [Diapositivas en español, octubre 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | | InnoGames | Juego | Métricas, Registro | — | — | [Diapositivas en ruso, septiembre 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | | Integros | Plataforma para servicios de video | Analítica | — | — | [Diapositivas en ruso, mayo 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | diff --git a/docs/es/sql-reference/data-types/domains/ipv4.md b/docs/es/sql-reference/data-types/domains/ipv4.md index c97229610d3..6e271f10fd2 100644 --- a/docs/es/sql-reference/data-types/domains/ipv4.md +++ b/docs/es/sql-reference/data-types/domains/ipv4.md @@ -33,7 +33,7 @@ CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from; `IPv4` domain admite formato de entrada personalizado como cadenas IPv4: ``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242'); +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.tech/docs/en/', '116.106.34.242'); SELECT * FROM hits; ``` diff --git a/docs/es/sql-reference/data-types/domains/ipv6.md b/docs/es/sql-reference/data-types/domains/ipv6.md index bee82ff2898..2f45a353053 100644 --- a/docs/es/sql-reference/data-types/domains/ipv6.md +++ b/docs/es/sql-reference/data-types/domains/ipv6.md @@ -33,7 +33,7 @@ CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from; `IPv6` domain admite entradas personalizadas como cadenas IPv6: ``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1'); +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.tech/docs/en/', '2a02:e980:1e::1'); SELECT * FROM hits; ``` diff --git a/docs/es/sql-reference/functions/array-functions.md b/docs/es/sql-reference/functions/array-functions.md index 3a0ad14b24e..677996efabd 100644 --- a/docs/es/sql-reference/functions/array-functions.md +++ b/docs/es/sql-reference/functions/array-functions.md @@ -702,13 +702,13 @@ arrayDifference(array) **Parámetros** -- `array` – [Matriz](https://clickhouse.yandex/docs/en/data_types/array/). +- `array` – [Matriz](https://clickhouse.tech/docs/en/data_types/array/). **Valores devueltos** Devuelve una matriz de diferencias entre los elementos adyacentes. 
-Tipo: [UInt\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#uint-ranges), [En\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#int-ranges), [Flotante\*](https://clickhouse.yandex/docs/en/data_types/float/). +Tipo: [UInt\*](https://clickhouse.tech/docs/en/data_types/int_uint/#uint-ranges), [En\*](https://clickhouse.tech/docs/en/data_types/int_uint/#int-ranges), [Flotante\*](https://clickhouse.tech/docs/en/data_types/float/). **Ejemplo** @@ -754,7 +754,7 @@ arrayDistinct(array) **Parámetros** -- `array` – [Matriz](https://clickhouse.yandex/docs/en/data_types/array/). +- `array` – [Matriz](https://clickhouse.tech/docs/en/data_types/array/). **Valores devueltos** diff --git a/docs/es/whats-new/changelog/2017.md b/docs/es/whats-new/changelog/2017.md index 97b2cafd198..33e48b0409f 100644 --- a/docs/es/whats-new/changelog/2017.md +++ b/docs/es/whats-new/changelog/2017.md @@ -26,7 +26,7 @@ Esta versión contiene correcciones de errores para la versión anterior 1.1.543 #### Novedad: {#new-features} - Clave de partición personalizada para la familia MergeTree de motores de tabla. -- [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) motor de mesa. +- [Kafka](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) motor de mesa. - Se agregó soporte para cargar [CatBoost](https://catboost.yandex/) modelos y aplicarlos a los datos almacenados en ClickHouse. - Se agregó soporte para zonas horarias con desplazamientos no enteros de UTC. - Se agregó soporte para operaciones aritméticas con intervalos de tiempo. diff --git a/docs/fa/introduction/adopters.md b/docs/fa/introduction/adopters.md index a4ad16faf6c..654f3a24736 100644 --- a/docs/fa/introduction/adopters.md +++ b/docs/fa/introduction/adopters.md @@ -37,7 +37,7 @@ toc_title: "\u067E\u0630\u06CC\u0631\u0627" | اعمال | بازرگانی | معیارهای ورود به سیستم | — | — | [بحث در روسیه, بیشتر 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) | | ژنی | شبکه تبلیغاتی | محصول اصلی | — | — | [پست وبلاگ در ژاپن, جولای 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) | | HUYA | جریان ویدیو | تجزیه و تحلیل | — | — | [اسلاید در چین, اکتبر 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | -| Idealista | املاک و مستغلات | تجزیه و تحلیل | — | — | [پست وبلاگ به زبان انگلیسی, مارس 2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | +| Idealista | املاک و مستغلات | تجزیه و تحلیل | — | — | [پست وبلاگ به زبان انگلیسی, مارس 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | | اینفویستا | شبکه ها | تجزیه و تحلیل | — | — | [اسلاید به زبان انگلیسی, اکتبر 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | | نام | بازی ها | معیارهای ورود به سیستم | — | — | [اسلاید در روسیه, سپتامبر 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | | پوششی | بستر های نرم افزاری برای خدمات تصویری | تجزیه و تحلیل | — | — | [اسلاید در روسیه, بیشتر 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | diff --git a/docs/fa/sql-reference/data-types/domains/ipv4.md b/docs/fa/sql-reference/data-types/domains/ipv4.md index 645e839f6d8..a010409d58b 100644 --- a/docs/fa/sql-reference/data-types/domains/ipv4.md +++ b/docs/fa/sql-reference/data-types/domains/ipv4.md @@ -33,7 +33,7 @@ CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY 
from; `IPv4` دامنه پشتیبانی از فرمت ورودی سفارشی به عنوان ایپو4 رشته: ``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242'); +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.tech/docs/en/', '116.106.34.242'); SELECT * FROM hits; ``` diff --git a/docs/fa/sql-reference/data-types/domains/ipv6.md b/docs/fa/sql-reference/data-types/domains/ipv6.md index 6677916c49b..64a9487cb07 100644 --- a/docs/fa/sql-reference/data-types/domains/ipv6.md +++ b/docs/fa/sql-reference/data-types/domains/ipv6.md @@ -33,7 +33,7 @@ CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from; `IPv6` دامنه پشتیبانی از ورودی های سفارشی به عنوان ایپو6 رشته: ``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1'); +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.tech/docs/en/', '2a02:e980:1e::1'); SELECT * FROM hits; ``` diff --git a/docs/fa/sql-reference/functions/array-functions.md b/docs/fa/sql-reference/functions/array-functions.md index 1988ed4266e..6f4e8326557 100644 --- a/docs/fa/sql-reference/functions/array-functions.md +++ b/docs/fa/sql-reference/functions/array-functions.md @@ -702,13 +702,13 @@ arrayDifference(array) **پارامترها** -- `array` – [& حذف](https://clickhouse.yandex/docs/en/data_types/array/). +- `array` – [& حذف](https://clickhouse.tech/docs/en/data_types/array/). **مقادیر بازگشتی** بازگرداندن مجموعه ای از تفاوت بین عناصر مجاور. -نوع: [اینترنت\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#int-ranges), [شناور\*](https://clickhouse.yandex/docs/en/data_types/float/). +نوع: [اینترنت\*](https://clickhouse.tech/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.tech/docs/en/data_types/int_uint/#int-ranges), [شناور\*](https://clickhouse.tech/docs/en/data_types/float/). **مثال** @@ -754,7 +754,7 @@ arrayDistinct(array) **پارامترها** -- `array` – [& حذف](https://clickhouse.yandex/docs/en/data_types/array/). +- `array` – [& حذف](https://clickhouse.tech/docs/en/data_types/array/). **مقادیر بازگشتی** diff --git a/docs/fa/whats-new/changelog/2017.md b/docs/fa/whats-new/changelog/2017.md index 939ed966c22..ea4946cf185 100644 --- a/docs/fa/whats-new/changelog/2017.md +++ b/docs/fa/whats-new/changelog/2017.md @@ -26,7 +26,7 @@ toc_title: '2017' #### ویژگی های جدید: {#new-features} - کلید پارتیشن بندی سفارشی برای خانواده ادغام موتورهای جدول. -- [کافکا](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) موتور جدول. +- [کافکا](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) موتور جدول. - اضافه شدن پشتیبانی برای بارگذاری [مانتو](https://catboost.yandex/) مدل ها و استفاده از داده های ذخیره شده در کلیک. - اضافه شدن پشتیبانی برای مناطق زمانی با شیپور خاموشی غیر عدد صحیح از مجموعه مقالات. - اضافه شدن پشتیبانی برای عملیات ریاضی با فواصل زمانی. 
diff --git a/docs/fr/introduction/adopters.md b/docs/fr/introduction/adopters.md index 833fc111fbe..e970c61955c 100644 --- a/docs/fr/introduction/adopters.md +++ b/docs/fr/introduction/adopters.md @@ -37,7 +37,7 @@ toc_title: Adoptant | Exness | Trading | Métriques, Journalisation | — | — | [Parler en russe, mai 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) | | Geniee | Réseau publicitaire | Produit principal | — | — | [Billet de Blog en japonais, juillet 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) | | HUYA | Le Streaming Vidéo | Analytics | — | — | [Diapositives en chinois, octobre 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | -| Idealista | Immobilier | Analytics | — | — | [Billet de Blog en anglais, avril 2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | +| Idealista | Immobilier | Analytics | — | — | [Billet de Blog en anglais, avril 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | | Infovista | Réseau | Analytics | — | — | [Diapositives en anglais, octobre 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | | InnoGames | Jeu | Métriques, Journalisation | — | — | [Diapositives en russe, septembre 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | | Integros | Plate-forme pour les services vidéo | Analytics | — | — | [Diapositives en russe, mai 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | diff --git a/docs/fr/sql-reference/data-types/domains/ipv4.md b/docs/fr/sql-reference/data-types/domains/ipv4.md index 7cf36c0aaef..12895992e77 100644 --- a/docs/fr/sql-reference/data-types/domains/ipv4.md +++ b/docs/fr/sql-reference/data-types/domains/ipv4.md @@ -33,7 +33,7 @@ CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from; `IPv4` le domaine prend en charge le format d'entrée personnalisé en tant que chaînes IPv4: ``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242'); +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.tech/docs/en/', '116.106.34.242'); SELECT * FROM hits; ``` diff --git a/docs/fr/sql-reference/data-types/domains/ipv6.md b/docs/fr/sql-reference/data-types/domains/ipv6.md index 1d0f3cd47fd..77510a950cb 100644 --- a/docs/fr/sql-reference/data-types/domains/ipv6.md +++ b/docs/fr/sql-reference/data-types/domains/ipv6.md @@ -33,7 +33,7 @@ CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from; `IPv6` le domaine prend en charge l'entrée personnalisée en tant que chaînes IPv6: ``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1'); +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.tech/docs/en/', '2a02:e980:1e::1'); SELECT * FROM hits; ``` diff --git a/docs/fr/sql-reference/functions/array-functions.md b/docs/fr/sql-reference/functions/array-functions.md index 5590774732d..ef09800614f 100644 --- 
a/docs/fr/sql-reference/functions/array-functions.md +++ b/docs/fr/sql-reference/functions/array-functions.md @@ -702,13 +702,13 @@ arrayDifference(array) **Paramètre** -- `array` – [Tableau](https://clickhouse.yandex/docs/en/data_types/array/). +- `array` – [Tableau](https://clickhouse.tech/docs/en/data_types/array/). **Valeurs renvoyées** Renvoie un tableau de différences entre les éléments adjacents. -Type: [UInt\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#int-ranges), [Flottant\*](https://clickhouse.yandex/docs/en/data_types/float/). +Type: [UInt\*](https://clickhouse.tech/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.tech/docs/en/data_types/int_uint/#int-ranges), [Flottant\*](https://clickhouse.tech/docs/en/data_types/float/). **Exemple** @@ -754,7 +754,7 @@ arrayDistinct(array) **Paramètre** -- `array` – [Tableau](https://clickhouse.yandex/docs/en/data_types/array/). +- `array` – [Tableau](https://clickhouse.tech/docs/en/data_types/array/). **Valeurs renvoyées** diff --git a/docs/fr/whats-new/changelog/2017.md b/docs/fr/whats-new/changelog/2017.md index be2cb7de9f4..c812f345fdd 100644 --- a/docs/fr/whats-new/changelog/2017.md +++ b/docs/fr/whats-new/changelog/2017.md @@ -26,7 +26,7 @@ Cette version contient des corrections de bugs pour la version précédente 1.1. #### Nouveauté: {#new-features} - Clé de partitionnement personnalisée pour la famille MergeTree des moteurs de table. -- [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) tableau moteur. +- [Kafka](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) tableau moteur. - Ajout du support pour le chargement [CatBoost](https://catboost.yandex/) modèles et les appliquer aux données stockées dans ClickHouse. - Ajout du support pour les fuseaux horaires avec des décalages non entiers de UTC. - Ajout du support pour les opérations arithmétiques avec des intervalles de temps. 
diff --git a/docs/ja/introduction/adopters.md b/docs/ja/introduction/adopters.md index 084b5034a62..a1a89f6795f 100644 --- a/docs/ja/introduction/adopters.md +++ b/docs/ja/introduction/adopters.md @@ -37,7 +37,7 @@ toc_title: "\u30A2\u30C0\u30D7\u30BF\u30FC" | Exness | 取引 | 指標、ロギング | — | — | [ロシア語で話す,May2019](https://youtu.be/_rpU-TvSfZ8?t=3215) | | 魔神 | 広告ネットワーク | 主な製品 | — | — | [ブログ投稿日本語,July2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) | | HUYA | ビデオストリーミング | 分析 | — | — | [中国語でのスライド,October2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | -| イデアリスタ | 不動産 | 分析 | — | — | [ブログ投稿英語,April2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | +| イデアリスタ | 不動産 | 分析 | — | — | [ブログ投稿英語,April2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | | インフォビスタ | ネット | 分析 | — | — | [2019年のスライド](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | | InnoGames | ゲーム | 指標、ロギング | — | — | [2019年ロシア](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | | インテグロス | Platformビデオサービス | 分析 | — | — | [ロシア語でのスライド,月2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | diff --git a/docs/ja/introduction/distinctive-features.md b/docs/ja/introduction/distinctive-features.md index 5cf44ee0002..88dc91e0a3b 100644 --- a/docs/ja/introduction/distinctive-features.md +++ b/docs/ja/introduction/distinctive-features.md @@ -69,4 +69,4 @@ ClickHouseには、精度を犠牲にしてパフォーマンスを得るため 2. 既に挿入されたデータの変更または削除を、高頻度かつ低遅延に行う機能はありません。 [GDPR](https://gdpr-info.eu)に準拠するなど、データをクリーンアップまたは変更するために、バッチ削除およびバッチ更新が利用可能です。 3. インデックスが疎であるため、ClickHouseは、キーで単一行を取得するようなクエリにはあまり適していません。 -[Original article](https://clickhouse.yandex/docs/en/introduction/distinctive_features/) +[Original article](https://clickhouse.tech/docs/en/introduction/distinctive_features/) diff --git a/docs/ja/introduction/history.md b/docs/ja/introduction/history.md index af5dc40145d..162ed3ba415 100644 --- a/docs/ja/introduction/history.md +++ b/docs/ja/introduction/history.md @@ -48,4 +48,4 @@ Yandex.Metricaには、Metrageと呼ばれるデータを集計するための OLAPServerの制限を取り除き、レポートのための非集計データを扱う問題を解決するために、私達は ClickHouse DBMSを開発しました。 -[Original article](https://clickhouse.yandex/docs/en/introduction/history/) +[Original article](https://clickhouse.tech/docs/en/introduction/history/) diff --git a/docs/ja/introduction/performance.md b/docs/ja/introduction/performance.md index d6404853ccd..7750a10c0ec 100644 --- a/docs/ja/introduction/performance.md +++ b/docs/ja/introduction/performance.md @@ -5,9 +5,9 @@ toc_title: "\u30D1\u30D5\u30A9\u30FC\u30DE\u30F3\u30B9" # パフォーマンス {#pahuomansu} -Yandexの内部テスト結果によると、ClickHouseは、テスト可能なクラスのシステム間で同等の動作シナリオで最高のパフォーマンス(長時間のクエリで最も高いスループットと、短時間のクエリで最小のレイテンシの両方)を示します。 [別のページで](https://clickhouse.yandex/benchmark/dbms/)テスト結果を表示できます 。 +Yandexの内部テスト結果によると、ClickHouseは、テスト可能なクラスのシステム間で同等の動作シナリオで最高のパフォーマンス(長時間のクエリで最も高いスループットと、短時間のクエリで最小のレイテンシの両方)を示します。 [別のページで](https://clickhouse.tech/benchmark/dbms/)テスト結果を表示できます 。 -これは、多数の独立したベンチマークでも確認されています。インターネット検索で見つけることは難しくありませんし、 [私達がまとめた関連リンク集](https://clickhouse.yandex/#independent-benchmarks) から見つけることもできます。 +これは、多数の独立したベンチマークでも確認されています。インターネット検索で見つけることは難しくありませんし、 [私達がまとめた関連リンク集](https://clickhouse.tech/#independent-benchmarks) から見つけることもできます。 ## 単一の巨大なクエリのスループット {#dan-yi-noju-da-nakuerinosurupututo} @@ -27,4 +27,4 @@ Yandexの内部テスト結果によると、ClickHouseは、テスト可能な 
少なくとも1000行のパケットにデータを挿入することをお勧めします。または、1秒あたり1回のリクエストを超えないでください。タブ区切りのダンプデータをMergeTreeテーブルに挿入する場合、挿入速度は50〜200MB/sになります。挿入された行のサイズが約1Kbの場合、速度は毎秒50,000〜200,000行になります。行が小さい場合、パフォーマンスは1秒あたりの行数で高くなります(Banner System データ- `>` 500,000行/秒、Graphite データ- `>` 1,000,000行/秒)。パフォーマンスを向上させるために、複数のINSERTクエリを並行して作成することで、パフォーマンスを線形に向上できます。 -[Original article](https://clickhouse.yandex/docs/ja/introduction/performance/) +[Original article](https://clickhouse.tech/docs/ja/introduction/performance/) diff --git a/docs/ja/sql-reference/data-types/domains/ipv4.md b/docs/ja/sql-reference/data-types/domains/ipv4.md index e355ae4f70f..c329028ad40 100644 --- a/docs/ja/sql-reference/data-types/domains/ipv4.md +++ b/docs/ja/sql-reference/data-types/domains/ipv4.md @@ -33,7 +33,7 @@ CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from; `IPv4` ドメインはIPv4文字列としてカスタム入力形式をサポート: ``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242'); +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.tech/docs/en/', '116.106.34.242'); SELECT * FROM hits; ``` diff --git a/docs/ja/sql-reference/data-types/domains/ipv6.md b/docs/ja/sql-reference/data-types/domains/ipv6.md index 73227e7a2b7..26583429ec8 100644 --- a/docs/ja/sql-reference/data-types/domains/ipv6.md +++ b/docs/ja/sql-reference/data-types/domains/ipv6.md @@ -33,7 +33,7 @@ CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from; `IPv6` ドメイ: ``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1'); +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.tech/docs/en/', '2a02:e980:1e::1'); SELECT * FROM hits; ``` diff --git a/docs/ja/sql-reference/functions/array-functions.md b/docs/ja/sql-reference/functions/array-functions.md index 5a70770a54b..bd30262cc1e 100644 --- a/docs/ja/sql-reference/functions/array-functions.md +++ b/docs/ja/sql-reference/functions/array-functions.md @@ -702,13 +702,13 @@ arrayDifference(array) **パラメータ** -- `array` – [配列](https://clickhouse.yandex/docs/en/data_types/array/). +- `array` – [配列](https://clickhouse.tech/docs/en/data_types/array/). **戻り値** 隣接する要素間の差分の配列を返します。 -タイプ: [UInt\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#int-ranges), [フロート\*](https://clickhouse.yandex/docs/en/data_types/float/). +タイプ: [UInt\*](https://clickhouse.tech/docs/en/data_types/int_uint/#uint-ranges), [Int\*](https://clickhouse.tech/docs/en/data_types/int_uint/#int-ranges), [フロート\*](https://clickhouse.tech/docs/en/data_types/float/). **例** @@ -754,7 +754,7 @@ arrayDistinct(array) **パラメータ** -- `array` – [配列](https://clickhouse.yandex/docs/en/data_types/array/). +- `array` – [配列](https://clickhouse.tech/docs/en/data_types/array/). 
**戻り値** diff --git a/docs/ja/whats-new/changelog/2017.md b/docs/ja/whats-new/changelog/2017.md index ada7b74e431..9561062f31d 100644 --- a/docs/ja/whats-new/changelog/2017.md +++ b/docs/ja/whats-new/changelog/2017.md @@ -26,7 +26,7 @@ toc_title: '2017' #### 新しい機能: {#new-features} - カスタムパーティショニングキーのMergeTree家族のテーブルエンジンです。 -- [カフカ](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) テーブルエンジン。 +- [カフカ](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) テーブルエンジン。 - ロードのサポートを追加 [CatBoost](https://catboost.yandex/) モデルとClickHouseに格納されたデータにそれらを適用します。 - サポートが追加された時間帯と非整数オフセットからのUTCです。 - 時間間隔での算術演算のサポートが追加されました。 diff --git a/docs/ru/sql-reference/data-types/domains/ipv4.md b/docs/ru/sql-reference/data-types/domains/ipv4.md index 2903404774b..68b67bcca60 100644 --- a/docs/ru/sql-reference/data-types/domains/ipv4.md +++ b/docs/ru/sql-reference/data-types/domains/ipv4.md @@ -26,7 +26,7 @@ CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from; `IPv4` поддерживает вставку в виде строк с текстовым представлением IPv4 адреса: ``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242'); +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.tech/docs/en/', '116.106.34.242'); SELECT * FROM hits; ``` diff --git a/docs/ru/sql-reference/data-types/domains/ipv6.md b/docs/ru/sql-reference/data-types/domains/ipv6.md index 045a2ad1960..c88ee74adea 100644 --- a/docs/ru/sql-reference/data-types/domains/ipv6.md +++ b/docs/ru/sql-reference/data-types/domains/ipv6.md @@ -26,7 +26,7 @@ CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from; `IPv6` поддерживает вставку в виде строк с текстовым представлением IPv6 адреса: ``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1'); +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.tech/docs/en/', '2a02:e980:1e::1'); SELECT * FROM hits; ``` diff --git a/docs/ru/sql-reference/functions/array-functions.md b/docs/ru/sql-reference/functions/array-functions.md index 71b6bda47d0..7abebc6a059 100644 --- a/docs/ru/sql-reference/functions/array-functions.md +++ b/docs/ru/sql-reference/functions/array-functions.md @@ -692,7 +692,7 @@ arrayDifference(array) **Параметры** -- `array` – [Массив](https://clickhouse.yandex/docs/ru/data_types/array/). +- `array` – [Массив](https://clickhouse.tech/docs/ru/data_types/array/). **Возвращаемое значение** @@ -742,7 +742,7 @@ arrayDistinct(array) **Параметры** -- `array` – [Массив](https://clickhouse.yandex/docs/ru/data_types/array/). +- `array` – [Массив](https://clickhouse.tech/docs/ru/data_types/array/). 
**Возвращаемое значение** diff --git a/docs/tools/blog.py b/docs/tools/blog.py new file mode 100644 index 00000000000..f5415bec608 --- /dev/null +++ b/docs/tools/blog.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python3 +import datetime +import logging +import os +import time + +import nav # monkey patches mkdocs + +import mkdocs.commands +from mkdocs import config +from mkdocs import exceptions + +import mdx_clickhouse +import redirects + +import util + + +def build_for_lang(lang, args): + logging.info(f'Building {lang} blog') + + try: + theme_cfg = { + 'name': None, + 'custom_dir': os.path.join(os.path.dirname(__file__), '..', args.theme_dir), + 'language': lang, + 'direction': 'ltr', + 'static_templates': ['404.html'], + 'extra': { + 'now': int(time.mktime(datetime.datetime.now().timetuple())) # TODO better way to avoid caching + } + } + + # the following list of languages is sorted according to + # https://en.wikipedia.org/wiki/List_of_languages_by_total_number_of_speakers + languages = { + 'en': 'English', + 'ru': 'Русский' + } + + site_names = { + 'en': 'ClickHouse Blog', + 'ru': 'Блог ClickHouse ' + } + + assert len(site_names) == len(languages) + + site_dir = os.path.join(args.blog_output_dir, lang) + + plugins = ['macros'] + if args.htmlproofer: + plugins.append('htmlproofer') + + website_url = 'https://clickhouse.tech' + site_name = site_names.get(lang, site_names['en']) + blog_nav, post_meta = nav.build_blog_nav(lang, args) + raw_config = dict( + site_name=site_name, + site_url=f'{website_url}/blog/{lang}/', + docs_dir=os.path.join(args.blog_dir, lang), + site_dir=site_dir, + strict=True, + theme=theme_cfg, + nav=blog_nav, + copyright='©2016–2020 Yandex LLC', + use_directory_urls=True, + repo_name='ClickHouse/ClickHouse', + repo_url='https://github.com/ClickHouse/ClickHouse/', + edit_uri=f'edit/master/website/blog/{lang}', + markdown_extensions=mdx_clickhouse.MARKDOWN_EXTENSIONS, + plugins=plugins, + extra=dict( + now=datetime.datetime.now().isoformat(), + rev=args.rev, + rev_short=args.rev_short, + rev_url=args.rev_url, + website_url=website_url, + events=args.events, + languages=languages, + includes_dir=os.path.join(os.path.dirname(__file__), '..', '_includes'), + is_amp=False, + is_blog=True, + post_meta=post_meta + ) + ) + + cfg = config.load_config(**raw_config) + mkdocs.commands.build.build(cfg) + + redirects.build_blog_redirects(args) + + # TODO: AMP for blog + # if not args.skip_amp: + # amp.build_amp(lang, args, cfg) + + logging.info(f'Finished building {lang} blog') + + except exceptions.ConfigurationError as e: + raise SystemExit('\n' + str(e)) + + +def build_blog(args): + tasks = [] + for lang in args.blog_lang.split(','): + if lang: + tasks.append((lang, args,)) + util.run_function_in_parallel(build_for_lang, tasks, threads=False) diff --git a/docs/tools/build.py b/docs/tools/build.py index b7ddbc29629..1c8165fb36f 100755 --- a/docs/tools/build.py +++ b/docs/tools/build.py @@ -20,8 +20,8 @@ from mkdocs import exceptions import mkdocs.commands.build import amp +import blog import mdx_clickhouse - import redirects import single_page import test @@ -95,25 +95,6 @@ def build_for_lang(lang, args): else: site_dir = os.path.join(args.docs_output_dir, lang) - markdown_extensions = [ - 'mdx_clickhouse', - 'admonition', - 'attr_list', - 'codehilite', - 'nl2br', - 'sane_lists', - 'pymdownx.details', - 'pymdownx.magiclink', - 'pymdownx.superfences', - 'extra', - { - 'toc': { - 'permalink': True, - 'slugify': mdx_clickhouse.slugify - } - } - ] - plugins = ['macros'] if 
args.htmlproofer: plugins.append('htmlproofer') @@ -133,7 +114,7 @@ def build_for_lang(lang, args): repo_name='ClickHouse/ClickHouse', repo_url='https://github.com/ClickHouse/ClickHouse/', edit_uri=f'edit/master/docs/{lang}', - markdown_extensions=markdown_extensions, + markdown_extensions=mdx_clickhouse.MARKDOWN_EXTENSIONS, plugins=plugins, extra=dict( now=datetime.datetime.now().isoformat(), @@ -147,14 +128,15 @@ def build_for_lang(lang, args): events=args.events, languages=languages, includes_dir=os.path.join(os.path.dirname(__file__), '..', '_includes'), - is_amp=False + is_amp=False, + is_blog=False ) ) if os.path.exists(config_path): raw_config['config_file'] = config_path else: - raw_config['nav'] = nav.build_nav(lang, args) + raw_config['nav'] = nav.build_docs_nav(lang, args) cfg = config.load_config(**raw_config) @@ -187,7 +169,7 @@ def build_docs(args): if lang: tasks.append((lang, args,)) util.run_function_in_parallel(build_for_lang, tasks, threads=False) - redirects.build_redirects(args) + redirects.build_docs_redirects(args) def build(args): @@ -204,6 +186,9 @@ def build(args): from github import build_releases build_releases(args, build_docs) + if not args.skip_blog: + blog.build_blog(args) + if not args.skip_website: website.process_benchmark_results(args) website.minify_website(args) @@ -215,9 +200,11 @@ if __name__ == '__main__': website_dir = os.path.join('..', 'website') arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--lang', default='en,es,fr,ru,zh,ja,tr,fa') + arg_parser.add_argument('--blog-lang', default='en,ru') arg_parser.add_argument('--docs-dir', default='.') arg_parser.add_argument('--theme-dir', default=website_dir) arg_parser.add_argument('--website-dir', default=website_dir) + arg_parser.add_argument('--blog-dir', default=os.path.join(website_dir, 'blog')) arg_parser.add_argument('--output-dir', default='build') arg_parser.add_argument('--enable-stable-releases', action='store_true') arg_parser.add_argument('--stable-releases-limit', type=int, default='3') @@ -230,6 +217,7 @@ if __name__ == '__main__': arg_parser.add_argument('--skip-amp', action='store_true') arg_parser.add_argument('--skip-pdf', action='store_true') arg_parser.add_argument('--skip-website', action='store_true') + arg_parser.add_argument('--skip-blog', action='store_true') arg_parser.add_argument('--skip-git-log', action='store_true') arg_parser.add_argument('--test-only', action='store_true') arg_parser.add_argument('--minify', action='store_true') @@ -249,6 +237,7 @@ if __name__ == '__main__': logging.getLogger('MARKDOWN').setLevel(logging.INFO) args.docs_output_dir = os.path.join(os.path.abspath(args.output_dir), 'docs') + args.blog_output_dir = os.path.join(os.path.abspath(args.output_dir), 'blog') from github import choose_latest_releases, get_events args.stable_releases = choose_latest_releases(args) if args.enable_stable_releases else [] @@ -259,6 +248,7 @@ if __name__ == '__main__': if args.test_only: args.skip_multi_page = True + args.skip_blog = True args.skip_website = True args.skip_pdf = True args.skip_amp = True diff --git a/docs/tools/mdx_clickhouse.py b/docs/tools/mdx_clickhouse.py index 393658be2d7..5ea93002cd2 100755 --- a/docs/tools/mdx_clickhouse.py +++ b/docs/tools/mdx_clickhouse.py @@ -18,6 +18,30 @@ import amp import website +def slugify(value, separator): + return slugify_impl.slugify(value, separator=separator, word_boundary=True, save_order=True) + + +MARKDOWN_EXTENSIONS = [ + 'mdx_clickhouse', + 'admonition', + 'attr_list', + 'codehilite', + 
'nl2br', + 'sane_lists', + 'pymdownx.details', + 'pymdownx.magiclink', + 'pymdownx.superfences', + 'extra', + { + 'toc': { + 'permalink': True, + 'slugify': slugify + } + } +] + + class ClickHouseLinkMixin(object): def handleMatch(self, m, data): @@ -72,10 +96,6 @@ def makeExtension(**kwargs): return ClickHouseMarkdown(**kwargs) -def slugify(value, separator): - return slugify_impl.slugify(value, separator=separator, word_boundary=True, save_order=True) - - def get_translations(dirname, lang): import babel.support return babel.support.Translations.load( diff --git a/docs/tools/nav.py b/docs/tools/nav.py index 3c4fd304bd3..71bd2d8052f 100644 --- a/docs/tools/nav.py +++ b/docs/tools/nav.py @@ -1,4 +1,5 @@ import collections +import datetime import logging import os @@ -19,7 +20,8 @@ def build_nav_entry(root, args): return None, None, None result_items = [] index_meta, index_content = util.read_md_file(os.path.join(root, 'index.md')) - current_title = index_meta.get('toc_folder_title', index_meta.get('toc_title', find_first_header(index_content))) + current_title = index_meta.get('toc_folder_title', index_meta.get('toc_title')) + current_title = current_title or index_meta.get('title', find_first_header(index_content)) for filename in os.listdir(root): path = os.path.join(root, filename) if os.path.isdir(path): @@ -47,7 +49,7 @@ def build_nav_entry(root, args): return index_meta.get('toc_priority', 10000), current_title, result -def build_nav(lang, args): +def build_docs_nav(lang, args): docs_dir = os.path.join(args.docs_dir, lang) _, _, nav = build_nav_entry(docs_dir, args) result = [] @@ -64,10 +66,50 @@ def build_nav(lang, args): key = list(result[0].keys())[0] result[0][key][index_key] = 'index.md' result[0][key].move_to_end(index_key, last=False) - print('result', result) return result +def build_blog_nav(lang, args): + blog_dir = os.path.join(args.blog_dir, lang) + years = sorted(os.listdir(blog_dir), reverse=True) + result_nav = [{'hidden': 'index.md'}] + post_meta = collections.OrderedDict() + for year in years: + year_dir = os.path.join(blog_dir, year) + if not os.path.isdir(year_dir): + continue + result_nav.append({year: collections.OrderedDict()}) + posts = [] + post_meta_items = [] + for post in os.listdir(year_dir): + meta, _ = util.read_md_file(os.path.join(year_dir, post)) + post_date = meta['date'] + post_title = meta['title'] + if datetime.date.fromisoformat(post_date) > datetime.date.today(): + continue + posts.append( + (post_date, post_title, os.path.join(year, post),) + ) + if post_title in post_meta: + raise RuntimeError(f'Duplicate post title: {post_title}') + if not post_date.startswith(f'{year}-'): + raise RuntimeError(f'Post date {post_date} doesn\'t match the folder year {year}: {post_title}') + post_url_part = post.replace('.md', '') + post_meta_items.append((post_date, { + 'date': post_date, + 'title': post_title, + 'image': meta.get('image'), + 'url': f'/blog/{lang}/{year}/{post_url_part}/' + },)) + for _, title, path in sorted(posts, reverse=True): + result_nav[-1][year][title] = path + for _, post_meta_item in sorted(post_meta_items, + reverse=True, + key=lambda item: item[0]): + post_meta[post_meta_item['title']] = post_meta_item + return result_nav, post_meta + + def _custom_get_navigation(files, config): nav_config = config['nav'] or mkdocs.structure.nav.nest_paths(f.src_path for f in files.documentation_pages()) items = mkdocs.structure.nav._data_to_navigation(nav_config, files, config) diff --git a/docs/tools/redirects.py b/docs/tools/redirects.py index 
fc4d60aaf5a..2f5ebc8a620 100644 --- a/docs/tools/redirects.py +++ b/docs/tools/redirects.py @@ -25,24 +25,34 @@ def write_redirect_html(out_path, to_url): ''') -def build_redirect_html(args, from_path, to_path): - for lang in args.lang.split(','): - out_path = os.path.join( - args.docs_output_dir, lang, - from_path.replace('/index.md', '/index.html').replace('.md', '/index.html') - ) - version_prefix = f'/{args.version_prefix}/' if args.version_prefix else '/' - target_path = to_path.replace('/index.md', '/').replace('.md', '/') - to_url = f'/docs{version_prefix}{lang}/{target_path}' - to_url = to_url.strip() - write_redirect_html(out_path, to_url) +def build_redirect_html(args, base_prefix, lang, output_dir, from_path, to_path): + out_path = os.path.join( + output_dir, lang, + from_path.replace('/index.md', '/index.html').replace('.md', '/index.html') + ) + version_prefix = f'/{args.version_prefix}/' if args.version_prefix else '/' + target_path = to_path.replace('/index.md', '/').replace('.md', '/') + to_url = f'/{base_prefix}{version_prefix}{lang}/{target_path}' + to_url = to_url.strip() + write_redirect_html(out_path, to_url) -def build_redirects(args): +def build_docs_redirects(args): with open(os.path.join(args.docs_dir, 'redirects.txt'), 'r') as f: for line in f: - from_path, to_path = line.split(' ', 1) - build_redirect_html(args, from_path, to_path) + for lang in args.lang.split(','): + from_path, to_path = line.split(' ', 1) + build_redirect_html(args, 'docs', lang, args.docs_output_dir, from_path, to_path) + + +def build_blog_redirects(args): + for lang in args.blog_lang.split(','): + redirects_path = os.path.join(args.blog_dir, lang, 'redirects.txt') + if os.path.exists(redirects_path): + with open(redirects_path, 'r') as f: + for line in f: + from_path, to_path = line.split(' ', 1) + build_redirect_html(args, 'blog', lang, args.blog_output_dir, from_path, to_path) def build_static_redirects(args): diff --git a/docs/tools/website.py b/docs/tools/website.py index ed950bd06e3..6d4803158a4 100644 --- a/docs/tools/website.py +++ b/docs/tools/website.py @@ -17,20 +17,56 @@ import jsmin import mdx_clickhouse +def handle_iframe(iframe, soup): + if not iframe.attrs['src'].startswith('https://www.youtube.com/'): + raise RuntimeError('iframes are allowed only for YouTube') + wrapper = soup.new_tag('div') + wrapper.attrs['class'] = ['embed-responsive', 'embed-responsive-16by9'] + iframe.insert_before(wrapper) + iframe.extract() + wrapper.insert(0, iframe) + if 'width' in iframe.attrs: + del iframe.attrs['width'] + if 'height' in iframe.attrs: + del iframe.attrs['height'] + iframe.attrs['allow'] = 'accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture' + iframe.attrs['class'] = 'embed-responsive-item' + iframe.attrs['frameborder'] = '0' + iframe.attrs['allowfullscreen'] = '1' + + def adjust_markdown_html(content): soup = bs4.BeautifulSoup( content, features='html.parser' ) + for a in soup.find_all('a'): a_class = a.attrs.get('class') if a_class and 'headerlink' in a_class: a.string = '\xa0' + + for iframe in soup.find_all('iframe'): + handle_iframe(iframe, soup) + + for img in soup.find_all('img'): + if img.attrs.get('alt') == 'iframe': + img.name = 'iframe' + img.string = '' + handle_iframe(img, soup) + continue + img_class = img.attrs.get('class') + if img_class: + img.attrs['class'] = img_class + ['img-fluid'] + else: + img.attrs['class'] = 'img-fluid' + for details in soup.find_all('details'): for summary in details.find_all('summary'): if summary.parent != details: 
summary.extract() details.insert(0, summary) + for div in soup.find_all('div'): div_class = div.attrs.get('class') is_admonition = div_class and 'admonition' in div.attrs.get('class') @@ -41,10 +77,12 @@ def adjust_markdown_html(content): a.attrs['class'] = a_class + ['alert-link'] else: a.attrs['class'] = 'alert-link' + for p in div.find_all('p'): p_class = p.attrs.get('class') if is_admonition and p_class and ('admonition-title' in p_class): p.attrs['class'] = p_class + ['alert-heading', 'display-6', 'mb-2'] + if is_admonition: div.attrs['role'] = 'alert' if ('info' in div_class) or ('note' in div_class): @@ -136,6 +174,7 @@ def get_css_in(args): f"'{args.website_dir}/css/bootstrap.css'", f"'{args.website_dir}/css/docsearch.css'", f"'{args.website_dir}/css/base.css'", + f"'{args.website_dir}/css/blog.css'", f"'{args.website_dir}/css/docs.css'", f"'{args.website_dir}/css/highlight.css'" ] diff --git a/docs/tr/introduction/adopters.md b/docs/tr/introduction/adopters.md index 444902e0b96..1da65ebb903 100644 --- a/docs/tr/introduction/adopters.md +++ b/docs/tr/introduction/adopters.md @@ -37,7 +37,7 @@ toc_title: Benimseyenler | Exness | Ticaret | Metrikler, Günlük Kaydı | — | — | [Rusça konuşun, Mayıs 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) | | Geniee | Reklam Ağı | Ana ürün | — | — | [Japonca Blog yazısı, Temmuz 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) | | HUYA | Video Akışı | Analiz | — | — | [Çince slaytlar, Ekim 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | -| Idealista | Emlak | Analiz | — | — | [İngilizce Blog yazısı, Nisan 2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | +| Idealista | Emlak | Analiz | — | — | [İngilizce Blog yazısı, Nisan 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | | Infovista | Ağlar | Analiz | — | — | [İngilizce slaytlar, Ekim 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | | Innogames | Oyun | Metrikler, Günlük Kaydı | — | — | [Rusça slaytlar, Eylül 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | | Integros | Video hizmetleri platformu | Analiz | — | — | [Rusça slaytlar, Mayıs 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | diff --git a/docs/tr/sql-reference/data-types/domains/ipv4.md b/docs/tr/sql-reference/data-types/domains/ipv4.md index 22ca6e7240c..4caf031c0c3 100644 --- a/docs/tr/sql-reference/data-types/domains/ipv4.md +++ b/docs/tr/sql-reference/data-types/domains/ipv4.md @@ -33,7 +33,7 @@ CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from; `IPv4` etki alanı IPv4 dizeleri olarak özel giriş biçimini destekler: ``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242'); +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.tech/docs/en/', '116.106.34.242'); SELECT * FROM hits; ``` diff --git a/docs/tr/sql-reference/data-types/domains/ipv6.md b/docs/tr/sql-reference/data-types/domains/ipv6.md index 642fe397e52..7f721cc07f6 100644 --- a/docs/tr/sql-reference/data-types/domains/ipv6.md +++ b/docs/tr/sql-reference/data-types/domains/ipv6.md @@ -33,7 +33,7 @@ CREATE 
TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from; `IPv6` etki alanı IPv6 dizeleri olarak özel girişi destekler: ``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1'); +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.tech/docs/en/', '2a02:e980:1e::1'); SELECT * FROM hits; ``` diff --git a/docs/tr/sql-reference/functions/array-functions.md b/docs/tr/sql-reference/functions/array-functions.md index 9ecb255ebbe..9638481db52 100644 --- a/docs/tr/sql-reference/functions/array-functions.md +++ b/docs/tr/sql-reference/functions/array-functions.md @@ -702,13 +702,13 @@ arrayDifference(array) **Parametre** -- `array` – [Dizi](https://clickhouse.yandex/docs/en/data_types/array/). +- `array` – [Dizi](https://clickhouse.tech/docs/en/data_types/array/). **Döndürülen değerler** Bitişik öğeler arasındaki farklar dizisini döndürür. -Tür: [Uİnt\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#uint-ranges), [Tamsayı\*](https://clickhouse.yandex/docs/en/data_types/int_uint/#int-ranges), [Yüzdürmek\*](https://clickhouse.yandex/docs/en/data_types/float/). +Tür: [Uİnt\*](https://clickhouse.tech/docs/en/data_types/int_uint/#uint-ranges), [Tamsayı\*](https://clickhouse.tech/docs/en/data_types/int_uint/#int-ranges), [Yüzdürmek\*](https://clickhouse.tech/docs/en/data_types/float/). **Örnek** @@ -754,7 +754,7 @@ arrayDistinct(array) **Parametre** -- `array` – [Dizi](https://clickhouse.yandex/docs/en/data_types/array/). +- `array` – [Dizi](https://clickhouse.tech/docs/en/data_types/array/). **Döndürülen değerler** diff --git a/docs/tr/whats-new/changelog/2017.md b/docs/tr/whats-new/changelog/2017.md index 98643fe449a..1011ebadb84 100644 --- a/docs/tr/whats-new/changelog/2017.md +++ b/docs/tr/whats-new/changelog/2017.md @@ -26,7 +26,7 @@ Bu sürüm önceki sürüm 1.1.54310 için hata düzeltmeleri içerir: #### Yenilik: {#new-features} - Tablo motorları MergeTree ailesi için özel bölümleme anahtarı. -- [Kafka](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) masa motoru. +- [Kafka](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) masa motoru. - Yükleme için destek eklendi [CatBoost](https://catboost.yandex/) modelleri ve ClickHouse saklanan verilere uygulayarak. - UTC olmayan tamsayı uzaklıklar ile saat dilimleri için destek eklendi. - Zaman aralıklarıyla aritmetik işlemler için destek eklendi. 
diff --git a/docs/zh/introduction/adopters.md b/docs/zh/introduction/adopters.md index 895ec961751..38b9ca690e3 100644 --- a/docs/zh/introduction/adopters.md +++ b/docs/zh/introduction/adopters.md @@ -35,7 +35,7 @@ toc_title: "\u91C7\u7528\u8005" | [Exness](https://www.exness.com) | 交易 | 指标,日志记录 | — | — | [俄语交谈,2019年5月](https://youtu.be/_rpU-TvSfZ8?t=3215) | | [精灵](https://geniee.co.jp) | 广告网络 | 主要产品 | — | — | [日文博客,2017年7月](https://tech.geniee.co.jp/entry/2017/07/20/160100) | | [虎牙](https://www.huya.com/) | 视频流 | 分析 | — | — | [中文幻灯片,2018年10月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | -| [Idealista](https://www.idealista.com) | 房地产 | 分析 | — | — | [英文博客文章,四月2019](https://clickhouse.yandex/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | +| [Idealista](https://www.idealista.com) | 房地产 | 分析 | — | — | [英文博客文章,四月2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | | [Infovista](https://www.infovista.com/) | 网络 | 分析 | — | — | [英文幻灯片,十月2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | | [InnoGames](https://www.innogames.com) | 游戏 | 指标,日志记录 | — | — | [俄文幻灯片,2019年9月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | | [Integros](https://integros.com) | 视频服务平台 | 分析 | — | — | [俄文幻灯片,2019年5月](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | diff --git a/docs/zh/sql-reference/data-types/domains/ipv4.md b/docs/zh/sql-reference/data-types/domains/ipv4.md index 65c066fb487..9ce12025405 100644 --- a/docs/zh/sql-reference/data-types/domains/ipv4.md +++ b/docs/zh/sql-reference/data-types/domains/ipv4.md @@ -24,7 +24,7 @@ CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from; 在写入与查询时,`IPv4`类型能够识别可读性更加友好的输入输出格式: ``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242'); +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.tech', '183.247.232.58')('https://clickhouse.tech/docs/en/', '116.106.34.242'); SELECT * FROM hits; ``` diff --git a/docs/zh/sql-reference/data-types/domains/ipv6.md b/docs/zh/sql-reference/data-types/domains/ipv6.md index bc0f95932aa..5b1afc2cd39 100644 --- a/docs/zh/sql-reference/data-types/domains/ipv6.md +++ b/docs/zh/sql-reference/data-types/domains/ipv6.md @@ -24,7 +24,7 @@ CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from; 在写入与查询时,`IPv6`类型能够识别可读性更加友好的输入输出格式: ``` sql -INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1'); +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.tech', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.tech/docs/en/', '2a02:e980:1e::1'); SELECT * FROM hits; ``` diff --git a/docs/zh/whats-new/changelog/2017.md b/docs/zh/whats-new/changelog/2017.md index de62730b093..35d839c50c9 100644 --- a/docs/zh/whats-new/changelog/2017.md +++ b/docs/zh/whats-new/changelog/2017.md @@ -26,7 +26,7 @@ toc_title: '2017' #### 新功能: {#new-features} - MergeTree表引擎系列的自定义分区键。 -- [卡夫卡](https://clickhouse.yandex/docs/en/operations/table_engines/kafka/) 表引擎。 +- 
[卡夫卡](https://clickhouse.tech/docs/en/operations/table_engines/kafka/) 表引擎。
 - 增加了对加载的支持 [CatBoost](https://catboost.yandex/) 模型并将其应用到ClickHouse中存储的数据。
 - 增加了对UTC非整数偏移的时区的支持。
 - 增加了对具有时间间隔的算术运算的支持。
diff --git a/website/blog/README.md b/website/blog/README.md
new file mode 100644
index 00000000000..89d7ddfad57
--- /dev/null
+++ b/website/blog/README.md
@@ -0,0 +1,47 @@
+## Introduction
+
+First of all, **relevant guest posts are welcome**! Especially with success stories or demonstrations of ClickHouse ecosystem projects.
+
+The ClickHouse blog is published alongside the documentation and the rest of the official website, so the posts reside in this same repository in [Markdown](https://github.com/ClickHouse/ClickHouse/tree/master/docs#markdown-cheatsheet) format.
+
+## How To Add a New Post?
+
+Create a new Markdown file in the repository at `/website/blog/<lang>/<year>/<post-slug>.md` and then [open a pull-request](https://github.com/ClickHouse/ClickHouse/compare) with it.
+
+Each post needs a `yaml` meta-header with the following fields:
+
+- Required:
+    - `title`, the main name of the article. In Title Case for English.
+    - `date`, publication date in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format, like `YYYY-MM-DD` (can be in the future to postpone publication).
+- Optional:
+    - `image`, URL to the main post image.
+    - `tags`, list of post tags.
+
+After the header comes the post content in normal Markdown (with some optional extensions).
+
+The recommended place to store images is a separate GitHub repository; its folder structure matches this folder with blog posts:
+
+- `<lang>/<year>/<post-slug>/main.jpg` for the main post image (linked in the `image` header field).
+- `<lang>/<year>/<post-slug>/whatever.jpg` for other images (`png` or `gif` are acceptable as well, if necessary).
+
+### Example
+```markdown
+---
+title: 'ClickHouse Meetup in Beijing on June 8, 2019'
+image: 'https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-beijing-on-june-8-2019/main.jpg'
+date: '2019-06-13'
+tags: ['meetup','Beijing','China','events']
+---
+
+24th ClickHouse Meetup globally and 3rd one in China took place in Beijing on Dragon Boat Festival weekend, which appeared to...
+
+![ClickHouse branded Beijing duck](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-beijing-on-june-8-2019/9.jpg)
+```
+
+## How To Add a New Blog Language?
+
+If you want to write a guest post, you are welcome to use your native language or to make the same post in multiple languages.
+
+Unlike the documentation, blog languages are independent: they have only partially overlapping sets of posts, and that's fine. Most posts are written in a single language because they are not relevant to audiences of other languages.
+
+At the moment it's not so straightforward to set up a new language for the blog and it won't be documented for now, but you can just create a language directory with the first post as described above and we'll configure the website infrastructure to include it during/after merging the pull-request.
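As a quick aid for contributors, here is a minimal, hypothetical helper that checks a post's meta-header against the rules above before opening a pull-request. It is not part of this patch, the file and function names are made up, and it assumes PyYAML is available (as it already is for the docs build tooling).

```python
#!/usr/bin/env python3
# check_post.py -- hypothetical contributor-side check, not part of the build scripts.
import datetime
import sys

import yaml  # PyYAML


def check_post_meta(path):
    with open(path) as f:
        text = f.read()

    # The meta-header sits between the first two '---' delimiters.
    _, header, _ = text.split('---', 2)
    meta = yaml.safe_load(header)

    # Required fields.
    for field in ('title', 'date'):
        if field not in meta:
            raise ValueError(f'{path}: required front-matter field "{field}" is missing')

    # 'date' must be ISO 8601 (YYYY-MM-DD); a future date just postpones publication.
    post_date = datetime.date.fromisoformat(str(meta['date']))
    if post_date > datetime.date.today():
        print(f'{path}: dated {post_date}, the post will be skipped until that day')

    return meta


if __name__ == '__main__':
    check_post_meta(sys.argv[1])
```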
diff --git a/website/blog/en/2016/evolution-of-data-structures-in-yandex-metrica.md b/website/blog/en/2016/evolution-of-data-structures-in-yandex-metrica.md new file mode 100644 index 00000000000..44739a24191 --- /dev/null +++ b/website/blog/en/2016/evolution-of-data-structures-in-yandex-metrica.md @@ -0,0 +1,108 @@ +--- +title: 'Evolution of Data Structures in Yandex.Metrica' +image: 'https://blog-images.clickhouse.tech/en/2016/evolution-of-data-structures-in-yandex-metrica/main.jpg' +date: '2016-12-13' +tags: ['Yandex.Metrica', 'data structures', 'LSM tree', 'columnar storage'] +--- + +[Yandex.Metrica](https://metrica.yandex.com/) takes in a stream of data representing events that took place on sites or on apps. Our task is to keep this data and present it in an analyzable form. The real challenge lies in trying to determine what form the processed results should be saved in so that they are easy to work with. During the development process, we had to completely change our approach to data storage organization several times. We started with MyISAM tables, then used LSM-trees and eventually came up with column-oriented database, ClickHouse. + +At its founding, Metrica was designed as an offshoot of Yandex.Direct, the search ads service. MySQL tables with MyISAM engine were used in Direct to store statistics and it was natural to use same approach in Metrica. Initially Yandex.Metrica for websites had more than 40 “fixed” report types (for example, the visitor geography report), several in-page analytics tools (like click maps), Webvisor (tool to study individual user actions in great detail), as well as the separate report constructor. But with time to keep up with business goals the system had to become more flexible and provide more customization opportunities for customers. Nowadays instead of using fixed reports Metrica allows to freely add new dimensions (for example, in a keyword report you can break data down further by landing page), segment and compare (between, let's say, traffic sources for all visitors vs. visitors from Moscow), change your set of metrics, etc. These features demanded a completely different approach to data storage than what we used with MyISAM, we will further discuss this transition from technical perspective. + +## MyISAM + +Most SELECT queries that fetch data for reports are made with the conditions WHERE CounterID = AND Date BETWEEN min_date AND max_date. Sometimes there is also filter by region, so it made sense to use complex primary key to turn this into primary key range is read. So table schema for Metrica looks like this: CounterID, Date, RegionID -> Visits, SumVisitTime, etc. Now we'll take a look at what happens when it comes in. + +A MyISAM table is comprised of a data file and an index file. If nothing was deleted from the table and the rows did not change in length during updating, the data file will consist of serialized rows arranged in succession in the order that they were added. The index (including the primary key) is a B-tree, where the leaves contain offsets in the data file. When we read index range data, a lot of offsets in the data file are taken from the index. Then reads are issued for this set of offsets in the data file. + +Let's look at the real-life situation when the index is in RAM (key cache in MySQL or system page cache), but the table data is not cached. Let's assume that we are using HDDs. The time it takes to read data depends on the volume of data that needs to be read and how many Seek operations need to be run. 
The number of Seek's is determined by the locality of data on the disk. + +Data locality illustrated: +![Data locality](https://blog-images.clickhouse.tech/en/2016/evolution-of-data-structures-in-yandex-metrica/1.jpg) + +Metrica events are received in almost the same order in which they actually took place. In this incoming stream, data from different counters is scattered completely at random. In other words, incoming data is local by time, but not local by CounterID. When writing to a MyISAM table, data from different counters is also placed quite randomly. This means that to read the data report, you will need to perform about as many random reads as there are rows that we need in the table. + +A typical 7200 rpm hard disk can perform 100 to 200 random reads per second. A RAID, if used properly, can handle the same amount multiplied by number of disks in it. One five-year-old SSD can perform 30,000 random reads per second, but we cannot afford to keep our data on SSD. So in this case, if we needed to read 10,000 rows for a report, it would take more than 10 seconds, which would be totally unacceptable. + +InnoDB is much better suited to reading primary key ranges since it uses [a clustered primary key](https://en.wikipedia.org/wiki/Database_index#Clustered) (i.e., the data is stored in an orderly manner on the primary key). But InnoDB was impossible to use due to its slow write speed. If this reminds you of [TokuDB](https://www.percona.com/software/mysql-database/percona-tokudb), then read on. + +It took a lot of tricks like periodic table sorting, complicated manual partitioning schemes, and keeping data in generations to keep Yandex.Metrica working on MyISAM. This approach also had a lot of lot of operational drawbacks, for example slow replication, consistency, unreliable recovery, etc. Nevertheless, as of 2011, we stored more than 580 billion rows in MyISAM tables. + +## Metrage and OLAPServer + +Metrage is an implementation of [LSM Tree](https://en.wikipedia.org/wiki/Log-structured_merge-tree), a fairly common data structure that works well for workloads with intensive stream of writes and mostly primary key reads, like Yandex.Metrica has. LevelDB did not exist in 2010 and TokuDB was proprietary at the time. + +![LSM Tree](https://blog-images.clickhouse.tech/en/2016/evolution-of-data-structures-in-yandex-metrica/2.jpg) + +In Metrage arbitrary data structures (fixed at compile time) can be used as “rows” in it. Every row is a key, value pair. A key is a structure with comparison operations for equality and inequality. The value is an arbitrary structure with operations to update (to add something) and merge (to aggregate or combine with another value). In short, it's a CRDT. Data is located pretty locally on the hard disk, so the primary key range reads are quick. Blocks of data are effectively compressed even with fast algorithms because of ordering (in 2010 we used QuickLZ, since 2011 - LZ4). Storing data in a systematic manner enables us to use a sparse index. + +Since reading is not performed very often (even though lot of rows are read when it does) the increase in latency due to having many chunks and decompressing the data block does not matter. Reading extra rows because of the index sparsity also does not make a difference. + +After transferring reports from MyISAM to Metrage, we immediately saw an increase in Metrica interface speed. 
Whereas earlier 90% of page-title reports loaded within 26 seconds, with Metrage they loaded in 0.8 seconds (total time, including time to process all database queries and follow-up data transformations). The time it takes Metrage itself to process queries (for all reports) is as follows, by percentile: average = 6 ms, 90th percentile = 31 ms, 99th percentile = 334 ms.
+
+We've been using Metrage for five years and it has proved to be a reliable solution. As of 2015 we stored 3.37 trillion rows in Metrage and used 39 * 2 servers for this.
+
+Its advantages were simplicity and effectiveness, which made it a far better choice for storing data than MyISAM. Though the system still had one huge drawback: it really only works effectively with fixed reports. Metrage aggregates data and saves aggregated data. But in order to do this, you have to list all the ways in which you want to aggregate data ahead of time. So if we do this in 40 different ways, it means that Metrica will contain 40 types of reports and no more.
+
+To mitigate this, for a while we had to keep a separate storage for the custom report wizard, called OLAPServer. It is a simple and very limited implementation of a column-oriented database. It supports only one table, fixed at compile time — a sessions table. Unlike Metrage, data is not updated in real time, but rather a few times per day. The only supported data type is fixed-length numbers of 1-8 bytes, so it wasn't suitable for reports with other kinds of data, for example URLs.
+
+## ClickHouse
+
+Using OLAPServer, we developed an understanding of how well column-oriented DBMS's handle ad-hoc analytics tasks with non-aggregated data. If you can retrieve any report from non-aggregated data, then it begs the question of whether data even needs to be aggregated in advance, as we did with Metrage.
+
+![](https://blog-images.clickhouse.tech/en/2016/evolution-of-data-structures-in-yandex-metrica/3.gif)
+
+On the one hand, pre-aggregating data can reduce the volume of data that is used at the moment when the report page is loading. On the other hand, though, aggregated data doesn't solve everything. Here are the reasons why:
+
+- you need to have a list of reports that your users need ahead of time; in other words, the user can't put together a custom report
+- when aggregating a lot of keys, the amount of data is not reduced and aggregation is useless; when there are a lot of reports, there are too many aggregation options (combinatorial explosion)
+- when aggregating high cardinality keys (for example, URLs) the amount of data does not decrease by much (by less than half); due to this, the amount of data may not be reduced, but actually grow during aggregation
+- users won't view all the reports that we calculate for them (in other words, a lot of the calculations prove useless)
+- it's difficult to maintain logical consistency when storing a large number of different aggregations
+
+As you can see, if nothing is aggregated and we work with non-aggregated data, then it's possible that the volume of computations will even be reduced. But working only with non-aggregated data imposes very high demands on the effectiveness of the system that executes the queries.
+
+So if we aggregate the data in advance, then we should do it constantly (in real time), but asynchronously with respect to user queries. We should really just aggregate the data in real time; a large portion of the report being received should consist of prepared data.
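+
+As a rough, non-original illustration of this "aggregate constantly, but asynchronously" idea, here is a minimal sketch in current ClickHouse syntax (the table and column names are hypothetical, not the real Metrica schema): raw events stay queryable as-is, while a materialized view maintains a pre-aggregated projection on every insert, independently of user queries.
+
+```sql
+-- Raw, non-aggregated events: always available for ad-hoc reports.
+CREATE TABLE hits_raw
+(
+    EventDate Date,
+    CounterID UInt32,
+    URL String,
+    Duration UInt32
+)
+ENGINE = MergeTree()
+PARTITION BY toYYYYMM(EventDate)
+ORDER BY (CounterID, EventDate);
+
+-- A materialized view keeps a small pre-aggregated projection up to date
+-- on every INSERT into hits_raw, asynchronously from report queries.
+CREATE MATERIALIZED VIEW hits_daily
+ENGINE = SummingMergeTree()
+PARTITION BY toYYYYMM(EventDate)
+ORDER BY (CounterID, EventDate)
+AS SELECT
+    CounterID,
+    EventDate,
+    count() AS Hits,
+    sum(Duration) AS SumDuration
+FROM hits_raw
+GROUP BY CounterID, EventDate;
+```
+
+Reports that fit the pre-aggregated key can read the small table, while everything else still runs over the raw data.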
+ +If data is not aggregated in advance, all the work has to be done at the moment the user request it (i.e. while they wait for the report page to load). This means that many billions of rows need to be processed in response to the user's query; the quicker this can be done, the better. + +For this you need a good column-oriented DBMS. The market didn‘t have any column-oriented DBMS's that would handle internet-analytics tasks on the scale of Runet (the Russian internet) well enough and would not be prohibitively expensive to license. + +Recently, as an alternative to commercial column-oriented DBMS's, solutions for efficient ad-hoc analytics of data in distributed computing systems began appearing: Cloudera Impala, Spark SQL, Presto, and Apache Drill. Although such systems can work effectively with queries for internal analytical tasks, it is difficult to imagine them as the backend for the web interface of an analytical system accessible to external users. + +At Yandex, we developed and later opensourced our own column-oriented DBMS — ClickHouse. Let's review the basic requirements that we had in mind before we proceeded to development. + +**Ability to work with large datasets.** In current Yandex.Metrica for websites, ClickHouse is used to store all data for reports. As of November, 2016, the database is comprised of 18.3 trillion rows. It‘s made up of non-aggregated data that is used to retrieve reports in real-time. Every row in the largest table contains over 200 columns. + +**The system should scale linearly.** ClickHouse allows you to increase the size of cluster by adding new servers as needed. For example, Yandex.Metrica's main cluster has increased from 60 to 426 servers in three years. In the aim of fault tolerance, our servers are spread across different data centers. ClickHouse can use all hardware resources to process a single query. This way more than 2 terabyte can be processed per second. + +**High efficiency.** We especially pride ourselves on our database's high performance. Based on the results of internal tests, ClickHouse processes queries faster than any other system we could acquire. For example, ClickHouse works an average of 2.8-3.4 times faster than Vertica. With ClickHouse there is no one silver bullet that makes the system work so quickly. + +**Functionality should be sufficient for Web analytics tools.** The database supports the SQL language dialect, subqueries and JOINs (local and distributed). There are numerous SQL extensions: functions for web analytics, arrays and nested data structures, higher-order functions, aggregate functions for approximate calculations using sketching, etc. By working with ClickHouse, you get the convenience of a relational DBMS. + +ClickHouse was initially developed by the Yandex.Metrica team. Furthermore, we were able to make the system flexible and extensible enough that it can be successfully used for different tasks. Although the database can run on large clusters, it can be installed on one server or even on a virtual machine. There are now more than a dozen different ClickHouse applications within our company. + +ClickHouse is well equipped for creating all kinds of analytical tools. Just consider: if the system can handle the challenges of Yandex.Metrica, you can be sure that ClickHouse will cope with other tasks with a lot of performance headroom to spare. + +ClickHouse works well as a time series database; at Yandex it is commonly used as the backend for Graphite instead of Ceres/Whisper. 
This lets us work with more than a trillion metrics on a single server. + +ClickHouse is used by analytics for internal tasks. Based on our experience at Yandex, ClickHouse performs at about three orders of magnitude higher than traditional methods of data processing (scripts on MapReduce). But this is not a simple quantitative difference. The fact of the matter is that by having such a high calculation speed, you can afford to employ radically different methods of problem solving. + +If an analyst has to make a report and they are competent at their job, they won't just go ahead and construct one report. Rather, they will start by retrieving dozens of other reports to better understand the nature of the data and test various hypotheses. It is often useful to look at data from different angles in order to posit and check new hypotheses, even if you don't have a clear goal. + +This is only possible if the data analysis speed allows you to conduct online research. The faster queries are executed, the more hypotheses you can test. Working with ClickHouse, one even gets the sense that they are able to think faster. + +In traditional systems, data is like a dead weight, figuratively speaking. You can manipulate it, but it takes a lot of time and is inconvenient. If your data is in ClickHouse though, it is much more malleable: you can study it in different cross-sections and drill down to the individual rows of data. + +## Conclusions + +Yandex.Metrica has become the second largest web-analytics system in the world. The volume of data that Metrica takes in grew from 200 million events a day in 2009 to more than 25 billion in 2016. In order to provide users with a wide variety of options while still keeping up with the increasing workload, we've had to constantly modify our approach to data storage. + +Effective hardware utilization is very important to us. In our experience, when you have a large volume of data, it's better not to worry as much about how well the system scales and instead focus on how effectively each unit of resource is used: each processor core, disk and SSD, RAM, and network. After all, if your system is already using hundreds of servers, and you have to work ten times more efficiently, it is unlikely that you can just proceed to install thousands of servers, no matter how scalable your system is. + +To maximize efficiency, it's important to customize your solution to meet the needs of specific type of workload. There is no data structure that copes well with completely different scenarios. For example, it's clear that key-value databases don't work for analytical queries. The greater the load on the system, the narrower the specialization required. One should not be afraid to use completely different data structures for different tasks. + +We were able to set things up so that Yandex.Metrica's hardware was relatively inexpensive. This has allowed us to offer the service free of charge to even very large sites and mobile apps, even larger than Yanex‘s own, while competitors typically start asking for a paid subscription plan. 
+
+
diff --git a/website/blog/en/2016/how-to-update-data-in-clickhouse.md b/website/blog/en/2016/how-to-update-data-in-clickhouse.md
new file mode 100644
index 00000000000..22c2fa3ccc1
--- /dev/null
+++ b/website/blog/en/2016/how-to-update-data-in-clickhouse.md
@@ -0,0 +1,169 @@
+---
+title: 'How to Update Data in ClickHouse'
+date: '2016-11-20'
+image: 'https://blog-images.clickhouse.tech/en/2016/how-to-update-data-in-clickhouse/main.jpg'
+tags: ['features', 'update', 'delete', 'CollapsingMergeTree', 'partitions']
+---
+
+There are no UPDATE or DELETE commands in ClickHouse at the moment. And that's not because we have some religious beliefs. ClickHouse is a performance-oriented system, and data modifications are hard to store and process optimally in terms of performance.
+
+But sometimes we have to modify data. And sometimes data should be updated in real time. Don't worry, we have these cases covered.
+
+## Work with Partitions
+
+Data in the MergeTree engine family is partitioned by the partition_key engine parameter. MergeTree splits all the data by this partition key. The partition size is one month.
+
+That's very useful in many ways, especially when we're talking about data modification.
+
+## Yandex.Metrica "hits" Table
+
+Let's look at an example: the Yandex.Metrica server mtlog02-01-1, which stores some Yandex.Metrica data for the year 2013. The table we are looking at contains user events that we call "hits". This is the engine description for the hits table:
+
+``` text
+ENGINE = ReplicatedMergeTree(
+    '/clickhouse/tables/{layer}-{shard}/hits',  -- zookeeper path
+    '{replica}',                                -- settings in config describing replicas
+    EventDate,                                  -- partition key column
+    intHash32(UserID),                          -- sampling key
+    (CounterID, EventDate, intHash32(UserID), WatchID),  -- index
+    8192                                        -- index granularity
+)
+```
+
+You can see that the partition key column is EventDate. That means that all the data will be split into months using this column.
+
+With this SQL we can get the list of partitions and some stats about the current partitions:
+
+```sql
+SELECT
+    partition,
+    count() as number_of_parts,
+    formatReadableSize(sum(bytes)) as sum_size
+FROM system.parts
+WHERE
+    active
+    AND database = 'merge'
+    AND table = 'hits'
+GROUP BY partition
+ORDER BY partition;
+```
+```text
+┌─partition─┬─number_of_parts─┬─sum_size───┐
+│ 201306    │               1 │ 191.34 GiB │
+│ 201307    │               4 │ 537.86 GiB │
+│ 201308    │               6 │ 608.77 GiB │
+│ 201309    │               5 │ 658.68 GiB │
+│ 201310    │               5 │ 768.74 GiB │
+│ 201311    │               5 │ 654.61 GiB │
+└───────────┴─────────────────┴────────────┘
+```
+There are 6 partitions with a few parts in each of them. Each partition is around 600 GiB of data. A partition is strictly one piece of data for a partition key value; here we can see that it is a month. A part is one piece of data inside a partition. Basically, it's one node of the LSMT structure, so there are not so many of them, especially for old data. If there are too many of them, they merge and form bigger ones.
+
+## Partition Operations
+
+There is a nice set of operations to work with partitions:
+
+- `DETACH PARTITION` - Move a partition to the 'detached' directory and forget it.
+- `DROP PARTITION` - Delete a partition.
+- `ATTACH PART|PARTITION` - Add a new part or partition from the 'detached' directory to the table.
+- `FREEZE PARTITION` - Create a backup of a partition.
+- `FETCH PARTITION` - Download a partition from another server.
+
+We can do any data management operations on the partition level: move, copy and delete. Also, the special DETACH and ATTACH operations exist to simplify data manipulation.
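+
+For illustration, here is roughly what these operations look like as SQL, assuming the hits table above and present-day ALTER TABLE syntax (a sketch, not commands from the original article):
+
+```sql
+ALTER TABLE hits DETACH PARTITION 201307;  -- move the partition to 'detached' and forget it
+ALTER TABLE hits ATTACH PARTITION 201307;  -- bring it back from 'detached'
+ALTER TABLE hits DROP PARTITION 201306;    -- delete the partition entirely
+ALTER TABLE hits FREEZE PARTITION 201308;  -- create a local backup (hard links)
+-- download the partition from another replica into 'detached'; it still needs ATTACH afterwards
+ALTER TABLE hits FETCH PARTITION 201309 FROM '/clickhouse/tables/{layer}-{shard}/hits';
+```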
+
+DETACH detaches a partition from the table, moving all its data to the 'detached' directory. The data is still there and you can copy it anywhere, but detached data is not visible at the query level. ATTACH is the opposite: it attaches data from the 'detached' directory so it becomes visible again.
+
+These attach and detach commands work almost instantly, so you can make your updates almost transparently to database clients.
+
+Here is the plan for updating data using partitions:
+
+- Create a modified partition with the updated data in another table
+- Copy the data for this partition to the 'detached' directory
+- `DROP PARTITION` in the main table
+- `ATTACH PARTITION` in the main table
+
+Partition swaps are especially useful for huge, low-frequency data updates. But they're not so handy when you need to update a lot of data in real time.
+
+## Update Data on the Fly
+
+In Yandex.Metrica we have a user sessions table. Each row is one session on a website: some pages viewed, some time spent, some banners clicked. This data is updated every second: a user on a website views more pages, clicks more buttons, and does other things. The site owner can see these actions in the Yandex.Metrica interface in real time.
+
+So how do we do that?
+
+We update data not by changing it in place, but by adding more data about what has changed. This is usually called the CRDT approach, and there is an article on Wikipedia about it.
+
+It was created to solve the conflict problem in transactions, but this concept also allows updating data. We use our own data model with this approach. We call it Incremental Log.
+
+## Incremental Log
+
+Let's look at an example.
+
+Here we have information about one session: the user identifier UserID, the number of pages viewed PageViews, and the time spent on the site in seconds Duration. There is also the Sign field, which we describe later.
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
+│ 4324182021466249494 │         5 │      146 │    1 │
+└─────────────────────┴───────────┴──────────┴──────┘
+```
+And let's say we calculate some metrics over this data:
+
+- `count()` - number of sessions
+- `sum(PageViews)` - total number of pages all users viewed
+- `avg(Duration)` - average session duration, how long a user usually spends on the website
+
+Now let's say we have an update: the user viewed one more page, so we should change PageViews from 5 to 6 and Duration from 146 to 185.
+
+We insert two more rows:
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
+│ 4324182021466249494 │         5 │      146 │   -1 │
+│ 4324182021466249494 │         6 │      185 │    1 │
+└─────────────────────┴───────────┴──────────┴──────┘
+```
+
+The first one is a 'delete' row. It's exactly the same row that we already have there, but with Sign set to -1. The second one is the updated row with all data set to the new values.
+
+After that we have three rows of data:
+``` text
+┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐
+│ 4324182021466249494 │         5 │      146 │    1 │
+│ 4324182021466249494 │         5 │      146 │   -1 │
+│ 4324182021466249494 │         6 │      185 │    1 │
+└─────────────────────┴───────────┴──────────┴──────┘
+```
+
+The most important part is the modified metrics calculation. We should update our queries like this:
+
+``` text
+ -- number of sessions
+count() -> sum(Sign)
+ -- total number of pages all users viewed
+sum(PageViews) -> sum(Sign * PageViews)
+ -- average session duration, how long a user usually spends on the website
+avg(Duration) -> sum(Sign * Duration) / sum(Sign)
+```
+
+You can see that it works as expected over this data. The 'delete' row hides the old row: the same values come with + and - signs inside the aggregation and annihilate each other.
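+
+To make this arithmetic concrete, here is a minimal, self-contained sketch that reproduces the example (hypothetical table name, present-day ClickHouse syntax rather than what existed when this post was written):
+
+```sql
+CREATE TABLE sessions_demo
+(
+    UserID UInt64,
+    PageViews UInt8,
+    Duration UInt32,
+    Sign Int8
+)
+ENGINE = MergeTree()
+ORDER BY UserID;
+
+INSERT INTO sessions_demo VALUES
+    (4324182021466249494, 5, 146,  1),
+    (4324182021466249494, 5, 146, -1),
+    (4324182021466249494, 6, 185,  1);
+
+SELECT
+    sum(Sign)                        AS sessions,     -- instead of count()
+    sum(Sign * PageViews)            AS page_views,   -- instead of sum(PageViews)
+    sum(Sign * Duration) / sum(Sign) AS avg_duration  -- instead of avg(Duration)
+FROM sessions_demo;
+```
+
+It returns 1 session, 6 page views and an average duration of 185, exactly the values of the updated row.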
+
+Moreover, it works totally fine with changing keys for grouping. If we want to group data by PageViews, all data for PageViews = 5 will be 'hidden' by these rows.
+
+There are some limitations with this approach:
+
+- It works only for metrics that can be expressed through these Sign operations. It covers most cases, but it's not possible to calculate min or max values. There is an impact on uniq calculations as well. But it's fine at least for the Yandex.Metrica cases, and there are a lot of different analytical calculations;
+- You need to somehow remember the old value in the external system doing the updates, so you can insert these 'delete' rows;
+- Some other effects; there is a [great answer](https://groups.google.com/forum/#!msg/clickhouse/VixyOUD-K68/Km8EpkCyAQAJ) on Google Groups.
+
+## CollapsingMergeTree
+
+ClickHouse supports the Incremental Log model in the Collapsing engine family.
+
+If you use the Collapsing family, a 'delete' row and the old 'deleted' row will collapse during the merge process. Merge is a background process that combines data into larger chunks. Here is a great article about merges and LSMT structures.
+
+In most cases, 'delete' and 'deleted' rows will be removed within days. What's important here is that you will not have any significant overhead on data size. Using the Sign field in SELECTs is still required.
+
+There is also the FINAL modifier available for the Collapsing family. Using FINAL guarantees that the user will see already collapsed data, so using the Sign field isn't required. FINAL usually causes a tremendous performance degradation, because ClickHouse has to group data by key and delete rows during SELECT execution. But it's useful when you want to check your queries or if you want to see raw, unaggregated data in its final form.
+
+## Future Plans
+
+We know that the current feature set is not enough. There are some cases that do not fit these limitations. But we have big plans, and here are some insights into what we're preparing:
+
+- Partitioning by custom key: the current partitioning scheme is bound to months only. We will remove this limitation, and it will be possible to create partitions by any key. All partition operations like FETCH PARTITION will be available.
+- UPDATE and DELETE: there are a lot of issues with supporting updates and deletes: performance degradation, consistency guarantees, distributed queries and more. But we believe that if you need to update a few rows of data in your dataset, it should not be painful. It will be done.
+
diff --git a/website/blog/en/2016/yandex-opensources-clickhouse.md b/website/blog/en/2016/yandex-opensources-clickhouse.md
new file mode 100644
index 00000000000..feffec65d79
--- /dev/null
+++ b/website/blog/en/2016/yandex-opensources-clickhouse.md
@@ -0,0 +1,12 @@
+---
+title: 'Yandex Opensources ClickHouse'
+image: 'https://blog-images.clickhouse.tech/en/2016/yandex-opensources-clickhouse/main.jpg'
+date: '2016-06-15'
+tags: ['announcement', 'GitHub', 'license']
+---
+
+Today [the analytical DBMS ClickHouse](https://clickhouse.tech/), initially developed internally at Yandex, became available to everyone. The source code is published on [GitHub](https://github.com/ClickHouse/ClickHouse) under the Apache 2.0 license.
+
+ClickHouse allows interactive analytical query execution on data updated in real time. The system is able to scale to tens of trillions of rows and petabytes of stored data. Using ClickHouse opens up opportunities that were hard to imagine: you can store the full stream of data and slice and dice it to produce reports without offline aggregation.
ClickHouse was initially developed as a backend for [Yandex.Metrica](https://metrika.yandex.com/) — second largest web analytics system in the world. + +[Discussion on Hacker News](https://news.ycombinator.com/item?id=11908254). diff --git a/website/blog/en/2017/clickhouse-at-data-scale-2017.md b/website/blog/en/2017/clickhouse-at-data-scale-2017.md new file mode 100644 index 00000000000..f05cc9e1e89 --- /dev/null +++ b/website/blog/en/2017/clickhouse-at-data-scale-2017.md @@ -0,0 +1,10 @@ +--- +title: 'ClickHouse at Data@Scale 2017' +image: 'https://blog-images.clickhouse.tech/en/2017/clickhouse-at-data-scale-2017/main.jpg' +date: '2017-06-15' +tags: ['conference', 'Seattle', 'USA', 'America', 'events'] +--- + +![iframe](https://www.youtube.com/embed/bSyQahMVZ7w) + +[Slides](https://presentations.clickhouse.tech/data_at_scale/) diff --git a/website/blog/en/2017/clickhouse-at-percona-live-2017.md b/website/blog/en/2017/clickhouse-at-percona-live-2017.md new file mode 100644 index 00000000000..989dfde932d --- /dev/null +++ b/website/blog/en/2017/clickhouse-at-percona-live-2017.md @@ -0,0 +1,22 @@ +--- +title: 'ClickHouse at Percona Live 2017' +image: 'https://blog-images.clickhouse.tech/en/2017/clickhouse-at-percona-live-2017/main.jpg' +date: '2017-04-28' +tags: ['meetup', 'Santa Clara', 'Bay Area', 'California', 'USA', 'America', 'events', 'Graphouse'] +--- + +For those who haven't heard, [Percona Live](https://percona.com/live/17) is probably one of the largest international conferences about opensource database management systems, having 12 talk tracks in parallel. It's been around for many years and initially, it was focused mainly on MySQL (and had that in its name), but nowadays it is more generic and other products of this category get lots of attention too. Needless to say that for a relatively new player on the market like [ClickHouse](https://clickhouse.tech/), it's been a great opportunity to spread the word about the technology and how exactly it allows us to perform analytics on petabytes of data in real-time. + +![Percona Live](https://blog-images.clickhouse.tech/en/2017/clickhouse-at-percona-live-2017/1.jpg) + +Yandex team members had three chances to talk about ClickHouse from the stage: + +1. A large portion of [Opening Keynote](https://www.percona.com/blog/2017/04/25/percona-live-2017-day-one-keynotes/) has been dedicated to different time-series databases. ClickHouse is not really a specialized time-series database but still outperforms many alternatives if used as such. So Dmitry Andreev, Head of Yandex.Market Infrastructure Development Group, had a short talk about how ClickHouse can be used a as storage backend for Graphite using [Graphouse](https://github.com/clickhouse/graphouse), an open-source adapter that implements this. This setup is used in Yandex.Market and number of other Yandex services and have proven to be very reliable and effective. Chain of short talks has been followed by a live panel about time series in general with the same speakers including Dmitry. Unfortunately, as we figured out later, many keynote attendees perceived ClickHouse as just yet another time-series database and missed the explicitly said part that it opens up way more opportunities to analyze data. +2. Victor Tarnavsky, Head of Yandex.Metrica, and Alexey Milovidov, Head of ClickHouse Development Group, gave a full-length talk about ClickHouse overview, capabilities, features and use cases. 
Their video has not been recorded, but you can check out [the slides](https://presentations.clickhouse.tech/percona2017/ClickHouse%20Percona%20Santa%20Clara%202.0.pdf). +3. Later on, Dmitry Andreev went deeper on the same topic he covered on an opening keynote. He spoke in more detail about how Graphouse works, shown the benchmark results and future plans of the project. Also, [only slides](https://www.percona.com/live/17/sites/default/files/slides/clickhouse-as-timeseries-database.pdf) are available. + +![Keynote](https://blog-images.clickhouse.tech/en/2017/clickhouse-at-percona-live-2017/2.gif) + +Besides, ClickHouse has been represented in the exhibition accompanying the conference. Altinity, the private company independent from Yandex that provides consulting and support services for ClickHouse, organized the booth and invited Yandex team members to join them to talk about ClickHouse with conference attendees which appeared to be quite productive. + +![ClickHouse Booth](https://blog-images.clickhouse.tech/en/2017/clickhouse-at-percona-live-2017/3.jpg) diff --git a/website/blog/en/2017/clickhouse-meetup-in-berlin-october-5-2017.md b/website/blog/en/2017/clickhouse-meetup-in-berlin-october-5-2017.md new file mode 100644 index 00000000000..bd15350ba34 --- /dev/null +++ b/website/blog/en/2017/clickhouse-meetup-in-berlin-october-5-2017.md @@ -0,0 +1,10 @@ +--- +title: 'ClickHouse Meetup in Berlin, October 5, 2017' +image: 'https://blog-images.clickhouse.tech/en/2017/clickhouse-meetup-in-berlin-october-5-2017/main.jpg' +date: '2017-10-19' +tags: ['meetup', 'Berlin', 'Germany', 'events'] +--- + +![iframe](https://www.youtube.com/embed/videoseries?list=PL0Z2YDlm0b3hO_3kCUFZLdcIQuI3gghZ8) + +All presentations are available for download at [the event page](https://events.yandex.com/events/meetings/05-10-2017/). diff --git a/website/blog/en/2017/clickhouse-meetup-in-santa-clara-may-4-2017.md b/website/blog/en/2017/clickhouse-meetup-in-santa-clara-may-4-2017.md new file mode 100644 index 00000000000..5974a292853 --- /dev/null +++ b/website/blog/en/2017/clickhouse-meetup-in-santa-clara-may-4-2017.md @@ -0,0 +1,8 @@ +--- +title: 'ClickHouse Meetup in Santa Clara on May 4, 2017' +image: 'https://blog-images.clickhouse.tech/en/2017/clickhouse-meetup-in-santa-clara-may-4-2017/main.jpg' +date: '2017-05-11' +tags: ['meetup', 'Santa Clara', 'Bay Area', 'California', 'USA', 'America', 'events'] +--- + +After [Percona Live 2017](clickhouse-at-percona-live-2017.md), Yandex ClickHouse team stayed for one more week in San Francisco Bay Area to meet with local companies in person to talk about ClickHouse and how it can be applied to their tasks. On the last evening we even managed to organize our own meetup with active ClickHouse users in the area, not as large as we regularly host in Russia, but still had some very interesting discussions. 
diff --git a/website/blog/en/2017/join-the-clickhouse-meetup-in-berlin.md b/website/blog/en/2017/join-the-clickhouse-meetup-in-berlin.md new file mode 100644 index 00000000000..5521127edba --- /dev/null +++ b/website/blog/en/2017/join-the-clickhouse-meetup-in-berlin.md @@ -0,0 +1,13 @@ +--- +title: 'Join the ClickHouse Meetup in Berlin' +image: 'https://blog-images.clickhouse.tech/en/2017/join-the-clickhouse-meetup-in-berlin/main.jpg' +date: '2017-10-19' +tags: ['announcement', 'meetup', 'Berlin', 'Germany', 'events'] +--- + +Come learn about ClickHouse, our open source high-performance column-oriented database management system at a meetup on October 5, 2017 at the Park Inn at Alexanderplatz 7 in Berlin. + +ClickHouse can generate custom data reports in real time and process billions of rows and dozens of gigabytes of data per single server per second. It works up to a thousand times faster than traditional approaches. ClickHouse is linearly scalable, hardware-efficient, fault-tolerant, and can be deployed across multiple data centers. Among other features, ClickHouse offers a user-friendly SQL query dialect with a number of built-in analytics capabilities. + +Join us at the meetup to learn why hundreds of companies across Europe, US, and China are adopting ClickHouse. Through interactive talks, attendees will learn about product features, how ClickHouse can benefit them, and how to use this system in practice. +Attending the ClickHouse meetup is free. [Please register to join us](https://events.yandex.com/events/meetings/05-10-2017/). diff --git a/website/blog/en/2018/announcing-clickhouse-meetup-in-amsterdam-on-november-15.md b/website/blog/en/2018/announcing-clickhouse-meetup-in-amsterdam-on-november-15.md new file mode 100644 index 00000000000..c3534efee55 --- /dev/null +++ b/website/blog/en/2018/announcing-clickhouse-meetup-in-amsterdam-on-november-15.md @@ -0,0 +1,8 @@ +--- +title: 'Announcing ClickHouse Meetup in Amsterdam on November 15' +image: 'https://blog-images.clickhouse.tech/en/2018/announcing-clickhouse-meetup-in-amsterdam-on-november-15/main.jpg' +date: '2018-10-17' +tags: ['meetup', 'Amsterdam', 'Netherlands', 'events', 'announcement'] +--- + +Yet another meetup of ClickHouse community is planned in Europe, see detailed agenda and register on [the event page](https://events.yandex.com/events/meetings/15-11-2018/). diff --git a/website/blog/en/2018/clickhouse-at-analysys-a10-2018.md b/website/blog/en/2018/clickhouse-at-analysys-a10-2018.md new file mode 100644 index 00000000000..d3700b40e42 --- /dev/null +++ b/website/blog/en/2018/clickhouse-at-analysys-a10-2018.md @@ -0,0 +1,27 @@ +--- +title: 'ClickHouse at Analysys A10 2018' +image: 'https://blog-images.clickhouse.tech/en/2018/clickhouse-at-analysys-a10-2018/main.jpg' +date: '2018-11-04' +tags: ['conference', 'Beijing', 'China', 'events', 'Analysys', 'Asia'] +--- + +[Analysys A10](https://event.analysys.cn/pc/2018/index.html) is a large conference on Big Data that took place on October 26-27 in Beijing. Since China's population is huge, it generates a lot of data and Big Data industry is in very high demand. Yandex ClickHouse team has been honored to participate in this event alongside top management, analysts, and IT professionals from various Chinese companies. + +Each year Analysys also organizes the OLAP contest. The second year in a row the same team of Sundy Li (李本旺) and Winter Zhang (张健) wins it by using ClickHouse as the core of their solution. 
The task was to calculate complex marketing funnel as fast as possible. + +Sundy Li (李本旺) receives award for winning Analysys OLAP contest 2018 from William Kwok (郭炜): +![Sundy Li and William Kwok](https://blog-images.clickhouse.tech/en/2018/clickhouse-at-analysys-a10-2018/1.jpg) + +The first day of the conference we mostly spent talking with people on ClickHouse booth, while on the second day there were two technical talks about ClickHouse. + +Alexey Milovidov demonstrates ClickHouse and how it works internally: +![Alexey Milovidov](https://blog-images.clickhouse.tech/en/2018/clickhouse-at-analysys-a10-2018/2.jpg) + +Sundy Li (李本旺) explains the audience how they won the OLAP contest using ClickHouse: +![Sundy Li](https://blog-images.clickhouse.tech/en/2018/clickhouse-at-analysys-a10-2018/3.jpg) + +The next day after A10 was a dedicated ClickHouse Community Meetup in Beijing, but it deserves a separate recap post. + +Analysys A10 afterparty: +![Analysys A10 afterparty](https://blog-images.clickhouse.tech/en/2018/clickhouse-at-analysys-a10-2018/4.jpg) + diff --git a/website/blog/en/2018/clickhouse-at-percona-live-europe-2018.md b/website/blog/en/2018/clickhouse-at-percona-live-europe-2018.md new file mode 100644 index 00000000000..1e0b7e4c99d --- /dev/null +++ b/website/blog/en/2018/clickhouse-at-percona-live-europe-2018.md @@ -0,0 +1,25 @@ +--- +title: 'ClickHouse at Percona Live Europe 2018' +image: 'https://blog-images.clickhouse.tech/en/2018/clickhouse-at-percona-live-europe-2018/main.jpg' +date: '2018-11-21' +tags: ['conference', 'Frankfurt', 'Germany', 'events', 'Percona Live', 'Europe'] +--- + +Open-source database management systems conference [Percona Live](https://www.percona.com/live/e18/) this time took place on November 5-7 in Germany, Frankfurt am Main. Over last couple years ClickHouse became a solid member of this community as demand in analytics with subsecond latencies appeared to be pretty high. + +There were three talks about ClickHouse in agenda, while only one of them was from Yandex. Also we had a lot of interesting conversations with conference attendees at ClickHouse booth sponsored by Altinity. + +Alexander Zaitsev, CTO and co-founder of Altinity, gives an overview of ClickHouse and then demonstrates case studies and best practices ([slides](https://presentations.clickhouse.tech/percona_europe_2018/Altinity.pdf)): +![](https://blog-images.clickhouse.tech/en/2018/clickhouse-at-percona-live-europe-2018/1.jpg) + +Fast! Flexible! Free! Fun! +![Fast! Flexible! Free! 
Fun!](https://blog-images.clickhouse.tech/en/2018/clickhouse-at-percona-live-europe-2018/2.jpg) + +Aleksey Milovidov, lead ClickHouse developer from Yandex, talks about unusual and unique ClickHouse features ([slides](https://presentations.clickhouse.tech/percona_europe_2018)): +![Aleksey Milovidov](https://blog-images.clickhouse.tech/en/2018/clickhouse-at-percona-live-europe-2018/3.jpg) + +Aleksandar Aleksandrov and Felix Mattrat, data engineers from MessageBird, show how they use ClickHouse to analyze process of delivery of SMS and other kinds of messages ([slides](http://presentations.clickhouse.tech/percona_europe_2018/MessageBird.pdf)): +![Aleksandar Aleksandrov and Felix Mattrat](https://blog-images.clickhouse.tech/en/2018/clickhouse-at-percona-live-europe-2018/4.jpg) + +Live demo at ClickHouse booth by Alexey Milovidov: +![Demo at ClickHouse booth by Alexey Milovidov](https://blog-images.clickhouse.tech/en/2018/clickhouse-at-percona-live-europe-2018/5.jpg) diff --git a/website/blog/en/2018/clickhouse-community-meetup-in-beijing-on-january-27-2018.md b/website/blog/en/2018/clickhouse-community-meetup-in-beijing-on-january-27-2018.md new file mode 100644 index 00000000000..cd7cb155c37 --- /dev/null +++ b/website/blog/en/2018/clickhouse-community-meetup-in-beijing-on-january-27-2018.md @@ -0,0 +1,68 @@ +--- +title: 'ClickHouse Community Meetup in Beijing on January 27, 2018' +image: 'https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-january-27-2018/main.jpg' +date: '2018-02-08' +tags: ['meetup', 'Beijing', 'China', 'events', 'Asia'] +--- + +Last year there has been an OLAP algorithm contest in China organized by Analysys. The team who have shown the top results and won the competition has been using ClickHouse as the core of their solution. Other teams were mostly using different technologies and didn't really know much about ClickHouse at a time. When the final results were published, many people in China who participated in or were aware of this competition became really eager to learn more about ClickHouse. This spike of interest about ClickHouse in China has eventually lead to the first Chinese ClickHouse Community Meetup that has taken place in Beijing. + +Welcome word by William Kwok, CTO of Analysys, who personally played a huge role in making this event possible: +![William Kwok, CTO of Analysys](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-january-27-2018/1.jpg) + +It was probably the most intense ClickHouse Meetup compared to all previous ones worldwide. The main part of the event took over 6 hours non-stop and there were also either pre-meetup and after-party on the same day. Well over 150 people have shown up on Saturday to participate. + +Audience listening for ClickHouse introduction by Alexey Milovidov: +![ClickHouse introduction by Alexey Milovidov](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-january-27-2018/2.jpg) + +Alexey Milovidov has started the main meetup session with an introductory talk about ClickHouse, it's usage inside Yandex and history that lead to becoming an open-source analytical DBMS ([slides](https://presentations.clickhouse.tech/meetup12/introduction/)). + +Alexander Zaitsev's practical talk about migrating to ClickHouse: +![Alexander Zaitsev](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-january-27-2018/3.jpg) + +Alexander Zaitsev has shared his vast experience in migrating to ClickHouse. 
LifeStreet, advertisement company where he works, was one of the first companies outside of Yandex which switched to ClickHouse from other analytical DBMS in production. Later on, Alexander also co-founded Altinity, a company that specializes in helping others to migrate to ClickHouse and then effectively use it to achieve their business goals. The talk has covered many specific topics that are important for those who are in the middle of such migration or just considering it ([Slides](https://presentations.clickhouse.tech/meetup12/migration.pptx)). + +Alexey Zatelepin explaining how ClickHouse sparse index works and other implementation details: +![Alexey Zatelepin](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-january-27-2018/4.jpg) + +Alexey Zatelepin's technical talk was focused on providing engineers some insights on why ClickHouse is that fast in OLAP workloads and how to leverage its design and core features as a primary index, replication, and distributed tables to achieve great performance and reliability ([slides](https://presentations.clickhouse.tech/meetup12/internals.pdf)). + +Jack Gao gives an extensive overview of ClickHouse and it's use cases in Chinese: +![Jack Gao](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-january-27-2018/5.jpg) + +As we have learned during meet up and the rest of our business trip, actually there are many companies in China that are already using or seriously evaluating ClickHouse to use either part of their products or for internal analytics. Three of them are doing this long and extensively enough to give a full talk about their progress and experience. + +In China, in general, and especially in Beijing the knowledge of English is not really common. Chinese people working in the IT industry have to know English well enough to read documentation, but it does not really imply that they can talk or understand verbal English well. So the talks by representatives of local companies were in Chinese. + +Jack Gao, ex-DBA and now an analyst at Sina (major social network) have dedicated a significant part of his talk to go over fundamental topics essential to most ClickHouse users. It partially overlapped with previous talks, but this time in Chinese. Also, he covered not only use case of ClickHouse in Sina but also other publicly known cases by other companies. Considering the reaction of the audience, it has been the most useful talk of the whole meetup, because of the widely useful content, lack of language barrier, and excellent execution of presentation. We even had to sacrifice initially scheduled a short break to give Jack some additional time ([slides](https://presentations.clickhouse.tech/meetup12/power_your_data.pdf)). + +Yang Xujun from Dataliance / UltraPower, which provides outsourced data analysis platform to telecom companies in China, have demonstrated why they decided to move away from reports prepared offline in Apache Hadoop / Spark and exported to MySQL towards ClickHouse. In short: Hadoop is too slow and cumbersome ([slides](https://presentations.clickhouse.tech/meetup12/telecom.pdf)). + +It might sound obvious, but the huge Chinese population generates insane amounts of data to store and process. So IT companies operating mostly on the local Chinese market are often handling amounts of information comparable to even the largest global companies. 
+ +Kent Wang from Splunk Shanghai R&D center has demonstrated the current state of ClickHouse integration into Splunk ecosystem. Basically, they have plugged ClickHouse into their system via JDBC driver to allow data from ClickHouse to be easily accessed in Splunk UI and dashboards. Last spring Yandex ClickHouse team actually had a friendly visit to Splunk office in San Francisco to discuss potential points of interaction and exchange experience, so it was great to hear that there's some real progress in that direction ([slides](https://presentations.clickhouse.tech/meetup12/splunk.pdf)). + +The last talk was for the most tenacious ClickHouse users. Alexey Milovidov has announced some recently released features and improvements and shared what's coming next either in the short and long term [slides](https://presentations.clickhouse.tech/meetup12/news_and_plans/). + +Here is an over 5 hours long video recording of main meetup session: + +![iframe](https://www.youtube.com/embed/UXw8izZGPGk) + +If you are from China or at least can read Chinese, you might consider joining the **[Chinese ClickHouse User Group](http://www.clickhouse.com.cn/)**. + +{## Likely outdated in favor of YouTube + +There is an over 5 hours long video recording of main meetup session, but it'll take a bit of effort to get access to it (especially if you are not from China): http://m.zm518.cn/zhangmen/livenumber/share/entry/?liveId=1460023&sharerId=6fd3bac16125e71d69-899&circleId=b0b78915b2edbfe6c-78f7&followerId=×tamp=1517022274560 +You'll need to install WeChat (probably one of the most popular messengers in the world, everyone in China has it) on your smartphone: Android or iOS. https://play.google.com/store/apps/details?id=com.tencent.mm https://itunes.apple.com/ru/app/wechat/id414478124?mt=8 +On the first launch, WeChat will ask to confirm your phone number via SMS, read some digits via a microphone and accept the user agreement. Go through this. +On your computer, click the red button in the middle of the video behind the link above. It'll show a QR code. Now in WeChat in the top-right corner, there's the “+” button which opens a menu that has a “Scan QR code” item. Use it to scan QR code from your computer screen, then press the “Sign in” button on the smartphone. Now the video on the computer automatically becomes playable. +If you are from China or at least can read Chinese, you might consider joining the Chinese ClickHouse User Group. + +ClickHouse Community Meetup afterparty. 
+##} + +Pre-meetup meeting of speakers and most active ClickHouse users in China: +![Pre-meetup meeting](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-january-27-2018/6.jpg) + +ClickHouse Community Meetup afterparty: +![ClickHouse Community Meetup afterparty](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-january-27-2018/7.jpg) diff --git a/website/blog/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018.md b/website/blog/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018.md new file mode 100644 index 00000000000..a794a5f7a7e --- /dev/null +++ b/website/blog/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018.md @@ -0,0 +1,54 @@ +--- +title: 'ClickHouse Community Meetup in Beijing on October 28, 2018' +image: 'https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018/main.jpg' +date: '2018-11-12' +tags: ['meetup', 'Beijing', 'China', 'Asia', 'events'] +--- + +Interest in ClickHouse among Chinese experts is growing rapidly. It was second ClickHouse Meetup in Beijing this year and the venue was more than full, it could fit only about 170 people out of 500 who signed up and around 2000 more joined the live translation online. Many Chinese companies have already adopted ClickHouse in production and are willing to share their experience. + +See the **[video recording of all talks](http://play.yunxi.tv/livestream/flash?id=05527cf6e260448b9d880b99d2cf4d40)** and **[all slides](https://github.com/yandex/clickhouse-presentations/tree/master/meetup19)**. + +Welcome word by William Kwok (郭炜), CTO of Analysys, who played a key role in organizing this event: +![William Kwok](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018/1.jpg) + +Nikolay Kochetov from Yandex demonstrating recent advancements in string processing optimization using LowCardinality feature: +![Nikolay Kochetov from Yandex](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018/2.jpg) + +Shang Shujie (尚书杰) from Kuaishou gives an overview of ClickHouse and it's usage scenarios: +![Shang Shujie](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018/3.jpg) + +Winter Zhang (张健) from QingCloud explains their services based on ClickHouse: +![Winter Zhang](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018/4.jpg) + +Audience listening to Zhang's talk: +![Audience listening to Zhang's talk](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018/5.jpg) + +Li Junfei (李俊飞) from Tencent explains how ClickHouse fits their data processing infrastructure: +![Li Junfei](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018/6.jpg) + +Questions&Answers session: +![Q&A](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018/7.jpg) + +Jack Gao (高鹏) from Sina explains their ClickHouse use case and gives some advice based on their extensive experience with ClickHouse: +![Jack Gao](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018/8.jpg) + +Chinese developers are also one of the most active worldwide in contributing to ClickHouse source code compared to other countries. 
Chinese ClickHouse Contributors Awards 2018 ceremony was also part of the meetup agenda with the following: + +1. 张建 (Winter Zhang, zhang2014) received First Place among independent ClickHouse developers in China for 2018, having developed 22 new features, improvements, and fixes in 57 pull requests. +2. Amos Bird received Second Place among independent ClickHouse developers in China for 2018, having developed 16 new features, improvements, and fixes in 42 pull requests. +3. 李本旺 (sundy-li) received Third Place among independent ClickHouse developers in China for 2018, having developed 6 new features, improvements, and fixes in 11 pull requests. + +A special award went to William Kwok (郭炜) for his active role in developing the Chinese ClickHouse Community. + +Sundy Li (李本旺) receives ClickHouse Contributor Award from Alexey Milovidov: +![Sundy Li](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018/9.jpg) + +William Kwok (郭炜) receives special award for organizing Chinese ClickHouse community and meetups: +![William Kwok](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018/10.jpg) + +Pre-meetup at the Analysys office: +![Pre-meetup](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018/11.jpg) + + + diff --git a/website/blog/en/2018/clickhouse-community-meetup-in-berlin-on-july-3-2018.md b/website/blog/en/2018/clickhouse-community-meetup-in-berlin-on-july-3-2018.md new file mode 100644 index 00000000000..1db4b33c8dc --- /dev/null +++ b/website/blog/en/2018/clickhouse-community-meetup-in-berlin-on-july-3-2018.md @@ -0,0 +1,39 @@ +--- +title: 'ClickHouse Community Meetup in Berlin on July 3, 2018' +image: 'https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-berlin-on-july-3-2018/main.jpg' +date: '2018-07-05' +tags: ['meetup', 'Berlin', 'Germany', 'events'] +--- + +Just a few months ago Brenno Oliveira from Delivery Hero has dropped us an email saying that they want to host a meetup of ClickHouse community in their HQ and together we made it happen. Actually, renting a suitable room is one of the main limiting factors on how often ClickHouse meetups can happen worldwide and it was very kind of Delivery Hero to provide it for free. Bringing interesting speakers was the easy part as there are more and more companies adopting ClickHouse and willing to share their stories. Being an open-source product has its advantages after all. About 50 people have shown up from 75 sign-ups, which is way above the typical rate. + +To get started Alexander Zaitsev from Altinity gave an overview of ClickHouse for those who are not that familiar with the technology yet. He was using use cases from his personal experience and their clients as examples. Here are [the slides](https://presentations.clickhouse.tech/meetup16/introduction.pdf), unfortunately, no video this time. + +Gleb Kanterov talking about the usage of ClickHouse for experimentation metrics at Spotify: +![Gleb Kanterov Spotify](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-berlin-on-july-3-2018/1.jpg) + +![Gleb Kanterov Spotify](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-berlin-on-july-3-2018/2.jpg) + +Spotify relies heavily on what Google Cloud Platform provides, but nevertheless found a spot in their infrastructure where only ClickHouse appeared to satisfy the requirements. 
Gleb Kanterov has demonstrated their approach to conducting experiments and measuring if they are worth being promoted to production solutions. Using ClickHouse has allowed them to build a framework scalable to thousands of metrics, which in the end makes them move even faster and break fewer things. Checking out [full slides](https://presentations.clickhouse.tech/meetup16/spotify.pdf) is highly recommended and here are a few quotes: + +- **Requirements** + - Serve 100-s of QPS with sub-second latency + - We know in advance what are queries and data + - Maintain 10x metrics with the same cost + - Thousands of metrics + - Billions of rows per day in each of 100-s of tables + - Ready to be used out of the box + - Leverage existing infrastructure as much as feasible + - Hide unnecessary complexity from internal users +- **Why ClickHouse?** + - Build proof of concept using various OLAP storages (ClickHouse, Druid, Pinot,...) + - ClickHouse has the most simple architecture + - Powerful SQL dialect close to Standard SQL + - A comprehensive set of built-in functions and aggregators + - Was ready to be used out of the box + - Superset integration is great + - Easy to query using clickhouse-jdbc and jooq + +The last talk by Alexey Milovidov was pretty technical and mostly intended for a deeper understanding of what's going on inside ClickHouse, see [the slides](https://presentations.clickhouse.tech/meetup16/internals.pdf). There were many experienced users in the audience who didn't mind staying late to hear that and ask very relevant questions. Actually, we had to leave the building way before people were out of topics to discuss. + +If your company regularly hosts technical meetups and you are looking for interesting topics to talk about, ClickHouse might be in pretty high demand. Feel free to write Yandex ClickHouse team via [this form](http://clickhouse.tech/#meet) if you are interested to host a similar event in your city and we'll find a way to cooperate and bring in other ClickHouse community members. diff --git a/website/blog/en/2018/clickhouse-community-meetup-in-berlin-on-july-3.md b/website/blog/en/2018/clickhouse-community-meetup-in-berlin-on-july-3.md new file mode 100644 index 00000000000..7e0082cd570 --- /dev/null +++ b/website/blog/en/2018/clickhouse-community-meetup-in-berlin-on-july-3.md @@ -0,0 +1,8 @@ +--- +title: 'Announcing ClickHouse Community Meetup in Berlin on July 3' +image: 'https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-berlin-on-july-3/main.jpg' +date: '2018-06-25' +tags: ['meetup', 'Berlin', 'Germany', 'events', 'announcement'] +--- + +There's yet another upcoming meetup of ClickHouse community in Europe, see detailed agenda and sign up on [the event page](https://bitly.com/2Jv9Bug). diff --git a/website/blog/en/2018/clickhouse-community-meetup-in-paris-on-october-2-2018.md b/website/blog/en/2018/clickhouse-community-meetup-in-paris-on-october-2-2018.md new file mode 100644 index 00000000000..4f9874af655 --- /dev/null +++ b/website/blog/en/2018/clickhouse-community-meetup-in-paris-on-october-2-2018.md @@ -0,0 +1,20 @@ +--- +title: 'ClickHouse Community Meetup in Paris on October 2, 2018' +image: 'https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-paris-on-october-2-2018/main.jpg' +date: '2018-10-09' +tags: ['meetup', 'Paris', 'France', 'events'] +--- + +Agenda of Paris ClickHouse Meetup was full of use cases, mostly from France-based companies which are actively using ClickHouse. 
Slides for all talks are [available on the GitHub](https://github.com/clickhouse/clickhouse-presentations/tree/master/meetup18). + +Christophe Kalenzaga and Vianney Foucault, engineers from ContentSquare, company that provided the meetup venue: +![Christophe Kalenzaga and Vianney Foucault](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-paris-on-october-2-2018/1.jpg) + +Matthieu Jacquet from Storetail (Criteo): +![Matthieu Jacquet](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-paris-on-october-2-2018/2.jpg) + +The audience: +![Audience](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-paris-on-october-2-2018/3.jpg) + +Networking after the meetup: +![Networking](https://blog-images.clickhouse.tech/en/2018/clickhouse-community-meetup-in-paris-on-october-2-2018/4.jpg) diff --git a/website/blog/en/2018/clickhouse-meetup-in-amsterdam-on-november-15-2018.md b/website/blog/en/2018/clickhouse-meetup-in-amsterdam-on-november-15-2018.md new file mode 100644 index 00000000000..080caf610e5 --- /dev/null +++ b/website/blog/en/2018/clickhouse-meetup-in-amsterdam-on-november-15-2018.md @@ -0,0 +1,27 @@ +--- +title: 'ClickHouse Meetup in Amsterdam on November 15, 2018' +image: 'https://blog-images.clickhouse.tech/en/2018/clickhouse-meetup-in-amsterdam-on-november-15-2018/main.jpg' +date: '2018-11-22' +tags: ['meetup', 'Amsterdam', 'Netherlands', 'events'] +--- + +20th ClickHouse Meetup took place in Amsterdam, which appeared to be a convenient location for people from all over Europe to join the event, including Austria, Czech Republic and Germany. We were also glad to see people from many local companies including Booking.com, Crobox, Marktplaats (eBay), MessageBird and others. + +Aleksandar Aleksandrov and Felix Mattrat, data engineers from MessageBird, show how they use ClickHouse to analyze process of delivery of SMS and other kinds of messages: +![Aleksandar Aleksandrov and Felix Mattrat](https://blog-images.clickhouse.tech/en/2018/clickhouse-meetup-in-amsterdam-on-november-15-2018/1.jpg) + +Nikolay Kochetov from Yandex ClickHouse team demonstrates recent features related to string processing optimization: +![Nikolay Kochetov from Yandex ClickHouse team](https://blog-images.clickhouse.tech/en/2018/clickhouse-meetup-in-amsterdam-on-november-15-2018/2.jpg) + +Konstantin Ignatov from Qrator Labs shares his experience in using ClickHouse as time-series database: +![Konstantin Ignatov from Qrator Labs](https://blog-images.clickhouse.tech/en/2018/clickhouse-meetup-in-amsterdam-on-november-15-2018/3.jpg) + +Aurimas Jacikevicius from Altinity demonstrates benchmark of ClickHouse against TimescaleDB and InfluxDB under time-series workload: +![Aurimas Jacikevicius from Altinity](https://blog-images.clickhouse.tech/en/2018/clickhouse-meetup-in-amsterdam-on-november-15-2018/4.jpg) + +Roy Brondgeest from Crobox showcases [ClickHouse Scala reactive client](https://github.com/crobox/clickhouse-scala-client) and it's bundled [DSL for query building](https://github.com/crobox/clickhouse-scala-client/wiki): +![Roy Brondgeest from Crobox](https://blog-images.clickhouse.tech/en/2018/clickhouse-meetup-in-amsterdam-on-november-15-2018/5.jpg) + +Alexey Milovidov from Yandex closes the meetup with talk about performance analysis of ClickHouse queries: +![Alexey Milovidov from Yandex](https://blog-images.clickhouse.tech/en/2018/clickhouse-meetup-in-amsterdam-on-november-15-2018/6.jpg) + diff --git 
a/website/blog/en/2018/concept-cloud-mergetree-tables.md b/website/blog/en/2018/concept-cloud-mergetree-tables.md
new file mode 100644
index 00000000000..9d4818ba8d7
--- /dev/null
+++ b/website/blog/en/2018/concept-cloud-mergetree-tables.md
@@ -0,0 +1,120 @@
+---
+title: 'Concept: "Cloud" MergeTree Tables'
+image: 'https://blog-images.clickhouse.tech/en/2018/concept-cloud-mergetree-tables/main.jpg'
+date: '2018-11-23'
+tags: ['concept', 'MergeTree', 'future', 'sharding']
+---
+
+The main property of cloud MergeTree tables is the absence of manual control over the sharding scheme of data on a cluster. The data in cloud tables is distributed across the cluster on its own, while at the same time preserving the locality property for a certain key.
+
+## Requirements
+
+1. Creating a cloud table makes it visible on all nodes of the cluster. No need to manually create a separate Distributed table and local tables on each node.
+2. When ingesting data into a cloud table, while the table is very small, the data is distributed across a few cluster servers, but as the data grows, more servers get involved (for example, starting from gigabytes per server). The user can create a small table and it should not be too cumbersome; and when creating a table, we do not know in advance how much data will be loaded into it.
+3. The user specifies a sharding key (an arbitrary tuple). Data for a sharding key range (in lexicographical order) is located on some servers. Very small ranges are located on only a few servers, and to access them it is enough to read data from a single server, while sufficiently large ranges are spread across all servers. For example, if we are talking about web analytics, the sharding key might start with CounterID, the website identifier. Data for a large site like https://yandex.ru should be spread across all servers in the cluster, while data for a small site should be located on only a few servers. Physical explanation: the cluster should scale to simultaneously provide throughput for heavy queries and handle high QPS of light queries, and for light queries, the latency should not suffer. In general, this is called data locality.
+4. The ability for heavy queries to use all the servers in the cluster, rather than 1 / N, where N is the replication coefficient. Thus, one server can contain multiple replicas of different shards.
+5. When replacing a server with an empty one (node recovery), the data restore must be parallelized in some way. At least the reads should be spread over different servers to avoid overloading individual servers.
+6. On each local server, reading a primary key range should touch neither too many file ranges nor too small ones (to minimize disk seeks).
+7. (Optional) The ability to use individual disks instead of RAID, while preserving throughput when reading medium-sized primary key ranges and preserving QPS when reading small-sized ranges.
+8. The ability to create multiple tables with a common sharding scheme (co-sharding).
+9. Rebalancing data when adding new servers; creating additional replicas when old servers are unavailable for a long time.
+10. SELECT queries should not require synchronous requests to the coordinator. No duplicates or missing data should be visible to SELECT queries during data rebalancing operations.
+11. SELECT queries must choose a large enough subset of servers, considering conditions on the sharding key and knowledge of the current sharding scheme.
+12. The ability to efficiently distribute data across servers with uneven available disk space.
+13. Atomicity of INSERT on a cluster.
+
+Out of scope (will not be considered here):
+
+1. Erasure data encoding for replication and recovery.
+2. Data storage on systems with different disks, HDD and SSD. An example is storing fresh data on an SSD.
+
+## General Considerations
+
+A similar problem is usually solved (in Map-Reduce or blob-storage systems) by organizing data in chunks. Chunks are located on the nodes of the cluster. The mappings table or file -> chunks and chunk -> nodes are stored in the master, which itself can be replicated. The master observes the liveness of nodes and maintains a reasonable replication level for all chunks.
+
+Difficulties arise when there are too many chunks: in this case, the master cannot cope with storing the metadata and with the load, and it becomes necessary to implement complicated metadata sharding.
+
+In our case, it may seem tempting to solve the problem in a similar way, where instead of a chunk, an instance of a MergeTree-type table containing a data range is used. Chunks in other systems are called “tablets” or “regions”. But there are many problems with this. The number of chunks on one server cannot be large, because then the property of minimizing the number of seeks when reading data ranges is violated. The problem also arises from the fact that each MergeTree table itself is rather cumbersome and consists of a large number of files. On the other hand, tables with a size of about one terabyte are more or less normal if the data locality property is maintained, that is, if several such tables on one server are only used for data ranges that are not too small.
+
+A variety of options can be used for sharding the data.
+One option is sharding according to some formula with a small number of parameters. Examples are simple hashing and consistent hashing (hash ring, rendezvous hashing, jump consistent hashing, sumbur). Practice in other systems shows that in its pure form this approach does not work well, because the sharding scheme is poorly controlled. It fits fine, for example, for caches, and it can also be used as part of another algorithm.
+
+The opposite option is dividing the data into shards using an explicitly specified table. The table may contain key ranges (or, alternatively, hash ranges of keys) and their corresponding servers. This gives a much greater degree of freedom in choosing when and how to transfer data. But at the same time, to scale the cluster, the table has to be dynamically expanded, breaking up the existing ranges.
+
+One of the combined options is a mapping made up of two parts: first, the set of all keys is divided into a pre-fixed number of “virtual shards” (you could also call them “logical shards” or “mini-shards”), not too few and not too many. This number is several times larger than the hypothetical cluster size in servers. Second, another mapping explicitly specifies the location of each mini-shard on the servers, and this second mapping can be controlled arbitrarily.
+
+The complexity of this approach is that partitioning by hash ranges gives uniformity but does not give data locality for range queries; whereas when splitting by key ranges, it is difficult to choose a uniform distribution in advance, since we do not know in advance how the data will be distributed over the keys. That is, the approach with a pre-fixed split into mini-shards does not work if data locality is required.
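For illustration only, here is a minimal, hypothetical C++ sketch (not ClickHouse code) of the two ingredients of the combined option above: jump consistent hashing stands in for the formula-based part, and a plain in-memory table stands in for the explicit “virtual shard -> replica servers” part. The shard count, server names, and sample keys are invented.

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

/// Formula-based placement: jump consistent hashing (Lamping & Veach).
/// Uniform and stable when buckets are added, but it destroys key order,
/// so range queries lose data locality.
int32_t jumpConsistentHash(uint64_t key, int32_t num_buckets)
{
    int64_t b = -1;
    int64_t j = 0;
    while (j < num_buckets)
    {
        b = j;
        key = key * 2862933555777941757ULL + 1;
        j = static_cast<int64_t>(
            (b + 1) * (double(1LL << 31) / double((key >> 33) + 1)));
    }
    return static_cast<int32_t>(b);
}

int main()
{
    /// Hypothetical combined scheme: a fixed pool of "virtual shards"
    /// (several times more than servers) plus an explicit, freely editable
    /// "virtual shard -> replica servers" table kept by a coordinator.
    const int32_t num_virtual_shards = 8;

    std::vector<std::vector<std::string>> shard_to_replicas(num_virtual_shards);
    for (int32_t shard = 0; shard < num_virtual_shards; ++shard)
        shard_to_replicas[shard] = {"server-" + std::to_string(shard % 4),
                                    "server-" + std::to_string((shard + 2) % 4)};

    /// Route a few keys: the formula picks a virtual shard, the table picks replicas.
    for (uint64_t user_id : {42ULL, 43ULL, 100000ULL})
    {
        int32_t shard = jumpConsistentHash(user_id, num_virtual_shards);
        std::cout << "UserID " << user_id << " -> virtual shard " << shard
                  << " -> replicas " << shard_to_replicas[shard][0]
                  << ", " << shard_to_replicas[shard][1] << '\n';
    }
    return 0;
}
```

Note that the hash step is exactly what destroys key order, so this variant gives uniformity but not the locality required above, which is why the text settles on key-range partitioning next.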
+
+It turns out that the only acceptable approach in our case is partitioning by key ranges, which can change dynamically (be repartitioned). At the same time, for more convenience, manageability, and uniformity of data distribution, the number of partitioning elements can be somewhat larger than the number of servers, and the mapping from partitioning elements to servers can be changed separately.
+
+## Possible Implementation
+
+Each ClickHouse server can participate in a certain cloud. The cloud is identified by a text string. The membership of a node in the cloud can be ensured by creating a certain type of database on the node (IDatabase). Thus, one node can be registered in several clouds. A registry of the nodes registered in the cloud is maintained in the coordinator.
+
+Cloud nodes are selected to accommodate the replicas of the shards of cloud tables. Each node also sends some additional information to the coordinator to guide this selection when placing data: the path that determines its locality in the network (for example, data center and rack), the amount of disk space, etc.
+
+A cloud table is created in the corresponding database registered in the cloud. The table can be created on any server and is visible in all databases registered in the cloud.
+
+A sharding key, an arbitrary tuple, is set for a cloud table at its creation. Sometimes it is practical for the sharding key to match the primary key (for example, (CounterID, Date, UserID)), sometimes it makes sense for it to be different (for example, a DateTime primary key and a UserID sharding key).
+
+Sharding is a composition of several mappings:
+
+1. The set of all possible sharding key tuples is mapped onto half-intervals that partition [0, 1). Initially the partition consists of a single element, so all values are mapped into one half-interval, the whole set [0, 1). Then, as the amount of data in the table grows, the half-intervals (the partition elements) can be divided approximately in half by the median of the distribution of values in lexicographical order.
+2. For each half-interval of the partition, several cloud servers are selected and remembered in some way; replicas of the corresponding data will be located on them. The choice is made based on the location of servers in the network (for example, at least two replicas in different data centers and all replicas in different racks), the number of replicas already created on each server (choose servers with the minimum), and the amount of free space (select the server with the maximum amount of free space).
+
+As a result, this composition forms a mapping from the sharding key to several replica servers.
+
+It is assumed that in the course of operation both parts of this mapping may change.
+
+The result of mapping 1 can be called a “virtual shard” or “logical shard”. In the course of operation, virtual shards can be divided in half. Going in the opposite direction is impossible: the number of virtual shards can only grow. It is assumed that even for tables occupying the entire cluster, the number of virtual shards will be several times larger than the number of servers (for example, 10 times the replication factor larger). Data ranges occupying at least a tenth of all data should be spread across all servers to ensure throughput for heavy queries. The mapping as a whole is specified by the set of boundary values for the sharding key.
This set is small (roughly kilobytes) and stored in the coordinator. + +The mapping of virtual shards on real servers can change arbitrarily: the number of replicas can increase when servers are not available for a long time or increase and then decrease to move replicas between servers. +## How to Satisfy All Requirements + +List items below correspond to the requirement numbers above: + +1. IDatabase synchronously goes to the coordinator to get or change the list of tables. The list of cloud tables is stored in the coordinator in the node corresponding to the cloud. That is, all the tables in the cloud are visible on each server entering the cloud. +2. It is ensured by the fact that initially the partition consists of a single element, but begins to break up further with increasing data volume. Each replica responsible for the local storage of this data can initiate the splitting, once the criterion for the data volume has been reached. Multiple replicas may decide to do this competitively, and the decision is made using atomic CAS. To have fewer problems, it is possible to randomize somewhat the moment of deciding repartition. The criterion when it is necessary to additionally break virtual shards turns out to be non-trivial. For example, you can break up to the number of servers * the replication rate quite soon, by growing a shard to several gigabytes. But it is already worth breaking shards even when shards are 1 / N in size from the server size (for example, around a terabyte). In coordinator, you should store the last and previous splits immediately and do not do the splitting too often. +3. It is ensured by the fact that the number of virtual shards will be several times (user-defined) more than the number of servers. Note: for additional data spreading, you can impose some spreading transformation on the sharding key. Not thought out. For example, instead of a key (CounterID, Date, UserID) use for sharding (hash (UserID)% 10, CounterID, Date, UserID). But in this case, even small CounterIDs will fall into 10 ranges. +4. Similarly. +5. If several virtual shards are located on a single server, their replicas will be spread over a larger number of servers, and during recovery, there will be more fanout. +6. Small requests will use one shard. While large requests will use several shards on the same server. But since each shard will be somewhat smaller, the data in the MergeTree table will probably be presented by a smaller set of parts. For example, we now have a maximum part size of 150 GiB, and for large tables, many such large chunks are formed in one partition. And if there are several tables, there will be a smaller number of large chunks in each. On the other hand, when inserting data, a larger number of small pieces will be generated on each server. And these small parts will cause an increase in the number of seeks. But not much, as the fresh data will be in the page cache. That is why too many virtual shards per server might not work well. +7. Pretty hard. You can have groups of neighboring shards on different disks of the same server. But then reading of medium size ranges will not be parallelized (since the whole range will be on one disk). In RAID, the problem is solved by the fact that the size of the chunk is relatively small (typically 1 megabyte). It would be possible to come up with a separate distribution of data in different pieces on different disks. But it is too difficult to design and implement carefully. 
Probably it's better not to do the whole thing and, as a minimum, make it so that on a JBOD server one disk is selected for the location of each shard.
+8. It is possible to identify the sharding scheme with a string, which may be common to different tables. The criterion for splitting shards is then determined based on the total amount of data for all tables with the same sharding scheme.
+9. It is solved completely by changing the mapping of virtual shards onto the servers. This mapping can be controlled independently of everything else.
+10. Servers can cache the sharding map (both parts of it) for a while and usually update it asynchronously. When rebalancing data due to the splitting of virtual shards, you should keep the old data for a longer time; similarly when transferring replicas between servers. Upon request, the initiator server also asks whether the remote server has the necessary data: data for the required shard according to the sharding scheme cached by the initiator server. For the query, one live replica of each shard is selected, on which the data is present. If suddenly there are none, it is worth updating the sharding map synchronously, since for some reason all the replicas have been transferred somewhere.
+11. It is trivial.
+12. It is solved on the basis that more than one shard accounts for one server and the fact that the distribution of shard replicas among servers is more or less arbitrary and can take into account the amount of disk space.
+
+## Issues
+
+To ingest data into a table, you can send an INSERT query to any server. The data will be divided into ranges and recorded on the desired servers. At the same time, it is synchronously ensured that we use a fresh sharding map: it is requested before the data is inserted, and it is checked that it is not out of date simultaneously with the commit in ZK.
+
+If a SELECT query uses an old sharding map, the latest data will not be visible. Therefore, the asynchronous update interval of the sharding map for SELECT should be made customizable, and an option should be added to synchronously use the latest sharding map.
+
+For fairly large tables, it turns out that an INSERT request breaks the data into many small pieces and writes to all servers (example: with 500 servers, you need to commit 5000 replicas of shards). This should work, since the probability of inaccessibility or slowness of all replicas of one shard is still low, but it will work slowly and, possibly, unstably. With a lot of INSERTs, there will be a terrible load on the coordinator, although it can withstand one INSERT per second normally. To achieve high throughput of INSERTs, it is sufficient to simply run them in parallel while keeping the overall frequency of INSERTs low. However, this is still a big problem.
+
+There are the following possible solutions:
+
+1. You can add something to the beginning of the sharding key. For example, Date % 10 or toMinute. Then INSERTs will touch fewer shards (in the typical case when recent data is inserted), but at the same time, during some time intervals, some shards will be hotter than others. This is fine if it reduces the number of active shards, for example, from 5000 per INSERT to 500, but it is also very inconvenient for users.
+2. You can come up with some kind of obscure sharding scheme, where fresh data first falls into a dedicated fresh shard, from which it is then lazily moved to its proper place. A fresh shard is essentially a distributed queue.
At the same time, the fresh shard always has to be queried on SELECT. Not so good. And it still contradicts the atomicity of these data transfers as visible to SELECT queries. Alternatively, you could relax the requirements if you allow SELECT not to see some of the fresh data.
+
+It looks like this generally does not work well at a cluster size of over 500 servers.
+
+Another problem is that to properly spread the ranges of the primary key, the number of virtual shards must be no less than the number of servers squared. And this is too much.
+
+## How to Get Around These Issues
+
+For sharding, you can add some more intermediate mappings. There are the following options:
+
+1. Splitting each shard into a set of shards in an arbitrary way, for example, into 10 pieces. This is equivalent to adding a random number 0..N-1 to the beginning of the sharding key, which carries no meaning. Then on INSERT you can insert into just one randomly selected shard, or the minimum-sized shard, or use some kind of round-robin; as a result, INSERT becomes easier. But this increases the fanout of all point SELECTs. For convenience, such a partition can be done dynamically: only large enough shards can be divided in such a way (this helps avoid excessive splitting of old shards in the case when the sharding key starts with Date and the data is inserted in Date order), or such a partition can be started only once the number of shards is large enough (a restriction on the fanout of INSERT requests).
+An additional advantage: in the case of servers with JBOD, it is possible to prefer placing such second-level shards on the disks of one server, which half emulates RAID-0.
+But there is a serious drawback: there is no way to do a local IN / JOIN. For example, this possibility is assumed if the sharding key is hash(UserID) and we do a JOIN by UserID. It would be possible to avoid this drawback by always placing all the “symmetric” shards on one server.
+2. A mapping that spreads the data while keeping the number of virtual shards. The essence of this mapping is as follows:
+ - The spreading factor is set, for example, `N = 10`. As the very first mapping, 10 times more ranges are generated. For example, if we want to end up with 7 shards, we divide the data into 70 ranges.
+ - Then these ranges are renumbered in a circle with numbers from 0..6, and the ranges with the same number fall into one shard; as a result, there are 7 shards again.
+ - The continuous analogue of this mapping: `x in [0, 1) -> fractional_part(x * N)`, multiplication by N on a circle (see the numeric sketch at the end of this post).
+
+If you draw it in Cartesian coordinates, you get a “saw” with 10 teeth.
+
+After this, it becomes obvious that this mapping simultaneously spreads the data and preserves its locality.
+
+See also: [Arnold's cat map](https://en.wikipedia.org/wiki/Arnold%27s_cat_map).
+
+But what is described here does not exactly work. First, until a sufficient amount of data has been accumulated, it is impossible to create a uniform division into parts (there is nothing to compute quantiles from). Secondly, with such a simple scheme, it is impossible to divide the intervals further.
+
+There is an option in which, instead of dividing a range in half, it is split into 4 parts, which are then mapped onto two shards. It is also not clear how this would work.
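To illustrate the spreading mapping from option 2 above, here is a small, hypothetical C++ sketch (not ClickHouse code); the spreading factor, shard count, and sample points are invented for illustration. It computes `fractional_part(x * N)` and the resulting shard for a few points in [0, 1).

```cpp
#include <cmath>
#include <cstdio>

int main()
{
    const int N = 10;                       /// spreading factor from the text
    const int num_shards = 7;               /// desired number of shards
    const int num_ranges = N * num_shards;  /// 70 fine-grained ranges

    /// Take the midpoint of each of the first 14 fine-grained ranges and
    /// show where the "saw" mapping sends it.
    for (int i = 0; i < 14; ++i)
    {
        double x = (i + 0.5) / num_ranges;              /// a point in [0, 1)
        double spread = x * N - std::floor(x * N);      /// fractional_part(x * N)
        int range = static_cast<int>(x * num_ranges);   /// fine-grained range 0..69
        int shard = range % num_shards;                 /// renumber "in a circle"
        std::printf("x = %.3f -> frac(x * N) = %.3f -> range %2d -> shard %d\n",
                    x, spread, range, shard);
    }
    return 0;
}
```

Points inside one fine-grained range stay together on one shard (locality at small scale), while consecutive ranges are dealt out to shards 0..6 in a circle, so a wide interval of x is spread over all shards.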
diff --git a/website/blog/en/2019/clickhouse-at-percona-live-2019.md b/website/blog/en/2019/clickhouse-at-percona-live-2019.md
new file mode 100644
index 00000000000..15a9973c0d0
--- /dev/null
+++ b/website/blog/en/2019/clickhouse-at-percona-live-2019.md
@@ -0,0 +1,38 @@
+---
+title: 'ClickHouse at Percona Live 2019'
+image: 'https://blog-images.clickhouse.tech/en/2019/clickhouse-at-percona-live-2019/main.jpg'
+date: '2019-06-04'
+tags: ['Percona Live','USA','Texas','Austin', 'events', 'conference']
+---
+
+This year's American episode of [Percona Live](https://www.percona.com/live/19/) took place in a nice waterfront location in Austin, TX, which welcomed open source database experts with pretty hot weather. The ClickHouse community is undeniably growing, and ClickHouse has become a common database product to give a talk about, or at least to compare or refer to, while just [two short years ago](../2017/clickhouse-at-percona-live-2017.md) it was more like “wth is ClickHouse?”.
+
+Alexey Rubin from VirtualHealth compared two column-oriented databases: ClickHouse and MariaDB ColumnStore. The bottom line was no surprise: ClickHouse is noticeably faster, while MariaDB is more familiar for MySQL users; the details were useful though.
+![Alexey Rubin from VirtualHealth](https://blog-images.clickhouse.tech/en/2019/clickhouse-at-percona-live-2019/1.jpg)
+
+Alexey Milovidov from Yandex has demonstrated how exactly ClickHouse became even faster in recent releases.
+![Alexey Milovidov from Yandex](https://blog-images.clickhouse.tech/en/2019/clickhouse-at-percona-live-2019/2.jpg)
+
+Alexander Zaitsev and Robert Hodges from Altinity have given an entry-level ClickHouse tutorial, which included loading a demo dataset and going through realistic queries against it, with some extra variations demonstrating possible query optimization techniques. [Slides](https://www.percona.com/live/19/sites/default/files/slides/Making%20HTAP%20Real%20with%20TiFlash%20--%20A%20TiDB%20Native%20Columnar%20Extension%20-%20FileId%20-%20174070.pdf). Altinity also sponsored the ClickHouse booth in the Expo Hall, which became an easy spot for people interested in ClickHouse to chat outside of the talks.
+![Alexander Zaitsev and Robert Hodges from Altinity](https://blog-images.clickhouse.tech/en/2019/clickhouse-at-percona-live-2019/3.jpg)
+
+Ruoxi Sun from PingCAP introduced TiFlash, a column-oriented add-on to TiDB for analytics based on ClickHouse source code. Basically, it provides a [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree/)-like table engine that is hooked up to TiDB replication and has an in-memory row-friendly cache for recent updates. Unfortunately, PingCAP has no plans to open source TiFlash at the moment. [Slides](https://www.percona.com/live/19/sites/default/files/slides/Making%20HTAP%20Real%20with%20TiFlash%20--%20A%20TiDB%20Native%20Columnar%20Extension%20-%20FileId%20-%20174070.pdf).
+![Ruoxi Sun from PingCAP](https://blog-images.clickhouse.tech/en/2019/clickhouse-at-percona-live-2019/4.jpg)
+
+ClickHouse was also covered in a talk by Jervin Real and Francisco Bordenave from Percona with an overview of moving and replicating data around MySQL-compatible storage solutions. [Slides](https://www.percona.com/live/19/sites/default/files/slides/Replicating%20MySQL%20Data%20to%20TiDB%20For%20Real-Time%20Analytics%20-%20FileId%20-%20187672.pdf).
+![Jervin Real](https://blog-images.clickhouse.tech/en/2019/clickhouse-at-percona-live-2019/5.jpg)
+
+ClickHouse represented columnar storage systems in a venture beyond relational databases presented by Marcos Albe from Percona.
+![Marcos Albe from Percona](https://blog-images.clickhouse.tech/en/2019/clickhouse-at-percona-live-2019/6.jpg)
+
+Jervin Real from Percona has demonstrated a real case study of applying ClickHouse in practice. It heavily involved manual partition manipulation; hopefully, the audience understood that this is an option, but not exactly a best practice for most use cases. [Slides](https://www.percona.com/live/19/sites/default/files/slides/Low%20Cost%20Transactional%20and%20Analytics%20With%20MySQL%20and%20Clickhouse,%20Have%20Your%20Cake%20and%20Eat%20It%20Too!%20-%20FileId%20-%20187674.pdf).
+![Jervin Real from Percona](https://blog-images.clickhouse.tech/en/2019/clickhouse-at-percona-live-2019/7.jpg)
+
+Evgeny Potapov from ITSumma went through modern options for time-series storage and once more confirmed that ClickHouse is leading the way in this field as well.
+![Evgeny Potapov from ITSumma](https://blog-images.clickhouse.tech/en/2019/clickhouse-at-percona-live-2019/8.jpg)
+
+The event location in the center of the US provided equal opportunities for people from the East and West Coasts to show up, but the presence of people from other countries was also quite noticeable. The content they all brought in was top notch as usual.
+![The venue](https://blog-images.clickhouse.tech/en/2019/clickhouse-at-percona-live-2019/9.jpg)
+
+Austin after the event.
+![Austin](https://blog-images.clickhouse.tech/en/2019/clickhouse-at-percona-live-2019/10.jpg)
diff --git a/website/blog/en/2019/clickhouse-lecture-at-institute-of-computing-technology-chinese-academy-of-science-on-june-11-2019.md b/website/blog/en/2019/clickhouse-lecture-at-institute-of-computing-technology-chinese-academy-of-science-on-june-11-2019.md
new file mode 100644
index 00000000000..ab55f746bdd
--- /dev/null
+++ b/website/blog/en/2019/clickhouse-lecture-at-institute-of-computing-technology-chinese-academy-of-science-on-june-11-2019.md
@@ -0,0 +1,17 @@
+---
+title: 'ClickHouse Lecture at Institute of Computing Technology, Chinese Academy of Science on June 11, 2019'
+image: 'https://blog-images.clickhouse.tech/en/2019/clickhouse-lecture-at-institute-of-computing-technology-chinese-academy-of-science-on-june-11-2019/main.jpg'
+tags: ['lecture', 'events', 'China', 'Beijing', 'university', 'academy', 'institute']
+date: '2019-06-14'
+---
+
+Alexey Milovidov, head of the ClickHouse development group at Yandex, has given an open two-part lecture at the [Institute of Computing Technology, Chinese Academy of Science](http://english.ict.cas.cn/):
+
+- ClickHouse history and the evolution of the Yandex.Metrica storage system
+- Internal implementation of ClickHouse and the reasoning behind design decisions
+
+The event was organised by [Amos Bird](https://github.com/amosbird), one of the most active ClickHouse community members and contributors, who is at the same time a final-year PhD student at this institution.
+
+Alexey with the event announcement:
+![Alexey with the event announcement](https://blog-images.clickhouse.tech/en/2019/clickhouse-lecture-at-institute-of-computing-technology-chinese-academy-of-science-on-june-11-2019/1.jpg)
+
diff --git a/website/blog/en/2019/clickhouse-meetup-in-beijing-on-june-8-2019.md b/website/blog/en/2019/clickhouse-meetup-in-beijing-on-june-8-2019.md
new file mode 100644
index 00000000000..ace5967736c
--- /dev/null
+++ b/website/blog/en/2019/clickhouse-meetup-in-beijing-on-june-8-2019.md
@@ -0,0 +1,35 @@
+---
+title: 'ClickHouse Meetup in Beijing on June 8, 2019'
+image: 'https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-beijing-on-june-8-2019/main.jpg'
+date: '2019-06-13'
+tags: ['meetup','Beijing','China','events']
+---
+
+The 24th ClickHouse Meetup globally and the 3rd one in China took place in Beijing on the Dragon Boat Festival weekend, which turned out to have a rich history and to be a popular opportunity for Chinese people to travel around the country. Nevertheless, the ClickHouse Meetup venue, this time kindly provided by Gaea Mobile, was more than full as usual, with hundreds more people watching the live broadcast online. The Yandex ClickHouse team used this trip extensively as an opportunity to strengthen the bond with the ClickHouse community in China, by also giving an [open lecture at the Institute of Computing Technology, Chinese Academy of Science](clickhouse-lecture-at-institute-of-computing-technology-chinese-academy-of-science-on-june-11-2019.md) and by having private conversations with the most active local corporate users, including ByteDance and JD.com.
+
+Welcome word by William Kwok, CTO of Analysys, who played the key role in making this particular event possible and also in the establishment of the ClickHouse community in China:
+![William Kwok, CTO of Analysys](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-beijing-on-june-8-2019/1.jpg)
+
+He also administers the ClickHouse WeChat groups, feel free to ask him for an invite (@guodaxia2999 on WeChat):
+![@guodaxia2999 at WeChat](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-beijing-on-june-8-2019/2.jpg)
+
+Alexey Milovidov from the ClickHouse core development team at Yandex opened the content part of the main event with an overview of new features and the roadmap:
+![Alexey Milovidov](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-beijing-on-june-8-2019/3.jpg)
+
+Amos Bird, one of the most active ClickHouse contributors both in China and worldwide, shares his experience of using ClickHouse for graph processing ([slides](https://github.com/ClickHouse/clickhouse-presentations/raw/master/meetup24/2.%20SQLGraph%20--%20When%20ClickHouse%20marries%20graph%20processing%20Amoisbird.pdf)):
+![Amos Bird](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-beijing-on-june-8-2019/4.jpg)
+
+Yan Gangqiang from Golden Data shares details of their approach to data storage for a surveys system based on ClickHouse ([slides](https://presentations.clickhouse.tech/meetup24/3.%20金数据数据架构调整方案Public.pdf)):
+![Yan Gangqiang](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-beijing-on-june-8-2019/5.jpg)
+
+ClickHouse for beginners talk by Percent ([slides](https://presentations.clickhouse.tech/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf)):
+![Percent](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-beijing-on-june-8-2019/6.jpg)
+
+ClickHouse core developer Nikolay Kochetov demonstrates upcoming query execution pipeline changes
([slides](https://presentations.clickhouse.tech/meetup24/5.%20Clickhouse%20query%20execution%20pipeline%20changes/)): +![Nikolay Kochetov](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-beijing-on-june-8-2019/7.jpg) + +Pre-meetup meeting with active ClickHouse community members in China: +![Pre-meetup meeting](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-beijing-on-june-8-2019/8.jpg) + +ClickHouse branded Beijing duck :) +![ClickHouse branded Beijing duck](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-beijing-on-june-8-2019/9.jpg) diff --git a/website/blog/en/2019/clickhouse-meetup-in-limassol-on-may-7-2019.md b/website/blog/en/2019/clickhouse-meetup-in-limassol-on-may-7-2019.md new file mode 100644 index 00000000000..71176ab3d47 --- /dev/null +++ b/website/blog/en/2019/clickhouse-meetup-in-limassol-on-may-7-2019.md @@ -0,0 +1,41 @@ +--- +title: 'ClickHouse Meetup in Limassol on May 7, 2019' +image: 'https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-limassol-on-may-7-2019/main.jpg' +date: '2019-05-14' +tags: ['meetup', 'Cyprus', 'Limassol', 'events'] +--- + +The first open-air ClickHouse Meetup took place in the heart of Limassol, the second-largest city of Cyprus, on the roof kindly provided by Exness Group. The views were stunning, but speakers did a great job competing with them for audience attention. Over one hundred people have joined in, which once again confirms high interest in ClickHouse around the globe. Meetup content is also available as [video recording](https://www.youtube.com/watch?v=_rpU-TvSfZ8). + +![Intro](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-limassol-on-may-7-2019/1.jpg) + +[Kirill Shvakov](https://github.com/kshvakov) has played the key role in making this event possible by reaching out to the ClickHouse Community at Cyprus, finding the great venue, and other speakers. Most of the worldwide ClickHouse Meetups happen thanks to active community members like Kirill, if you want to help us organize ClickHouse Meetup in your area, please reach the Yandex ClickHouse team via [this form](https://clickhouse.tech/#meet) or any other convenient way. + +![Kirill Shvakov](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-limassol-on-may-7-2019/2.jpg) + +Kirill is well known for his top-notch [ClickHouse Go Driver](https://github.com/clickhouse/clickhouse-go) running over native protocol, but his opening talk was about his experience optimizing ClickHouse queries and solving real-world tasks at Integros and Wisebits. [Slides](https://presentations.clickhouse.tech/meetup22/strategies.pdf). [Full query listings](https://github.com/kshvakov/ClickHouse-Meetup-Exness). + +The event has begun in the early evening... +![Evening in Limassol](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-limassol-on-may-7-2019/3.jpg) + +...but it took just around one hour for nature to turn the night mode on. It made the projected slides easier to read. 
+![Night in Limassol](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-limassol-on-may-7-2019/4.jpg) + +Sergey Tomilov with his colleagues from the Exness Platform Team has shared details on the evolution of their systems for analyzing logs and metrics and how they ended up relying on ClickHouse for long-term storage ([slides](https://presentations.clickhouse.tech/meetup22/exness.pdf)): +![Sergey Tomilov](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-limassol-on-may-7-2019/5.jpg) + +Alexey Milovidov from the Yandex ClickHouse team has demonstrated features from recent ClickHouse releases and gave an update on what's coming soon ([slides](https://presentations.clickhouse.tech/meetup22/new_features/)): +![Alexey Milovidov](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-limassol-on-may-7-2019/6.jpg) + +Alexander Zaitsev, CTO of Altinity, have shown an overview of how to integrate ClickHouse into environments running on Kubernetes ([slides](https://presentations.clickhouse.tech/meetup22/kubernetes.pdf)): +![Alexander Zaitsev, CTO of Altinity](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-limassol-on-may-7-2019/7.jpg) + +Vladimir Goncharov, a backend engineer from Aloha Browser, has closed the ClickHouse Limassol Meetup by demonstrating few projects that allow integrating other opensource tools for logs processing with ClickHouse ([slides](https://presentations.clickhouse.tech/meetup22/aloha.pdf)): +![Vladimir Goncharov](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-limassol-on-may-7-2019/8.jpg) + +Unfortunately, midnight was closing in and only the most weather-proof ClickHouse fans have managed to stay the whole event as it started getting pretty chilly. + +![Final](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-limassol-on-may-7-2019/9.jpg) + +More photos from the event are available at [short event afterword by Exness](https://www.facebook.com/events/386638262181785/permalink/402167077295570/). + diff --git a/website/blog/en/2019/clickhouse-meetup-in-madrid-on-april-2-2019.md b/website/blog/en/2019/clickhouse-meetup-in-madrid-on-april-2-2019.md new file mode 100644 index 00000000000..6141b4c2fd5 --- /dev/null +++ b/website/blog/en/2019/clickhouse-meetup-in-madrid-on-april-2-2019.md @@ -0,0 +1,28 @@ +--- +title: 'ClickHouse Meetup in Madrid on April 2, 2019' +image: 'https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-madrid-on-april-2-2019/main.jpg' +date: '2019-04-11' +tags: ['meetup', 'Spain', 'Madrid', 'events'] +--- + +Madrid ClickHouse Meetup has probably been the largest one in the EU so far with well over one hundred attendees. As usual, we've seen not only people working and living in the same city, Madrid, but also many people who have traveled a long way to join the event and talk about ClickHouse use cases and learn about new and upcoming features. 
+
+Opening word by [Javi Santana](https://twitter.com/javisantana), who personally made this event possible by gathering up all the people and setting up the venue provided by Google Campus for Startups:
+![Javi Santana](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-madrid-on-april-2-2019/1.jpg)
+
+Alexander Zaitsev, CTO of Altinity, introduced ClickHouse to those who are just starting to use it or only considering it for the future ([slides](https://www.slideshare.net/Altinity/clickhouse-introduction-by-alexander-zaitsev-altinity-cto)):
+![Alexander Zaitsev](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-madrid-on-april-2-2019/2.jpg)
+
+Robert Hodges, CEO of Altinity, has probably traveled the longest distance to join the event, since he's based in California; he has also [published his thoughts on this event in the Altinity blog](https://www.altinity.com/blog/2019/4/9/madrid-clickhouse-meetup-summary).
+
+Alexey Milovidov from Yandex has shown the recent advancements in ClickHouse features and briefly walked the audience through the current roadmap ([slides](https://presentations.clickhouse.tech/meetup21/new_features/)):
+![Alexey Milovidov from Yandex](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-madrid-on-april-2-2019/3.jpg)
+
+Iago Enriquez from Idealista talked about their migration from a “legacy” commercial DBMS to ClickHouse. It was the first time we've heard someone talk about using two flagship open-source products by Yandex together in production: they are using [CatBoost](https://catboost.ai/) model inference right from ClickHouse SQL queries to fill in the incompleteness of their source data. Unfortunately, the slides of Iago's talk were not allowed to be published.
+![Iago Enriquez from Idealista](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-madrid-on-april-2-2019/4.jpg)
+
+David Pardo Villaverde from Corunet gave a talk about how they've used ClickHouse to prepare data for dense model generation for one of their clients. It took a pretty short time on a single server. Fun quote from the conclusions: “If I wasn't already married, I'd marry it! \[ClickHouse\]” ([slides](https://presentations.clickhouse.tech/meetup21/predictive_models.pdf)):
+![David Pardo Villaverde from Corunet](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-madrid-on-april-2-2019/5.jpg)
+
+The closing talk of the meetup was by Murat Kabilov from Adjust GmbH, who demonstrated his open-source project [pg2ch](https://github.com/mkabilov/pg2ch) that allows syncing data from PostgreSQL to ClickHouse in real time ([slides](https://presentations.clickhouse.tech/meetup21/postgres_to_clickhouse.pdf)).
+![Murat Kabilov from Adjust GmbH](https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-madrid-on-april-2-2019/6.jpg)
diff --git a/website/blog/en/2019/clickhouse-meetup-in-san-francisco-on-june-4-2019.md b/website/blog/en/2019/clickhouse-meetup-in-san-francisco-on-june-4-2019.md
new file mode 100644
index 00000000000..94ad125b71b
--- /dev/null
+++ b/website/blog/en/2019/clickhouse-meetup-in-san-francisco-on-june-4-2019.md
@@ -0,0 +1,10 @@
+---
+title: 'ClickHouse Meetup in San Francisco on June 4, 2019'
+image: 'https://blog-images.clickhouse.tech/en/2019/clickhouse-meetup-in-san-francisco-on-june-4-2019/main.jpg'
+date: '2019-06-12'
+tags: ['meetup','USA','San Francisco','events', 'California', 'Bay Area']
+---
+
+The 23rd ClickHouse Meetup in San Francisco was held in the CloudFlare office and co-organized by Altinity.
There were about 35 attendees, most of them experienced ClickHouse users from SF and the Bay Area. The meetup started with an introduction by Robert Hodges, Altinity CEO, and continued with a lightning talk by Alan Braithwaite from Segment.com about their experience with ClickHouse. The next talk, by Alexander Zaitsev about the ClickHouse operator for Kubernetes, gained much attention from the audience, because Kubernetes is in fact very popular even for databases. At the end there was a presentation by ClickHouse developer Alexey Milovidov about new and upcoming features, with a roadmap. There was a discussion about the implementation and design details of the most appreciated features. We were happy to meet ClickHouse contributors at the meetup. Slides from the event are [available on GitHub](https://github.com/clickhouse/clickhouse-presentations/tree/master/meetup23).
+
+As we see increasing demand for ClickHouse events in SF and the Bay Area, we have already started planning the next event.
diff --git a/website/blog/en/2019/how-to-speed-up-lz4-decompression-in-clickhouse.md b/website/blog/en/2019/how-to-speed-up-lz4-decompression-in-clickhouse.md
new file mode 100644
index 00000000000..b5799702d04
--- /dev/null
+++ b/website/blog/en/2019/how-to-speed-up-lz4-decompression-in-clickhouse.md
@@ -0,0 +1,12 @@
+---
+title: 'How to speed up LZ4 decompression in ClickHouse?'
+image: 'https://blog-images.clickhouse.tech/en/2019/how-to-speed-up-lz4-decompression-in-clickhouse/main.jpg'
+date: '2019-06-25'
+tags: ['performance', 'lz4', 'article', 'decompression']
+---
+
+When you run queries in [ClickHouse](https://clickhouse.tech/), you might notice that the profiler often shows the `LZ_decompress_fast` function near the top. What is going on? This question had us wondering how to choose the best compression algorithm.
+
+ClickHouse stores data in compressed form. When running queries, ClickHouse tries to do as little as possible, in order to conserve CPU resources. In many cases, all the potentially time-consuming computations are already well optimized, plus the user wrote a well thought-out query. Then all that's left to do is to perform decompression.
+
+[Read further](https://habr.com/en/company/yandex/blog/457612/)
diff --git a/website/blog/en/2019/schedule-of-clickhouse-meetups-in-china-for-2019.md b/website/blog/en/2019/schedule-of-clickhouse-meetups-in-china-for-2019.md
new file mode 100644
index 00000000000..726d714b765
--- /dev/null
+++ b/website/blog/en/2019/schedule-of-clickhouse-meetups-in-china-for-2019.md
@@ -0,0 +1,14 @@
+---
+title: 'Schedule of ClickHouse Meetups in China for 2019'
+image: 'https://blog-images.clickhouse.tech/en/2019/schedule-of-clickhouse-meetups-in-china-for-2019/main.jpg'
+date: '2019-04-18'
+tags: ['China', 'Beijing', 'Shanghai', 'Shenzhen', 'announcement', 'meetup']
+---
+
+Last year there were two ClickHouse Meetups in Beijing, in [January](../2018/clickhouse-community-meetup-in-beijing-on-january-27-2018.md) and [October](../2018/clickhouse-community-meetup-in-beijing-on-october-28-2018.md), and they appeared to be in extremely high demand, with a fully packed venue and thousands of people watching online. So this year we decided to try to expand the meetups to other large cities in China where we see the most interest in ClickHouse based on website visits. Here's the current schedule and sign-up pages:
+
+- [ClickHouse Community Meetup in Beijing](https://www.huodongxing.com/event/2483759276200) on June 8.
+- [ClickHouse Community Meetup in Shenzhen](https://www.huodongxing.com/event/3483759917300) on October 20. +- [ClickHouse Community Meetup in Shanghai](https://www.huodongxing.com/event/4483760336000) on October 27. + +到时候那里见! diff --git a/website/blog/en/2020/five-methods-for-database-obfuscation.md b/website/blog/en/2020/five-methods-for-database-obfuscation.md new file mode 100644 index 00000000000..9a6615b0079 --- /dev/null +++ b/website/blog/en/2020/five-methods-for-database-obfuscation.md @@ -0,0 +1,10 @@ +--- +title: 'Five Methods For Database Obfuscation' +image: 'https://blog-images.clickhouse.tech/en/2020/five-methods-for-database-obfuscation/main.jpg' +date: '2020-01-27' +tags: ['article', 'obfuscation'] +--- + +ClickHouse users already know that its biggest advantage is its high-speed processing of analytical queries. But claims like this need to be confirmed with reliable performance testing. + +[Read further](https://habr.com/en/company/yandex/blog/485096/) diff --git a/website/blog/en/index.md b/website/blog/en/index.md new file mode 100644 index 00000000000..227a69408dc --- /dev/null +++ b/website/blog/en/index.md @@ -0,0 +1,3 @@ +--- +is_index: true +--- diff --git a/website/blog/en/redirects.txt b/website/blog/en/redirects.txt new file mode 100644 index 00000000000..80a57d38ebc --- /dev/null +++ b/website/blog/en/redirects.txt @@ -0,0 +1,32 @@ +clickhouse-meetup-in-berlin-october-5-2017.md 2017/clickhouse-meetup-in-berlin-october-5-2017.md +clickhouse-meetup-at-berlin-october-5-2017.md 2017/clickhouse-meetup-in-berlin-october-5-2017.md +clickhouse-meetup-in-santa-clara-may-4-2017.md 2017/clickhouse-meetup-in-santa-clara-may-4-2017.md +clickhouse-meetup-at-santa-clara-may-4-2017.md 2017/clickhouse-meetup-in-santa-clara-may-4-2017.md +clickhouse-community-meetup-in-berlin-on-july-3.md 2018/announcing-clickhouse-community-meetup-in-berlin-on-july-3.md +evolution-of-data-structures-in-yandex-metrica.md 2016/evolution-of-data-structures-in-yandex-metrica.md +how-to-update-data-in-clickhouse.md 2016/how-to-update-data-in-clickhouse.md +yandex-opensources-clickhouse.md 2016/yandex-opensources-clickhouse.md +clickhouse-at-data-scale-2017.md 2017/clickhouse-at-data-scale-2017.md +clickhouse-meetup-in-berlin-october-5-2017.md 2017/clickhouse-meetup-in-berlin-october-5-2017.md +join-the-clickhouse-meetup-in-berlin.md 2017/join-the-clickhouse-meetup-in-berlin.md +clickhouse-at-percona-live-2017.md 2017/clickhouse-at-percona-live-2017.md +clickhouse-meetup-in-santa-clara-may-4-2017.md 2017/clickhouse-meetup-in-santa-clara-may-4-2017.md +announcing-clickhouse-meetup-in-amsterdam-on-november-15.md 2018/announcing-clickhouse-meetup-in-amsterdam-on-november-15.md +clickhouse-community-meetup-in-berlin-on-july-3-2018.md 2018/clickhouse-community-meetup-in-berlin-on-july-3-2018.md +clickhouse-at-analysys-a10-2018.md 2018/clickhouse-at-analysys-a10-2018.md +clickhouse-community-meetup-in-berlin-on-july-3.md 2018/clickhouse-community-meetup-in-berlin-on-july-3.md +clickhouse-community-meetup-in-paris-on-october-2-2018.md 2018/clickhouse-community-meetup-in-paris-on-october-2-2018.md +clickhouse-community-meetup-in-beijing-on-october-28-2018.md 2018/clickhouse-community-meetup-in-beijing-on-october-28-2018.md +clickhouse-meetup-in-amsterdam-on-november-15-2018.md 2018/clickhouse-meetup-in-amsterdam-on-november-15-2018.md +clickhouse-community-meetup-in-beijing-on-january-27-2018.md 2018/clickhouse-community-meetup-in-beijing-on-january-27-2018.md +clickhouse-at-percona-live-europe-2018.md 
2018/clickhouse-at-percona-live-europe-2018.md +concept-cloud-mergetree-tables.md 2018/concept-cloud-mergetree-tables.md +clickhouse-meetup-in-limassol-on-may-7-2019.md 2019/clickhouse-meetup-in-limassol-on-may-7-2019.md +schedule-of-clickhouse-meetups-in-china-for-2019.md 2019/schedule-of-clickhouse-meetups-in-china-for-2019.md +clickhouse-lecture-at-institute-of-computing-technology-chinese-academy-of-science-on-june-11-2019.md 2019/clickhouse-lecture-at-institute-of-computing-technology-chinese-academy-of-science-on-june-11-2019.md +clickhouse-meetup-in-san-francisco-on-june-4-2019.md 2019/clickhouse-meetup-in-san-francisco-on-june-4-2019.md +how-to-speed-up-lz4-decompression-in-clickhouse.md 2019/how-to-speed-up-lz4-decompression-in-clickhouse.md +clickhouse-at-percona-live-2019.md 2019/clickhouse-at-percona-live-2019.md +clickhouse-meetup-in-madrid-on-april-2-2019.md 2019/clickhouse-meetup-in-madrid-on-april-2-2019.md +clickhouse-meetup-in-beijing-on-june-8-2019.md 2019/clickhouse-meetup-in-beijing-on-june-8-2019.md +five-methods-for-database-obfuscation.md 2020/five-methods-for-database-obfuscation.md diff --git a/website/blog/ru/2016/clickhouse-meetup-v-moskve-21-noyabrya-2016.md b/website/blog/ru/2016/clickhouse-meetup-v-moskve-21-noyabrya-2016.md new file mode 100644 index 00000000000..71fb8da8215 --- /dev/null +++ b/website/blog/ru/2016/clickhouse-meetup-v-moskve-21-noyabrya-2016.md @@ -0,0 +1,8 @@ +--- +title: 'ClickHouse Meetup в Москве, 21 ноября 2016' +image: 'https://blog-images.clickhouse.tech/ru/2016/clickhouse-meetup-v-moskve-21-noyabrya-2016/main.jpg' +date: '2016-11-22' +tags: ['мероприятия', 'meetup', 'Москва'] +--- + +[Посмотреть видео](https://events.yandex.ru/lib/talks/4351/) diff --git a/website/blog/ru/2016/clickhouse-na-highload-2016.md b/website/blog/ru/2016/clickhouse-na-highload-2016.md new file mode 100644 index 00000000000..fb950db35b0 --- /dev/null +++ b/website/blog/ru/2016/clickhouse-na-highload-2016.md @@ -0,0 +1,14 @@ +--- +title: 'ClickHouse на HighLoad++ 2016' +image: 'https://blog-images.clickhouse.tech/ru/2016/clickhouse-na-highload-2016/main.jpg' +date: '2016-12-10' +tags: ['мероприятия', 'конференции', 'Москва', 'HighLoad++'] +--- + +![iframe](https://www.youtube.com/embed/TAiCXHgZn50) + +[Расшифровка доклада](https://habrahabr.ru/post/322724/) + +![iframe](https://www.youtube.com/embed/tf38TPvwjJ4) + +[Расшифровка доклада](https://habrahabr.ru/post/322620/) diff --git a/website/blog/ru/2016/clickhouse-na-vstreche-pro-infrastrukturu-khraneniya-i-obrabotki-dannykh-v-yandekse.md b/website/blog/ru/2016/clickhouse-na-vstreche-pro-infrastrukturu-khraneniya-i-obrabotki-dannykh-v-yandekse.md new file mode 100644 index 00000000000..6404ee0465a --- /dev/null +++ b/website/blog/ru/2016/clickhouse-na-vstreche-pro-infrastrukturu-khraneniya-i-obrabotki-dannykh-v-yandekse.md @@ -0,0 +1,10 @@ +--- +title: 'ClickHouse на встрече про инфраструктуру хранения и обработки данных в Яндексе' +image: 'https://blog-images.clickhouse.tech/ru/2016/clickhouse-na-vstreche-pro-infrastrukturu-khraneniya-i-obrabotki-dannykh-v-yandekse/main.jpg' +date: '2016-10-16' +tags: ['мероприятия', 'инфраструктура'] +--- + +![iframe](https://www.youtube.com/embed/Ho4_dQk7dAg) + +[Страница мероприятия «Яндекс изнутри: инфраструктура хранения и обработки данных»](https://events.yandex.ru/events/meetings/15-oct-2016/), прошедшего 15 октября 2016 года. 
diff --git a/website/blog/ru/2016/yandeks-otkryvaet-clickhouse.md b/website/blog/ru/2016/yandeks-otkryvaet-clickhouse.md new file mode 100644 index 00000000000..36daa047ce7 --- /dev/null +++ b/website/blog/ru/2016/yandeks-otkryvaet-clickhouse.md @@ -0,0 +1,10 @@ +--- +title: 'Яндекс открывает ClickHouse' +image: 'https://blog-images.clickhouse.tech/ru/2016/yandeks-otkryvaet-clickhouse/main.jpg' +date: '2016-06-15' +tags: ['анонс', 'GitHub', 'лицензия'] +--- + +Сегодня внутренняя разработка компании Яндекс — [аналитическая СУБД ClickHouse](https://clickhouse.tech/), стала доступна каждому. Исходники опубликованы на [GitHub](https://github.com/yandex/ClickHouse) под лицензией Apache 2.0. + +ClickHouse позволяет выполнять аналитические запросы в интерактивном режиме по данным, обновляемым в реальном времени. Система способна масштабироваться до десятков триллионов записей и петабайт хранимых данных. Использование ClickHouse открывает возможности, которые раньше было даже трудно представить: вы можете сохранять весь поток данных без предварительной агрегации и быстро получать отчёты в любых разрезах. ClickHouse разработан в Яндексе для задач [Яндекс.Метрики](https://metrika.yandex.ru/) — второй по величине системы веб-аналитики в мире. diff --git a/website/blog/ru/2017/clickhouse-meetup-edet-v-minsk.md b/website/blog/ru/2017/clickhouse-meetup-edet-v-minsk.md new file mode 100644 index 00000000000..fb84a16c02a --- /dev/null +++ b/website/blog/ru/2017/clickhouse-meetup-edet-v-minsk.md @@ -0,0 +1,14 @@ +--- +title: 'ClickHouse MeetUp едет в Минск!' +image: 'https://blog-images.clickhouse.tech/ru/2017/clickhouse-meetup-edet-v-minsk/main.jpg' +date: '2017-06-13' +tags: ['мероприятия', 'meetup', 'Минск', 'Беларусь', 'анонс'] +--- + +29 июня в Минске впервые выступят с докладами создатели СУБД ClickHоuse и те, кто ежедневно использует её для решения аналитических задач. Докладчики расскажут о последних изменениях и предстоящих обновлениях СУБД, а также о нюансах работы с ней. + +Встреча будет интересна администраторам ClickHouse и тем, кто пока только присматривается к системе. Мы приглашаем белорусских пользователей также поделиться своим опытом использования ClickHоuse и выступить на встрече с блиц-докладами: при регистрации мы предложим вам такую возможность! + +Участие в мероприятии бесплатное, но необходимо заранее зарегистрироваться: количество мест в зале ограничено. + +Посмотреть программу и подать заявку на участие можно на [странице встречи](https://events.yandex.ru/events/meetings/29-june-2017). 
diff --git a/website/blog/ru/2017/clickhouse-meetup-v-ekaterinburge-16-maya-2017.md b/website/blog/ru/2017/clickhouse-meetup-v-ekaterinburge-16-maya-2017.md new file mode 100644 index 00000000000..80d399203b0 --- /dev/null +++ b/website/blog/ru/2017/clickhouse-meetup-v-ekaterinburge-16-maya-2017.md @@ -0,0 +1,8 @@ +--- +title: 'ClickHouse Meetup в Екатеринбурге, 16 мая 2017' +image: 'https://blog-images.clickhouse.tech/ru/2017/clickhouse-meetup-v-ekaterinburge-16-maya-2017/main.jpg' +date: '2017-05-17' +tags: ['мероприятия', 'meetup', 'Екатеринбург'] +--- + +[Посмотреть презентацию](https://presentations.clickhouse.tech/meetup6/) diff --git a/website/blog/ru/2017/clickhouse-meetup-v-minske-itogi.md b/website/blog/ru/2017/clickhouse-meetup-v-minske-itogi.md new file mode 100644 index 00000000000..de38df47af3 --- /dev/null +++ b/website/blog/ru/2017/clickhouse-meetup-v-minske-itogi.md @@ -0,0 +1,16 @@ +--- +title: 'ClickHouse MeetUp в Минске: итоги' +image: 'https://blog-images.clickhouse.tech/ru/2017/clickhouse-meetup-v-minske-itogi/main.jpg' +date: '2017-06-19' +tags: ['мероприятия', 'meetup', 'Минск', 'Беларусь'] +--- + +Недавно в Минске мы встретились с пользователями ClickHouse и техническими специалистами, кто только знакомится с СУБД. + +Мы делимся с вами презентациями докладчиков и будем рады ответить на вопросы в [чате ClickHouse в Телеграме](https://t.me/clickhouse_ru). + +[История создания ClickHouse, новости и планы по развитию](https://presentations.clickhouse.tech/meetup7/), Алексей Миловидов + +[Использование ClickHouse для мониторинга связности сети](https://presentations.clickhouse.tech/meetup7/netmon.pdf), Дмитрий Липин + +[Разбираемся во внутреннем устройстве ClickHouse](https://presentations.clickhouse.tech/meetup7/internals.pdf), Виталий Людвиченко diff --git a/website/blog/ru/2017/clickhouse-meetup-v-novosibirske-3-aprelya-2017.md b/website/blog/ru/2017/clickhouse-meetup-v-novosibirske-3-aprelya-2017.md new file mode 100644 index 00000000000..e4a614befad --- /dev/null +++ b/website/blog/ru/2017/clickhouse-meetup-v-novosibirske-3-aprelya-2017.md @@ -0,0 +1,10 @@ +--- +title: 'ClickHouse Meetup в Новосибирске, 3 апреля 2017' +image: 'https://blog-images.clickhouse.tech/ru/2017/clickhouse-meetup-v-novosibirske-3-aprelya-2017/main.jpg' +date: '2017-04-04' +tags: ['мероприятия', 'meetup', 'Новосибирск'] +--- + +[Презентация Алексея Миловидова](https://presentations.clickhouse.tech/meetup4/) + +[Презентация Марии Мансуровой](https://presentations.clickhouse.tech/meetup4/clickhouse_for_analysts.pdf) diff --git a/website/blog/ru/2017/clickhouse-meetup-v-sankt-peterburge-28-fevralya-2017.md b/website/blog/ru/2017/clickhouse-meetup-v-sankt-peterburge-28-fevralya-2017.md new file mode 100644 index 00000000000..3bdfd2763b8 --- /dev/null +++ b/website/blog/ru/2017/clickhouse-meetup-v-sankt-peterburge-28-fevralya-2017.md @@ -0,0 +1,8 @@ +--- +title: 'ClickHouse Meetup в Санкт-Петербурге, 28 февраля 2017' +image: 'https://blog-images.clickhouse.tech/ru/2017/clickhouse-meetup-v-sankt-peterburge-28-fevralya-2017/main.jpg' +date: '2017-03-01' +tags: ['мероприятия', 'meetup', 'Санкт-Петербург'] +--- + +![iframe](https://www.youtube.com/embed/CVrwp4Zoex4) diff --git a/website/blog/ru/2017/clickhouse-na-uwdc-2017.md b/website/blog/ru/2017/clickhouse-na-uwdc-2017.md new file mode 100644 index 00000000000..7b801181803 --- /dev/null +++ b/website/blog/ru/2017/clickhouse-na-uwdc-2017.md @@ -0,0 +1,10 @@ +--- +title: 'ClickHouse на UWDC 2017' +image: 
'https://blog-images.clickhouse.tech/ru/2017/clickhouse-na-uwdc-2017/main.jpg' +date: '2017-05-20' +tags: ['мероприятия', 'конференции', 'Челябинск'] +--- + +![iframe](https://www.youtube.com/embed/isYA4e5zg1M?t=2h8m15s) + +[Посмотреть презентацию](https://presentations.clickhouse.tech/uwdc/) diff --git a/website/blog/ru/2019/clickhouse-meetup-v-limassole-7-maya-2019.md b/website/blog/ru/2019/clickhouse-meetup-v-limassole-7-maya-2019.md new file mode 100644 index 00000000000..38e697d6b4c --- /dev/null +++ b/website/blog/ru/2019/clickhouse-meetup-v-limassole-7-maya-2019.md @@ -0,0 +1,38 @@ +--- +title: 'ClickHouse Meetup в Лимассоле, 7 мая 2019' +image: 'https://blog-images.clickhouse.tech/ru/2019/clickhouse-meetup-v-limassole-7-maya-2019/main.jpg' +date: '2019-05-14' +tags: ['мероприятия', 'meetup', 'Лимассол', 'Кипр', 'Европа'] +--- + +Первый ClickHouse Meetup под открытым небом прошел в сердце Лимассола, второго по размеру города Кипра, на крыше, любезно предоставленной Exness Group. С крыши открывались сногсшибательные виды, но докладчики отлично справлялись с конкуренцией с ними за внимание аудитории. Более ста человек присоединились к мероприятию, что в очередной раз подтверждает высокий интерес к ClickHouse по всему земному шару. Контент мероприятия также доступен в формате [видеозаписи](https://www.youtube.com/watch?v=_rpU-TvSfZ8). + +[Кирилл Шваков](https://github.com/kshvakov) сыграл ключевую роль в том, чтобы данное мероприятие стало возможным: наладил коммуникацию с ClickHouse сообществом на Кипре, нашел отличную площадку и докладчиков. Большинство ClickHouse митапов по всему миру происходят благодаря активным участникам сообщества, таким как Кирилл. Если вы хотите помочь нам организовать ClickHouse митап в своём регионе, пожалуйста, свяжитесь с командой ClickHouse в Яндексе через [эту форму](https://clickhouse.tech/#meet) или любым другим удобным способом. + +![Кирилл Шваков](https://blog-images.clickhouse.tech/ru/2019/clickhouse-meetup-v-limassole-7-maya-2019/1.jpg) + +Кирилл широко известен благодаря своему замечательному [ClickHouse Go Driver](https://github.com/clickhouse/clickhouse-go), работающему по нативному протоколу, а его открывающий доклад был об опыте оптимизации запросов ClickHouse и решении реальных прикладных задач в Integros и Wisebits. [Слайды](https://presentations.clickhouse.tech/meetup22/strategies.pdf). [Полные тексты запросов](https://github.com/kshvakov/ClickHouse-Meetup-Exness). + +Мероприятие началось ранним вечером… +![Вечер в Лимассоле](https://blog-images.clickhouse.tech/ru/2019/clickhouse-meetup-v-limassole-7-maya-2019/2.jpg) + +…но природе потребовалось всего около часа, чтобы включить «ночной режим». Зато проецируемые слайды стало заметно легче читать.
+![Ночь в Лимассоле](https://blog-images.clickhouse.tech/ru/2019/clickhouse-meetup-v-limassole-7-maya-2019/3.jpg) + +Сергей Томилов и его коллеги из Exness Platform Team поделились деталями эволюции их систем для анализа логов и метрик, а также рассказали, как они в итоге стали использовать ClickHouse для долгосрочного хранения и анализа данных ([слайды](https://presentations.clickhouse.tech/meetup22/exness.pdf)): +![Сергей Томилов](https://blog-images.clickhouse.tech/ru/2019/clickhouse-meetup-v-limassole-7-maya-2019/4.jpg) + +Алексей Миловидов из команды ClickHouse в Яндексе продемонстрировал функциональность из недавних релизов ClickHouse, а также рассказал о том, что стоит ждать в ближайшем будущем ([слайды](https://presentations.clickhouse.tech/meetup22/new_features/)): +![Алексей Миловидов](https://blog-images.clickhouse.tech/ru/2019/clickhouse-meetup-v-limassole-7-maya-2019/5.jpg) + +Александр Зайцев, технический директор Altinity, сделал обзор того, как можно интегрировать ClickHouse в окружения, работающие на Kubernetes ([слайды](https://presentations.clickhouse.tech/meetup22/kubernetes.pdf)): +![Александр Зайцев](https://blog-images.clickhouse.tech/ru/2019/clickhouse-meetup-v-limassole-7-maya-2019/6.jpg) + +Владимир Гончаров, бэкенд-разработчик из Aloha Browser, закрывал ClickHouse Limassol Meetup демонстрацией нескольких проектов для интеграции других opensource продуктов для анализа логов с ClickHouse ([слайды](https://presentations.clickhouse.tech/meetup22/aloha.pdf)): +![Владимир Гончаров](https://blog-images.clickhouse.tech/ru/2019/clickhouse-meetup-v-limassole-7-maya-2019/7.jpg) + +К сожалению, приближалась полночь, и только самые «морозостойкие» любители ClickHouse продержались всё мероприятие, так как стало заметно холодать. + +![Лимассол](https://blog-images.clickhouse.tech/ru/2019/clickhouse-meetup-v-limassole-7-maya-2019/8.jpg) + +Больше фотографий с мероприятия доступно в [коротком послесловии от Exness](https://www.facebook.com/events/386638262181785/permalink/402167077295570/). diff --git a/website/blog/ru/2019/clickhouse-meetup-v-moskve-5-sentyabrya-2019.md b/website/blog/ru/2019/clickhouse-meetup-v-moskve-5-sentyabrya-2019.md new file mode 100644 index 00000000000..d3a5471b1a8 --- /dev/null +++ b/website/blog/ru/2019/clickhouse-meetup-v-moskve-5-sentyabrya-2019.md @@ -0,0 +1,10 @@ +--- +title: 'ClickHouse Meetup в Москве, 5 сентября 2019' +image: 'https://blog-images.clickhouse.tech/ru/2019/clickhouse-meetup-v-moskve-5-sentyabrya-2019/main.jpg' +date: '2019-09-06' +tags: ['мероприятия', 'meetup', 'Москва'] +--- + +![iframe](https://www.youtube.com/embed/videoseries?list=PL0Z2YDlm0b3gYSwohnKFUozYy9QdUpcT_) + +[Слайды опубликованы на GitHub](https://github.com/clickhouse/clickhouse-presentations/tree/master/meetup28). diff --git a/website/blog/ru/2019/clickhouse-meetup-v-novosibirske-26-iyunya-2019.md b/website/blog/ru/2019/clickhouse-meetup-v-novosibirske-26-iyunya-2019.md new file mode 100644 index 00000000000..d1dafe580f1 --- /dev/null +++ b/website/blog/ru/2019/clickhouse-meetup-v-novosibirske-26-iyunya-2019.md @@ -0,0 +1,12 @@ +--- +title: 'ClickHouse Meetup в Новосибирске, 26 июня 2019' +image: 'https://blog-images.clickhouse.tech/ru/2019/clickhouse-meetup-v-novosibirske-26-iyunya-2019/main.jpg' +date: '2019-06-05' +tags: ['мероприятия', 'meetup', 'Новосибирск'] +--- + +Изюминкой второго ClickHouse митапа в Новосибирске были два низкоуровневых доклада с погружением во внутренности ClickHouse, а остальная часть контента была очень прикладной с конкретными сценариями.
Любезно предоставленный S7 зал на сто человек был полон до самого завершения последнего доклада где-то ближе к полуночи. + +![iframe](https://www.youtube.com/embed/videoseries?list=PL0Z2YDlm0b3ionSVt-NYC9Vu_83xxhb4J) + +Как обычно, [все слайды опубликованы на GitHub](https://presentations.clickhouse.tech/meetup25). diff --git a/website/blog/ru/2019/clickhouse-meetup-v-sankt-peterburge-27-iyulya-2019.md b/website/blog/ru/2019/clickhouse-meetup-v-sankt-peterburge-27-iyulya-2019.md new file mode 100644 index 00000000000..8f8f9b4aae2 --- /dev/null +++ b/website/blog/ru/2019/clickhouse-meetup-v-sankt-peterburge-27-iyulya-2019.md @@ -0,0 +1,10 @@ +--- +title: 'ClickHouse Meetup в Санкт-Петербурге, 27 июля 2019' +image: 'https://blog-images.clickhouse.tech/ru/2019/clickhouse-meetup-v-sankt-peterburge-27-iyulya-2019/main.jpg' +date: '2019-08-01' +tags: ['мероприятия', 'meetup', 'Санкт-Петербург'] +--- + +![iframe](https://www.youtube.com/embed/videoseries?list=PL0Z2YDlm0b3j3X7TWrKmnEPcfEG901W-T) + +[Слайды опубликованы на GitHub](https://github.com/yandex/clickhouse-presentations/tree/master/meetup27). diff --git a/website/blog/ru/2019/clickrouse-meetup-v-minske-11-iyulya-2019.md b/website/blog/ru/2019/clickrouse-meetup-v-minske-11-iyulya-2019.md new file mode 100644 index 00000000000..cbd9e6c01fb --- /dev/null +++ b/website/blog/ru/2019/clickrouse-meetup-v-minske-11-iyulya-2019.md @@ -0,0 +1,12 @@ +--- +title: 'ClickHouse Meetup в Минске, 11 июля 2019' +image: 'https://blog-images.clickhouse.tech/ru/2019/clickrouse-meetup-v-minske-11-iyulya-2019/main.jpg' +date: '2019-07-12' +tags: ['мероприятия', 'meetup', 'Минск', 'Беларусь'] +--- + +![iframe](https://www.youtube.com/embed/videoseries?list=PL0Z2YDlm0b3hLz6dmyu6gM_X871FG9eCc) + +[Все слайды опубликованы на GitHub](https://github.com/yandex/clickhouse-presentations/tree/master/meetup26). 
+ +![Минск](https://blog-images.clickhouse.tech/ru/2019/clickrouse-meetup-v-minske-11-iyulya-2019/1.jpg) diff --git a/website/blog/ru/index.md b/website/blog/ru/index.md new file mode 100644 index 00000000000..227a69408dc --- /dev/null +++ b/website/blog/ru/index.md @@ -0,0 +1,3 @@ +--- +is_index: true +--- diff --git a/website/blog/ru/redirects.txt b/website/blog/ru/redirects.txt new file mode 100644 index 00000000000..4e34d53af3d --- /dev/null +++ b/website/blog/ru/redirects.txt @@ -0,0 +1,15 @@ +yandeks-otkryvaet-clickhouse.md 2016/yandeks-otkryvaet-clickhouse.md +clickhouse-meetup-v-moskve-21-noyabrya-2016.md 2016/clickhouse-meetup-v-moskve-21-noyabrya-2016.md +clickhouse-na-vstreche-pro-infrastrukturu-khraneniya-i-obrabotki-dannykh-v-yandekse.md 2016/clickhouse-na-vstreche-pro-infrastrukturu-khraneniya-i-obrabotki-dannykh-v-yandekse.md +clickhouse-na-highload-2016.md 2016/clickhouse-na-highload-2016.md +clickhouse-meetup-v-novosibirske-3-aprelya-2017.md 2017/clickhouse-meetup-v-novosibirske-3-aprelya-2017.md +clickhouse-meetup-v-minske-itogi.md 2017/clickhouse-meetup-v-minske-itogi.md +clickhouse-meetup-v-sankt-peterburge-28-fevralya-2017.md 2017/clickhouse-meetup-v-sankt-peterburge-28-fevralya-2017.md +clickhouse-meetup-v-ekaterinburge-16-maya-2017.md 2017/clickhouse-meetup-v-ekaterinburge-16-maya-2017.md +clickhouse-na-uwdc-2017.md 2017/clickhouse-na-uwdc-2017.md +clickhouse-meetup-edet-v-minsk.md 2017/clickhouse-meetup-edet-v-minsk.md +clickhouse-meetup-v-sankt-peterburge-27-iyulya-2019.md 2019/clickhouse-meetup-v-sankt-peterburge-27-iyulya-2019.md +clickhouse-meetup-v-moskve-5-sentyabrya-2019.md 2019/clickhouse-meetup-v-moskve-5-sentyabrya-2019.md +clickhouse-meetup-v-novosibirske-26-iyunya-2019.md 2019/clickhouse-meetup-v-novosibirske-26-iyunya-2019.md +clickrouse-meetup-v-minske-11-iyulya-2019.md 2019/clickrouse-meetup-v-minske-11-iyulya-2019.md +clickhouse-meetup-v-limassole-7-maya-2019.md 2019/clickhouse-meetup-v-limassole-7-maya-2019.md diff --git a/website/css/blog.css b/website/css/blog.css new file mode 100644 index 00000000000..80ba393dec1 --- /dev/null +++ b/website/css/blog.css @@ -0,0 +1,8 @@ +body.blog .dropdown-item { + color: #111 !important; +} + +body.blog .dropdown-item:hover, +body.blog .dropdown-item:focus { + background-color: #efefef; +} diff --git a/website/locale/en/LC_MESSAGES/messages.po b/website/locale/en/LC_MESSAGES/messages.po index c2c37ebf2b1..9b264bb67d4 100644 --- a/website/locale/en/LC_MESSAGES/messages.po +++ b/website/locale/en/LC_MESSAGES/messages.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PROJECT VERSION\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2020-05-20 15:32+0300\n" +"POT-Creation-Date: 2020-06-15 22:29+0300\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language: en\n" @@ -37,27 +37,27 @@ msgstr "ClickHouse - fast open-source OLAP DBMS" msgid "ClickHouse DBMS" msgstr "ClickHouse DBMS" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "open-source" msgstr "open-source" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "relational" msgstr "relational" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "analytics" msgstr "analytics" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "analytical" msgstr "analytical" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "Big Data" msgstr "Big Data" -#: templates/common_meta.html:24 +#: 
templates/common_meta.html:28 msgid "web-analytics" msgstr "web-analytics" @@ -77,6 +77,14 @@ msgstr "" msgid "Yandex LLC" msgstr "Yandex LLC" +#: templates/blog/content.html:16 +msgid "Published date" +msgstr "Published date" + +#: templates/blog/nav.html:18 +msgid "Documentation" +msgstr "Documentation" + #: templates/docs/footer.html:3 msgid "Rating" msgstr "Rating" diff --git a/website/locale/es/LC_MESSAGES/messages.mo b/website/locale/es/LC_MESSAGES/messages.mo index 9b61bb401bb8d56b03085f60b85d14bdc3c28009..a072272644ef6c4831ba813f62d3c5723d0d5b28 100644 GIT binary patch delta 1299 zcmYMzUr3Wt7{~EPo%yGn`Df;zsDY6oD{y6KrKFfeNr|8#NVw)eF{hyt6>Shf7ZHSz zWaWhgc~ciUib{%+cM$|BK@@pmMd(EeL4PRf`@@$S@6P9(_kGWKo;~MmdHc+c==*Hf zRpWDj?|Qy%i5mU?+;o}cFdRiUPGA*zz3wgQ=J)u;f6umoG2=Mm(x3pAJc z0YkWsv6q9OGgg5bH)m3Rz1YJI?e$sQh(o9ZCb0}3VKXkGwxWhcrEfw74&!DVcJ4pH z4UFHQ*8Ps!(oD)4$5Ql>*kTs-SLyPpe+ssv_No(=z$xUES_HMnw@{@Ucg7Q_(mud# z_!Tw3m=mjn{CFB$QHji;)>}aR0C&{0a`;M70gqq{wxCLU4^{HV$YpUF1$d20=p$<0 z0;;sj*oSVS>5xXTQComoZ=SmAEG?lDi2k6XL*wDk+k}U)9>=f&KO@z%a!yDifyUYSocBFD&-0w;eK*_ovo*I+ z<=i(uU3{Z_drCC={~2-2su^an0`KAme2Qyv5`&n>8hnc!_Jva;E@35>m;N(~dcGO! z&2pCHVha-|J+oNL_#tk>Dcp!N_y~*m3oorPOW}op8HY`A;+DNaB`A3FAFzz^JgTr| zOyE~+Aive|8s$vvM-`IBQ#gR@@f}96h*kI-RbYsy3fP26Y{7MS)|($jYP8EpF1z8) zkD@wy4~NKa&$&>=QTkZxBM_3q4iile_M!^t!)6@9ZhVFvxPk|)PfsK9yD zQohGG_#K-mY=Sncb91a?0PCyizY_0cLD-F!K&|OfRLlCj@o7{``>_RIp#BY?P=zex zFoxKvD&Q6>-V;>e1#G|tR6c*092m@Tq1K*2welP)@F1#?>!=5AqgpwJ=kXJ(?8+KtQ|J5GM-QvEyVL65l#eg{4q? zCXGtei(1p;sDk=Yg=bL(T|>ng!@W4}-Je79`mGpo$DMtCcO_8px%sjVC)mC#5!)Vb W-`$#U|AYog-HX+SO5Bg(tn(K;qjmTI diff --git a/website/locale/es/LC_MESSAGES/messages.po b/website/locale/es/LC_MESSAGES/messages.po index 794662f0dbd..54765875a81 100644 --- a/website/locale/es/LC_MESSAGES/messages.po +++ b/website/locale/es/LC_MESSAGES/messages.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: PROJECT VERSION\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2020-05-20 15:32+0300\n" +"POT-Creation-Date: 2020-06-15 22:29+0300\n" "PO-Revision-Date: 2020-03-26 10:19+0300\n" "Last-Translator: FULL NAME \n" "Language: es\n" @@ -36,27 +36,27 @@ msgstr "ClickHouse - DBMS OLAP de código abierto rápido" msgid "ClickHouse DBMS" msgstr "Sistema abierto." -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "open-source" msgstr "de código abierto" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "relational" msgstr "relacional" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "analytics" msgstr "analítica" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "analytical" msgstr "analítico" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "Big Data" msgstr "Grandes Datos" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "web-analytics" msgstr "Sistema abierto." @@ -76,6 +76,14 @@ msgstr "" msgid "Yandex LLC" msgstr "Sistema abierto." 
+#: templates/blog/content.html:16 +msgid "Published date" +msgstr "Fecha de publicación" + +#: templates/blog/nav.html:18 +msgid "Documentation" +msgstr "Documentación" + #: templates/docs/footer.html:3 msgid "Rating" msgstr "Clasificación" diff --git a/website/locale/fa/LC_MESSAGES/messages.mo b/website/locale/fa/LC_MESSAGES/messages.mo index 36d0063da7a33529fbb93d0dc770594a8f758d80..21ba91517111062c683f8c18545bc2846144588e 100644 GIT binary patch delta 1296 zcmYM!TS!zv9LMp$x_Q6kddp1OG_t_3mJ;vK&58_yKuM(XmPw^XD^Z|pH+|_rgf3=2 zmLXEu21UMTy+ja3_;5rkBIrS(0*j>R^8Lv|huQO)nRCv}fBrMRE%8fS0?enN`<<(7l4Wo8MO8?*%VeFbKl`Ry>3 zm2_MTI*1vxpI|CZVKUBRKU%oi54eTfurI=lVpH7Wa29#h=1>!TMT&(IZ4^eM0!&6P z^IJNVwRBWqES^RM)QR=jkIV5lrei##xN$uy&|=hpRak~K7=r`B{vlN6Mv$0o3>DBb zJ!gJ}6lyKn>EKm{;?dH4eL;Ua3FSoTBhEZl_oxEh<#gFUzo z@1eGG67~KZx(VnH-oZpS`LE%@W1cF-$(%IpRVMO|<)Q*C4fa=}R@{hszYWXr9B#pJ zR3P6`0i+P^D3)L*e#R_}WwmPO`Kf5d`%x=Ci$3f`rFa1~k)oacGYVHdYOYE4ZC zeRqscC*LsNJ$dT=|D5-kmC{XOG2X%z_!yVsFa|J#tMN5**#~a5_z6p}aLGTzsQ05- zZI-e&8fzFh=9*98W1oq=f`dJQ) zGW~(Li zZlh#2gj%SFgQ9t^q5|m0-RR(5T*MQ2n=+lo)B+9d`AKF^;#pKc^Qek=`BTst38N}g ziCUl$73fwZY1@T514mGYtrv;W5~u{O;{m*fs=yplIj_x!ork_1US~Fc%;RJVT6}@# bhFDW%Q*-O)n6p?E@jKD7={%>Z{HXsg=ly(S diff --git a/website/locale/fa/LC_MESSAGES/messages.po b/website/locale/fa/LC_MESSAGES/messages.po index 65eeb6f605a..cc50b4726ff 100644 --- a/website/locale/fa/LC_MESSAGES/messages.po +++ b/website/locale/fa/LC_MESSAGES/messages.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: PROJECT VERSION\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2020-05-20 15:32+0300\n" +"POT-Creation-Date: 2020-06-15 22:29+0300\n" "PO-Revision-Date: 2020-03-26 10:19+0300\n" "Last-Translator: FULL NAME \n" "Language: fa\n" @@ -36,27 +36,27 @@ msgstr "ClickHouse - سریع باز-منبع OLAP DBMS" msgid "ClickHouse DBMS" msgstr "خانه عروسکی" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "open-source" msgstr "متن باز" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "relational" msgstr "رابطه" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "analytics" msgstr "تجزیه و تحلیل" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "analytical" msgstr "تحلیلی" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "Big Data" msgstr "داده های بزرگ" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "web-analytics" msgstr "تجزیه و تحلیل وب سایت" @@ -76,6 +76,14 @@ msgstr "" msgid "Yandex LLC" msgstr "Yandex, LLC" +#: templates/blog/content.html:16 +msgid "Published date" +msgstr "تاریخ انتشار" + +#: templates/blog/nav.html:18 +msgid "Documentation" +msgstr "مستندات" + #: templates/docs/footer.html:3 msgid "Rating" msgstr "درجهبندی" diff --git a/website/locale/fr/LC_MESSAGES/messages.mo b/website/locale/fr/LC_MESSAGES/messages.mo index 6d5d70236c139ece8255fb2ceaee84ce6c7d96b3..6ca1195547545385bed08d88ae6b5c5682b839fd 100644 GIT binary patch delta 1295 zcmZA0OGs2v9LMp$dVSVtKJwXPnjRL6i$)F26eGnT3qvpj6(?sx(X>!27da6@8;dZC zwA|VvH`5>(M%ZH!Eh@q`6%_<7YLT0Yf~fBgF9Z#D?&q8{_x#WQ{?C~=fqSKq_gT(0 zizrNa?G;nPGLGmF%uUs1z%wrzQuf8LN5Emtr(p|vkdgR?m^9O#LZ?A z>!4A{zlFWX zT=tNg5_ye-xQt3T#7S0Sr%-XPMriOiuo=`=yu@}~M3p{3y|fiINDQk-jklsI6h3V0_r;_OTC)D3hE z9}o8r1&4YD`eXCSkK@vthL86340eY@-cWET96Os@>7?x{*;(!l1a=21V+)>5iT_Re Zm{k~`UB|=TP}qAy)4NvU{mcnEe*r*3iE;n{ delta 1200 zcmYMyUr3Wt7{~Eve{`vv)^yf%MaxK}EpjG^lq6=05L!xsgphSpL^q{De{6K&MgJ}g zioA=Upf0-b^`c?jSQz3(HxgKqT2Zi|pe}+%*7t`mG|t}7IcIyH^PK0LT+`cB_I=oY 
z+xYC|8{^yM)#(4{lHaV7;SiSN2(H0CVq$3FCQGVxKXE zUvV?*+Xl7~WTFF=NCuDNDO`=OF^Y>A#sVtA5K$Gd0o!mJmf>l4{ydVST}IZjVR!x} zs-m}WkoD~$9c3J&j3F^c-V9ch;4af1yU!u5>fb=;AQK975f%6f((I9I4ppIP+=TC3mr;S^oRco^PSm&;wZH(r z#Ot`5#4-fg!+3;q)JEQfslPH>VnVk!k1Dy315(M#P-h&$9aw`~@Bq^691ksc4bS2O zRH9LCtP\n" "Language: fr\n" @@ -36,27 +36,27 @@ msgstr "ClickHouse-SGBD OLAP open-source rapide" msgid "ClickHouse DBMS" msgstr "SGBD ClickHouse" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "open-source" msgstr "open-source" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "relational" msgstr "relationnel" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "analytics" msgstr "Analytics" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "analytical" msgstr "analytique" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "Big Data" msgstr "Big Data" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "web-analytics" msgstr "web-analytics" @@ -76,6 +76,14 @@ msgstr "" msgid "Yandex LLC" msgstr "Yandex LLC" +#: templates/blog/content.html:16 +msgid "Published date" +msgstr "Date de publication" + +#: templates/blog/nav.html:18 +msgid "Documentation" +msgstr "Documentation" + #: templates/docs/footer.html:3 msgid "Rating" msgstr "Évaluation" diff --git a/website/locale/ja/LC_MESSAGES/messages.mo b/website/locale/ja/LC_MESSAGES/messages.mo index 4577e84e199bbcf81da749052a34b8ce358e4e1f..4f72203e20bd51ad0351a33205ed17e01d4d4072 100644 GIT binary patch delta 1279 zcmYMzdq`7J9Ki9PdVTB6`N*l%%xEGCwjO9LnNeA^KQg2TNM$M|nNm&yAs4Z-D5xy0 zW&YDc352jw2oi|}QGXPOMW9p=4J@LFKnS9~KReJkyPxy8_nhDPoqJ!exV19y&TjPz zzs>yf`PW5j>;LbDC6YmN0Btye={SxFIEhI(jX4-Wiu~c?LMv7z4NHt3)aQLzC=!r8 zRC4J!X&k^j+M}3_Q<#ZgFoY5(G6&bO3VY&3C^E$*6=#rJFUPKM#8kXRFtj70PjxKhw5%-}U%^wu3Aop3J}VGC+ty|@WOs2Tczy7Qm72yL9pGAzXMD0Ihm?w;Q{YKokgg~g`dYx=8Ay9T|C??MlTP~V9+ zs9(bmJb}r)!5a8^)JzScZs28ribnbsb>SRVtHs5rnen3r(rvtidS5?k#%`MSJ=FUj z;%$766lvuXDr2WX)cYSS{C5Nss4snBlFAk;b9ex&2-}T!u>xmM&p4a#Yp?({fOgct zI#G`*X!_5X_60M3**Jh2(2!|ALINt3XH;~;DJ;Wj(@xtL@44qb2RBVsCO-y! 
zw~gO+{=)q2&QSFKciv}~PuPQbcoUc4eO!pcn1iFZ3|}G5K5?nQFBrhw%z4A8_oG;5 zmb5xL%Nb~Q_Fysb9bAedSb!6F7pL(Do?T#;z%yB9G#lZv0jDbmQGfj&=cA%dWWb zK2$~fv5WQX5glb5rjErPCPLP*UCgEz526xi#VB@R6F$HioW({gV;4G#t*pFo;5ap`3PEvQPR`CxSR zbI!l0@c@IGuQWjYSJT?TuoR0ppA}e(Yq1UW-c_X8 z0GC_%80$$aL75K_Um^=_cq&N!Rhl#dg*fAG{6>v?nN1J+u^tO>3$~!Xfqv9i{{%1M zI4a>Ce6gxfJ1X((s04>l^G#wien`?`EsL?b7HmL0um@G5W)~kpJ$DQr;yI*QgnyA3 zJF7-L-%&JQ@=h!#zKlC@2s`jQ#<7*GH)HY!9i8cG+>h^239MurN+^mtnrhT|t&0=x zexq|QDxt$JK7~rC8}(i<#<0)DFOXC8+RISto^P8sHJdf&NsVUL_;RY&#bc3;RkfAz T)E|F1GZo1{osk+Syyp7{`FwRL diff --git a/website/locale/ja/LC_MESSAGES/messages.po b/website/locale/ja/LC_MESSAGES/messages.po index c88f2cabea2..f057f359724 100644 --- a/website/locale/ja/LC_MESSAGES/messages.po +++ b/website/locale/ja/LC_MESSAGES/messages.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: PROJECT VERSION\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2020-05-20 15:32+0300\n" +"POT-Creation-Date: 2020-06-15 22:29+0300\n" "PO-Revision-Date: 2020-03-26 10:19+0300\n" "Last-Translator: FULL NAME \n" "Language: ja\n" @@ -33,27 +33,27 @@ msgstr "ツ環板篠ョツ嘉ッツ偲青エツδツ-ツエツスツ-ツシツ" msgid "ClickHouse DBMS" msgstr "クリックハウスDBMS" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "open-source" msgstr "オープンソース" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "relational" msgstr "関係" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "analytics" msgstr "analytics" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "analytical" msgstr "分析" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "Big Data" msgstr "ビッグデータ" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "web-analytics" msgstr "ウェブ分析" @@ -71,6 +71,14 @@ msgstr "ソフトウェアは、明示または黙示を問わず、いかなる msgid "Yandex LLC" msgstr "Yandex LLC" +#: templates/blog/content.html:16 +msgid "Published date" +msgstr "公開日" + +#: templates/blog/nav.html:18 +msgid "Documentation" +msgstr "文書" + #: templates/docs/footer.html:3 msgid "Rating" msgstr "評価" diff --git a/website/locale/messages.pot b/website/locale/messages.pot index 75aae739cff..12cb2a98bb8 100644 --- a/website/locale/messages.pot +++ b/website/locale/messages.pot @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PROJECT VERSION\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2020-05-20 15:32+0300\n" +"POT-Creation-Date: 2020-06-15 22:29+0300\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -32,27 +32,27 @@ msgstr "" msgid "ClickHouse DBMS" msgstr "" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "open-source" msgstr "" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "relational" msgstr "" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "analytics" msgstr "" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "analytical" msgstr "" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "Big Data" msgstr "" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "web-analytics" msgstr "" @@ -70,6 +70,14 @@ msgstr "" msgid "Yandex LLC" msgstr "" +#: templates/blog/content.html:16 +msgid "Published date" +msgstr "" + +#: templates/blog/nav.html:18 +msgid "Documentation" +msgstr "" + #: templates/docs/footer.html:3 msgid "Rating" msgstr "" diff --git a/website/locale/ru/LC_MESSAGES/messages.mo 
b/website/locale/ru/LC_MESSAGES/messages.mo index 38fc03212af92bef1bc9b77364958447cf048c5d..53d8b37d07246761c4600def95d5998e8cce20ee 100644 GIT binary patch delta 1296 zcmYMzT}YEr7{KvIH=SD3{LH20%5X+Tv@rZ?k|qUt5eZ3TAEts@DbfagkOK=N=%UO> zEe*QJo1)n+GIxVoRV0<_ASU7nCgV89;S46=JZ9n#&Z*M`rslYUZ9JF-r(F zpm*BO`tp&2rg#c{_#HL!6XZ23O5{AUw%lS?9e5WtkUlKHS9k~)un1l3q6*KV9?dh{ zk0WM%8CgPpq2EHmO5QYealG{SGf*RSAajb-v;Z}=MaWq3pe}F(U3lE=??fKC$!nGk z^x$6Vty~1n*bwI7SQ7cyhHo@z2G+0ylgYPET#dC@i`xIvbOJY0pT%-qM&0>t(zY3! zQRnSOy+cni2jAdB0-D2kcG{6j{?E{HEj@C^^QgCY3AbVlqf60+dI=klwd6ADncl`$ z>_ZLILR32bByzjOi=VU~_21P_X9eEDojBz;8-Amv+{3}@9jHQGD1@5oNz|Rqp)R~? z8p}=9%b1QkFdvuj7;47KNkbkxtwCaxt4yNv-NQok4^cQpA&gJZy(N;78T?nziKKc4 zd+A?9<`%y*vhV=voq2)mK>}v|18QKCs2f?pd|W}jd=Apf@3%=V1&yo>_3u`Ly2IDV zCgpwbinY>WKiJ;jyV%s`t@pOHw*^DCiKxV*zH=8^E;TnbIveY~O~LE&#nyy`1 z-R=^1ad6zeVvFj%?{Q`Z1L6aLA`mwyLQF8aDT<))j~!^7ozMHc=bZO_-sik07hUR!&sVx{ z8^8Vh>-ZlkRP_Hh>NZ@|ba0iW8UY1!{s>r$2*U;yF}eU$Fzf z;YQ}S0Lv(&BZ^AoFrLFfT#4_{k2$Qw->3wAjH&@!FoIjK6fZjcBS?;R1)0mPIsG?K z6&=SI^V>5T%D9d?))p`jGKU>xG(C6}mB0yX!x;AB6YRuA+=or9La*XH?!ik=oIs|q zm*~Pb=)*L|^}0XNP^NRpSoYa*8CB9h$YBLMr3uQh8U3j1-N<3RoIbLEJ`54J^Ws&h zGZ@4%Cw_{mz^e-C-$f%shX$^o-p8;8b;FS3bzDn)5BFjMwelZWkFAtX6CFV9nE`CT zQM^M!_tDErw=?J|`Xg2OSNouf`md*BijH0Q9<|wgjKbKq0k!KxcmaD*2|h>d_9C)k z7h3>7=sM~v@^i5VcVH`yIPoN^(hC^FCH!EnpBg;V5deO``6fLM8SV^;Kn23ye~B)=+3ik`wNzE4k>IEl5s#JKZJW c_KvNM+rnL;j^tn8cTcjoI^#@~ diff --git a/website/locale/ru/LC_MESSAGES/messages.po b/website/locale/ru/LC_MESSAGES/messages.po index aed233c1275..981b83f3d7e 100644 --- a/website/locale/ru/LC_MESSAGES/messages.po +++ b/website/locale/ru/LC_MESSAGES/messages.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: PROJECT VERSION\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2020-05-20 15:32+0300\n" +"POT-Creation-Date: 2020-06-15 22:29+0300\n" "PO-Revision-Date: 2020-03-26 10:19+0300\n" "Last-Translator: FULL NAME \n" "Language: ru\n" @@ -38,27 +38,27 @@ msgstr "ClickHouse-быстрая СУБД OLAP с открытым исходн msgid "ClickHouse DBMS" msgstr "СУБД ClickHouse" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "open-source" msgstr "открытый исходный код" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "relational" msgstr "реляционный" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "analytics" msgstr "аналитика" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "analytical" msgstr "аналитический" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "Big Data" msgstr "большие данные" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "web-analytics" msgstr "веб-аналитика" @@ -78,6 +78,14 @@ msgstr "" msgid "Yandex LLC" msgstr "ООО «Яндекс»" +#: templates/blog/content.html:16 +msgid "Published date" +msgstr "Дата публикации" + +#: templates/blog/nav.html:18 +msgid "Documentation" +msgstr "Документация" + #: templates/docs/footer.html:3 msgid "Rating" msgstr "Рейтинг" diff --git a/website/locale/tr/LC_MESSAGES/messages.mo b/website/locale/tr/LC_MESSAGES/messages.mo index 1cfc686a5ed1392629766d00ad23bfdb739bd789..65218c3b7298969fc73112ec30893466695a3a4b 100644 GIT binary patch delta 1296 zcmYMzTS!zv9LMp$dc5X6v$hRdGO@xQ+)^}6P%Nd4iog&g+;maNOL=+d=0b!)PbCCd zN?>F!32qBv2qNZ75f{3)vYe zTj{vwI)Yx>lbDSQn1$bQ2rbTR4entD4#t~NY=K7#E+T)mWz<3|NU><5O~fQrfLZ8g zee+P+Nk=Uv;{{Ye{n&&JoV9__I-wdXPEI=bDd7Z80*|%25j(#XPKYfA2?%UFR{t1_n{{ 
zc(@3gFc&vtX&U+0M5pM`f{nNXTio_d%%^=DDK_mokIK*z`fvp`E`xK@(e20WcpP0|$bSVLO&q9F`v|qean}fPOYJ#oryo$M`-(dApQzNXVG$N`j>KZ^ zn2T4i2S<@&+1z9Wwg(l^kq{L{+=xp3Wju-fs1&|Mo&6H(vVB1X7{h2~ED;r0I%+~M zkMmgIwnwpyolc@I;VS9SMji66*FBU>rH0CGtjBIF!Pi)YalF*)XN-qw*P#L$K?N{| z3Va+D*aT{UXQ;b1jXL{T)Q09!N51&4KgYgN(E_XJM_p}Av=5ca5>MokQyG&|)z#G3 z5$x;@^tN?%MyBFlM5UhSJKNsY(;95{H3xcwk*kR%PEyg{fq~9cZ91FHibb(ZN~}yef11>25yXPX;-ZP&5Lqmkgs8>^7m{5} zVqtDfj6@=4Z|^oYyCCdBLT|{z5`vUO;-(29zQ1@A&FT9&=QQtmp7Wg3=dog2?p?^a zZhTUF!+iIYY4rbd!7;04n8hj_#07X8=ixY3-~=|{Q>58@9xeD0tFdyVvsl9ObIp=?Dqu#lJP&@dNmPI-Z@z%#jAu}ZeZm-i z#TCT2rEH^;iB41^`>_X4U=6;+Mx4bE{zN4hWK}KLiV0kc^Rd^PKZoRK7m-+Y*_$6g zRrDJ66W{L9QO05F*yLj&B!=x~HNAKMmB0~$mC_9pgXl<`5N*(J|GRE0*c8C}$S1=Nw1a2bX;FWs3KD)25G z!;HoxR>LAm#;Z6-RrXkj`fCS$OmG|RENX|tsFIDM&iE0kqknZJM^Nh&kP7\n" "Language: tr\n" @@ -36,27 +36,27 @@ msgstr "ClickHouse - hızlı açık kaynak OLAP DBMS" msgid "ClickHouse DBMS" msgstr "ClickHouse DBMS" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "open-source" msgstr "açık kaynak" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "relational" msgstr "ilişkisel" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "analytics" msgstr "analiz" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "analytical" msgstr "analitik" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "Big Data" msgstr "Büyük Veri" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "web-analytics" msgstr "web-analyt -ics" @@ -76,6 +76,14 @@ msgstr "" msgid "Yandex LLC" msgstr "Yandex LLC" +#: templates/blog/content.html:16 +msgid "Published date" +msgstr "Yayım datelandığı tarih" + +#: templates/blog/nav.html:18 +msgid "Documentation" +msgstr "Belge" + #: templates/docs/footer.html:3 msgid "Rating" msgstr "Verim" diff --git a/website/locale/zh/LC_MESSAGES/messages.mo b/website/locale/zh/LC_MESSAGES/messages.mo index 7260ce95e89b510777ffebc6c96cb915de5e9939..0a8eabb78ba784581b73d2e56aa0591383c04889 100644 GIT binary patch delta 1281 zcmYMzT}V@57{KvIJ^3-ea#L%5#L#d;Q)rcGmJk+2Ds&Om$TCww``Cya;YAKC5-%i^ zvP_3p8HfZnDq%OHE~IXPu&bhwazq6NQi726f9i$C+4;TC`}v&rJ!i6Vr84y1Zrv7s z2l*HAZ%)+c|8Lw9aS(~ z>x+}ZW-1yn123W`)QfF6itF(gIx&k`>{yPPXf5i5$FL5YF&)QD{R7mUn?`b$C~88_ zw4L?kB?aB$R~W>PsG0lN*TMvmbI97#࿒sM$r=)p&L06$G9{IX6i)d6qnJ1y0z8FSgAu@paI=@)U@{^mt5m^od+03 z?eFA8P|Hx?iEY{Jzh+WTqzg8oKG_iy`%xzjn))yn6F)V+F)pJXVGie1f>oG@KGgmJ z+=PSJgVXp8%W~NNofI4#sAkrHn#gfvPH90d37Gmm)P4gd9>FcdVbpiw1?sp()WmNy`T_h& zJY?GY$wJ59Lfz@m7zKTT$9M=AuogXhdzwfm>KRX%cnWob1>~(akshl)DRW<-EqI~B-`(2X74S!*DbEwK4hPSi z@4D34(e7$*?e2(NNvpAJRTW!zxV+w7-kL}(^MsWgAL;)TeG)0KKTOD2ym51J_G3+^hp{R+w?vG$(G^D^!75mtVk2;v%ZCub9AZ z*g$=&<2C#w(x^sy@Dg6b8l1-nE@KFPp&AS@t0vr!DU9P5yzcV-NRM_Ksb#}1e-E{y z53rB=Ho=8DjOXcm~x#FE(QzcHuZCv4ovi&s%6KuHsR=<>D!%guO;D zzQq6*FsJQa;zFGkk-2Qe`2)42zmdZ{j8cIhHev*IzXLg}i_=|R;4JF-B0m_piuw(# zp&BVCP`#RJ)?aTHc8M0$#7R{C0`A5E=cw}qHjrPyz4#NiV~qXM^9iiO9_+^(_#IdA z5H7OZYAhIH{TXXfHdcGv#7Ui{P!Dvv_%t>VXHk1Uj2ib8)$nu7;u5NnRyJ7oPoU<> zqViWz^ABP>-pO&HC3%V3n>qKuJLkM}5jD|A?8cJIC;4lv5g$X{Z>Ne1q){uFK{b9E zGdO}r@B^xWT$ulu_Vg4gkwr}~;(UZ^?6Hf-UHlAHa1u9Oz}=s77F_x5#jGbkQ\n" "Language: zh\n" @@ -33,27 +33,27 @@ msgstr "ツ暗ェツ氾环催ツ団ツ法ツ人" msgid "ClickHouse DBMS" msgstr "ツ环板msョツ嘉ッツ偲" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "open-source" msgstr "开源" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "relational" msgstr "关系" -#: templates/common_meta.html:24 +#: 
templates/common_meta.html:28 msgid "analytics" msgstr "分析" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "analytical" msgstr "分析" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "Big Data" msgstr "大数据" -#: templates/common_meta.html:24 +#: templates/common_meta.html:28 msgid "web-analytics" msgstr "网络分析" @@ -71,6 +71,14 @@ msgstr "软件按\"原样\"分发,不附带任何明示或暗示的担保或 msgid "Yandex LLC" msgstr "Yandex LLC" +#: templates/blog/content.html:16 +msgid "Published date" +msgstr "发布日期" + +#: templates/blog/nav.html:18 +msgid "Documentation" +msgstr "文件" + #: templates/docs/footer.html:3 msgid "Rating" msgstr "评分" diff --git a/website/main.html b/website/main.html index 73693204e5d..95debc2a233 100644 --- a/website/main.html +++ b/website/main.html @@ -3,6 +3,7 @@ {% set language = config.theme.language %} {% set direction = config.theme.direction %} {% set is_amp = config.extra.is_amp %} +{% set is_blog = config.extra.is_blog %} {% set single_page = config.extra.single_page %} {% set no_footer = True %} {% set og_type = 'article' %} @@ -36,22 +37,38 @@ {% if is_amp %} {% include "templates/docs/amp.html" %} {% else %} - {% set extra_html_attrs = 'data-version="' + data_version + '" data-single-page="' + data_single_page + '"' %} - {% set extra_body_attrs = 'data-spy="scroll" data-target="#toc" data-offset="80"' %} - + {% if not is_blog %} + {% set extra_html_attrs = 'data-version="' + data_version + '" data-single-page="' + data_single_page + '"' %} + {% set extra_body_attrs = 'data-spy="scroll" data-target="#toc" data-offset="80"' %} + {% else %} + {% set extra_body_attrs = 'class="blog"' %} + {% endif %} {% extends "templates/base.html" %} {% block content %} - {% include "templates/docs/nav.html" %} -
-
- {% include "templates/docs/sidebar.html" %} - {% include "templates/docs/content.html" %} - {% if not config.extra.single_page %} - {% include "templates/docs/toc.html" %} - {% endif %} + {% if not is_blog %} + {% include "templates/docs/nav.html" %} +
+
+ {% include "templates/docs/sidebar.html" %} + {% include "templates/docs/content.html" %} + {% if not config.extra.single_page %} + {% include "templates/docs/toc.html" %} + {% endif %} +
-
+ {% else %} + {% include "templates/blog/nav.html" %} +
+
+ {% include "templates/blog/content.html" %} +
+
+ {% if page and page.meta.is_index %} + {% include "templates/index/community.html" %} + {% include "templates/blog/footer.html" %} + {% endif %} + {% endif %} {% endblock %} {% endif %} diff --git a/website/sitemap-index.xml b/website/sitemap-index.xml index e53d6c29c54..75fdc75973c 100644 --- a/website/sitemap-index.xml +++ b/website/sitemap-index.xml @@ -21,6 +21,12 @@ https://clickhouse.tech/docs/fa/sitemap.xml + + https://clickhouse.tech/blog/en/sitemap.xml + + + https://clickhouse.tech/blog/ru/sitemap.xml + https://clickhouse.tech/sitemap-static.xml diff --git a/website/templates/blog/content.html b/website/templates/blog/content.html new file mode 100644 index 00000000000..38ad7933b00 --- /dev/null +++ b/website/templates/blog/content.html @@ -0,0 +1,43 @@ +
+ {% if not page.meta.is_index %} +
+
+ {% if page.meta.image %} + {{ title }} + {% endif %} +

{{ title }}

+
+ +
+ {{ page.content|adjust_markdown_html }} +
+ +
+ {{ page.meta.date }} + {% if page.meta.tags %} + {% for tag in page.meta.tags %} +
+ {{ tag }} +
+ {% endfor %} + {% endif %} +
+ {% include "templates/blog/footer.html" %} +
+ {% else %} + {% for post in config.extra.post_meta.values() %} + + {% set post_image = post.get('image') or '/images/index/intro.svg' %} +
+
+ {{ post['title'] }} +
+
+

{{ post['title'] }}

+ {{ post['date'] }} +
+
+
+ {% endfor %} + {% endif %} +
diff --git a/website/templates/blog/footer.html b/website/templates/blog/footer.html new file mode 100644 index 00000000000..3e94ecce51f --- /dev/null +++ b/website/templates/blog/footer.html @@ -0,0 +1,9 @@ +
+
+
+ +
+
+
diff --git a/website/templates/blog/nav.html b/website/templates/blog/nav.html new file mode 100644 index 00000000000..a7e135296f2 --- /dev/null +++ b/website/templates/blog/nav.html @@ -0,0 +1,45 @@ + diff --git a/website/templates/common_meta.html b/website/templates/common_meta.html index 84bd93d5175..86a852284ee 100644 --- a/website/templates/common_meta.html +++ b/website/templates/common_meta.html @@ -10,7 +10,11 @@ +{% if page and page.meta.image %} + +{% else %} +{% endif %} {% if page and not single_page %} @@ -20,13 +24,18 @@ {% include "templates/docs/ld_json.html" %} +{% if page and page.meta.tags %} + +{% else %} +{% endif %} {% if config and (config.extra.single_page or config.extra.version_prefix) %} {% endif %} -{% if config and page %} +{% if config and page and not is_blog %} {% for code, name in config.extra.languages.items() %} {% endfor %} diff --git a/website/templates/docs/ld_json.html b/website/templates/docs/ld_json.html index 3db89657221..7170a88dad0 100644 --- a/website/templates/docs/ld_json.html +++ b/website/templates/docs/ld_json.html @@ -1,12 +1,17 @@ {% if page and page.meta %} +}{% endif %}] {% endif %} diff --git a/website/templates/index/community.html b/website/templates/index/community.html index e230cac8da9..0adb3150ea0 100644 --- a/website/templates/index/community.html +++ b/website/templates/index/community.html @@ -113,8 +113,8 @@ class="bg-secondary-alt rounded-circle p-2 mr-4 float-left" />
{{ _('ClickHouse Blog') }}
-

{{ _('in') }} {{ _('English') }} - or in {{ _('Russian') }}

+

{{ _('in') }} {{ _('English') }} + or in {{ _('Russian') }}

diff --git a/website/templates/index/nav.html b/website/templates/index/nav.html index e3c680f1885..9bae81eb73c 100644 --- a/website/templates/index/nav.html +++ b/website/templates/index/nav.html @@ -18,8 +18,7 @@ Documentation