From 68abf43767d6f6e998373094147b7901b6222063 Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Sun, 9 Oct 2022 00:00:14 +0000 Subject: [PATCH 01/46] Better INTERVAL parsing and execution --- src/Functions/FunctionBinaryArithmetic.h | 71 ++++++++++++ src/Functions/vectorFunctions.cpp | 105 ++++++++++++++++++ src/Parsers/ExpressionListParsers.cpp | 81 ++++++++------ .../02457_tuple_of_intervals.reference | 17 +++ .../0_stateless/02457_tuple_of_intervals.sql | 21 ++++ 5 files changed, 262 insertions(+), 33 deletions(-) create mode 100644 tests/queries/0_stateless/02457_tuple_of_intervals.reference create mode 100644 tests/queries/0_stateless/02457_tuple_of_intervals.sql diff --git a/src/Functions/FunctionBinaryArithmetic.h b/src/Functions/FunctionBinaryArithmetic.h index 174e98dd81f..399cffac85e 100644 --- a/src/Functions/FunctionBinaryArithmetic.h +++ b/src/Functions/FunctionBinaryArithmetic.h @@ -708,6 +708,41 @@ class FunctionBinaryArithmetic : public IFunction return FunctionFactory::instance().get(function_name, context); } + static FunctionOverloadResolverPtr + getFunctionForTupleOfIntervalsArithmetic(const DataTypePtr & type0, const DataTypePtr & type1, ContextPtr context) + { + bool first_is_date_or_datetime = isDate(type0) || isDateTime(type0) || isDateTime64(type0); + bool second_is_date_or_datetime = isDate(type1) || isDateTime(type1) || isDateTime64(type1); + + /// Exactly one argument must be Date or DateTime + if (first_is_date_or_datetime == second_is_date_or_datetime) + return {}; + + if (!isTuple(type0) && !isTuple(type1)) + return {}; + + /// Special case when the function is plus or minus, one of arguments is Date/DateTime and another is Tuple. + /// We construct another function and call it. + if constexpr (!is_plus && !is_minus) + return {}; + + if (isTuple(type0) && second_is_date_or_datetime && is_minus) + throw Exception("Wrong order of arguments for function " + String(name) + ": argument of Tuple type cannot be first", + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + std::string function_name; + if (is_plus) + { + function_name = "addTupleOfIntervals"; + } + else if (is_minus) + { + function_name = "subtractTupleOfIntervals"; + } + + return FunctionFactory::instance().get(function_name, context); + } + static FunctionOverloadResolverPtr getFunctionForTupleArithmetic(const DataTypePtr & type0, const DataTypePtr & type1, ContextPtr context) { @@ -906,6 +941,20 @@ class FunctionBinaryArithmetic : public IFunction return function->execute(new_arguments, result_type, input_rows_count); } + ColumnPtr executeDateTimeTupleOfIntervalsPlusMinus(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, + size_t input_rows_count, const FunctionOverloadResolverPtr & function_builder) const + { + ColumnsWithTypeAndName new_arguments = arguments; + + /// Tuple argument must be second. + if (isTuple(arguments[0].type)) + std::swap(new_arguments[0], new_arguments[1]); + + auto function = function_builder->build(new_arguments); + + return function->execute(new_arguments, result_type, input_rows_count); + } + ColumnPtr executeTupleNumberOperator(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count, const FunctionOverloadResolverPtr & function_builder) const { @@ -1121,6 +1170,22 @@ public: return function->getResultType(); } + /// Special case when the function is plus or minus, one of arguments is Date/DateTime and another is Tuple. 
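In SQL terms, the special case above turns addition of a Date/DateTime and a tuple of intervals into a call to addTupleOfIntervals (or subtractTupleOfIntervals for minus). A minimal sketch of the intended behaviour, with the expected output taken from the reference file added in this patch:

SELECT '2022-10-11'::Date + (INTERVAL 1 DAY, INTERVAL 1 MONTH);
-- 2022-11-12, equivalent to addTupleOfIntervals('2022-10-11'::Date, (INTERVAL 1 DAY, INTERVAL 1 MONTH))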
+ if (auto function_builder = getFunctionForTupleOfIntervalsArithmetic(arguments[0], arguments[1], context)) + { + ColumnsWithTypeAndName new_arguments(2); + + for (size_t i = 0; i < 2; ++i) + new_arguments[i].type = arguments[i]; + + /// Tuple argument must be second. + if (isTuple(new_arguments[0].type)) + std::swap(new_arguments[0], new_arguments[1]); + + auto function = function_builder->build(new_arguments); + return function->getResultType(); + } + /// Special case when the function is multiply or divide, one of arguments is Tuple and another is Number. if (auto function_builder = getFunctionForTupleAndNumberArithmetic(arguments[0], arguments[1], context)) { @@ -1553,6 +1618,12 @@ public: return executeDateTimeIntervalPlusMinus(arguments, result_type, input_rows_count, function_builder); } + /// Special case when the function is plus or minus, one of arguments is Date/DateTime and another is Tuple. + if (auto function_builder = getFunctionForTupleOfIntervalsArithmetic(arguments[0].type, arguments[1].type, context)) + { + return executeDateTimeTupleOfIntervalsPlusMinus(arguments, result_type, input_rows_count, function_builder); + } + /// Special case when the function is plus, minus or multiply, both arguments are tuples. if (auto function_builder = getFunctionForTupleArithmetic(arguments[0].type, arguments[1].type, context)) { diff --git a/src/Functions/vectorFunctions.cpp b/src/Functions/vectorFunctions.cpp index 20571f67eff..4e9f2a71f8c 100644 --- a/src/Functions/vectorFunctions.cpp +++ b/src/Functions/vectorFunctions.cpp @@ -415,6 +415,108 @@ public: } }; +template +class FunctionDateOrDateTimeOperationTupleOfIntervals : public ITupleFunction +{ +public: + static constexpr auto name = Impl::name; + + explicit FunctionDateOrDateTimeOperationTupleOfIntervals(ContextPtr context_) : ITupleFunction(context_) {} + static FunctionPtr create(ContextPtr context_) + { + return std::make_shared(context_); + } + + String getName() const override { return name; } + + size_t getNumberOfArguments() const override { return 2; } + + DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override + { + if (!isDate(arguments[0].type) && !isDate32(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type)) + throw Exception{ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of first argument of function {}. Should be a date or a date with time", + arguments[0].type->getName(), getName()}; + + const auto * cur_tuple = checkAndGetDataType(arguments[1].type.get()); + + if (!cur_tuple) + throw Exception{ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of second argument of function {}. Should be a tuple", + arguments[0].type->getName(), getName()}; + + const auto & cur_types = cur_tuple->getElements(); + + Columns cur_elements; + if (arguments[1].column) + cur_elements = getTupleElements(*arguments[1].column); + + size_t tuple_size = cur_types.size(); + if (tuple_size == 0) + return arguments[0].type; + + auto plus = FunctionFactory::instance().get(Impl::func_name, context); + DataTypePtr res_type = arguments[0].type; + for (size_t i = 0; i < tuple_size; ++i) + { + try + { + ColumnWithTypeAndName left{res_type, {}}; + ColumnWithTypeAndName right{cur_elements.empty() ? 
nullptr : cur_elements[i], cur_types[i], {}}; + auto plus_elem = plus->build({left, right}); + res_type = plus_elem->getResultType(); + } + catch (DB::Exception & e) + { + e.addMessage("While executing function {} for tuple element {}", getName(), i); + throw; + } + } + + return res_type; + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override + { + const auto * cur_tuple = checkAndGetDataType(arguments[1].type.get()); + const auto & cur_types = cur_tuple->getElements(); + auto cur_elements = getTupleElements(*arguments[1].column); + + size_t tuple_size = cur_elements.size(); + if (tuple_size == 0) + return arguments[0].column; + + auto plus = FunctionFactory::instance().get(Impl::func_name, context); + ColumnWithTypeAndName res; + for (size_t i = 0; i < tuple_size; ++i) + { + ColumnWithTypeAndName column{cur_elements[i], cur_types[i], {}}; + auto elem_plus = plus->build(ColumnsWithTypeAndName{i == 0 ? arguments[0] : res, column}); + auto res_type = elem_plus->getResultType(); + res.column = elem_plus->execute({i == 0 ? arguments[0] : res, column}, res_type, input_rows_count); + res.type = res_type; + } + + return res.column; + } +}; + +struct AddTupleOfIntervalsImpl +{ + static constexpr auto name = "addTupleOfIntervals"; + static constexpr auto func_name = "plus"; +}; + +struct SubtractTupleOfIntervalsImpl +{ + static constexpr auto name = "subtractTupleOfIntervals"; + static constexpr auto func_name = "minus"; +}; + +using FunctionAddTupleOfIntervals = FunctionDateOrDateTimeOperationTupleOfIntervals; + +using FunctionSubtractTupleOfIntervals = FunctionDateOrDateTimeOperationTupleOfIntervals; + /// this is for convenient usage in LNormalize template class FunctionLNorm : public ITupleFunction {}; @@ -1282,6 +1384,9 @@ REGISTER_FUNCTION(VectorFunctions) factory.registerFunction(); factory.registerFunction(); + factory.registerFunction(); + factory.registerFunction(); + factory.registerFunction(); factory.registerFunction(); diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp index f7a016a59e4..353f22b03b6 100644 --- a/src/Parsers/ExpressionListParsers.cpp +++ b/src/Parsers/ExpressionListParsers.cpp @@ -1684,6 +1684,16 @@ private: class IntervalLayer : public Layer { public: + bool getResult(ASTPtr & node) override + { + if (elements.size() == 1) + node = elements[0]; + else + node = makeASTFunction("tuple", std::move(elements)); + + return true; + } + bool parse(IParser::Pos & pos, Expected & expected, Action & action) override { /// INTERVAL 1 HOUR or INTERVAL expr HOUR @@ -1693,49 +1703,54 @@ public: if (state == 0) { + state = 1; + auto begin = pos; auto init_expected = expected; ASTPtr string_literal; + String literal; + //// A String literal followed INTERVAL keyword, /// the literal can be a part of an expression or /// include Number and INTERVAL TYPE at the same time - if (ParserStringLiteral{}.parse(pos, string_literal, expected)) + if (ParserStringLiteral{}.parse(pos, string_literal, expected) + && string_literal->as().value.tryGet(literal)) { - String literal; - if (string_literal->as().value.tryGet(literal)) + Tokens tokens(literal.data(), literal.data() + literal.size()); + IParser::Pos token_pos(tokens, 0); + Expected token_expected; + ASTPtr expr; + + if (!ParserNumber{}.parse(token_pos, expr, token_expected)) + return false; + + /// case: INTERVAL '1' HOUR + /// back to begin + if (!token_pos.isValid()) { - Tokens tokens(literal.data(), literal.data() + 
literal.size()); - IParser::Pos token_pos(tokens, 0); - Expected token_expected; - ASTPtr expr; - - if (!ParserNumber{}.parse(token_pos, expr, token_expected)) - { - return false; - } - else - { - /// case: INTERVAL '1' HOUR - /// back to begin - if (!token_pos.isValid()) - { - pos = begin; - expected = init_expected; - } - else - { - /// case: INTERVAL '1 HOUR' - if (!parseIntervalKind(token_pos, token_expected, interval_kind)) - return false; - - elements = {makeASTFunction(interval_kind.toNameOfFunctionToIntervalDataType(), expr)}; - finished = true; - return true; - } - } + pos = begin; + expected = init_expected; + return true; } + + /// case: INTERVAL '1 HOUR' + if (!parseIntervalKind(token_pos, token_expected, interval_kind)) + return false; + + pushResult(makeASTFunction(interval_kind.toNameOfFunctionToIntervalDataType(), expr)); + + /// case: INTERVAL '1 HOUR 1 SECOND ...' + while (token_pos.isValid()) + { + if (!ParserNumber{}.parse(token_pos, expr, token_expected) || + !parseIntervalKind(token_pos, token_expected, interval_kind)) + return false; + + pushResult(makeASTFunction(interval_kind.toNameOfFunctionToIntervalDataType(), expr)); + } + + finished = true; } - state = 1; return true; } diff --git a/tests/queries/0_stateless/02457_tuple_of_intervals.reference b/tests/queries/0_stateless/02457_tuple_of_intervals.reference new file mode 100644 index 00000000000..40bbfb35d91 --- /dev/null +++ b/tests/queries/0_stateless/02457_tuple_of_intervals.reference @@ -0,0 +1,17 @@ +SELECT (toIntervalSecond(-1), toIntervalMinute(2), toIntervalMonth(-3), toIntervalYear(1)) +- +2022-11-12 +2022-11-12 +2022-11-12 +- +2023-07-11 00:01:59 +2023-07-11 00:01:59 +2023-07-11 00:01:59 +- +2021-07-31 23:00:00 +2021-07-31 23:00:00 +2021-07-31 23:00:00 +- +2021-06-10 23:59:59.000 +2021-06-10 23:59:59.000 +2021-06-10 23:59:59.000 diff --git a/tests/queries/0_stateless/02457_tuple_of_intervals.sql b/tests/queries/0_stateless/02457_tuple_of_intervals.sql new file mode 100644 index 00000000000..2c2feaf522a --- /dev/null +++ b/tests/queries/0_stateless/02457_tuple_of_intervals.sql @@ -0,0 +1,21 @@ +EXPLAIN SYNTAX SELECT INTERVAL '-1 SECOND 2 MINUTE -3 MONTH 1 YEAR'; + +SELECT '-'; +SELECT '2022-10-11'::Date + INTERVAL 1 DAY + INTERVAL 1 MONTH; +SELECT '2022-10-11'::Date + (INTERVAL 1 DAY, INTERVAL 1 MONTH); +SELECT '2022-10-11'::Date + INTERVAL '1 DAY 1 MONTH'; + +SELECT '-'; +SELECT '2022-10-11'::Date + INTERVAL -1 SECOND + INTERVAL 2 MINUTE + INTERVAL -3 MONTH + INTERVAL 1 YEAR; +SELECT '2022-10-11'::Date + (INTERVAL -1 SECOND, INTERVAL 2 MINUTE, INTERVAL -3 MONTH, INTERVAL 1 YEAR); +SELECT '2022-10-11'::Date + INTERVAL '-1 SECOND 2 MINUTE -3 MONTH 1 YEAR'; + +SELECT '-'; +SELECT '2022-10-11'::DateTime - INTERVAL 1 QUARTER - INTERVAL -3 WEEK - INTERVAL 1 YEAR - INTERVAL 1 HOUR; +SELECT '2022-10-11'::DateTime - (INTERVAL 1 QUARTER, INTERVAL -3 WEEK, INTERVAL 1 YEAR, INTERVAL 1 HOUR); +SELECT '2022-10-11'::DateTime - INTERVAL '1 QUARTER -3 WEEK 1 YEAR 1 HOUR'; + +SELECT '-'; +SELECT '2022-10-11'::DateTime64 - INTERVAL 1 YEAR - INTERVAL 4 MONTH - INTERVAL 1 SECOND; +SELECT '2022-10-11'::DateTime64 - (INTERVAL 1 YEAR, INTERVAL 4 MONTH, INTERVAL 1 SECOND); +SELECT '2022-10-11'::DateTime64 - INTERVAL '1 YEAR 4 MONTH 1 SECOND'; From af1d306b12756e3c2f5d5de4bb7df0086c95ba77 Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Fri, 14 Oct 2022 02:16:12 +0000 Subject: [PATCH 02/46] Add Interval arithmetics --- src/DataTypes/IDataType.h | 1 + src/Functions/FunctionBinaryArithmetic.h | 100 +++++++- 
src/Functions/FunctionUnaryArithmetic.h | 10 +- src/Functions/vectorFunctions.cpp | 227 +++++++++++++++++- .../02457_tuple_of_intervals.reference | 31 ++- .../0_stateless/02457_tuple_of_intervals.sql | 53 ++-- 6 files changed, 380 insertions(+), 42 deletions(-) diff --git a/src/DataTypes/IDataType.h b/src/DataTypes/IDataType.h index c93128ced95..45353796f3c 100644 --- a/src/DataTypes/IDataType.h +++ b/src/DataTypes/IDataType.h @@ -408,6 +408,7 @@ inline bool isDecimal(const DataTypePtr & data_type) { return WhichDataType(data inline bool isTuple(const DataTypePtr & data_type) { return WhichDataType(data_type).isTuple(); } inline bool isArray(const DataTypePtr & data_type) { return WhichDataType(data_type).isArray(); } inline bool isMap(const DataTypePtr & data_type) {return WhichDataType(data_type).isMap(); } +inline bool isInterval(const DataTypePtr & data_type) {return WhichDataType(data_type).isInterval(); } inline bool isNothing(const DataTypePtr & data_type) { return WhichDataType(data_type).isNothing(); } inline bool isUUID(const DataTypePtr & data_type) { return WhichDataType(data_type).isUUID(); } diff --git a/src/Functions/FunctionBinaryArithmetic.h b/src/Functions/FunctionBinaryArithmetic.h index 399cffac85e..e4919d3e9d7 100644 --- a/src/Functions/FunctionBinaryArithmetic.h +++ b/src/Functions/FunctionBinaryArithmetic.h @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -633,7 +634,8 @@ class FunctionBinaryArithmetic : public IFunction DataTypeInt8, DataTypeInt16, DataTypeInt32, DataTypeInt64, DataTypeInt128, DataTypeInt256, DataTypeDecimal32, DataTypeDecimal64, DataTypeDecimal128, DataTypeDecimal256, DataTypeDate, DataTypeDateTime, - DataTypeFixedString, DataTypeString>; + DataTypeFixedString, DataTypeString, + DataTypeInterval>; using Floats = TypeList; @@ -709,10 +711,10 @@ class FunctionBinaryArithmetic : public IFunction } static FunctionOverloadResolverPtr - getFunctionForTupleOfIntervalsArithmetic(const DataTypePtr & type0, const DataTypePtr & type1, ContextPtr context) + getFunctionForDateTupleOfIntervalsArithmetic(const DataTypePtr & type0, const DataTypePtr & type1, ContextPtr context) { - bool first_is_date_or_datetime = isDate(type0) || isDateTime(type0) || isDateTime64(type0); - bool second_is_date_or_datetime = isDate(type1) || isDateTime(type1) || isDateTime64(type1); + bool first_is_date_or_datetime = isDateOrDate32(type0) || isDateTime(type0) || isDateTime64(type0); + bool second_is_date_or_datetime = isDateOrDate32(type1) || isDateTime(type1) || isDateTime64(type1); /// Exactly one argument must be Date or DateTime if (first_is_date_or_datetime == second_is_date_or_datetime) @@ -735,7 +737,7 @@ class FunctionBinaryArithmetic : public IFunction { function_name = "addTupleOfIntervals"; } - else if (is_minus) + else { function_name = "subtractTupleOfIntervals"; } @@ -743,6 +745,47 @@ class FunctionBinaryArithmetic : public IFunction return FunctionFactory::instance().get(function_name, context); } + static FunctionOverloadResolverPtr + getFunctionForMergeIntervalsArithmetic(const DataTypePtr & type0, const DataTypePtr & type1, ContextPtr context) + { + /// Special case when the function is plus or minus, first argument is Interval or Tuple of Intervals + /// and the second argument is the Inteval of a different kind. 
+ /// We construct another function (example: addIntervals) and call it + + if constexpr (!is_plus && !is_minus) + return {}; + + const auto * tuple_data_type_0 = checkAndGetDataType(type0.get()); + const auto * interval_data_type_0 = checkAndGetDataType(type0.get()); + const auto * interval_data_type_1 = checkAndGetDataType(type1.get()); + + if ((!tuple_data_type_0 && !interval_data_type_0) || !interval_data_type_1) + return {}; + + if (interval_data_type_0 && interval_data_type_0->equals(*interval_data_type_1)) + return {}; + + if (tuple_data_type_0) + { + auto & tuple_types = tuple_data_type_0->getElements(); + for (auto & type : tuple_types) + if (!isInterval(type)) + return {}; + } + + std::string function_name; + if (is_plus) + { + function_name = "addInterval"; + } + else + { + function_name = "subtractInterval"; + } + + return FunctionFactory::instance().get(function_name, context); + } + static FunctionOverloadResolverPtr getFunctionForTupleArithmetic(const DataTypePtr & type0, const DataTypePtr & type1, ContextPtr context) { @@ -955,6 +998,16 @@ class FunctionBinaryArithmetic : public IFunction return function->execute(new_arguments, result_type, input_rows_count); } + ColumnPtr executeIntervalTupleOfIntervalsPlusMinus(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, + size_t input_rows_count, const FunctionOverloadResolverPtr & function_builder) const + { + ColumnsWithTypeAndName new_arguments = arguments; + + auto function = function_builder->build(new_arguments); + + return function->execute(new_arguments, result_type, input_rows_count); + } + ColumnPtr executeTupleNumberOperator(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count, const FunctionOverloadResolverPtr & function_builder) const { @@ -1171,7 +1224,7 @@ public: } /// Special case when the function is plus or minus, one of arguments is Date/DateTime and another is Tuple. - if (auto function_builder = getFunctionForTupleOfIntervalsArithmetic(arguments[0], arguments[1], context)) + if (auto function_builder = getFunctionForDateTupleOfIntervalsArithmetic(arguments[0], arguments[1], context)) { ColumnsWithTypeAndName new_arguments(2); @@ -1186,6 +1239,18 @@ public: return function->getResultType(); } + /// Special case when the function is plus or minus, one of arguments is Interval/Tuple of Intervals and another is Interval. + if (auto function_builder = getFunctionForMergeIntervalsArithmetic(arguments[0], arguments[1], context)) + { + ColumnsWithTypeAndName new_arguments(2); + + for (size_t i = 0; i < 2; ++i) + new_arguments[i].type = arguments[i]; + + auto function = function_builder->build(new_arguments); + return function->getResultType(); + } + /// Special case when the function is multiply or divide, one of arguments is Tuple and another is Number. if (auto function_builder = getFunctionForTupleAndNumberArithmetic(arguments[0], arguments[1], context)) { @@ -1237,6 +1302,21 @@ public: type_res = std::make_shared(); return true; } + else if constexpr (std::is_same_v || std::is_same_v) + { + if constexpr (std::is_same_v && + std::is_same_v) + { + if constexpr (is_plus || is_minus) + { + if (left.getKind() == right.getKind()) + { + type_res = std::make_shared(left.getKind()); + return true; + } + } + } + } else { using ResultDataType = typename BinaryOperationTraits::ResultDataType; @@ -1619,11 +1699,17 @@ public: } /// Special case when the function is plus or minus, one of arguments is Date/DateTime and another is Tuple. 
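The merge-intervals special case above (getFunctionForMergeIntervalsArithmetic) also lets plus and minus combine bare intervals: intervals of the same kind are merged into a single interval, while different kinds accumulate into a tuple of intervals. A sketch of the expected behaviour, mirroring the reference output added in this patch:

WITH INTERVAL 1 SECOND + INTERVAL 1 SECOND + INTERVAL 1 SECOND AS expr SELECT expr, toTypeName(expr);
-- 3      IntervalSecond                        (same kind: merged)
WITH INTERVAL 1 HOUR + INTERVAL 1 SECOND + INTERVAL 1 SECOND AS expr SELECT expr, toTypeName(expr);
-- (1,2)  Tuple(IntervalHour, IntervalSecond)   (mixed kinds: tuple)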
- if (auto function_builder = getFunctionForTupleOfIntervalsArithmetic(arguments[0].type, arguments[1].type, context)) + if (auto function_builder = getFunctionForDateTupleOfIntervalsArithmetic(arguments[0].type, arguments[1].type, context)) { return executeDateTimeTupleOfIntervalsPlusMinus(arguments, result_type, input_rows_count, function_builder); } + /// Special case when the function is plus or minus, one of arguments is Interval/Tuple of Intervals and another is Interval. + if (auto function_builder = getFunctionForMergeIntervalsArithmetic(arguments[0].type, arguments[1].type, context)) + { + return executeIntervalTupleOfIntervalsPlusMinus(arguments, result_type, input_rows_count, function_builder); + } + /// Special case when the function is plus, minus or multiply, both arguments are tuples. if (auto function_builder = getFunctionForTupleArithmetic(arguments[0].type, arguments[1].type, context)) { diff --git a/src/Functions/FunctionUnaryArithmetic.h b/src/Functions/FunctionUnaryArithmetic.h index 445eb45fd9d..f5ddc5cb67c 100644 --- a/src/Functions/FunctionUnaryArithmetic.h +++ b/src/Functions/FunctionUnaryArithmetic.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -145,7 +146,8 @@ class FunctionUnaryArithmetic : public IFunction DataTypeDecimal, DataTypeDecimal, DataTypeDecimal, - DataTypeFixedString + DataTypeFixedString, + DataTypeInterval >(type, std::forward(f)); } @@ -211,6 +213,12 @@ public: return false; result = std::make_shared(type.getN()); } + else if constexpr (std::is_same_v) + { + if constexpr (!IsUnaryOperation::negate) + return false; + result = std::make_shared(type.getKind()); + } else { using T0 = typename DataType::FieldType; diff --git a/src/Functions/vectorFunctions.cpp b/src/Functions/vectorFunctions.cpp index 4e9f2a71f8c..007875a8b81 100644 --- a/src/Functions/vectorFunctions.cpp +++ b/src/Functions/vectorFunctions.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -517,6 +518,172 @@ using FunctionAddTupleOfIntervals = FunctionDateOrDateTimeOperationTupleOfInterv using FunctionSubtractTupleOfIntervals = FunctionDateOrDateTimeOperationTupleOfIntervals; +template +struct FunctionTupleOperationInterval : public ITupleFunction +{ +public: + static constexpr auto name = is_minus ? 
"subtractInterval" : "addInterval"; + + explicit FunctionTupleOperationInterval(ContextPtr context_) : ITupleFunction(context_) {} + + static FunctionPtr create(ContextPtr context_) + { + return std::make_shared(context_); + } + + String getName() const override { return name; } + + size_t getNumberOfArguments() const override { return 2; } + + DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override + { + if (!isTuple(arguments[0]) && !isInterval(arguments[0])) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of first argument of function {}, must be Tuple or Interval", + arguments[0]->getName(), getName()); + + if (!isInterval(arguments[1])) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of second argument of function {}, must be Interval", + arguments[0]->getName(), getName()); + + DataTypes types; + + const auto * tuple = checkAndGetDataType(arguments[0].get()); + + if (tuple) + { + const auto & cur_types = tuple->getElements(); + + for (auto & type : cur_types) + if (!isInterval(type)) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of Tuple element of first argument of function {}, must be Interval", + types.back()->getName(), getName()); + + types = cur_types; + } + else + { + types = {arguments[0]}; + } + + const auto * interval_last = checkAndGetDataType(types.back().get()); + const auto * interval_new = checkAndGetDataType(arguments[1].get()); + + if (!interval_last->equals(*interval_new)) + types.push_back(arguments[1]); + + return std::make_shared(types); + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override + { + if (!isInterval(arguments[1].type)) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of second argument of function {}, must be Interval", + arguments[0].type->getName(), getName()); + + Columns tuple_columns; + + const auto * first_tuple = checkAndGetDataType(arguments[0].type.get()); + const auto * first_interval = checkAndGetDataType(arguments[0].type.get()); + const auto * second_interval = checkAndGetDataType(arguments[1].type.get()); + + bool can_be_merged; + + if (first_interval) + { + can_be_merged = first_interval->equals(*second_interval); + + if (can_be_merged) + tuple_columns.resize(1); + else + tuple_columns.resize(2); + + tuple_columns[0] = arguments[0].column->convertToFullColumnIfConst(); + } + else if (first_tuple) + { + const auto & cur_types = first_tuple->getElements(); + + for (auto & type : cur_types) + if (!isInterval(type)) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of Tuple element of first argument of function {}, must be Interval", + type->getName(), getName()); + + auto cur_elements = getTupleElements(*arguments[0].column); + size_t tuple_size = cur_elements.size(); + + if (tuple_size == 0) + { + can_be_merged = false; + } + else + { + const auto * tuple_last_interval = checkAndGetDataType(cur_types.back().get()); + can_be_merged = tuple_last_interval->equals(*second_interval); + } + + if (can_be_merged) + tuple_columns.resize(tuple_size); + else + tuple_columns.resize(tuple_size + 1); + + for (size_t i = 0; i < tuple_size; ++i) + tuple_columns[i] = cur_elements[i]; + } + else + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of first argument of function {}, must be Tuple or Interval", + arguments[0].type->getName(), getName()); + + + ColumnPtr & last_column = tuple_columns.back(); + 
+ if (can_be_merged) + { + ColumnWithTypeAndName left{last_column, arguments[1].type, {}}; + + if constexpr (is_minus) + { + auto minus = FunctionFactory::instance().get("minus", context); + auto elem_minus = minus->build({left, arguments[1]}); + last_column = elem_minus->execute({left, arguments[1]}, arguments[1].type, input_rows_count) + ->convertToFullColumnIfConst(); + } + else + { + auto plus = FunctionFactory::instance().get("plus", context); + auto elem_plus = plus->build({left, arguments[1]}); + last_column = elem_plus->execute({left, arguments[1]}, arguments[1].type, input_rows_count) + ->convertToFullColumnIfConst(); + } + } + else + { + if constexpr (is_minus) + { + auto negate = FunctionFactory::instance().get("negate", context); + auto elem_negate = negate->build({arguments[1]}); + last_column = elem_negate->execute({arguments[1]}, arguments[1].type, input_rows_count); + } + else + { + last_column = arguments[1].column; + } + } + + return ColumnTuple::create(tuple_columns); + } +}; + +using FunctionTupleAddInterval = FunctionTupleOperationInterval; + +using FunctionTupleSubtractInterval = FunctionTupleOperationInterval; + + /// this is for convenient usage in LNormalize template class FunctionLNorm : public ITupleFunction {}; @@ -1384,8 +1551,64 @@ REGISTER_FUNCTION(VectorFunctions) factory.registerFunction(); factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); + factory.registerFunction( + { + R"( +Consecutively adds a tuple of intervals to a Date or a DateTime. +[example:tuple] +)", + Documentation::Examples{ + {"tuple", "WITH toDate('2018-01-01') AS date SELECT addTupleOfIntervals(date, (INTERVAL 1 DAY, INTERVAL 1 YEAR))"}, + }, + Documentation::Categories{"Tuple", "Interval", "Date", "DateTime"} + }); + + factory.registerFunction( + { + R"( +Consecutively subtracts a tuple of intervals from a Date or a DateTime. +[example:tuple] +)", + Documentation::Examples{ + {"tuple", "WITH toDate('2018-01-01') AS date SELECT subtractTupleOfIntervals(date, (INTERVAL 1 DAY, INTERVAL 1 YEAR))"}, + }, + Documentation::Categories{"Tuple", "Interval", "Date", "DateTime"} + }); + + factory.registerFunction( + { + R"( +Adds an interval to another interval or tuple of intervals. The returned value is tuple of intervals. +[example:tuple] +[example:interval1] + +If the types of the first interval (or the interval in the tuple) and the second interval are the same they will be merged into one interval. +[example:interval2] +)", + Documentation::Examples{ + {"tuple", "SELECT addInterval((INTERVAL 1 DAY, INTERVAL 1 YEAR), INTERVAL 1 MONTH)"}, + {"interval1", "SELECT addInterval(INTERVAL 1 DAY, INTERVAL 1 MONTH)"}, + {"interval2", "SELECT addInterval(INTERVAL 1 DAY, INTERVAL 1 DAY)"}, + }, + Documentation::Categories{"Tuple", "Interval"} + }); + factory.registerFunction( + { + R"( +Adds an negated interval to another interval or tuple of intervals. The returned value is tuple of intervals. +[example:tuple] +[example:interval1] + +If the types of the first interval (or the interval in the tuple) and the second interval are the same they will be merged into one interval. 
+[example:interval2] +)", + Documentation::Examples{ + {"tuple", "SELECT subtractInterval((INTERVAL 1 DAY, INTERVAL 1 YEAR), INTERVAL 1 MONTH)"}, + {"interval1", "SELECT subtractInterval(INTERVAL 1 DAY, INTERVAL 1 MONTH)"}, + {"interval2", "SELECT subtractInterval(INTERVAL 2 DAY, INTERVAL 1 DAY)"}, + }, + Documentation::Categories{"Tuple", "Interval"} + }); factory.registerFunction(); factory.registerFunction(); diff --git a/tests/queries/0_stateless/02457_tuple_of_intervals.reference b/tests/queries/0_stateless/02457_tuple_of_intervals.reference index 40bbfb35d91..dd190dce891 100644 --- a/tests/queries/0_stateless/02457_tuple_of_intervals.reference +++ b/tests/queries/0_stateless/02457_tuple_of_intervals.reference @@ -1,17 +1,16 @@ SELECT (toIntervalSecond(-1), toIntervalMinute(2), toIntervalMonth(-3), toIntervalYear(1)) -- -2022-11-12 -2022-11-12 -2022-11-12 -- -2023-07-11 00:01:59 -2023-07-11 00:01:59 -2023-07-11 00:01:59 -- -2021-07-31 23:00:00 -2021-07-31 23:00:00 -2021-07-31 23:00:00 -- -2021-06-10 23:59:59.000 -2021-06-10 23:59:59.000 -2021-06-10 23:59:59.000 +--- +3 IntervalSecond +(1,2) Tuple(IntervalHour, IntervalSecond) +(1,1,1) Tuple(IntervalSecond, IntervalHour, IntervalSecond) +(2,1) Tuple(IntervalSecond, IntervalHour) +--- +-3 IntervalSecond +(-1,-2) Tuple(IntervalHour, IntervalSecond) +(-1,-1,-1) Tuple(IntervalSecond, IntervalHour, IntervalSecond) +(-2,-1) Tuple(IntervalSecond, IntervalHour) +--- +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/02457_tuple_of_intervals.sql b/tests/queries/0_stateless/02457_tuple_of_intervals.sql index 2c2feaf522a..d4065ab98f8 100644 --- a/tests/queries/0_stateless/02457_tuple_of_intervals.sql +++ b/tests/queries/0_stateless/02457_tuple_of_intervals.sql @@ -1,21 +1,42 @@ EXPLAIN SYNTAX SELECT INTERVAL '-1 SECOND 2 MINUTE -3 MONTH 1 YEAR'; -SELECT '-'; -SELECT '2022-10-11'::Date + INTERVAL 1 DAY + INTERVAL 1 MONTH; -SELECT '2022-10-11'::Date + (INTERVAL 1 DAY, INTERVAL 1 MONTH); -SELECT '2022-10-11'::Date + INTERVAL '1 DAY 1 MONTH'; +SELECT '---'; -SELECT '-'; -SELECT '2022-10-11'::Date + INTERVAL -1 SECOND + INTERVAL 2 MINUTE + INTERVAL -3 MONTH + INTERVAL 1 YEAR; -SELECT '2022-10-11'::Date + (INTERVAL -1 SECOND, INTERVAL 2 MINUTE, INTERVAL -3 MONTH, INTERVAL 1 YEAR); -SELECT '2022-10-11'::Date + INTERVAL '-1 SECOND 2 MINUTE -3 MONTH 1 YEAR'; +WITH INTERVAL 1 SECOND + INTERVAL 1 SECOND + INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); +WITH INTERVAL 1 HOUR + INTERVAL 1 SECOND + INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); +WITH INTERVAL 1 SECOND + INTERVAL 1 HOUR + INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); +WITH INTERVAL 1 SECOND + INTERVAL 1 SECOND + INTERVAL 1 HOUR as expr SELECT expr, toTypeName(expr); -SELECT '-'; -SELECT '2022-10-11'::DateTime - INTERVAL 1 QUARTER - INTERVAL -3 WEEK - INTERVAL 1 YEAR - INTERVAL 1 HOUR; -SELECT '2022-10-11'::DateTime - (INTERVAL 1 QUARTER, INTERVAL -3 WEEK, INTERVAL 1 YEAR, INTERVAL 1 HOUR); -SELECT '2022-10-11'::DateTime - INTERVAL '1 QUARTER -3 WEEK 1 YEAR 1 HOUR'; +SELECT '---'; -SELECT '-'; -SELECT '2022-10-11'::DateTime64 - INTERVAL 1 YEAR - INTERVAL 4 MONTH - INTERVAL 1 SECOND; -SELECT '2022-10-11'::DateTime64 - (INTERVAL 1 YEAR, INTERVAL 4 MONTH, INTERVAL 1 SECOND); -SELECT '2022-10-11'::DateTime64 - INTERVAL '1 YEAR 4 MONTH 1 SECOND'; +WITH - INTERVAL 1 SECOND - INTERVAL 1 SECOND - INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); +WITH - INTERVAL 1 HOUR - INTERVAL 1 SECOND - INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); +WITH - INTERVAL 1 
SECOND - INTERVAL 1 HOUR - INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); +WITH - INTERVAL 1 SECOND - INTERVAL 1 SECOND - INTERVAL 1 HOUR as expr SELECT expr, toTypeName(expr); + +SELECT '---'; + +WITH '2022-10-11'::Date + INTERVAL 1 DAY + INTERVAL 1 MONTH AS e1, + '2022-10-11'::Date + (INTERVAL 1 DAY + INTERVAL 1 MONTH) AS e2, + '2022-10-11'::Date + (INTERVAL 1 DAY, INTERVAL 1 MONTH) AS e3, + '2022-10-11'::Date + INTERVAL '1 DAY 1 MONTH' AS e4 +SELECT e1 == e2 AND e2 == e3 AND e3 == e4; + +WITH '2022-10-11'::Date + INTERVAL -1 SECOND + INTERVAL 2 MINUTE + INTERVAL -3 MONTH + INTERVAL 1 YEAR AS e1, + '2022-10-11'::Date + (INTERVAL -1 SECOND + INTERVAL 2 MINUTE + INTERVAL -3 MONTH + INTERVAL 1 YEAR) AS e2, + '2022-10-11'::Date + (INTERVAL -1 SECOND, INTERVAL 2 MINUTE, INTERVAL -3 MONTH, INTERVAL 1 YEAR) AS e3, + '2022-10-11'::Date + INTERVAL '-1 SECOND 2 MINUTE -3 MONTH 1 YEAR' AS e4 +SELECT e1 == e2 AND e2 == e3 AND e3 == e4; + +WITH '2022-10-11'::DateTime - INTERVAL 1 QUARTER - INTERVAL -3 WEEK - INTERVAL 1 YEAR - INTERVAL 1 HOUR AS e1, + '2022-10-11'::DateTime + (- INTERVAL 1 QUARTER - INTERVAL -3 WEEK - INTERVAL 1 YEAR - INTERVAL 1 HOUR) AS e2, + '2022-10-11'::DateTime - (INTERVAL 1 QUARTER, INTERVAL -3 WEEK, INTERVAL 1 YEAR, INTERVAL 1 HOUR) AS e3, + '2022-10-11'::DateTime - INTERVAL '1 QUARTER -3 WEEK 1 YEAR 1 HOUR' AS e4 +SELECT e1 == e2 AND e2 == e3 AND e3 == e4; + + +WITH '2022-10-11'::DateTime64 - INTERVAL 1 YEAR - INTERVAL 4 MONTH - INTERVAL 1 SECOND AS e1, + '2022-10-11'::DateTime64 + (- INTERVAL 1 YEAR - INTERVAL 4 MONTH - INTERVAL 1 SECOND) AS e2, + '2022-10-11'::DateTime64 - (INTERVAL 1 YEAR, INTERVAL 4 MONTH, INTERVAL 1 SECOND) AS e3, + '2022-10-11'::DateTime64 - INTERVAL '1 YEAR 4 MONTH 1 SECOND' AS e4 +SELECT e1 == e2 AND e2 == e3 AND e3 == e4; \ No newline at end of file From cf6471c6b9965a82acb16b4a576e5f8a1ecf123d Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Fri, 14 Oct 2022 02:48:04 +0000 Subject: [PATCH 03/46] Fix style --- src/Functions/FunctionBinaryArithmetic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Functions/FunctionBinaryArithmetic.h b/src/Functions/FunctionBinaryArithmetic.h index e4919d3e9d7..c13cc67a1bc 100644 --- a/src/Functions/FunctionBinaryArithmetic.h +++ b/src/Functions/FunctionBinaryArithmetic.h @@ -749,7 +749,7 @@ class FunctionBinaryArithmetic : public IFunction getFunctionForMergeIntervalsArithmetic(const DataTypePtr & type0, const DataTypePtr & type1, ContextPtr context) { /// Special case when the function is plus or minus, first argument is Interval or Tuple of Intervals - /// and the second argument is the Inteval of a different kind. + /// and the second argument is the Interval of a different kind. /// We construct another function (example: addIntervals) and call it if constexpr (!is_plus && !is_minus) From dc99e67d3f577b90e72cad5aa2623b1099bf16b4 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Fri, 14 Oct 2022 17:45:17 +0300 Subject: [PATCH 04/46] Update run-fuzzer.sh --- docker/test/fuzzer/run-fuzzer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 7248728864e..0728d3e1127 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -178,7 +178,7 @@ function fuzz # interferes with gdb export CLICKHOUSE_WATCHDOG_ENABLE=0 # NOTE: we use process substitution here to preserve keep $! 
as a pid of clickhouse-server - clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db > >(tail -100000 > server.log) 2>&1 & + clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db >> server.log 2>&1 & server_pid=$! kill -0 $server_pid From c11e4bfbbfbc36999814e5983e19e41b1eecd2ea Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Fri, 14 Oct 2022 15:13:26 +0000 Subject: [PATCH 05/46] Fix build --- src/Functions/vectorFunctions.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Functions/vectorFunctions.cpp b/src/Functions/vectorFunctions.cpp index 007875a8b81..78f35342d60 100644 --- a/src/Functions/vectorFunctions.cpp +++ b/src/Functions/vectorFunctions.cpp @@ -555,7 +555,7 @@ public: { const auto & cur_types = tuple->getElements(); - for (auto & type : cur_types) + for (const auto & type : cur_types) if (!isInterval(type)) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of Tuple element of first argument of function {}, must be Interval", @@ -607,7 +607,7 @@ public: { const auto & cur_types = first_tuple->getElements(); - for (auto & type : cur_types) + for (const auto & type : cur_types) if (!isInterval(type)) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of Tuple element of first argument of function {}, must be Interval", From a50c0a7f85f7cef911a946d4ba9a54b61023ad97 Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Tue, 18 Oct 2022 07:23:00 +0000 Subject: [PATCH 06/46] Better test --- .../02457_tuple_of_intervals.reference | 21 +++++++--- .../0_stateless/02457_tuple_of_intervals.sql | 42 ++++++++++++++----- 2 files changed, 48 insertions(+), 15 deletions(-) diff --git a/tests/queries/0_stateless/02457_tuple_of_intervals.reference b/tests/queries/0_stateless/02457_tuple_of_intervals.reference index dd190dce891..d5ffbc33dc0 100644 --- a/tests/queries/0_stateless/02457_tuple_of_intervals.reference +++ b/tests/queries/0_stateless/02457_tuple_of_intervals.reference @@ -1,16 +1,27 @@ SELECT (toIntervalSecond(-1), toIntervalMinute(2), toIntervalMonth(-3), toIntervalYear(1)) --- +-1 +2022-10-12 +2022-10-10 +(2) +(0) +2022-10-12 +2022-10-10 +2022-10-12 +(2) Tuple(IntervalSecond) +(0) Tuple(IntervalSecond) +--- 3 IntervalSecond (1,2) Tuple(IntervalHour, IntervalSecond) (1,1,1) Tuple(IntervalSecond, IntervalHour, IntervalSecond) (2,1) Tuple(IntervalSecond, IntervalHour) ---- -3 IntervalSecond (-1,-2) Tuple(IntervalHour, IntervalSecond) (-1,-1,-1) Tuple(IntervalSecond, IntervalHour, IntervalSecond) (-2,-1) Tuple(IntervalSecond, IntervalHour) --- -1 -1 -1 -1 +1 2022-03-01 +1 2022-02-28 +1 2023-07-11 00:01:59 +1 2021-07-31 23:00:00 +1 2021-06-10 23:59:59.000 diff --git a/tests/queries/0_stateless/02457_tuple_of_intervals.sql b/tests/queries/0_stateless/02457_tuple_of_intervals.sql index d4065ab98f8..494914d4d4f 100644 --- a/tests/queries/0_stateless/02457_tuple_of_intervals.sql +++ b/tests/queries/0_stateless/02457_tuple_of_intervals.sql @@ -2,13 +2,29 @@ EXPLAIN SYNTAX SELECT INTERVAL '-1 SECOND 2 MINUTE -3 MONTH 1 YEAR'; SELECT '---'; +SELECT negate(INTERVAL 1 SECOND); +SELECT addTupleOfIntervals('2022-10-11'::Date, tuple(INTERVAL 1 DAY)); +SELECT subtractTupleOfIntervals('2022-10-11'::Date, tuple(INTERVAL 1 DAY)); +SELECT addInterval(tuple(INTERVAL 1 SECOND), INTERVAL 1 SECOND); +SELECT subtractInterval(tuple(INTERVAL 1 SECOND), INTERVAL 1 SECOND); + +SELECT '2022-10-11'::Date + 
tuple(INTERVAL 1 DAY); +SELECT '2022-10-11'::Date - tuple(INTERVAL 1 DAY); +SELECT tuple(INTERVAL 1 DAY) + '2022-10-11'::Date; +SELECT tuple(INTERVAL 1 DAY) - '2022-10-11'::Date; -- { serverError 43 } + +WITH tuple(INTERVAL 1 SECOND) + INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); +WITH tuple(INTERVAL 1 SECOND) - INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); +WITH INTERVAL 1 SECOND + tuple(INTERVAL 1 SECOND) as expr SELECT expr, toTypeName(expr); -- { serverError 43 } +WITH INTERVAL 1 SECOND - tuple(INTERVAL 1 SECOND) as expr SELECT expr, toTypeName(expr); -- { serverError 43 } + +SELECT '---'; + WITH INTERVAL 1 SECOND + INTERVAL 1 SECOND + INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); WITH INTERVAL 1 HOUR + INTERVAL 1 SECOND + INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); WITH INTERVAL 1 SECOND + INTERVAL 1 HOUR + INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); WITH INTERVAL 1 SECOND + INTERVAL 1 SECOND + INTERVAL 1 HOUR as expr SELECT expr, toTypeName(expr); -SELECT '---'; - WITH - INTERVAL 1 SECOND - INTERVAL 1 SECOND - INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); WITH - INTERVAL 1 HOUR - INTERVAL 1 SECOND - INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); WITH - INTERVAL 1 SECOND - INTERVAL 1 HOUR - INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); @@ -16,27 +32,33 @@ WITH - INTERVAL 1 SECOND - INTERVAL 1 SECOND - INTERVAL 1 HOUR as expr SELECT ex SELECT '---'; -WITH '2022-10-11'::Date + INTERVAL 1 DAY + INTERVAL 1 MONTH AS e1, - '2022-10-11'::Date + (INTERVAL 1 DAY + INTERVAL 1 MONTH) AS e2, - '2022-10-11'::Date + (INTERVAL 1 DAY, INTERVAL 1 MONTH) AS e3, - '2022-10-11'::Date + INTERVAL '1 DAY 1 MONTH' AS e4 -SELECT e1 == e2 AND e2 == e3 AND e3 == e4; +WITH '2022-01-30'::Date + INTERVAL 1 MONTH + INTERVAL 1 DAY AS e1, + '2022-01-30'::Date + (INTERVAL 1 MONTH + INTERVAL 1 DAY) AS e2, + '2022-01-30'::Date + (INTERVAL 1 MONTH, INTERVAL 1 DAY) AS e3, + '2022-01-30'::Date + INTERVAL '1 MONTH 1 DAY' AS e4 +SELECT e1 == e2 AND e2 == e3 AND e3 == e4, e1; + +WITH '2022-01-30'::Date + INTERVAL 1 DAY + INTERVAL 1 MONTH AS e1, + '2022-01-30'::Date + (INTERVAL 1 DAY + INTERVAL 1 MONTH) AS e2, + '2022-01-30'::Date + (INTERVAL 1 DAY, INTERVAL 1 MONTH) AS e3, + '2022-01-30'::Date + INTERVAL '1 DAY 1 MONTH' AS e4 +SELECT e1 == e2 AND e2 == e3 AND e3 == e4, e1; WITH '2022-10-11'::Date + INTERVAL -1 SECOND + INTERVAL 2 MINUTE + INTERVAL -3 MONTH + INTERVAL 1 YEAR AS e1, '2022-10-11'::Date + (INTERVAL -1 SECOND + INTERVAL 2 MINUTE + INTERVAL -3 MONTH + INTERVAL 1 YEAR) AS e2, '2022-10-11'::Date + (INTERVAL -1 SECOND, INTERVAL 2 MINUTE, INTERVAL -3 MONTH, INTERVAL 1 YEAR) AS e3, '2022-10-11'::Date + INTERVAL '-1 SECOND 2 MINUTE -3 MONTH 1 YEAR' AS e4 -SELECT e1 == e2 AND e2 == e3 AND e3 == e4; +SELECT e1 == e2 AND e2 == e3 AND e3 == e4, e1; WITH '2022-10-11'::DateTime - INTERVAL 1 QUARTER - INTERVAL -3 WEEK - INTERVAL 1 YEAR - INTERVAL 1 HOUR AS e1, '2022-10-11'::DateTime + (- INTERVAL 1 QUARTER - INTERVAL -3 WEEK - INTERVAL 1 YEAR - INTERVAL 1 HOUR) AS e2, '2022-10-11'::DateTime - (INTERVAL 1 QUARTER, INTERVAL -3 WEEK, INTERVAL 1 YEAR, INTERVAL 1 HOUR) AS e3, '2022-10-11'::DateTime - INTERVAL '1 QUARTER -3 WEEK 1 YEAR 1 HOUR' AS e4 -SELECT e1 == e2 AND e2 == e3 AND e3 == e4; +SELECT e1 == e2 AND e2 == e3 AND e3 == e4, e1; WITH '2022-10-11'::DateTime64 - INTERVAL 1 YEAR - INTERVAL 4 MONTH - INTERVAL 1 SECOND AS e1, '2022-10-11'::DateTime64 + (- INTERVAL 1 YEAR - INTERVAL 4 MONTH - INTERVAL 1 SECOND) AS e2, 
'2022-10-11'::DateTime64 - (INTERVAL 1 YEAR, INTERVAL 4 MONTH, INTERVAL 1 SECOND) AS e3, '2022-10-11'::DateTime64 - INTERVAL '1 YEAR 4 MONTH 1 SECOND' AS e4 -SELECT e1 == e2 AND e2 == e3 AND e3 == e4; \ No newline at end of file +SELECT e1 == e2 AND e2 == e3 AND e3 == e4, e1; \ No newline at end of file From 77ea58d5392a6e741240fbafd7515e8213bc6277 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Fri, 21 Oct 2022 17:58:20 +0300 Subject: [PATCH 07/46] Update run-fuzzer.sh --- docker/test/fuzzer/run-fuzzer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 0728d3e1127..5c9ab1af24e 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -178,7 +178,7 @@ function fuzz # interferes with gdb export CLICKHOUSE_WATCHDOG_ENABLE=0 # NOTE: we use process substitution here to preserve keep $! as a pid of clickhouse-server - clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db >> server.log 2>&1 & + clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db 2>> server.log & server_pid=$! kill -0 $server_pid From 6242e93c81c22a1306c4dd35c263f65e22048fe9 Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Mon, 24 Oct 2022 02:11:08 +0000 Subject: [PATCH 08/46] Fixes & better tests --- src/Functions/vectorFunctions.cpp | 8 ++++---- .../0_stateless/02457_tuple_of_intervals.reference | 5 +++++ tests/queries/0_stateless/02457_tuple_of_intervals.sql | 7 +++++++ 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/src/Functions/vectorFunctions.cpp b/src/Functions/vectorFunctions.cpp index 78f35342d60..20835f59cc1 100644 --- a/src/Functions/vectorFunctions.cpp +++ b/src/Functions/vectorFunctions.cpp @@ -434,7 +434,7 @@ public: DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { - if (!isDate(arguments[0].type) && !isDate32(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type)) + if (!isDateOrDate32(arguments[0].type) && !isDateTime(arguments[0].type) && !isDateTime64(arguments[0].type)) throw Exception{ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of first argument of function {}. 
Should be a date or a date with time", arguments[0].type->getName(), getName()}; @@ -545,7 +545,7 @@ public: if (!isInterval(arguments[1])) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of second argument of function {}, must be Interval", - arguments[0]->getName(), getName()); + arguments[1]->getName(), getName()); DataTypes types; @@ -559,7 +559,7 @@ public: if (!isInterval(type)) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of Tuple element of first argument of function {}, must be Interval", - types.back()->getName(), getName()); + type->getName(), getName()); types = cur_types; } @@ -582,7 +582,7 @@ public: if (!isInterval(arguments[1].type)) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of second argument of function {}, must be Interval", - arguments[0].type->getName(), getName()); + arguments[1].type->getName(), getName()); Columns tuple_columns; diff --git a/tests/queries/0_stateless/02457_tuple_of_intervals.reference b/tests/queries/0_stateless/02457_tuple_of_intervals.reference index d5ffbc33dc0..e635aec1163 100644 --- a/tests/queries/0_stateless/02457_tuple_of_intervals.reference +++ b/tests/queries/0_stateless/02457_tuple_of_intervals.reference @@ -5,6 +5,11 @@ SELECT (toIntervalSecond(-1), toIntervalMinute(2), toIntervalMonth(-3), toInterv 2022-10-10 (2) (0) +2022-11-12 +2022-09-10 +(1,2) +(1,0) +--- 2022-10-12 2022-10-10 2022-10-12 diff --git a/tests/queries/0_stateless/02457_tuple_of_intervals.sql b/tests/queries/0_stateless/02457_tuple_of_intervals.sql index 494914d4d4f..be9ccb50d92 100644 --- a/tests/queries/0_stateless/02457_tuple_of_intervals.sql +++ b/tests/queries/0_stateless/02457_tuple_of_intervals.sql @@ -8,6 +8,13 @@ SELECT subtractTupleOfIntervals('2022-10-11'::Date, tuple(INTERVAL 1 DAY)); SELECT addInterval(tuple(INTERVAL 1 SECOND), INTERVAL 1 SECOND); SELECT subtractInterval(tuple(INTERVAL 1 SECOND), INTERVAL 1 SECOND); +SELECT addTupleOfIntervals('2022-10-11'::Date, (INTERVAL 1 DAY, INTERVAL 1 MONTH)); +SELECT subtractTupleOfIntervals('2022-10-11'::Date, (INTERVAL 1 DAY, INTERVAL 1 MONTH)); +SELECT addInterval((INTERVAL 1 DAY, INTERVAL 1 SECOND), INTERVAL 1 SECOND); +SELECT subtractInterval(tuple(INTERVAL 1 DAY, INTERVAL 1 SECOND), INTERVAL 1 SECOND); + +SELECT '---'; + SELECT '2022-10-11'::Date + tuple(INTERVAL 1 DAY); SELECT '2022-10-11'::Date - tuple(INTERVAL 1 DAY); SELECT tuple(INTERVAL 1 DAY) + '2022-10-11'::Date; From 7419a3bd583ea476baa0b57ea8c66e647aa114a8 Mon Sep 17 00:00:00 2001 From: Lloyd-Pottiger Date: Sat, 29 Oct 2022 18:58:11 +0800 Subject: [PATCH 09/46] fix Signed-off-by: Lloyd-Pottiger --- CMakeLists.txt | 4 ++-- cmake/tools.cmake | 11 ++++++++--- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7a04f347b2d..d10bc63c15e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -202,7 +202,7 @@ option(ADD_GDB_INDEX_FOR_GOLD "Add .gdb-index to resulting binaries for gold lin if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE") # Can be lld or ld-lld or lld-13 or /path/to/lld. - if (LINKER_NAME MATCHES "lld") + if (LINKER_NAME MATCHES "lld" AND OS_LINUX) set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gdb-index") set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--gdb-index") message (STATUS "Adding .gdb-index via --gdb-index linker option.") @@ -248,7 +248,7 @@ endif () # Create BuildID when using lld. For other linkers it is created by default. 
 # (NOTE: LINKER_NAME can be either path or name, and in different variants)
-if (LINKER_NAME MATCHES "lld")
+if (LINKER_NAME MATCHES "lld" AND OS_LINUX)
     # SHA1 is not cryptographically secure but it is the best what lld is offering.
     set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--build-id=sha1")
 endif ()
diff --git a/cmake/tools.cmake b/cmake/tools.cmake
index 8a17d97cf13..23f34bb24cd 100644
--- a/cmake/tools.cmake
+++ b/cmake/tools.cmake
@@ -57,14 +57,19 @@ if (NOT LINKER_NAME)
     if (COMPILER_GCC)
         find_program (LLD_PATH NAMES "ld.lld")
         find_program (GOLD_PATH NAMES "ld.gold")
-    elseif (COMPILER_CLANG)
+    # llvm lld is a generic driver.
+    # Invoke ld.lld (Unix), ld64.lld (macOS), lld-link (Windows), wasm-ld (WebAssembly) instead
+    elseif (COMPILER_CLANG AND OS_LINUX)
         find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "lld-${COMPILER_VERSION_MAJOR}" "ld.lld" "lld")
         find_program (GOLD_PATH NAMES "ld.gold" "gold")
+    elseif (COMPILER_CLANG AND OS_DARWIN)
+        find_program (LLD_PATH NAMES "ld64.lld-${COMPILER_VERSION_MAJOR}" "lld-${COMPILER_VERSION_MAJOR}" "ld64.lld" "lld")
+        find_program (GOLD_PATH NAMES "ld.gold" "gold")
     endif ()
 endif()

-if (OS_LINUX AND NOT LINKER_NAME)
-    # prefer lld linker over gold or ld on linux
+if ((OS_LINUX OR OS_DARWIN) AND NOT LINKER_NAME)
+    # prefer lld linker over gold or ld on linux and macos
     if (LLD_PATH)
         if (COMPILER_GCC)
             # GCC driver requires one of supported linker names like "lld".

From c97c78e3fff7792e0ff6fd358df5f3415b7ab2e5 Mon Sep 17 00:00:00 2001
From: Smita Kulkarni
Date: Mon, 31 Oct 2022 13:49:31 +0100
Subject: [PATCH 10/46] Updated normaliser to clone the alias ast.

Implementation:
* Updated QueryNormalizer to clone the alias AST when it is replaced. Previously,
  just assigning the same AST led to an exception in LogicalExpressionsOptimizer,
  as the same parent would be inserted again.
* This bug is not seen with the new analyser (allow_experimental_analyzer), so no
  changes are needed for it. I added a test for the same.
Testing: * Added a test for or function with alias and const where 02475_or_function_alias_and_const_where.sql --- src/Interpreters/QueryNormalizer.cpp | 2 +- .../02475_or_function_alias_and_const_where.reference | 2 ++ .../0_stateless/02475_or_function_alias_and_const_where.sql | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/02475_or_function_alias_and_const_where.reference create mode 100644 tests/queries/0_stateless/02475_or_function_alias_and_const_where.sql diff --git a/src/Interpreters/QueryNormalizer.cpp b/src/Interpreters/QueryNormalizer.cpp index 2a8b256c3d1..0f57a8f549c 100644 --- a/src/Interpreters/QueryNormalizer.cpp +++ b/src/Interpreters/QueryNormalizer.cpp @@ -118,7 +118,7 @@ void QueryNormalizer::visit(ASTIdentifier & node, ASTPtr & ast, Data & data) } } else - ast = alias_node; + ast = alias_node->clone(); } } diff --git a/tests/queries/0_stateless/02475_or_function_alias_and_const_where.reference b/tests/queries/0_stateless/02475_or_function_alias_and_const_where.reference new file mode 100644 index 00000000000..b5d8e605a7d --- /dev/null +++ b/tests/queries/0_stateless/02475_or_function_alias_and_const_where.reference @@ -0,0 +1,2 @@ +0 0 +0 0 diff --git a/tests/queries/0_stateless/02475_or_function_alias_and_const_where.sql b/tests/queries/0_stateless/02475_or_function_alias_and_const_where.sql new file mode 100644 index 00000000000..ddb0f70c6de --- /dev/null +++ b/tests/queries/0_stateless/02475_or_function_alias_and_const_where.sql @@ -0,0 +1,2 @@ +SELECT (number = 1) AND (number = 2) AS value, sum(value) OVER () FROM numbers(1) WHERE 1; +SELECT (number = 1) AND (number = 2) AS value, sum(value) OVER () FROM numbers(1) WHERE 1 SETTINGS allow_experimental_analyzer=1; \ No newline at end of file From c1e2b2af74910d0fa28c89612c20fd5658bb84ec Mon Sep 17 00:00:00 2001 From: zhongyuankai <872237106@qq.com> Date: Mon, 31 Oct 2022 08:57:31 +0800 Subject: [PATCH 11/46] Optimize TTL merge, completely expired parts can be removed in time --- src/Interpreters/PartLog.cpp | 2 ++ src/Interpreters/PartLog.h | 2 ++ src/Storages/MergeTree/MergeFromLogEntryTask.cpp | 8 +++++--- src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp | 7 +++++-- src/Storages/MergeTree/MergeType.cpp | 2 +- src/Storages/MergeTree/MergeType.h | 2 ++ src/Storages/StorageMergeTree.cpp | 7 ++++++- 7 files changed, 23 insertions(+), 7 deletions(-) diff --git a/src/Interpreters/PartLog.cpp b/src/Interpreters/PartLog.cpp index b35ee50b98e..6797d6b3a41 100644 --- a/src/Interpreters/PartLog.cpp +++ b/src/Interpreters/PartLog.cpp @@ -31,6 +31,8 @@ PartLogElement::MergeReasonType PartLogElement::getMergeReasonType(MergeType mer return TTL_DELETE_MERGE; case MergeType::TTLRecompress: return TTL_RECOMPRESS_MERGE; + case MergeType::TTLDrop: + return TTL_DROP_MERGE; } throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unknown MergeType {}", static_cast(merge_type)); diff --git a/src/Interpreters/PartLog.h b/src/Interpreters/PartLog.h index 2ce0dfd76de..6180963908d 100644 --- a/src/Interpreters/PartLog.h +++ b/src/Interpreters/PartLog.h @@ -41,6 +41,8 @@ struct PartLogElement TTL_DELETE_MERGE = 3, /// Merge with recompression TTL_RECOMPRESS_MERGE = 4, + /// Merge assigned to drop parts (with TTLMergeSelector) + TTL_DROP_MERGE = 5, }; String query_id; diff --git a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp index 9a9b8a4a6bb..0711ed157a5 100644 --- a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp +++ 
b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp @@ -152,7 +152,9 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare() } /// Start to make the main work - size_t estimated_space_for_merge = MergeTreeDataMergerMutator::estimateNeededDiskSpace(parts); + size_t need_total_size = 0; + if (entry.merge_type != MergeType::TTLDrop) + need_total_size = MergeTreeDataMergerMutator::estimateNeededDiskSpace(parts); /// Can throw an exception while reserving space. IMergeTreeDataPart::TTLInfos ttl_infos; @@ -180,7 +182,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare() std::optional tagger; ReservationSharedPtr reserved_space = storage.balancedReservation( metadata_snapshot, - estimated_space_for_merge, + need_total_size, max_volume_index, future_merged_part->name, future_merged_part->part_info, @@ -190,7 +192,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare() if (!reserved_space) reserved_space = storage.reserveSpacePreferringTTLRules( - metadata_snapshot, estimated_space_for_merge, ttl_infos, time(nullptr), max_volume_index); + metadata_snapshot, need_total_size, ttl_infos, time(nullptr), max_volume_index); future_merged_part->uuid = entry.new_part_uuid; future_merged_part->updatePath(storage, reserved_space.get()); diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 0b5c5285d15..879cd2fa3da 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -309,10 +309,13 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMerge( data_settings->merge_with_ttl_timeout, data_settings->ttl_only_drop_parts); - parts_to_merge = delete_ttl_selector.select(parts_ranges, max_total_size_to_merge); + parts_to_merge = delete_ttl_selector.select( + parts_ranges, + data_settings->ttl_only_drop_parts ? data_settings->max_bytes_to_merge_at_max_space_in_pool : max_total_size_to_merge); + if (!parts_to_merge.empty()) { - future_part->merge_type = MergeType::TTLDelete; + future_part->merge_type = data_settings->ttl_only_drop_parts ? MergeType::TTLDrop : MergeType::TTLDelete; } else if (metadata_snapshot->hasAnyRecompressionTTL()) { diff --git a/src/Storages/MergeTree/MergeType.cpp b/src/Storages/MergeTree/MergeType.cpp index 045114578d0..96862b6cca1 100644 --- a/src/Storages/MergeTree/MergeType.cpp +++ b/src/Storages/MergeTree/MergeType.cpp @@ -20,7 +20,7 @@ MergeType checkAndGetMergeType(UInt32 merge_type) bool isTTLMergeType(MergeType merge_type) { - return merge_type == MergeType::TTLDelete || merge_type == MergeType::TTLRecompress; + return merge_type == MergeType::TTLDelete || merge_type == MergeType::TTLRecompress || merge_type == MergeType::TTLDrop; } } diff --git a/src/Storages/MergeTree/MergeType.h b/src/Storages/MergeTree/MergeType.h index ce9a40c5931..fa5df87b25a 100644 --- a/src/Storages/MergeTree/MergeType.h +++ b/src/Storages/MergeTree/MergeType.h @@ -19,6 +19,8 @@ enum class MergeType TTLDelete = 2, /// Merge with recompression TTLRecompress = 3, + /// Merge assigned to drop parts (with TTLMergeSelector) + TTLDrop = 4, }; /// Check parsed merge_type from raw int and get enum value. 
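The new TTLDrop merge type is assigned when ttl_only_drop_parts is enabled, so a completely expired part can be removed without reserving disk space for a rewritten part (need_total_size stays 0 in that case). A usage sketch, assuming an illustrative table name; ttl_only_drop_parts is the existing MergeTree setting this change builds on:

CREATE TABLE t_expired
(
    d DateTime,
    x UInt64
)
ENGINE = MergeTree
ORDER BY x
TTL d + INTERVAL 1 MONTH
SETTINGS ttl_only_drop_parts = 1;
-- A part whose rows are all older than one month becomes eligible for a
-- TTLDrop merge and is dropped whole instead of being merged and rewritten.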
diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index a450a9ef3a9..805242fbec9 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -929,7 +929,12 @@ MergeMutateSelectedEntryPtr StorageMergeTree::selectPartsToMerge( if (isTTLMergeType(future_part->merge_type)) getContext()->getMergeList().bookMergeWithTTL(); - merging_tagger = std::make_unique(future_part, MergeTreeDataMergerMutator::estimateNeededDiskSpace(future_part->parts), *this, metadata_snapshot, false); + /// If merge_type is TTLDrop, no need to reserve disk space + size_t need_total_size = 0; + if (future_part->merge_type != MergeType::TTLDrop) + need_total_size = MergeTreeDataMergerMutator::estimateNeededDiskSpace(future_part->parts); + + merging_tagger = std::make_unique(future_part, need_total_size, *this, metadata_snapshot, false); return std::make_shared(future_part, std::move(merging_tagger), std::make_shared()); } From 4f50a999bba357f20656f89c6d7980fadbe2542f Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 1 Nov 2022 21:35:25 +0100 Subject: [PATCH 12/46] remove abandonable_lock part 1 --- .../MergeTree/EphemeralLockInZooKeeper.cpp | 60 +++++++------------ .../MergeTree/EphemeralLockInZooKeeper.h | 23 +++---- src/Storages/StorageReplicatedMergeTree.cpp | 17 ++++++ 3 files changed, 49 insertions(+), 51 deletions(-) diff --git a/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp b/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp index 7abed125b7a..600f6d30bb6 100644 --- a/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp +++ b/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp @@ -12,11 +12,9 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -EphemeralLockInZooKeeper::EphemeralLockInZooKeeper(const String & path_prefix_, zkutil::ZooKeeper & zookeeper_, const String & holder_path_) - : zookeeper(&zookeeper_), path_prefix(path_prefix_), holder_path(holder_path_) +EphemeralLockInZooKeeper::EphemeralLockInZooKeeper(const String & path_prefix_, zkutil::ZooKeeper & zookeeper_, const String & path_) + : zookeeper(&zookeeper_), path_prefix(path_prefix_), path(path_) { - /// Write the path to the secondary node in the main node. - path = zookeeper->create(path_prefix, holder_path, zkutil::CreateMode::EphemeralSequential); if (path.size() <= path_prefix.size()) throw Exception("Logical error: name of the main node is shorter than prefix.", ErrorCodes::LOGICAL_ERROR); } @@ -24,22 +22,22 @@ EphemeralLockInZooKeeper::EphemeralLockInZooKeeper(const String & path_prefix_, std::optional createEphemeralLockInZooKeeper( const String & path_prefix_, const String & temp_path, zkutil::ZooKeeper & zookeeper_, const String & deduplication_path) { - /// The /abandonable_lock- name is for backward compatibility. - String holder_path_prefix = temp_path + "/abandonable_lock-"; - String holder_path; + String path; - /// Let's create an secondary ephemeral node. 
if (deduplication_path.empty()) { - holder_path = zookeeper_.create(holder_path_prefix, "", zkutil::CreateMode::EphemeralSequential); + String holder_path = temp_path + "/" + EphemeralLockInZooKeeper::LEGACY_LOCK_OTHER; + path = zookeeper_.create(path_prefix_, holder_path, zkutil::CreateMode::EphemeralSequential); } else { + String holder_path = temp_path + "/" + EphemeralLockInZooKeeper::LEGACY_LOCK_INSERT; + /// Check for duplicates in advance, to avoid superfluous block numbers allocation Coordination::Requests ops; ops.emplace_back(zkutil::makeCreateRequest(deduplication_path, "", zkutil::CreateMode::Persistent)); ops.emplace_back(zkutil::makeRemoveRequest(deduplication_path, -1)); - ops.emplace_back(zkutil::makeCreateRequest(holder_path_prefix, "", zkutil::CreateMode::EphemeralSequential)); + ops.emplace_back(zkutil::makeCreateRequest(path_prefix_, holder_path, zkutil::CreateMode::EphemeralSequential)); Coordination::Responses responses; Coordination::Error e = zookeeper_.tryMulti(ops, responses); if (e != Coordination::Error::ZOK) @@ -55,10 +53,10 @@ std::optional createEphemeralLockInZooKeeper( } } - holder_path = dynamic_cast(responses.back().get())->path_created; + path = dynamic_cast(responses.back().get())->path_created; } - return EphemeralLockInZooKeeper{path_prefix_, zookeeper_, holder_path}; + return EphemeralLockInZooKeeper{path_prefix_, zookeeper_, path}; } void EphemeralLockInZooKeeper::unlock() @@ -66,19 +64,18 @@ void EphemeralLockInZooKeeper::unlock() Coordination::Requests ops; getUnlockOps(ops); zookeeper->multi(ops); - holder_path = ""; + zookeeper = nullptr; } void EphemeralLockInZooKeeper::getUnlockOps(Coordination::Requests & ops) { checkCreated(); ops.emplace_back(zkutil::makeRemoveRequest(path, -1)); - ops.emplace_back(zkutil::makeRemoveRequest(holder_path, -1)); } EphemeralLockInZooKeeper::~EphemeralLockInZooKeeper() { - if (!isCreated()) + if (!isLocked()) return; try @@ -97,33 +94,18 @@ EphemeralLocksInAllPartitions::EphemeralLocksInAllPartitions( zkutil::ZooKeeper & zookeeper_) : zookeeper(&zookeeper_) { - std::vector holders; + String holder_path = temp_path + "/" + EphemeralLockInZooKeeper::LEGACY_LOCK_OTHER; while (true) { Coordination::Stat partitions_stat; Strings partitions = zookeeper->getChildren(block_numbers_path, &partitions_stat); - if (holders.size() < partitions.size()) - { - std::vector> holder_futures; - for (size_t i = 0; i < partitions.size() - holders.size(); ++i) - { - String path = temp_path + "/abandonable_lock-"; - holder_futures.push_back(zookeeper->asyncCreate(path, {}, zkutil::CreateMode::EphemeralSequential)); - } - for (auto & future : holder_futures) - { - auto resp = future.get(); - holders.push_back(resp.path_created); - } - } - Coordination::Requests lock_ops; - for (size_t i = 0; i < partitions.size(); ++i) + for (const auto & partition : partitions) { - String partition_path_prefix = block_numbers_path + "/" + partitions[i] + "/" + path_prefix; + String partition_path_prefix = block_numbers_path + "/" + partition + "/" + path_prefix; lock_ops.push_back(zkutil::makeCreateRequest( - partition_path_prefix, holders[i], zkutil::CreateMode::EphemeralSequential)); + partition_path_prefix, holder_path, zkutil::CreateMode::EphemeralSequential)); } lock_ops.push_back(zkutil::makeCheckRequest(block_numbers_path, partitions_stat.version)); @@ -146,7 +128,7 @@ EphemeralLocksInAllPartitions::EphemeralLocksInAllPartitions( ErrorCodes::LOGICAL_ERROR); UInt64 number = parse(path.c_str() + prefix_size, path.size() - prefix_size); - 
locks.push_back(LockInfo{path, holders[i], partitions[i], number}); + locks.push_back(LockInfo{path, partitions[i], number}); } return; @@ -158,19 +140,17 @@ void EphemeralLocksInAllPartitions::unlock() if (!zookeeper) return; - std::vector futures; + std::vector futures; for (const auto & lock : locks) { - Coordination::Requests unlock_ops; - unlock_ops.emplace_back(zkutil::makeRemoveRequest(lock.path, -1)); - unlock_ops.emplace_back(zkutil::makeRemoveRequest(lock.holder_path, -1)); - futures.push_back(zookeeper->asyncMulti(unlock_ops)); + futures.push_back(zookeeper->asyncRemove(lock.path)); } for (auto & future : futures) future.get(); locks.clear(); + zookeeper = nullptr; } EphemeralLocksInAllPartitions::~EphemeralLocksInAllPartitions() diff --git a/src/Storages/MergeTree/EphemeralLockInZooKeeper.h b/src/Storages/MergeTree/EphemeralLockInZooKeeper.h index 276ffab5254..670f5de2489 100644 --- a/src/Storages/MergeTree/EphemeralLockInZooKeeper.h +++ b/src/Storages/MergeTree/EphemeralLockInZooKeeper.h @@ -19,9 +19,9 @@ namespace ErrorCodes } /// A class that is used for locking a block number in a partition. -/// It creates a secondary ephemeral node in `temp_path` and a main ephemeral node with `path_prefix` -/// that references the secondary node. The reasons for this two-level scheme are historical (of course -/// it would be simpler to allocate block numbers for all partitions in one ZK directory). +/// Before 22.11 it used to create a secondary ephemeral node in `temp_path` with "abandonable_lock-" prefix +/// and a main ephemeral node with `path_prefix` that references the secondary node. The reasons for this two-level scheme are historical. +/// Since 22.11 it creates single ephemeral node with `path_prefix` that references persistent fake "secondary node". class EphemeralLockInZooKeeper : public boost::noncopyable { friend std::optional createEphemeralLockInZooKeeper( @@ -29,9 +29,13 @@ class EphemeralLockInZooKeeper : public boost::noncopyable protected: EphemeralLockInZooKeeper() = delete; - EphemeralLockInZooKeeper(const String & path_prefix_, zkutil::ZooKeeper & zookeeper_, const String & holder_path_); + EphemeralLockInZooKeeper(const String & path_prefix_, zkutil::ZooKeeper & zookeeper_, const String & path_); public: + /// Fake "secondary node" names for blocks with and without "deduplication_path" + static constexpr const char * LEGACY_LOCK_INSERT = "abandonable_lock-insert"; + static constexpr const char * LEGACY_LOCK_OTHER = "abandonable_lock-other"; + EphemeralLockInZooKeeper(EphemeralLockInZooKeeper && rhs) noexcept { *this = std::move(rhs); @@ -43,13 +47,12 @@ public: rhs.zookeeper = nullptr; path_prefix = std::move(rhs.path_prefix); path = std::move(rhs.path); - holder_path = std::move(rhs.holder_path); return *this; } - bool isCreated() const + bool isLocked() const { - return zookeeper && !holder_path.empty() && !path.empty(); + return zookeeper; } String getPath() const @@ -74,12 +77,12 @@ public: /// because the nodes will be already deleted. 
void assumeUnlocked() { - holder_path.clear(); + zookeeper = nullptr; } void checkCreated() const { - if (!isCreated()) + if (!isLocked()) throw Exception("EphemeralLock is not created", ErrorCodes::LOGICAL_ERROR); } @@ -89,7 +92,6 @@ private: zkutil::ZooKeeper * zookeeper = nullptr; String path_prefix; String path; - String holder_path; }; std::optional createEphemeralLockInZooKeeper( @@ -124,7 +126,6 @@ public: struct LockInfo { String path; - String holder_path; String partition_id; UInt64 number = 0; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 8dad5755dab..b2bb616b119 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -623,6 +623,16 @@ void StorageReplicatedMergeTree::createNewZooKeeperNodes() /// For ALTER PARTITION with multi-leaders futures.push_back(zookeeper->asyncTryCreateNoThrow(zookeeper_path + "/alter_partition_version", String(), zkutil::CreateMode::Persistent)); + /// As for now, "/temp" node must exist, but we want to be able to remove it in future + if (zookeeper->exists(zookeeper_path + "/temp")) + { + /// For block numbers allocation (since 22.11) + futures.push_back(zookeeper->asyncTryCreateNoThrow( + zookeeper_path + "/temp/" + EphemeralLockInZooKeeper::LEGACY_LOCK_INSERT, String(), zkutil::CreateMode::Persistent)); + futures.push_back(zookeeper->asyncTryCreateNoThrow( + zookeeper_path + "/temp/" + EphemeralLockInZooKeeper::LEGACY_LOCK_OTHER, String(), zkutil::CreateMode::Persistent)); + } + for (auto & future : futures) { auto res = future.get(); @@ -700,6 +710,13 @@ bool StorageReplicatedMergeTree::createTableIfNotExists(const StorageMetadataPtr zkutil::CreateMode::Persistent)); ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/temp", "", zkutil::CreateMode::Persistent)); + + /// The following 2 nodes were added in 22.11 + ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/temp/" + EphemeralLockInZooKeeper::LEGACY_LOCK_INSERT, "", + zkutil::CreateMode::Persistent)); + ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/temp/" + EphemeralLockInZooKeeper::LEGACY_LOCK_OTHER, "", + zkutil::CreateMode::Persistent)); + ops.emplace_back(zkutil::makeCreateRequest(zookeeper_path + "/replicas", "last added replica: " + replica_name, zkutil::CreateMode::Persistent)); From a1c028f20c156a7326b21969f433aec54f6eaf17 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Wed, 2 Nov 2022 13:43:04 +0100 Subject: [PATCH 13/46] fix --- .../0_stateless/01700_system_zookeeper_path_in.reference | 2 ++ .../0_stateless/02221_system_zookeeper_unrestricted.reference | 4 ++++ .../02221_system_zookeeper_unrestricted_like.reference | 4 ++++ 3 files changed, 10 insertions(+) diff --git a/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference b/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference index dcee18b33e0..664d8e84f27 100644 --- a/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference +++ b/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference @@ -9,6 +9,8 @@ r1 block_numbers blocks ======== +abandonable_lock-insert +abandonable_lock-other failed_parts last_part parallel diff --git a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.reference b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.reference index bd0c9cee464..d250d1c9140 100644 --- a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.reference +++ 
b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.reference @@ -1,5 +1,9 @@ 1 1 +abandonable_lock-insert +abandonable_lock-insert +abandonable_lock-other +abandonable_lock-other alter_partition_version alter_partition_version block_numbers diff --git a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.reference b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.reference index f95d60dc07b..67920b13d71 100644 --- a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.reference +++ b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.reference @@ -1,4 +1,6 @@ 1 +abandonable_lock-insert +abandonable_lock-other alter_partition_version block_numbers blocks @@ -37,6 +39,8 @@ zero_copy_hdfs zero_copy_s3 ------------------------- 1 +abandonable_lock-insert +abandonable_lock-other alter_partition_version block_numbers blocks From acbad99b24757449d36ba468221d5331a53d1f3d Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Wed, 2 Nov 2022 14:28:49 +0100 Subject: [PATCH 14/46] remove abandonable_lock part 2 --- docker/test/stress/run.sh | 1 + .../MergeTree/ReplicatedMergeTreeQueue.cpp | 62 +++++-------------- .../01158_zookeeper_log_long.reference | 22 +++---- 3 files changed, 27 insertions(+), 58 deletions(-) diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh index 7058853b43e..78f627bf45e 100644 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -481,6 +481,7 @@ else -e "The set of parts restored in place of" \ -e "(ReplicatedMergeTreeAttachThread): Initialization failed. Error" \ -e "Code: 269. DB::Exception: Destination table is myself" \ + -e "Coordination::Exception: Connection loss" \ /var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "" > /test_output/bc_check_error_messages.txt \ && echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index d6d937ce66f..99946e9d938 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -1972,54 +1972,24 @@ ReplicatedMergeTreeMergePredicate::ReplicatedMergeTreeMergePredicate( auto quorum_status_future = zookeeper->asyncTryGet(fs::path(queue.zookeeper_path) / "quorum" / "status"); /// Load current inserts - std::unordered_set lock_holder_paths; - for (const String & entry : zookeeper->getChildren(fs::path(queue.zookeeper_path) / "temp")) + Strings partitions = zookeeper->getChildren(fs::path(queue.zookeeper_path) / "block_numbers"); + std::vector paths; + paths.reserve(partitions.size()); + for (const String & partition : partitions) + paths.push_back(fs::path(queue.zookeeper_path) / "block_numbers" / partition); + + auto locks_children = zookeeper->getChildren(paths); + + for (size_t i = 0; i < partitions.size(); ++i) { - if (startsWith(entry, "abandonable_lock-")) - lock_holder_paths.insert(fs::path(queue.zookeeper_path) / "temp" / entry); - } - - if (!lock_holder_paths.empty()) - { - Strings partitions = zookeeper->getChildren(fs::path(queue.zookeeper_path) / "block_numbers"); - std::vector paths; - paths.reserve(partitions.size()); - for (const String & partition : partitions) - 
paths.push_back(fs::path(queue.zookeeper_path) / "block_numbers" / partition); - - auto locks_children = zookeeper->getChildren(paths); - - struct BlockInfoInZooKeeper + Strings partition_block_numbers = locks_children[i].names; + for (const String & entry : partition_block_numbers) { - String partition; - Int64 number; - String zk_path; - std::future contents_future; - }; - - std::vector block_infos; - for (size_t i = 0; i < partitions.size(); ++i) - { - Strings partition_block_numbers = locks_children[i].names; - for (const String & entry : partition_block_numbers) - { - /// TODO: cache block numbers that are abandoned. - /// We won't need to check them on the next iteration. - if (startsWith(entry, "block-")) - { - Int64 block_number = parse(entry.substr(strlen("block-"))); - String zk_path = fs::path(queue.zookeeper_path) / "block_numbers" / partitions[i] / entry; - block_infos.emplace_back( - BlockInfoInZooKeeper{partitions[i], block_number, zk_path, zookeeper->asyncTryGet(zk_path)}); - } - } - } - - for (auto & block : block_infos) - { - Coordination::GetResponse resp = block.contents_future.get(); - if (resp.error == Coordination::Error::ZOK && lock_holder_paths.contains(resp.data)) - committing_blocks[block.partition].insert(block.number); + if (!startsWith(entry, "block-")) + continue; + Int64 block_number = parse(entry.substr(strlen("block-"))); + String zk_path = fs::path(queue.zookeeper_path) / "block_numbers" / partitions[i] / entry; + committing_blocks[partitions[i]].insert(block_number); } } diff --git a/tests/queries/0_stateless/01158_zookeeper_log_long.reference b/tests/queries/0_stateless/01158_zookeeper_log_long.reference index ee2b465b3d3..a0088610c9d 100644 --- a/tests/queries/0_stateless/01158_zookeeper_log_long.reference +++ b/tests/queries/0_stateless/01158_zookeeper_log_long.reference @@ -5,37 +5,35 @@ log ::1 Request 0 Create /test/01158/default/rmt/log/log- 0 1 \N 0 1 \N \N \N 0 0 0 0 ::1 Response 0 Create /test/01158/default/rmt/log/log- 0 1 \N 0 1 ZOK \N \N /test/01158/default/rmt/log/log-0000000000 0 0 0 0 parts -Request 0 Multi 0 0 \N 5 0 \N \N \N 0 0 0 0 +Request 0 Multi 0 0 \N 4 0 \N \N \N 0 0 0 0 Request 0 Create /test/01158/default/rmt/log/log- 0 1 \N 0 1 \N \N \N 0 0 0 0 Request 0 Remove /test/01158/default/rmt/block_numbers/all/block-0000000000 0 0 -1 0 2 \N \N \N 0 0 0 0 -Request 0 Remove /test/01158/default/rmt/temp/abandonable_lock-0000000000 0 0 -1 0 3 \N \N \N 0 0 0 0 -Request 0 Create /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 \N 0 4 \N \N \N 0 0 0 0 -Request 0 Create /test/01158/default/rmt/replicas/1/parts/all_0_0_0 0 0 \N 0 5 \N \N \N 0 0 0 0 -Response 0 Multi 0 0 \N 5 0 ZOK \N \N 0 0 0 0 +Request 0 Create /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 \N 0 3 \N \N \N 0 0 0 0 +Request 0 Create /test/01158/default/rmt/replicas/1/parts/all_0_0_0 0 0 \N 0 4 \N \N \N 0 0 0 0 +Response 0 Multi 0 0 \N 4 0 ZOK \N \N 0 0 0 0 Response 0 Create /test/01158/default/rmt/log/log- 0 1 \N 0 1 ZOK \N \N /test/01158/default/rmt/log/log-0000000000 0 0 0 0 Response 0 Remove /test/01158/default/rmt/block_numbers/all/block-0000000000 0 0 -1 0 2 ZOK \N \N 0 0 0 0 -Response 0 Remove /test/01158/default/rmt/temp/abandonable_lock-0000000000 0 0 -1 0 3 ZOK \N \N 0 0 0 0 -Response 0 Create /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 \N 0 4 ZOK \N \N /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 0 0 -Response 0 Create 
/test/01158/default/rmt/replicas/1/parts/all_0_0_0 0 0 \N 0 5 ZOK \N \N /test/01158/default/rmt/replicas/1/parts/all_0_0_0 0 0 0 0 +Response 0 Create /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 \N 0 3 ZOK \N \N /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 0 0 +Response 0 Create /test/01158/default/rmt/replicas/1/parts/all_0_0_0 0 0 \N 0 4 ZOK \N \N /test/01158/default/rmt/replicas/1/parts/all_0_0_0 0 0 0 0 Request 0 Exists /test/01158/default/rmt/replicas/1/parts/all_0_0_0 0 0 \N 0 0 \N \N \N 0 0 0 0 Response 0 Exists /test/01158/default/rmt/replicas/1/parts/all_0_0_0 0 0 \N 0 0 ZOK \N \N 0 0 96 0 blocks Request 0 Multi 0 0 \N 3 0 \N \N \N 0 0 0 0 Request 0 Create /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 \N 0 1 \N \N \N 0 0 0 0 Request 0 Remove /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 -1 0 2 \N \N \N 0 0 0 0 -Request 0 Create /test/01158/default/rmt/temp/abandonable_lock- 1 1 \N 0 3 \N \N \N 0 0 0 0 +Request 0 Create /test/01158/default/rmt/block_numbers/all/block- 1 1 \N 0 3 \N \N \N 0 0 0 0 Response 0 Multi 0 0 \N 3 0 ZOK \N \N 0 0 0 0 Response 0 Create /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 \N 0 1 ZOK \N \N /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 0 0 Response 0 Remove /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 -1 0 2 ZOK \N \N 0 0 0 0 -Response 0 Create /test/01158/default/rmt/temp/abandonable_lock- 1 1 \N 0 3 ZOK \N \N /test/01158/default/rmt/temp/abandonable_lock-0000000000 0 0 0 0 +Response 0 Create /test/01158/default/rmt/block_numbers/all/block- 1 1 \N 0 3 ZOK \N \N /test/01158/default/rmt/block_numbers/all/block-0000000000 0 0 0 0 Request 0 Multi 0 0 \N 3 0 \N \N \N 0 0 0 0 Request 0 Create /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 \N 0 1 \N \N \N 0 0 0 0 Request 0 Remove /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 -1 0 2 \N \N \N 0 0 0 0 -Request 0 Create /test/01158/default/rmt/temp/abandonable_lock- 1 1 \N 0 3 \N \N \N 0 0 0 0 +Request 0 Create /test/01158/default/rmt/block_numbers/all/block- 1 1 \N 0 3 \N \N \N 0 0 0 0 Response 0 Multi 0 0 \N 3 0 ZNODEEXISTS \N \N 0 0 0 0 Response 0 Error /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 \N 0 1 ZNODEEXISTS \N \N 0 0 0 0 Response 0 Error /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 -1 0 2 ZRUNTIMEINCONSISTENCY \N \N 0 0 0 0 -Response 0 Error /test/01158/default/rmt/temp/abandonable_lock- 1 1 \N 0 3 ZRUNTIMEINCONSISTENCY \N \N 0 0 0 0 +Response 0 Error /test/01158/default/rmt/block_numbers/all/block- 1 1 \N 0 3 ZRUNTIMEINCONSISTENCY \N \N 0 0 0 0 Request 0 Get /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 \N 0 0 \N \N \N 0 0 0 0 Response 0 Get /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 \N 0 0 ZOK \N \N 0 0 9 0 duration_ms From 204dac3d5d9d423192e171773952c2983574b29f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 2 Nov 2022 17:14:20 +0300 Subject: [PATCH 15/46] Suggest using https://fiddle.clickhouse.com/ --- .github/ISSUE_TEMPLATE/85_bug-report.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/85_bug-report.md b/.github/ISSUE_TEMPLATE/85_bug-report.md index 3d2ed6148e3..08d03c284ca 100644 --- a/.github/ISSUE_TEMPLATE/85_bug-report.md 
+++ b/.github/ISSUE_TEMPLATE/85_bug-report.md @@ -13,6 +13,8 @@ assignees: '' > A clear and concise description of what works not as it is supposed to. +> A link to reproducer in [https://fiddle.clickhouse.com/](https://fiddle.clickhouse.com/). + **Does it reproduce on recent release?** [The list of releases](https://github.com/ClickHouse/ClickHouse/blob/master/utils/list-versions/version_date.tsv) From 00c9e50ee37f631026ed119a7fdcdfa755b5d9c0 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Wed, 2 Nov 2022 16:24:18 +0100 Subject: [PATCH 16/46] fix race between INSERT and ALTER PARTITION --- src/Common/ThreadFuzzer.cpp | 10 +- src/Common/ThreadFuzzer.h | 1 + .../MergeTree/EphemeralLockInZooKeeper.cpp | 4 +- .../MergeTree/EphemeralLockInZooKeeper.h | 3 +- .../MergeTree/ReplicatedMergeTreeSink.cpp | 16 ++- src/Storages/StorageReplicatedMergeTree.cpp | 110 +++++++++++++++--- src/Storages/StorageReplicatedMergeTree.h | 3 + tests/queries/0_stateless/replication.lib | 1 + 8 files changed, 124 insertions(+), 24 deletions(-) diff --git a/src/Common/ThreadFuzzer.cpp b/src/Common/ThreadFuzzer.cpp index 16b51c8bbce..ee6dc222600 100644 --- a/src/Common/ThreadFuzzer.cpp +++ b/src/Common/ThreadFuzzer.cpp @@ -243,15 +243,17 @@ static void injection( } } +void ThreadFuzzer::maybeInjectSleep() +{ + auto & fuzzer = ThreadFuzzer::instance(); + injection(fuzzer.yield_probability, fuzzer.migrate_probability, fuzzer.sleep_probability, fuzzer.sleep_time_us); +} void ThreadFuzzer::signalHandler(int) { DENY_ALLOCATIONS_IN_SCOPE; auto saved_errno = errno; - - auto & fuzzer = ThreadFuzzer::instance(); - injection(fuzzer.yield_probability, fuzzer.migrate_probability, fuzzer.sleep_probability, fuzzer.sleep_time_us); - + maybeInjectSleep(); errno = saved_errno; } diff --git a/src/Common/ThreadFuzzer.h b/src/Common/ThreadFuzzer.h index 743b8c75dc0..ff391dfcd8f 100644 --- a/src/Common/ThreadFuzzer.h +++ b/src/Common/ThreadFuzzer.h @@ -58,6 +58,7 @@ public: static void start(); static bool isStarted(); + static void maybeInjectSleep(); private: uint64_t cpu_time_period_us = 0; double yield_probability = 0; diff --git a/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp b/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp index 600f6d30bb6..802d648655f 100644 --- a/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp +++ b/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp @@ -62,12 +62,12 @@ std::optional createEphemeralLockInZooKeeper( void EphemeralLockInZooKeeper::unlock() { Coordination::Requests ops; - getUnlockOps(ops); + getUnlockOp(ops); zookeeper->multi(ops); zookeeper = nullptr; } -void EphemeralLockInZooKeeper::getUnlockOps(Coordination::Requests & ops) +void EphemeralLockInZooKeeper::getUnlockOp(Coordination::Requests & ops) { checkCreated(); ops.emplace_back(zkutil::makeRemoveRequest(path, -1)); diff --git a/src/Storages/MergeTree/EphemeralLockInZooKeeper.h b/src/Storages/MergeTree/EphemeralLockInZooKeeper.h index 670f5de2489..aaa1fc2af24 100644 --- a/src/Storages/MergeTree/EphemeralLockInZooKeeper.h +++ b/src/Storages/MergeTree/EphemeralLockInZooKeeper.h @@ -71,7 +71,8 @@ public: void unlock(); /// Adds actions equivalent to `unlock()` to the list. - void getUnlockOps(Coordination::Requests & ops); + /// Returns index of the action that removes + void getUnlockOp(Coordination::Requests & ops); /// Do not delete nodes in destructor. You may call this method after 'getUnlockOps' and successful execution of these ops, /// because the nodes will be already deleted. 
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 082228d7ebf..50afd8a8eb3 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -31,6 +32,7 @@ namespace ErrorCodes extern const int DUPLICATE_DATA_PART; extern const int PART_IS_TEMPORARILY_LOCKED; extern const int LOGICAL_ERROR; + extern const int QUERY_WAS_CANCELLED; } struct ReplicatedMergeTreeSink::DelayedChunk @@ -344,12 +346,14 @@ void ReplicatedMergeTreeSink::commitPart( bool deduplicate_block = !block_id.empty(); String block_id_path = deduplicate_block ? storage.zookeeper_path + "/blocks/" + block_id : ""; auto block_number_lock = storage.allocateBlockNumber(part->info.partition_id, zookeeper, block_id_path); + ThreadFuzzer::maybeInjectSleep(); /// Prepare transaction to ZooKeeper /// It will simultaneously add information about the part to all the necessary places in ZooKeeper and remove block_number_lock. Coordination::Requests ops; Int64 block_number = 0; + size_t block_unlock_op_idx = std::numeric_limits::max(); String existing_part_name; if (block_number_lock) { @@ -393,7 +397,8 @@ void ReplicatedMergeTreeSink::commitPart( zkutil::CreateMode::PersistentSequential)); /// Deletes the information that the block number is used for writing. - block_number_lock->getUnlockOps(ops); + block_unlock_op_idx = ops.size(); + block_number_lock->getUnlockOp(ops); /** If we need a quorum - create a node in which the quorum is monitored. * (If such a node already exists, then someone has managed to make another quorum record at the same time, @@ -520,8 +525,12 @@ void ReplicatedMergeTreeSink::commitPart( part->name); } + ThreadFuzzer::maybeInjectSleep(); + storage.lockSharedData(*part, false, {}); + ThreadFuzzer::maybeInjectSleep(); + Coordination::Responses responses; Coordination::Error multi_code = zookeeper->tryMultiNoThrow(ops, responses); /// 1 RTT @@ -534,6 +543,11 @@ void ReplicatedMergeTreeSink::commitPart( if (block_number_lock) block_number_lock->assumeUnlocked(); } + else if (multi_code == Coordination::Error::ZNONODE && zkutil::getFailedOpIndex(multi_code, responses) == block_unlock_op_idx) + { + throw Exception(ErrorCodes::QUERY_WAS_CANCELLED, + "Insert query (for block {}) was cancelled by concurrent ALTER PARTITION", block_number_lock->getPath()); + } else if (multi_code == Coordination::Error::ZCONNECTIONLOSS || multi_code == Coordination::Error::ZOPERATIONTIMEOUT) { diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index b2bb616b119..57dd4feb77f 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -6394,6 +6394,76 @@ void StorageReplicatedMergeTree::removePartsFromZooKeeper( } } +void StorageReplicatedMergeTree::clearLockedBlockNumbersInPartition( + zkutil::ZooKeeper & zookeeper, const String & partition_id, Int64 min_block_num, Int64 max_block_num) +{ + /// Imagine that some INSERT query has allocated block number 42, but it's still in progress. + /// Some DROP PARTITION query gets block number 43 and commits DROP_RANGE all_0_42_999_999. + /// And after that INSERT commits GET_PART all_42_42_0. Oops, intersecting parts. + /// So we have to either wait for unfinished INSERTs or cancel them. + /// It's totally fine to cancel since we are going to remove data anyway. 
+ /// We can safely cancel INSERT query by removing its ephemeral block number. + /// Usually it's bad idea to remove ephemeral nodes owned by someone else, + /// but INSERTs remove such nodes atomically with part commit, so INSERT will fail if node does not exist. + + fs::path partition_path = fs::path(zookeeper_path) / "block_numbers" / partition_id; + Strings queries_in_progress = zookeeper.getChildren(partition_path); + if (queries_in_progress.empty()) + return; + + Strings paths_to_get; + for (const auto & block : queries_in_progress) + { + if (!startsWith(block, "block-")) + continue; + Int64 block_number = parse(block.substr(strlen("block-"))); + if (min_block_num <= block_number && block_number <= max_block_num) + paths_to_get.push_back(partition_path / block); + } + + auto results = zookeeper.get(paths_to_get); + for (size_t i = 0; i < paths_to_get.size(); ++i) + { + auto & result = results[i]; + + /// The query already finished + if (result.error == Coordination::Error::ZNONODE) + continue; + + /// The query is not an insert (it does not have block_id) + if (result.data.ends_with(EphemeralLockInZooKeeper::LEGACY_LOCK_OTHER)) + continue; + + if (result.data.ends_with(EphemeralLockInZooKeeper::LEGACY_LOCK_INSERT)) + { + /// Remove block number, so insert will fail to commit (it will try to remove this node too) + LOG_WARNING(log, "Some query is trying to concurrently insert block {}, will cancel it", paths_to_get[i]); + zookeeper.tryRemove(paths_to_get[i]); + } + else + { + constexpr const char * old_version_warning = "Ephemeral lock {} (referencing {}) is created by a replica " + "that running old version of ClickHouse (< 22.11). Cannot remove it, will wait for this lock to disappear. " + "Upgrade remaining hosts in the cluster to address this warning."; + constexpr const char * new_version_warning = "Ephemeral lock {} has unexpected content ({}), " + "probably it is created by a replica that running newer version of ClickHouse. " + "Cannot remove it, will wait for this lock to disappear. 
Upgrade remaining hosts in the cluster to address this warning."; + + if (result.data.starts_with(zookeeper_path + "/temp/abandonable_lock-")) + LOG_WARNING(log, old_version_warning, paths_to_get[i], result.data); + else + LOG_WARNING(log, new_version_warning, paths_to_get[i], result.data); + + Stopwatch time_waiting; + const auto & stop_waiting = [this, &time_waiting]() + { + auto timeout = getContext()->getSettingsRef().lock_acquire_timeout.value.seconds(); + return partial_shutdown_called || (timeout < time_waiting.elapsedSeconds()); + }; + zookeeper.waitForDisappear(paths_to_get[i], stop_waiting); + } + } +} void StorageReplicatedMergeTree::getClearBlocksInPartitionOps( Coordination::Requests & ops, zkutil::ZooKeeper & zookeeper, const String & partition_id, Int64 min_block_num, Int64 max_block_num) @@ -6403,21 +6473,18 @@ void StorageReplicatedMergeTree::getClearBlocksInPartitionOps( throw Exception(zookeeper_path + "/blocks doesn't exist", ErrorCodes::NOT_FOUND_NODE); String partition_prefix = partition_id + "_"; - zkutil::AsyncResponses get_futures; + Strings paths_to_get; for (const String & block_id : blocks) - { if (startsWith(block_id, partition_prefix)) - { - String path = fs::path(zookeeper_path) / "blocks" / block_id; - get_futures.emplace_back(path, zookeeper.asyncTryGet(path)); - } - } + paths_to_get.push_back(fs::path(zookeeper_path) / "blocks" / block_id); - for (auto & pair : get_futures) + auto results = zookeeper.get(paths_to_get); + + for (size_t i = 0; i < paths_to_get.size(); ++i) { - const String & path = pair.first; - auto result = pair.second.get(); + const String & path = paths_to_get[i]; + auto & result = results[i]; if (result.error == Coordination::Error::ZNONODE) continue; @@ -6574,9 +6641,13 @@ void StorageReplicatedMergeTree::replacePartitionFrom( entry_replace.columns_version = -1; } - /// Remove deduplication block_ids of replacing parts if (replace) + { + /// Cancel concurrent inserts in range + clearLockedBlockNumbersInPartition(*zookeeper, drop_range.partition_id, drop_range.max_block, drop_range.max_block); + /// Remove deduplication block_ids of replacing parts clearBlocksInPartition(*zookeeper, drop_range.partition_id, drop_range.max_block, drop_range.max_block); + } DataPartsVector parts_to_remove; Coordination::Responses op_results; @@ -6587,13 +6658,13 @@ void StorageReplicatedMergeTree::replacePartitionFrom( for (size_t i = 0; i < dst_parts.size(); ++i) { getCommitPartOps(ops, dst_parts[i], block_id_paths[i]); - ephemeral_locks[i].getUnlockOps(ops); + ephemeral_locks[i].getUnlockOp(ops); } if (auto txn = query_context->getZooKeeperMetadataTransaction()) txn->moveOpsTo(ops); - delimiting_block_lock->getUnlockOps(ops); + delimiting_block_lock->getUnlockOp(ops); /// Check and update version to avoid race with DROP_RANGE ops.emplace_back(zkutil::makeSetRequest(alter_partition_version_path, "", alter_partition_version_stat.version)); /// Just update version, because merges assignment relies on it @@ -6812,6 +6883,9 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta entry_replace.columns_version = -1; } + /// Cancel concurrent inserts in range + clearLockedBlockNumbersInPartition(*zookeeper, drop_range.partition_id, drop_range.max_block, drop_range.max_block); + clearBlocksInPartition(*zookeeper, drop_range.partition_id, drop_range.max_block, drop_range.max_block); DataPartsVector parts_to_remove; @@ -6823,7 +6897,7 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta for (size_t i = 0; 
i < dst_parts.size(); ++i) { dest_table_storage->getCommitPartOps(ops, dst_parts[i], block_id_paths[i]); - ephemeral_locks[i].getUnlockOps(ops); + ephemeral_locks[i].getUnlockOp(ops); } /// Check and update version to avoid race with DROP_RANGE @@ -6887,7 +6961,7 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta fs::path(zookeeper_path) / "log/log-", entry_delete.toString(), zkutil::CreateMode::PersistentSequential)); /// Just update version, because merges assignment relies on it ops_src.emplace_back(zkutil::makeSetRequest(fs::path(zookeeper_path) / "log", "", -1)); - delimiting_block_lock->getUnlockOps(ops_src); + delimiting_block_lock->getUnlockOp(ops_src); op_results = zookeeper->multi(ops_src); @@ -7189,6 +7263,7 @@ bool StorageReplicatedMergeTree::dropPartImpl( } Coordination::Requests ops; + /// NOTE Don't need to remove block numbers too, because no in-progress inserts in the range are possible getClearBlocksInPartitionOps(ops, *zookeeper, part_info.partition_id, part_info.min_block, part_info.max_block); size_t clear_block_ops_size = ops.size(); @@ -7249,6 +7324,9 @@ bool StorageReplicatedMergeTree::addOpsToDropAllPartsInPartition( return false; } + /// Cancel concurrent inserts in range + clearLockedBlockNumbersInPartition(zookeeper, partition_id, drop_range_info.min_block, drop_range_info.max_block); + clearBlocksInPartition(zookeeper, partition_id, drop_range_info.min_block, drop_range_info.max_block); String drop_range_fake_part_name = getPartNamePossiblyFake(format_version, drop_range_info); @@ -7266,7 +7344,7 @@ bool StorageReplicatedMergeTree::addOpsToDropAllPartsInPartition( log_entry_ops_idx.push_back(ops.size()); ops.emplace_back(zkutil::makeCreateRequest(fs::path(zookeeper_path) / "log/log-", entry->toString(), zkutil::CreateMode::PersistentSequential)); - delimiting_block_lock->getUnlockOps(ops); + delimiting_block_lock->getUnlockOp(ops); delimiting_block_locks.push_back(std::move(*delimiting_block_lock)); entries.push_back(std::move(entry)); return true; diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index 5213f963fdf..0d8c6b47dd5 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -750,6 +750,9 @@ private: mutable std::mutex existing_nodes_cache_mutex; bool existsNodeCached(const std::string & path) const; + /// Cancels INSERTs in the block range by removing ephemeral block numbers + void clearLockedBlockNumbersInPartition(zkutil::ZooKeeper & zookeeper, const String & partition_id, Int64 min_block_num, Int64 max_block_num); + void getClearBlocksInPartitionOps(Coordination::Requests & ops, zkutil::ZooKeeper & zookeeper, const String & partition_id, Int64 min_block_num, Int64 max_block_num); /// Remove block IDs from `blocks/` in ZooKeeper for the given partition ID in the given block number range. 
void clearBlocksInPartition( diff --git a/tests/queries/0_stateless/replication.lib b/tests/queries/0_stateless/replication.lib index 37c82ec7239..7c7db48e078 100755 --- a/tests/queries/0_stateless/replication.lib +++ b/tests/queries/0_stateless/replication.lib @@ -108,6 +108,7 @@ function check_replication_consistency() $CLICKHOUSE_CLIENT -q "select 'mutations', * from system.mutations where database=currentDatabase() and table like '$table_name_prefix%' order by database, table, mutation_id" $CLICKHOUSE_CLIENT -q "select 'parts', * from system.parts where database=currentDatabase() and table like '$table_name_prefix%' order by database, table, name" echo "Good luck with debugging..." + exit 1 fi } From d8b3a2af8490c3940811b7b080f36bb5e4b756d9 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Wed, 2 Nov 2022 19:26:50 +0100 Subject: [PATCH 17/46] fix race between INSERT and DROP --- src/Storages/StorageReplicatedMergeTree.cpp | 2 ++ utils/keeper-data-dumper/main.cpp | 3 +++ 2 files changed, 5 insertions(+) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 57dd4feb77f..76f606aaeb7 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5311,6 +5311,8 @@ StorageReplicatedMergeTree::allocateBlockNumber( if (!existsNodeCached(partition_path)) { Coordination::Requests ops; + /// Check that table is not being dropped ("host" is the first node that is removed on replica drop) + ops.push_back(zkutil::makeCheckRequest(fs::path(replica_path) / "host", -1)); ops.push_back(zkutil::makeCreateRequest(partition_path, "", zkutil::CreateMode::Persistent)); /// We increment data version of the block_numbers node so that it becomes possible /// to check in a ZK transaction that the set of partitions didn't change diff --git a/utils/keeper-data-dumper/main.cpp b/utils/keeper-data-dumper/main.cpp index dd3c3a4e2ad..0ea6371b49f 100644 --- a/utils/keeper-data-dumper/main.cpp +++ b/utils/keeper-data-dumper/main.cpp @@ -79,7 +79,10 @@ int main(int argc, char *argv[]) for (size_t i = last_commited_index + 1; i < changelog.next_slot(); ++i) { if (changelog.entry_at(i)->get_val_type() == nuraft::log_val_type::app_log) + { + state_machine->pre_commit(i, changelog.entry_at(i)->get_buf()); state_machine->commit(i, changelog.entry_at(i)->get_buf()); + } } dumpMachine(state_machine); From 816f2319f7bffc07885f6b925cb92d0847dc7572 Mon Sep 17 00:00:00 2001 From: Smita Kulkarni Date: Wed, 2 Nov 2022 19:35:22 +0100 Subject: [PATCH 18/46] Updated finished_asts to fix fast test fails with Normalise AST is too deep and updated ConvertFunctionOrLikeData to merge conditions by aliasorColumnName instead of ASTPtr - 42452 Bug fix for parent node corrupted --- src/Interpreters/ConvertFunctionOrLikeVisitor.cpp | 7 ++++--- src/Interpreters/QueryNormalizer.cpp | 4 ++++ 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/ConvertFunctionOrLikeVisitor.cpp b/src/Interpreters/ConvertFunctionOrLikeVisitor.cpp index 257bbda68eb..5d48391d56d 100644 --- a/src/Interpreters/ConvertFunctionOrLikeVisitor.cpp +++ b/src/Interpreters/ConvertFunctionOrLikeVisitor.cpp @@ -15,7 +15,7 @@ void ConvertFunctionOrLikeData::visit(ASTFunction & function, ASTPtr &) if (function.name != "or") return; - std::unordered_map> identifier_to_literals; + std::unordered_map> identifier_to_literals; for (auto & child : function.children) { if (auto * expr_list_fn = child->as()) @@ -51,10 +51,11 @@ void 
ConvertFunctionOrLikeData::visit(ASTFunction & function, ASTPtr &) regexp = "(?i)" + regexp; unique_elems.pop_back(); - auto it = identifier_to_literals.find(identifier); + auto it = identifier_to_literals.find(identifier->getAliasOrColumnName()); + if (it == identifier_to_literals.end()) { - it = identifier_to_literals.insert({identifier, std::make_shared(Field{Array{}})}).first; + it = identifier_to_literals.insert({identifier->getAliasOrColumnName(), std::make_shared(Field{Array{}})}).first; auto match = makeASTFunction("multiMatchAny"); match->arguments->children.push_back(arguments[0]); match->arguments->children.push_back(it->second); diff --git a/src/Interpreters/QueryNormalizer.cpp b/src/Interpreters/QueryNormalizer.cpp index 0f57a8f549c..176a00c4ef1 100644 --- a/src/Interpreters/QueryNormalizer.cpp +++ b/src/Interpreters/QueryNormalizer.cpp @@ -118,7 +118,11 @@ void QueryNormalizer::visit(ASTIdentifier & node, ASTPtr & ast, Data & data) } } else + { ast = alias_node->clone(); + if (data.finished_asts.contains(alias_node)) + data.finished_asts[ast] = ast; + } } } From 9a002bd3783ee8cf64d88244c08b8606ad6720b3 Mon Sep 17 00:00:00 2001 From: Smita Kulkarni Date: Thu, 3 Nov 2022 11:57:32 +0100 Subject: [PATCH 19/46] =?UTF-8?q?Removed=20finished=5Fasts=20updation=20wh?= =?UTF-8?q?en=20alias=20is=20replaced=20and=20updated=20alias=20of=20the?= =?UTF-8?q?=20replaced=20ast=20it=20its=20initial=20name=20so=20that=20we?= =?UTF-8?q?=20don=E2=80=99t=20replace=20it=20further=20-=2042452=20Bug=20f?= =?UTF-8?q?ix=20for=20parent=20node=20corrupted?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/Interpreters/QueryNormalizer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/QueryNormalizer.cpp b/src/Interpreters/QueryNormalizer.cpp index 176a00c4ef1..6c8bdb8628d 100644 --- a/src/Interpreters/QueryNormalizer.cpp +++ b/src/Interpreters/QueryNormalizer.cpp @@ -119,9 +119,9 @@ void QueryNormalizer::visit(ASTIdentifier & node, ASTPtr & ast, Data & data) } else { + auto alias_name = ast->getAliasOrColumnName(); ast = alias_node->clone(); - if (data.finished_asts.contains(alias_node)) - data.finished_asts[ast] = ast; + ast->setAlias(alias_name); } } } From 4c70622202411991e64543a4b588ebfec684e828 Mon Sep 17 00:00:00 2001 From: Smita Kulkarni Date: Thu, 3 Nov 2022 14:42:34 +0100 Subject: [PATCH 20/46] Updated to check size of alias before cloning, so that we throw error early - 42452 Bug fix for parent node corrupted --- src/Interpreters/QueryNormalizer.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/Interpreters/QueryNormalizer.cpp b/src/Interpreters/QueryNormalizer.cpp index 6c8bdb8628d..6a128d37e5d 100644 --- a/src/Interpreters/QueryNormalizer.cpp +++ b/src/Interpreters/QueryNormalizer.cpp @@ -113,12 +113,16 @@ void QueryNormalizer::visit(ASTIdentifier & node, ASTPtr & ast, Data & data) if (!is_cycle) { /// In a construct like "a AS b", where a is an alias, you must set alias b to the result of substituting alias a. 
+ /// Check size of the alias before cloning too large alias AST + alias_node->checkSize(data.settings.max_expanded_ast_elements); ast = alias_node->clone(); ast->setAlias(node_alias); } } else { + /// Check size of the alias before cloning too large alias AST + alias_node->checkSize(data.settings.max_expanded_ast_elements); auto alias_name = ast->getAliasOrColumnName(); ast = alias_node->clone(); ast->setAlias(alias_name); From e773eb2fd17c84620738ef9cd16561e2a1764d80 Mon Sep 17 00:00:00 2001 From: zhongyuankai <872237106@qq.com> Date: Fri, 4 Nov 2022 17:19:34 +0800 Subject: [PATCH 21/46] batter --- .../MergeTree/MergeTreeDataMergerMutator.cpp | 23 +++++++++++++------ 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 879cd2fa3da..c7d1786135a 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -303,19 +303,28 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMerge( if (metadata_snapshot->hasAnyTTL() && merge_with_ttl_allowed && !ttl_merges_blocker.isCancelled()) { /// TTL delete is preferred to recompression - TTLDeleteMergeSelector delete_ttl_selector( + TTLDeleteMergeSelector drop_ttl_selector( next_delete_ttl_merge_times_by_partition, current_time, data_settings->merge_with_ttl_timeout, - data_settings->ttl_only_drop_parts); - - parts_to_merge = delete_ttl_selector.select( - parts_ranges, - data_settings->ttl_only_drop_parts ? data_settings->max_bytes_to_merge_at_max_space_in_pool : max_total_size_to_merge); + true); + parts_to_merge = drop_ttl_selector.select(parts_ranges,data_settings->max_bytes_to_merge_at_max_space_in_pool); if (!parts_to_merge.empty()) { - future_part->merge_type = data_settings->ttl_only_drop_parts ? 
MergeType::TTLDrop : MergeType::TTLDelete; + future_part->merge_type = MergeType::TTLDrop; + } + else if (!data_settings->ttl_only_drop_parts) + { + TTLDeleteMergeSelector delete_ttl_selector( + next_delete_ttl_merge_times_by_partition, + current_time, + data_settings->merge_with_ttl_timeout, + false); + + parts_to_merge = delete_ttl_selector.select(parts_ranges, max_total_size_to_merge); + if (!parts_to_merge.empty()) + future_part->merge_type = MergeType::TTLDelete; } else if (metadata_snapshot->hasAnyRecompressionTTL()) { From ecf45bb2165af6e9691a7c57f1993b2b60b393a5 Mon Sep 17 00:00:00 2001 From: zhongyuankai <872237106@qq.com> Date: Fri, 4 Nov 2022 20:52:21 +0800 Subject: [PATCH 22/46] fix test --- src/Interpreters/PartLog.cpp | 1 + .../0_stateless/02293_part_log_has_merge_reason.reference | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/PartLog.cpp b/src/Interpreters/PartLog.cpp index 6797d6b3a41..f1b29a8a9b2 100644 --- a/src/Interpreters/PartLog.cpp +++ b/src/Interpreters/PartLog.cpp @@ -74,6 +74,7 @@ NamesAndTypesList PartLogElement::getNamesAndTypes() {"RegularMerge", static_cast(REGULAR_MERGE)}, {"TTLDeleteMerge", static_cast(TTL_DELETE_MERGE)}, {"TTLRecompressMerge", static_cast(TTL_RECOMPRESS_MERGE)}, + {"TTLDropMerge", static_cast(TTL_DROP_MERGE)}, } ); diff --git a/tests/queries/0_stateless/02293_part_log_has_merge_reason.reference b/tests/queries/0_stateless/02293_part_log_has_merge_reason.reference index 220107cf15b..37156ee37e7 100644 --- a/tests/queries/0_stateless/02293_part_log_has_merge_reason.reference +++ b/tests/queries/0_stateless/02293_part_log_has_merge_reason.reference @@ -1 +1 @@ -MergeParts TTLDeleteMerge +MergeParts TTLDropMerge From ea4235d4ed29ca35c43c51f4d9cfbf1fb1ff4a53 Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Sat, 5 Nov 2022 01:30:07 +0000 Subject: [PATCH 23/46] Try to fix lambda parsing --- src/Parsers/ExpressionListParsers.cpp | 10 +++++----- .../0_stateless/02476_fix_lambda_parsing.reference | 4 ++++ .../0_stateless/02476_fix_lambda_parsing.sh | 14 ++++++++++++++ 3 files changed, 23 insertions(+), 5 deletions(-) create mode 100644 tests/queries/0_stateless/02476_fix_lambda_parsing.reference create mode 100755 tests/queries/0_stateless/02476_fix_lambda_parsing.sh diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp index c362340d013..0f103690e37 100644 --- a/src/Parsers/ExpressionListParsers.cpp +++ b/src/Parsers/ExpressionListParsers.cpp @@ -682,7 +682,7 @@ public: bool parseLambda() { - // 0. If empty - create function tuple with 0 args + // 1. If empty - create function tuple with 0 args if (isCurrentElementEmpty()) { auto function = makeASTFunction("tuple"); @@ -693,16 +693,16 @@ public: if (operands.size() != 1 || !operators.empty() || !mergeElement()) return false; - /// 1. If there is already tuple do nothing - if (tryGetFunctionName(elements.back()) == "tuple") + /// 2. If there is already tuple do nothing + if (elements.size() == 1 && tryGetFunctionName(elements.back()) == "tuple") { pushOperand(elements.back()); elements.pop_back(); } - /// 2. Put all elements in a single tuple + /// 3. 
Put all elements in a single tuple else { - auto function = makeASTFunction("tuple", elements); + auto function = makeASTFunction("tuple", std::move(elements)); elements.clear(); pushOperand(function); } diff --git a/tests/queries/0_stateless/02476_fix_lambda_parsing.reference b/tests/queries/0_stateless/02476_fix_lambda_parsing.reference new file mode 100644 index 00000000000..fce203907eb --- /dev/null +++ b/tests/queries/0_stateless/02476_fix_lambda_parsing.reference @@ -0,0 +1,4 @@ +SELECT f((x, tuple(y)) -> z) +SELECT f((x, tuple(y)) -> z) +SELECT f((x, y) -> z) +SELECT f((x, y) -> z) diff --git a/tests/queries/0_stateless/02476_fix_lambda_parsing.sh b/tests/queries/0_stateless/02476_fix_lambda_parsing.sh new file mode 100755 index 00000000000..2f134010991 --- /dev/null +++ b/tests/queries/0_stateless/02476_fix_lambda_parsing.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +set -e + +format="$CLICKHOUSE_FORMAT" + +echo "SELECT f((x, tuple(y)) -> z)" | $format +echo "select f(x, tuple(y) -> z)" | $format +echo "select f(x, y -> z)" | $format +echo "select f((x, y) -> z)" | $format From ef6065b331c19a59ea1e7409e169afa206d697ba Mon Sep 17 00:00:00 2001 From: zhongyuankai <872237106@qq.com> Date: Sun, 6 Nov 2022 13:40:26 +0800 Subject: [PATCH 24/46] fix test --- src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index c7d1786135a..c926b30781a 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -326,7 +326,8 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMerge( if (!parts_to_merge.empty()) future_part->merge_type = MergeType::TTLDelete; } - else if (metadata_snapshot->hasAnyRecompressionTTL()) + + if (parts_to_merge.empty() && metadata_snapshot->hasAnyRecompressionTTL()) { TTLRecompressMergeSelector recompress_ttl_selector( next_recompress_ttl_merge_times_by_partition, From 3d0f0e3896b3da9e99bba4bdf6d0761f41bfc138 Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky Date: Mon, 7 Nov 2022 13:44:05 +0000 Subject: [PATCH 25/46] Apply another fix --- src/Parsers/ExpressionListParsers.cpp | 12 ++++++++++-- .../02476_fix_lambda_parsing.reference | 8 ++++++-- .../0_stateless/02476_fix_lambda_parsing.sh | 15 +++++++++++---- 3 files changed, 27 insertions(+), 8 deletions(-) diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp index 0f103690e37..44886ec4577 100644 --- a/src/Parsers/ExpressionListParsers.cpp +++ b/src/Parsers/ExpressionListParsers.cpp @@ -694,7 +694,7 @@ public: return false; /// 2. 
If there is already tuple do nothing - if (elements.size() == 1 && tryGetFunctionName(elements.back()) == "tuple") + if (tryGetFunctionName(elements.back()) == "tuple") { pushOperand(elements.back()); elements.pop_back(); @@ -1050,12 +1050,20 @@ public: if (!mergeElement()) return false; - // Special case for (('a', 'b')) -> tuple(('a', 'b')) if (!is_tuple && elements.size() == 1) + { + // Special case for (('a', 'b')) = tuple(('a', 'b')) if (auto * literal = elements[0]->as()) if (literal->value.getType() == Field::Types::Tuple) is_tuple = true; + // Special case for f(x, (y) -> z) = f(x, tuple(y) -> z) + auto test_pos = pos; + auto test_expected = expected; + if (parseOperator(test_pos, "->", test_expected)) + is_tuple = true; + } + finished = true; } diff --git a/tests/queries/0_stateless/02476_fix_lambda_parsing.reference b/tests/queries/0_stateless/02476_fix_lambda_parsing.reference index fce203907eb..18cb46ce23c 100644 --- a/tests/queries/0_stateless/02476_fix_lambda_parsing.reference +++ b/tests/queries/0_stateless/02476_fix_lambda_parsing.reference @@ -1,4 +1,8 @@ -SELECT f((x, tuple(y)) -> z) -SELECT f((x, tuple(y)) -> z) +SELECT f(x, y -> z) +SELECT f(x, y -> z) SELECT f((x, y) -> z) SELECT f((x, y) -> z) +SELECT f((x, y) -> z) +SELECT f(x, (x, y) -> z) +SELECT f(x, (x, y) -> z) +CREATE FUNCTION func AS x -> plus(x, (x -> ('2' + 2)) -> plus(1), 1) diff --git a/tests/queries/0_stateless/02476_fix_lambda_parsing.sh b/tests/queries/0_stateless/02476_fix_lambda_parsing.sh index 2f134010991..641ef59a170 100755 --- a/tests/queries/0_stateless/02476_fix_lambda_parsing.sh +++ b/tests/queries/0_stateless/02476_fix_lambda_parsing.sh @@ -8,7 +8,14 @@ set -e format="$CLICKHOUSE_FORMAT" -echo "SELECT f((x, tuple(y)) -> z)" | $format -echo "select f(x, tuple(y) -> z)" | $format -echo "select f(x, y -> z)" | $format -echo "select f((x, y) -> z)" | $format +echo "SELECT f(x, tuple(y) -> z)" | $format +echo "SELECT f(x, (y) -> z)" | $format + +echo "SELECT f(x, y -> z)" | $format +echo "SELECT f((x, y) -> z)" | $format +echo "SELECT f(tuple(x, y) -> z)" | $format + +echo "SELECT f(x, (x, y) -> z)" | $format +echo "SELECT f(x, tuple(x, y) -> z)" | $format + +echo "CREATE FUNCTION func AS x -> plus(x, (x -> ('2' + 2)) -> plus(1), 1)" | $format | $format From a04b9fa9470ced4e50c78c8e204f67b8b7a2e980 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 7 Nov 2022 17:25:04 +0100 Subject: [PATCH 26/46] Add some guard rails around aggregation memory management --- src/AggregateFunctions/AggregateFunctionAggThrow.cpp | 2 ++ src/AggregateFunctions/AggregateFunctionDistinct.h | 5 +++++ src/AggregateFunctions/AggregateFunctionForEach.h | 2 +- src/AggregateFunctions/AggregateFunctionMap.h | 5 +++++ src/AggregateFunctions/IAggregateFunction.h | 11 ++++++++++- src/Processors/Transforms/WindowTransform.cpp | 2 ++ 6 files changed, 25 insertions(+), 2 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionAggThrow.cpp b/src/AggregateFunctions/AggregateFunctionAggThrow.cpp index e74f93cd9b0..ae1bbfddf75 100644 --- a/src/AggregateFunctions/AggregateFunctionAggThrow.cpp +++ b/src/AggregateFunctions/AggregateFunctionAggThrow.cpp @@ -76,6 +76,8 @@ public: data(place).~Data(); } + bool hasTrivialDestructor() const override { return std::is_trivially_destructible_v; } + void add(AggregateDataPtr __restrict, const IColumn **, size_t, Arena *) const override { } diff --git a/src/AggregateFunctions/AggregateFunctionDistinct.h b/src/AggregateFunctions/AggregateFunctionDistinct.h index 
9884e92f425..2d7362ba4cc 100644 --- a/src/AggregateFunctions/AggregateFunctionDistinct.h +++ b/src/AggregateFunctions/AggregateFunctionDistinct.h @@ -239,6 +239,11 @@ public: nested_func->destroy(getNestedPlace(place)); } + bool hasTrivialDestructor() const override + { + return std::is_trivially_destructible_v && nested_func->hasTrivialDestructor(); + } + void destroyUpToState(AggregateDataPtr __restrict place) const noexcept override { this->data(place).~Data(); diff --git a/src/AggregateFunctions/AggregateFunctionForEach.h b/src/AggregateFunctions/AggregateFunctionForEach.h index 62794ac0f53..c91c4dd7c86 100644 --- a/src/AggregateFunctions/AggregateFunctionForEach.h +++ b/src/AggregateFunctions/AggregateFunctionForEach.h @@ -174,7 +174,7 @@ public: bool hasTrivialDestructor() const override { - return nested_func->hasTrivialDestructor(); + return std::is_trivially_destructible_v && nested_func->hasTrivialDestructor(); } void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override diff --git a/src/AggregateFunctions/AggregateFunctionMap.h b/src/AggregateFunctions/AggregateFunctionMap.h index d349fc05944..f60cc71e78e 100644 --- a/src/AggregateFunctions/AggregateFunctionMap.h +++ b/src/AggregateFunctions/AggregateFunctionMap.h @@ -228,6 +228,11 @@ public: destroyImpl(place); } + bool hasTrivialDestructor() const override + { + return std::is_trivially_destructible_v && nested_func->hasTrivialDestructor(); + } + void destroyUpToState(AggregateDataPtr __restrict place) const noexcept override { destroyImpl(place); diff --git a/src/AggregateFunctions/IAggregateFunction.h b/src/AggregateFunctions/IAggregateFunction.h index 7d2fe6ae8e3..b3fd055b28d 100644 --- a/src/AggregateFunctions/IAggregateFunction.h +++ b/src/AggregateFunctions/IAggregateFunction.h @@ -685,7 +685,16 @@ public: static constexpr bool DateTime64Supported = true; IAggregateFunctionDataHelper(const DataTypes & argument_types_, const Array & parameters_) - : IAggregateFunctionHelper(argument_types_, parameters_) {} + : IAggregateFunctionHelper(argument_types_, parameters_) + { + /// To prevent derived classes changing the destroy() without updating hasTrivialDestructor() to match it + /// Enforce that either both of them are changed or none are + constexpr bool declares_destroy_and_hasTrivialDestructor = + std::is_same_v == + std::is_same_v; + static_assert(declares_destroy_and_hasTrivialDestructor, + "destroy() and hasTrivialDestructor() methods of an aggregate function must be either both overridden or not"); + } void create(AggregateDataPtr __restrict place) const override /// NOLINT { diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp index 83b0b202d74..4d3eb1f0bbd 100644 --- a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -1579,6 +1579,8 @@ struct StatefulWindowFunction : public WindowFunction state->~State(); } + bool hasTrivialDestructor() const override { return std::is_trivially_destructible_v; } + State & getState(const WindowFunctionWorkspace & workspace) { return *static_cast(static_cast(workspace.aggregate_function_state.data())); From 9210e586d24ea572d76ec58729d89e3bb5c0c4dc Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Mon, 7 Nov 2022 20:27:18 +0100 Subject: [PATCH 27/46] fix --- src/Common/ZooKeeper/ZooKeeper.h | 39 ++++++++++++------- .../MergeTree/EphemeralLockInZooKeeper.cpp | 1 + .../MergeTree/ReplicatedMergeTreeSink.cpp | 11 +++--- 
.../MergeTree/ReplicatedMergeTreeSink.h | 2 +- src/Storages/StorageReplicatedMergeTree.cpp | 2 +- .../0_stateless/01158_zookeeper_log_long.sql | 2 +- 6 files changed, 36 insertions(+), 21 deletions(-) diff --git a/src/Common/ZooKeeper/ZooKeeper.h b/src/Common/ZooKeeper/ZooKeeper.h index 968d10ad9a5..c9529b78f8a 100644 --- a/src/Common/ZooKeeper/ZooKeeper.h +++ b/src/Common/ZooKeeper/ZooKeeper.h @@ -76,7 +76,7 @@ using GetPriorityForLoadBalancing = DB::GetPriorityForLoadBalancing; template concept ZooKeeperResponse = std::derived_from; -template +template struct MultiReadResponses { template @@ -96,7 +96,17 @@ struct MultiReadResponses if constexpr (std::same_as) return dynamic_cast(*resp[index]); else + { + if constexpr (try_multi) + { + /// We should not ignore errors except ZNONODE + /// for consistency with exists, tryGet and tryGetChildren + const auto & error = resp[index].error; + if (error != Coordination::Error::ZOK && error != Coordination::Error::ZNONODE) + throw KeeperException(error); + } return resp[index]; + } }, responses); } @@ -144,6 +154,7 @@ class ZooKeeper public: using Ptr = std::shared_ptr; + using ErrorsList = std::initializer_list; ZooKeeper(const ZooKeeperArgs & args_, std::shared_ptr zk_log_ = nullptr); @@ -217,7 +228,7 @@ public: bool exists(const std::string & path, Coordination::Stat * stat = nullptr, const EventPtr & watch = nullptr); bool existsWatch(const std::string & path, Coordination::Stat * stat, Coordination::WatchCallback watch_callback); - using MultiExistsResponse = MultiReadResponses; + using MultiExistsResponse = MultiReadResponses; template MultiExistsResponse exists(TIter start, TIter end) { @@ -233,7 +244,8 @@ public: std::string get(const std::string & path, Coordination::Stat * stat = nullptr, const EventPtr & watch = nullptr); std::string getWatch(const std::string & path, Coordination::Stat * stat, Coordination::WatchCallback watch_callback); - using MultiGetResponse = MultiReadResponses; + using MultiGetResponse = MultiReadResponses; + using MultiTryGetResponse = MultiReadResponses; template MultiGetResponse get(TIter start, TIter end) @@ -264,13 +276,13 @@ public: Coordination::Error * code = nullptr); template - MultiGetResponse tryGet(TIter start, TIter end) + MultiTryGetResponse tryGet(TIter start, TIter end) { return multiRead( start, end, zkutil::makeGetRequest, [&](const auto & path) { return asyncTryGet(path); }); } - MultiGetResponse tryGet(const std::vector & paths) + MultiTryGetResponse tryGet(const std::vector & paths) { return tryGet(paths.begin(), paths.end()); } @@ -297,7 +309,8 @@ public: Coordination::WatchCallback watch_callback, Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL); - using MultiGetChildrenResponse = MultiReadResponses; + using MultiGetChildrenResponse = MultiReadResponses; + using MultiTryGetChildrenResponse = MultiReadResponses; template MultiGetChildrenResponse @@ -333,7 +346,7 @@ public: Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL); template - MultiGetChildrenResponse + MultiTryGetChildrenResponse tryGetChildren(TIter start, TIter end, Coordination::ListRequestType list_request_type = Coordination::ListRequestType::ALL) { return multiRead( @@ -343,7 +356,7 @@ public: [&](const auto & path) { return asyncTryGetChildren(path, list_request_type); }); } - MultiGetChildrenResponse + MultiTryGetChildrenResponse tryGetChildren(const std::vector & paths, Coordination::ListRequestType list_request_type = 
Coordination::ListRequestType::ALL) { return tryGetChildren(paths.begin(), paths.end(), list_request_type); @@ -511,7 +524,7 @@ private: using AsyncFunction = std::function(const std::string &)>; template - MultiReadResponses multiRead(TIter start, TIter end, RequestFactory request_factory, AsyncFunction async_fun) + MultiReadResponses multiRead(TIter start, TIter end, RequestFactory request_factory, AsyncFunction async_fun) { if (getApiVersion() >= DB::KeeperApiVersion::WITH_MULTI_READ) { @@ -523,12 +536,12 @@ private: { Coordination::Responses responses; tryMulti(requests, responses); - return MultiReadResponses{std::move(responses)}; + return MultiReadResponses{std::move(responses)}; } else { auto responses = multi(requests); - return MultiReadResponses{std::move(responses)}; + return MultiReadResponses{std::move(responses)}; } } @@ -536,14 +549,14 @@ private: std::vector> future_responses; if (responses_size == 0) - return MultiReadResponses(std::move(future_responses)); + return MultiReadResponses(std::move(future_responses)); future_responses.reserve(responses_size); for (auto it = start; it != end; ++it) future_responses.push_back(async_fun(*it)); - return MultiReadResponses{std::move(future_responses)}; + return MultiReadResponses{std::move(future_responses)}; } std::unique_ptr impl; diff --git a/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp b/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp index 802d648655f..6ddb35d109e 100644 --- a/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp +++ b/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp @@ -141,6 +141,7 @@ void EphemeralLocksInAllPartitions::unlock() return; std::vector futures; + futures.reserve(locks.size()); for (const auto & lock : locks) { futures.push_back(zookeeper->asyncRemove(lock.path)); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index b3f5a421763..53654285699 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -469,7 +469,7 @@ void ReplicatedMergeTreeSink::commitPart( else quorum_path = storage.zookeeper_path + "/quorum/status"; - waitForQuorum(zookeeper, existing_part_name, quorum_path, quorum_info.is_active_node_value, replicas_num); + waitForQuorum(zookeeper, existing_part_name, quorum_path, quorum_info.is_active_node_version, replicas_num); } else { @@ -636,7 +636,7 @@ void ReplicatedMergeTreeSink::commitPart( storage.updateQuorum(part->name, false); } - waitForQuorum(zookeeper, part->name, quorum_info.status_path, quorum_info.is_active_node_value, replicas_num); + waitForQuorum(zookeeper, part->name, quorum_info.status_path, quorum_info.is_active_node_version, replicas_num); } } @@ -658,7 +658,7 @@ void ReplicatedMergeTreeSink::waitForQuorum( zkutil::ZooKeeperPtr & zookeeper, const std::string & part_name, const std::string & quorum_path, - const std::string & is_active_node_value, + Int32 is_active_node_version, size_t replicas_num) const { /// We are waiting for quorum to be satisfied. @@ -691,9 +691,10 @@ void ReplicatedMergeTreeSink::waitForQuorum( /// And what if it is possible that the current replica at this time has ceased to be active /// and the quorum is marked as failed and deleted? 
+ Coordination::Stat stat; String value; - if (!zookeeper->tryGet(storage.replica_path + "/is_active", value, nullptr) - || value != is_active_node_value) + if (!zookeeper->tryGet(storage.replica_path + "/is_active", value, &stat) + || stat.version != is_active_node_version) throw Exception("Replica become inactive while waiting for quorum", ErrorCodes::NO_ACTIVE_REPLICAS); } catch (...) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.h b/src/Storages/MergeTree/ReplicatedMergeTreeSink.h index da87ddc0d63..b094b373f97 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.h @@ -85,7 +85,7 @@ private: /// Also checks that replica still alive. void waitForQuorum( zkutil::ZooKeeperPtr & zookeeper, const std::string & part_name, - const std::string & quorum_path, const std::string & is_active_node_value, size_t replicas_num) const; + const std::string & quorum_path, int is_active_node_version, size_t replicas_num) const; StorageReplicatedMergeTree & storage; StorageMetadataPtr metadata_snapshot; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index a70e6f30989..c547ad29f3d 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -6484,7 +6484,7 @@ void StorageReplicatedMergeTree::getClearBlocksInPartitionOps( if (startsWith(block_id, partition_prefix)) paths_to_get.push_back(fs::path(zookeeper_path) / "blocks" / block_id); - auto results = zookeeper.get(paths_to_get); + auto results = zookeeper.tryGet(paths_to_get); for (size_t i = 0; i < paths_to_get.size(); ++i) { diff --git a/tests/queries/0_stateless/01158_zookeeper_log_long.sql b/tests/queries/0_stateless/01158_zookeeper_log_long.sql index 501ab805a5a..5f6ca762ae7 100644 --- a/tests/queries/0_stateless/01158_zookeeper_log_long.sql +++ b/tests/queries/0_stateless/01158_zookeeper_log_long.sql @@ -27,7 +27,7 @@ select 'blocks'; select type, has_watch, op_num, path, is_ephemeral, is_sequential, version, requests_size, request_idx, error, watch_type, watch_state, path_created, stat_version, stat_cversion, stat_dataLength, stat_numChildren from system.zookeeper_log -where (session_id, xid) in (select session_id, xid from system.zookeeper_log where path like '/test/01158/' || currentDatabase() || '/rmt/blocks%' and op_num not in (1, 12, 500)) +where (session_id, xid) in (select session_id, xid from system.zookeeper_log where path like '/test/01158/' || currentDatabase() || '/rmt/blocks/%' and op_num not in (1, 12, 500)) order by xid, type, request_idx; drop table rmt; From de6014e4a56b61f3928bc641437012420de84c69 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 7 Nov 2022 22:54:08 +0100 Subject: [PATCH 28/46] Update run-fuzzer.sh --- docker/test/fuzzer/run-fuzzer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 5c9ab1af24e..78176890841 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -178,7 +178,7 @@ function fuzz # interferes with gdb export CLICKHOUSE_WATCHDOG_ENABLE=0 # NOTE: we use process substitution here to preserve keep $! 
as a pid of clickhouse-server - clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db 2>> server.log & + clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db 2>&1 > server.log & server_pid=$! kill -0 $server_pid From 48cc1d8492c5838958ec14a7da6a2490ca696986 Mon Sep 17 00:00:00 2001 From: Nikolay Degterinsky <43110995+evillique@users.noreply.github.com> Date: Tue, 8 Nov 2022 00:15:19 +0100 Subject: [PATCH 29/46] Update DateLUTImpl.h --- src/Common/DateLUTImpl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/DateLUTImpl.h b/src/Common/DateLUTImpl.h index 3afbb6735dc..2f8aa487621 100644 --- a/src/Common/DateLUTImpl.h +++ b/src/Common/DateLUTImpl.h @@ -1331,7 +1331,7 @@ public: } template - inline auto addQuarters(DateOrTime d, Int64 delta) const + inline auto NO_SANITIZE_UNDEFINED addQuarters(DateOrTime d, Int64 delta) const { return addMonths(d, delta * 3); } From 9ec313b05c2dbf8d699491013c8454b4470668cc Mon Sep 17 00:00:00 2001 From: Lloyd-Pottiger Date: Tue, 8 Nov 2022 18:32:53 +0800 Subject: [PATCH 30/46] remove generic driver lld Signed-off-by: Lloyd-Pottiger --- cmake/tools.cmake | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/cmake/tools.cmake b/cmake/tools.cmake index 23f34bb24cd..e8fecd9f30b 100644 --- a/cmake/tools.cmake +++ b/cmake/tools.cmake @@ -57,13 +57,14 @@ if (NOT LINKER_NAME) if (COMPILER_GCC) find_program (LLD_PATH NAMES "ld.lld") find_program (GOLD_PATH NAMES "ld.gold") - # llvm lld is a generic driver. - # Invoke ld.lld (Unix), ld64.lld (macOS), lld-link (Windows), wasm-ld (WebAssembly) instead - elseif (COMPILER_CLANG AND OS_LINUX) - find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "lld-${COMPILER_VERSION_MAJOR}" "ld.lld" "lld") - find_program (GOLD_PATH NAMES "ld.gold" "gold") - elseif (COMPILER_CLANG AND OS_DARWIN) - find_program (LLD_PATH NAMES "ld64.lld-${COMPILER_VERSION_MAJOR}" "lld-${COMPILER_VERSION_MAJOR}" "ld64.lld" "lld") + elseif (COMPILER_CLANG) + # llvm lld is a generic driver. 
+ # Invoke ld.lld (Unix), ld64.lld (macOS), lld-link (Windows), wasm-ld (WebAssembly) instead + if (OS_LINUX) + find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "ld.lld") + elseif (OS_DARWIN) + find_program (LLD_PATH NAMES "ld64.lld-${COMPILER_VERSION_MAJOR}" "ld64.lld") + endif () find_program (GOLD_PATH NAMES "ld.gold" "gold") endif () endif() From eb19df0094bc6bda65d5b39c222fb8c501d02f75 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 8 Nov 2022 15:21:25 +0100 Subject: [PATCH 31/46] fix --- src/Storages/MergeTree/EphemeralLockInZooKeeper.h | 1 + src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp | 1 - src/Storages/MergeTree/ReplicatedMergeTreeSink.h | 1 - src/Storages/StorageReplicatedMergeTree.cpp | 4 ++-- 4 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/Storages/MergeTree/EphemeralLockInZooKeeper.h b/src/Storages/MergeTree/EphemeralLockInZooKeeper.h index aaa1fc2af24..5945fa10d91 100644 --- a/src/Storages/MergeTree/EphemeralLockInZooKeeper.h +++ b/src/Storages/MergeTree/EphemeralLockInZooKeeper.h @@ -35,6 +35,7 @@ public: /// Fake "secondary node" names for blocks with and without "deduplication_path" static constexpr const char * LEGACY_LOCK_INSERT = "abandonable_lock-insert"; static constexpr const char * LEGACY_LOCK_OTHER = "abandonable_lock-other"; + static constexpr const char * LEGACY_LOCK_PREFIX = "/temp/abandonable_lock-"; EphemeralLockInZooKeeper(EphemeralLockInZooKeeper && rhs) noexcept { diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 53654285699..5482052e184 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -146,7 +146,6 @@ size_t ReplicatedMergeTreeSink::checkQuorumPrecondition(zkutil::ZooKeeperPtr & z if (is_active.error == Coordination::Error::ZNONODE || host.error == Coordination::Error::ZNONODE) throw Exception("Replica is not active right now", ErrorCodes::READONLY); - quorum_info.is_active_node_value = is_active.data; quorum_info.is_active_node_version = is_active.stat.version; quorum_info.host_node_version = host.stat.version; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.h b/src/Storages/MergeTree/ReplicatedMergeTreeSink.h index b094b373f97..e3ec5d14a52 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.h @@ -63,7 +63,6 @@ private: struct QuorumInfo { String status_path; - String is_active_node_value; int is_active_node_version = -1; int host_node_version = -1; }; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index c547ad29f3d..3b85581a157 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -6426,7 +6426,7 @@ void StorageReplicatedMergeTree::clearLockedBlockNumbersInPartition( paths_to_get.push_back(partition_path / block); } - auto results = zookeeper.get(paths_to_get); + auto results = zookeeper.tryGet(paths_to_get); for (size_t i = 0; i < paths_to_get.size(); ++i) { auto & result = results[i]; @@ -6454,7 +6454,7 @@ void StorageReplicatedMergeTree::clearLockedBlockNumbersInPartition( "probably it is created by a replica that running newer version of ClickHouse. " "Cannot remove it, will wait for this lock to disappear. 
Upgrade remaining hosts in the cluster to address this warning."; - if (result.data.starts_with(zookeeper_path + "/temp/abandonable_lock-")) + if (result.data.starts_with(zookeeper_path + EphemeralLockInZooKeeper::LEGACY_LOCK_PREFIX)) LOG_WARNING(log, old_version_warning, paths_to_get[i], result.data); else LOG_WARNING(log, new_version_warning, paths_to_get[i], result.data); From a4cd562914b3f9c7f16ac488b04dc933dca3515c Mon Sep 17 00:00:00 2001 From: DanRoscigno Date: Tue, 8 Nov 2022 10:12:29 -0500 Subject: [PATCH 32/46] fix next button --- docs/en/operations/system-tables/index.md | 4 ++-- docs/en/operations/utilities/index.md | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/docs/en/operations/system-tables/index.md b/docs/en/operations/system-tables/index.md index e08a727a62a..5fc302cad34 100644 --- a/docs/en/operations/system-tables/index.md +++ b/docs/en/operations/system-tables/index.md @@ -1,7 +1,8 @@ --- slug: /en/operations/system-tables/ sidebar_position: 52 -sidebar_label: System Tables +sidebar_label: Overview +pagination_next: 'en/operations/system-tables/asynchronous_metric_log' --- # System Tables @@ -72,4 +73,3 @@ If procfs is supported and enabled on the system, ClickHouse server collects the - `OSReadBytes` - `OSWriteBytes` -[Original article](https://clickhouse.com/docs/en/operations/system-tables/) diff --git a/docs/en/operations/utilities/index.md b/docs/en/operations/utilities/index.md index df4af30768c..9de68923ea4 100644 --- a/docs/en/operations/utilities/index.md +++ b/docs/en/operations/utilities/index.md @@ -1,10 +1,11 @@ --- slug: /en/operations/utilities/ sidebar_position: 56 -sidebar_label: Utilities +sidebar_label: Overview +pagination_next: 'en/operations/utilities/clickhouse-copier' --- -# ClickHouse Utility +# ClickHouse Utilities - [clickhouse-local](../../operations/utilities/clickhouse-local.md) — Allows running SQL queries on data without starting the ClickHouse server, similar to how `awk` does this. - [clickhouse-copier](../../operations/utilities/clickhouse-copier.md) — Copies (and reshards) data from one cluster to another cluster. From e659fb5d85d13c8d6e1b5e0719f3a27c4bfee8c1 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 8 Nov 2022 19:11:44 +0300 Subject: [PATCH 33/46] Update run-fuzzer.sh --- docker/test/fuzzer/run-fuzzer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 78176890841..f004e7464a5 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -178,7 +178,7 @@ function fuzz # interferes with gdb export CLICKHOUSE_WATCHDOG_ENABLE=0 # NOTE: we use process substitution here to preserve keep $! as a pid of clickhouse-server - clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db 2>&1 > server.log & + clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db > server.log 2>&1 & server_pid=$! 
kill -0 $server_pid From 050f3ca7cf514e358e2d852a56f81ffcb4162908 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 8 Nov 2022 17:25:03 +0100 Subject: [PATCH 34/46] Fix msan warning --- base/glibc-compatibility/musl/getauxval.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/base/glibc-compatibility/musl/getauxval.c b/base/glibc-compatibility/musl/getauxval.c index eba12604b4d..44a9f979f99 100644 --- a/base/glibc-compatibility/musl/getauxval.c +++ b/base/glibc-compatibility/musl/getauxval.c @@ -10,6 +10,12 @@ #include "syscall.h" +#if defined(__has_feature) +#if __has_feature(memory_sanitizer) +#include +#endif +#endif + #define ARRAY_SIZE(a) sizeof((a))/sizeof((a[0])) /// Suppress TSan since it is possible for this code to be called from multiple threads, From a3c1049f9d286f7efad1eb77774e67ddba181717 Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Tue, 8 Nov 2022 19:42:13 +0100 Subject: [PATCH 35/46] Update run-fuzzer.sh --- docker/test/fuzzer/run-fuzzer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index f004e7464a5..9b9f0240760 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -178,7 +178,7 @@ function fuzz # interferes with gdb export CLICKHOUSE_WATCHDOG_ENABLE=0 # NOTE: we use process substitution here to preserve keep $! as a pid of clickhouse-server - clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db > server.log 2>&1 & + clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db > >(tail --lines=+0 > server.log) 2>&1 & server_pid=$! kill -0 $server_pid From 236f3329bc4bd7f0241a47224a7adb93321319ac Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 8 Nov 2022 22:54:58 +0300 Subject: [PATCH 36/46] Update run-fuzzer.sh --- docker/test/fuzzer/run-fuzzer.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 9b9f0240760..e57b671342c 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -178,7 +178,7 @@ function fuzz # interferes with gdb export CLICKHOUSE_WATCHDOG_ENABLE=0 # NOTE: we use process substitution here to preserve keep $! as a pid of clickhouse-server - clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db > >(tail --lines=+0 > server.log) 2>&1 & + clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db 2>&1 | pigz > server.log.gz & server_pid=$! kill -0 $server_pid @@ -297,7 +297,7 @@ quit # The server has died. task_exit_code=210 echo "failure" > status.txt - if ! grep --text -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: AddressSanitizer:.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*\|.*_LIBCPP_ASSERT.*" server.log > description.txt + if ! zgrep --text -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: AddressSanitizer:.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*\|.*_LIBCPP_ASSERT.*" server.log.gz > description.txt then echo "Lost connection to server. See the logs." > description.txt fi @@ -392,7 +392,7 @@ th { cursor: pointer; }

AST Fuzzer for PR #${PR_TO_TEST} @ ${SHA_TO_TEST}

From 090b153b1e297e24f7cd5c44a973492eab6a8784 Mon Sep 17 00:00:00 2001 From: DanRoscigno Date: Tue, 8 Nov 2022 17:37:20 -0500 Subject: [PATCH 37/46] a dir named index with index.md broke nav --- .../sql-reference/statements/alter/{index.md => overview.md} | 0 .../statements/alter/{index/index.md => skipping-index.md} | 3 ++- 2 files changed, 2 insertions(+), 1 deletion(-) rename docs/en/sql-reference/statements/alter/{index.md => overview.md} (100%) rename docs/en/sql-reference/statements/alter/{index/index.md => skipping-index.md} (95%) diff --git a/docs/en/sql-reference/statements/alter/index.md b/docs/en/sql-reference/statements/alter/overview.md similarity index 100% rename from docs/en/sql-reference/statements/alter/index.md rename to docs/en/sql-reference/statements/alter/overview.md diff --git a/docs/en/sql-reference/statements/alter/index/index.md b/docs/en/sql-reference/statements/alter/skipping-index.md similarity index 95% rename from docs/en/sql-reference/statements/alter/index/index.md rename to docs/en/sql-reference/statements/alter/skipping-index.md index 03d4bd47e71..e23cf20cbed 100644 --- a/docs/en/sql-reference/statements/alter/index/index.md +++ b/docs/en/sql-reference/statements/alter/skipping-index.md @@ -1,5 +1,6 @@ --- -slug: /en/sql-reference/statements/alter/index +slug: /en/sql-reference/statements/alter/skipping-index + toc_hidden_folder: true sidebar_position: 42 sidebar_label: INDEX From d2253663356af1149960662942a2b5c87108804f Mon Sep 17 00:00:00 2001 From: DanRoscigno Date: Tue, 8 Nov 2022 18:04:22 -0500 Subject: [PATCH 38/46] update links --- .../database-engines/materialized-mysql.md | 78 +++++++++---------- .../statements/alter/skipping-index.md | 4 +- .../en/sql-reference/statements/alter/view.md | 2 +- docs/en/sql-reference/statements/grant.md | 2 +- docs/en/sql-reference/statements/kill.md | 2 +- 5 files changed, 44 insertions(+), 44 deletions(-) diff --git a/docs/en/engines/database-engines/materialized-mysql.md b/docs/en/engines/database-engines/materialized-mysql.md index c8aa65bdd91..5ddcf6bfb41 100644 --- a/docs/en/engines/database-engines/materialized-mysql.md +++ b/docs/en/engines/database-engines/materialized-mysql.md @@ -77,15 +77,15 @@ While turning on `gtid_mode` you should also specify `enforce_gtid_consistency = ## Virtual Columns {#virtual-columns} -When working with the `MaterializedMySQL` database engine, [ReplacingMergeTree](../../engines/table-engines/mergetree-family/replacingmergetree.md) tables are used with virtual `_sign` and `_version` columns. +When working with the `MaterializedMySQL` database engine, [ReplacingMergeTree](/docs/en/sql-reference/engines/table-engines/mergetree-family/replacingmergetree.md) tables are used with virtual `_sign` and `_version` columns. ### \_version -`_version` — Transaction counter. Type [UInt64](../../sql-reference/data-types/int-uint.md). +`_version` — Transaction counter. Type [UInt64](/docs/en/sql-reference/data-types/int-uint.md). ### \_sign -`_sign` — Deletion mark. Type [Int8](../../sql-reference/data-types/int-uint.md). Possible values: +`_sign` — Deletion mark. Type [Int8](/docs/en/sql-reference/data-types/int-uint.md). Possible values: - `1` — Row is not deleted, - `-1` — Row is deleted. 
@@ -93,29 +93,29 @@ When working with the `MaterializedMySQL` database engine, [ReplacingMergeTree]( | MySQL | ClickHouse | |-------------------------|--------------------------------------------------------------| -| TINY | [Int8](../../sql-reference/data-types/int-uint.md) | -| SHORT | [Int16](../../sql-reference/data-types/int-uint.md) | -| INT24 | [Int32](../../sql-reference/data-types/int-uint.md) | -| LONG | [UInt32](../../sql-reference/data-types/int-uint.md) | -| LONGLONG | [UInt64](../../sql-reference/data-types/int-uint.md) | -| FLOAT | [Float32](../../sql-reference/data-types/float.md) | -| DOUBLE | [Float64](../../sql-reference/data-types/float.md) | -| DECIMAL, NEWDECIMAL | [Decimal](../../sql-reference/data-types/decimal.md) | -| DATE, NEWDATE | [Date](../../sql-reference/data-types/date.md) | -| DATETIME, TIMESTAMP | [DateTime](../../sql-reference/data-types/datetime.md) | -| DATETIME2, TIMESTAMP2 | [DateTime64](../../sql-reference/data-types/datetime64.md) | -| YEAR | [UInt16](../../sql-reference/data-types/int-uint.md) | -| TIME | [Int64](../../sql-reference/data-types/int-uint.md) | -| ENUM | [Enum](../../sql-reference/data-types/enum.md) | -| STRING | [String](../../sql-reference/data-types/string.md) | -| VARCHAR, VAR_STRING | [String](../../sql-reference/data-types/string.md) | -| BLOB | [String](../../sql-reference/data-types/string.md) | -| GEOMETRY | [String](../../sql-reference/data-types/string.md) | -| BINARY | [FixedString](../../sql-reference/data-types/fixedstring.md) | -| BIT | [UInt64](../../sql-reference/data-types/int-uint.md) | -| SET | [UInt64](../../sql-reference/data-types/int-uint.md) | +| TINY | [Int8](/docs/en/sql-reference/data-types/int-uint.md) | +| SHORT | [Int16](/docs/en/sql-reference/data-types/int-uint.md) | +| INT24 | [Int32](/docs/en/sql-reference/data-types/int-uint.md) | +| LONG | [UInt32](/docs/en/sql-reference/data-types/int-uint.md) | +| LONGLONG | [UInt64](/docs/en/sql-reference/data-types/int-uint.md) | +| FLOAT | [Float32](/docs/en/sql-reference/data-types/float.md) | +| DOUBLE | [Float64](/docs/en/sql-reference/data-types/float.md) | +| DECIMAL, NEWDECIMAL | [Decimal](/docs/en/sql-reference/data-types/decimal.md) | +| DATE, NEWDATE | [Date](/docs/en/sql-reference/data-types/date.md) | +| DATETIME, TIMESTAMP | [DateTime](/docs/en/sql-reference/data-types/datetime.md) | +| DATETIME2, TIMESTAMP2 | [DateTime64](/docs/en/sql-reference/data-types/datetime64.md) | +| YEAR | [UInt16](/docs/en/sql-reference/data-types/int-uint.md) | +| TIME | [Int64](/docs/en/sql-reference/data-types/int-uint.md) | +| ENUM | [Enum](/docs/en/sql-reference/data-types/enum.md) | +| STRING | [String](/docs/en/sql-reference/data-types/string.md) | +| VARCHAR, VAR_STRING | [String](/docs/en/sql-reference/data-types/string.md) | +| BLOB | [String](/docs/en/sql-reference/data-types/string.md) | +| GEOMETRY | [String](/docs/en/sql-reference/data-types/string.md) | +| BINARY | [FixedString](/docs/en/sql-reference/data-types/fixedstring.md) | +| BIT | [UInt64](/docs/en/sql-reference/data-types/int-uint.md) | +| SET | [UInt64](/docs/en/sql-reference/data-types/int-uint.md) | -[Nullable](../../sql-reference/data-types/nullable.md) is supported. +[Nullable](/docs/en/sql-reference/data-types/nullable.md) is supported. The data of TIME type in MySQL is converted to microseconds in ClickHouse. 
@@ -133,7 +133,7 @@ Apart of the data types limitations there are few restrictions comparing to `MyS ### DDL Queries {#ddl-queries} -MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([ALTER](../../sql-reference/statements/alter/index.md), [CREATE](../../sql-reference/statements/create/index.md), [DROP](../../sql-reference/statements/drop), [RENAME](../../sql-reference/statements/rename.md)). If ClickHouse cannot parse some DDL query, the query is ignored. +MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([ALTER](/docs/en/sql-reference/statements/alter/overview.md), [CREATE](/docs/en/sql-reference/statements/create/index.md), [DROP](/docs/en/sql-reference/statements/drop.md), [RENAME](/docs/en/sql-reference/statements/rename.md)). If ClickHouse cannot parse some DDL query, the query is ignored. ### Data Replication {#data-replication} @@ -151,7 +151,7 @@ MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([ `SELECT` query from `MaterializedMySQL` tables has some specifics: - If `_version` is not specified in the `SELECT` query, the - [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier is used, so only rows with + [FINAL](/docs/en/sql-reference/statements/select/from.md/#select-from-final) modifier is used, so only rows with `MAX(_version)` are returned for each primary key value. - If `_sign` is not specified in the `SELECT` query, `WHERE _sign=1` is used by default. So the deleted rows are not @@ -164,7 +164,7 @@ MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([ MySQL `PRIMARY KEY` and `INDEX` clauses are converted into `ORDER BY` tuples in ClickHouse tables. ClickHouse has only one physical order, which is determined by `ORDER BY` clause. To create a new physical order, use -[materialized views](../../sql-reference/statements/create/view.md#materialized). +[materialized views](/docs/en/sql-reference/statements/create/view.md/#materialized). **Notes** @@ -173,7 +173,7 @@ ClickHouse has only one physical order, which is determined by `ORDER BY` clause MySQL binlog. - Replication can be easily broken. - Manual operations on database and tables are forbidden. -- `MaterializedMySQL` is affected by the [optimize_on_insert](../../operations/settings/settings.md#optimize-on-insert) +- `MaterializedMySQL` is affected by the [optimize_on_insert](/docs/en/operations/settings/settings.md/#optimize-on-insert) setting. Data is merged in the corresponding table in the `MaterializedMySQL` database when a table in the MySQL server changes. @@ -187,19 +187,19 @@ These are the schema conversion manipulations you can do with table overrides fo * Modify column type. Must be compatible with the original type, or replication will fail. For example, you can modify a UInt32 column to UInt64, but you can not modify a String column to Array(String). - * Modify [column TTL](../table-engines/mergetree-family/mergetree/#mergetree-column-ttl). - * Modify [column compression codec](../../sql-reference/statements/create/table/#codecs). - * Add [ALIAS columns](../../sql-reference/statements/create/table/#alias). - * Add [skipping indexes](../table-engines/mergetree-family/mergetree/#table_engine-mergetree-data_skipping-indexes) - * Add [projections](../table-engines/mergetree-family/mergetree/#projections). Note that projection optimizations are + * Modify [column TTL](/docs/en/sql-reference/table-engines/mergetree-family/mergetree.md/#mergetree-column-ttl). 
+ * Modify [column compression codec](/docs/en/sql-reference/statements/create/table.md/#codecs). + * Add [ALIAS columns](/docs/en/sql-reference/statements/create/table.md/#alias). + * Add [skipping indexes](/docs/en/sql-reference/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-data_skipping-indexes) + * Add [projections](/docs/en/sql-reference/table-engines/mergetree-family/mergetree.md/#projections). Note that projection optimizations are disabled when using `SELECT ... FINAL` (which MaterializedMySQL does by default), so their utility is limited here. `INDEX ... TYPE hypothesis` as [described in the v21.12 blog post]](https://clickhouse.com/blog/en/2021/clickhouse-v21.12-released/) may be more useful in this case. - * Modify [PARTITION BY](../table-engines/mergetree-family/custom-partitioning-key/) - * Modify [ORDER BY](../table-engines/mergetree-family/mergetree/#mergetree-query-clauses) - * Modify [PRIMARY KEY](../table-engines/mergetree-family/mergetree/#mergetree-query-clauses) - * Add [SAMPLE BY](../table-engines/mergetree-family/mergetree/#mergetree-query-clauses) - * Add [table TTL](../table-engines/mergetree-family/mergetree/#mergetree-query-clauses) + * Modify [PARTITION BY](/docs/en/sql-reference/table-engines/mergetree-family/custom-partitioning-key/) + * Modify [ORDER BY](/docs/en/sql-reference/table-engines/mergetree-family/mergetree.md/#mergetree-query-clauses) + * Modify [PRIMARY KEY](/docs/en/sql-reference/table-engines/mergetree-family/mergetree.md/#mergetree-query-clauses) + * Add [SAMPLE BY](/docs/en/sql-reference/table-engines/mergetree-family/mergetree.md/#mergetree-query-clauses) + * Add [table TTL](/docs/en/sql-reference/table-engines/mergetree-family/mergetree.md/#mergetree-query-clauses) ```sql CREATE DATABASE db_name ENGINE = MaterializedMySQL(...) diff --git a/docs/en/sql-reference/statements/alter/skipping-index.md b/docs/en/sql-reference/statements/alter/skipping-index.md index e23cf20cbed..1a61e1b73ec 100644 --- a/docs/en/sql-reference/statements/alter/skipping-index.md +++ b/docs/en/sql-reference/statements/alter/skipping-index.md @@ -14,12 +14,12 @@ The following operations are available: - `ALTER TABLE [db].table_name [ON CLUSTER cluster] DROP INDEX name` - Removes index description from tables metadata and deletes index files from disk. -- `ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](../../../../sql-reference/statements/alter/index.md#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data. +- `ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data. The first two commands are lightweight in a sense that they only change metadata or remove files. Also, they are replicated, syncing indices metadata via ZooKeeper. :::note -Index manipulation is supported only for tables with [`*MergeTree`](../../../../engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](../../../../engines/table-engines/mergetree-family/replication.md) variants). 
+Index manipulation is supported only for tables with [`*MergeTree`](/docs/en/sql-reference/engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](/docs/en/sql-reference/engines/table-engines/mergetree-family/replication.md) variants). ::: diff --git a/docs/en/sql-reference/statements/alter/view.md b/docs/en/sql-reference/statements/alter/view.md index e382cdace30..fd6045a026e 100644 --- a/docs/en/sql-reference/statements/alter/view.md +++ b/docs/en/sql-reference/statements/alter/view.md @@ -8,7 +8,7 @@ sidebar_label: VIEW You can modify `SELECT` query that was specified when a [materialized view](../create/view.md#materialized) was created with the `ALTER TABLE … MODIFY QUERY` statement. Use it when the materialized view was created without the `TO [db.]name` clause. The `allow_experimental_alter_materialized_view_structure` setting must be enabled. -If a materialized view uses the `TO [db.]name` construction, you must [DETACH](../detach.md) the view, run [ALTER TABLE](index.md) query for the target table, and then [ATTACH](../attach.md) the previously detached (`DETACH`) view. +If a materialized view uses the `TO [db.]name` construction, you must [DETACH](../detach.md) the view, run [ALTER TABLE](overview.md) query for the target table, and then [ATTACH](../attach.md) the previously detached (`DETACH`) view. **Example** diff --git a/docs/en/sql-reference/statements/grant.md b/docs/en/sql-reference/statements/grant.md index 546a8b0958d..bd35efb3deb 100644 --- a/docs/en/sql-reference/statements/grant.md +++ b/docs/en/sql-reference/statements/grant.md @@ -261,7 +261,7 @@ The granted privilege allows `john` to insert data to the `x` and/or `y` columns ### ALTER -Allows executing [ALTER](../../sql-reference/statements/alter/index.md) queries according to the following hierarchy of privileges: +Allows executing [ALTER](../../sql-reference/statements/alter/overview.md) queries according to the following hierarchy of privileges: - `ALTER`. Level: `COLUMN`. - `ALTER TABLE`. Level: `GROUP` diff --git a/docs/en/sql-reference/statements/kill.md b/docs/en/sql-reference/statements/kill.md index 294724dfa50..733125c78f3 100644 --- a/docs/en/sql-reference/statements/kill.md +++ b/docs/en/sql-reference/statements/kill.md @@ -51,7 +51,7 @@ KILL MUTATION [ON CLUSTER cluster] [FORMAT format] ``` -Tries to cancel and remove [mutations](../../sql-reference/statements/alter/index.md#alter-mutations) that are currently executing. Mutations to cancel are selected from the [`system.mutations`](../../operations/system-tables/mutations.md#system_tables-mutations) table using the filter specified by the `WHERE` clause of the `KILL` query. +Tries to cancel and remove [mutations](../../sql-reference/statements/alter/overview.md#alter-mutations) that are currently executing. Mutations to cancel are selected from the [`system.mutations`](../../operations/system-tables/mutations.md#system_tables-mutations) table using the filter specified by the `WHERE` clause of the `KILL` query. A test query (`TEST`) only checks the user’s rights and displays a list of mutations to stop. 
From ecb4c2b9fbc08f1d641ccafa976b1f331ac8462e Mon Sep 17 00:00:00 2001 From: Alexander Gololobov <440544+davenger@users.noreply.github.com> Date: Wed, 9 Nov 2022 00:49:11 +0100 Subject: [PATCH 39/46] Only look for logs with Error level --- .../0_stateless/02477_projection_materialize_and_zero_copy.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02477_projection_materialize_and_zero_copy.sql b/tests/queries/0_stateless/02477_projection_materialize_and_zero_copy.sql index 922b613888f..027eb699ad6 100644 --- a/tests/queries/0_stateless/02477_projection_materialize_and_zero_copy.sql +++ b/tests/queries/0_stateless/02477_projection_materialize_and_zero_copy.sql @@ -13,6 +13,6 @@ alter table t materialize projection p_norm settings mutations_sync = 1; SYSTEM FLUSH LOGS; -SELECT * FROM system.text_log WHERE event_time >= now() - 30 and message like '%BAD_DATA_PART_NAME%'; +SELECT * FROM system.text_log WHERE event_time >= now() - 30 and level == 'Error' and message like '%BAD_DATA_PART_NAME%'; DROP TABLE IF EXISTS t; From 45c9debeaff24c41968c5a4a7f173e9dbc706dc6 Mon Sep 17 00:00:00 2001 From: DanRoscigno Date: Tue, 8 Nov 2022 19:17:58 -0500 Subject: [PATCH 40/46] update links --- .../database-engines/materialized-mysql.md | 18 +- .../engines/table-engines/log-family/index.md | 10 +- .../mergetree-family/mergetree.md | 114 +++---- .../mergetree-family/replication.md | 26 +- docs/en/engines/table-engines/special/join.md | 28 +- docs/en/operations/settings/settings.md | 308 +++++++++--------- docs/en/operations/storing-data.md | 18 +- docs/en/operations/system-tables/mutations.md | 34 +- docs/en/operations/system-tables/parts.md | 22 +- .../operations/system-tables/parts_columns.md | 2 +- .../sql-reference/statements/alter/column.md | 22 +- .../sql-reference/statements/alter/delete.md | 12 +- .../statements/alter/overview.md | 44 +-- .../statements/alter/partition.md | 22 +- .../statements/alter/projection.md | 10 +- .../statements/alter/skipping-index.md | 4 +- .../sql-reference/statements/alter/update.md | 10 +- docs/en/sql-reference/statements/index.md | 44 +-- 18 files changed, 374 insertions(+), 374 deletions(-) diff --git a/docs/en/engines/database-engines/materialized-mysql.md b/docs/en/engines/database-engines/materialized-mysql.md index 5ddcf6bfb41..0411286cd23 100644 --- a/docs/en/engines/database-engines/materialized-mysql.md +++ b/docs/en/engines/database-engines/materialized-mysql.md @@ -77,7 +77,7 @@ While turning on `gtid_mode` you should also specify `enforce_gtid_consistency = ## Virtual Columns {#virtual-columns} -When working with the `MaterializedMySQL` database engine, [ReplacingMergeTree](/docs/en/sql-reference/engines/table-engines/mergetree-family/replacingmergetree.md) tables are used with virtual `_sign` and `_version` columns. +When working with the `MaterializedMySQL` database engine, [ReplacingMergeTree](/docs/en/engines/table-engines/mergetree-family/replacingmergetree.md) tables are used with virtual `_sign` and `_version` columns. ### \_version @@ -187,19 +187,19 @@ These are the schema conversion manipulations you can do with table overrides fo * Modify column type. Must be compatible with the original type, or replication will fail. For example, you can modify a UInt32 column to UInt64, but you can not modify a String column to Array(String). - * Modify [column TTL](/docs/en/sql-reference/table-engines/mergetree-family/mergetree.md/#mergetree-column-ttl). 
+ * Modify [column TTL](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#mergetree-column-ttl). * Modify [column compression codec](/docs/en/sql-reference/statements/create/table.md/#codecs). * Add [ALIAS columns](/docs/en/sql-reference/statements/create/table.md/#alias). - * Add [skipping indexes](/docs/en/sql-reference/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-data_skipping-indexes) - * Add [projections](/docs/en/sql-reference/table-engines/mergetree-family/mergetree.md/#projections). Note that projection optimizations are + * Add [skipping indexes](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-data_skipping-indexes) + * Add [projections](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#projections). Note that projection optimizations are disabled when using `SELECT ... FINAL` (which MaterializedMySQL does by default), so their utility is limited here. `INDEX ... TYPE hypothesis` as [described in the v21.12 blog post]](https://clickhouse.com/blog/en/2021/clickhouse-v21.12-released/) may be more useful in this case. - * Modify [PARTITION BY](/docs/en/sql-reference/table-engines/mergetree-family/custom-partitioning-key/) - * Modify [ORDER BY](/docs/en/sql-reference/table-engines/mergetree-family/mergetree.md/#mergetree-query-clauses) - * Modify [PRIMARY KEY](/docs/en/sql-reference/table-engines/mergetree-family/mergetree.md/#mergetree-query-clauses) - * Add [SAMPLE BY](/docs/en/sql-reference/table-engines/mergetree-family/mergetree.md/#mergetree-query-clauses) - * Add [table TTL](/docs/en/sql-reference/table-engines/mergetree-family/mergetree.md/#mergetree-query-clauses) + * Modify [PARTITION BY](/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key/) + * Modify [ORDER BY](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#mergetree-query-clauses) + * Modify [PRIMARY KEY](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#mergetree-query-clauses) + * Add [SAMPLE BY](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#mergetree-query-clauses) + * Add [table TTL](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#mergetree-query-clauses) ```sql CREATE DATABASE db_name ENGINE = MaterializedMySQL(...) diff --git a/docs/en/engines/table-engines/log-family/index.md b/docs/en/engines/table-engines/log-family/index.md index 98bc4dbad04..269ad0c98f1 100644 --- a/docs/en/engines/table-engines/log-family/index.md +++ b/docs/en/engines/table-engines/log-family/index.md @@ -10,11 +10,11 @@ These engines were developed for scenarios when you need to quickly write many s Engines of the family: -- [StripeLog](../../../engines/table-engines/log-family/stripelog.md) -- [Log](../../../engines/table-engines/log-family/log.md) -- [TinyLog](../../../engines/table-engines/log-family/tinylog.md) +- [StripeLog](/docs/en/engines/table-engines/log-family/stripelog.md) +- [Log](/docs/en/engines/table-engines/log-family/log.md) +- [TinyLog](/docs/en/engines/table-engines/log-family/tinylog.md) -`Log` family table engines can store data to [HDFS](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-hdfs) or [S3](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-s3) distributed file systems. 
+`Log` family table engines can store data to [HDFS](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-hdfs) or [S3](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-s3) distributed file systems. ## Common Properties {#common-properties} @@ -28,7 +28,7 @@ Engines: During `INSERT` queries, the table is locked, and other queries for reading and writing data both wait for the table to unlock. If there are no data writing queries, any number of data reading queries can be performed concurrently. -- Do not support [mutations](../../../sql-reference/statements/alter/index.md#alter-mutations). +- Do not support [mutations](/docs/en/sql-reference/statements/alter/overview.md/#alter-mutations). - Do not support indexes. diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 486baac2310..807a19605c4 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -16,20 +16,20 @@ Main features: This allows you to create a small sparse index that helps find data faster. -- Partitions can be used if the [partitioning key](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md) is specified. +- Partitions can be used if the [partitioning key](/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md) is specified. ClickHouse supports certain operations with partitions that are more efficient than general operations on the same data with the same result. ClickHouse also automatically cuts off the partition data where the partitioning key is specified in the query. - Data replication support. - The family of `ReplicatedMergeTree` tables provides data replication. For more information, see [Data replication](../../../engines/table-engines/mergetree-family/replication.md). + The family of `ReplicatedMergeTree` tables provides data replication. For more information, see [Data replication](/docs/en/engines/table-engines/mergetree-family/replication.md). - Data sampling support. If necessary, you can set the data sampling method in the table. :::info -The [Merge](../../../engines/table-engines/special/merge.md#merge) engine does not belong to the `*MergeTree` family. +The [Merge](/docs/en/engines/table-engines/special/merge.md/#merge) engine does not belong to the `*MergeTree` family. ::: ## Creating a Table {#table_engine-mergetree-creating-a-table} @@ -57,7 +57,7 @@ ORDER BY expr [SETTINGS name=value, ...] ``` -For a description of parameters, see the [CREATE query description](../../../sql-reference/statements/create/table.md). +For a description of parameters, see the [CREATE query description](/docs/en/sql-reference/statements/create/table.md). ### Query Clauses {#mergetree-query-clauses} @@ -77,9 +77,9 @@ Use the `ORDER BY tuple()` syntax, if you do not need sorting. See [Selecting th #### PARTITION BY -`PARTITION BY` — The [partitioning key](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md). Optional. In most cases you don't need partition key, and in most other cases you don't need partition key more granular than by months. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead make client identifier or name the first column in the ORDER BY expression). 
+`PARTITION BY` — The [partitioning key](/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md). Optional. In most cases you don't need partition key, and in most other cases you don't need partition key more granular than by months. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead make client identifier or name the first column in the ORDER BY expression). -For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](../../../sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format. +For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format. #### PRIMARY KEY @@ -127,7 +127,7 @@ Additional parameters that control the behavior of the `MergeTree` (optional): #### use_minimalistic_part_header_in_zookeeper -`use_minimalistic_part_header_in_zookeeper` — Storage method of the data parts headers in ZooKeeper. If `use_minimalistic_part_header_in_zookeeper=1`, then ZooKeeper stores less data. For more information, see the [setting description](../../../operations/server-configuration-parameters/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) in “Server configuration parameters”. +`use_minimalistic_part_header_in_zookeeper` — Storage method of the data parts headers in ZooKeeper. If `use_minimalistic_part_header_in_zookeeper=1`, then ZooKeeper stores less data. For more information, see the [setting description](/docs/en/operations/server-configuration-parameters/settings.md/#server-settings-use_minimalistic_part_header_in_zookeeper) in “Server configuration parameters”. #### min_merge_bytes_to_use_direct_io @@ -166,15 +166,15 @@ Additional parameters that control the behavior of the `MergeTree` (optional): #### max_compress_block_size -`max_compress_block_size` — Maximum size of blocks of uncompressed data before compressing for writing to a table. You can also specify this setting in the global settings (see [max_compress_block_size](../../../operations/settings/settings.md#max-compress-block-size) setting). The value specified when table is created overrides the global value for this setting. +`max_compress_block_size` — Maximum size of blocks of uncompressed data before compressing for writing to a table. You can also specify this setting in the global settings (see [max_compress_block_size](/docs/en/operations/settings/settings.md/#max-compress-block-size) setting). The value specified when table is created overrides the global value for this setting. #### min_compress_block_size -`min_compress_block_size` — Minimum size of blocks of uncompressed data required for compression when writing the next mark. You can also specify this setting in the global settings (see [min_compress_block_size](../../../operations/settings/settings.md#min-compress-block-size) setting). The value specified when table is created overrides the global value for this setting. +`min_compress_block_size` — Minimum size of blocks of uncompressed data required for compression when writing the next mark. You can also specify this setting in the global settings (see [min_compress_block_size](/docs/en/operations/settings/settings.md/#min-compress-block-size) setting). 
The value specified when table is created overrides the global value for this setting. #### max_partitions_to_read -`max_partitions_to_read` — Limits the maximum number of partitions that can be accessed in one query. You can also specify setting [max_partitions_to_read](../../../operations/settings/merge-tree-settings.md#max-partitions-to-read) in the global setting. +`max_partitions_to_read` — Limits the maximum number of partitions that can be accessed in one query. You can also specify setting [max_partitions_to_read](/docs/en/operations/settings/merge-tree-settings.md/#max-partitions-to-read) in the global setting. **Example of Sections Setting** @@ -184,7 +184,7 @@ ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDa In the example, we set partitioning by month. -We also set an expression for sampling as a hash by the user ID. This allows you to pseudorandomize the data in the table for each `CounterID` and `EventDate`. If you define a [SAMPLE](../../../sql-reference/statements/select/sample.md#select-sample-clause) clause when selecting the data, ClickHouse will return an evenly pseudorandom data sample for a subset of users. +We also set an expression for sampling as a hash by the user ID. This allows you to pseudorandomize the data in the table for each `CounterID` and `EventDate`. If you define a [SAMPLE](/docs/en/sql-reference/statements/select/sample.md/#select-sample-clause) clause when selecting the data, ClickHouse will return an evenly pseudorandom data sample for a subset of users. The `index_granularity` setting can be omitted because 8192 is the default value. @@ -207,9 +207,9 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] **MergeTree() Parameters** -- `date-column` — The name of a column of the [Date](../../../sql-reference/data-types/date.md) type. ClickHouse automatically creates partitions by month based on this column. The partition names are in the `"YYYYMM"` format. +- `date-column` — The name of a column of the [Date](/docs/en/sql-reference/data-types/date.md) type. ClickHouse automatically creates partitions by month based on this column. The partition names are in the `"YYYYMM"` format. - `sampling_expression` — An expression for sampling. -- `(primary, key)` — Primary key. Type: [Tuple()](../../../sql-reference/data-types/tuple.md) +- `(primary, key)` — Primary key. Type: [Tuple()](/docs/en/sql-reference/data-types/tuple.md) - `index_granularity` — The granularity of an index. The number of data rows between the “marks” of an index. The value 8192 is appropriate for most tasks. **Example** @@ -262,7 +262,7 @@ Sparse indexes allow you to work with a very large number of table rows, because ClickHouse does not require a unique primary key. You can insert multiple rows with the same primary key. -You can use `Nullable`-typed expressions in the `PRIMARY KEY` and `ORDER BY` clauses but it is strongly discouraged. To allow this feature, turn on the [allow_nullable_key](../../../operations/settings/settings.md#allow-nullable-key) setting. The [NULLS_LAST](../../../sql-reference/statements/select/order-by.md#sorting-of-special-values) principle applies for `NULL` values in the `ORDER BY` clause. +You can use `Nullable`-typed expressions in the `PRIMARY KEY` and `ORDER BY` clauses but it is strongly discouraged. To allow this feature, turn on the [allow_nullable_key](/docs/en/operations/settings/settings.md/#allow-nullable-key) setting. 
The [NULLS_LAST](/docs/en/sql-reference/statements/select/order-by.md/#sorting-of-special-values) principle applies for `NULL` values in the `ORDER BY` clause. ### Selecting the Primary Key {#selecting-the-primary-key} @@ -279,26 +279,26 @@ The number of columns in the primary key is not explicitly limited. Depending on ClickHouse sorts data by primary key, so the higher the consistency, the better the compression. -- Provide additional logic when merging data parts in the [CollapsingMergeTree](../../../engines/table-engines/mergetree-family/collapsingmergetree.md#table_engine-collapsingmergetree) and [SummingMergeTree](../../../engines/table-engines/mergetree-family/summingmergetree.md) engines. +- Provide additional logic when merging data parts in the [CollapsingMergeTree](/docs/en/engines/table-engines/mergetree-family/collapsingmergetree.md/#table_engine-collapsingmergetree) and [SummingMergeTree](/docs/en/engines/table-engines/mergetree-family/summingmergetree.md) engines. In this case it makes sense to specify the *sorting key* that is different from the primary key. A long primary key will negatively affect the insert performance and memory consumption, but extra columns in the primary key do not affect ClickHouse performance during `SELECT` queries. -You can create a table without a primary key using the `ORDER BY tuple()` syntax. In this case, ClickHouse stores data in the order of inserting. If you want to save data order when inserting data by `INSERT ... SELECT` queries, set [max_insert_threads = 1](../../../operations/settings/settings.md#settings-max-insert-threads). +You can create a table without a primary key using the `ORDER BY tuple()` syntax. In this case, ClickHouse stores data in the order of inserting. If you want to save data order when inserting data by `INSERT ... SELECT` queries, set [max_insert_threads = 1](/docs/en/operations/settings/settings.md/#settings-max-insert-threads). -To select data in the initial order, use [single-threaded](../../../operations/settings/settings.md#settings-max_threads) `SELECT` queries. +To select data in the initial order, use [single-threaded](/docs/en/operations/settings/settings.md/#settings-max_threads) `SELECT` queries. ### Choosing a Primary Key that Differs from the Sorting Key {#choosing-a-primary-key-that-differs-from-the-sorting-key} It is possible to specify a primary key (an expression with values that are written in the index file for each mark) that is different from the sorting key (an expression for sorting the rows in data parts). In this case the primary key expression tuple must be a prefix of the sorting key expression tuple. -This feature is helpful when using the [SummingMergeTree](../../../engines/table-engines/mergetree-family/summingmergetree.md) and -[AggregatingMergeTree](../../../engines/table-engines/mergetree-family/aggregatingmergetree.md) table engines. In a common case when using these engines, the table has two types of columns: *dimensions* and *measures*. Typical queries aggregate values of measure columns with arbitrary `GROUP BY` and filtering by dimensions. Because SummingMergeTree and AggregatingMergeTree aggregate rows with the same value of the sorting key, it is natural to add all dimensions to it. As a result, the key expression consists of a long list of columns and this list must be frequently updated with newly added dimensions. 
+This feature is helpful when using the [SummingMergeTree](/docs/en/engines/table-engines/mergetree-family/summingmergetree.md) and +[AggregatingMergeTree](/docs/en/engines/table-engines/mergetree-family/aggregatingmergetree.md) table engines. In a common case when using these engines, the table has two types of columns: *dimensions* and *measures*. Typical queries aggregate values of measure columns with arbitrary `GROUP BY` and filtering by dimensions. Because SummingMergeTree and AggregatingMergeTree aggregate rows with the same value of the sorting key, it is natural to add all dimensions to it. As a result, the key expression consists of a long list of columns and this list must be frequently updated with newly added dimensions. In this case it makes sense to leave only a few columns in the primary key that will provide efficient range scans and add the remaining dimension columns to the sorting key tuple. -[ALTER](../../../sql-reference/statements/alter/index.md) of the sorting key is a lightweight operation because when a new column is simultaneously added to the table and to the sorting key, existing data parts do not need to be changed. Since the old sorting key is a prefix of the new sorting key and there is no data in the newly added column, the data is sorted by both the old and new sorting keys at the moment of table modification. +[ALTER](/docs/en/sql-reference/statements/alter/overview.md) of the sorting key is a lightweight operation because when a new column is simultaneously added to the table and to the sorting key, existing data parts do not need to be changed. Since the old sorting key is a prefix of the new sorting key and there is no data in the newly added column, the data is sorted by both the old and new sorting keys at the moment of table modification. ### Use of Indexes and Partitions in Queries {#use-of-indexes-and-partitions-in-queries} @@ -342,7 +342,7 @@ In the example below, the index can’t be used. SELECT count() FROM table WHERE CounterID = 34 OR URL LIKE '%upyachka%' ``` -To check whether ClickHouse can use the index when running a query, use the settings [force_index_by_date](../../../operations/settings/settings.md#settings-force_index_by_date) and [force_primary_key](../../../operations/settings/settings.md#force-primary-key). +To check whether ClickHouse can use the index when running a query, use the settings [force_index_by_date](/docs/en/operations/settings/settings.md/#settings-force_index_by_date) and [force_primary_key](/docs/en/operations/settings/settings.md/#force-primary-key). The key for partitioning by month allows reading only those data blocks which contain dates from the proper range. In this case, the data block may contain data for many dates (up to an entire month). Within a block, data is sorted by primary key, which might not contain the date as the first column. Because of this, using a query with only a date condition that does not specify the primary key prefix will cause more data to be read than for a single date. @@ -400,7 +400,7 @@ Stores unique values of the specified expression (no more than `max_rows` rows, #### `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)` -Stores a [Bloom filter](https://en.wikipedia.org/wiki/Bloom_filter) that contains all ngrams from a block of data. Works only with datatypes: [String](../../../sql-reference/data-types/string.md), [FixedString](../../../sql-reference/data-types/fixedstring.md) and [Map](../../../sql-reference/data-types/map.md). 
Can be used for optimization of `EQUALS`, `LIKE` and `IN` expressions. +Stores a [Bloom filter](https://en.wikipedia.org/wiki/Bloom_filter) that contains all ngrams from a block of data. Works only with datatypes: [String](/docs/en/sql-reference/data-types/string.md), [FixedString](/docs/en/sql-reference/data-types/fixedstring.md) and [Map](/docs/en/sql-reference/data-types/map.md). Can be used for optimization of `EQUALS`, `LIKE` and `IN` expressions. - `n` — ngram size, - `size_of_bloom_filter_in_bytes` — Bloom filter size in bytes (you can use large values here, for example, 256 or 512, because it can be compressed well). @@ -417,11 +417,11 @@ The optional `false_positive` parameter is the probability of receiving a false Supported data types: `Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`, `Array`, `LowCardinality`, `Nullable`, `UUID`, `Map`. -For `Map` data type client can specify if index should be created for keys or values using [mapKeys](../../../sql-reference/functions/tuple-map-functions.md#mapkeys) or [mapValues](../../../sql-reference/functions/tuple-map-functions.md#mapvalues) function. +For `Map` data type client can specify if index should be created for keys or values using [mapKeys](/docs/en/sql-reference/functions/tuple-map-functions.md/#mapkeys) or [mapValues](/docs/en/sql-reference/functions/tuple-map-functions.md/#mapvalues) function. There are also special-purpose and experimental indexes to support approximate nearest neighbor (ANN) queries. See [here](annindexes.md) for details. -The following functions can use the filter: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions), [notIn](../../../sql-reference/functions/in-functions), [has](../../../sql-reference/functions/array-functions#hasarr-elem), [hasAny](../../../sql-reference/functions/array-functions#hasany), [hasAll](../../../sql-reference/functions/array-functions#hasall). +The following functions can use the filter: [equals](/docs/en/sql-reference/functions/comparison-functions.md), [notEquals](/docs/en/sql-reference/functions/comparison-functions.md), [in](/docs/en/sql-reference/functions/in-functions), [notIn](/docs/en/sql-reference/functions/in-functions), [has](/docs/en/sql-reference/functions/array-functions#hasarr-elem), [hasAny](/docs/en/sql-reference/functions/array-functions#hasany), [hasAll](/docs/en/sql-reference/functions/array-functions#hasall). Example of index creation for `Map` data type @@ -445,21 +445,21 @@ The `set` index can be used with all functions. 
Function subsets for other index | Function (operator) / Index | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter | |------------------------------------------------------------------------------------------------------------|-------------|--------|-------------|-------------|---------------| -| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [like](../../../sql-reference/functions/string-search-functions.md#function-like) | ✔ | ✔ | ✔ | ✔ | ✗ | -| [notLike](../../../sql-reference/functions/string-search-functions.md#function-notlike) | ✔ | ✔ | ✔ | ✔ | ✗ | -| [startsWith](../../../sql-reference/functions/string-functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | -| [endsWith](../../../sql-reference/functions/string-functions.md#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | -| [multiSearchAny](../../../sql-reference/functions/string-search-functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | -| [in](../../../sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [notIn](../../../sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | -| [less (<)](../../../sql-reference/functions/comparison-functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [greater (>)](../../../sql-reference/functions/comparison-functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [lessOrEquals (<=)](../../../sql-reference/functions/comparison-functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [greaterOrEquals (>=)](../../../sql-reference/functions/comparison-functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [empty](../../../sql-reference/functions/array-functions#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | -| [notEmpty](../../../sql-reference/functions/array-functions#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [equals (=, ==)](/docs/en/sql-reference/functions/comparison-functions.md/#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notEquals(!=, <>)](/docs/en/sql-reference/functions/comparison-functions.md/#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [like](/docs/en/sql-reference/functions/string-search-functions.md/#function-like) | ✔ | ✔ | ✔ | ✔ | ✗ | +| [notLike](/docs/en/sql-reference/functions/string-search-functions.md/#function-notlike) | ✔ | ✔ | ✔ | ✔ | ✗ | +| [startsWith](/docs/en/sql-reference/functions/string-functions.md/#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | +| [endsWith](/docs/en/sql-reference/functions/string-functions.md/#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | +| [multiSearchAny](/docs/en/sql-reference/functions/string-search-functions.md/#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | +| [in](/docs/en/sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [notIn](/docs/en/sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | +| [less (<)](/docs/en/sql-reference/functions/comparison-functions.md/#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [greater (>)](/docs/en/sql-reference/functions/comparison-functions.md/#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [lessOrEquals (<=)](/docs/en/sql-reference/functions/comparison-functions.md/#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [greaterOrEquals (>=)](/docs/en/sql-reference/functions/comparison-functions.md/#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | +| [empty](/docs/en/sql-reference/functions/array-functions#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | +| 
[notEmpty](/docs/en/sql-reference/functions/array-functions#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | | hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | Functions with a constant argument that is less than ngram size can’t be used by `ngrambf_v1` for query optimization. @@ -485,16 +485,16 @@ For example: ## Approximate Nearest Neighbor Search Indexes [experimental] {#table_engines-ANNIndex} -In addition to skip indices, there are also [Approximate Nearest Neighbor Search Indexes](../../../engines/table-engines/mergetree-family/annindexes.md). +In addition to skip indices, there are also [Approximate Nearest Neighbor Search Indexes](/docs/en/engines/table-engines/mergetree-family/annindexes.md). ## Projections {#projections} -Projections are like [materialized views](../../../sql-reference/statements/create/view.md#materialized) but defined in part-level. It provides consistency guarantees along with automatic usage in queries. +Projections are like [materialized views](/docs/en/sql-reference/statements/create/view.md/#materialized) but defined in part-level. It provides consistency guarantees along with automatic usage in queries. :::note -When you are implementing projections you should also consider the [force_optimize_projection](../../../operations/settings/settings.md#force-optimize-projection) setting. +When you are implementing projections you should also consider the [force_optimize_projection](/docs/en/operations/settings/settings.md/#force-optimize-projection) setting. ::: -Projections are not supported in the `SELECT` statements with the [FINAL](../../../sql-reference/statements/select/from.md#select-from-final) modifier. +Projections are not supported in the `SELECT` statements with the [FINAL](/docs/en/sql-reference/statements/select/from.md/#select-from-final) modifier. ### Projection Query {#projection-query} A projection query is what defines a projection. It implicitly selects data from the parent table. @@ -504,7 +504,7 @@ A projection query is what defines a projection. It implicitly selects data from SELECT [GROUP BY] [ORDER BY] ``` -Projections can be modified or dropped with the [ALTER](../../../sql-reference/statements/alter/projection.md) statement. +Projections can be modified or dropped with the [ALTER](/docs/en/sql-reference/statements/alter/projection.md) statement. ### Projection Storage {#projection-storage} Projections are stored inside the part directory. It's similar to an index but contains a subdirectory that stores an anonymous `MergeTree` table's part. The table is induced by the definition query of the projection. If there is a `GROUP BY` clause, the underlying storage engine becomes [AggregatingMergeTree](aggregatingmergetree.md), and all aggregate functions are converted to `AggregateFunction`. If there is an `ORDER BY` clause, the `MergeTree` table uses it as its primary key expression. During the merge process the projection part is merged via its storage's merge routine. The checksum of the parent table's part is combined with the projection's part. Other maintenance jobs are similar to skip indices. @@ -526,7 +526,7 @@ Determines the lifetime of values. The `TTL` clause can be set for the whole table and for each individual column. Table-level `TTL` can also specify the logic of automatic moving data between disks and volumes, or recompressing parts where all the data has been expired. -Expressions must evaluate to [Date](../../../sql-reference/data-types/date.md) or [DateTime](../../../sql-reference/data-types/datetime.md) data type. 
+Expressions must evaluate to [Date](/docs/en/sql-reference/data-types/date.md) or [DateTime](/docs/en/sql-reference/data-types/datetime.md) data type. **Syntax** @@ -537,7 +537,7 @@ TTL time_column TTL time_column + interval ``` -To define `interval`, use [time interval](../../../sql-reference/operators/index.md#operators-datetime) operators, for example: +To define `interval`, use [time interval](/docs/en/sql-reference/operators/index.md/#operators-datetime) operators, for example: ``` sql TTL date_time + INTERVAL 1 MONTH @@ -684,11 +684,11 @@ Data with an expired `TTL` is removed when ClickHouse merges data parts. When ClickHouse detects that data is expired, it performs an off-schedule merge. To control the frequency of such merges, you can set `merge_with_ttl_timeout`. If the value is too low, it will perform many off-schedule merges that may consume a lot of resources. -If you perform the `SELECT` query between merges, you may get expired data. To avoid it, use the [OPTIMIZE](../../../sql-reference/statements/optimize.md) query before `SELECT`. +If you perform the `SELECT` query between merges, you may get expired data. To avoid it, use the [OPTIMIZE](/docs/en/sql-reference/statements/optimize.md) query before `SELECT`. **See Also** -- [ttl_only_drop_parts](../../../operations/settings/settings.md#ttl_only_drop_parts) setting +- [ttl_only_drop_parts](/docs/en/operations/settings/settings.md/#ttl_only_drop_parts) setting ## Using Multiple Block Devices for Data Storage {#table_engine-mergetree-multiple-volumes} @@ -697,16 +697,16 @@ If you perform the `SELECT` query between merges, you may get expired data. To a `MergeTree` family table engines can store data on multiple block devices. For example, it can be useful when the data of a certain table are implicitly split into “hot” and “cold”. The most recent data is regularly requested but requires only a small amount of space. On the contrary, the fat-tailed historical data is requested rarely. If several disks are available, the “hot” data may be located on fast disks (for example, NVMe SSDs or in memory), while the “cold” data - on relatively slow ones (for example, HDD). -Data part is the minimum movable unit for `MergeTree`-engine tables. The data belonging to one part are stored on one disk. Data parts can be moved between disks in the background (according to user settings) as well as by means of the [ALTER](../../../sql-reference/statements/alter/partition.md#alter_move-partition) queries. +Data part is the minimum movable unit for `MergeTree`-engine tables. The data belonging to one part are stored on one disk. Data parts can be moved between disks in the background (according to user settings) as well as by means of the [ALTER](/docs/en/sql-reference/statements/alter/partition.md/#alter_move-partition) queries. ### Terms {#terms} - Disk — Block device mounted to the filesystem. -- Default disk — Disk that stores the path specified in the [path](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-path) server setting. +- Default disk — Disk that stores the path specified in the [path](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-path) server setting. - Volume — Ordered set of equal disks (similar to [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures)). - Storage policy — Set of volumes and the rules for moving data between them. 
-The names given to the described entities can be found in the system tables, [system.storage_policies](../../../operations/system-tables/storage_policies.md#system_tables-storage_policies) and [system.disks](../../../operations/system-tables/disks.md#system_tables-disks). To apply one of the configured storage policies for a table, use the `storage_policy` setting of `MergeTree`-engine family tables. +The names given to the described entities can be found in the system tables, [system.storage_policies](/docs/en/operations/system-tables/storage_policies.md/#system_tables-storage_policies) and [system.disks](/docs/en/operations/system-tables/disks.md/#system_tables-disks). To apply one of the configured storage policies for a table, use the `storage_policy` setting of `MergeTree`-engine family tables. ### Configuration {#table_engine-mergetree-multiple-volumes_configure} @@ -853,16 +853,16 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd' The `default` storage policy implies using only one volume, which consists of only one disk given in ``. You could change storage policy after table creation with [ALTER TABLE ... MODIFY SETTING] query, new policy should include all old disks and volumes with same names. -The number of threads performing background moves of data parts can be changed by [background_move_pool_size](../../../operations/settings/settings.md#background_move_pool_size) setting. +The number of threads performing background moves of data parts can be changed by [background_move_pool_size](/docs/en/operations/settings/settings.md/#background_move_pool_size) setting. ### Details {#details} In the case of `MergeTree` tables, data is getting to disk in different ways: - As a result of an insert (`INSERT` query). -- During background merges and [mutations](../../../sql-reference/statements/alter/index.md#alter-mutations). +- During background merges and [mutations](/docs/en/sql-reference/statements/alter/overview.md/#alter-mutations). - When downloading from another replica. -- As a result of partition freezing [ALTER TABLE … FREEZE PARTITION](../../../sql-reference/statements/alter/partition.md#alter_freeze-partition). +- As a result of partition freezing [ALTER TABLE … FREEZE PARTITION](/docs/en/sql-reference/statements/alter/partition.md/#alter_freeze-partition). In all these cases except for mutations and partition freezing, a part is stored on a volume and a disk according to the given storage policy: @@ -872,16 +872,16 @@ In all these cases except for mutations and partition freezing, a part is stored Under the hood, mutations and partition freezing make use of [hard links](https://en.wikipedia.org/wiki/Hard_link). Hard links between different disks are not supported, therefore in such cases the resulting parts are stored on the same disks as the initial ones. In the background, parts are moved between volumes on the basis of the amount of free space (`move_factor` parameter) according to the order the volumes are declared in the configuration file. -Data is never transferred from the last one and into the first one. One may use system tables [system.part_log](../../../operations/system-tables/part_log.md#system_tables-part-log) (field `type = MOVE_PART`) and [system.parts](../../../operations/system-tables/parts.md#system_tables-parts) (fields `path` and `disk`) to monitor background moves. Also, the detailed information can be found in server logs. +Data is never transferred from the last one and into the first one. 
One may use system tables [system.part_log](/docs/en/operations/system-tables/part_log.md/#system_tables-part-log) (field `type = MOVE_PART`) and [system.parts](/docs/en/operations/system-tables/parts.md/#system_tables-parts) (fields `path` and `disk`) to monitor background moves. Also, the detailed information can be found in server logs. -User can force moving a part or a partition from one volume to another using the query [ALTER TABLE … MOVE PART\|PARTITION … TO VOLUME\|DISK …](../../../sql-reference/statements/alter/partition.md#alter_move-partition), all the restrictions for background operations are taken into account. The query initiates a move on its own and does not wait for background operations to be completed. User will get an error message if not enough free space is available or if any of the required conditions are not met. +User can force moving a part or a partition from one volume to another using the query [ALTER TABLE … MOVE PART\|PARTITION … TO VOLUME\|DISK …](/docs/en/sql-reference/statements/alter/partition.md/#alter_move-partition), all the restrictions for background operations are taken into account. The query initiates a move on its own and does not wait for background operations to be completed. User will get an error message if not enough free space is available or if any of the required conditions are not met. Moving data does not interfere with data replication. Therefore, different storage policies can be specified for the same table on different replicas. After the completion of background merges and mutations, old parts are removed only after a certain amount of time (`old_parts_lifetime`). During this time, they are not moved to other volumes or disks. Therefore, until the parts are finally removed, they are still taken into account for evaluation of the occupied disk space. -User can assign new big parts to different disks of a [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures) volume in a balanced way using the [min_bytes_to_rebalance_partition_over_jbod](../../../operations/settings/merge-tree-settings.md#min-bytes-to-rebalance-partition-over-jbod) setting. +User can assign new big parts to different disks of a [JBOD](https://en.wikipedia.org/wiki/Non-RAID_drive_architectures) volume in a balanced way using the [min_bytes_to_rebalance_partition_over_jbod](/docs/en/operations/settings/merge-tree-settings.md/#min-bytes-to-rebalance-partition-over-jbod) setting. ## Using S3 for Data Storage {#table_engine-mergetree-s3} diff --git a/docs/en/engines/table-engines/mergetree-family/replication.md b/docs/en/engines/table-engines/mergetree-family/replication.md index 06faceab8ec..0e208629c2e 100644 --- a/docs/en/engines/table-engines/mergetree-family/replication.md +++ b/docs/en/engines/table-engines/mergetree-family/replication.md @@ -20,7 +20,7 @@ Replication works at the level of an individual table, not the entire server. A Replication does not depend on sharding. Each shard has its own independent replication. -Compressed data for `INSERT` and `ALTER` queries is replicated (for more information, see the documentation for [ALTER](../../../sql-reference/statements/alter/index.md#query_language_queries_alter)). +Compressed data for `INSERT` and `ALTER` queries is replicated (for more information, see the documentation for [ALTER](/docs/en/sql-reference/statements/alter/overview.md/#query_language_queries_alter)). 
`CREATE`, `DROP`, `ATTACH`, `DETACH` and `RENAME` queries are executed on a single server and are not replicated: @@ -28,9 +28,9 @@ Compressed data for `INSERT` and `ALTER` queries is replicated (for more informa - The `DROP TABLE` query deletes the replica located on the server where the query is run. - The `RENAME` query renames the table on one of the replicas. In other words, replicated tables can have different names on different replicas. -ClickHouse uses [ClickHouse Keeper](../../../guides/sre/keeper/clickhouse-keeper.md) for storing replicas meta information. It is possible to use ZooKeeper version 3.4.5 or newer, but ClickHouse Keeper is recommended. +ClickHouse uses [ClickHouse Keeper](/docs/en/guides/sre/keeper/clickhouse-keeper.md) for storing replicas meta information. It is possible to use ZooKeeper version 3.4.5 or newer, but ClickHouse Keeper is recommended. -To use replication, set parameters in the [zookeeper](../../../operations/server-configuration-parameters/settings.md#server-settings_zookeeper) server configuration section. +To use replication, set parameters in the [zookeeper](/docs/en/operations/server-configuration-parameters/settings.md/#server-settings_zookeeper) server configuration section. :::warning Don’t neglect the security setting. ClickHouse supports the `digest` [ACL scheme](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl) of the ZooKeeper security subsystem. @@ -95,21 +95,21 @@ You can specify any existing ZooKeeper cluster and the system will use a directo If ZooKeeper isn’t set in the config file, you can’t create replicated tables, and any existing replicated tables will be read-only. -ZooKeeper is not used in `SELECT` queries because replication does not affect the performance of `SELECT` and queries run just as fast as they do for non-replicated tables. When querying distributed replicated tables, ClickHouse behavior is controlled by the settings [max_replica_delay_for_distributed_queries](../../../operations/settings/settings.md#settings-max_replica_delay_for_distributed_queries) and [fallback_to_stale_replicas_for_distributed_queries](../../../operations/settings/settings.md#settings-fallback_to_stale_replicas_for_distributed_queries). +ZooKeeper is not used in `SELECT` queries because replication does not affect the performance of `SELECT` and queries run just as fast as they do for non-replicated tables. When querying distributed replicated tables, ClickHouse behavior is controlled by the settings [max_replica_delay_for_distributed_queries](/docs/en/operations/settings/settings.md/#settings-max_replica_delay_for_distributed_queries) and [fallback_to_stale_replicas_for_distributed_queries](/docs/en/operations/settings/settings.md/#settings-fallback_to_stale_replicas_for_distributed_queries). For each `INSERT` query, approximately ten entries are added to ZooKeeper through several transactions. (To be more precise, this is for each inserted block of data; an INSERT query contains one block or one block per `max_insert_block_size = 1048576` rows.) This leads to slightly longer latencies for `INSERT` compared to non-replicated tables. But if you follow the recommendations to insert data in batches of no more than one `INSERT` per second, it does not create any problems. The entire ClickHouse cluster used for coordinating one ZooKeeper cluster has a total of several hundred `INSERTs` per second. The throughput on data inserts (the number of rows per second) is just as high as for non-replicated data. 
For very large clusters, you can use different ZooKeeper clusters for different shards. However, from our experience this has not proven necessary based on production clusters with approximately 300 servers. -Replication is asynchronous and multi-master. `INSERT` queries (as well as `ALTER`) can be sent to any available server. Data is inserted on the server where the query is run, and then it is copied to the other servers. Because it is asynchronous, recently inserted data appears on the other replicas with some latency. If part of the replicas are not available, the data is written when they become available. If a replica is available, the latency is the amount of time it takes to transfer the block of compressed data over the network. The number of threads performing background tasks for replicated tables can be set by [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size) setting. +Replication is asynchronous and multi-master. `INSERT` queries (as well as `ALTER`) can be sent to any available server. Data is inserted on the server where the query is run, and then it is copied to the other servers. Because it is asynchronous, recently inserted data appears on the other replicas with some latency. If part of the replicas are not available, the data is written when they become available. If a replica is available, the latency is the amount of time it takes to transfer the block of compressed data over the network. The number of threads performing background tasks for replicated tables can be set by [background_schedule_pool_size](/docs/en/operations/settings/settings.md/#background_schedule_pool_size) setting. -`ReplicatedMergeTree` engine uses a separate thread pool for replicated fetches. Size of the pool is limited by the [background_fetches_pool_size](../../../operations/settings/settings.md#background_fetches_pool_size) setting which can be tuned with a server restart. +`ReplicatedMergeTree` engine uses a separate thread pool for replicated fetches. Size of the pool is limited by the [background_fetches_pool_size](/docs/en/operations/settings/settings.md/#background_fetches_pool_size) setting which can be tuned with a server restart. By default, an INSERT query waits for confirmation of writing the data from only one replica. If the data was successfully written to only one replica and the server with this replica ceases to exist, the stored data will be lost. To enable getting confirmation of data writes from multiple replicas, use the `insert_quorum` option. Each block of data is written atomically. The INSERT query is divided into blocks up to `max_insert_block_size = 1048576` rows. In other words, if the `INSERT` query has less than 1048576 rows, it is made atomically. -Data blocks are deduplicated. For multiple writes of the same data block (data blocks of the same size containing the same rows in the same order), the block is only written once. The reason for this is in case of network failures when the client application does not know if the data was written to the DB, so the `INSERT` query can simply be repeated. It does not matter which replica INSERTs were sent to with identical data. `INSERTs` are idempotent. Deduplication parameters are controlled by [merge_tree](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-merge_tree) server settings. +Data blocks are deduplicated. 
For multiple writes of the same data block (data blocks of the same size containing the same rows in the same order), the block is only written once. The reason for this is in case of network failures when the client application does not know if the data was written to the DB, so the `INSERT` query can simply be repeated. It does not matter which replica INSERTs were sent to with identical data. `INSERTs` are idempotent. Deduplication parameters are controlled by [merge_tree](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-merge_tree) server settings. During replication, only the source data to insert is transferred over the network. Further data transformation (merging) is coordinated and performed on all the replicas in the same way. This minimizes network usage, which means that replication works well when replicas reside in different datacenters. (Note that duplicating data in different datacenters is the main goal of replication.) @@ -165,7 +165,7 @@ CREATE TABLE table_name -As the example shows, these parameters can contain substitutions in curly brackets. The substituted values are taken from the [macros](../../../operations/server-configuration-parameters/settings.md#macros) section of the configuration file. +As the example shows, these parameters can contain substitutions in curly brackets. The substituted values are taken from the [macros](/docs/en/operations/server-configuration-parameters/settings.md/#macros) section of the configuration file. Example: @@ -295,10 +295,10 @@ If the data in ClickHouse Keeper was lost or damaged, you can save data by movin **See Also** -- [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size) -- [background_fetches_pool_size](../../../operations/settings/settings.md#background_fetches_pool_size) -- [execute_merges_on_single_replica_time_threshold](../../../operations/settings/settings.md#execute-merges-on-single-replica-time-threshold) -- [max_replicated_fetches_network_bandwidth](../../../operations/settings/merge-tree-settings.md#max_replicated_fetches_network_bandwidth) -- [max_replicated_sends_network_bandwidth](../../../operations/settings/merge-tree-settings.md#max_replicated_sends_network_bandwidth) +- [background_schedule_pool_size](/docs/en/operations/settings/settings.md/#background_schedule_pool_size) +- [background_fetches_pool_size](/docs/en/operations/settings/settings.md/#background_fetches_pool_size) +- [execute_merges_on_single_replica_time_threshold](/docs/en/operations/settings/settings.md/#execute-merges-on-single-replica-time-threshold) +- [max_replicated_fetches_network_bandwidth](/docs/en/operations/settings/merge-tree-settings.md/#max_replicated_fetches_network_bandwidth) +- [max_replicated_sends_network_bandwidth](/docs/en/operations/settings/merge-tree-settings.md/#max_replicated_sends_network_bandwidth) [Original article](https://clickhouse.com/docs/en/operations/table_engines/replication/) diff --git a/docs/en/engines/table-engines/special/join.md b/docs/en/engines/table-engines/special/join.md index 161896e5550..36b2e59bcf8 100644 --- a/docs/en/engines/table-engines/special/join.md +++ b/docs/en/engines/table-engines/special/join.md @@ -6,10 +6,10 @@ sidebar_label: Join # Join Table Engine -Optional prepared data structure for usage in [JOIN](../../../sql-reference/statements/select/join.md#select-join) operations. 
+Optional prepared data structure for usage in [JOIN](/docs/en/sql-reference/statements/select/join.md/#select-join) operations. :::note -This is not an article about the [JOIN clause](../../../sql-reference/statements/select/join.md#select-join) itself. +This is not an article about the [JOIN clause](/docs/en/sql-reference/statements/select/join.md/#select-join) itself. ::: ## Creating a Table {#creating-a-table} @@ -22,17 +22,17 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ) ENGINE = Join(join_strictness, join_type, k1[, k2, ...]) ``` -See the detailed description of the [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query) query. +See the detailed description of the [CREATE TABLE](/docs/en/sql-reference/statements/create/table.md/#create-table-query) query. ## Engine Parameters ### join_strictness -`join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-types). +`join_strictness` – [JOIN strictness](/docs/en/sql-reference/statements/select/join.md/#select-join-types). ### join_type -`join_type` – [JOIN type](../../../sql-reference/statements/select/join.md#select-join-types). +`join_type` – [JOIN type](/docs/en/sql-reference/statements/select/join.md/#select-join-types). ### Key columns @@ -55,11 +55,11 @@ You can use `INSERT` queries to add data to the `Join`-engine tables. If the tab Main use-cases for `Join`-engine tables are following: - Place the table to the right side in a `JOIN` clause. -- Call the [joinGet](../../../sql-reference/functions/other-functions.md#joinget) function, which lets you extract data from the table the same way as from a dictionary. +- Call the [joinGet](/docs/en/sql-reference/functions/other-functions.md/#joinget) function, which lets you extract data from the table the same way as from a dictionary. ### Deleting Data {#deleting-data} -`ALTER DELETE` queries for `Join`-engine tables are implemented as [mutations](../../../sql-reference/statements/alter/index.md#mutations). `DELETE` mutation reads filtered data and overwrites data of memory and disk. +`ALTER DELETE` queries for `Join`-engine tables are implemented as [mutations](/docs/en/sql-reference/statements/alter/overview.md/#mutations). `DELETE` mutation reads filtered data and overwrites data of memory and disk. 
### Limitations and Settings {#join-limitations-and-settings} @@ -67,30 +67,30 @@ When creating a table, the following settings are applied: #### join_use_nulls -[join_use_nulls](../../../operations/settings/settings.md#join_use_nulls) +[join_use_nulls](/docs/en/operations/settings/settings.md/#join_use_nulls) #### max_rows_in_join -[max_rows_in_join](../../../operations/settings/query-complexity.md#settings-max_rows_in_join) +[max_rows_in_join](/docs/en/operations/settings/query-complexity.md/#settings-max_rows_in_join) #### max_bytes_in_join -[max_bytes_in_join](../../../operations/settings/query-complexity.md#settings-max_bytes_in_join) +[max_bytes_in_join](/docs/en/operations/settings/query-complexity.md/#settings-max_bytes_in_join) #### join_overflow_mode -[join_overflow_mode](../../../operations/settings/query-complexity.md#settings-join_overflow_mode) +[join_overflow_mode](/docs/en/operations/settings/query-complexity.md/#settings-join_overflow_mode) #### join_any_take_last_row -[join_any_take_last_row](../../../operations/settings/settings.md#settings-join_any_take_last_row) +[join_any_take_last_row](/docs/en/operations/settings/settings.md/#settings-join_any_take_last_row) #### join_use_nulls -[persistent](../../../operations/settings/settings.md#persistent) +[persistent](/docs/en/operations/settings/settings.md/#persistent) The `Join`-engine tables can’t be used in `GLOBAL JOIN` operations. -The `Join`-engine allows to specify [join_use_nulls](../../../operations/settings/settings.md#join_use_nulls) setting in the `CREATE TABLE` statement. [SELECT](../../../sql-reference/statements/select/index.md) query should have the same `join_use_nulls` value. +The `Join`-engine allows to specify [join_use_nulls](/docs/en/operations/settings/settings.md/#join_use_nulls) setting in the `CREATE TABLE` statement. [SELECT](/docs/en/sql-reference/statements/select/index.md) query should have the same `join_use_nulls` value. ## Usage Examples {#example} diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 7f7c14817ba..38077a2b891 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -70,7 +70,7 @@ Another use case of `prefer_global_in_and_join` is accessing tables created by **See also:** -- [Distributed subqueries](../../sql-reference/operators/in.md#select-distributed-subqueries) for more information on how to use `GLOBAL IN`/`GLOBAL JOIN` +- [Distributed subqueries](../../sql-reference/operators/in.md/#select-distributed-subqueries) for more information on how to use `GLOBAL IN`/`GLOBAL JOIN` ## enable_optimize_predicate_expression {#enable-optimize-predicate-expression} @@ -170,7 +170,7 @@ It makes sense to disable it if the server has millions of tiny tables that are ## function_range_max_elements_in_block {#settings-function_range_max_elements_in_block} -Sets the safety threshold for data volume generated by function [range](../../sql-reference/functions/array-functions.md#range). Defines the maximum number of values generated by function per block of data (sum of array sizes for every row in a block). +Sets the safety threshold for data volume generated by function [range](../../sql-reference/functions/array-functions.md/#range). Defines the maximum number of values generated by function per block of data (sum of array sizes for every row in a block). Possible values: @@ -273,10 +273,10 @@ Default value: 0. 
## insert_null_as_default {#insert_null_as_default} -Enables or disables the insertion of [default values](../../sql-reference/statements/create/table.md#create-default-values) instead of [NULL](../../sql-reference/syntax.md#null-literal) into columns with not [nullable](../../sql-reference/data-types/nullable.md#data_type-nullable) data type. +Enables or disables the insertion of [default values](../../sql-reference/statements/create/table.md/#create-default-values) instead of [NULL](../../sql-reference/syntax.md/#null-literal) into columns with not [nullable](../../sql-reference/data-types/nullable.md/#data_type-nullable) data type. If column type is not nullable and this setting is disabled, then inserting `NULL` causes an exception. If column type is nullable, then `NULL` values are inserted as is, regardless of this setting. -This setting is applicable to [INSERT ... SELECT](../../sql-reference/statements/insert-into.md#insert_query_insert-select) queries. Note that `SELECT` subqueries may be concatenated with `UNION ALL` clause. +This setting is applicable to [INSERT ... SELECT](../../sql-reference/statements/insert-into.md/#insert_query_insert-select) queries. Note that `SELECT` subqueries may be concatenated with `UNION ALL` clause. Possible values: @@ -287,7 +287,7 @@ Default value: `1`. ## join_default_strictness {#settings-join_default_strictness} -Sets default strictness for [JOIN clauses](../../sql-reference/statements/select/join.md#select-join). +Sets default strictness for [JOIN clauses](../../sql-reference/statements/select/join.md/#select-join). Possible values: @@ -322,7 +322,7 @@ When using `partial_merge` algorithm, ClickHouse sorts the data and dumps it to - `direct` - can be applied when the right storage supports key-value requests. -The `direct` algorithm performs a lookup in the right table using rows from the left table as keys. It's supported only by special storage such as [Dictionary](../../engines/table-engines/special/dictionary.md#dictionary) or [EmbeddedRocksDB](../../engines/table-engines/integrations/embedded-rocksdb.md) and only the `LEFT` and `INNER` JOINs. +The `direct` algorithm performs a lookup in the right table using rows from the left table as keys. It's supported only by special storage such as [Dictionary](../../engines/table-engines/special/dictionary.md/#dictionary) or [EmbeddedRocksDB](../../engines/table-engines/integrations/embedded-rocksdb.md) and only the `LEFT` and `INNER` JOINs. - `auto` — try `hash` join and switch on the fly to another algorithm if the memory limit is violated. @@ -348,7 +348,7 @@ Default value: 0. See also: -- [JOIN clause](../../sql-reference/statements/select/join.md#select-join) +- [JOIN clause](../../sql-reference/statements/select/join.md/#select-join) - [Join table engine](../../engines/table-engines/special/join.md) - [join_default_strictness](#settings-join_default_strictness) @@ -359,7 +359,7 @@ Sets the type of [JOIN](../../sql-reference/statements/select/join.md) behaviour Possible values: - 0 — The empty cells are filled with the default value of the corresponding field type. -- 1 — `JOIN` behaves the same way as in standard SQL. The type of the corresponding field is converted to [Nullable](../../sql-reference/data-types/nullable.md#data_type-nullable), and empty cells are filled with [NULL](../../sql-reference/syntax.md). +- 1 — `JOIN` behaves the same way as in standard SQL. 
The type of the corresponding field is converted to [Nullable](../../sql-reference/data-types/nullable.md/#data_type-nullable), and empty cells are filled with [NULL](../../sql-reference/syntax.md). Default value: 0. @@ -431,7 +431,7 @@ Default value: 0. See also: -- [JOIN strictness](../../sql-reference/statements/select/join.md#join-settings) +- [JOIN strictness](../../sql-reference/statements/select/join.md/#join-settings) ## temporary_files_codec {#temporary_files_codec} @@ -532,7 +532,7 @@ Default value: 8. If ClickHouse should read more than `merge_tree_max_rows_to_use_cache` rows in one query, it does not use the cache of uncompressed blocks. -The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries. This setting protects the cache from trashing by queries that read a large amount of data. The [uncompressed_cache_size](../../operations/server-configuration-parameters/settings.md#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks. +The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries. This setting protects the cache from trashing by queries that read a large amount of data. The [uncompressed_cache_size](../../operations/server-configuration-parameters/settings.md/#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks. Possible values: @@ -544,7 +544,7 @@ Default value: 128 ✕ 8192. If ClickHouse should read more than `merge_tree_max_bytes_to_use_cache` bytes in one query, it does not use the cache of uncompressed blocks. -The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries. This setting protects the cache from trashing by queries that read a large amount of data. The [uncompressed_cache_size](../../operations/server-configuration-parameters/settings.md#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks. +The cache of uncompressed blocks stores data extracted for queries. ClickHouse uses this cache to speed up responses to repeated small queries. This setting protects the cache from trashing by queries that read a large amount of data. The [uncompressed_cache_size](../../operations/server-configuration-parameters/settings.md/#server-settings-uncompressed_cache_size) server setting defines the size of the cache of uncompressed blocks. Possible values: @@ -594,7 +594,7 @@ Default value: `1`. Setting up query logging. -Queries sent to ClickHouse with this setup are logged according to the rules in the [query_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server configuration parameter. +Queries sent to ClickHouse with this setup are logged according to the rules in the [query_log](../../operations/server-configuration-parameters/settings.md/#server_configuration_parameters-query-log) server configuration parameter. Example: @@ -639,7 +639,7 @@ log_queries_min_type='EXCEPTION_WHILE_PROCESSING' Setting up query threads logging. -Query threads log into [system.query_thread_log](../../operations/system-tables/query_thread_log.md) table. This setting have effect only when [log_queries](#settings-log-queries) is true. 
Queries’ threads run by ClickHouse with this setup are logged according to the rules in the [query_thread_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server configuration parameter. +Query threads log into [system.query_thread_log](../../operations/system-tables/query_thread_log.md) table. This setting have effect only when [log_queries](#settings-log-queries) is true. Queries’ threads run by ClickHouse with this setup are logged according to the rules in the [query_thread_log](../../operations/server-configuration-parameters/settings.md/#server_configuration_parameters-query_thread_log) server configuration parameter. Possible values: @@ -658,7 +658,7 @@ log_query_threads=1 Setting up query views logging. -When a query run by ClickHouse with this setup on has associated views (materialized or live views), they are logged in the [query_views_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_views_log) server configuration parameter. +When a query run by ClickHouse with this setup on has associated views (materialized or live views), they are logged in the [query_views_log](../../operations/server-configuration-parameters/settings.md/#server_configuration_parameters-query_views_log) server configuration parameter. Example: @@ -884,7 +884,7 @@ Default value: `5`. ## max_replicated_fetches_network_bandwidth_for_server {#max_replicated_fetches_network_bandwidth_for_server} -Limits the maximum speed of data exchange over the network in bytes per second for [replicated](../../engines/table-engines/mergetree-family/replication.md) fetches for the server. Only has meaning at server startup. You can also limit the speed for a particular table with [max_replicated_fetches_network_bandwidth](../../operations/settings/merge-tree-settings.md#max_replicated_fetches_network_bandwidth) setting. +Limits the maximum speed of data exchange over the network in bytes per second for [replicated](../../engines/table-engines/mergetree-family/replication.md) fetches for the server. Only has meaning at server startup. You can also limit the speed for a particular table with [max_replicated_fetches_network_bandwidth](../../operations/settings/merge-tree-settings.md/#max_replicated_fetches_network_bandwidth) setting. The setting isn't followed perfectly accurately. @@ -905,7 +905,7 @@ Could be used for throttling speed when replicating the data to add or replace n ## max_replicated_sends_network_bandwidth_for_server {#max_replicated_sends_network_bandwidth_for_server} -Limits the maximum speed of data exchange over the network in bytes per second for [replicated](../../engines/table-engines/mergetree-family/replication.md) sends for the server. Only has meaning at server startup. You can also limit the speed for a particular table with [max_replicated_sends_network_bandwidth](../../operations/settings/merge-tree-settings.md#max_replicated_sends_network_bandwidth) setting. +Limits the maximum speed of data exchange over the network in bytes per second for [replicated](../../engines/table-engines/mergetree-family/replication.md) sends for the server. Only has meaning at server startup. You can also limit the speed for a particular table with [max_replicated_sends_network_bandwidth](../../operations/settings/merge-tree-settings.md/#max_replicated_sends_network_bandwidth) setting. The setting isn't followed perfectly accurately. 
@@ -955,7 +955,7 @@ For more information, see the section “Extreme values”. ## kafka_max_wait_ms {#kafka-max-wait-ms} -The wait time in milliseconds for reading messages from [Kafka](../../engines/table-engines/integrations/kafka.md#kafka) before retry. +The wait time in milliseconds for reading messages from [Kafka](../../engines/table-engines/integrations/kafka.md/#kafka) before retry. Possible values: @@ -977,7 +977,7 @@ Default value: false. ## use_uncompressed_cache {#setting-use_uncompressed_cache} Whether to use a cache of uncompressed blocks. Accepts 0 or 1. By default, 0 (disabled). -Using the uncompressed cache (only for tables in the MergeTree family) can significantly reduce latency and increase throughput when working with a large number of short queries. Enable this setting for users who send frequent short requests. Also pay attention to the [uncompressed_cache_size](../../operations/server-configuration-parameters/settings.md#server-settings-uncompressed_cache_size) configuration parameter (only set in the config file) – the size of uncompressed cache blocks. By default, it is 8 GiB. The uncompressed cache is filled in as needed and the least-used data is automatically deleted. +Using the uncompressed cache (only for tables in the MergeTree family) can significantly reduce latency and increase throughput when working with a large number of short queries. Enable this setting for users who send frequent short requests. Also pay attention to the [uncompressed_cache_size](../../operations/server-configuration-parameters/settings.md/#server-settings-uncompressed_cache_size) configuration parameter (only set in the config file) – the size of uncompressed cache blocks. By default, it is 8 GiB. The uncompressed cache is filled in as needed and the least-used data is automatically deleted. For queries that read at least a somewhat large volume of data (one million rows or more), the uncompressed cache is disabled automatically to save space for truly small queries. This means that you can keep the ‘use_uncompressed_cache’ setting always set to 1. @@ -1124,7 +1124,7 @@ This setting is useful for replicated tables with a sampling key. A query may be - The cluster latency distribution has a long tail, so that querying more servers increases the query overall latency. :::warning -This setting will produce incorrect results when joins or subqueries are involved, and all tables don't meet certain requirements. See [Distributed Subqueries and max_parallel_replicas](../../sql-reference/operators/in.md#max_parallel_replica-subqueries) for more details. +This setting will produce incorrect results when joins or subqueries are involved, and all tables don't meet certain requirements. See [Distributed Subqueries and max_parallel_replicas](../../sql-reference/operators/in.md/#max_parallel_replica-subqueries) for more details. ::: ## compile_expressions {#compile-expressions} @@ -1261,7 +1261,7 @@ Possible values: Default value: 1. By default, blocks inserted into replicated tables by the `INSERT` statement are deduplicated (see [Data Replication](../../engines/table-engines/mergetree-family/replication.md)). -For the replicated tables by default the only 100 of the most recent blocks for each partition are deduplicated (see [replicated_deduplication_window](merge-tree-settings.md#replicated-deduplication-window), [replicated_deduplication_window_seconds](merge-tree-settings.md/#replicated-deduplication-window-seconds)). 
+For the replicated tables by default the only 100 of the most recent blocks for each partition are deduplicated (see [replicated_deduplication_window](merge-tree-settings.md/#replicated-deduplication-window), [replicated_deduplication_window_seconds](merge-tree-settings.md/#replicated-deduplication-window-seconds)). For not replicated tables see [non_replicated_deduplication_window](merge-tree-settings.md/#non-replicated-deduplication-window). ## deduplicate_blocks_in_dependent_materialized_views {#settings-deduplicate-blocks-in-dependent-materialized-views} @@ -1296,7 +1296,7 @@ Default value: empty string (disabled) `insert_deduplication_token` is used for deduplication _only_ when not empty. -For the replicated tables by default the only 100 of the most recent inserts for each partition are deduplicated (see [replicated_deduplication_window](merge-tree-settings.md#replicated-deduplication-window), [replicated_deduplication_window_seconds](merge-tree-settings.md/#replicated-deduplication-window-seconds)). +For the replicated tables by default the only 100 of the most recent inserts for each partition are deduplicated (see [replicated_deduplication_window](merge-tree-settings.md/#replicated-deduplication-window), [replicated_deduplication_window_seconds](merge-tree-settings.md/#replicated-deduplication-window-seconds)). For not replicated tables see [non_replicated_deduplication_window](merge-tree-settings.md/#non-replicated-deduplication-window). Example: @@ -1373,15 +1373,15 @@ Default value: 0. ## count_distinct_implementation {#settings-count_distinct_implementation} -Specifies which of the `uniq*` functions should be used to perform the [COUNT(DISTINCT …)](../../sql-reference/aggregate-functions/reference/count.md#agg_function-count) construction. +Specifies which of the `uniq*` functions should be used to perform the [COUNT(DISTINCT …)](../../sql-reference/aggregate-functions/reference/count.md/#agg_function-count) construction. Possible values: -- [uniq](../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) -- [uniqCombined](../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined) -- [uniqCombined64](../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64) -- [uniqHLL12](../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12) -- [uniqExact](../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) +- [uniq](../../sql-reference/aggregate-functions/reference/uniq.md/#agg_function-uniq) +- [uniqCombined](../../sql-reference/aggregate-functions/reference/uniqcombined.md/#agg_function-uniqcombined) +- [uniqCombined64](../../sql-reference/aggregate-functions/reference/uniqcombined64.md/#agg_function-uniqcombined64) +- [uniqHLL12](../../sql-reference/aggregate-functions/reference/uniqhll12.md/#agg_function-uniqhll12) +- [uniqExact](../../sql-reference/aggregate-functions/reference/uniqexact.md/#agg_function-uniqexact) Default value: `uniqExact`. @@ -1616,14 +1616,14 @@ Enables or disables optimization by transforming some functions to reading subco These functions can be transformed: -- [length](../../sql-reference/functions/array-functions.md#array_functions-length) to read the [size0](../../sql-reference/data-types/array.md#array-size) subcolumn. -- [empty](../../sql-reference/functions/array-functions.md#function-empty) to read the [size0](../../sql-reference/data-types/array.md#array-size) subcolumn. 
-- [notEmpty](../../sql-reference/functions/array-functions.md#function-notempty) to read the [size0](../../sql-reference/data-types/array.md#array-size) subcolumn. -- [isNull](../../sql-reference/operators/index.md#operator-is-null) to read the [null](../../sql-reference/data-types/nullable.md#finding-null) subcolumn. -- [isNotNull](../../sql-reference/operators/index.md#is-not-null) to read the [null](../../sql-reference/data-types/nullable.md#finding-null) subcolumn. -- [count](../../sql-reference/aggregate-functions/reference/count.md) to read the [null](../../sql-reference/data-types/nullable.md#finding-null) subcolumn. -- [mapKeys](../../sql-reference/functions/tuple-map-functions.md#mapkeys) to read the [keys](../../sql-reference/data-types/map.md#map-subcolumns) subcolumn. -- [mapValues](../../sql-reference/functions/tuple-map-functions.md#mapvalues) to read the [values](../../sql-reference/data-types/map.md#map-subcolumns) subcolumn. +- [length](../../sql-reference/functions/array-functions.md/#array_functions-length) to read the [size0](../../sql-reference/data-types/array.md/#array-size) subcolumn. +- [empty](../../sql-reference/functions/array-functions.md/#function-empty) to read the [size0](../../sql-reference/data-types/array.md/#array-size) subcolumn. +- [notEmpty](../../sql-reference/functions/array-functions.md/#function-notempty) to read the [size0](../../sql-reference/data-types/array.md/#array-size) subcolumn. +- [isNull](../../sql-reference/operators/index.md/#operator-is-null) to read the [null](../../sql-reference/data-types/nullable.md/#finding-null) subcolumn. +- [isNotNull](../../sql-reference/operators/index.md/#is-not-null) to read the [null](../../sql-reference/data-types/nullable.md/#finding-null) subcolumn. +- [count](../../sql-reference/aggregate-functions/reference/count.md) to read the [null](../../sql-reference/data-types/nullable.md/#finding-null) subcolumn. +- [mapKeys](../../sql-reference/functions/tuple-map-functions.md/#mapkeys) to read the [keys](../../sql-reference/data-types/map.md/#map-subcolumns) subcolumn. +- [mapValues](../../sql-reference/functions/tuple-map-functions.md/#mapvalues) to read the [values](../../sql-reference/data-types/map.md/#map-subcolumns) subcolumn. Possible values: @@ -1782,7 +1782,7 @@ Default value: 1000000000 nanoseconds (once a second). See also: -- System table [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) +- System table [trace_log](../../operations/system-tables/trace_log.md/#system_tables-trace_log) ## query_profiler_cpu_time_period_ns {#query_profiler_cpu_time_period_ns} @@ -1805,7 +1805,7 @@ Default value: 1000000000 nanoseconds. See also: -- System table [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) +- System table [trace_log](../../operations/system-tables/trace_log.md/#system_tables-trace_log) ## allow_introspection_functions {#settings-allow_introspection_functions} @@ -1821,11 +1821,11 @@ Default value: 0. **See Also** - [Sampling Query Profiler](../../operations/optimizing-performance/sampling-query-profiler.md) -- System table [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) +- System table [trace_log](../../operations/system-tables/trace_log.md/#system_tables-trace_log) ## input_format_parallel_parsing {#input-format-parallel-parsing} -Enables or disables order-preserving parallel parsing of data formats. 
Supported only for [TSV](../../interfaces/formats.md#tabseparated), [TKSV](../../interfaces/formats.md#tskv), [CSV](../../interfaces/formats.md#csv) and [JSONEachRow](../../interfaces/formats.md#jsoneachrow) formats. +Enables or disables order-preserving parallel parsing of data formats. Supported only for [TSV](../../interfaces/formats.md/#tabseparated), [TKSV](../../interfaces/formats.md/#tskv), [CSV](../../interfaces/formats.md/#csv) and [JSONEachRow](../../interfaces/formats.md/#jsoneachrow) formats. Possible values: @@ -1836,7 +1836,7 @@ Default value: `1`. ## output_format_parallel_formatting {#output-format-parallel-formatting} -Enables or disables parallel formatting of data formats. Supported only for [TSV](../../interfaces/formats.md#tabseparated), [TKSV](../../interfaces/formats.md#tskv), [CSV](../../interfaces/formats.md#csv) and [JSONEachRow](../../interfaces/formats.md#jsoneachrow) formats. +Enables or disables parallel formatting of data formats. Supported only for [TSV](../../interfaces/formats.md/#tabseparated), [TKSV](../../interfaces/formats.md/#tskv), [CSV](../../interfaces/formats.md/#csv) and [JSONEachRow](../../interfaces/formats.md/#jsoneachrow) formats. Possible values: @@ -1878,7 +1878,7 @@ Default value: 0. ## insert_distributed_sync {#insert_distributed_sync} -Enables or disables synchronous data insertion into a [Distributed](../../engines/table-engines/special/distributed.md#distributed) table. +Enables or disables synchronous data insertion into a [Distributed](../../engines/table-engines/special/distributed.md/#distributed) table. By default, when inserting data into a `Distributed` table, the ClickHouse server sends data to cluster nodes in asynchronous mode. When `insert_distributed_sync=1`, the data is processed synchronously, and the `INSERT` operation succeeds only after all the data is saved on all shards (at least one replica for each shard if `internal_replication` is true). @@ -1891,12 +1891,12 @@ Default value: `0`. **See Also** -- [Distributed Table Engine](../../engines/table-engines/special/distributed.md#distributed) -- [Managing Distributed Tables](../../sql-reference/statements/system.md#query-language-system-distributed) +- [Distributed Table Engine](../../engines/table-engines/special/distributed.md/#distributed) +- [Managing Distributed Tables](../../sql-reference/statements/system.md/#query-language-system-distributed) ## insert_shard_id {#insert_shard_id} -If not `0`, specifies the shard of [Distributed](../../engines/table-engines/special/distributed.md#distributed) table into which the data will be inserted synchronously. +If not `0`, specifies the shard of [Distributed](../../engines/table-engines/special/distributed.md/#distributed) table into which the data will be inserted synchronously. If `insert_shard_id` value is incorrect, the server will throw an exception. @@ -1909,7 +1909,7 @@ SELECT uniq(shard_num) FROM system.clusters WHERE cluster = 'requested_cluster'; Possible values: - 0 — Disabled. -- Any number from `1` to `shards_num` of corresponding [Distributed](../../engines/table-engines/special/distributed.md#distributed) table. +- Any number from `1` to `shards_num` of corresponding [Distributed](../../engines/table-engines/special/distributed.md/#distributed) table. Default value: `0`. @@ -1969,7 +1969,7 @@ Default value: 16. 
## background_move_pool_size {#background_move_pool_size} -Sets the number of threads performing background moves of data parts for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes)-engine tables. This setting is applied at the ClickHouse server start and can’t be changed in a user session. +Sets the number of threads performing background moves of data parts for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-multiple-volumes)-engine tables. This setting is applied at the ClickHouse server start and can’t be changed in a user session. Possible values: @@ -1979,7 +1979,7 @@ Default value: 8. ## background_schedule_pool_size {#background_schedule_pool_size} -Sets the number of threads performing background tasks for [replicated](../../engines/table-engines/mergetree-family/replication.md) tables, [Kafka](../../engines/table-engines/integrations/kafka.md) streaming, [DNS cache updates](../../operations/server-configuration-parameters/settings.md#server-settings-dns-cache-update-period). This setting is applied at ClickHouse server start and can’t be changed in a user session. +Sets the number of threads performing background tasks for [replicated](../../engines/table-engines/mergetree-family/replication.md) tables, [Kafka](../../engines/table-engines/integrations/kafka.md) streaming, [DNS cache updates](../../operations/server-configuration-parameters/settings.md/#server-settings-dns-cache-update-period). This setting is applied at ClickHouse server start and can’t be changed in a user session. Possible values: @@ -2036,12 +2036,12 @@ Default value: 16. **See Also** -- [Kafka](../../engines/table-engines/integrations/kafka.md#kafka) engine. -- [RabbitMQ](../../engines/table-engines/integrations/rabbitmq.md#rabbitmq-engine) engine. +- [Kafka](../../engines/table-engines/integrations/kafka.md/#kafka) engine. +- [RabbitMQ](../../engines/table-engines/integrations/rabbitmq.md/#rabbitmq-engine) engine. ## validate_polygons {#validate_polygons} -Enables or disables throwing an exception in the [pointInPolygon](../../sql-reference/functions/geo/index.md#pointinpolygon) function, if the polygon is self-intersecting or self-tangent. +Enables or disables throwing an exception in the [pointInPolygon](../../sql-reference/functions/geo/index.md/#pointinpolygon) function, if the polygon is self-intersecting or self-tangent. Possible values: @@ -2052,7 +2052,7 @@ Default value: 1. ## transform_null_in {#transform_null_in} -Enables equality of [NULL](../../sql-reference/syntax.md#null-literal) values for [IN](../../sql-reference/operators/in.md) operator. +Enables equality of [NULL](../../sql-reference/syntax.md/#null-literal) values for [IN](../../sql-reference/operators/in.md) operator. By default, `NULL` values can’t be compared because `NULL` means undefined value. Thus, comparison `expr = NULL` must always return `false`. With this setting `NULL = NULL` returns `true` for `IN` operator. @@ -2106,7 +2106,7 @@ Result: **See Also** -- [NULL Processing in IN Operators](../../sql-reference/operators/in.md#in-null-processing) +- [NULL Processing in IN Operators](../../sql-reference/operators/in.md/#in-null-processing) ## low_cardinality_max_dictionary_size {#low_cardinality_max_dictionary_size} @@ -2133,7 +2133,7 @@ Default value: 0. 
## low_cardinality_allow_in_native_format {#low_cardinality_allow_in_native_format} -Allows or restricts using the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) data type with the [Native](../../interfaces/formats.md#native) format. +Allows or restricts using the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) data type with the [Native](../../interfaces/formats.md/#native) format. If usage of `LowCardinality` is restricted, ClickHouse server converts `LowCardinality`-columns to ordinary ones for `SELECT` queries, and convert ordinary columns to `LowCardinality`-columns for `INSERT` queries. @@ -2197,7 +2197,7 @@ Default value: 268435456. ## optimize_read_in_order {#optimize_read_in_order} -Enables [ORDER BY](../../sql-reference/statements/select/order-by.md#optimize_read_in_order) optimization in [SELECT](../../sql-reference/statements/select/index.md) queries for reading data from [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables. +Enables [ORDER BY](../../sql-reference/statements/select/order-by.md/#optimize_read_in_order) optimization in [SELECT](../../sql-reference/statements/select/index.md) queries for reading data from [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables. Possible values: @@ -2208,7 +2208,7 @@ Default value: `1`. **See Also** -- [ORDER BY Clause](../../sql-reference/statements/select/order-by.md#optimize_read_in_order) +- [ORDER BY Clause](../../sql-reference/statements/select/order-by.md/#optimize_read_in_order) ## optimize_aggregation_in_order {#optimize_aggregation_in_order} @@ -2223,11 +2223,11 @@ Default value: `0`. **See Also** -- [GROUP BY optimization](../../sql-reference/statements/select/group-by.md#aggregation-in-order) +- [GROUP BY optimization](../../sql-reference/statements/select/group-by.md/#aggregation-in-order) ## mutations_sync {#mutations_sync} -Allows to execute `ALTER TABLE ... UPDATE|DELETE` queries ([mutations](../../sql-reference/statements/alter/index.md#mutations)) synchronously. +Allows to execute `ALTER TABLE ... UPDATE|DELETE` queries ([mutations](../../sql-reference/statements/alter/overview.md/#mutations)) synchronously. Possible values: @@ -2239,8 +2239,8 @@ Default value: `0`. **See Also** -- [Synchronicity of ALTER Queries](../../sql-reference/statements/alter/index.md#synchronicity-of-alter-queries) -- [Mutations](../../sql-reference/statements/alter/index.md#mutations) +- [Synchronicity of ALTER Queries](../../sql-reference/statements/alter/overview.md/#synchronicity-of-alter-queries) +- [Mutations](../../sql-reference/statements/alter/overview.md/#mutations) ## ttl_only_drop_parts {#ttl_only_drop_parts} @@ -2261,8 +2261,8 @@ Default value: `0`. **See Also** -- [CREATE TABLE query clauses and settings](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-query-clauses) (`merge_with_ttl_timeout` setting) -- [Table TTL](../../engines/table-engines/mergetree-family/mergetree.md#mergetree-table-ttl) +- [CREATE TABLE query clauses and settings](../../engines/table-engines/mergetree-family/mergetree.md/#mergetree-query-clauses) (`merge_with_ttl_timeout` setting) +- [Table TTL](../../engines/table-engines/mergetree-family/mergetree.md/#mergetree-table-ttl) ## lock_acquire_timeout {#lock_acquire_timeout} @@ -2279,7 +2279,7 @@ Default value: `120` seconds. 
## cast_keep_nullable {#cast_keep_nullable} -Enables or disables keeping of the `Nullable` data type in [CAST](../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) operations. +Enables or disables keeping of the `Nullable` data type in [CAST](../../sql-reference/functions/type-conversion-functions.md/#type_conversion_function-cast) operations. When the setting is enabled and the argument of `CAST` function is `Nullable`, the result is also transformed to `Nullable` type. When the setting is disabled, the result always has the destination type exactly. @@ -2324,7 +2324,7 @@ Result: **See Also** -- [CAST](../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) function +- [CAST](../../sql-reference/functions/type-conversion-functions.md/#type_conversion_function-cast) function ## system_events_show_zero_values {#system_events_show_zero_values} @@ -2369,7 +2369,7 @@ Result ## persistent {#persistent} -Disables persistency for the [Set](../../engines/table-engines/special/set.md#set) and [Join](../../engines/table-engines/special/join.md#join) table engines. +Disables persistency for the [Set](../../engines/table-engines/special/set.md/#set) and [Join](../../engines/table-engines/special/join.md/#join) table engines. Reduces the I/O overhead. Suitable for scenarios that pursue performance and do not require persistence. @@ -2382,7 +2382,7 @@ Default value: `1`. ## allow_nullable_key {#allow-nullable-key} -Allows using of the [Nullable](../../sql-reference/data-types/nullable.md#data_type-nullable)-typed values in a sorting and a primary key for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engines-mergetree) tables. +Allows using of the [Nullable](../../sql-reference/data-types/nullable.md/#data_type-nullable)-typed values in a sorting and a primary key for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md/#table_engines-mergetree) tables. Possible values: @@ -2401,7 +2401,7 @@ Do not enable this feature in version `<= 21.8`. It's not properly implemented a ## aggregate_functions_null_for_empty {#aggregate_functions_null_for_empty} -Enables or disables rewriting all aggregate functions in a query, adding [-OrNull](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-ornull) suffix to them. Enable it for SQL standard compatibility. +Enables or disables rewriting all aggregate functions in a query, adding [-OrNull](../../sql-reference/aggregate-functions/combinators.md/#agg-functions-combinator-ornull) suffix to them. Enable it for SQL standard compatibility. It is implemented via query rewrite (similar to [count_distinct_implementation](#settings-count_distinct_implementation) setting) to get consistent results for distributed queries. Possible values: @@ -2448,7 +2448,7 @@ See examples in [UNION](../../sql-reference/statements/select/union.md). ## data_type_default_nullable {#data_type_default_nullable} -Allows data types without explicit modifiers [NULL or NOT NULL](../../sql-reference/statements/create/table.md#null-modifiers) in column definition will be [Nullable](../../sql-reference/data-types/nullable.md#data_type-nullable). +Allows data types without explicit modifiers [NULL or NOT NULL](../../sql-reference/statements/create/table.md/#null-modifiers) in column definition will be [Nullable](../../sql-reference/data-types/nullable.md/#data_type-nullable). 
Possible values: @@ -2478,7 +2478,7 @@ It can be useful when merges are CPU bounded not IO bounded (performing heavy da ## max_final_threads {#max-final-threads} -Sets the maximum number of parallel threads for the `SELECT` query data read phase with the [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier. +Sets the maximum number of parallel threads for the `SELECT` query data read phase with the [FINAL](../../sql-reference/statements/select/from.md/#select-from-final) modifier. Possible values: @@ -2551,7 +2551,7 @@ Result: └─────────────┘ ``` -Note that this setting influences [Materialized view](../../sql-reference/statements/create/view.md#materialized) and [MaterializedMySQL](../../engines/database-engines/materialized-mysql.md) behaviour. +Note that this setting influences [Materialized view](../../sql-reference/statements/create/view.md/#materialized) and [MaterializedMySQL](../../engines/database-engines/materialized-mysql.md) behaviour. ## engine_file_empty_if_not_exists {#engine-file-empty_if-not-exists} @@ -2608,7 +2608,7 @@ Default value: `0`. ## allow_experimental_live_view {#allow-experimental-live-view} -Allows creation of experimental [live views](../../sql-reference/statements/create/view.md#live-view). +Allows creation of experimental [live views](../../sql-reference/statements/create/view.md/#live-view). Possible values: @@ -2619,19 +2619,19 @@ Default value: `0`. ## live_view_heartbeat_interval {#live-view-heartbeat-interval} -Sets the heartbeat interval in seconds to indicate [live view](../../sql-reference/statements/create/view.md#live-view) is alive . +Sets the heartbeat interval in seconds to indicate [live view](../../sql-reference/statements/create/view.md/#live-view) is alive . Default value: `15`. ## max_live_view_insert_blocks_before_refresh {#max-live-view-insert-blocks-before-refresh} -Sets the maximum number of inserted blocks after which mergeable blocks are dropped and query for [live view](../../sql-reference/statements/create/view.md#live-view) is re-executed. +Sets the maximum number of inserted blocks after which mergeable blocks are dropped and query for [live view](../../sql-reference/statements/create/view.md/#live-view) is re-executed. Default value: `64`. ## periodic_live_view_refresh {#periodic-live-view-refresh} -Sets the interval in seconds after which periodically refreshed [live view](../../sql-reference/statements/create/view.md#live-view) is forced to refresh. +Sets the interval in seconds after which periodically refreshed [live view](../../sql-reference/statements/create/view.md/#live-view) is forced to refresh. Default value: `60`. @@ -2670,7 +2670,7 @@ Default value: 180. ## check_query_single_value_result {#check_query_single_value_result} -Defines the level of detail for the [CHECK TABLE](../../sql-reference/statements/check-table.md#checking-mergetree-tables) query result for `MergeTree` family engines . +Defines the level of detail for the [CHECK TABLE](../../sql-reference/statements/check-table.md/#checking-mergetree-tables) query result for `MergeTree` family engines . Possible values: @@ -2681,7 +2681,7 @@ Default value: `0`. ## prefer_column_name_to_alias {#prefer-column-name-to-alias} -Enables or disables using the original column names instead of aliases in query expressions and clauses. It especially matters when alias is the same as the column name, see [Expression Aliases](../../sql-reference/syntax.md#notes-on-usage). 
Enable this setting to make aliases syntax rules in ClickHouse more compatible with most other database engines. +Enables or disables using the original column names instead of aliases in query expressions and clauses. It especially matters when alias is the same as the column name, see [Expression Aliases](../../sql-reference/syntax.md/#notes-on-usage). Enable this setting to make aliases syntax rules in ClickHouse more compatible with most other database engines. Possible values: @@ -2725,7 +2725,7 @@ Result: ## limit {#limit} -Sets the maximum number of rows to get from the query result. It adjusts the value set by the [LIMIT](../../sql-reference/statements/select/limit.md#limit-clause) clause, so that the limit, specified in the query, cannot exceed the limit, set by this setting. +Sets the maximum number of rows to get from the query result. It adjusts the value set by the [LIMIT](../../sql-reference/statements/select/limit.md/#limit-clause) clause, so that the limit, specified in the query, cannot exceed the limit, set by this setting. Possible values: @@ -2736,7 +2736,7 @@ Default value: `0`. ## offset {#offset} -Sets the number of rows to skip before starting to return rows from the query. It adjusts the offset set by the [OFFSET](../../sql-reference/statements/select/offset.md#offset-fetch) clause, so that these two values are summarized. +Sets the number of rows to skip before starting to return rows from the query. It adjusts the offset set by the [OFFSET](../../sql-reference/statements/select/offset.md/#offset-fetch) clause, so that these two values are summarized. Possible values: @@ -2773,7 +2773,7 @@ Result: ## optimize_syntax_fuse_functions {#optimize_syntax_fuse_functions} -Enables to fuse aggregate functions with identical argument. It rewrites query contains at least two aggregate functions from [sum](../../sql-reference/aggregate-functions/reference/sum.md#agg_function-sum), [count](../../sql-reference/aggregate-functions/reference/count.md#agg_function-count) or [avg](../../sql-reference/aggregate-functions/reference/avg.md#agg_function-avg) with identical argument to [sumCount](../../sql-reference/aggregate-functions/reference/sumcount.md#agg_function-sumCount). +Enables to fuse aggregate functions with identical argument. It rewrites query contains at least two aggregate functions from [sum](../../sql-reference/aggregate-functions/reference/sum.md/#agg_function-sum), [count](../../sql-reference/aggregate-functions/reference/count.md/#agg_function-count) or [avg](../../sql-reference/aggregate-functions/reference/avg.md/#agg_function-avg) with identical argument to [sumCount](../../sql-reference/aggregate-functions/reference/sumcount.md/#agg_function-sumCount). Possible values: @@ -2932,7 +2932,7 @@ If the setting is set to `0`, the table function does not make Nullable columns ## allow_experimental_projection_optimization {#allow-experimental-projection-optimization} -Enables or disables [projection](../../engines/table-engines/mergetree-family/mergetree.md#projections) optimization when processing `SELECT` queries. +Enables or disables [projection](../../engines/table-engines/mergetree-family/mergetree.md/#projections) optimization when processing `SELECT` queries. Possible values: @@ -2943,7 +2943,7 @@ Default value: `1`. 
## force_optimize_projection {#force-optimize-projection} -Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md#projections) in `SELECT` queries, when projection optimization is enabled (see [allow_experimental_projection_optimization](#allow-experimental-projection-optimization) setting). +Enables or disables the obligatory use of [projections](../../engines/table-engines/mergetree-family/mergetree.md/#projections) in `SELECT` queries, when projection optimization is enabled (see [allow_experimental_projection_optimization](#allow-experimental-projection-optimization) setting). Possible values: @@ -2954,7 +2954,7 @@ Default value: `0`. ## replication_alter_partitions_sync {#replication-alter-partitions-sync} -Allows to set up waiting for actions to be executed on replicas by [ALTER](../../sql-reference/statements/alter/index.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries. +Allows to set up waiting for actions to be executed on replicas by [ALTER](../../sql-reference/statements/alter/overview.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries. Possible values: @@ -2966,7 +2966,7 @@ Default value: `1`. ## replication_wait_for_inactive_replica_timeout {#replication-wait-for-inactive-replica-timeout} -Specifies how long (in seconds) to wait for inactive replicas to execute [ALTER](../../sql-reference/statements/alter/index.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries. +Specifies how long (in seconds) to wait for inactive replicas to execute [ALTER](../../sql-reference/statements/alter/overview.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries. Possible values: @@ -2978,7 +2978,7 @@ Default value: `120` seconds. ## regexp_max_matches_per_row {#regexp-max-matches-per-row} -Sets the maximum number of matches for a single regular expression per row. Use it to protect against memory overload when using greedy regular expression in the [extractAllGroupsHorizontal](../../sql-reference/functions/string-search-functions.md#extractallgroups-horizontal) function. +Sets the maximum number of matches for a single regular expression per row. Use it to protect against memory overload when using greedy regular expression in the [extractAllGroupsHorizontal](../../sql-reference/functions/string-search-functions.md/#extractallgroups-horizontal) function. Possible values: @@ -3010,7 +3010,7 @@ Default value: `1`. ## short_circuit_function_evaluation {#short-circuit-function-evaluation} -Allows calculating the [if](../../sql-reference/functions/conditional-functions.md#if), [multiIf](../../sql-reference/functions/conditional-functions.md#multiif), [and](../../sql-reference/functions/logical-functions.md#logical-and-function), and [or](../../sql-reference/functions/logical-functions.md#logical-or-function) functions according to a [short scheme](https://en.wikipedia.org/wiki/Short-circuit_evaluation). This helps optimize the execution of complex expressions in these functions and prevent possible exceptions (such as division by zero when it is not expected). 
+Allows calculating the [if](../../sql-reference/functions/conditional-functions.md/#if), [multiIf](../../sql-reference/functions/conditional-functions.md/#multiif), [and](../../sql-reference/functions/logical-functions.md/#logical-and-function), and [or](../../sql-reference/functions/logical-functions.md/#logical-or-function) functions according to a [short scheme](https://en.wikipedia.org/wiki/Short-circuit_evaluation). This helps optimize the execution of complex expressions in these functions and prevent possible exceptions (such as division by zero when it is not expected). Possible values: @@ -3022,7 +3022,7 @@ Default value: `enable`. ## max_hyperscan_regexp_length {#max-hyperscan-regexp-length} -Defines the maximum length for each regular expression in the [hyperscan multi-match functions](../../sql-reference/functions/string-search-functions.md#multimatchanyhaystack-pattern1-pattern2-patternn). +Defines the maximum length for each regular expression in the [hyperscan multi-match functions](../../sql-reference/functions/string-search-functions.md/#multimatchanyhaystack-pattern1-pattern2-patternn). Possible values: @@ -3065,7 +3065,7 @@ Exception: Regexp length too large. ## max_hyperscan_regexp_total_length {#max-hyperscan-regexp-total-length} -Sets the maximum length total of all regular expressions in each [hyperscan multi-match function](../../sql-reference/functions/string-search-functions.md#multimatchanyhaystack-pattern1-pattern2-patternn). +Sets the maximum length total of all regular expressions in each [hyperscan multi-match function](../../sql-reference/functions/string-search-functions.md/#multimatchanyhaystack-pattern1-pattern2-patternn). Possible values: @@ -3142,8 +3142,8 @@ Result: ## enable_extended_results_for_datetime_functions {#enable-extended-results-for-datetime-functions} Enables or disables returning results of type: -- `Date32` with extended range (compared to type `Date`) for functions [toStartOfYear](../../sql-reference/functions/date-time-functions.md#tostartofyear), [toStartOfISOYear](../../sql-reference/functions/date-time-functions.md#tostartofisoyear), [toStartOfQuarter](../../sql-reference/functions/date-time-functions.md#tostartofquarter), [toStartOfMonth](../../sql-reference/functions/date-time-functions.md#tostartofmonth), [toStartOfWeek](../../sql-reference/functions/date-time-functions.md#tostartofweek), [toMonday](../../sql-reference/functions/date-time-functions.md#tomonday) and [toLastDayOfMonth](../../sql-reference/functions/date-time-functions.md#tolastdayofmonth). -- `DateTime64` with extended range (compared to type `DateTime`) for functions [toStartOfDay](../../sql-reference/functions/date-time-functions.md#tostartofday), [toStartOfHour](../../sql-reference/functions/date-time-functions.md#tostartofhour), [toStartOfMinute](../../sql-reference/functions/date-time-functions.md#tostartofminute), [toStartOfFiveMinutes](../../sql-reference/functions/date-time-functions.md#tostartoffiveminutes), [toStartOfTenMinutes](../../sql-reference/functions/date-time-functions.md#tostartoftenminutes), [toStartOfFifteenMinutes](../../sql-reference/functions/date-time-functions.md#tostartoffifteenminutes) and [timeSlot](../../sql-reference/functions/date-time-functions.md#timeslot). 
+- `Date32` with extended range (compared to type `Date`) for functions [toStartOfYear](../../sql-reference/functions/date-time-functions.md/#tostartofyear), [toStartOfISOYear](../../sql-reference/functions/date-time-functions.md/#tostartofisoyear), [toStartOfQuarter](../../sql-reference/functions/date-time-functions.md/#tostartofquarter), [toStartOfMonth](../../sql-reference/functions/date-time-functions.md/#tostartofmonth), [toStartOfWeek](../../sql-reference/functions/date-time-functions.md/#tostartofweek), [toMonday](../../sql-reference/functions/date-time-functions.md/#tomonday) and [toLastDayOfMonth](../../sql-reference/functions/date-time-functions.md/#tolastdayofmonth). +- `DateTime64` with extended range (compared to type `DateTime`) for functions [toStartOfDay](../../sql-reference/functions/date-time-functions.md/#tostartofday), [toStartOfHour](../../sql-reference/functions/date-time-functions.md/#tostartofhour), [toStartOfMinute](../../sql-reference/functions/date-time-functions.md/#tostartofminute), [toStartOfFiveMinutes](../../sql-reference/functions/date-time-functions.md/#tostartoffiveminutes), [toStartOfTenMinutes](../../sql-reference/functions/date-time-functions.md/#tostartoftenminutes), [toStartOfFifteenMinutes](../../sql-reference/functions/date-time-functions.md/#tostartoffifteenminutes) and [timeSlot](../../sql-reference/functions/date-time-functions.md/#timeslot). Possible values: @@ -3167,7 +3167,7 @@ Default value: `1`. ## optimize_move_to_prewhere_if_final {#optimize_move_to_prewhere_if_final} -Enables or disables automatic [PREWHERE](../../sql-reference/statements/select/prewhere.md) optimization in [SELECT](../../sql-reference/statements/select/index.md) queries with [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier. +Enables or disables automatic [PREWHERE](../../sql-reference/statements/select/prewhere.md) optimization in [SELECT](../../sql-reference/statements/select/index.md) queries with [FINAL](../../sql-reference/statements/select/from.md/#select-from-final) modifier. Works only for [*MergeTree](../../engines/table-engines/mergetree-family/index.md) tables. @@ -3184,7 +3184,7 @@ Default value: `0`. ## describe_include_subcolumns {#describe_include_subcolumns} -Enables describing subcolumns for a [DESCRIBE](../../sql-reference/statements/describe-table.md) query. For example, members of a [Tuple](../../sql-reference/data-types/tuple.md) or subcolumns of a [Map](../../sql-reference/data-types/map.md#map-subcolumns), [Nullable](../../sql-reference/data-types/nullable.md#finding-null) or an [Array](../../sql-reference/data-types/array.md#array-size) data type. +Enables describing subcolumns for a [DESCRIBE](../../sql-reference/statements/describe-table.md) query. For example, members of a [Tuple](../../sql-reference/data-types/tuple.md) or subcolumns of a [Map](../../sql-reference/data-types/map.md/#map-subcolumns), [Nullable](../../sql-reference/data-types/nullable.md/#finding-null) or an [Array](../../sql-reference/data-types/array.md/#array-size) data type. Possible values: @@ -3283,7 +3283,7 @@ Default value: `0`. ## alter_partition_verbose_result {#alter-partition-verbose-result} Enables or disables the display of information about the parts to which the manipulation operations with partitions and parts have been successfully applied. 
-Applicable to [ATTACH PARTITION|PART](../../sql-reference/statements/alter/partition.md#alter_attach-partition) and to [FREEZE PARTITION](../../sql-reference/statements/alter/partition.md#alter_freeze-partition). +Applicable to [ATTACH PARTITION|PART](../../sql-reference/statements/alter/partition.md/#alter_attach-partition) and to [FREEZE PARTITION](../../sql-reference/statements/alter/partition.md/#alter_freeze-partition). Possible values: @@ -3418,11 +3418,11 @@ When writing data, ClickHouse throws an exception if input data contain columns Supported formats: -- [JSONEachRow](../../interfaces/formats.md#jsoneachrow) -- [TSKV](../../interfaces/formats.md#tskv) +- [JSONEachRow](../../interfaces/formats.md/#jsoneachrow) +- [TSKV](../../interfaces/formats.md/#tskv) - All formats with suffixes WithNames/WithNamesAndTypes -- [JSONColumns](../../interfaces/formats.md#jsoncolumns) -- [MySQLDump](../../interfaces/formats.md#mysqldump) +- [JSONColumns](../../interfaces/formats.md/#jsoncolumns) +- [MySQLDump](../../interfaces/formats.md/#mysqldump) Possible values: @@ -3439,18 +3439,18 @@ To improve insert performance, we recommend disabling this check if you are sure Supported formats: -- [CSVWithNames](../../interfaces/formats.md#csvwithnames) -- [CSVWithNamesAndTypes](../../interfaces/formats.md#csvwithnamesandtypes) -- [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames) -- [TabSeparatedWithNamesAndTypes](../../interfaces/formats.md#tabseparatedwithnamesandtypes) -- [JSONCompactEachRowWithNames](../../interfaces/formats.md#jsoncompacteachrowwithnames) -- [JSONCompactEachRowWithNamesAndTypes](../../interfaces/formats.md#jsoncompacteachrowwithnamesandtypes) -- [JSONCompactStringsEachRowWithNames](../../interfaces/formats.md#jsoncompactstringseachrowwithnames) -- [JSONCompactStringsEachRowWithNamesAndTypes](../../interfaces/formats.md#jsoncompactstringseachrowwithnamesandtypes) -- [RowBinaryWithNames](../../interfaces/formats.md#rowbinarywithnames) -- [RowBinaryWithNamesAndTypes](../../interfaces/formats.md#rowbinarywithnamesandtypes) -- [CustomSeparatedWithNames](../../interfaces/formats.md#customseparatedwithnames) -- [CustomSeparatedWithNamesAndTypes](../../interfaces/formats.md#customseparatedwithnamesandtypes) +- [CSVWithNames](../../interfaces/formats.md/#csvwithnames) +- [CSVWithNamesAndTypes](../../interfaces/formats.md/#csvwithnamesandtypes) +- [TabSeparatedWithNames](../../interfaces/formats.md/#tabseparatedwithnames) +- [TabSeparatedWithNamesAndTypes](../../interfaces/formats.md/#tabseparatedwithnamesandtypes) +- [JSONCompactEachRowWithNames](../../interfaces/formats.md/#jsoncompacteachrowwithnames) +- [JSONCompactEachRowWithNamesAndTypes](../../interfaces/formats.md/#jsoncompacteachrowwithnamesandtypes) +- [JSONCompactStringsEachRowWithNames](../../interfaces/formats.md/#jsoncompactstringseachrowwithnames) +- [JSONCompactStringsEachRowWithNamesAndTypes](../../interfaces/formats.md/#jsoncompactstringseachrowwithnamesandtypes) +- [RowBinaryWithNames](../../interfaces/formats.md/#rowbinarywithnames) +- [RowBinaryWithNamesAndTypes](../../interfaces/formats.md/#rowbinarywithnamesandtypes) +- [CustomSeparatedWithNames](../../interfaces/formats.md/#customseparatedwithnames) +- [CustomSeparatedWithNamesAndTypes](../../interfaces/formats.md/#customseparatedwithnamesandtypes) Possible values: @@ -3465,12 +3465,12 @@ Controls whether format parser should check if data types from the input data ma Supported formats: -- 
[CSVWithNamesAndTypes](../../interfaces/formats.md#csvwithnamesandtypes) -- [TabSeparatedWithNamesAndTypes](../../interfaces/formats.md#tabseparatedwithnamesandtypes) -- [JSONCompactEachRowWithNamesAndTypes](../../interfaces/formats.md#jsoncompacteachrowwithnamesandtypes) -- [JSONCompactStringsEachRowWithNamesAndTypes](../../interfaces/formats.md#jsoncompactstringseachrowwithnamesandtypes) -- [RowBinaryWithNamesAndTypes](../../interfaces/formats.md#rowbinarywithnamesandtypes-rowbinarywithnamesandtypes) -- [CustomSeparatedWithNamesAndTypes](../../interfaces/formats.md#customseparatedwithnamesandtypes) +- [CSVWithNamesAndTypes](../../interfaces/formats.md/#csvwithnamesandtypes) +- [TabSeparatedWithNamesAndTypes](../../interfaces/formats.md/#tabseparatedwithnamesandtypes) +- [JSONCompactEachRowWithNamesAndTypes](../../interfaces/formats.md/#jsoncompacteachrowwithnamesandtypes) +- [JSONCompactStringsEachRowWithNamesAndTypes](../../interfaces/formats.md/#jsoncompactstringseachrowwithnamesandtypes) +- [RowBinaryWithNamesAndTypes](../../interfaces/formats.md/#rowbinarywithnamesandtypes-rowbinarywithnamesandtypes) +- [CustomSeparatedWithNamesAndTypes](../../interfaces/formats.md/#customseparatedwithnamesandtypes) Possible values: @@ -3481,7 +3481,7 @@ Default value: 1. ## input_format_defaults_for_omitted_fields {#input_format_defaults_for_omitted_fields} -When performing `INSERT` queries, replace omitted input column values with default values of the respective columns. This option only applies to [JSONEachRow](../../interfaces/formats.md#jsoneachrow), [CSV](../../interfaces/formats.md#csv), [TabSeparated](../../interfaces/formats.md#tabseparated) formats and formats with `WithNames`/`WithNamesAndTypes` suffixes. +When performing `INSERT` queries, replace omitted input column values with default values of the respective columns. This option only applies to [JSONEachRow](../../interfaces/formats.md/#jsoneachrow), [CSV](../../interfaces/formats.md/#csv), [TabSeparated](../../interfaces/formats.md/#tabseparated) formats and formats with `WithNames`/`WithNamesAndTypes` suffixes. :::note When this option is enabled, extended table metadata are sent from server to client. It consumes additional computing resources on the server and can reduce performance. @@ -3496,7 +3496,7 @@ Default value: 1. ## input_format_null_as_default {#input_format_null_as_default} -Enables or disables the initialization of [NULL](../../sql-reference/syntax.md#null-literal) fields with [default values](../../sql-reference/statements/create/table.md#create-default-values), if data type of these fields is not [nullable](../../sql-reference/data-types/nullable.md#data_type-nullable). +Enables or disables the initialization of [NULL](../../sql-reference/syntax.md/#null-literal) fields with [default values](../../sql-reference/statements/create/table.md/#create-default-values), if data type of these fields is not [nullable](../../sql-reference/data-types/nullable.md/#data_type-nullable). If column type is not nullable and this setting is disabled, then inserting `NULL` causes an exception. If column type is nullable, then `NULL` values are inserted as is, regardless of this setting. This setting is applicable to [INSERT ... VALUES](../../sql-reference/statements/insert-into.md) queries for text input formats. 
@@ -3663,7 +3663,7 @@ Enabled by default ## insert_distributed_one_random_shard {#insert_distributed_one_random_shard} -Enables or disables random shard insertion into a [Distributed](../../engines/table-engines/special/distributed.md#distributed) table when there is no distributed key. +Enables or disables random shard insertion into a [Distributed](../../engines/table-engines/special/distributed.md/#distributed) table when there is no distributed key. By default, when inserting data into a `Distributed` table with more than one shard, the ClickHouse server will reject any insertion request if there is no distributed key. When `insert_distributed_one_random_shard = 1`, insertions are allowed and data is forwarded randomly among all shards. @@ -3682,7 +3682,7 @@ Enables or disables the insertion of JSON data with nested objects. Supported formats: -- [JSONEachRow](../../interfaces/formats.md#jsoneachrow) +- [JSONEachRow](../../interfaces/formats.md/#jsoneachrow) Possible values: @@ -3693,7 +3693,7 @@ Default value: 0. See also: -- [Usage of Nested Structures](../../interfaces/formats.md#jsoneachrow-nested) with the `JSONEachRow` format. +- [Usage of Nested Structures](../../interfaces/formats.md/#jsoneachrow-nested) with the `JSONEachRow` format. ### input_format_json_read_bools_as_numbers {#input_format_json_read_bools_as_numbers} @@ -3716,7 +3716,7 @@ Enabled by default. ### output_format_json_quote_64bit_integers {#output_format_json_quote_64bit_integers} -Controls quoting of 64-bit or bigger [integers](../../sql-reference/data-types/int-uint.md) (like `UInt64` or `Int128`) when they are output in a [JSON](../../interfaces/formats.md#json) format. +Controls quoting of 64-bit or bigger [integers](../../sql-reference/data-types/int-uint.md) (like `UInt64` or `Int128`) when they are output in a [JSON](../../interfaces/formats.md/#json) format. Such integers are enclosed in quotes by default. This behavior is compatible with most JavaScript implementations. Possible values: @@ -3734,7 +3734,7 @@ Disabled by default. ### output_format_json_quote_denormals {#output_format_json_quote_denormals} -Enables `+nan`, `-nan`, `+inf`, `-inf` outputs in [JSON](../../interfaces/formats.md#json) output format. +Enables `+nan`, `-nan`, `+inf`, `-inf` outputs in [JSON](../../interfaces/formats.md/#json) output format. Possible values: @@ -3851,7 +3851,7 @@ Disabled by default. ### output_format_json_array_of_rows {#output_format_json_array_of_rows} -Enables the ability to output all rows as a JSON array in the [JSONEachRow](../../interfaces/formats.md#jsoneachrow) format. +Enables the ability to output all rows as a JSON array in the [JSONEachRow](../../interfaces/formats.md/#jsoneachrow) format. Possible values: @@ -3904,7 +3904,7 @@ Disabled by default. ### format_json_object_each_row_column_for_object_name {#format_json_object_each_row_column_for_object_name} -The name of column that will be used for storing/writing object names in [JSONObjectEachRow](../../interfaces/formats.md#jsonobjecteachrow) format. +The name of column that will be used for storing/writing object names in [JSONObjectEachRow](../../interfaces/formats.md/#jsonobjecteachrow) format. Column type should be String. If value is empty, default names `row_{i}`will be used for object names. Default value: ''. @@ -4005,7 +4005,7 @@ Disabled by default. ### format_tsv_null_representation {#format_tsv_null_representation} -Defines the representation of `NULL` for [TSV](../../interfaces/formats.md#tabseparated) output and input formats. 
User can set any string as a value, for example, `My NULL`. +Defines the representation of `NULL` for [TSV](../../interfaces/formats.md/#tabseparated) output and input formats. User can set any string as a value, for example, `My NULL`. Default value: `\N`. @@ -4159,7 +4159,7 @@ Default value: `0`. ### format_csv_null_representation {#format_csv_null_representation} -Defines the representation of `NULL` for [CSV](../../interfaces/formats.md#csv) output and input formats. User can set any string as a value, for example, `My NULL`. +Defines the representation of `NULL` for [CSV](../../interfaces/formats.md/#csv) output and input formats. User can set any string as a value, for example, `My NULL`. Default value: `\N`. @@ -4198,7 +4198,7 @@ My NULL ### input_format_values_interpret_expressions {#input_format_values_interpret_expressions} -Enables or disables the full SQL parser if the fast stream parser can’t parse the data. This setting is used only for the [Values](../../interfaces/formats.md#data-format-values) format at the data insertion. For more information about syntax parsing, see the [Syntax](../../sql-reference/syntax.md) section. +Enables or disables the full SQL parser if the fast stream parser can’t parse the data. This setting is used only for the [Values](../../interfaces/formats.md/#data-format-values) format at the data insertion. For more information about syntax parsing, see the [Syntax](../../sql-reference/syntax.md) section. Possible values: @@ -4248,7 +4248,7 @@ Ok. ### input_format_values_deduce_templates_of_expressions {#input_format_values_deduce_templates_of_expressions} -Enables or disables template deduction for SQL expressions in [Values](../../interfaces/formats.md#data-format-values) format. It allows parsing and interpreting expressions in `Values` much faster if expressions in consecutive rows have the same structure. ClickHouse tries to deduce the template of an expression, parse the following rows using this template and evaluate the expression on a batch of successfully parsed rows. +Enables or disables template deduction for SQL expressions in [Values](../../interfaces/formats.md/#data-format-values) format. It allows parsing and interpreting expressions in `Values` much faster if expressions in consecutive rows have the same structure. ClickHouse tries to deduce the template of an expression, parse the following rows using this template and evaluate the expression on a batch of successfully parsed rows. Possible values: @@ -4293,7 +4293,7 @@ Default value: 1. ### input_format_arrow_import_nested {#input_format_arrow_import_nested} -Enables or disables the ability to insert the data into [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs in [Arrow](../../interfaces/formats.md#data_types-matching-arrow) input format. +Enables or disables the ability to insert the data into [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs in [Arrow](../../interfaces/formats.md/#data_types-matching-arrow) input format. Possible values: @@ -4322,7 +4322,7 @@ Disabled by default. ### output_format_arrow_low_cardinality_as_dictionary {#output_format_arrow_low_cardinality_as_dictionary} -Allows to convert the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) type to the `DICTIONARY` type of the [Arrow](../../interfaces/formats.md#data-format-arrow) format for `SELECT` queries. 
+Allows to convert the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) type to the `DICTIONARY` type of the [Arrow](../../interfaces/formats.md/#data-format-arrow) format for `SELECT` queries. Possible values: @@ -4341,7 +4341,7 @@ Disabled by default. ### input_format_orc_import_nested {#input_format_orc_import_nested} -Enables or disables the ability to insert the data into [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs in [ORC](../../interfaces/formats.md#data-format-orc) input format. +Enables or disables the ability to insert the data into [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs in [ORC](../../interfaces/formats.md/#data-format-orc) input format. Possible values: @@ -4384,7 +4384,7 @@ Disabled by default. ## input_format_parquet_import_nested {#input_format_parquet_import_nested} -Enables or disables the ability to insert the data into [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs in [Parquet](../../interfaces/formats.md#data-format-parquet) input format. +Enables or disables the ability to insert the data into [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) columns as an array of structs in [Parquet](../../interfaces/formats.md/#data-format-parquet) input format. Possible values: @@ -4481,7 +4481,7 @@ Disabled by default. ### input_format_avro_allow_missing_fields {#input_format_avro_allow_missing_fields} -Enables using fields that are not specified in [Avro](../../interfaces/formats.md#data-format-avro) or [AvroConfluent](../../interfaces/formats.md#data-format-avro-confluent) format schema. When a field is not found in the schema, ClickHouse uses the default value instead of throwing an exception. +Enables using fields that are not specified in [Avro](../../interfaces/formats.md/#data-format-avro) or [AvroConfluent](../../interfaces/formats.md/#data-format-avro-confluent) format schema. When a field is not found in the schema, ClickHouse uses the default value instead of throwing an exception. Possible values: @@ -4492,7 +4492,7 @@ Default value: 0. ### format_avro_schema_registry_url {#format_avro_schema_registry_url} -Sets [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/index.html) URL to use with [AvroConfluent](../../interfaces/formats.md#data-format-avro-confluent) format. +Sets [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/index.html) URL to use with [AvroConfluent](../../interfaces/formats.md/#data-format-avro-confluent) format. Default value: `Empty`. @@ -4549,7 +4549,7 @@ Default value: `250`. ### output_format_pretty_max_value_width {#output_format_pretty_max_value_width} -Limits the width of value displayed in [Pretty](../../interfaces/formats.md#pretty) formats. If the value width exceeds the limit, the value is cut. +Limits the width of value displayed in [Pretty](../../interfaces/formats.md/#pretty) formats. If the value width exceeds the limit, the value is cut. Possible values: @@ -4625,7 +4625,7 @@ SELECT * FROM a; ### output_format_pretty_row_numbers {#output_format_pretty_row_numbers} -Adds row numbers to output in the [Pretty](../../interfaces/formats.md#pretty) format. +Adds row numbers to output in the [Pretty](../../interfaces/formats.md/#pretty) format. Possible values: @@ -4670,52 +4670,52 @@ Delimiter between rows (for Template format). 
### format_custom_escaping_rule {#format_custom_escaping_rule} -Sets the field escaping rule for [CustomSeparated](../../interfaces/formats.md#format-customseparated) data format. +Sets the field escaping rule for [CustomSeparated](../../interfaces/formats.md/#format-customseparated) data format. Possible values: -- `'Escaped'` — Similarly to [TSV](../../interfaces/formats.md#tabseparated). -- `'Quoted'` — Similarly to [Values](../../interfaces/formats.md#data-format-values). -- `'CSV'` — Similarly to [CSV](../../interfaces/formats.md#csv). -- `'JSON'` — Similarly to [JSONEachRow](../../interfaces/formats.md#jsoneachrow). -- `'XML'` — Similarly to [XML](../../interfaces/formats.md#xml). -- `'Raw'` — Extracts subpatterns as a whole, no escaping rules, similarly to [TSVRaw](../../interfaces/formats.md#tabseparatedraw). +- `'Escaped'` — Similarly to [TSV](../../interfaces/formats.md/#tabseparated). +- `'Quoted'` — Similarly to [Values](../../interfaces/formats.md/#data-format-values). +- `'CSV'` — Similarly to [CSV](../../interfaces/formats.md/#csv). +- `'JSON'` — Similarly to [JSONEachRow](../../interfaces/formats.md/#jsoneachrow). +- `'XML'` — Similarly to [XML](../../interfaces/formats.md/#xml). +- `'Raw'` — Extracts subpatterns as a whole, no escaping rules, similarly to [TSVRaw](../../interfaces/formats.md/#tabseparatedraw). Default value: `'Escaped'`. ### format_custom_field_delimiter {#format_custom_field_delimiter} -Sets the character that is interpreted as a delimiter between the fields for [CustomSeparated](../../interfaces/formats.md#format-customseparated) data format. +Sets the character that is interpreted as a delimiter between the fields for [CustomSeparated](../../interfaces/formats.md/#format-customseparated) data format. Default value: `'\t'`. ### format_custom_row_before_delimiter {#format_custom_row_before_delimiter} -Sets the character that is interpreted as a delimiter before the field of the first column for [CustomSeparated](../../interfaces/formats.md#format-customseparated) data format. +Sets the character that is interpreted as a delimiter before the field of the first column for [CustomSeparated](../../interfaces/formats.md/#format-customseparated) data format. Default value: `''`. ### format_custom_row_after_delimiter {#format_custom_row_after_delimiter} -Sets the character that is interpreted as a delimiter after the field of the last column for [CustomSeparated](../../interfaces/formats.md#format-customseparated) data format. +Sets the character that is interpreted as a delimiter after the field of the last column for [CustomSeparated](../../interfaces/formats.md/#format-customseparated) data format. Default value: `'\n'`. ### format_custom_row_between_delimiter {#format_custom_row_between_delimiter} -Sets the character that is interpreted as a delimiter between the rows for [CustomSeparated](../../interfaces/formats.md#format-customseparated) data format. +Sets the character that is interpreted as a delimiter between the rows for [CustomSeparated](../../interfaces/formats.md/#format-customseparated) data format. Default value: `''`. ### format_custom_result_before_delimiter {#format_custom_result_before_delimiter} -Sets the character that is interpreted as a prefix before the result set for [CustomSeparated](../../interfaces/formats.md#format-customseparated) data format. +Sets the character that is interpreted as a prefix before the result set for [CustomSeparated](../../interfaces/formats.md/#format-customseparated) data format. Default value: `''`. 
### format_custom_result_after_delimiter {#format_custom_result_after_delimiter} -Sets the character that is interpreted as a suffix after the result set for [CustomSeparated](../../interfaces/formats.md#format-customseparated) data format. +Sets the character that is interpreted as a suffix after the result set for [CustomSeparated](../../interfaces/formats.md/#format-customseparated) data format. Default value: `''`. @@ -4727,12 +4727,12 @@ Field escaping rule. Possible values: -- `'Escaped'` — Similarly to [TSV](../../interfaces/formats.md#tabseparated). -- `'Quoted'` — Similarly to [Values](../../interfaces/formats.md#data-format-values). -- `'CSV'` — Similarly to [CSV](../../interfaces/formats.md#csv). -- `'JSON'` — Similarly to [JSONEachRow](../../interfaces/formats.md#jsoneachrow). -- `'XML'` — Similarly to [XML](../../interfaces/formats.md#xml). -- `'Raw'` — Extracts subpatterns as a whole, no escaping rules, similarly to [TSVRaw](../../interfaces/formats.md#tabseparatedraw). +- `'Escaped'` — Similarly to [TSV](../../interfaces/formats.md/#tabseparated). +- `'Quoted'` — Similarly to [Values](../../interfaces/formats.md/#data-format-values). +- `'CSV'` — Similarly to [CSV](../../interfaces/formats.md/#csv). +- `'JSON'` — Similarly to [JSONEachRow](../../interfaces/formats.md/#jsoneachrow). +- `'XML'` — Similarly to [XML](../../interfaces/formats.md/#xml). +- `'Raw'` — Extracts subpatterns as a whole, no escaping rules, similarly to [TSVRaw](../../interfaces/formats.md/#tabseparatedraw). Default value: `Raw`. @@ -4746,7 +4746,7 @@ Disabled by default. ### format_capn_proto_enum_comparising_mode {#format_capn_proto_enum_comparising_mode} -Determines how to map ClickHouse `Enum` data type and [CapnProto](../../interfaces/formats.md#capnproto) `Enum` data type from schema. +Determines how to map ClickHouse `Enum` data type and [CapnProto](../../interfaces/formats.md/#capnproto) `Enum` data type from schema. Possible values: diff --git a/docs/en/operations/storing-data.md b/docs/en/operations/storing-data.md index 43623577e66..c0acd544fa9 100644 --- a/docs/en/operations/storing-data.md +++ b/docs/en/operations/storing-data.md @@ -7,13 +7,13 @@ title: "External Disks for Storing Data" Data, processed in ClickHouse, is usually stored in the local file system — on the same machine with the ClickHouse server. That requires large-capacity disks, which can be expensive enough. To avoid that you can store the data remotely — on [Amazon S3](https://aws.amazon.com/s3/) disks or in the Hadoop Distributed File System ([HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)). -To work with data stored on `Amazon S3` disks use [S3](../engines/table-engines/integrations/s3.md) table engine, and to work with data in the Hadoop Distributed File System — [HDFS](../engines/table-engines/integrations/hdfs.md) table engine. +To work with data stored on `Amazon S3` disks use [S3](/docs/en/engines/table-engines/integrations/s3.md) table engine, and to work with data in the Hadoop Distributed File System — [HDFS](/docs/en/engines/table-engines/integrations/hdfs.md) table engine. To load data from a web server with static files use a disk with type [web](#storing-data-on-webserver). ## Configuring HDFS {#configuring-hdfs} -[MergeTree](../engines/table-engines/mergetree-family/mergetree.md) and [Log](../engines/table-engines/log-family/log.md) family table engines can store data to HDFS using a disk with type `HDFS`. 
+[MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) and [Log](/docs/en/engines/table-engines/log-family/log.md) family table engines can store data to HDFS using a disk with type `HDFS`. Configuration markup: @@ -53,7 +53,7 @@ Optional parameters: ## Using Virtual File System for Data Encryption {#encrypted-virtual-file-system} -You can encrypt the data stored on [S3](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-s3), or [HDFS](#configuring-hdfs) external disks, or on a local disk. To turn on the encryption mode, in the configuration file you must define a disk with the type `encrypted` and choose a disk on which the data will be saved. An `encrypted` disk ciphers all written files on the fly, and when you read files from an `encrypted` disk it deciphers them automatically. So you can work with an `encrypted` disk like with a normal one. +You can encrypt the data stored on [S3](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-s3), or [HDFS](#configuring-hdfs) external disks, or on a local disk. To turn on the encryption mode, in the configuration file you must define a disk with the type `encrypted` and choose a disk on which the data will be saved. An `encrypted` disk ciphers all written files on the fly, and when you read files from an `encrypted` disk it deciphers them automatically. So you can work with an `encrypted` disk like with a normal one. Example of disk configuration: @@ -80,14 +80,14 @@ Required parameters: - `type` — `encrypted`. Otherwise the encrypted disk is not created. - `disk` — Type of disk for data storage. -- `key` — The key for encryption and decryption. Type: [Uint64](../sql-reference/data-types/int-uint.md). You can use `key_hex` parameter to encrypt in hexadecimal form. +- `key` — The key for encryption and decryption. Type: [Uint64](/docs/en/sql-reference/data-types/int-uint.md). You can use `key_hex` parameter to encrypt in hexadecimal form. You can specify multiple keys using the `id` attribute (see example above). Optional parameters: - `path` — Path to the location on the disk where the data will be saved. If not specified, the data will be saved in the root directory. - `current_key_id` — The key used for encryption. All the specified keys can be used for decryption, and you can always switch to another key while maintaining access to previously encrypted data. -- `algorithm` — [Algorithm](../sql-reference/statements/create/table.md#create-query-encryption-codecs) for encryption. Possible values: `AES_128_CTR`, `AES_192_CTR` or `AES_256_CTR`. Default value: `AES_128_CTR`. The key length depends on the algorithm: `AES_128_CTR` — 16 bytes, `AES_192_CTR` — 24 bytes, `AES_256_CTR` — 32 bytes. +- `algorithm` — [Algorithm](/docs/en/sql-reference/statements/create/table.md/#create-query-encryption-codecs) for encryption. Possible values: `AES_128_CTR`, `AES_192_CTR` or `AES_256_CTR`. Default value: `AES_128_CTR`. The key length depends on the algorithm: `AES_128_CTR` — 16 bytes, `AES_192_CTR` — 24 bytes, `AES_256_CTR` — 32 bytes. Example of disk configuration: @@ -265,9 +265,9 @@ Cache profile events: There is a tool `clickhouse-static-files-uploader`, which prepares a data directory for a given table (`SELECT data_paths FROM system.tables WHERE name = 'table_name'`). For each table you need, you get a directory of files. These files can be uploaded to, for example, a web server with static files. 
After this preparation, you can load this table into any ClickHouse server via `DiskWeb`. -This is a read-only disk. Its data is only read and never modified. A new table is loaded to this disk via `ATTACH TABLE` query (see example below). Local disk is not actually used, each `SELECT` query will result in a `http` request to fetch required data. All modification of the table data will result in an exception, i.e. the following types of queries are not allowed: [CREATE TABLE](../sql-reference/statements/create/table.md), [ALTER TABLE](../sql-reference/statements/alter/index.md), [RENAME TABLE](../sql-reference/statements/rename.md#misc_operations-rename_table), [DETACH TABLE](../sql-reference/statements/detach.md) and [TRUNCATE TABLE](../sql-reference/statements/truncate.md). +This is a read-only disk. Its data is only read and never modified. A new table is loaded to this disk via `ATTACH TABLE` query (see example below). Local disk is not actually used, each `SELECT` query will result in a `http` request to fetch required data. All modification of the table data will result in an exception, i.e. the following types of queries are not allowed: [CREATE TABLE](/docs/en/sql-reference/statements/create/table.md), [ALTER TABLE](/docs/en/sql-reference/statements/alter/overview.md), [RENAME TABLE](/docs/en/sql-reference/statements/rename.md/#misc_operations-rename_table), [DETACH TABLE](/docs/en/sql-reference/statements/detach.md) and [TRUNCATE TABLE](/docs/en/sql-reference/statements/truncate.md). -Web server storage is supported only for the [MergeTree](../engines/table-engines/mergetree-family/mergetree.md) and [Log](../engines/table-engines/log-family/log.md) engine families. To access the data stored on a `web` disk, use the [storage_policy](../engines/table-engines/mergetree-family/mergetree.md#terms) setting when executing the query. For example, `ATTACH TABLE table_web UUID '{}' (id Int32) ENGINE = MergeTree() ORDER BY id SETTINGS storage_policy = 'web'`. +Web server storage is supported only for the [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) and [Log](/docs/en/engines/table-engines/log-family/log.md) engine families. To access the data stored on a `web` disk, use the [storage_policy](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#terms) setting when executing the query. For example, `ATTACH TABLE table_web UUID '{}' (id Int32) ENGINE = MergeTree() ORDER BY id SETTINGS storage_policy = 'web'`. A ready test case. You need to add this configuration to config: @@ -451,7 +451,7 @@ Optional parameters: - `remote_fs_read_backoff_threashold` — The maximum wait time when trying to read data for remote disk. Default value: `10000` seconds. - `remote_fs_read_backoff_max_tries` — The maximum number of attempts to read with backoff. Default value: `5`. -If a query fails with an exception `DB:Exception Unreachable URL`, then you can try to adjust the settings: [http_connection_timeout](../operations/settings/settings.md#http_connection_timeout), [http_receive_timeout](../operations/settings/settings.md#http_receive_timeout), [keep_alive_timeout](../operations/server-configuration-parameters/settings.md#keep-alive-timeout). 
+If a query fails with an exception `DB:Exception Unreachable URL`, then you can try to adjust the settings: [http_connection_timeout](/docs/en/operations/settings/settings.md/#http_connection_timeout), [http_receive_timeout](/docs/en/operations/settings/settings.md/#http_receive_timeout), [keep_alive_timeout](/docs/en/operations/server-configuration-parameters/settings.md/#keep-alive-timeout). To get files for upload run: `clickhouse static-files-disk-uploader --metadata-path --output-dir ` (`--metadata-path` can be found in query `SELECT data_paths FROM system.tables WHERE name = 'table_name'`). @@ -460,7 +460,7 @@ When loading files by `endpoint`, they must be loaded into `/store/` p If URL is not reachable on disk load when the server is starting up tables, then all errors are caught. If in this case there were errors, tables can be reloaded (become visible) via `DETACH TABLE table_name` -> `ATTACH TABLE table_name`. If metadata was successfully loaded at server startup, then tables are available straight away. -Use [http_max_single_read_retries](../operations/settings/settings.md#http-max-single-read-retries) setting to limit the maximum number of retries during a single HTTP read. +Use [http_max_single_read_retries](/docs/en/operations/settings/settings.md/#http-max-single-read-retries) setting to limit the maximum number of retries during a single HTTP read. ## Zero-copy Replication (not ready for production) {#zero-copy} diff --git a/docs/en/operations/system-tables/mutations.md b/docs/en/operations/system-tables/mutations.md index 45447f3644e..98237562f37 100644 --- a/docs/en/operations/system-tables/mutations.md +++ b/docs/en/operations/system-tables/mutations.md @@ -3,31 +3,31 @@ slug: /en/operations/system-tables/mutations --- # mutations -The table contains information about [mutations](../../sql-reference/statements/alter/index.md#mutations) of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables and their progress. Each mutation command is represented by a single row. +The table contains information about [mutations](/docs/en/sql-reference/statements/alter/overview.md/#mutations) of [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) tables and their progress. Each mutation command is represented by a single row. Columns: -- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database to which the mutation was applied. +- `database` ([String](/docs/en/sql-reference/data-types/string.md)) — The name of the database to which the mutation was applied. -- `table` ([String](../../sql-reference/data-types/string.md)) — The name of the table to which the mutation was applied. +- `table` ([String](/docs/en/sql-reference/data-types/string.md)) — The name of the table to which the mutation was applied. -- `mutation_id` ([String](../../sql-reference/data-types/string.md)) — The ID of the mutation. For replicated tables these IDs correspond to znode names in the `/mutations/` directory in ClickHouse Keeper. For non-replicated tables the IDs correspond to file names in the data directory of the table. +- `mutation_id` ([String](/docs/en/sql-reference/data-types/string.md)) — The ID of the mutation. For replicated tables these IDs correspond to znode names in the `/mutations/` directory in ClickHouse Keeper. For non-replicated tables the IDs correspond to file names in the data directory of the table. 
-- `command` ([String](../../sql-reference/data-types/string.md)) — The mutation command string (the part of the query after `ALTER TABLE [db.]table`). +- `command` ([String](/docs/en/sql-reference/data-types/string.md)) — The mutation command string (the part of the query after `ALTER TABLE [db.]table`). -- `create_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date and time when the mutation command was submitted for execution. +- `create_time` ([Datetime](/docs/en/sql-reference/data-types/datetime.md)) — Date and time when the mutation command was submitted for execution. -- `block_numbers.partition_id` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — For mutations of replicated tables, the array contains the partitions' IDs (one record for each partition). For mutations of non-replicated tables the array is empty. +- `block_numbers.partition_id` ([Array](/docs/en/sql-reference/data-types/array.md)([String](/docs/en/sql-reference/data-types/string.md))) — For mutations of replicated tables, the array contains the partitions' IDs (one record for each partition). For mutations of non-replicated tables the array is empty. -- `block_numbers.number` ([Array](../../sql-reference/data-types/array.md)([Int64](../../sql-reference/data-types/int-uint.md))) — For mutations of replicated tables, the array contains one record for each partition, with the block number that was acquired by the mutation. Only parts that contain blocks with numbers less than this number will be mutated in the partition. +- `block_numbers.number` ([Array](/docs/en/sql-reference/data-types/array.md)([Int64](/docs/en/sql-reference/data-types/int-uint.md))) — For mutations of replicated tables, the array contains one record for each partition, with the block number that was acquired by the mutation. Only parts that contain blocks with numbers less than this number will be mutated in the partition. In non-replicated tables, block numbers in all partitions form a single sequence. This means that for mutations of non-replicated tables, the column will contain one record with a single block number acquired by the mutation. -- `parts_to_do_names` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — An array of names of data parts that need to be mutated for the mutation to complete. +- `parts_to_do_names` ([Array](/docs/en/sql-reference/data-types/array.md)([String](/docs/en/sql-reference/data-types/string.md))) — An array of names of data parts that need to be mutated for the mutation to complete. -- `parts_to_do` ([Int64](../../sql-reference/data-types/int-uint.md)) — The number of data parts that need to be mutated for the mutation to complete. +- `parts_to_do` ([Int64](/docs/en/sql-reference/data-types/int-uint.md)) — The number of data parts that need to be mutated for the mutation to complete. -- `is_done` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The flag whether the mutation is done or not. Possible values: +- `is_done` ([UInt8](/docs/en/sql-reference/data-types/int-uint.md)) — The flag whether the mutation is done or not. Possible values: - `1` if the mutation is completed, - `0` if the mutation is still in process. 
@@ -37,16 +37,16 @@ Even if `parts_to_do = 0` it is possible that a mutation of a replicated table i If there were problems with mutating some data parts, the following columns contain additional information: -- `latest_failed_part` ([String](../../sql-reference/data-types/string.md)) — The name of the most recent part that could not be mutated. +- `latest_failed_part` ([String](/docs/en/sql-reference/data-types/string.md)) — The name of the most recent part that could not be mutated. -- `latest_fail_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — The date and time of the most recent part mutation failure. +- `latest_fail_time` ([Datetime](/docs/en/sql-reference/data-types/datetime.md)) — The date and time of the most recent part mutation failure. -- `latest_fail_reason` ([String](../../sql-reference/data-types/string.md)) — The exception message that caused the most recent part mutation failure. +- `latest_fail_reason` ([String](/docs/en/sql-reference/data-types/string.md)) — The exception message that caused the most recent part mutation failure. **See Also** -- [Mutations](../../sql-reference/statements/alter/index.md#mutations) -- [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table engine -- [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family +- [Mutations](/docs/en/sql-reference/statements/alter/overview.md/#mutations) +- [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) table engine +- [ReplicatedMergeTree](/docs/en/engines/table-engines/mergetree-family/replication.md) family [Original article](https://clickhouse.com/docs/en/operations/system-tables/mutations) diff --git a/docs/en/operations/system-tables/parts.md b/docs/en/operations/system-tables/parts.md index f1d60896a2e..9b6a3495e1e 100644 --- a/docs/en/operations/system-tables/parts.md +++ b/docs/en/operations/system-tables/parts.md @@ -9,7 +9,7 @@ Each row describes one data part. Columns: -- `partition` ([String](../../sql-reference/data-types/string.md)) – The partition name. To learn what a partition is, see the description of the [ALTER](../../sql-reference/statements/alter/index.md#query_language_queries_alter) query. +- `partition` ([String](../../sql-reference/data-types/string.md)) – The partition name. To learn what a partition is, see the description of the [ALTER](../../sql-reference/statements/alter/overview.md/#query_language_queries_alter) query. Formats: @@ -75,7 +75,7 @@ Columns: - `primary_key_bytes_in_memory_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md)) – The amount of memory (in bytes) reserved for primary key values. -- `is_frozen` ([UInt8](../../sql-reference/data-types/int-uint.md)) – Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup does not exist. For more details, see [FREEZE PARTITION](../../sql-reference/statements/alter/partition.md#alter_freeze-partition) +- `is_frozen` ([UInt8](../../sql-reference/data-types/int-uint.md)) – Flag that shows that a partition data backup exists. 1, the backup exists. 0, the backup does not exist. For more details, see [FREEZE PARTITION](../../sql-reference/statements/alter/partition.md/#alter_freeze-partition) - `database` ([String](../../sql-reference/data-types/string.md)) – Name of the database. @@ -87,25 +87,25 @@ Columns: - `disk_name` ([String](../../sql-reference/data-types/string.md)) – Name of a disk that stores the data part. 
-- `hash_of_all_files` ([String](../../sql-reference/data-types/string.md)) – [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of compressed files. +- `hash_of_all_files` ([String](../../sql-reference/data-types/string.md)) – [sipHash128](../../sql-reference/functions/hash-functions.md/#hash_functions-siphash128) of compressed files. -- `hash_of_uncompressed_files` ([String](../../sql-reference/data-types/string.md)) – [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of uncompressed files (files with marks, index file etc.). +- `hash_of_uncompressed_files` ([String](../../sql-reference/data-types/string.md)) – [sipHash128](../../sql-reference/functions/hash-functions.md/#hash_functions-siphash128) of uncompressed files (files with marks, index file etc.). -- `uncompressed_hash_of_compressed_files` ([String](../../sql-reference/data-types/string.md)) – [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of data in the compressed files as if they were uncompressed. +- `uncompressed_hash_of_compressed_files` ([String](../../sql-reference/data-types/string.md)) – [sipHash128](../../sql-reference/functions/hash-functions.md/#hash_functions-siphash128) of data in the compressed files as if they were uncompressed. -- `delete_ttl_info_min` ([DateTime](../../sql-reference/data-types/datetime.md)) — The minimum value of the date and time key for [TTL DELETE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl). +- `delete_ttl_info_min` ([DateTime](../../sql-reference/data-types/datetime.md)) — The minimum value of the date and time key for [TTL DELETE rule](../../engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-ttl). -- `delete_ttl_info_max` ([DateTime](../../sql-reference/data-types/datetime.md)) — The maximum value of the date and time key for [TTL DELETE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl). +- `delete_ttl_info_max` ([DateTime](../../sql-reference/data-types/datetime.md)) — The maximum value of the date and time key for [TTL DELETE rule](../../engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-ttl). -- `move_ttl_info.expression` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of expressions. Each expression defines a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl). +- `move_ttl_info.expression` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of expressions. Each expression defines a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-ttl). :::warning The `move_ttl_info.expression` array is kept mostly for backward compatibility, now the simpliest way to check `TTL MOVE` rule is to use the `move_ttl_info.min` and `move_ttl_info.max` fields. ::: -- `move_ttl_info.min` ([Array](../../sql-reference/data-types/array.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Array of date and time values. Each element describes the minimum key value for a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl). +- `move_ttl_info.min` ([Array](../../sql-reference/data-types/array.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Array of date and time values. 
Each element describes the minimum key value for a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-ttl). -- `move_ttl_info.max` ([Array](../../sql-reference/data-types/array.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Array of date and time values. Each element describes the maximum key value for a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl). +- `move_ttl_info.max` ([Array](../../sql-reference/data-types/array.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Array of date and time values. Each element describes the maximum key value for a [TTL MOVE rule](../../engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-ttl). - `bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) – Alias for `bytes_on_disk`. @@ -166,6 +166,6 @@ move_ttl_info.max: [] **See Also** - [MergeTree family](../../engines/table-engines/mergetree-family/mergetree.md) -- [TTL for Columns and Tables](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-ttl) +- [TTL for Columns and Tables](../../engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-ttl) [Original article](https://clickhouse.com/docs/en/operations/system-tables/parts) diff --git a/docs/en/operations/system-tables/parts_columns.md b/docs/en/operations/system-tables/parts_columns.md index 68757ddfbff..c909d075f17 100644 --- a/docs/en/operations/system-tables/parts_columns.md +++ b/docs/en/operations/system-tables/parts_columns.md @@ -9,7 +9,7 @@ Each row describes one data part. Columns: -- `partition` ([String](../../sql-reference/data-types/string.md)) — The partition name. To learn what a partition is, see the description of the [ALTER](../../sql-reference/statements/alter/index.md#query_language_queries_alter) query. +- `partition` ([String](../../sql-reference/data-types/string.md)) — The partition name. To learn what a partition is, see the description of the [ALTER](../../sql-reference/statements/alter/overview.md/#query_language_queries_alter) query. Formats: diff --git a/docs/en/sql-reference/statements/alter/column.md b/docs/en/sql-reference/statements/alter/column.md index cc278465437..a19b946b3cd 100644 --- a/docs/en/sql-reference/statements/alter/column.md +++ b/docs/en/sql-reference/statements/alter/column.md @@ -35,11 +35,11 @@ These actions are described in detail below. ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after | FIRST] ``` -Adds a new column to the table with the specified `name`, `type`, [`codec`](../create/table.md#codecs) and `default_expr` (see the section [Default expressions](../../../sql-reference/statements/create/table.md#create-default-values)). +Adds a new column to the table with the specified `name`, `type`, [`codec`](../create/table.md/#codecs) and `default_expr` (see the section [Default expressions](/docs/en/sql-reference/statements/create/table.md/#create-default-values)). If the `IF NOT EXISTS` clause is included, the query won’t return an error if the column already exists. If you specify `AFTER name_after` (the name of another column), the column is added after the specified one in the list of table columns. If you want to add a column to the beginning of the table use the `FIRST` clause. Otherwise, the column is added to the end of the table. For a chain of actions, `name_after` can be the name of a column that is added in one of the previous actions. 
-Adding a column just changes the table structure, without performing any actions with data. The data does not appear on the disk after `ALTER`. If the data is missing for a column when reading from the table, it is filled in with default values (by performing the default expression if there is one, or using zeros or empty strings). The column appears on the disk after merging data parts (see [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md)). +Adding a column just changes the table structure, without performing any actions with data. The data does not appear on the disk after `ALTER`. If the data is missing for a column when reading from the table, it is filled in with default values (by performing the default expression if there is one, or using zeros or empty strings). The column appears on the disk after merging data parts (see [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md)). This approach allows us to complete the `ALTER` query instantly, without increasing the volume of old data. @@ -76,7 +76,7 @@ Deletes the column with the name `name`. If the `IF EXISTS` clause is specified, Deletes data from the file system. Since this deletes entire files, the query is completed almost instantly. :::warning -You can’t delete a column if it is referenced by [materialized view](../../../sql-reference/statements/create/view.md#materialized). Otherwise, it returns an error. +You can’t delete a column if it is referenced by [materialized view](/docs/en/sql-reference/statements/create/view.md/#materialized). Otherwise, it returns an error. ::: Example: @@ -107,7 +107,7 @@ ALTER TABLE visits RENAME COLUMN webBrowser TO browser CLEAR COLUMN [IF EXISTS] name IN PARTITION partition_name ``` -Resets all data in a column for a specified partition. Read more about setting the partition name in the section [How to set the partition expression](partition.md#how-to-set-partition-expression). +Resets all data in a column for a specified partition. Read more about setting the partition name in the section [How to set the partition expression](partition.md/#how-to-set-partition-expression). If the `IF EXISTS` clause is specified, the query won’t return an error if the column does not exist. @@ -127,7 +127,7 @@ Adds a comment to the column. If the `IF EXISTS` clause is specified, the query Each column can have one comment. If a comment already exists for the column, a new comment overwrites the previous comment. -Comments are stored in the `comment_expression` column returned by the [DESCRIBE TABLE](../../../sql-reference/statements/describe-table.md) query. +Comments are stored in the `comment_expression` column returned by the [DESCRIBE TABLE](/docs/en/sql-reference/statements/describe-table.md) query. Example: @@ -152,15 +152,15 @@ This query changes the `name` column properties: - TTL -For examples of columns compression CODECS modifying, see [Column Compression Codecs](../create/table.md#codecs). +For examples of columns compression CODECS modifying, see [Column Compression Codecs](../create/table.md/#codecs). -For examples of columns TTL modifying, see [Column TTL](../../../engines/table-engines/mergetree-family/mergetree.md#mergetree-column-ttl). +For examples of columns TTL modifying, see [Column TTL](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#mergetree-column-ttl). If the `IF EXISTS` clause is specified, the query won’t return an error if the column does not exist. 
The query also can change the order of the columns using `FIRST | AFTER` clause, see [ADD COLUMN](#alter_add-column) description. -When changing the type, values are converted as if the [toType](../../../sql-reference/functions/type-conversion-functions.md) functions were applied to them. If only the default expression is changed, the query does not do anything complex, and is completed almost instantly. +When changing the type, values are converted as if the [toType](/docs/en/sql-reference/functions/type-conversion-functions.md) functions were applied to them. If only the default expression is changed, the query does not do anything complex, and is completed almost instantly. Example: @@ -246,7 +246,7 @@ SELECT groupArray(x), groupArray(s) FROM tmp; **See Also** -- [MATERIALIZED](../../statements/create/table.md#materialized). +- [MATERIALIZED](/docs/en/sql-reference/statements/create/table.md/#materialized). ## Limitations @@ -254,8 +254,8 @@ The `ALTER` query lets you create and delete separate elements (columns) in nest There is no support for deleting columns in the primary key or the sampling key (columns that are used in the `ENGINE` expression). Changing the type for columns that are included in the primary key is only possible if this change does not cause the data to be modified (for example, you are allowed to add values to an Enum or to change a type from `DateTime` to `UInt32`). -If the `ALTER` query is not sufficient to make the table changes you need, you can create a new table, copy the data to it using the [INSERT SELECT](../../../sql-reference/statements/insert-into.md#insert_query_insert-select) query, then switch the tables using the [RENAME](../../../sql-reference/statements/rename.md#rename-table) query and delete the old table. You can use the [clickhouse-copier](../../../operations/utilities/clickhouse-copier.md) as an alternative to the `INSERT SELECT` query. +If the `ALTER` query is not sufficient to make the table changes you need, you can create a new table, copy the data to it using the [INSERT SELECT](/docs/en/sql-reference/statements/insert-into.md/#insert_query_insert-select) query, then switch the tables using the [RENAME](/docs/en/sql-reference/statements/rename.md/#rename-table) query and delete the old table. You can use the [clickhouse-copier](/docs/en/operations/utilities/clickhouse-copier.md) as an alternative to the `INSERT SELECT` query. The `ALTER` query blocks all reads and writes for the table. In other words, if a long `SELECT` is running at the time of the `ALTER` query, the `ALTER` query will wait for it to complete. At the same time, all new queries to the same table will wait while this `ALTER` is running. -For tables that do not store data themselves (such as [Merge](../../../sql-reference/statements/alter/index.md) and [Distributed](../../../sql-reference/statements/alter/index.md)), `ALTER` just changes the table structure, and does not change the structure of subordinate tables. For example, when running ALTER for a `Distributed` table, you will also need to run `ALTER` for the tables on all remote servers. +For tables that do not store data themselves (such as [Merge](/docs/en/sql-reference/statements/alter/overview.md) and [Distributed](/docs/en/sql-reference/statements/alter/overview.md)), `ALTER` just changes the table structure, and does not change the structure of subordinate tables. For example, when running ALTER for a `Distributed` table, you will also need to run `ALTER` for the tables on all remote servers. 
diff --git a/docs/en/sql-reference/statements/alter/delete.md b/docs/en/sql-reference/statements/alter/delete.md index ba5d01d9b4d..05b33253bfe 100644 --- a/docs/en/sql-reference/statements/alter/delete.md +++ b/docs/en/sql-reference/statements/alter/delete.md @@ -10,21 +10,21 @@ sidebar_label: DELETE ALTER TABLE [db.]table [ON CLUSTER cluster] DELETE WHERE filter_expr ``` -Deletes data matching the specified filtering expression. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). +Deletes data matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/overview.md/#mutations). :::note -The `ALTER TABLE` prefix makes this syntax different from most other systems supporting SQL. It is intended to signify that unlike similar queries in OLTP databases this is a heavy operation not designed for frequent use. `ALTER TABLE` is considered a heavyweight operation that requires the underlying data to be merged before it is deleted. For MergeTree tables, consider using the [`DELETE FROM` query](../delete.md), which performs a lightweight delete and can be considerably faster. +The `ALTER TABLE` prefix makes this syntax different from most other systems supporting SQL. It is intended to signify that unlike similar queries in OLTP databases this is a heavy operation not designed for frequent use. `ALTER TABLE` is considered a heavyweight operation that requires the underlying data to be merged before it is deleted. For MergeTree tables, consider using the [`DELETE FROM` query](/docs/en/sql-reference/statements/delete.md), which performs a lightweight delete and can be considerably faster. ::: The `filter_expr` must be of type `UInt8`. The query deletes rows in the table for which this expression takes a non-zero value. One query can contain several commands separated by commas. -The synchronicity of the query processing is defined by the [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting. By default, it is asynchronous. +The synchronicity of the query processing is defined by the [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting. By default, it is asynchronous. 
**See also** -- [Mutations](../../../sql-reference/statements/alter/index.md#mutations) -- [Synchronicity of ALTER Queries](../../../sql-reference/statements/alter/index.md#synchronicity-of-alter-queries) -- [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting +- [Mutations](/docs/en/sql-reference/statements/alter/overview.md/#mutations) +- [Synchronicity of ALTER Queries](/docs/en/sql-reference/statements/alter/overview.md/#synchronicity-of-alter-queries) +- [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting diff --git a/docs/en/sql-reference/statements/alter/overview.md b/docs/en/sql-reference/statements/alter/overview.md index 4027429cf0d..1c4d62f3190 100644 --- a/docs/en/sql-reference/statements/alter/overview.md +++ b/docs/en/sql-reference/statements/alter/overview.md @@ -8,43 +8,43 @@ sidebar_label: ALTER Most `ALTER TABLE` queries modify table settings or data: -- [COLUMN](../../../sql-reference/statements/alter/column.md) -- [PARTITION](../../../sql-reference/statements/alter/partition.md) -- [DELETE](../../../sql-reference/statements/alter/delete.md) -- [UPDATE](../../../sql-reference/statements/alter/update.md) -- [ORDER BY](../../../sql-reference/statements/alter/order-by.md) -- [INDEX](../../../sql-reference/statements/alter/index/index.md) -- [CONSTRAINT](../../../sql-reference/statements/alter/constraint.md) -- [TTL](../../../sql-reference/statements/alter/ttl.md) +- [COLUMN](/docs/en/sql-reference/statements/alter/column.md) +- [PARTITION](/docs/en/sql-reference/statements/alter/partition.md) +- [DELETE](/docs/en/sql-reference/statements/alter/delete.md) +- [UPDATE](/docs/en/sql-reference/statements/alter/update.md) +- [ORDER BY](/docs/en/sql-reference/statements/alter/order-by.md) +- [INDEX](/docs/en/sql-reference/statements/alter/skipping-index.md) +- [CONSTRAINT](/docs/en/sql-reference/statements/alter/constraint.md) +- [TTL](/docs/en/sql-reference/statements/alter/ttl.md) :::note -Most `ALTER TABLE` queries are supported only for [\*MergeTree](../../../engines/table-engines/mergetree-family/index.md) tables, as well as [Merge](../../../engines/table-engines/special/merge.md) and [Distributed](../../../engines/table-engines/special/distributed.md). +Most `ALTER TABLE` queries are supported only for [\*MergeTree](/docs/en/engines/table-engines/mergetree-family/index.md) tables, as well as [Merge](/docs/en/engines/table-engines/special/merge.md) and [Distributed](/docs/en/engines/table-engines/special/distributed.md). ::: These `ALTER` statements manipulate views: -- [ALTER TABLE ... MODIFY QUERY](../../../sql-reference/statements/alter/view.md) — Modifies a [Materialized view](../create/view.md#materialized) structure. -- [ALTER LIVE VIEW](../../../sql-reference/statements/alter/view.md#alter-live-view) — Refreshes a [Live view](../create/view.md#live-view). +- [ALTER TABLE ... MODIFY QUERY](/docs/en/sql-reference/statements/alter/view.md) — Modifies a [Materialized view](/docs/en/sql-reference/statements/create/view.md/#materialized) structure. +- [ALTER LIVE VIEW](/docs/en/sql-reference/statements/alter/view.md/#alter-live-view) — Refreshes a [Live view](/docs/en/sql-reference/statements/create/view.md/#live-view). 
These `ALTER` statements modify entities related to role-based access control: -- [USER](../../../sql-reference/statements/alter/user.md) -- [ROLE](../../../sql-reference/statements/alter/role.md) -- [QUOTA](../../../sql-reference/statements/alter/quota.md) -- [ROW POLICY](../../../sql-reference/statements/alter/row-policy.md) -- [SETTINGS PROFILE](../../../sql-reference/statements/alter/settings-profile.md) +- [USER](/docs/en/sql-reference/statements/alter/user.md) +- [ROLE](/docs/en/sql-reference/statements/alter/role.md) +- [QUOTA](/docs/en/sql-reference/statements/alter/quota.md) +- [ROW POLICY](/docs/en/sql-reference/statements/alter/row-policy.md) +- [SETTINGS PROFILE](/docs/en/sql-reference/statements/alter/settings-profile.md) -[ALTER TABLE ... MODIFY COMMENT](../../../sql-reference/statements/alter/comment.md) statement adds, modifies, or removes comments to the table, regardless if it was set before or not. +[ALTER TABLE ... MODIFY COMMENT](/docs/en/sql-reference/statements/alter/comment.md) statement adds, modifies, or removes comments to the table, regardless if it was set before or not. ## Mutations -`ALTER` queries that are intended to manipulate table data are implemented with a mechanism called “mutations”, most notably [ALTER TABLE … DELETE](../../../sql-reference/statements/alter/delete.md) and [ALTER TABLE … UPDATE](../../../sql-reference/statements/alter/update.md). They are asynchronous background processes similar to merges in [MergeTree](../../../engines/table-engines/mergetree-family/index.md) tables that to produce new “mutated” versions of parts. +`ALTER` queries that are intended to manipulate table data are implemented with a mechanism called “mutations”, most notably [ALTER TABLE … DELETE](/docs/en/sql-reference/statements/alter/delete.md) and [ALTER TABLE … UPDATE](/docs/en/sql-reference/statements/alter/update.md). They are asynchronous background processes similar to merges in [MergeTree](/docs/en/engines/table-engines/mergetree-family/index.md) tables that to produce new “mutated” versions of parts. For `*MergeTree` tables mutations execute by **rewriting whole data parts**. There is no atomicity - parts are substituted for mutated parts as soon as they are ready and a `SELECT` query that started executing during a mutation will see data from parts that have already been mutated along with data from parts that have not been mutated yet. Mutations are totally ordered by their creation order and are applied to each part in that order. Mutations are also partially ordered with `INSERT INTO` queries: data that was inserted into the table before the mutation was submitted will be mutated and data that was inserted after that will not be mutated. Note that mutations do not block inserts in any way. -A mutation query returns immediately after the mutation entry is added (in case of replicated tables to ZooKeeper, for non-replicated tables - to the filesystem). The mutation itself executes asynchronously using the system profile settings. To track the progress of mutations you can use the [`system.mutations`](../../../operations/system-tables/mutations.md#system_tables-mutations) table. A mutation that was successfully submitted will continue to execute even if ClickHouse servers are restarted. There is no way to roll back the mutation once it is submitted, but if the mutation is stuck for some reason it can be cancelled with the [`KILL MUTATION`](../../../sql-reference/statements/kill.md#kill-mutation) query. 
+A mutation query returns immediately after the mutation entry is added (in case of replicated tables to ZooKeeper, for non-replicated tables - to the filesystem). The mutation itself executes asynchronously using the system profile settings. To track the progress of mutations you can use the [`system.mutations`](/docs/en/operations/system-tables/mutations.md/#system_tables-mutations) table. A mutation that was successfully submitted will continue to execute even if ClickHouse servers are restarted. There is no way to roll back the mutation once it is submitted, but if the mutation is stuck for some reason it can be cancelled with the [`KILL MUTATION`](/docs/en/sql-reference/statements/kill.md/#kill-mutation) query. Entries for finished mutations are not deleted right away (the number of preserved entries is determined by the `finished_mutations_to_keep` storage engine parameter). Older mutation entries are deleted. @@ -52,12 +52,12 @@ Entries for finished mutations are not deleted right away (the number of preserv For non-replicated tables, all `ALTER` queries are performed synchronously. For replicated tables, the query just adds instructions for the appropriate actions to `ZooKeeper`, and the actions themselves are performed as soon as possible. However, the query can wait for these actions to be completed on all the replicas. -For all `ALTER` queries, you can use the [replication_alter_partitions_sync](../../../operations/settings/settings.md#replication-alter-partitions-sync) setting to set up waiting. +For all `ALTER` queries, you can use the [replication_alter_partitions_sync](/docs/en/operations/settings/settings.md/#replication-alter-partitions-sync) setting to set up waiting. -You can specify how long (in seconds) to wait for inactive replicas to execute all `ALTER` queries with the [replication_wait_for_inactive_replica_timeout](../../../operations/settings/settings.md#replication-wait-for-inactive-replica-timeout) setting. +You can specify how long (in seconds) to wait for inactive replicas to execute all `ALTER` queries with the [replication_wait_for_inactive_replica_timeout](/docs/en/operations/settings/settings.md/#replication-wait-for-inactive-replica-timeout) setting. :::note For all `ALTER` queries, if `replication_alter_partitions_sync = 2` and some replicas are not active for more than the time, specified in the `replication_wait_for_inactive_replica_timeout` setting, then an exception `UNFINISHED` is thrown. ::: -For `ALTER TABLE ... UPDATE|DELETE` queries the synchronicity is defined by the [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting. +For `ALTER TABLE ... UPDATE|DELETE` queries the synchronicity is defined by the [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting. 
diff --git a/docs/en/sql-reference/statements/alter/partition.md b/docs/en/sql-reference/statements/alter/partition.md index 2d89c1d5d18..20eb76a8c8e 100644 --- a/docs/en/sql-reference/statements/alter/partition.md +++ b/docs/en/sql-reference/statements/alter/partition.md @@ -5,7 +5,7 @@ sidebar_label: PARTITION title: "Manipulating Partitions and Parts" --- -The following operations with [partitions](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md) are available: +The following operations with [partitions](/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md) are available: - [DETACH PARTITION\|PART](#detach-partitionpart) — Moves a partition or part to the `detached` directory and forget it. - [DROP PARTITION\|PART](#drop-partitionpart) — Deletes a partition or part. @@ -43,7 +43,7 @@ Read about setting the partition expression in a section [How to set the partiti After the query is executed, you can do whatever you want with the data in the `detached` directory — delete it from the file system, or just leave it. -This query is replicated – it moves the data to the `detached` directory on all replicas. Note that you can execute this query only on a leader replica. To find out if a replica is a leader, perform the `SELECT` query to the [system.replicas](../../../operations/system-tables/replicas.md#system_tables-replicas) table. Alternatively, it is easier to make a `DETACH` query on all replicas - all the replicas throw an exception, except the leader replicas (as multiple leaders are allowed). +This query is replicated – it moves the data to the `detached` directory on all replicas. Note that you can execute this query only on a leader replica. To find out if a replica is a leader, perform the `SELECT` query to the [system.replicas](/docs/en/operations/system-tables/replicas.md/#system_tables-replicas) table. Alternatively, it is easier to make a `DETACH` query on all replicas - all the replicas throw an exception, except the leader replicas (as multiple leaders are allowed). ## DROP PARTITION\|PART @@ -175,7 +175,7 @@ At the time of execution, for a data snapshot, the query creates hardlinks to a - if the `WITH NAME` parameter is specified, then the value of the `'backup_name'` parameter is used instead of the incremental number. :::note -If you use [a set of disks for data storage in a table](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes), the `shadow/N` directory appears on every disk, storing data parts that matched by the `PARTITION` expression. +If you use [a set of disks for data storage in a table](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-multiple-volumes), the `shadow/N` directory appears on every disk, storing data parts that matched by the `PARTITION` expression. ::: The same structure of directories is created inside the backup as inside `/var/lib/clickhouse/`. The query performs `chmod` for all files, forbidding writing into them. @@ -249,7 +249,7 @@ Although the query is called `ALTER TABLE`, it does not change the table structu ## MOVE PARTITION\|PART -Moves partitions or data parts to another volume or disk for `MergeTree`-engine tables. See [Using Multiple Block Devices for Data Storage](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes). +Moves partitions or data parts to another volume or disk for `MergeTree`-engine tables. 
See [Using Multiple Block Devices for Data Storage](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-multiple-volumes). ``` sql ALTER TABLE table_name [ON CLUSTER cluster] MOVE PARTITION|PART partition_expr TO DISK|VOLUME 'disk_name' @@ -270,7 +270,7 @@ ALTER TABLE hits MOVE PARTITION '2019-09-01' TO DISK 'fast_ssd' ## UPDATE IN PARTITION -Manipulates data in the specifies partition matching the specified filtering expression. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). +Manipulates data in the specifies partition matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/overview.md/#mutations). Syntax: @@ -286,11 +286,11 @@ ALTER TABLE mt UPDATE x = x + 1 IN PARTITION 2 WHERE p = 2; ### See Also -- [UPDATE](../../../sql-reference/statements/alter/update.md#alter-table-update-statements) +- [UPDATE](/docs/en/sql-reference/statements/alter/update.md/#alter-table-update-statements) ## DELETE IN PARTITION -Deletes data in the specifies partition matching the specified filtering expression. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). +Deletes data in the specifies partition matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/overview.md/#mutations). Syntax: @@ -306,7 +306,7 @@ ALTER TABLE mt DELETE IN PARTITION 2 WHERE p = 2; ### See Also -- [DELETE](../../../sql-reference/statements/alter/delete.md#alter-mutations) +- [DELETE](/docs/en/sql-reference/statements/alter/delete.md/#alter-mutations) ## How to Set Partition Expression @@ -315,16 +315,16 @@ You can specify the partition expression in `ALTER ... PARTITION` queries in dif - As a value from the `partition` column of the `system.parts` table. For example, `ALTER TABLE visits DETACH PARTITION 201901`. - As a tuple of expressions or constants that matches (in types) the table partitioning keys tuple. In the case of a single element partitioning key, the expression should be wrapped in the `tuple (...)` function. For example, `ALTER TABLE visits DETACH PARTITION tuple(toYYYYMM(toDate('2019-01-25')))`. - Using the partition ID. Partition ID is a string identifier of the partition (human-readable, if possible) that is used as the names of partitions in the file system and in ZooKeeper. The partition ID must be specified in the `PARTITION ID` clause, in a single quotes. For example, `ALTER TABLE visits DETACH PARTITION ID '201901'`. -- In the [ALTER ATTACH PART](#alter_attach-partition) and [DROP DETACHED PART](#alter_drop-detached) query, to specify the name of a part, use string literal with a value from the `name` column of the [system.detached_parts](../../../operations/system-tables/detached_parts.md#system_tables-detached_parts) table. For example, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`. +- In the [ALTER ATTACH PART](#alter_attach-partition) and [DROP DETACHED PART](#alter_drop-detached) query, to specify the name of a part, use string literal with a value from the `name` column of the [system.detached_parts](/docs/en/operations/system-tables/detached_parts.md/#system_tables-detached_parts) table. For example, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`. Usage of quotes when specifying the partition depends on the type of partition expression. For example, for the `String` type, you have to specify its name in quotes (`'`). For the `Date` and `Int*` types no quotes are needed. 
-All the rules above are also true for the [OPTIMIZE](../../../sql-reference/statements/optimize.md) query. If you need to specify the only partition when optimizing a non-partitioned table, set the expression `PARTITION tuple()`. For example: +All the rules above are also true for the [OPTIMIZE](/docs/en/sql-reference/statements/optimize.md) query. If you need to specify the only partition when optimizing a non-partitioned table, set the expression `PARTITION tuple()`. For example: ``` sql OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL; ``` -`IN PARTITION` specifies the partition to which the [UPDATE](../../../sql-reference/statements/alter/update.md#alter-table-update-statements) or [DELETE](../../../sql-reference/statements/alter/delete.md#alter-mutations) expressions are applied as a result of the `ALTER TABLE` query. New parts are created only from the specified partition. In this way, `IN PARTITION` helps to reduce the load when the table is divided into many partitions, and you only need to update the data point-by-point. +`IN PARTITION` specifies the partition to which the [UPDATE](/docs/en/sql-reference/statements/alter/update.md/#alter-table-update-statements) or [DELETE](/docs/en/sql-reference/statements/alter/delete.md/#alter-mutations) expressions are applied as a result of the `ALTER TABLE` query. New parts are created only from the specified partition. In this way, `IN PARTITION` helps to reduce the load when the table is divided into many partitions, and you only need to update the data point-by-point. The examples of `ALTER ... PARTITION` queries are demonstrated in the tests [`00502_custom_partitioning_local`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_local.sql) and [`00502_custom_partitioning_replicated_zookeeper`](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql). diff --git a/docs/en/sql-reference/statements/alter/projection.md b/docs/en/sql-reference/statements/alter/projection.md index ff8ecf3a77f..952afc7f764 100644 --- a/docs/en/sql-reference/statements/alter/projection.md +++ b/docs/en/sql-reference/statements/alter/projection.md @@ -5,7 +5,7 @@ sidebar_label: PROJECTION title: "Manipulating Projections" --- -The following operations with [projections](../../../engines/table-engines/mergetree-family/mergetree.md#projections) are available: +The following operations with [projections](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#projections) are available: ## ADD PROJECTION @@ -13,15 +13,15 @@ The following operations with [projections](../../../engines/table-engines/merge ## DROP PROJECTION -`ALTER TABLE [db].name DROP PROJECTION name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). +`ALTER TABLE [db].name DROP PROJECTION name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/overview.md/#mutations). ## MATERIALIZE PROJECTION -`ALTER TABLE [db.]table MATERIALIZE PROJECTION name IN PARTITION partition_name` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). 
+`ALTER TABLE [db.]table MATERIALIZE PROJECTION name IN PARTITION partition_name` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/overview.md/#mutations). ## CLEAR PROJECTION -`ALTER TABLE [db.]table CLEAR PROJECTION name IN PARTITION partition_name` - Deletes projection files from disk without removing description. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). +`ALTER TABLE [db.]table CLEAR PROJECTION name IN PARTITION partition_name` - Deletes projection files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/overview.md/#mutations). The commands `ADD`, `DROP` and `CLEAR` are lightweight in a sense that they only change metadata or remove files. @@ -29,5 +29,5 @@ The commands `ADD`, `DROP` and `CLEAR` are lightweight in a sense that they only Also, they are replicated, syncing projections metadata via ClickHouse Keeper or ZooKeeper. :::note -Projection manipulation is supported only for tables with [`*MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) variants). +Projection manipulation is supported only for tables with [`*MergeTree`](/docs/en/engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](/docs/en/engines/table-engines/mergetree-family/replication.md) variants). ::: diff --git a/docs/en/sql-reference/statements/alter/skipping-index.md b/docs/en/sql-reference/statements/alter/skipping-index.md index 1a61e1b73ec..47b3999160e 100644 --- a/docs/en/sql-reference/statements/alter/skipping-index.md +++ b/docs/en/sql-reference/statements/alter/skipping-index.md @@ -14,12 +14,12 @@ The following operations are available: - `ALTER TABLE [db].table_name [ON CLUSTER cluster] DROP INDEX name` - Removes index description from tables metadata and deletes index files from disk. -- `ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data. +- `ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/overview.md/#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data. The first two commands are lightweight in a sense that they only change metadata or remove files. Also, they are replicated, syncing indices metadata via ZooKeeper. :::note -Index manipulation is supported only for tables with [`*MergeTree`](/docs/en/sql-reference/engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](/docs/en/sql-reference/engines/table-engines/mergetree-family/replication.md) variants). +Index manipulation is supported only for tables with [`*MergeTree`](/docs/en/engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](/docs/en/engines/table-engines/mergetree-family/replication.md) variants). 
::: diff --git a/docs/en/sql-reference/statements/alter/update.md b/docs/en/sql-reference/statements/alter/update.md index e4fb872ae24..475a1527842 100644 --- a/docs/en/sql-reference/statements/alter/update.md +++ b/docs/en/sql-reference/statements/alter/update.md @@ -10,7 +10,7 @@ sidebar_label: UPDATE ALTER TABLE [db.]table [ON CLUSTER cluster] UPDATE column1 = expr1 [, ...] WHERE filter_expr ``` -Manipulates data matching the specified filtering expression. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). +Manipulates data matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/overview.md/#mutations). :::note The `ALTER TABLE` prefix makes this syntax different from most other systems supporting SQL. It is intended to signify that unlike similar queries in OLTP databases this is a heavy operation not designed for frequent use. @@ -20,11 +20,11 @@ The `filter_expr` must be of type `UInt8`. This query updates values of specifie One query can contain several commands separated by commas. -The synchronicity of the query processing is defined by the [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting. By default, it is asynchronous. +The synchronicity of the query processing is defined by the [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting. By default, it is asynchronous. **See also** -- [Mutations](../../../sql-reference/statements/alter/index.md#mutations) -- [Synchronicity of ALTER Queries](../../../sql-reference/statements/alter/index.md#synchronicity-of-alter-queries) -- [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting +- [Mutations](/docs/en/sql-reference/statements/alter/overview.md/#mutations) +- [Synchronicity of ALTER Queries](/docs/en/sql-reference/statements/alter/overview.md/#synchronicity-of-alter-queries) +- [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting diff --git a/docs/en/sql-reference/statements/index.md b/docs/en/sql-reference/statements/index.md index bfb90f4a89f..4583b4ef1e9 100644 --- a/docs/en/sql-reference/statements/index.md +++ b/docs/en/sql-reference/statements/index.md @@ -8,25 +8,25 @@ sidebar_label: Statements Statements represent various kinds of action you can perform using SQL queries. 
Each kind of statement has it’s own syntax and usage details that are described separately: -- [SELECT](../../sql-reference/statements/select/index.md) -- [INSERT INTO](../../sql-reference/statements/insert-into.md) -- [CREATE](../../sql-reference/statements/create/index.md) -- [ALTER](../../sql-reference/statements/alter/index.md) -- [SYSTEM](../../sql-reference/statements/system.md) -- [SHOW](../../sql-reference/statements/show.md) -- [GRANT](../../sql-reference/statements/grant.md) -- [REVOKE](../../sql-reference/statements/revoke.md) -- [ATTACH](../../sql-reference/statements/attach.md) -- [CHECK TABLE](../../sql-reference/statements/check-table.md) -- [DESCRIBE TABLE](../../sql-reference/statements/describe-table.md) -- [DETACH](../../sql-reference/statements/detach.md) -- [DROP](../../sql-reference/statements/drop.md) -- [EXISTS](../../sql-reference/statements/exists.md) -- [KILL](../../sql-reference/statements/kill.md) -- [OPTIMIZE](../../sql-reference/statements/optimize.md) -- [RENAME](../../sql-reference/statements/rename.md) -- [SET](../../sql-reference/statements/set.md) -- [SET ROLE](../../sql-reference/statements/set-role.md) -- [TRUNCATE](../../sql-reference/statements/truncate.md) -- [USE](../../sql-reference/statements/use.md) -- [EXPLAIN](../../sql-reference/statements/explain.md) +- [SELECT](/docs/en/sql-reference/statements/select/index.md) +- [INSERT INTO](/docs/en/sql-reference/statements/insert-into.md) +- [CREATE](/docs/en/sql-reference/statements/create/index.md) +- [ALTER](/docs/en/sql-reference/statements/alter/overview.md) +- [SYSTEM](/docs/en/sql-reference/statements/system.md) +- [SHOW](/docs/en/sql-reference/statements/show.md) +- [GRANT](/docs/en/sql-reference/statements/grant.md) +- [REVOKE](/docs/en/sql-reference/statements/revoke.md) +- [ATTACH](/docs/en/sql-reference/statements/attach.md) +- [CHECK TABLE](/docs/en/sql-reference/statements/check-table.md) +- [DESCRIBE TABLE](/docs/en/sql-reference/statements/describe-table.md) +- [DETACH](/docs/en/sql-reference/statements/detach.md) +- [DROP](/docs/en/sql-reference/statements/drop.md) +- [EXISTS](/docs/en/sql-reference/statements/exists.md) +- [KILL](/docs/en/sql-reference/statements/kill.md) +- [OPTIMIZE](/docs/en/sql-reference/statements/optimize.md) +- [RENAME](/docs/en/sql-reference/statements/rename.md) +- [SET](/docs/en/sql-reference/statements/set.md) +- [SET ROLE](/docs/en/sql-reference/statements/set-role.md) +- [TRUNCATE](/docs/en/sql-reference/statements/truncate.md) +- [USE](/docs/en/sql-reference/statements/use.md) +- [EXPLAIN](/docs/en/sql-reference/statements/explain.md) From c822c8161ff1a5c57ad4077c2324bd841b9a04fc Mon Sep 17 00:00:00 2001 From: DanRoscigno Date: Tue, 8 Nov 2022 20:21:26 -0500 Subject: [PATCH 41/46] fix nav --- docs/en/engines/database-engines/materialized-mysql.md | 2 +- docs/en/engines/table-engines/log-family/index.md | 2 +- .../table-engines/mergetree-family/mergetree.md | 4 ++-- .../table-engines/mergetree-family/replication.md | 2 +- docs/en/engines/table-engines/special/join.md | 2 +- docs/en/operations/settings/settings.md | 10 +++++----- docs/en/operations/storing-data.md | 2 +- docs/en/operations/system-tables/mutations.md | 4 ++-- docs/en/operations/system-tables/parts.md | 2 +- docs/en/operations/system-tables/parts_columns.md | 2 +- docs/en/sql-reference/statements/alter/column.md | 2 +- docs/en/sql-reference/statements/alter/delete.md | 6 +++--- .../statements/alter/{overview.md => index.md} | 0 docs/en/sql-reference/statements/alter/partition.md | 4 ++-- 
docs/en/sql-reference/statements/alter/projection.md | 6 +++--- .../sql-reference/statements/alter/skipping-index.md | 2 +- docs/en/sql-reference/statements/alter/update.md | 6 +++--- docs/en/sql-reference/statements/alter/view.md | 2 +- docs/en/sql-reference/statements/grant.md | 2 +- docs/en/sql-reference/statements/index.md | 2 +- docs/en/sql-reference/statements/kill.md | 2 +- 21 files changed, 33 insertions(+), 33 deletions(-) rename docs/en/sql-reference/statements/alter/{overview.md => index.md} (100%) diff --git a/docs/en/engines/database-engines/materialized-mysql.md b/docs/en/engines/database-engines/materialized-mysql.md index 0411286cd23..7dd43858416 100644 --- a/docs/en/engines/database-engines/materialized-mysql.md +++ b/docs/en/engines/database-engines/materialized-mysql.md @@ -133,7 +133,7 @@ Apart of the data types limitations there are few restrictions comparing to `MyS ### DDL Queries {#ddl-queries} -MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([ALTER](/docs/en/sql-reference/statements/alter/overview.md), [CREATE](/docs/en/sql-reference/statements/create/index.md), [DROP](/docs/en/sql-reference/statements/drop.md), [RENAME](/docs/en/sql-reference/statements/rename.md)). If ClickHouse cannot parse some DDL query, the query is ignored. +MySQL DDL queries are converted into the corresponding ClickHouse DDL queries ([ALTER](/docs/en/sql-reference/statements/alter/index.md), [CREATE](/docs/en/sql-reference/statements/create/index.md), [DROP](/docs/en/sql-reference/statements/drop.md), [RENAME](/docs/en/sql-reference/statements/rename.md)). If ClickHouse cannot parse some DDL query, the query is ignored. ### Data Replication {#data-replication} diff --git a/docs/en/engines/table-engines/log-family/index.md b/docs/en/engines/table-engines/log-family/index.md index 269ad0c98f1..486c41c2496 100644 --- a/docs/en/engines/table-engines/log-family/index.md +++ b/docs/en/engines/table-engines/log-family/index.md @@ -28,7 +28,7 @@ Engines: During `INSERT` queries, the table is locked, and other queries for reading and writing data both wait for the table to unlock. If there are no data writing queries, any number of data reading queries can be performed concurrently. -- Do not support [mutations](/docs/en/sql-reference/statements/alter/overview.md/#alter-mutations). +- Do not support [mutations](/docs/en/sql-reference/statements/alter/index.md/#alter-mutations). - Do not support indexes. diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 807a19605c4..7dfb5a9fed7 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -298,7 +298,7 @@ This feature is helpful when using the [SummingMergeTree](/docs/en/engines/table In this case it makes sense to leave only a few columns in the primary key that will provide efficient range scans and add the remaining dimension columns to the sorting key tuple. -[ALTER](/docs/en/sql-reference/statements/alter/overview.md) of the sorting key is a lightweight operation because when a new column is simultaneously added to the table and to the sorting key, existing data parts do not need to be changed. Since the old sorting key is a prefix of the new sorting key and there is no data in the newly added column, the data is sorted by both the old and new sorting keys at the moment of table modification. 
+[ALTER](/docs/en/sql-reference/statements/alter/index.md) of the sorting key is a lightweight operation because when a new column is simultaneously added to the table and to the sorting key, existing data parts do not need to be changed. Since the old sorting key is a prefix of the new sorting key and there is no data in the newly added column, the data is sorted by both the old and new sorting keys at the moment of table modification. ### Use of Indexes and Partitions in Queries {#use-of-indexes-and-partitions-in-queries} @@ -860,7 +860,7 @@ The number of threads performing background moves of data parts can be changed b In the case of `MergeTree` tables, data is getting to disk in different ways: - As a result of an insert (`INSERT` query). -- During background merges and [mutations](/docs/en/sql-reference/statements/alter/overview.md/#alter-mutations). +- During background merges and [mutations](/docs/en/sql-reference/statements/alter/index.md/#alter-mutations). - When downloading from another replica. - As a result of partition freezing [ALTER TABLE … FREEZE PARTITION](/docs/en/sql-reference/statements/alter/partition.md/#alter_freeze-partition). diff --git a/docs/en/engines/table-engines/mergetree-family/replication.md b/docs/en/engines/table-engines/mergetree-family/replication.md index 0e208629c2e..67b595d0fa0 100644 --- a/docs/en/engines/table-engines/mergetree-family/replication.md +++ b/docs/en/engines/table-engines/mergetree-family/replication.md @@ -20,7 +20,7 @@ Replication works at the level of an individual table, not the entire server. A Replication does not depend on sharding. Each shard has its own independent replication. -Compressed data for `INSERT` and `ALTER` queries is replicated (for more information, see the documentation for [ALTER](/docs/en/sql-reference/statements/alter/overview.md/#query_language_queries_alter)). +Compressed data for `INSERT` and `ALTER` queries is replicated (for more information, see the documentation for [ALTER](/docs/en/sql-reference/statements/alter/index.md/#query_language_queries_alter)). `CREATE`, `DROP`, `ATTACH`, `DETACH` and `RENAME` queries are executed on a single server and are not replicated: diff --git a/docs/en/engines/table-engines/special/join.md b/docs/en/engines/table-engines/special/join.md index 36b2e59bcf8..0e51a8b7696 100644 --- a/docs/en/engines/table-engines/special/join.md +++ b/docs/en/engines/table-engines/special/join.md @@ -59,7 +59,7 @@ Main use-cases for `Join`-engine tables are following: ### Deleting Data {#deleting-data} -`ALTER DELETE` queries for `Join`-engine tables are implemented as [mutations](/docs/en/sql-reference/statements/alter/overview.md/#mutations). `DELETE` mutation reads filtered data and overwrites data of memory and disk. +`ALTER DELETE` queries for `Join`-engine tables are implemented as [mutations](/docs/en/sql-reference/statements/alter/index.md/#mutations). `DELETE` mutation reads filtered data and overwrites data of memory and disk. ### Limitations and Settings {#join-limitations-and-settings} diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 38077a2b891..d2f0f46f637 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -2227,7 +2227,7 @@ Default value: `0`. ## mutations_sync {#mutations_sync} -Allows to execute `ALTER TABLE ... UPDATE|DELETE` queries ([mutations](../../sql-reference/statements/alter/overview.md/#mutations)) synchronously. +Allows to execute `ALTER TABLE ... 
UPDATE|DELETE` queries ([mutations](../../sql-reference/statements/alter/index.md/#mutations)) synchronously. Possible values: @@ -2239,8 +2239,8 @@ Default value: `0`. **See Also** -- [Synchronicity of ALTER Queries](../../sql-reference/statements/alter/overview.md/#synchronicity-of-alter-queries) -- [Mutations](../../sql-reference/statements/alter/overview.md/#mutations) +- [Synchronicity of ALTER Queries](../../sql-reference/statements/alter/index.md/#synchronicity-of-alter-queries) +- [Mutations](../../sql-reference/statements/alter/index.md/#mutations) ## ttl_only_drop_parts {#ttl_only_drop_parts} @@ -2954,7 +2954,7 @@ Default value: `0`. ## replication_alter_partitions_sync {#replication-alter-partitions-sync} -Allows to set up waiting for actions to be executed on replicas by [ALTER](../../sql-reference/statements/alter/overview.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries. +Allows to set up waiting for actions to be executed on replicas by [ALTER](../../sql-reference/statements/alter/index.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries. Possible values: @@ -2966,7 +2966,7 @@ Default value: `1`. ## replication_wait_for_inactive_replica_timeout {#replication-wait-for-inactive-replica-timeout} -Specifies how long (in seconds) to wait for inactive replicas to execute [ALTER](../../sql-reference/statements/alter/overview.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries. +Specifies how long (in seconds) to wait for inactive replicas to execute [ALTER](../../sql-reference/statements/alter/index.md), [OPTIMIZE](../../sql-reference/statements/optimize.md) or [TRUNCATE](../../sql-reference/statements/truncate.md) queries. Possible values: diff --git a/docs/en/operations/storing-data.md b/docs/en/operations/storing-data.md index c0acd544fa9..203fe4e42d2 100644 --- a/docs/en/operations/storing-data.md +++ b/docs/en/operations/storing-data.md @@ -265,7 +265,7 @@ Cache profile events: There is a tool `clickhouse-static-files-uploader`, which prepares a data directory for a given table (`SELECT data_paths FROM system.tables WHERE name = 'table_name'`). For each table you need, you get a directory of files. These files can be uploaded to, for example, a web server with static files. After this preparation, you can load this table into any ClickHouse server via `DiskWeb`. -This is a read-only disk. Its data is only read and never modified. A new table is loaded to this disk via `ATTACH TABLE` query (see example below). Local disk is not actually used, each `SELECT` query will result in a `http` request to fetch required data. All modification of the table data will result in an exception, i.e. the following types of queries are not allowed: [CREATE TABLE](/docs/en/sql-reference/statements/create/table.md), [ALTER TABLE](/docs/en/sql-reference/statements/alter/overview.md), [RENAME TABLE](/docs/en/sql-reference/statements/rename.md/#misc_operations-rename_table), [DETACH TABLE](/docs/en/sql-reference/statements/detach.md) and [TRUNCATE TABLE](/docs/en/sql-reference/statements/truncate.md). +This is a read-only disk. Its data is only read and never modified. A new table is loaded to this disk via `ATTACH TABLE` query (see example below). Local disk is not actually used, each `SELECT` query will result in a `http` request to fetch required data. 
All modification of the table data will result in an exception, i.e. the following types of queries are not allowed: [CREATE TABLE](/docs/en/sql-reference/statements/create/table.md), [ALTER TABLE](/docs/en/sql-reference/statements/alter/index.md), [RENAME TABLE](/docs/en/sql-reference/statements/rename.md/#misc_operations-rename_table), [DETACH TABLE](/docs/en/sql-reference/statements/detach.md) and [TRUNCATE TABLE](/docs/en/sql-reference/statements/truncate.md). Web server storage is supported only for the [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) and [Log](/docs/en/engines/table-engines/log-family/log.md) engine families. To access the data stored on a `web` disk, use the [storage_policy](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#terms) setting when executing the query. For example, `ATTACH TABLE table_web UUID '{}' (id Int32) ENGINE = MergeTree() ORDER BY id SETTINGS storage_policy = 'web'`. diff --git a/docs/en/operations/system-tables/mutations.md b/docs/en/operations/system-tables/mutations.md index 98237562f37..782d7c42ad2 100644 --- a/docs/en/operations/system-tables/mutations.md +++ b/docs/en/operations/system-tables/mutations.md @@ -3,7 +3,7 @@ slug: /en/operations/system-tables/mutations --- # mutations -The table contains information about [mutations](/docs/en/sql-reference/statements/alter/overview.md/#mutations) of [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) tables and their progress. Each mutation command is represented by a single row. +The table contains information about [mutations](/docs/en/sql-reference/statements/alter/index.md/#mutations) of [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) tables and their progress. Each mutation command is represented by a single row. Columns: @@ -45,7 +45,7 @@ If there were problems with mutating some data parts, the following columns cont **See Also** -- [Mutations](/docs/en/sql-reference/statements/alter/overview.md/#mutations) +- [Mutations](/docs/en/sql-reference/statements/alter/index.md/#mutations) - [MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) table engine - [ReplicatedMergeTree](/docs/en/engines/table-engines/mergetree-family/replication.md) family diff --git a/docs/en/operations/system-tables/parts.md b/docs/en/operations/system-tables/parts.md index 9b6a3495e1e..cbabd9b27b1 100644 --- a/docs/en/operations/system-tables/parts.md +++ b/docs/en/operations/system-tables/parts.md @@ -9,7 +9,7 @@ Each row describes one data part. Columns: -- `partition` ([String](../../sql-reference/data-types/string.md)) – The partition name. To learn what a partition is, see the description of the [ALTER](../../sql-reference/statements/alter/overview.md/#query_language_queries_alter) query. +- `partition` ([String](../../sql-reference/data-types/string.md)) – The partition name. To learn what a partition is, see the description of the [ALTER](../../sql-reference/statements/alter/index.md/#query_language_queries_alter) query. Formats: diff --git a/docs/en/operations/system-tables/parts_columns.md b/docs/en/operations/system-tables/parts_columns.md index c909d075f17..d934e01f245 100644 --- a/docs/en/operations/system-tables/parts_columns.md +++ b/docs/en/operations/system-tables/parts_columns.md @@ -9,7 +9,7 @@ Each row describes one data part. Columns: -- `partition` ([String](../../sql-reference/data-types/string.md)) — The partition name. 
To learn what a partition is, see the description of the [ALTER](../../sql-reference/statements/alter/overview.md/#query_language_queries_alter) query. +- `partition` ([String](../../sql-reference/data-types/string.md)) — The partition name. To learn what a partition is, see the description of the [ALTER](../../sql-reference/statements/alter/index.md/#query_language_queries_alter) query. Formats: diff --git a/docs/en/sql-reference/statements/alter/column.md b/docs/en/sql-reference/statements/alter/column.md index a19b946b3cd..6bca0dbff42 100644 --- a/docs/en/sql-reference/statements/alter/column.md +++ b/docs/en/sql-reference/statements/alter/column.md @@ -258,4 +258,4 @@ If the `ALTER` query is not sufficient to make the table changes you need, you c The `ALTER` query blocks all reads and writes for the table. In other words, if a long `SELECT` is running at the time of the `ALTER` query, the `ALTER` query will wait for it to complete. At the same time, all new queries to the same table will wait while this `ALTER` is running. -For tables that do not store data themselves (such as [Merge](/docs/en/sql-reference/statements/alter/overview.md) and [Distributed](/docs/en/sql-reference/statements/alter/overview.md)), `ALTER` just changes the table structure, and does not change the structure of subordinate tables. For example, when running ALTER for a `Distributed` table, you will also need to run `ALTER` for the tables on all remote servers. +For tables that do not store data themselves (such as [Merge](/docs/en/sql-reference/statements/alter/index.md) and [Distributed](/docs/en/sql-reference/statements/alter/index.md)), `ALTER` just changes the table structure, and does not change the structure of subordinate tables. For example, when running ALTER for a `Distributed` table, you will also need to run `ALTER` for the tables on all remote servers. diff --git a/docs/en/sql-reference/statements/alter/delete.md b/docs/en/sql-reference/statements/alter/delete.md index 05b33253bfe..4dcab030d13 100644 --- a/docs/en/sql-reference/statements/alter/delete.md +++ b/docs/en/sql-reference/statements/alter/delete.md @@ -10,7 +10,7 @@ sidebar_label: DELETE ALTER TABLE [db.]table [ON CLUSTER cluster] DELETE WHERE filter_expr ``` -Deletes data matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/overview.md/#mutations). +Deletes data matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md/#mutations). 
:::note @@ -25,6 +25,6 @@ The synchronicity of the query processing is defined by the [mutations_sync](/do **See also** -- [Mutations](/docs/en/sql-reference/statements/alter/overview.md/#mutations) -- [Synchronicity of ALTER Queries](/docs/en/sql-reference/statements/alter/overview.md/#synchronicity-of-alter-queries) +- [Mutations](/docs/en/sql-reference/statements/alter/index.md/#mutations) +- [Synchronicity of ALTER Queries](/docs/en/sql-reference/statements/alter/index.md/#synchronicity-of-alter-queries) - [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting diff --git a/docs/en/sql-reference/statements/alter/overview.md b/docs/en/sql-reference/statements/alter/index.md similarity index 100% rename from docs/en/sql-reference/statements/alter/overview.md rename to docs/en/sql-reference/statements/alter/index.md diff --git a/docs/en/sql-reference/statements/alter/partition.md b/docs/en/sql-reference/statements/alter/partition.md index 20eb76a8c8e..146c15e776e 100644 --- a/docs/en/sql-reference/statements/alter/partition.md +++ b/docs/en/sql-reference/statements/alter/partition.md @@ -270,7 +270,7 @@ ALTER TABLE hits MOVE PARTITION '2019-09-01' TO DISK 'fast_ssd' ## UPDATE IN PARTITION -Manipulates data in the specifies partition matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/overview.md/#mutations). +Manipulates data in the specifies partition matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md/#mutations). Syntax: @@ -290,7 +290,7 @@ ALTER TABLE mt UPDATE x = x + 1 IN PARTITION 2 WHERE p = 2; ## DELETE IN PARTITION -Deletes data in the specifies partition matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/overview.md/#mutations). +Deletes data in the specifies partition matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md/#mutations). Syntax: diff --git a/docs/en/sql-reference/statements/alter/projection.md b/docs/en/sql-reference/statements/alter/projection.md index 952afc7f764..7913c7cb7e4 100644 --- a/docs/en/sql-reference/statements/alter/projection.md +++ b/docs/en/sql-reference/statements/alter/projection.md @@ -13,15 +13,15 @@ The following operations with [projections](/docs/en/engines/table-engines/merge ## DROP PROJECTION -`ALTER TABLE [db].name DROP PROJECTION name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/overview.md/#mutations). +`ALTER TABLE [db].name DROP PROJECTION name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md/#mutations). ## MATERIALIZE PROJECTION -`ALTER TABLE [db.]table MATERIALIZE PROJECTION name IN PARTITION partition_name` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/overview.md/#mutations). +`ALTER TABLE [db.]table MATERIALIZE PROJECTION name IN PARTITION partition_name` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md/#mutations). 
## CLEAR PROJECTION -`ALTER TABLE [db.]table CLEAR PROJECTION name IN PARTITION partition_name` - Deletes projection files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/overview.md/#mutations). +`ALTER TABLE [db.]table CLEAR PROJECTION name IN PARTITION partition_name` - Deletes projection files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md/#mutations). The commands `ADD`, `DROP` and `CLEAR` are lightweight in a sense that they only change metadata or remove files. diff --git a/docs/en/sql-reference/statements/alter/skipping-index.md b/docs/en/sql-reference/statements/alter/skipping-index.md index 47b3999160e..2dadffc4527 100644 --- a/docs/en/sql-reference/statements/alter/skipping-index.md +++ b/docs/en/sql-reference/statements/alter/skipping-index.md @@ -14,7 +14,7 @@ The following operations are available: - `ALTER TABLE [db].table_name [ON CLUSTER cluster] DROP INDEX name` - Removes index description from tables metadata and deletes index files from disk. -- `ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/overview.md/#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data. +- `ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md/#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data. The first two commands are lightweight in a sense that they only change metadata or remove files. diff --git a/docs/en/sql-reference/statements/alter/update.md b/docs/en/sql-reference/statements/alter/update.md index 475a1527842..f40b72f7ab3 100644 --- a/docs/en/sql-reference/statements/alter/update.md +++ b/docs/en/sql-reference/statements/alter/update.md @@ -10,7 +10,7 @@ sidebar_label: UPDATE ALTER TABLE [db.]table [ON CLUSTER cluster] UPDATE column1 = expr1 [, ...] WHERE filter_expr ``` -Manipulates data matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/overview.md/#mutations). +Manipulates data matching the specified filtering expression. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md/#mutations). :::note The `ALTER TABLE` prefix makes this syntax different from most other systems supporting SQL. It is intended to signify that unlike similar queries in OLTP databases this is a heavy operation not designed for frequent use. 
@@ -24,7 +24,7 @@ The synchronicity of the query processing is defined by the [mutations_sync](/do **See also** -- [Mutations](/docs/en/sql-reference/statements/alter/overview.md/#mutations) -- [Synchronicity of ALTER Queries](/docs/en/sql-reference/statements/alter/overview.md/#synchronicity-of-alter-queries) +- [Mutations](/docs/en/sql-reference/statements/alter/index.md/#mutations) +- [Synchronicity of ALTER Queries](/docs/en/sql-reference/statements/alter/index.md/#synchronicity-of-alter-queries) - [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting diff --git a/docs/en/sql-reference/statements/alter/view.md b/docs/en/sql-reference/statements/alter/view.md index fd6045a026e..e382cdace30 100644 --- a/docs/en/sql-reference/statements/alter/view.md +++ b/docs/en/sql-reference/statements/alter/view.md @@ -8,7 +8,7 @@ sidebar_label: VIEW You can modify `SELECT` query that was specified when a [materialized view](../create/view.md#materialized) was created with the `ALTER TABLE … MODIFY QUERY` statement. Use it when the materialized view was created without the `TO [db.]name` clause. The `allow_experimental_alter_materialized_view_structure` setting must be enabled. -If a materialized view uses the `TO [db.]name` construction, you must [DETACH](../detach.md) the view, run [ALTER TABLE](overview.md) query for the target table, and then [ATTACH](../attach.md) the previously detached (`DETACH`) view. +If a materialized view uses the `TO [db.]name` construction, you must [DETACH](../detach.md) the view, run [ALTER TABLE](index.md) query for the target table, and then [ATTACH](../attach.md) the previously detached (`DETACH`) view. **Example** diff --git a/docs/en/sql-reference/statements/grant.md b/docs/en/sql-reference/statements/grant.md index bd35efb3deb..546a8b0958d 100644 --- a/docs/en/sql-reference/statements/grant.md +++ b/docs/en/sql-reference/statements/grant.md @@ -261,7 +261,7 @@ The granted privilege allows `john` to insert data to the `x` and/or `y` columns ### ALTER -Allows executing [ALTER](../../sql-reference/statements/alter/overview.md) queries according to the following hierarchy of privileges: +Allows executing [ALTER](../../sql-reference/statements/alter/index.md) queries according to the following hierarchy of privileges: - `ALTER`. Level: `COLUMN`. - `ALTER TABLE`. Level: `GROUP` diff --git a/docs/en/sql-reference/statements/index.md b/docs/en/sql-reference/statements/index.md index 4583b4ef1e9..b286d8c932d 100644 --- a/docs/en/sql-reference/statements/index.md +++ b/docs/en/sql-reference/statements/index.md @@ -11,7 +11,7 @@ Statements represent various kinds of action you can perform using SQL queries. 
- [SELECT](/docs/en/sql-reference/statements/select/index.md) - [INSERT INTO](/docs/en/sql-reference/statements/insert-into.md) - [CREATE](/docs/en/sql-reference/statements/create/index.md) -- [ALTER](/docs/en/sql-reference/statements/alter/overview.md) +- [ALTER](/docs/en/sql-reference/statements/alter/index.md) - [SYSTEM](/docs/en/sql-reference/statements/system.md) - [SHOW](/docs/en/sql-reference/statements/show.md) - [GRANT](/docs/en/sql-reference/statements/grant.md) diff --git a/docs/en/sql-reference/statements/kill.md b/docs/en/sql-reference/statements/kill.md index 733125c78f3..294724dfa50 100644 --- a/docs/en/sql-reference/statements/kill.md +++ b/docs/en/sql-reference/statements/kill.md @@ -51,7 +51,7 @@ KILL MUTATION [ON CLUSTER cluster] [FORMAT format] ``` -Tries to cancel and remove [mutations](../../sql-reference/statements/alter/overview.md#alter-mutations) that are currently executing. Mutations to cancel are selected from the [`system.mutations`](../../operations/system-tables/mutations.md#system_tables-mutations) table using the filter specified by the `WHERE` clause of the `KILL` query. +Tries to cancel and remove [mutations](../../sql-reference/statements/alter/index.md#alter-mutations) that are currently executing. Mutations to cancel are selected from the [`system.mutations`](../../operations/system-tables/mutations.md#system_tables-mutations) table using the filter specified by the `WHERE` clause of the `KILL` query. A test query (`TEST`) only checks the user’s rights and displays a list of mutations to stop. From 768ada118b12a25bc6ae3e531b597b67a6e56628 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Tue, 8 Nov 2022 21:37:31 +0100 Subject: [PATCH 42/46] Analyzer AST key condition crash fix --- src/Storages/MergeTree/KeyCondition.cpp | 2 +- ...477_analyzer_ast_key_condition_crash.reference | 2 ++ .../02477_analyzer_ast_key_condition_crash.sql | 15 +++++++++++++++ 3 files changed, 18 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/02477_analyzer_ast_key_condition_crash.reference create mode 100644 tests/queries/0_stateless/02477_analyzer_ast_key_condition_crash.sql diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 8ffd1059a58..1d688427a57 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -783,7 +783,7 @@ KeyCondition::KeyCondition( context, key_column_names, key_expr_, - query_info.syntax_analyzer_result->getArrayJoinSourceNameSet(), + query_info.syntax_analyzer_result ? 
query_info.syntax_analyzer_result->getArrayJoinSourceNameSet() : NameSet{}, single_point_, strict_) { diff --git a/tests/queries/0_stateless/02477_analyzer_ast_key_condition_crash.reference b/tests/queries/0_stateless/02477_analyzer_ast_key_condition_crash.reference new file mode 100644 index 00000000000..6ed281c757a --- /dev/null +++ b/tests/queries/0_stateless/02477_analyzer_ast_key_condition_crash.reference @@ -0,0 +1,2 @@ +1 +1 diff --git a/tests/queries/0_stateless/02477_analyzer_ast_key_condition_crash.sql b/tests/queries/0_stateless/02477_analyzer_ast_key_condition_crash.sql new file mode 100644 index 00000000000..53f3a9b23ec --- /dev/null +++ b/tests/queries/0_stateless/02477_analyzer_ast_key_condition_crash.sql @@ -0,0 +1,15 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64 +) ENGINE = MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (1); + +SELECT * FROM test_table WHERE id = 1; + +SELECT * FROM test_table WHERE id = 1 SETTINGS query_plan_optimize_primary_key = 0; + +DROP TABLE test_table; From 9edb4d779b142309579d2b8f45ce81d6d01ed951 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Wed, 9 Nov 2022 12:18:29 +0100 Subject: [PATCH 43/46] fix --- docker/test/fuzzer/run-fuzzer.sh | 1 + tests/ci/ast_fuzzer_check.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index e57b671342c..dbb56b258ed 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -391,6 +391,7 @@ th { cursor: pointer; }

AST Fuzzer for PR #${PR_TO_TEST} @ ${SHA_TO_TEST}

+ 🌚🌞
@@ -681,6 +682,25 @@ document.getElementById('add').addEventListener('click', e => { resize(); }); +document.getElementById('reload').addEventListener('click', e => { + reloadAll(); +}); + +function showReloadIfNeeded() { + const is_any_field_changed = (host != document.getElementById('url').value + || user != document.getElementById('user').value + || password != document.getElementById('password').value); + if (is_any_field_changed) { + document.getElementById('reload').style.display = ''; + } else { + document.getElementById('reload').style.display = 'none'; + } +} + +document.getElementById('password').addEventListener('input', e => { showReloadIfNeeded(); }) +document.getElementById('user').addEventListener('input', e => { showReloadIfNeeded(); }) +document.getElementById('url').addEventListener('input', e => { showReloadIfNeeded(); }) + function legendAsTooltipPlugin({ className, style = { background: "var(--legend-background)" } } = {}) { let legendEl; @@ -843,10 +863,15 @@ function resize() { new ResizeObserver(resize).observe(document.body); -document.getElementById('params').onsubmit = function(event) { +function reloadAll() { updateParams(); drawAll(); saveState(); + document.getElementById('reload').style.display = 'none'; +} + +document.getElementById('params').onsubmit = function(event) { + reloadAll(); event.preventDefault(); } From a977ea307133e896383047bf10a00ac1631a4fca Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 1 Nov 2022 17:19:56 +0100 Subject: [PATCH 45/46] dashboard.html: scroll and focus to new chart --- programs/server/dashboard.html | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/programs/server/dashboard.html b/programs/server/dashboard.html index 97cb8f8db12..b15bced5028 100644 --- a/programs/server/dashboard.html +++ b/programs/server/dashboard.html @@ -673,11 +673,16 @@ function insertChart(i) { chart.addEventListener('mouseleave', e => { edit_buttons.style.display = 'none'; }); charts.appendChild(chart); + return {chart: chart, textarea: query_editor_textarea}; }; document.getElementById('add').addEventListener('click', e => { queries.push({ title: '', query: '' }); - insertChart(plots.length); + + const {chart, textarea} = insertChart(plots.length); + chart.scrollIntoView(); + textarea.focus(); + plots.push(null); resize(); }); From e3f211a9437d272786230ac54939016d72e114f5 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 1 Nov 2022 17:20:26 +0100 Subject: [PATCH 46/46] dashboard.html: add debug mode to add add_http_cors_header --- programs/server/dashboard.html | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/programs/server/dashboard.html b/programs/server/dashboard.html index b15bced5028..859ce78068c 100644 --- a/programs/server/dashboard.html +++ b/programs/server/dashboard.html @@ -756,6 +756,8 @@ function legendAsTooltipPlugin({ className, style = { background: "var(--legend- }; } +let add_http_cors_header = false; + async function draw(idx, chart, url_params, query) { if (plots[idx]) { plots[idx].destroy(); @@ -767,6 +769,12 @@ async function draw(idx, chart, url_params, query) { password = document.getElementById('password').value; let url = `${host}?default_format=JSONCompactColumns` + + if (add_http_cors_header) { + // For debug purposes, you may set add_http_cors_header from a browser console + url += '&add_http_cors_header=1'; + } + if (user) { url += `&user=${encodeURIComponent(user)}`; }