From 665fabde9e60eea5f6364200cdad56d00ed39298 Mon Sep 17 00:00:00 2001 From: antikvist Date: Sat, 18 Apr 2020 19:20:34 +0300 Subject: [PATCH 001/432] #WelchTTest aggregate function implementation What's new: -Main classes for aggreagate function added. -Data class with needed mathods added. -Registered function in registerAggregateFunctions.h --- .../AggregateFunctionWelchTTest.cpp | 28 ++ .../AggregateFunctionWelchTTest.h | 251 ++++++++++++++++++ .../registerAggregateFunctions.cpp | 1 + .../registerAggregateFunctions.h | 1 + 4 files changed, 281 insertions(+) create mode 100644 src/AggregateFunctions/AggregateFunctionWelchTTest.cpp create mode 100644 src/AggregateFunctions/AggregateFunctionWelchTTest.h diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp new file mode 100644 index 00000000000..46b533a2c0b --- /dev/null +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -0,0 +1,28 @@ +#include +#include +#include + + +namespace DB +{ + +namespace +{ + +AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, const DataTypes & argument_types, const Array & parameters) +{ + + + return std::make_shared(argument_types, parameters); + +} + +} + +void registerAggregateFunctionWelchTTest(AggregateFunctionFactory & factory) +{ + + factory.registerFunction("WelchTTest", createAggregateFunctionWelchTTest, AggregateFunctionFactory::CaseInsensitive); +} + +} diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h new file mode 100644 index 00000000000..a52528df431 --- /dev/null +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -0,0 +1,251 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ +// hard-codded values - part of the algorithm + +#define SIGN_LVL_CNT 6 + +Float64 CriticalValuesTable[SIGN_LVL_CNT][102] = { + // for significance level = 0.2 + {0.2, 3.078, 1.886, 1.638, 1.533, 1.476, 1.44, 1.415, 1.397, 1.383, 1.372, 1.363, 1.356, 1.35, 1.345, 1.341, 1.337, 1.333, 1.33, 1.328, 1.325, 1.323, 1.321, 1.319, 1.318, 1.316, 1.315, 1.314, 1.313, 1.311, 1.31, 1.309, 1.309, 1.308, 1.307, 1.306, 1.306, 1.305, 1.304, 1.304, 1.303, 1.303, 1.302, 1.302, 1.301, 1.301, 1.3, 1.3, 1.299, 1.299, 1.299, 1.298, 1.298, 1.298, 1.297, 1.297, 1.297, 1.297, 1.296, 1.296, 1.296, 1.296, 1.295, 1.295, 1.295, 1.295, 1.295, 1.294, 1.294, 1.294, 1.294, 1.294, 1.293, 1.293, 1.293, 1.293, 1.293, 1.293, 1.292, 1.292, 1.292, 1.292, 1.292, 1.292, 1.292, 1.292, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.29, 1.29, 1.29, 1.29, 1.29, 1.282} + + // for significance level = 0.1 + {0.1, 6.314, 2.92, 2.353, 2.132, 2.015, 1.943, 1.895, 1.86, 1.833, 1.812, 1.796, 1.782, 1.771, 1.761, 1.753, 1.746, 1.74, 1.734, 1.729, 1.725, 1.721, 1.717, 1.714, 1.711, 1.708, 1.706, 1.703, 1.701, 1.699, 1.697, 1.696, 1.694, 1.692, 1.691, 1.69, 1.688, 1.687, 1.686, 1.685, 1.684, 1.683, 1.682, 1.681, 1.68, 1.679, 1.679, 1.678, 1.677, 1.677, 1.676, 1.675, 1.675, 1.674, 1.674, 1.673, 1.673, 1.672, 1.672, 1.671, 1.671, 1.67, 1.67, 1.669, 1.669, 1.669, 1.668, 1.668, 1.668, 1.667, 1.667, 1.667, 1.666, 1.666, 1.666, 1.665, 1.665, 1.665, 1.665, 1.664, 1.664, 1.664, 1.664, 1.663, 1.663, 1.663, 1.663, 1.663, 1.662, 1.662, 1.662, 1.662, 1.662, 1.661, 1.661, 1.661, 1.661, 1.661, 1.661, 1.66, 1.66, 1.645} + + // for significance level = 0.05 + {0.05, 12.706, 4.303, 
3.182, 2.776, 2.571, 2.447, 2.365, 2.306, 2.262, 2.228, 2.201, 2.179, 2.16, 2.145, 2.131, 2.12, 2.11, 2.101, 2.093, 2.086, 2.08, 2.074, 2.069, 2.064, 2.06, 2.056, 2.052, 2.048, 2.045, 2.042, 2.04, 2.037, 2.035, 2.032, 2.03, 2.028, 2.026, 2.024, 2.023, 2.021, 2.02, 2.018, 2.017, 2.015, 2.014, 2.013, 2.012, 2.011, 2.01, 2.009, 2.008, 2.007, 2.006, 2.005, 2.004, 2.003, 2.002, 2.002, 2.001, 2.0, 2.0, 1.999, 1.998, 1.998, 1.997, 1.997, 1.996, 1.995, 1.995, 1.994, 1.994, 1.993, 1.993, 1.993, 1.992, 1.992, 1.991, 1.991, 1.99, 1.99, 1.99, 1.989, 1.989, 1.989, 1.988, 1.988, 1.988, 1.987, 1.987, 1.987, 1.986, 1.986, 1.986, 1.986, 1.985, 1.985, 1.985, 1.984, 1.984, 1.984, 1.96} + + // for significance level = 0.02 + {0.02, 31.821, 6.965, 4.541, 3.747, 3.365, 3.143, 2.998, 2.896, 2.821, 2.764, 2.718, 2.681, 2.65, 2.624, 2.602, 2.583, 2.567, 2.552, 2.539, 2.528, 2.518, 2.508, 2.5, 2.492, 2.485, 2.479, 2.473, 2.467, 2.462, 2.457, 2.453, 2.449, 2.445, 2.441, 2.438, 2.434, 2.431, 2.429, 2.426, 2.423, 2.421, 2.418, 2.416, 2.414, 2.412, 2.41, 2.408, 2.407, 2.405, 2.403, 2.402, 2.4, 2.399, 2.397, 2.396, 2.395, 2.394, 2.392, 2.391, 2.39, 2.389, 2.388, 2.387, 2.386, 2.385, 2.384, 2.383, 2.382, 2.382, 2.381, 2.38, 2.379, 2.379, 2.378, 2.377, 2.376, 2.376, 2.375, 2.374, 2.374, 2.373, 2.373, 2.372, 2.372, 2.371, 2.37, 2.37, 2.369, 2.369, 2.368, 2.368, 2.368, 2.367, 2.367, 2.366, 2.366, 2.365, 2.365, 2.365, 2.364, 2.326} + + // for significance level = 0.01 + {0.01, 63.657, 9.925, 5.841, 4.604, 4.032, 3.707, 3.499, 3.355, 3.25, 3.169, 3.106, 3.055, 3.012, 2.977, 2.947, 2.921, 2.898, 2.878, 2.861, 2.845, 2.831, 2.819, 2.807, 2.797, 2.787, 2.779, 2.771, 2.763, 2.756, 2.75, 2.744, 2.738, 2.733, 2.728, 2.724, 2.719, 2.715, 2.712, 2.708, 2.704, 2.701, 2.698, 2.695, 2.692, 2.69, 2.687, 2.685, 2.682, 2.68, 2.678, 2.676, 2.674, 2.672, 2.67, 2.668, 2.667, 2.665, 2.663, 2.662, 2.66, 2.659, 2.657, 2.656, 2.655, 2.654, 2.652, 2.651, 2.65, 2.649, 2.648, 2.647, 2.646, 2.645, 2.644, 2.643, 2.642, 2.641, 2.64, 2.64, 2.639, 2.638, 2.637, 2.636, 2.636, 2.635, 2.634, 2.634, 2.633, 2.632, 2.632, 2.631, 2.63, 2.63, 2.629, 2.629, 2.628, 2.627, 2.627, 2.626, 2.626, 2.576} + + // for significance level = 0.002 + {0.002, 318.313, 22.327, 10.215, 7.173, 5.893, 5.208, 4.782, 4.499, 4.296, 4.143, 4.024, 3.929, 3.852, 3.787, 3.733, 3.686, 3.646, 3.61, 3.579, 3.552, 3.527, 3.505, 3.485, 3.467, 3.45, 3.435, 3.421, 3.408, 3.396, 3.385, 3.375, 3.365, 3.356, 3.348, 3.34, 3.333, 3.326, 3.319, 3.313, 3.307, 3.301, 3.296, 3.291, 3.286, 3.281, 3.277, 3.273, 3.269, 3.265, 3.261, 3.258, 3.255, 3.251, 3.248, 3.245, 3.242, 3.239, 3.237, 3.234, 3.232, 3.229, 3.227, 3.225, 3.223, 3.22, 3.218, 3.216, 3.214, 3.213, 3.211, 3.209, 3.207, 3.206, 3.204, 3.202, 3.201, 3.199, 3.198, 3.197, 3.195, 3.194, 3.193, 3.191, 3.19, 3.189, 3.188, 3.187, 3.185, 3.184, 3.183, 3.182, 3.181, 3.18, 3.179, 3.178, 3.177, 3.176, 3.175, 3.175, 3.174, 3.09} +} + +// our algorithm implementation via vectors: +// https://gist.github.com/ltybc-coder/792748cfdb2f7cadef424ffb7b011c71 +// col, col, bool +template +struct AggregateFunctionWelchTTestData final { + + size_t size_x = 0; + size_t size_y = 0; + X sum_x = 0; + Y sum_y = 0; + X square_sum_x = 0; + Y square_sum_y = 0; + Float64 mean_x = 0; + Float64 mean_y = 0; + + /* + not yet sure how to use them + void add_x(X x) { + mean_x = (Float64)(sum_x + x) / (size_x + 1); + size_x ++; + sum_x += x; + square_sum_x += x * x; + } + + void add_y(Y y) { + mean_y = (sum_y + y) / (size_y + 1); + size_y ++; + sum_y += y; + square_sum_y += y * y; + 
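+        // note: unlike add_x above, this mean is computed without a Float64 cast, so an integral Y would truncate via integer division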
} + */ + + void add(X x, Y y) { + sum_x += x; + sum_y += y; + size_x++; + size_y++; + mean_x = (Float64) sum_x / size_x; + mean_y = (Float64) sum_y / size_y; + square_sum_x += x * x; + square_sum_y += y * y; + } + + void merge(const AggregateFunctionWelchTTestData &other) { + sum_x += other.sum_x; + sum_y += other.sum_y; + size_x += other.size_x; + size_y += other.size_y; + mean_x = (Float64) sum_x / size_x; + mean_y = (Float64) sum_y / size_y; + square_sum_x += other.square_sum_x; + square_sum_y += other.square_sum_y; + } + + void serialize(WriteBuffer &buf) const { + writeBinary(mean_x, buf); + writeBinary(mean_y, buf); + writeBinary(sum_x, buf); + writeBinary(sum_y, buf); + writeBinary(square_sum_x, buf); + writeBinary(square_sum_y, buf); + writeBinary(size_x, buf); + writeBinary(size_y, buf); + } + + void deserialize(ReadBuffer &buf) { + readBinary(mean_x, buf); + readBinary(mean_y, buf); + readBinary(sum_x, buf); + readBinary(sum_y, buf); + readBinary(square_sum_x, buf); + readBinary(square_sum_y, buf); + readBinary(size_x, buf); + readBinary(size_y, buf); + } + + Float64 get_sx() const { + return (Float64)(square_sum_x + size_x * mean_x * mean_x - 2 * mean_x * sum_x) / (size_x - 1); + } + + Float64 get_sy() const { + return (Float64)(square_sum_y + size_y * mean_y * mean_y - 2 * mean_y * sum_y) / (size_y - 1); + } + + Float64 get_T(Float64 sx, Float64 sy) const { + return (Float64)(mean_x - mean_y) / std::sqrt(sx / size_x + sy / size_y); + } + + Float64 get_degrees_of_freed(Float64 sx, Float64 sy) const { + return (Float64)(sx / size_x + sy / size_y) * (sx / size_x + sy / size_y) / + ((sx * sx / (size_x * size_x * (size_x - 1))) + (sy * sy / (size_y * size_y * (size_y - 1)))); + } + + Ret get_result(Float64 t, Float64 dof, Float64 parametr) const + { + //find our table + int table; + for (int i = 0; i < SIGN_LVL_CNT; ++i) + { + if (CriticalValuesTable[i][0] == parametr) + { + table = i; + } + } + + //round or make infinity dof + dof = (int) dof; + if (dof > 100) + { + dof = 101; + } + //check if abs of t is greater than table[dof] + t = abs(t) + if(t > CriticalValuesTable[table][dof]) { + return (UInt8) 1; + //in this case we reject the null hypothesis + } + else { + return (UInt8) 0; + } + } +}; + +template +class AggregateFunctionWelchTTest final : public + IAggregateFunctionDataHelper< + AggregateFunctionWelchTTestData, + AggregateFunctionWelchTTest + > +{ +public: + AggregateFunctionWelchTTest( + const DataTypes & arguments, + const Array & params + ): + IAggregateFunctionDataHelper< + AggregateFunctionWelchTTestData, + AggregateFunctionWelchTTest + > {arguments, params} + { + // notice: arguments has been in factory + } + + String getName() const override + { + return "WelchTTest"; + } + + void add( + AggregateDataPtr place, + const IColumn ** columns, + size_t row_num, + Arena * + ) const override + { + auto col_x = assert_cast *>(columns[0]); + auto col_y = assert_cast *>(columns[1]); + + X x = col_x->getData()[row_num]; + Y y = col_y->getData()[row_num]; + + this->data(place).add(x, y); + } + + void merge( + AggregateDataPtr place, + ConstAggregateDataPtr rhs, Arena * + ) const override + { + this->data(place).merge(this->data(rhs)); + } + + void serialize( + ConstAggregateDataPtr place, + WriteBuffer & buf + ) const override + { + this->data(place).serialize(buf); + } + + void deserialize( + AggregateDataPtr place, + ReadBuffer & buf, Arena * + ) const override + { + this->data(place).deserialize(buf); + } + + void insertResultInto( + ConstAggregateDataPtr place, + IColumn & 
to + ) const override + { + Float64 significance_level = applyVisitor(FieldVisitorConvertToNumber(), params[0]); + + Float64 sx = this->data(place).get_sx(); + Float64 sy = this->data(place).get_sy(); + Float64 t_value = this->data(place).get_T(sx, sy); + Float64 dof = this->data(place).get_degrees_of_freed(sx, sy); + Ret result = this->data(place).get_result(t_value, dof, significance_level); + + auto & column = static_cast(to); + column.getData().push_back(result); + } + + +} +}; + +}; \ No newline at end of file diff --git a/src/AggregateFunctions/registerAggregateFunctions.cpp b/src/AggregateFunctions/registerAggregateFunctions.cpp index a9ab1d4f8ea..adc72ec9169 100644 --- a/src/AggregateFunctions/registerAggregateFunctions.cpp +++ b/src/AggregateFunctions/registerAggregateFunctions.cpp @@ -45,6 +45,7 @@ void registerAggregateFunctions() registerAggregateFunctionMoving(factory); registerAggregateFunctionCategoricalIV(factory); registerAggregateFunctionAggThrow(factory); + registerAggregateFunctionWelchTTest(factory); } { diff --git a/src/AggregateFunctions/registerAggregateFunctions.h b/src/AggregateFunctions/registerAggregateFunctions.h index 88cdf4a504d..046b125dec5 100644 --- a/src/AggregateFunctions/registerAggregateFunctions.h +++ b/src/AggregateFunctions/registerAggregateFunctions.h @@ -35,6 +35,7 @@ void registerAggregateFunctionSimpleLinearRegression(AggregateFunctionFactory &) void registerAggregateFunctionMoving(AggregateFunctionFactory &); void registerAggregateFunctionCategoricalIV(AggregateFunctionFactory &); void registerAggregateFunctionAggThrow(AggregateFunctionFactory &); +void registerAggregateFunctionWelchTTest(AggregateFunctionFactory &); class AggregateFunctionCombinatorFactory; void registerAggregateFunctionCombinatorIf(AggregateFunctionCombinatorFactory &); From 69c1f33b0b22bd103cbce8f0ce747dc12937d1a1 Mon Sep 17 00:00:00 2001 From: antikvist Date: Mon, 27 Apr 2020 00:09:56 +0300 Subject: [PATCH 002/432] #WelchTTest aggregate function implementation What's new: -Main classes for aggreagate function added. -Data class with needed mathods added. 
-Registered function in registerAggregateFunctions.h --- .../AggregateFunctionWelchTTest.h | 24 +++++++++++-------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index a52528df431..be1e176d540 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -4,13 +4,17 @@ #include #include #include +#include #include #include #include #include #include #include -#include + +#include +#include +#include namespace DB @@ -21,19 +25,19 @@ namespace DB Float64 CriticalValuesTable[SIGN_LVL_CNT][102] = { // for significance level = 0.2 - {0.2, 3.078, 1.886, 1.638, 1.533, 1.476, 1.44, 1.415, 1.397, 1.383, 1.372, 1.363, 1.356, 1.35, 1.345, 1.341, 1.337, 1.333, 1.33, 1.328, 1.325, 1.323, 1.321, 1.319, 1.318, 1.316, 1.315, 1.314, 1.313, 1.311, 1.31, 1.309, 1.309, 1.308, 1.307, 1.306, 1.306, 1.305, 1.304, 1.304, 1.303, 1.303, 1.302, 1.302, 1.301, 1.301, 1.3, 1.3, 1.299, 1.299, 1.299, 1.298, 1.298, 1.298, 1.297, 1.297, 1.297, 1.297, 1.296, 1.296, 1.296, 1.296, 1.295, 1.295, 1.295, 1.295, 1.295, 1.294, 1.294, 1.294, 1.294, 1.294, 1.293, 1.293, 1.293, 1.293, 1.293, 1.293, 1.292, 1.292, 1.292, 1.292, 1.292, 1.292, 1.292, 1.292, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.29, 1.29, 1.29, 1.29, 1.29, 1.282} + {0.2, 3.078, 1.886, 1.638, 1.533, 1.476, 1.44, 1.415, 1.397, 1.383, 1.372, 1.363, 1.356, 1.35, 1.345, 1.341, 1.337, 1.333, 1.33, 1.328, 1.325, 1.323, 1.321, 1.319, 1.318, 1.316, 1.315, 1.314, 1.313, 1.311, 1.31, 1.309, 1.309, 1.308, 1.307, 1.306, 1.306, 1.305, 1.304, 1.304, 1.303, 1.303, 1.302, 1.302, 1.301, 1.301, 1.3, 1.3, 1.299, 1.299, 1.299, 1.298, 1.298, 1.298, 1.297, 1.297, 1.297, 1.297, 1.296, 1.296, 1.296, 1.296, 1.295, 1.295, 1.295, 1.295, 1.295, 1.294, 1.294, 1.294, 1.294, 1.294, 1.293, 1.293, 1.293, 1.293, 1.293, 1.293, 1.292, 1.292, 1.292, 1.292, 1.292, 1.292, 1.292, 1.292, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.29, 1.29, 1.29, 1.29, 1.29, 1.282}, // for significance level = 0.1 - {0.1, 6.314, 2.92, 2.353, 2.132, 2.015, 1.943, 1.895, 1.86, 1.833, 1.812, 1.796, 1.782, 1.771, 1.761, 1.753, 1.746, 1.74, 1.734, 1.729, 1.725, 1.721, 1.717, 1.714, 1.711, 1.708, 1.706, 1.703, 1.701, 1.699, 1.697, 1.696, 1.694, 1.692, 1.691, 1.69, 1.688, 1.687, 1.686, 1.685, 1.684, 1.683, 1.682, 1.681, 1.68, 1.679, 1.679, 1.678, 1.677, 1.677, 1.676, 1.675, 1.675, 1.674, 1.674, 1.673, 1.673, 1.672, 1.672, 1.671, 1.671, 1.67, 1.67, 1.669, 1.669, 1.669, 1.668, 1.668, 1.668, 1.667, 1.667, 1.667, 1.666, 1.666, 1.666, 1.665, 1.665, 1.665, 1.665, 1.664, 1.664, 1.664, 1.664, 1.663, 1.663, 1.663, 1.663, 1.663, 1.662, 1.662, 1.662, 1.662, 1.662, 1.661, 1.661, 1.661, 1.661, 1.661, 1.661, 1.66, 1.66, 1.645} + {0.1, 6.314, 2.92, 2.353, 2.132, 2.015, 1.943, 1.895, 1.86, 1.833, 1.812, 1.796, 1.782, 1.771, 1.761, 1.753, 1.746, 1.74, 1.734, 1.729, 1.725, 1.721, 1.717, 1.714, 1.711, 1.708, 1.706, 1.703, 1.701, 1.699, 1.697, 1.696, 1.694, 1.692, 1.691, 1.69, 1.688, 1.687, 1.686, 1.685, 1.684, 1.683, 1.682, 1.681, 1.68, 1.679, 1.679, 1.678, 1.677, 1.677, 1.676, 1.675, 1.675, 1.674, 1.674, 1.673, 1.673, 1.672, 1.672, 1.671, 1.671, 1.67, 1.67, 1.669, 1.669, 1.669, 1.668, 1.668, 1.668, 1.667, 1.667, 1.667, 1.666, 1.666, 1.666, 1.665, 1.665, 1.665, 1.665, 1.664, 1.664, 1.664, 1.664, 1.663, 1.663, 1.663, 1.663, 1.663, 1.662, 1.662, 1.662, 1.662, 1.662, 1.661, 1.661, 1.661, 1.661, 1.661, 1.661, 
1.66, 1.66, 1.645}, // for significance level = 0.05 - {0.05, 12.706, 4.303, 3.182, 2.776, 2.571, 2.447, 2.365, 2.306, 2.262, 2.228, 2.201, 2.179, 2.16, 2.145, 2.131, 2.12, 2.11, 2.101, 2.093, 2.086, 2.08, 2.074, 2.069, 2.064, 2.06, 2.056, 2.052, 2.048, 2.045, 2.042, 2.04, 2.037, 2.035, 2.032, 2.03, 2.028, 2.026, 2.024, 2.023, 2.021, 2.02, 2.018, 2.017, 2.015, 2.014, 2.013, 2.012, 2.011, 2.01, 2.009, 2.008, 2.007, 2.006, 2.005, 2.004, 2.003, 2.002, 2.002, 2.001, 2.0, 2.0, 1.999, 1.998, 1.998, 1.997, 1.997, 1.996, 1.995, 1.995, 1.994, 1.994, 1.993, 1.993, 1.993, 1.992, 1.992, 1.991, 1.991, 1.99, 1.99, 1.99, 1.989, 1.989, 1.989, 1.988, 1.988, 1.988, 1.987, 1.987, 1.987, 1.986, 1.986, 1.986, 1.986, 1.985, 1.985, 1.985, 1.984, 1.984, 1.984, 1.96} + {0.05, 12.706, 4.303, 3.182, 2.776, 2.571, 2.447, 2.365, 2.306, 2.262, 2.228, 2.201, 2.179, 2.16, 2.145, 2.131, 2.12, 2.11, 2.101, 2.093, 2.086, 2.08, 2.074, 2.069, 2.064, 2.06, 2.056, 2.052, 2.048, 2.045, 2.042, 2.04, 2.037, 2.035, 2.032, 2.03, 2.028, 2.026, 2.024, 2.023, 2.021, 2.02, 2.018, 2.017, 2.015, 2.014, 2.013, 2.012, 2.011, 2.01, 2.009, 2.008, 2.007, 2.006, 2.005, 2.004, 2.003, 2.002, 2.002, 2.001, 2.0, 2.0, 1.999, 1.998, 1.998, 1.997, 1.997, 1.996, 1.995, 1.995, 1.994, 1.994, 1.993, 1.993, 1.993, 1.992, 1.992, 1.991, 1.991, 1.99, 1.99, 1.99, 1.989, 1.989, 1.989, 1.988, 1.988, 1.988, 1.987, 1.987, 1.987, 1.986, 1.986, 1.986, 1.986, 1.985, 1.985, 1.985, 1.984, 1.984, 1.984, 1.96}, // for significance level = 0.02 - {0.02, 31.821, 6.965, 4.541, 3.747, 3.365, 3.143, 2.998, 2.896, 2.821, 2.764, 2.718, 2.681, 2.65, 2.624, 2.602, 2.583, 2.567, 2.552, 2.539, 2.528, 2.518, 2.508, 2.5, 2.492, 2.485, 2.479, 2.473, 2.467, 2.462, 2.457, 2.453, 2.449, 2.445, 2.441, 2.438, 2.434, 2.431, 2.429, 2.426, 2.423, 2.421, 2.418, 2.416, 2.414, 2.412, 2.41, 2.408, 2.407, 2.405, 2.403, 2.402, 2.4, 2.399, 2.397, 2.396, 2.395, 2.394, 2.392, 2.391, 2.39, 2.389, 2.388, 2.387, 2.386, 2.385, 2.384, 2.383, 2.382, 2.382, 2.381, 2.38, 2.379, 2.379, 2.378, 2.377, 2.376, 2.376, 2.375, 2.374, 2.374, 2.373, 2.373, 2.372, 2.372, 2.371, 2.37, 2.37, 2.369, 2.369, 2.368, 2.368, 2.368, 2.367, 2.367, 2.366, 2.366, 2.365, 2.365, 2.365, 2.364, 2.326} + {0.02, 31.821, 6.965, 4.541, 3.747, 3.365, 3.143, 2.998, 2.896, 2.821, 2.764, 2.718, 2.681, 2.65, 2.624, 2.602, 2.583, 2.567, 2.552, 2.539, 2.528, 2.518, 2.508, 2.5, 2.492, 2.485, 2.479, 2.473, 2.467, 2.462, 2.457, 2.453, 2.449, 2.445, 2.441, 2.438, 2.434, 2.431, 2.429, 2.426, 2.423, 2.421, 2.418, 2.416, 2.414, 2.412, 2.41, 2.408, 2.407, 2.405, 2.403, 2.402, 2.4, 2.399, 2.397, 2.396, 2.395, 2.394, 2.392, 2.391, 2.39, 2.389, 2.388, 2.387, 2.386, 2.385, 2.384, 2.383, 2.382, 2.382, 2.381, 2.38, 2.379, 2.379, 2.378, 2.377, 2.376, 2.376, 2.375, 2.374, 2.374, 2.373, 2.373, 2.372, 2.372, 2.371, 2.37, 2.37, 2.369, 2.369, 2.368, 2.368, 2.368, 2.367, 2.367, 2.366, 2.366, 2.365, 2.365, 2.365, 2.364, 2.326}, // for significance level = 0.01 - {0.01, 63.657, 9.925, 5.841, 4.604, 4.032, 3.707, 3.499, 3.355, 3.25, 3.169, 3.106, 3.055, 3.012, 2.977, 2.947, 2.921, 2.898, 2.878, 2.861, 2.845, 2.831, 2.819, 2.807, 2.797, 2.787, 2.779, 2.771, 2.763, 2.756, 2.75, 2.744, 2.738, 2.733, 2.728, 2.724, 2.719, 2.715, 2.712, 2.708, 2.704, 2.701, 2.698, 2.695, 2.692, 2.69, 2.687, 2.685, 2.682, 2.68, 2.678, 2.676, 2.674, 2.672, 2.67, 2.668, 2.667, 2.665, 2.663, 2.662, 2.66, 2.659, 2.657, 2.656, 2.655, 2.654, 2.652, 2.651, 2.65, 2.649, 2.648, 2.647, 2.646, 2.645, 2.644, 2.643, 2.642, 2.641, 2.64, 2.64, 2.639, 2.638, 2.637, 2.636, 2.636, 2.635, 2.634, 2.634, 2.633, 
2.632, 2.632, 2.631, 2.63, 2.63, 2.629, 2.629, 2.628, 2.627, 2.627, 2.626, 2.626, 2.576} + {0.01, 63.657, 9.925, 5.841, 4.604, 4.032, 3.707, 3.499, 3.355, 3.25, 3.169, 3.106, 3.055, 3.012, 2.977, 2.947, 2.921, 2.898, 2.878, 2.861, 2.845, 2.831, 2.819, 2.807, 2.797, 2.787, 2.779, 2.771, 2.763, 2.756, 2.75, 2.744, 2.738, 2.733, 2.728, 2.724, 2.719, 2.715, 2.712, 2.708, 2.704, 2.701, 2.698, 2.695, 2.692, 2.69, 2.687, 2.685, 2.682, 2.68, 2.678, 2.676, 2.674, 2.672, 2.67, 2.668, 2.667, 2.665, 2.663, 2.662, 2.66, 2.659, 2.657, 2.656, 2.655, 2.654, 2.652, 2.651, 2.65, 2.649, 2.648, 2.647, 2.646, 2.645, 2.644, 2.643, 2.642, 2.641, 2.64, 2.64, 2.639, 2.638, 2.637, 2.636, 2.636, 2.635, 2.634, 2.634, 2.633, 2.632, 2.632, 2.631, 2.63, 2.63, 2.629, 2.629, 2.628, 2.627, 2.627, 2.626, 2.626, 2.576}, // for significance level = 0.002 {0.002, 318.313, 22.327, 10.215, 7.173, 5.893, 5.208, 4.782, 4.499, 4.296, 4.143, 4.024, 3.929, 3.852, 3.787, 3.733, 3.686, 3.646, 3.61, 3.579, 3.552, 3.527, 3.505, 3.485, 3.467, 3.45, 3.435, 3.421, 3.408, 3.396, 3.385, 3.375, 3.365, 3.356, 3.348, 3.34, 3.333, 3.326, 3.319, 3.313, 3.307, 3.301, 3.296, 3.291, 3.286, 3.281, 3.277, 3.273, 3.269, 3.265, 3.261, 3.258, 3.255, 3.251, 3.248, 3.245, 3.242, 3.239, 3.237, 3.234, 3.232, 3.229, 3.227, 3.225, 3.223, 3.22, 3.218, 3.216, 3.214, 3.213, 3.211, 3.209, 3.207, 3.206, 3.204, 3.202, 3.201, 3.199, 3.198, 3.197, 3.195, 3.194, 3.193, 3.191, 3.19, 3.189, 3.188, 3.187, 3.185, 3.184, 3.183, 3.182, 3.181, 3.18, 3.179, 3.178, 3.177, 3.176, 3.175, 3.175, 3.174, 3.09} @@ -145,19 +149,19 @@ struct AggregateFunctionWelchTTestData final { } //round or make infinity dof - dof = (int) dof; + dof = static_cast(dof); if (dof > 100) { dof = 101; } //check if abs of t is greater than table[dof] - t = abs(t) + t = abs(t); if(t > CriticalValuesTable[table][dof]) { - return (UInt8) 1; + return static_cast(1); //in this case we reject the null hypothesis } else { - return (UInt8) 0; + return static_cast(0); } } }; From 62460faf97e2a10d3245d7b11d139240fcafa84b Mon Sep 17 00:00:00 2001 From: antikvist Date: Mon, 27 Apr 2020 01:59:41 +0300 Subject: [PATCH 003/432] #WelchTTest aggregate function implementation What's new: -Main classes for aggreagate function added. -Data class with needed mathods added. 
-Registered function in registerAggregateFunctions.h --- src/AggregateFunctions/AggregateFunctionWelchTTest.cpp | 2 +- src/AggregateFunctions/AggregateFunctionWelchTTest.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index 46b533a2c0b..a4e0a54775b 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -9,7 +9,7 @@ namespace DB namespace { -AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, const DataTypes & argument_types, const Array & parameters) +AggregateFunctionPtr createAggregateFunctionWelchTTest(const DataTypes & argument_types, const Array & parameters) { diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index be1e176d540..0279849be2c 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -41,7 +41,7 @@ Float64 CriticalValuesTable[SIGN_LVL_CNT][102] = { // for significance level = 0.002 {0.002, 318.313, 22.327, 10.215, 7.173, 5.893, 5.208, 4.782, 4.499, 4.296, 4.143, 4.024, 3.929, 3.852, 3.787, 3.733, 3.686, 3.646, 3.61, 3.579, 3.552, 3.527, 3.505, 3.485, 3.467, 3.45, 3.435, 3.421, 3.408, 3.396, 3.385, 3.375, 3.365, 3.356, 3.348, 3.34, 3.333, 3.326, 3.319, 3.313, 3.307, 3.301, 3.296, 3.291, 3.286, 3.281, 3.277, 3.273, 3.269, 3.265, 3.261, 3.258, 3.255, 3.251, 3.248, 3.245, 3.242, 3.239, 3.237, 3.234, 3.232, 3.229, 3.227, 3.225, 3.223, 3.22, 3.218, 3.216, 3.214, 3.213, 3.211, 3.209, 3.207, 3.206, 3.204, 3.202, 3.201, 3.199, 3.198, 3.197, 3.195, 3.194, 3.193, 3.191, 3.19, 3.189, 3.188, 3.187, 3.185, 3.184, 3.183, 3.182, 3.181, 3.18, 3.179, 3.178, 3.177, 3.176, 3.175, 3.175, 3.174, 3.09} -} +}; // our algorithm implementation via vectors: // https://gist.github.com/ltybc-coder/792748cfdb2f7cadef424ffb7b011c71 From dcfb99b9877528fe09db02a1afff6346de2c8e57 Mon Sep 17 00:00:00 2001 From: antikvist Date: Mon, 27 Apr 2020 19:16:02 +0300 Subject: [PATCH 004/432] #WelchTTest aggregate function implementation What's new: -Main classes for aggreagate function added. -Data class with needed mathods added. 
-Registered function in registerAggregateFunctions.h --- .../AggregateFunctionWelchTTest.h | 21 ++++++++++------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index 0279849be2c..ecc397cf731 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -17,6 +17,11 @@ #include +#include + +#include + + namespace DB { // hard-coded values - part of the algorithm @@ -80,8 +85,8 @@ struct AggregateFunctionWelchTTestData final { sum_y += y; size_x++; size_y++; - mean_x = (Float64) sum_x / size_x; - mean_y = (Float64) sum_y / size_y; + mean_x = static_cast<Float64>(sum_x) / size_x; + mean_y = static_cast<Float64>(sum_y) / size_y; square_sum_x += x * x; square_sum_y += y * y; } @@ -91,8 +96,8 @@ struct AggregateFunctionWelchTTestData final { sum_y += other.sum_y; size_x += other.size_x; size_y += other.size_y; - mean_x = (Float64) sum_x / size_x; - mean_y = (Float64) sum_y / size_y; + mean_x = static_cast<Float64>(sum_x) / size_x; + mean_y = static_cast<Float64>(sum_y) / size_y; square_sum_x += other.square_sum_x; square_sum_y += other.square_sum_y; } @@ -120,19 +125,19 @@ struct AggregateFunctionWelchTTestData final { } Float64 get_sx() const { - return (Float64)(square_sum_x + size_x * mean_x * mean_x - 2 * mean_x * sum_x) / (size_x - 1); + return static_cast<Float64>(square_sum_x + size_x * mean_x * mean_x - 2 * mean_x * sum_x) / (size_x - 1); } Float64 get_sy() const { - return (Float64)(square_sum_y + size_y * mean_y * mean_y - 2 * mean_y * sum_y) / (size_y - 1); + return static_cast<Float64>(square_sum_y + size_y * mean_y * mean_y - 2 * mean_y * sum_y) / (size_y - 1); } Float64 get_T(Float64 sx, Float64 sy) const { - return (Float64)(mean_x - mean_y) / std::sqrt(sx / size_x + sy / size_y); + return static_cast<Float64>(mean_x - mean_y) / std::sqrt(sx / size_x + sy / size_y); } Float64 get_degrees_of_freed(Float64 sx, Float64 sy) const { - return (Float64)(sx / size_x + sy / size_y) * (sx / size_x + sy / size_y) / + return static_cast<Float64>(sx / size_x + sy / size_y) * (sx / size_x + sy / size_y) / ((sx * sx / (size_x * size_x * (size_x - 1))) + (sy * sy / (size_y * size_y * (size_y - 1)))); } From 7cfe5ef42b80a45243e70d3b73cf459054e46890 Mon Sep 17 00:00:00 2001 From: antikvist Date: Fri, 1 May 2020 00:36:37 +0300 Subject: [PATCH 005/432] #WelchTTest aggregate function implementation What's new: -Main classes for aggregate function added. -Data class with needed methods added. 
-Registered function in registerAggregateFunctions.h --- src/AggregateFunctions/AggregateFunctionWelchTTest.h | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index ecc397cf731..e716553e065 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -144,7 +144,7 @@ struct AggregateFunctionWelchTTestData final { Ret get_result(Float64 t, Float64 dof, Float64 parametr) const { //find our table - int table; + int table = 0; for (int i = 0; i < SIGN_LVL_CNT; ++i) { if (CriticalValuesTable[i][0] == parametr) @@ -249,12 +249,11 @@ public: Float64 dof = this->data(place).get_degrees_of_freed(sx, sy); Ret result = this->data(place).get_result(t_value, dof, significance_level); - auto & column = static_cast(to); + //check the type + auto & column = static_cast &>(to); column.getData().push_back(result); } -} }; - -}; \ No newline at end of file +}; From 4f56cc32ae388c5dc002afe071b0b24a9d1adafb Mon Sep 17 00:00:00 2001 From: antikvist Date: Wed, 6 May 2020 00:48:05 +0300 Subject: [PATCH 006/432] #WelchTTest aggregate function implementation What's new: -Main classes for aggreagate function added. -Data class with needed mathods added. -Registered function in registerAggregateFunctions.h --- src/AggregateFunctions/AggregateFunctionWelchTTest.cpp | 6 +++--- src/AggregateFunctions/AggregateFunctionWelchTTest.h | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index a4e0a54775b..c4a0c6d4e2b 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -9,11 +9,11 @@ namespace DB namespace { +template AggregateFunctionPtr createAggregateFunctionWelchTTest(const DataTypes & argument_types, const Array & parameters) { - - - return std::make_shared(argument_types, parameters); + Float64 significance_level = applyVisitor(FieldVisitorConvertToNumber(), params[0]); + return std::make_shared>(argument_types, parameters); } diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index e716553e065..a1416f9ab41 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -154,14 +154,14 @@ struct AggregateFunctionWelchTTestData final { } //round or make infinity dof - dof = static_cast(dof); - if (dof > 100) + i_dof = static_cast(dof); + if (i_dof > 100) { - dof = 101; + i_dof = 101; } //check if abs of t is greater than table[dof] t = abs(t); - if(t > CriticalValuesTable[table][dof]) { + if(t > CriticalValuesTable[table][i_dof]) { return static_cast(1); //in this case we reject the null hypothesis } From f0ac5b441f7dccbdfe31512c90ae02da1d9921fa Mon Sep 17 00:00:00 2001 From: antikvist Date: Wed, 6 May 2020 02:07:51 +0300 Subject: [PATCH 007/432] #WelchTTest aggregate function implementation What's new: -Main classes for aggreagate function added. -Data class with needed mathods added. 
-Registered function in registerAggregateFunctions.h --- src/AggregateFunctions/AggregateFunctionWelchTTest.cpp | 1 - src/AggregateFunctions/AggregateFunctionWelchTTest.h | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index c4a0c6d4e2b..2c8d1e0aed8 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -12,7 +12,6 @@ namespace template AggregateFunctionPtr createAggregateFunctionWelchTTest(const DataTypes & argument_types, const Array & parameters) { - Float64 significance_level = applyVisitor(FieldVisitorConvertToNumber<Float64>(), params[0]); return std::make_shared>(argument_types, parameters); } diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index a1416f9ab41..9445ccc506d 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -154,7 +154,7 @@ struct AggregateFunctionWelchTTestData final { } //round or make infinity dof - i_dof = static_cast<int>(dof); + int i_dof = static_cast<int>(dof); if (i_dof > 100) { i_dof = 101; } From 3dde788146bcef2697cd2701b60d82f603d9c887 Mon Sep 17 00:00:00 2001 From: antikvist Date: Thu, 7 May 2020 14:17:58 +0300 Subject: [PATCH 008/432] #WelchTTest aggregate function implementation What's new: -Main classes for aggregate function added. -Data class with needed methods added. -Registered function in registerAggregateFunctions.h --- src/AggregateFunctions/AggregateFunctionWelchTTest.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index 9445ccc506d..5203ba1f988 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -186,7 +186,7 @@ public: IAggregateFunctionDataHelper< AggregateFunctionWelchTTestData, AggregateFunctionWelchTTest - > {arguments, params} + > ({argument}, params) { // notice: arguments has been in factory } From 92afa6c0decda463505263f2956005c58e831862 Mon Sep 17 00:00:00 2001 From: antikvist Date: Thu, 7 May 2020 17:14:29 +0300 Subject: [PATCH 009/432] #WelchTTest aggregate function implementation What's new: -Main classes for aggregate function added. -Data class with needed methods added. -Registered function in registerAggregateFunctions.h --- src/AggregateFunctions/AggregateFunctionWelchTTest.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index 5203ba1f988..22dda8ea244 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -186,7 +186,7 @@ public: IAggregateFunctionDataHelper< AggregateFunctionWelchTTestData, AggregateFunctionWelchTTest - > ({argument}, params) + > ({arguments}, params) { // notice: arguments has been in factory } From 7975d8d5b0c9c4534f108344deb1fdd279cf1eaf Mon Sep 17 00:00:00 2001 From: antikvist Date: Thu, 7 May 2020 23:11:25 +0300 Subject: [PATCH 010/432] #WelchTTest aggregate function implementation What's new: -Main classes for aggregate function added. -Data class with needed methods added. 
-Registered function in registerAggregateFunctions.h --- .../AggregateFunctionCount.cpp | 2 +- .../AggregateFunctionWelchTTest.h | 47 +++++++++++-------- 2 files changed, 29 insertions(+), 20 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionCount.cpp b/src/AggregateFunctions/AggregateFunctionCount.cpp index 6c22fec87a2..7ede78e720f 100644 --- a/src/AggregateFunctions/AggregateFunctionCount.cpp +++ b/src/AggregateFunctions/AggregateFunctionCount.cpp @@ -22,7 +22,7 @@ AggregateFunctionPtr createAggregateFunctionCount(const std::string & name, cons void registerAggregateFunctionCount(AggregateFunctionFactory & factory) { - factory.registerFunction("count", createAggregateFunctionCount, AggregateFunctionFactory::CaseInsensitive); + factory.registerFunction("count", createAggregateFunctionCount); } } diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index 22dda8ea244..e2e720a12ef 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -5,6 +5,8 @@ #include #include #include +#include +#include #include #include #include @@ -12,14 +14,8 @@ #include #include -#include -#include -#include - - #include -#include namespace DB @@ -52,7 +48,8 @@ Float64 CriticalValuesTable[SIGN_LVL_CNT][102] = { // https://gist.github.com/ltybc-coder/792748cfdb2f7cadef424ffb7b011c71 // col, col, bool template -struct AggregateFunctionWelchTTestData final { +struct AggregateFunctionWelchTTestData final +{ size_t size_x = 0; size_t size_y = 0; @@ -65,14 +62,16 @@ struct AggregateFunctionWelchTTestData final { /* not yet sure how to use them - void add_x(X x) { + void add_x(X x) + { mean_x = (Float64)(sum_x + x) / (size_x + 1); size_x ++; sum_x += x; square_sum_x += x * x; } - void add_y(Y y) { + void add_y(Y y) + { mean_y = (sum_y + y) / (size_y + 1); size_y ++; sum_y += y; @@ -80,7 +79,8 @@ struct AggregateFunctionWelchTTestData final { } */ - void add(X x, Y y) { + void add(X x, Y y) + { sum_x += x; sum_y += y; size_x++; @@ -91,7 +91,8 @@ struct AggregateFunctionWelchTTestData final { square_sum_y += y * y; } - void merge(const AggregateFunctionWelchTTestData &other) { + void merge(const AggregateFunctionWelchTTestData &other) + { sum_x += other.sum_x; sum_y += other.sum_y; size_x += other.size_x; @@ -102,7 +103,8 @@ struct AggregateFunctionWelchTTestData final { square_sum_y += other.square_sum_y; } - void serialize(WriteBuffer &buf) const { + void serialize(WriteBuffer &buf) const + { writeBinary(mean_x, buf); writeBinary(mean_y, buf); writeBinary(sum_x, buf); @@ -113,7 +115,8 @@ struct AggregateFunctionWelchTTestData final { writeBinary(size_y, buf); } - void deserialize(ReadBuffer &buf) { + void deserialize(ReadBuffer &buf) + { readBinary(mean_x, buf); readBinary(mean_y, buf); readBinary(sum_x, buf); @@ -124,19 +127,23 @@ struct AggregateFunctionWelchTTestData final { readBinary(size_y, buf); } - Float64 get_sx() const { + Float64 get_sx() const + { return static_cast(square_sum_x + size_x * mean_x * mean_x - 2 * mean_x * sum_x) / (size_x - 1); } - Float64 get_sy() const { + Float64 get_sy() const + { return static_cast(square_sum_y + size_y * mean_y * mean_y - 2 * mean_y * sum_y) / (size_y - 1); } - Float64 get_T(Float64 sx, Float64 sy) const { + Float64 get_T(Float64 sx, Float64 sy) const + { return static_cast(mean_x - mean_y) / std::sqrt(sx / size_x + sy / size_y); } - Float64 get_degrees_of_freed(Float64 sx, Float64 sy) const { + Float64 
get_degrees_of_freed(Float64 sx, Float64 sy) const + { return static_cast(sx / size_x + sy / size_y) * (sx / size_x + sy / size_y) / ((sx * sx / (size_x * size_x * (size_x - 1))) + (sy * sy / (size_y * size_y * (size_y - 1)))); } @@ -161,11 +168,13 @@ struct AggregateFunctionWelchTTestData final { } //check if abs of t is greater than table[dof] t = abs(t); - if(t > CriticalValuesTable[table][i_dof]) { + if(t > CriticalValuesTable[table][i_dof]) + { return static_cast(1); //in this case we reject the null hypothesis } - else { + else + { return static_cast(0); } } From b390043f31fe9a4023e9a00b248560155f114b72 Mon Sep 17 00:00:00 2001 From: antikvist Date: Fri, 8 May 2020 00:39:51 +0300 Subject: [PATCH 011/432] #WelchTTest aggregate function implementation What's new: -Main classes for aggreagate function added. -Data class with needed mathods added. -Registered function in registerAggregateFunctions.h --- .../AggregateFunctionWelchTTest.cpp | 12 ++++++++++-- src/AggregateFunctions/AggregateFunctionWelchTTest.h | 12 +++++++++--- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index 2c8d1e0aed8..c8f2a46b1e4 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -12,7 +12,15 @@ namespace template AggregateFunctionPtr createAggregateFunctionWelchTTest(const DataTypes & argument_types, const Array & parameters) { - return std::make_shared>(argument_types, parameters); + // default value + Float64 significance_level = 0.1; + if (!params.empty()) + { + significance_level = applyVisitor(FieldVisitorConvertToNumber(), params[0]); + } + + + return std::make_shared>(significance_level, argument_types, parameters); } @@ -21,7 +29,7 @@ AggregateFunctionPtr createAggregateFunctionWelchTTest(const DataTypes & argumen void registerAggregateFunctionWelchTTest(AggregateFunctionFactory & factory) { - factory.registerFunction("WelchTTest", createAggregateFunctionWelchTTest, AggregateFunctionFactory::CaseInsensitive); + factory.registerFunction("WelchTTest", createAggregateFunctionWelchTTest); } } diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index e2e720a12ef..367970fa4e0 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -168,7 +168,7 @@ struct AggregateFunctionWelchTTestData final } //check if abs of t is greater than table[dof] t = abs(t); - if(t > CriticalValuesTable[table][i_dof]) + if (t > CriticalValuesTable[table][i_dof]) { return static_cast(1); //in this case we reject the null hypothesis @@ -187,15 +187,22 @@ class AggregateFunctionWelchTTest final : public AggregateFunctionWelchTTest > { + + +private: + Float64 significance_level; + + public: AggregateFunctionWelchTTest( + Float64 sglvl_, const DataTypes & arguments, const Array & params ): IAggregateFunctionDataHelper< AggregateFunctionWelchTTestData, AggregateFunctionWelchTTest - > ({arguments}, params) + > ({arguments}, params), significance_level(sglvl_) { // notice: arguments has been in factory } @@ -250,7 +257,6 @@ public: IColumn & to ) const override { - Float64 significance_level = applyVisitor(FieldVisitorConvertToNumber(), params[0]); Float64 sx = this->data(place).get_sx(); Float64 sy = this->data(place).get_sy(); From ab7d1fb86fe2645bb7d3ed43cb79b41517f64b08 Mon Sep 17 00:00:00 2001 
From: antikvist Date: Fri, 8 May 2020 00:44:31 +0300 Subject: [PATCH 012/432] #WelchTTest aggregate function implementation What's new: -Main classes for aggreagate function added. -Data class with needed mathods added. -Registered function in registerAggregateFunctions.h --- src/AggregateFunctions/AggregateFunctionWelchTTest.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index 367970fa4e0..4b199e799f5 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -264,7 +264,7 @@ public: Float64 dof = this->data(place).get_degrees_of_freed(sx, sy); Ret result = this->data(place).get_result(t_value, dof, significance_level); - //check the type + auto & column = static_cast &>(to); column.getData().push_back(result); } From 4b4ff06cab3642cd061f80a8d0eddb74ff0d79db Mon Sep 17 00:00:00 2001 From: antikvist Date: Fri, 8 May 2020 02:22:12 +0300 Subject: [PATCH 013/432] #WelchTTest aggregate function implementation What's new: -Main classes for aggreagate function added. -Data class with needed mathods added. -Registered function in registerAggregateFunctions.h --- src/AggregateFunctions/AggregateFunctionWelchTTest.cpp | 5 +++-- src/AggregateFunctions/AggregateFunctionWelchTTest.h | 2 -- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index c8f2a46b1e4..151071091e4 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -1,6 +1,7 @@ #include #include #include +#include "registerAggregateFunctions.h" namespace DB @@ -14,9 +15,9 @@ AggregateFunctionPtr createAggregateFunctionWelchTTest(const DataTypes & argumen { // default value Float64 significance_level = 0.1; - if (!params.empty()) + if (!parameters.empty()) { - significance_level = applyVisitor(FieldVisitorConvertToNumber(), params[0]); + significance_level = applyVisitor(FieldVisitorConvertToNumber(), parameters[0]); } diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index 4b199e799f5..855b3fcd917 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -17,7 +17,6 @@ #include - namespace DB { // hard-codded values - part of the algorithm @@ -269,6 +268,5 @@ public: column.getData().push_back(result); } - }; }; From df4c312e141f938099ab5e9d00d17f40c442389d Mon Sep 17 00:00:00 2001 From: antikvist Date: Fri, 8 May 2020 13:17:59 +0300 Subject: [PATCH 014/432] #WelchTTest aggregate function implementation What's new: -Main classes for aggreagate function added. -Data class with needed mathods added. 
-Registered function in registerAggregateFunctions.h --- src/AggregateFunctions/AggregateFunctionWelchTTest.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index 151071091e4..bc759b9f8f1 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -30,7 +30,7 @@ AggregateFunctionPtr createAggregateFunctionWelchTTest(const DataTypes & argumen void registerAggregateFunctionWelchTTest(AggregateFunctionFactory & factory) { - factory.registerFunction("WelchTTest", createAggregateFunctionWelchTTest); + factory.registerFunction("WelchTTest", createAggregateFunctionWelchTTest); } } From 6d6f0b00ba532c3bb637d5a8f6d2dcaf845f257e Mon Sep 17 00:00:00 2001 From: antikvist Date: Sat, 9 May 2020 01:55:09 +0300 Subject: [PATCH 015/432] #WelchTTest aggregate function implementation What's new: -Main classes for aggreagate function added. -Data class with needed mathods added. -Registered function in registerAggregateFunctions.h --- .../AggregateFunctionWelchTTest.cpp | 25 ++++++++--- .../AggregateFunctionWelchTTest.h | 43 +++++++++++-------- 2 files changed, 43 insertions(+), 25 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index bc759b9f8f1..1634d0149da 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -3,26 +3,39 @@ #include #include "registerAggregateFunctions.h" +#include +#include +#include + namespace DB { namespace { +//template +static IAggregateFunction * createWithExtraTypes(Float64 significance_level, const DataTypes & argument_types, const Array & parameters) +{ + return new AggregateFunctionWelchTTest(significance_level, argument_types, parameters); +} -template -AggregateFunctionPtr createAggregateFunctionWelchTTest(const DataTypes & argument_types, const Array & parameters) +//template +AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, + const DataTypes & argument_types, + const Array & parameters) { // default value Float64 significance_level = 0.1; + if (parameters.size() > 1) + throw Exception("Aggregate function " + name + " requires two parameters or less.", + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); if (!parameters.empty()) { significance_level = applyVisitor(FieldVisitorConvertToNumber(), parameters[0]); } - - return std::make_shared>(significance_level, argument_types, parameters); - + AggregateFunctionPtr res (createWithExtraTypes(significance_level, argument_types, parameters)); + return res; } } @@ -30,7 +43,7 @@ AggregateFunctionPtr createAggregateFunctionWelchTTest(const DataTypes & argumen void registerAggregateFunctionWelchTTest(AggregateFunctionFactory & factory) { - factory.registerFunction("WelchTTest", createAggregateFunctionWelchTTest); + factory.registerFunction("WelchTTest", createAggregateFunctionWelchTTest); } } diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index 855b3fcd917..bbf02200745 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -46,16 +46,16 @@ Float64 CriticalValuesTable[SIGN_LVL_CNT][102] = { // our algorithm implementation via vectors: // 
https://gist.github.com/ltybc-coder/792748cfdb2f7cadef424ffb7b011c71 // col, col, bool -template +//template struct AggregateFunctionWelchTTestData final { size_t size_x = 0; size_t size_y = 0; - X sum_x = 0; - Y sum_y = 0; - X square_sum_x = 0; - Y square_sum_y = 0; + Float64 sum_x = static_cast(0); + Float64 sum_y = static_cast(0); + Float64 square_sum_x = static_cast(0); + Float64 square_sum_y = static_cast(0); Float64 mean_x = 0; Float64 mean_y = 0; @@ -78,7 +78,7 @@ struct AggregateFunctionWelchTTestData final } */ - void add(X x, Y y) + void add(Float64 x, Float64 y) { sum_x += x; sum_y += y; @@ -147,7 +147,7 @@ struct AggregateFunctionWelchTTestData final ((sx * sx / (size_x * size_x * (size_x - 1))) + (sy * sy / (size_y * size_y * (size_y - 1)))); } - Ret get_result(Float64 t, Float64 dof, Float64 parametr) const + UInt8 get_result(Float64 t, Float64 dof, Float64 parametr) const { //find our table int table = 0; @@ -179,11 +179,11 @@ struct AggregateFunctionWelchTTestData final } }; -template -class AggregateFunctionWelchTTest final : public +//template +class AggregateFunctionWelchTTest : public IAggregateFunctionDataHelper< - AggregateFunctionWelchTTestData, - AggregateFunctionWelchTTest + AggregateFunctionWelchTTestData, + AggregateFunctionWelchTTest > { @@ -199,8 +199,8 @@ public: const Array & params ): IAggregateFunctionDataHelper< - AggregateFunctionWelchTTestData, - AggregateFunctionWelchTTest + AggregateFunctionWelchTTestData, + AggregateFunctionWelchTTest > ({arguments}, params), significance_level(sglvl_) { // notice: arguments has been in factory @@ -211,6 +211,11 @@ public: return "WelchTTest"; } + DataTypePtr getReturnType() const override + { + return std::make_shared(); + } + void add( AggregateDataPtr place, const IColumn ** columns, @@ -218,11 +223,11 @@ public: Arena * ) const override { - auto col_x = assert_cast *>(columns[0]); - auto col_y = assert_cast *>(columns[1]); + auto col_x = assert_cast *>(columns[0]); + auto col_y = assert_cast *>(columns[1]); - X x = col_x->getData()[row_num]; - Y y = col_y->getData()[row_num]; + Float64 x = col_x->getData()[row_num]; + Float64 y = col_y->getData()[row_num]; this->data(place).add(x, y); } @@ -261,10 +266,10 @@ public: Float64 sy = this->data(place).get_sy(); Float64 t_value = this->data(place).get_T(sx, sy); Float64 dof = this->data(place).get_degrees_of_freed(sx, sy); - Ret result = this->data(place).get_result(t_value, dof, significance_level); + UInt8 result = this->data(place).get_result(t_value, dof, significance_level); - auto & column = static_cast &>(to); + auto & column = static_cast &>(to); column.getData().push_back(result); } From f6e6c48d9c477f92f7669996e4742d1125061ea5 Mon Sep 17 00:00:00 2001 From: antikvist Date: Sun, 17 May 2020 12:25:41 +0300 Subject: [PATCH 016/432] #WelchTTest aggregate function implementation What's new: -Main classes for aggreagate function added. -Data class with needed mathods added. 
-Registered function in registerAggregateFunctions.h --- src/AggregateFunctions/AggregateFunctionCount.cpp | 2 +- src/AggregateFunctions/AggregateFunctionWelchTTest.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionCount.cpp b/src/AggregateFunctions/AggregateFunctionCount.cpp index 7ede78e720f..6c22fec87a2 100644 --- a/src/AggregateFunctions/AggregateFunctionCount.cpp +++ b/src/AggregateFunctions/AggregateFunctionCount.cpp @@ -22,7 +22,7 @@ AggregateFunctionPtr createAggregateFunctionCount(const std::string & name, cons void registerAggregateFunctionCount(AggregateFunctionFactory & factory) { - factory.registerFunction("count", createAggregateFunctionCount); + factory.registerFunction("count", createAggregateFunctionCount, AggregateFunctionFactory::CaseInsensitive); } } diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index 1634d0149da..90b1c445a14 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -14,7 +14,7 @@ namespace DB namespace { //template -static IAggregateFunction * createWithExtraTypes(Float64 significance_level, const DataTypes & argument_types, const Array & parameters) +IAggregateFunction * createWithExtraTypes(Float64 significance_level, const DataTypes & argument_types, const Array & parameters) { return new AggregateFunctionWelchTTest(significance_level, argument_types, parameters); } From 6069750b0e6bf95ab180d033c0fe376102d540fd Mon Sep 17 00:00:00 2001 From: antikvist Date: Sun, 17 May 2020 14:15:49 +0300 Subject: [PATCH 017/432] #WelchTTest aggregate function implementation What's new: -Main classes for aggreagate function added. -Data class with needed mathods added. 
-Registered function in registerAggregateFunctions.h --- src/AggregateFunctions/AggregateFunctionWelchTTest.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index 90b1c445a14..1634d0149da 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -14,7 +14,7 @@ namespace DB namespace { //template -IAggregateFunction * createWithExtraTypes(Float64 significance_level, const DataTypes & argument_types, const Array & parameters) +static IAggregateFunction * createWithExtraTypes(Float64 significance_level, const DataTypes & argument_types, const Array & parameters) { return new AggregateFunctionWelchTTest(significance_level, argument_types, parameters); } From 1903e6cec227f216159b23656213587beaea4981 Mon Sep 17 00:00:00 2001 From: antikvist Date: Fri, 12 Jun 2020 02:43:55 +0300 Subject: [PATCH 018/432] pray to ClickHouse gods --- .../AggregateFunctionWelchTTest.cpp | 21 +++++--- .../AggregateFunctionWelchTTest.h | 50 +++++++++---------- 2 files changed, 37 insertions(+), 34 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index 1634d0149da..3a2bc831cc1 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -4,30 +4,34 @@ #include "registerAggregateFunctions.h" #include -#include #include +namespace ErrorCodes +{ +extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; +} + namespace DB { namespace { -//template +template static IAggregateFunction * createWithExtraTypes(Float64 significance_level, const DataTypes & argument_types, const Array & parameters) { return new AggregateFunctionWelchTTest(significance_level, argument_types, parameters); } -//template +template AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, - const DataTypes & argument_types, - const Array & parameters) + const DataTypes & argument_types, + const Array & parameters) { // default value Float64 significance_level = 0.1; if (parameters.size() > 1) - throw Exception("Aggregate function " + name + " requires two parameters or less.", + throw Exception("Aggregate function " + name + " requires one parameter or less.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); if (!parameters.empty()) { @@ -40,10 +44,11 @@ AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, } +template void registerAggregateFunctionWelchTTest(AggregateFunctionFactory & factory) { - factory.registerFunction("WelchTTest", createAggregateFunctionWelchTTest); + factory.registerFunction("WelchTTest", createAggregateFunctionWelchTTest); } -} +} \ No newline at end of file diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index bbf02200745..d625cc908ec 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -46,16 +46,17 @@ Float64 CriticalValuesTable[SIGN_LVL_CNT][102] = { // our algorithm implementation via vectors: // https://gist.github.com/ltybc-coder/792748cfdb2f7cadef424ffb7b011c71 // col, col, bool -//template +template +//template struct AggregateFunctionWelchTTestData final { size_t size_x = 0; size_t size_y = 0; - Float64 sum_x = static_cast(0); - Float64 sum_y = static_cast(0); - Float64 
square_sum_x = static_cast(0); - Float64 square_sum_y = static_cast(0); + X sum_x = static_cast(0); + Y sum_y = static_cast(0); + X square_sum_x = static_cast(0); + Y square_sum_y = static_cast(0); Float64 mean_x = 0; Float64 mean_y = 0; @@ -78,7 +79,7 @@ struct AggregateFunctionWelchTTestData final } */ - void add(Float64 x, Float64 y) + void add(X x, Y y) { sum_x += x; sum_y += y; @@ -147,7 +148,7 @@ struct AggregateFunctionWelchTTestData final ((sx * sx / (size_x * size_x * (size_x - 1))) + (sy * sy / (size_y * size_y * (size_y - 1)))); } - UInt8 get_result(Float64 t, Float64 dof, Float64 parametr) const + Ret get_result(Float64 t, Float64 dof, Float64 parametr) const { //find our table int table = 0; @@ -169,29 +170,27 @@ struct AggregateFunctionWelchTTestData final t = abs(t); if (t > CriticalValuesTable[table][i_dof]) { - return static_cast(1); + return static_cast(1); //in this case we reject the null hypothesis } else { - return static_cast(0); + return static_cast(0); } } }; -//template +template class AggregateFunctionWelchTTest : public - IAggregateFunctionDataHelper< - AggregateFunctionWelchTTestData, - AggregateFunctionWelchTTest - > + IAggregateFunctionDataHelper< + AggregateFunctionWelchTTestData, + AggregateFunctionWelchTTest + > { - private: Float64 significance_level; - public: AggregateFunctionWelchTTest( Float64 sglvl_, @@ -213,7 +212,7 @@ public: DataTypePtr getReturnType() const override { - return std::make_shared(); + return std::make_shared>(); } void add( @@ -223,11 +222,11 @@ public: Arena * ) const override { - auto col_x = assert_cast *>(columns[0]); - auto col_y = assert_cast *>(columns[1]); + auto col_x = assert_cast *>(columns[0]); + auto col_y = assert_cast *>(columns[1]); - Float64 x = col_x->getData()[row_num]; - Float64 y = col_y->getData()[row_num]; + X x = col_x->getData()[row_num]; + Y y = col_y->getData()[row_num]; this->data(place).add(x, y); } @@ -257,7 +256,7 @@ public: } void insertResultInto( - ConstAggregateDataPtr place, + AggregateDataPtr place, IColumn & to ) const override { @@ -266,12 +265,11 @@ public: Float64 sy = this->data(place).get_sy(); Float64 t_value = this->data(place).get_T(sx, sy); Float64 dof = this->data(place).get_degrees_of_freed(sx, sy); - UInt8 result = this->data(place).get_result(t_value, dof, significance_level); + Ret result = this->data(place).get_result(t_value, dof, significance_level); - - auto & column = static_cast &>(to); + auto & column = static_cast &>(to); column.getData().push_back(result); } }; -}; +}; \ No newline at end of file From bbfccd491ece148818b5d0b09ffdd6abaedae908 Mon Sep 17 00:00:00 2001 From: antikvist Date: Fri, 12 Jun 2020 16:51:33 +0300 Subject: [PATCH 019/432] welch t-test --- src/AggregateFunctions/AggregateFunctionWelchTTest.cpp | 2 +- src/AggregateFunctions/AggregateFunctionWelchTTest.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index 3a2bc831cc1..853a1182340 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -20,7 +20,7 @@ namespace template static IAggregateFunction * createWithExtraTypes(Float64 significance_level, const DataTypes & argument_types, const Array & parameters) { - return new AggregateFunctionWelchTTest(significance_level, argument_types, parameters); + return new AggregateFunctionWelchTTest(significance_level, argument_types, parameters); } template 
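For reference, get_T and get_degrees_of_freed in this series implement the usual Welch statistic and the Welch-Satterthwaite approximation. In standard notation (not taken from the patch itself), with n_x, n_y the sample sizes and s_x^2, s_y^2 the sample variances returned by get_sx/get_sy:

t = \frac{\bar{x} - \bar{y}}{\sqrt{s_x^2 / n_x + s_y^2 / n_y}},
\qquad
\nu \approx \frac{\left( s_x^2 / n_x + s_y^2 / n_y \right)^2}{\frac{(s_x^2 / n_x)^2}{n_x - 1} + \frac{(s_y^2 / n_y)^2}{n_y - 1}}

get_result then rejects the null hypothesis of equal means when |t| exceeds the tabulated critical value for \nu degrees of freedom at the chosen significance level; CriticalValuesTable stores those critical values for six significance levels, with the last column (index 101) standing in for \nu = \infty.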
diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index d625cc908ec..bbd02d844c4 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -198,8 +198,8 @@ public: const Array & params ): IAggregateFunctionDataHelper< - AggregateFunctionWelchTTestData, - AggregateFunctionWelchTTest + AggregateFunctionWelchTTestData, + AggregateFunctionWelchTTest > ({arguments}, params), significance_level(sglvl_) { // notice: arguments has been in factory From 13faa3d83f8e50a2e1ea1c23b0026a11c9d72163 Mon Sep 17 00:00:00 2001 From: antikvist Date: Sat, 13 Jun 2020 19:23:17 +0300 Subject: [PATCH 020/432] welch t-test --- .../AggregateFunctionWelchTTest.cpp | 26 ++++++++++------- .../AggregateFunctionWelchTTest.h | 28 +++++++++---------- 2 files changed, 30 insertions(+), 24 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index 853a1182340..9f451fd5d88 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -7,6 +7,8 @@ #include +// the return type is boolean (we use UInt8 as we do not have boolean in clickhouse) + namespace ErrorCodes { extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; @@ -17,13 +19,11 @@ namespace DB namespace { -template -static IAggregateFunction * createWithExtraTypes(Float64 significance_level, const DataTypes & argument_types, const Array & parameters) -{ - return new AggregateFunctionWelchTTest(significance_level, argument_types, parameters); -} +//static IAggregateFunction * createWithExtraTypes(Float64 significance_level, const DataTypes & argument_types, const Array & parameters) +//{ +// return new AggregateFunctionWelchTTest(significance_level, argument_types, parameters); +//} -template AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, const DataTypes & argument_types, const Array & parameters) @@ -38,17 +38,23 @@ AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, significance_level = applyVisitor(FieldVisitorConvertToNumber(), parameters[0]); } - AggregateFunctionPtr res (createWithExtraTypes(significance_level, argument_types, parameters)); + AggregateFunctionPtr res; + DataTypePtr data_type = argument_types[0]; +// if (isDecimal(data_type)) +// res.reset(createWithDecimalType(*data_type, significance_level, argument_types, parameters)); +// else + res.reset(createWithNumericType(*data_type, significance_level, argument_types, parameters)); + + //AggregateFunctionPtr res (createWithExtraTypes(significance_level, argument_types, parameters)); return res; } } -template + void registerAggregateFunctionWelchTTest(AggregateFunctionFactory & factory) { - - factory.registerFunction("WelchTTest", createAggregateFunctionWelchTTest); + factory.registerFunction("WelchTTest", createAggregateFunctionWelchTTest); } } \ No newline at end of file diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index bbd02d844c4..13b9c992162 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -46,8 +46,8 @@ Float64 CriticalValuesTable[SIGN_LVL_CNT][102] = { // our algorithm implementation via vectors: // https://gist.github.com/ltybc-coder/792748cfdb2f7cadef424ffb7b011c71 // col, col, bool -template 
-//template +template +//template struct AggregateFunctionWelchTTestData final { @@ -148,7 +148,7 @@ struct AggregateFunctionWelchTTestData final ((sx * sx / (size_x * size_x * (size_x - 1))) + (sy * sy / (size_y * size_y * (size_y - 1)))); } - Ret get_result(Float64 t, Float64 dof, Float64 parametr) const + UInt8 get_result(Float64 t, Float64 dof, Float64 parametr) const { //find our table int table = 0; @@ -170,21 +170,21 @@ struct AggregateFunctionWelchTTestData final t = abs(t); if (t > CriticalValuesTable[table][i_dof]) { - return static_cast(1); + return static_cast(1); //in this case we reject the null hypothesis } else { - return static_cast(0); + return static_cast(0); } } }; -template +template class AggregateFunctionWelchTTest : public IAggregateFunctionDataHelper< - AggregateFunctionWelchTTestData, - AggregateFunctionWelchTTest + AggregateFunctionWelchTTestData, + AggregateFunctionWelchTTest > { @@ -198,8 +198,8 @@ public: const Array & params ): IAggregateFunctionDataHelper< - AggregateFunctionWelchTTestData, - AggregateFunctionWelchTTest + AggregateFunctionWelchTTestData, + AggregateFunctionWelchTTest > ({arguments}, params), significance_level(sglvl_) { // notice: arguments has been in factory @@ -212,7 +212,7 @@ public: DataTypePtr getReturnType() const override { - return std::make_shared>(); + return std::make_shared>(); } void add( @@ -265,11 +265,11 @@ public: Float64 sy = this->data(place).get_sy(); Float64 t_value = this->data(place).get_T(sx, sy); Float64 dof = this->data(place).get_degrees_of_freed(sx, sy); - Ret result = this->data(place).get_result(t_value, dof, significance_level); + UInt8 result = this->data(place).get_result(t_value, dof, significance_level); - auto & column = static_cast &>(to); + auto & column = static_cast &>(to); column.getData().push_back(result); } }; -}; \ No newline at end of file +}; \ No newline at end of file From cb8eec8def65f817773f623e57d8e02518d3c2bd Mon Sep 17 00:00:00 2001 From: antikvist Date: Sun, 14 Jun 2020 00:55:01 +0300 Subject: [PATCH 021/432] welch t-test --- .../AggregateFunctionWelchTTest.cpp | 24 +++++++++++-------- .../AggregateFunctionWelchTTest.h | 2 +- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index 9f451fd5d88..8d2963aba74 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -12,6 +12,7 @@ namespace ErrorCodes { extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; +extern const int NOT_IMPLEMENTED; } namespace DB @@ -31,21 +32,24 @@ AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, // default value Float64 significance_level = 0.1; if (parameters.size() > 1) - throw Exception("Aggregate function " + name + " requires one parameter or less.", - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + { + throw Exception("Aggregate function " + name + " requires one parameter or less.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + } + if (!parameters.empty()) { significance_level = applyVisitor(FieldVisitorConvertToNumber(), parameters[0]); } AggregateFunctionPtr res; - DataTypePtr data_type = argument_types[0]; -// if (isDecimal(data_type)) -// res.reset(createWithDecimalType(*data_type, significance_level, argument_types, parameters)); -// else - res.reset(createWithNumericType(*data_type, significance_level, argument_types, parameters)); - //AggregateFunctionPtr res 
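The decision in get_result is a pure table lookup: the significance level selects a row of CriticalValuesTable (assuming, as the lookup implies, that each row begins with the level it encodes), and the integer degrees of freedom select the column, with the last column approximating infinite dof. A simplified sketch of the logic at this point in the series, where 1 means the null hypothesis is rejected:

```cpp
// Simplified sketch of get_result: row by significance level, column by
// degrees of freedom, reject H0 when |t| exceeds the critical value.
UInt8 decide(Float64 t, Float64 dof, Float64 significance_level)
{
    int table = 0;
    for (int i = 0; i < SIGN_LVL_CNT; ++i)
        if (CriticalValuesTable[i][0] == significance_level)
            table = i;

    int i_dof = static_cast<int>(dof);
    if (i_dof > 100)
        i_dof = 101;  // last column approximates infinite dof

    return std::fabs(t) > CriticalValuesTable[table][i_dof]
        ? static_cast<UInt8>(1)   // reject H0
        : static_cast<UInt8>(0);
}
```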
(createWithExtraTypes(significance_level, argument_types, parameters)); + if (isDecimal(argument_types[0]) || isDecimal(argument_types[1])) + { + throw Exception("Aggregate function " + name + " does not support decimal types.", ErrorCodes::NOT_IMPLEMENTED); + } + + res.reset(createWithTwoNumericTypes(*argument_types[0], *argument_types[1], significance_level, argument_types, parameters)); + return res; } @@ -54,7 +58,7 @@ AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, void registerAggregateFunctionWelchTTest(AggregateFunctionFactory & factory) { - factory.registerFunction("WelchTTest", createAggregateFunctionWelchTTest); + factory.registerFunction("WelchTTest", createAggregateFunctionWelchTTest, AggregateFunctionFactory::CaseInsensitive); } -} \ No newline at end of file +} diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index 13b9c992162..29f8e17b6be 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -272,4 +272,4 @@ public: } }; -}; \ No newline at end of file +}; From 9638eb7490bd8cfad234353d791115cd9988cbf6 Mon Sep 17 00:00:00 2001 From: antikvist Date: Sun, 14 Jun 2020 19:18:04 +0300 Subject: [PATCH 022/432] welch t-test --- .../AggregateFunctionWelchTTest.cpp | 15 ++++++--------- .../AggregateFunctionWelchTTest.h | 12 ++++++------ 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index 8d2963aba74..c7349f28d90 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -20,10 +20,6 @@ namespace DB namespace { -//static IAggregateFunction * createWithExtraTypes(Float64 significance_level, const DataTypes & argument_types, const Array & parameters) -//{ -// return new AggregateFunctionWelchTTest(significance_level, argument_types, parameters); -//} AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, const DataTypes & argument_types, @@ -43,12 +39,13 @@ AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, AggregateFunctionPtr res; - if (isDecimal(argument_types[0]) || isDecimal(argument_types[1])) - { - throw Exception("Aggregate function " + name + " does not support decimal types.", ErrorCodes::NOT_IMPLEMENTED); - } +// if (isDecimal(argument_types[0]) || isDecimal(argument_types[1])) +// { +// throw Exception("Aggregate function " + name + " does not support decimal types.", ErrorCodes::NOT_IMPLEMENTED); +// } - res.reset(createWithTwoNumericTypes(*argument_types[0], *argument_types[1], significance_level, argument_types, parameters)); + res.reset(createWithTwoNumericTypes(*argument_types[0], *argument_types[1], significance_level, + argument_types, parameters)); return res; } diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index 29f8e17b6be..210b8990693 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -53,12 +53,12 @@ struct AggregateFunctionWelchTTestData final size_t size_x = 0; size_t size_y = 0; - X sum_x = static_cast(0); - Y sum_y = static_cast(0); - X square_sum_x = static_cast(0); - Y square_sum_y = static_cast(0); - Float64 mean_x = 0; - Float64 mean_y = 0; + X sum_x = static_cast(0); + Y 
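createWithTwoNumericTypes comes from AggregateFunctions/Helpers.h and instantiates the templated class once per pair of native numeric argument types, returning nullptr for anything it does not cover (hence the explicit decimal check here, and the !res guard added a couple of patches later). Conceptually, heavily abridged:

```cpp
// Conceptual two-argument dispatch; the real helper is macro-generated
// and covers every numeric (X, Y) combination.
static IAggregateFunction * createWithTwoNumericTypesSketch(
    const IDataType & x_type, const IDataType & y_type,
    Float64 level, const DataTypes & types, const Array & params)
{
    WhichDataType x(x_type), y(y_type);
    if (x.isFloat64() && y.isFloat64())
        return new AggregateFunctionWelchTTest<Float64, Float64>(level, types, params);
    if (x.isUInt64() && y.isFloat64())
        return new AggregateFunctionWelchTTest<UInt64, Float64>(level, types, params);
    /// ... one branch per remaining numeric pair ...
    return nullptr;  // caller reports an unsupported type
}
```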
sum_y = static_cast(0); + X square_sum_x = static_cast(0); + Y square_sum_y = static_cast(0); + Float64 mean_x = static_cast(0); + Float64 mean_y = static_cast(0); /* not yet sure how to use them From 3b30ea2f373d841f936b6b2bd5be25e335b82d70 Mon Sep 17 00:00:00 2001 From: antikvist Date: Sun, 14 Jun 2020 23:00:02 +0300 Subject: [PATCH 023/432] welch t-test --- src/AggregateFunctions/AggregateFunctionWelchTTest.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index c7349f28d90..e7ba204046e 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -25,6 +25,8 @@ AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, const DataTypes & argument_types, const Array & parameters) { + assertBinary(name, argument_types); + // default value Float64 significance_level = 0.1; if (parameters.size() > 1) @@ -39,10 +41,10 @@ AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, AggregateFunctionPtr res; -// if (isDecimal(argument_types[0]) || isDecimal(argument_types[1])) -// { -// throw Exception("Aggregate function " + name + " does not support decimal types.", ErrorCodes::NOT_IMPLEMENTED); -// } + if (isDecimal(argument_types[0]) || isDecimal(argument_types[1])) + { + throw Exception("Aggregate function " + name + " does not support decimal types.", ErrorCodes::NOT_IMPLEMENTED); + } res.reset(createWithTwoNumericTypes(*argument_types[0], *argument_types[1], significance_level, argument_types, parameters)); From 2f073ab3f785fc267715940d685a1b7743c588f4 Mon Sep 17 00:00:00 2001 From: antikvist Date: Mon, 15 Jun 2020 21:18:22 +0300 Subject: [PATCH 024/432] welch t-test --- .../AggregateFunctionWelchTTest.cpp | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index e7ba204046e..b1c8f73d2e9 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -43,11 +43,17 @@ AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, if (isDecimal(argument_types[0]) || isDecimal(argument_types[1])) { - throw Exception("Aggregate function " + name + " does not support decimal types.", ErrorCodes::NOT_IMPLEMENTED); + throw Exception("Aggregate function " + name + " only supports numerical types.", ErrorCodes::NOT_IMPLEMENTED); + } + + else{ + res.reset(createWithTwoNumericTypes(*argument_types[0], *argument_types[1], significance_level, + argument_types, parameters)); } - res.reset(createWithTwoNumericTypes(*argument_types[0], *argument_types[1], significance_level, - argument_types, parameters)); + if(!res){ + throw Exception("Aggregate function " + name + " only supports numerical types.", ErrorCodes::NOT_IMPLEMENTED); + } return res; } From ccce2537f686152e09c7143517707e5dcec8fe95 Mon Sep 17 00:00:00 2001 From: antikvist Date: Mon, 15 Jun 2020 21:21:48 +0300 Subject: [PATCH 025/432] welch t-test --- src/AggregateFunctions/AggregateFunctionWelchTTest.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index b1c8f73d2e9..28ad414146e 100644 --- 
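The assertBinary call added above comes from AggregateFunctions/FactoryHelpers.h and rejects anything but a two-column signature before type dispatch runs; it is roughly equivalent to:

```cpp
// Rough equivalent of the FactoryHelpers.h check (illustration only).
void assertBinary(const std::string & name, const DataTypes & argument_types)
{
    if (argument_types.size() != 2)
        throw Exception("Aggregate function " + name + " requires two arguments",
            ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
}
```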
a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -45,12 +45,13 @@ AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, { throw Exception("Aggregate function " + name + " only supports numerical types.", ErrorCodes::NOT_IMPLEMENTED); } - + else{ res.reset(createWithTwoNumericTypes(*argument_types[0], *argument_types[1], significance_level, argument_types, parameters)); } + if(!res){ throw Exception("Aggregate function " + name + " only supports numerical types.", ErrorCodes::NOT_IMPLEMENTED); } From 965bf4bd6535676f99b667b8630fcdce7f4f9e5b Mon Sep 17 00:00:00 2001 From: antikvist Date: Tue, 16 Jun 2020 01:38:35 +0300 Subject: [PATCH 026/432] welch t-test --- src/AggregateFunctions/AggregateFunctionWelchTTest.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index 28ad414146e..3d2e98e2a0e 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -46,13 +46,15 @@ AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, throw Exception("Aggregate function " + name + " only supports numerical types.", ErrorCodes::NOT_IMPLEMENTED); } - else{ + else + { res.reset(createWithTwoNumericTypes(*argument_types[0], *argument_types[1], significance_level, argument_types, parameters)); } - if(!res){ + if (!res) + { throw Exception("Aggregate function " + name + " only supports numerical types.", ErrorCodes::NOT_IMPLEMENTED); } From 51ff2f4e92d163593c192437e98a5a03bf67011e Mon Sep 17 00:00:00 2001 From: antikvist Date: Tue, 16 Jun 2020 09:14:54 +0300 Subject: [PATCH 027/432] welch t-test --- src/AggregateFunctions/AggregateFunctionWelchTTest.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index 210b8990693..897c583d913 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -272,4 +272,5 @@ public: } }; + }; From a7f8d6b380f290f25725fef6ab97c10dba0c37b1 Mon Sep 17 00:00:00 2001 From: antikvist Date: Tue, 16 Jun 2020 12:45:46 +0300 Subject: [PATCH 028/432] welch t-test --- src/AggregateFunctions/ya.make | 1 + 1 file changed, 1 insertion(+) diff --git a/src/AggregateFunctions/ya.make b/src/AggregateFunctions/ya.make index bfa32b6dd78..edae91cc745 100644 --- a/src/AggregateFunctions/ya.make +++ b/src/AggregateFunctions/ya.make @@ -49,6 +49,7 @@ SRCS( registerAggregateFunctions.cpp UniqCombinedBiasData.cpp UniqVariadicHash.cpp + AggregateFunctionWelchTTest.cpp ) END() From 21c5ecb597f3c3e4e65cbf4d48b3e604f9428931 Mon Sep 17 00:00:00 2001 From: antikvist Date: Tue, 16 Jun 2020 22:58:06 +0300 Subject: [PATCH 029/432] welch t-test --- .../AggregateFunctionWelchTTest.h | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index 897c583d913..266da9fde5b 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -16,6 +16,10 @@ #include +namespace ErrorCodes +{ +extern const int BAD_ARGUMENTS; +} namespace DB { @@ -127,6 +131,16 @@ struct AggregateFunctionWelchTTestData final readBinary(size_y, buf); } + size_t 
get_size_y() const + { + return size_y; + } + + size_t get_size_x() const + { + return size_x; + } + Float64 get_sx() const { return static_cast(square_sum_x + size_x * mean_x * mean_x - 2 * mean_x * sum_x) / (size_x - 1); @@ -162,10 +176,17 @@ struct AggregateFunctionWelchTTestData final //round or make infinity dof int i_dof = static_cast(dof); + if (i_dof > 100) { i_dof = 101; } + + if(i_dof < 100) + { + i_dof = 1; + } + //check if abs of t is greater than table[dof] t = abs(t); if (t > CriticalValuesTable[table][i_dof]) @@ -260,6 +281,13 @@ public: IColumn & to ) const override { + size_t size_x = this->data(place).get_size_x(); + size_t size_y = this->data(place).get_size_y(); + + if(size_x < 2 || size_y < 2) + { + throw Exception("Aggregate function " + getName() + " requires samples to be of size > 1", ErrorCodes::BAD_ARGUMENTS); + } Float64 sx = this->data(place).get_sx(); Float64 sy = this->data(place).get_sy(); From d0f92b5492b635881b72f32d2b4462b4f44b907c Mon Sep 17 00:00:00 2001 From: antikvist Date: Wed, 17 Jun 2020 18:43:22 +0300 Subject: [PATCH 030/432] welch t-test --- .../AggregateFunctionWelchTTest.h | 5 +++-- .../0_stateless/01319_welch_ttest.reference | 3 +++ tests/queries/0_stateless/01319_welch_ttest.sql | 17 +++++++++++++++++ 3 files changed, 23 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/01319_welch_ttest.reference create mode 100644 tests/queries/0_stateless/01319_welch_ttest.sql diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index 266da9fde5b..e2ae1761c51 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -13,6 +13,7 @@ #include #include #include +#include #include @@ -189,7 +190,7 @@ struct AggregateFunctionWelchTTestData final //check if abs of t is greater than table[dof] t = abs(t); - if (t > CriticalValuesTable[table][i_dof]) + if (t >= CriticalValuesTable[table][i_dof]) { return static_cast(1); //in this case we reject the null hypothesis @@ -284,7 +285,7 @@ public: size_t size_x = this->data(place).get_size_x(); size_t size_y = this->data(place).get_size_y(); - if(size_x < 2 || size_y < 2) + if( size_x < 2 || size_y < 2) { throw Exception("Aggregate function " + getName() + " requires samples to be of size > 1", ErrorCodes::BAD_ARGUMENTS); } diff --git a/tests/queries/0_stateless/01319_welch_ttest.reference b/tests/queries/0_stateless/01319_welch_ttest.reference new file mode 100644 index 00000000000..e22493782f0 --- /dev/null +++ b/tests/queries/0_stateless/01319_welch_ttest.reference @@ -0,0 +1,3 @@ +1 +0 +0 diff --git a/tests/queries/0_stateless/01319_welch_ttest.sql b/tests/queries/0_stateless/01319_welch_ttest.sql new file mode 100644 index 00000000000..ea103cc433b --- /dev/null +++ b/tests/queries/0_stateless/01319_welch_ttest.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS welch_ttest; +CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; + +INSERT INTO welch_ttest VALUES (2224.779, 2465.0984), (2588.11, 1909.0328), (1979.625, 1175.8747), (2137.442, 2171.378), (2565.818, 2193.2821), (1754.023, 2854.9475), (1654.947, 2060.1777), (1789.256, 2258.2366), (2320.659, 1856.0535), (2039.532, 1501.8126), (1983.497, 2987.6542), (2232.903, 1681.9778), (2513.93, 2479.6776), (2066.382, 1259.8584), (2492.715, 1120.9043), (1988.287, 1982.1213), (1840.036, 3012.3949), (2249.749, 2252.373), (1766.982, 2591.3122), (1724.84, 1940.589), (0, 1995.185), (0, 2535.1344), (0, 
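Note a bug in the clamping just introduced: `if (i_dof < 100) { i_dof = 1; }` sends every dof below 100, not just degenerate values, to the first table column; a later patch in this series corrects the condition to `i_dof < 1`. The intended clamp:

```cpp
// Intended clamping (as fixed later in this series): keep i_dof inside
// the table columns 1..101, where column 101 holds the asymptotic value.
int i_dof = static_cast<int>(dof);
if (i_dof > 100)
    i_dof = 101;
if (i_dof < 1)
    i_dof = 1;
```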
597.3155), (0, 2343.2192), (0, 3154.84), (0, 1125.1966), (0, 1227.8842), (0, 1692.805), (0, 2539.6772), (0, 1936.1927), (0, 1783.7795), (0, 1703.4384), (0, 2077.194), (0, 1614.4071), (0, 2360.0365), (0, 1619.2781), (0, 2033.5109), (0, 2333.7834), (0, 2144.0485), (0, 2583.8709), (0, 1116.7213), (0, 1601.9383), (0, 1570.0431), (0, 1963.0777), (0, 1639.2533), (0, 2277.5223), (0, 1991.9286), (0, 2044.3338), (0, 1794.4781), (0, 1597.9119) +SELECT WelchTTest(0.1)(left, right) from welch_ttest; + +DROP TABLE IF EXISTS welch_ttest; +CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; + +INSERT INTO welch_ttest VALUES (2224.779, 2465.0984), (2588.11, 1909.0328), (1979.625, 1175.8747), (2137.442, 2171.378), (2565.818, 2193.2821), (1754.023, 2854.9475), (1654.947, 2060.1777), (1789.256, 2258.2366), (2320.659, 1856.0535), (2039.532, 1501.8126), (1983.497, 2987.6542), (2232.903, 1681.9778), (2513.93, 2479.6776), (2066.382, 1259.8584), (2492.715, 1120.9043), (1988.287, 1982.1213), (1840.036, 3012.3949), (2249.749, 2252.373), (1766.982, 2591.3122), (1724.84, 1940.589), (0, 1995.185), (0, 2535.1344), (0, 597.3155), (0, 2343.2192), (0, 3154.84), (0, 1125.1966), (0, 1227.8842), (0, 1692.805), (0, 2539.6772), (0, 1936.1927), (0, 1783.7795), (0, 1703.4384), (0, 2077.194), (0, 1614.4071), (0, 2360.0365), (0, 1619.2781), (0, 2033.5109), (0, 2333.7834), (0, 2144.0485), (0, 2583.8709), (0, 1116.7213), (0, 1601.9383), (0, 1570.0431), (0, 1963.0777), (0, 1639.2533), (0, 2277.5223), (0, 1991.9286), (0, 2044.3338), (0, 1794.4781), (0, 1597.9119) +SELECT WelchTTest(0.02)(left, right) from welch_ttest; + +DROP TABLE IF EXISTS welch_ttest; +CREATE TABLE welch_ttest (left Int64, right Int64) ENGINE = Memory; + +INSERT INTO welch_ttest VALUES (1, 1), (1, 1), (1, 1); +SELECT WelchTTest(0.1)(left, right) from welch_ttest; \ No newline at end of file From d92160e734fe71b03e66b330512f2f261274becf Mon Sep 17 00:00:00 2001 From: antikvist Date: Wed, 17 Jun 2020 18:59:58 +0300 Subject: [PATCH 031/432] welch t-test --- src/AggregateFunctions/AggregateFunctionWelchTTest.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index e2ae1761c51..456effc53b8 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -183,7 +183,7 @@ struct AggregateFunctionWelchTTestData final i_dof = 101; } - if(i_dof < 100) + if (i_dof < 100) { i_dof = 1; } @@ -285,7 +285,7 @@ public: size_t size_x = this->data(place).get_size_x(); size_t size_y = this->data(place).get_size_y(); - if( size_x < 2 || size_y < 2) + if (size_x < 2 || size_y < 2) { throw Exception("Aggregate function " + getName() + " requires samples to be of size > 1", ErrorCodes::BAD_ARGUMENTS); } From e4792df9a96be8a99a68292affb7c406e40d15a5 Mon Sep 17 00:00:00 2001 From: antikvist Date: Wed, 17 Jun 2020 21:56:39 +0300 Subject: [PATCH 032/432] welch t-test --- tests/queries/0_stateless/01319_welch_ttest.sql | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01319_welch_ttest.sql b/tests/queries/0_stateless/01319_welch_ttest.sql index ea103cc433b..26bd686efab 100644 --- a/tests/queries/0_stateless/01319_welch_ttest.sql +++ b/tests/queries/0_stateless/01319_welch_ttest.sql @@ -14,4 +14,5 @@ DROP TABLE IF EXISTS welch_ttest; CREATE TABLE welch_ttest (left Int64, right Int64) ENGINE = Memory; INSERT INTO welch_ttest VALUES (1, 1), (1, 
1), (1, 1); -SELECT WelchTTest(0.1)(left, right) from welch_ttest; \ No newline at end of file +SELECT WelchTTest(0.1)(left, right) from welch_ttest; +DROP TABLE IF EXISTS welch_ttest; \ No newline at end of file From ae8ee1cbfaba57dcee2210d98ccf2ac7f054e6a4 Mon Sep 17 00:00:00 2001 From: antikvist Date: Wed, 17 Jun 2020 22:22:26 +0300 Subject: [PATCH 033/432] welch t-test --- src/AggregateFunctions/AggregateFunctionWelchTTest.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index 456effc53b8..cc1417e6659 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -154,6 +154,16 @@ struct AggregateFunctionWelchTTestData final Float64 get_T(Float64 sx, Float64 sy) const { + if (sx == 0 && sy == 0) + { + throw Exception("division by zero encountered in Aggregate function WelchTTest", ErrorCodes::BAD_ARGUMENTS); + } + + if (sx == -sy && size_x == size_y) + { + throw Exception("division by zero encountered in Aggregate function WelchTTest", ErrorCodes::BAD_ARGUMENTS); + } + return static_cast(mean_x - mean_y) / std::sqrt(sx / size_x + sy / size_y); } From 08f9444842dfa6c0782237b839d491a54200c21e Mon Sep 17 00:00:00 2001 From: antikvist Date: Thu, 18 Jun 2020 00:02:50 +0300 Subject: [PATCH 034/432] welch t-test --- tests/queries/0_stateless/01319_welch_ttest.reference | 1 - tests/queries/0_stateless/01319_welch_ttest.sql | 5 ----- 2 files changed, 6 deletions(-) diff --git a/tests/queries/0_stateless/01319_welch_ttest.reference b/tests/queries/0_stateless/01319_welch_ttest.reference index e22493782f0..b261da18d51 100644 --- a/tests/queries/0_stateless/01319_welch_ttest.reference +++ b/tests/queries/0_stateless/01319_welch_ttest.reference @@ -1,3 +1,2 @@ 1 0 -0 diff --git a/tests/queries/0_stateless/01319_welch_ttest.sql b/tests/queries/0_stateless/01319_welch_ttest.sql index 26bd686efab..b8e881a069b 100644 --- a/tests/queries/0_stateless/01319_welch_ttest.sql +++ b/tests/queries/0_stateless/01319_welch_ttest.sql @@ -10,9 +10,4 @@ CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; INSERT INTO welch_ttest VALUES (2224.779, 2465.0984), (2588.11, 1909.0328), (1979.625, 1175.8747), (2137.442, 2171.378), (2565.818, 2193.2821), (1754.023, 2854.9475), (1654.947, 2060.1777), (1789.256, 2258.2366), (2320.659, 1856.0535), (2039.532, 1501.8126), (1983.497, 2987.6542), (2232.903, 1681.9778), (2513.93, 2479.6776), (2066.382, 1259.8584), (2492.715, 1120.9043), (1988.287, 1982.1213), (1840.036, 3012.3949), (2249.749, 2252.373), (1766.982, 2591.3122), (1724.84, 1940.589), (0, 1995.185), (0, 2535.1344), (0, 597.3155), (0, 2343.2192), (0, 3154.84), (0, 1125.1966), (0, 1227.8842), (0, 1692.805), (0, 2539.6772), (0, 1936.1927), (0, 1783.7795), (0, 1703.4384), (0, 2077.194), (0, 1614.4071), (0, 2360.0365), (0, 1619.2781), (0, 2033.5109), (0, 2333.7834), (0, 2144.0485), (0, 2583.8709), (0, 1116.7213), (0, 1601.9383), (0, 1570.0431), (0, 1963.0777), (0, 1639.2533), (0, 2277.5223), (0, 1991.9286), (0, 2044.3338), (0, 1794.4781), (0, 1597.9119) SELECT WelchTTest(0.02)(left, right) from welch_ttest; -DROP TABLE IF EXISTS welch_ttest; -CREATE TABLE welch_ttest (left Int64, right Int64) ENGINE = Memory; - -INSERT INTO welch_ttest VALUES (1, 1), (1, 1), (1, 1); -SELECT WelchTTest(0.1)(left, right) from welch_ttest; DROP TABLE IF EXISTS welch_ttest; \ No newline at end of file From 
4660da3e5e509bd2abd8674fc2babb4dcbeee1c4 Mon Sep 17 00:00:00 2001 From: antikvist Date: Thu, 18 Jun 2020 22:18:52 +0300 Subject: [PATCH 035/432] welch t-test --- .../AggregateFunctionWelchTTest.h | 34 +++++++++---------- .../0_stateless/01319_welch_ttest.reference | 2 +- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index cc1417e6659..2f56e5e6b6c 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -132,27 +132,27 @@ struct AggregateFunctionWelchTTestData final readBinary(size_y, buf); } - size_t get_size_y() const + size_t getSizeY() const { return size_y; } - size_t get_size_x() const + size_t getSizeX() const { return size_x; } - Float64 get_sx() const + Float64 getSx() const { return static_cast(square_sum_x + size_x * mean_x * mean_x - 2 * mean_x * sum_x) / (size_x - 1); } - Float64 get_sy() const + Float64 getSy() const { return static_cast(square_sum_y + size_y * mean_y * mean_y - 2 * mean_y * sum_y) / (size_y - 1); } - Float64 get_T(Float64 sx, Float64 sy) const + Float64 getT(Float64 sx, Float64 sy) const { if (sx == 0 && sy == 0) { @@ -167,13 +167,13 @@ struct AggregateFunctionWelchTTestData final return static_cast(mean_x - mean_y) / std::sqrt(sx / size_x + sy / size_y); } - Float64 get_degrees_of_freed(Float64 sx, Float64 sy) const + Float64 getDegreesOfFreedom(Float64 sx, Float64 sy) const { return static_cast(sx / size_x + sy / size_y) * (sx / size_x + sy / size_y) / ((sx * sx / (size_x * size_x * (size_x - 1))) + (sy * sy / (size_y * size_y * (size_y - 1)))); } - UInt8 get_result(Float64 t, Float64 dof, Float64 parametr) const + UInt8 getResult(Float64 t, Float64 dof, Float64 parametr) const { //find our table int table = 0; @@ -193,7 +193,7 @@ struct AggregateFunctionWelchTTestData final i_dof = 101; } - if (i_dof < 100) + if (i_dof < 1) { i_dof = 1; } @@ -202,12 +202,12 @@ struct AggregateFunctionWelchTTestData final t = abs(t); if (t >= CriticalValuesTable[table][i_dof]) { - return static_cast(1); + return static_cast(0); //in this case we reject the null hypothesis } else { - return static_cast(0); + return static_cast(1); } } }; @@ -292,19 +292,19 @@ public: IColumn & to ) const override { - size_t size_x = this->data(place).get_size_x(); - size_t size_y = this->data(place).get_size_y(); + size_t size_x = this->data(place).getSizeX(); + size_t size_y = this->data(place).getSizeY(); if (size_x < 2 || size_y < 2) { throw Exception("Aggregate function " + getName() + " requires samples to be of size > 1", ErrorCodes::BAD_ARGUMENTS); } - Float64 sx = this->data(place).get_sx(); - Float64 sy = this->data(place).get_sy(); - Float64 t_value = this->data(place).get_T(sx, sy); - Float64 dof = this->data(place).get_degrees_of_freed(sx, sy); - UInt8 result = this->data(place).get_result(t_value, dof, significance_level); + Float64 sx = this->data(place).getSx(); + Float64 sy = this->data(place).getSy(); + Float64 t_value = this->data(place).getT(sx, sy); + Float64 dof = this->data(place).getDegreesOfFreedom(sx, sy); + UInt8 result = this->data(place).getResult(t_value, dof, significance_level); auto & column = static_cast &>(to); column.getData().push_back(result); diff --git a/tests/queries/0_stateless/01319_welch_ttest.reference b/tests/queries/0_stateless/01319_welch_ttest.reference index b261da18d51..aa47d0d46d4 100644 --- a/tests/queries/0_stateless/01319_welch_ttest.reference 
+++ b/tests/queries/0_stateless/01319_welch_ttest.reference @@ -1,2 +1,2 @@ -1 +0 0 From 839ee63294515a2899d5a5855bfe824231d17319 Mon Sep 17 00:00:00 2001 From: antikvist Date: Sat, 20 Jun 2020 19:31:00 +0300 Subject: [PATCH 036/432] welch --- .../{01319_welch_ttest.reference => 01322_welch_ttest.reference} | 0 .../0_stateless/{01319_welch_ttest.sql => 01322_welch_ttest.sql} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename tests/queries/0_stateless/{01319_welch_ttest.reference => 01322_welch_ttest.reference} (100%) rename tests/queries/0_stateless/{01319_welch_ttest.sql => 01322_welch_ttest.sql} (100%) diff --git a/tests/queries/0_stateless/01319_welch_ttest.reference b/tests/queries/0_stateless/01322_welch_ttest.reference similarity index 100% rename from tests/queries/0_stateless/01319_welch_ttest.reference rename to tests/queries/0_stateless/01322_welch_ttest.reference diff --git a/tests/queries/0_stateless/01319_welch_ttest.sql b/tests/queries/0_stateless/01322_welch_ttest.sql similarity index 100% rename from tests/queries/0_stateless/01319_welch_ttest.sql rename to tests/queries/0_stateless/01322_welch_ttest.sql From 3d89f0e9df325b6830153735263b5efaa829cf0a Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 21 Jul 2020 15:41:14 +0300 Subject: [PATCH 037/432] Perf test: bind server to one NUMA node --- docker/test/performance-comparison/compare.sh | 12 ++++++++++-- tests/performance/decimal_aggregates.xml | 2 +- tests/performance/jit_large_requests.xml | 4 +--- tests/performance/string_sort.xml | 2 +- 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 378e87f443b..4bde4f945a6 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -55,12 +55,18 @@ function restart set -m # Spawn servers in their own process groups - left/clickhouse-server --config-file=left/config/config.xml -- --path left/db --user_files_path left/db/user_files &>> left-server-log.log & + numactl --membind=0 --cpunodebind=0 --localalloc \ + left/clickhouse-server --config-file=left/config/config.xml \ + -- --path left/db --user_files_path left/db/user_files \ + &>> left-server-log.log & left_pid=$! kill -0 $left_pid disown $left_pid - right/clickhouse-server --config-file=right/config/config.xml -- --path right/db --user_files_path right/db/user_files &>> right-server-log.log & + numactl --membind=0 --cpunodebind=0 --localalloc \ + right/clickhouse-server --config-file=right/config/config.xml \ + -- --path right/db --user_files_path right/db/user_files \ + &>> right-server-log.log & right_pid=$! 
kill -0 $right_pid disown $right_pid @@ -909,6 +915,8 @@ case "$stage" in time configure ;& "restart") + numactl --hardware ||: + lscpu ||: time restart ;& "run_tests") diff --git a/tests/performance/decimal_aggregates.xml b/tests/performance/decimal_aggregates.xml index 0c8df88c73c..142d9388404 100644 --- a/tests/performance/decimal_aggregates.xml +++ b/tests/performance/decimal_aggregates.xml @@ -1,6 +1,6 @@ - 30G + 35G CREATE TABLE t (x UInt64, d32 Decimal32(3), d64 Decimal64(4), d128 Decimal128(5)) ENGINE = Memory diff --git a/tests/performance/jit_large_requests.xml b/tests/performance/jit_large_requests.xml index 805b7f2edb1..6aed7bea544 100644 --- a/tests/performance/jit_large_requests.xml +++ b/tests/performance/jit_large_requests.xml @@ -1,6 +1,4 @@ - - CREATE TABLE jit_test ( a UInt64, @@ -43,7 +41,7 @@ SETTINGS compile_expressions = 0; - + SELECT COUNT() FROM diff --git a/tests/performance/string_sort.xml b/tests/performance/string_sort.xml index ce5a54e2680..5d859398ece 100644 --- a/tests/performance/string_sort.xml +++ b/tests/performance/string_sort.xml @@ -43,7 +43,7 @@ - + From 9f49bf2d82154db6417585505bfab0897d3d1d4e Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 22 Jul 2020 01:09:54 +0300 Subject: [PATCH 038/432] fixup --- docker/test/performance-comparison/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/test/performance-comparison/Dockerfile b/docker/test/performance-comparison/Dockerfile index 912a8bd12cd..df666af8e8e 100644 --- a/docker/test/performance-comparison/Dockerfile +++ b/docker/test/performance-comparison/Dockerfile @@ -17,6 +17,7 @@ RUN apt-get update \ libc6-dbg \ moreutils \ ncdu \ + numactl \ p7zip-full \ parallel \ psmisc \ From 2b7c0167cb0fda79b59eecf752a56611eae36ab0 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 22 Jul 2020 21:02:56 +0300 Subject: [PATCH 039/432] Update compare.sh --- docker/test/performance-comparison/compare.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 4bde4f945a6..caaff129cfb 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -55,7 +55,7 @@ function restart set -m # Spawn servers in their own process groups - numactl --membind=0 --cpunodebind=0 --localalloc \ + numactl --cpunodebind=0 --localalloc \ left/clickhouse-server --config-file=left/config/config.xml \ -- --path left/db --user_files_path left/db/user_files \ &>> left-server-log.log & @@ -63,7 +63,7 @@ function restart kill -0 $left_pid disown $left_pid - numactl --membind=0 --cpunodebind=0 --localalloc \ + numactl --cpunodebind=0 --localalloc \ right/clickhouse-server --config-file=right/config/config.xml \ -- --path right/db --user_files_path right/db/user_files \ &>> right-server-log.log & From 15cd448afaf98c57f5300674e451eebc5b1f427e Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 23 Jul 2020 17:59:32 +0300 Subject: [PATCH 040/432] Update compare.sh --- docker/test/performance-comparison/compare.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index caaff129cfb..84aa31fb76b 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -55,7 +55,7 @@ function restart set 
-m # Spawn servers in their own process groups - numactl --cpunodebind=0 --localalloc \ + numactl --cpunodebind=0 --membind=0 \ left/clickhouse-server --config-file=left/config/config.xml \ -- --path left/db --user_files_path left/db/user_files \ &>> left-server-log.log & @@ -63,7 +63,7 @@ function restart kill -0 $left_pid disown $left_pid - numactl --cpunodebind=0 --localalloc \ + numactl --cpunodebind=0 --membind=0 \ right/clickhouse-server --config-file=right/config/config.xml \ -- --path right/db --user_files_path right/db/user_files \ &>> right-server-log.log & From 0e205075394aec860a959c5c4125bf01f04a1c86 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 30 Jul 2020 17:26:47 +0300 Subject: [PATCH 041/432] Update compare.sh --- docker/test/performance-comparison/compare.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 84aa31fb76b..387e259823f 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -63,7 +63,7 @@ function restart kill -0 $left_pid disown $left_pid - numactl --cpunodebind=0 --membind=0 \ + numactl --cpunodebind=1 --membind=1 \ right/clickhouse-server --config-file=right/config/config.xml \ -- --path right/db --user_files_path right/db/user_files \ &>> right-server-log.log & From 42e5f8ec44b14e65eb3c8efc097a39356b9bf750 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 31 Jul 2020 03:49:36 +0300 Subject: [PATCH 042/432] Update compare.sh --- docker/test/performance-comparison/compare.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 387e259823f..29576be2903 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -952,7 +952,7 @@ case "$stage" in # to collect the logs. Prefer not to restart, because addresses might change # and we won't be able to process trace_log data. Start in a subshell, so that # it doesn't interfere with the watchdog through `wait`. - ( get_profiles || restart || get_profiles ||: ) + ( get_profiles || restart && get_profiles ||: ) # Kill the whole process group, because somehow when the subshell is killed, # the sleep inside remains alive and orphaned. 
From ff3f378e8adfe21cfdcb412f6e014cc42a4eec79 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Thu, 6 Aug 2020 00:58:56 +0300 Subject: [PATCH 043/432] try to rebuild package for 2b7c0167cb0fda79b59eecf752a56611eae36ab0 --- docker/test/performance-comparison/compare.sh | 6 +- .../test/performance-comparison/perf.py.orig | 190 ++++++++++++++++++ 2 files changed, 193 insertions(+), 3 deletions(-) create mode 100755 docker/test/performance-comparison/perf.py.orig diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 29576be2903..caaff129cfb 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -55,7 +55,7 @@ function restart set -m # Spawn servers in their own process groups - numactl --cpunodebind=0 --membind=0 \ + numactl --cpunodebind=0 --localalloc \ left/clickhouse-server --config-file=left/config/config.xml \ -- --path left/db --user_files_path left/db/user_files \ &>> left-server-log.log & @@ -63,7 +63,7 @@ function restart kill -0 $left_pid disown $left_pid - numactl --cpunodebind=1 --membind=1 \ + numactl --cpunodebind=0 --localalloc \ right/clickhouse-server --config-file=right/config/config.xml \ -- --path right/db --user_files_path right/db/user_files \ &>> right-server-log.log & @@ -952,7 +952,7 @@ case "$stage" in # to collect the logs. Prefer not to restart, because addresses might change # and we won't be able to process trace_log data. Start in a subshell, so that # it doesn't interfere with the watchdog through `wait`. - ( get_profiles || restart && get_profiles ||: ) + ( get_profiles || restart || get_profiles ||: ) # Kill the whole process group, because somehow when the subshell is killed, # the sleep inside remains alive and orphaned. diff --git a/docker/test/performance-comparison/perf.py.orig b/docker/test/performance-comparison/perf.py.orig new file mode 100755 index 00000000000..c25a3041a67 --- /dev/null +++ b/docker/test/performance-comparison/perf.py.orig @@ -0,0 +1,190 @@ +#!/usr/bin/python3 + +import os +import sys +import itertools +import clickhouse_driver +import xml.etree.ElementTree as et +import argparse +import pprint +import string +import time +import traceback + +stage_start_seconds = time.perf_counter() + +def report_stage_end(stage_name): + global stage_start_seconds + print('{}\t{}'.format(stage_name, time.perf_counter() - stage_start_seconds)) + stage_start_seconds = time.perf_counter() + +report_stage_end('start') + +parser = argparse.ArgumentParser(description='Run performance test.') +# Explicitly decode files as UTF-8 because sometimes we have Russian characters in queries, and LANG=C is set. +parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, help='test description file') +parser.add_argument('--host', nargs='*', default=['127.0.0.1', '127.0.0.1'], help="Server hostname. Parallel to '--port'.") +parser.add_argument('--port', nargs='*', default=[9001, 9002], help="Server port. Parallel to '--host'.") +parser.add_argument('--runs', type=int, default=int(os.environ.get('CHPC_RUNS', 7)), help='Number of query runs per server. 
Defaults to CHPC_RUNS environment variable.') +parser.add_argument('--no-long', type=bool, default=True, help='Skip the tests tagged as long.') +args = parser.parse_args() + +tree = et.parse(args.file[0]) +root = tree.getroot() + +# Skip long tests +for tag in root.findall('.//tag'): + if tag.text == 'long': + print('skipped\tTest is tagged as long.') + sys.exit(0) + +# Check main metric +main_metric_element = root.find('main_metric/*') +if main_metric_element is not None and main_metric_element.tag != 'min_time': + raise Exception('Only the min_time main metric is supported. This test uses \'{}\''.format(main_metric_element.tag)) + +# FIXME another way to detect infinite tests. They should have an appropriate main_metric but sometimes they don't. +infinite_sign = root.find('.//average_speed_not_changing_for_ms') +if infinite_sign is not None: + raise Exception('Looks like the test is infinite (sign 1)') + +# Open connections +servers = [{'host': host, 'port': port} for (host, port) in zip(args.host, args.port)] +connections = [clickhouse_driver.Client(**server) for server in servers] + +for s in servers: + print('server\t{}\t{}'.format(s['host'], s['port'])) + +report_stage_end('connect') + +# Process query parameters +subst_elems = root.findall('substitutions/substitution') +available_parameters = {} # { 'table': ['hits_10m', 'hits_100m'], ... } +for e in subst_elems: + available_parameters[e.find('name').text] = [v.text for v in e.findall('values/value')] + +# Take care to keep the order of queries -- sometimes we have DROP IF EXISTS +# followed by CREATE in create queries section, so the order matters. +def substitute_parameters(query_templates): + result = [] + for q in query_templates: + keys = set(n for _, n, _, _ in string.Formatter().parse(q) if n) + values = [available_parameters[k] for k in keys] + result.extend([ + q.format(**dict(zip(keys, values_combo))) + for values_combo in itertools.product(*values)]) + return result + +report_stage_end('substitute') + +# Run drop queries, ignoring errors. Do this before all other activity, because +# clickhouse_driver disconnects on error (this is not configurable), and the new +# connection loses the changes in settings. +drop_query_templates = [q.text for q in root.findall('drop_query')] +drop_queries = substitute_parameters(drop_query_templates) +for c in connections: + for q in drop_queries: + try: + c.execute(q) + except: + traceback.print_exc() + pass + +report_stage_end('drop1') + +# Apply settings +settings = root.findall('settings/*') +for c in connections: + for s in settings: + c.execute("set {} = '{}'".format(s.tag, s.text)) + +report_stage_end('settings') + +# Check tables that should exist. If they don't exist, just skip this test. 
+tables = [e.text for e in root.findall('preconditions/table_exists')] +for t in tables: + for c in connections: + try: + res = c.execute("show create table {}".format(t)) + except: + print('skipped\t' + traceback.format_exception_only(*sys.exc_info()[:2])[-1]) + traceback.print_exc() + sys.exit(0) + +report_stage_end('preconditions') + +# Run create queries +create_query_templates = [q.text for q in root.findall('create_query')] +create_queries = substitute_parameters(create_query_templates) +for c in connections: + for q in create_queries: + c.execute(q) + +# Run fill queries +fill_query_templates = [q.text for q in root.findall('fill_query')] +fill_queries = substitute_parameters(fill_query_templates) +for c in connections: + for q in fill_queries: + c.execute(q) + +report_stage_end('fill') + +# Run test queries +def tsv_escape(s): + return s.replace('\\', '\\\\').replace('\t', '\\t').replace('\n', '\\n').replace('\r','') + +test_query_templates = [q.text for q in root.findall('query')] +test_queries = substitute_parameters(test_query_templates) + +report_stage_end('substitute2') + +for q in test_queries: + # Prewarm: run once on both servers. Helps to bring the data into memory, + # precompile the queries, etc. +<<<<<<< HEAD + for conn_index, c in enumerate(connections): + res = c.execute(q, query_id = 'prewarm {} {}'.format(0, q)) + print('prewarm\t' + tsv_escape(q) + '\t' + str(conn_index) + '\t' + str(c.last_query.elapsed)) +======= + try: + for conn_index, c in enumerate(connections): + prewarm_id = f'{query_prefix}.prewarm0' + res = c.execute(q, query_id = prewarm_id) + print(f'prewarm\t{query_index}\t{prewarm_id}\t{conn_index}\t{c.last_query.elapsed}') + except KeyboardInterrupt: + raise + except: + # If prewarm fails for some query -- skip it, and try to test the others. + # This might happen if the new test introduces some function that the + # old server doesn't support. Still, report it as an error. + # FIXME the driver reconnects on error and we lose settings, so this might + # lead to further errors or unexpected behavior. + print(traceback.format_exc(), file=sys.stderr) + continue +>>>>>>> 4b1bb43543... Merge pull request #11076 from ClickHouse/aku/join-error-messages + + # Now, perform measured runs. + # Track the time spent by the client to process this query, so that we can notice + # out the queries that take long to process on the client side, e.g. by sending + # excessive data. 
+ start_seconds = time.perf_counter() + server_seconds = 0 + for run in range(0, args.runs): + for conn_index, c in enumerate(connections): + res = c.execute(q) + print('query\t' + tsv_escape(q) + '\t' + str(run) + '\t' + str(conn_index) + '\t' + str(c.last_query.elapsed)) + server_seconds += c.last_query.elapsed + + client_seconds = time.perf_counter() - start_seconds + print('client-time\t{}\t{}\t{}'.format(tsv_escape(q), client_seconds, server_seconds)) + +report_stage_end('benchmark') + +# Run drop queries +drop_query_templates = [q.text for q in root.findall('drop_query')] +drop_queries = substitute_parameters(drop_query_templates) +for c in connections: + for q in drop_queries: + c.execute(q) + +report_stage_end('drop2') From 05ad9b9fffe1a49cf91eaf3e5d0df89e42d55124 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Thu, 20 Aug 2020 23:59:40 +0300 Subject: [PATCH 044/432] opentelemetry wip --- base/daemon/BaseDaemon.cpp | 2 +- programs/server/config.xml | 10 ++++ src/Common/Exception.h | 7 ++- src/Interpreters/ClientInfo.h | 5 ++ src/Interpreters/Context.cpp | 53 +++++++++++------- src/Interpreters/Context.h | 2 + src/Interpreters/InterpreterSystemQuery.cpp | 4 +- src/Interpreters/OpenTelemetryLog.cpp | 46 ++++++++++++++++ src/Interpreters/OpenTelemetryLog.h | 57 ++++++++++++++++++++ src/Interpreters/SystemLog.cpp | 6 +++ src/Interpreters/SystemLog.h | 3 ++ src/Interpreters/executeQuery.cpp | 4 ++ src/Storages/StorageFile.cpp | 59 ++++++++++++++++++++- 13 files changed, 233 insertions(+), 25 deletions(-) create mode 100644 src/Interpreters/OpenTelemetryLog.cpp create mode 100644 src/Interpreters/OpenTelemetryLog.h diff --git a/base/daemon/BaseDaemon.cpp b/base/daemon/BaseDaemon.cpp index 78801e71a6f..cc5a765a52d 100644 --- a/base/daemon/BaseDaemon.cpp +++ b/base/daemon/BaseDaemon.cpp @@ -153,7 +153,7 @@ static void signalHandler(int sig, siginfo_t * info, void * context) if (sig != SIGTSTP) /// This signal is used for debugging. { /// The time that is usually enough for separate thread to print info into log. - sleepForSeconds(10); + sleepForSeconds(1); call_default_signal_handler(sig); } diff --git a/programs/server/config.xml b/programs/server/config.xml index af01e880dc2..7e88150b95f 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -563,6 +563,16 @@ 60000 + + + system + opentelemetry_log
+ 7500 +
+ + diff --git a/src/Common/Exception.h b/src/Common/Exception.h index 763b90048bb..d0de8d6a3f2 100644 --- a/src/Common/Exception.h +++ b/src/Common/Exception.h @@ -39,8 +39,11 @@ public: const char * name() const throw() override { return "DB::Exception"; } const char * what() const throw() override { return message().data(); } - /// Add something to the existing message. - void addMessage(const std::string & arg) { extendedMessage(arg); } + template + void addMessage(Fmt&&... fmt) + { + extendedMessage(fmt::format(std::forward(fmt)...)); + } std::string getStackTraceString() const; diff --git a/src/Interpreters/ClientInfo.h b/src/Interpreters/ClientInfo.h index f3a99112170..575d1521b6f 100644 --- a/src/Interpreters/ClientInfo.h +++ b/src/Interpreters/ClientInfo.h @@ -57,6 +57,11 @@ public: String initial_user; String initial_query_id; Poco::Net::SocketAddress initial_address; + + + UInt128 trace_id; + UInt64 span_id; + UInt64 parent_span_id; /// All below are parameters related to initial query. diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 70cf41a679c..93e8ac49114 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -1075,35 +1075,37 @@ void Context::setCurrentQueryId(const String & query_id) if (!client_info.current_query_id.empty()) throw Exception("Logical error: attempt to set query_id twice", ErrorCodes::LOGICAL_ERROR); - String query_id_to_set = query_id; + /// Generate random UUID, but using lower quality RNG, + /// because Poco::UUIDGenerator::generateRandom method is using /dev/random, that is very expensive. + /// NOTE: Actually we don't need to use UUIDs for query identifiers. + /// We could use any suitable string instead. + union + { + char bytes[16]; + struct + { + UInt64 a; + UInt64 b; + } words; + UInt128 uuid; + } random; + random.words.a = thread_local_rng(); //-V656 + random.words.b = thread_local_rng(); //-V656 + + trace_id = random.uuid; + + + String query_id_to_set = query_id; if (query_id_to_set.empty()) /// If the user did not submit his query_id, then we generate it ourselves. { - /// Generate random UUID, but using lower quality RNG, - /// because Poco::UUIDGenerator::generateRandom method is using /dev/random, that is very expensive. - /// NOTE: Actually we don't need to use UUIDs for query identifiers. - /// We could use any suitable string instead. - - union - { - char bytes[16]; - struct - { - UInt64 a; - UInt64 b; - } words; - } random; - - random.words.a = thread_local_rng(); //-V656 - random.words.b = thread_local_rng(); //-V656 - /// Use protected constructor. 
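Context::setCurrentQueryId now derives both the query UUID and the new trace_id from the same sixteen random bytes; overlaying a UInt128 on the byte buffer through a union avoids Poco::UUIDGenerator::generateRandom, which reads /dev/random and is expensive. In isolation:

```cpp
// Trace id generation as in the patch above: two draws from the cheap
// thread-local RNG viewed as one UInt128.
union
{
    char bytes[16];
    struct { UInt64 a; UInt64 b; } words;
    UInt128 uuid;
} random;

random.words.a = thread_local_rng();
random.words.b = thread_local_rng();
trace_id = random.uuid;              // reused later for OpenTelemetry spans
// random.bytes also seeds the QueryUUID when the client sent no query_id
```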
struct QueryUUID : Poco::UUID { QueryUUID(const char * bytes, Poco::UUID::Version version) : Poco::UUID(bytes, version) {} }; - + query_id_to_set = QueryUUID(random.bytes, Poco::UUID::UUID_RANDOM).toString(); } @@ -1705,6 +1707,17 @@ std::shared_ptr Context::getAsynchronousMetricLog() } +std::shared_ptr Context::getOpenTelemetryLog() +{ + auto lock = getLock(); + + if (!shared->system_logs) + return {}; + + return shared->system_logs->opentelemetry_log; +} + + CompressionCodecPtr Context::chooseCompressionCodec(size_t part_size, double part_size_ratio) const { auto lock = getLock(); diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index c8d13baa9ae..6cabac3db0d 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -81,6 +81,7 @@ class TextLog; class TraceLog; class MetricLog; class AsynchronousMetricLog; +class OpenTelemetrySpanLog; struct MergeTreeSettings; class StorageS3Settings; class IDatabase; @@ -531,6 +532,7 @@ public: std::shared_ptr getTextLog(); std::shared_ptr getMetricLog(); std::shared_ptr getAsynchronousMetricLog(); + std::shared_ptr getOpenTelemetryLog(); /// Returns an object used to log operations with parts if it possible. /// Provide table name to make required checks. diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index 4bfa84090c2..24fb0637b3d 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -321,7 +322,8 @@ BlockIO InterpreterSystemQuery::execute() [&] () { if (auto trace_log = context.getTraceLog()) trace_log->flush(true); }, [&] () { if (auto text_log = context.getTextLog()) text_log->flush(true); }, [&] () { if (auto metric_log = context.getMetricLog()) metric_log->flush(true); }, - [&] () { if (auto asynchronous_metric_log = context.getAsynchronousMetricLog()) asynchronous_metric_log->flush(true); } + [&] () { if (auto asynchronous_metric_log = context.getAsynchronousMetricLog()) asynchronous_metric_log->flush(true); }, + [&] () { if (auto opentelemetry_log = context.getOpenTelemetryLog()) opentelemetry_log->flush(true); } ); break; case Type::STOP_LISTEN_QUERIES: diff --git a/src/Interpreters/OpenTelemetryLog.cpp b/src/Interpreters/OpenTelemetryLog.cpp new file mode 100644 index 00000000000..511c820797a --- /dev/null +++ b/src/Interpreters/OpenTelemetryLog.cpp @@ -0,0 +1,46 @@ +#include "OpenTelemetryLog.h" + +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +Block OpenTelemetrySpanLogElement::createBlock() +{ + return { + {std::make_shared(), "trace_id"}, + {std::make_shared(), "span_id"}, + {std::make_shared(), "parent_span_id"}, + {std::make_shared(), "operation_name"}, + {std::make_shared(), "start_date"}, + {std::make_shared(), "start_time"}, + {std::make_shared(), "finish_time"}, + {std::make_shared(std::make_shared()), + "attribute.names"}, + {std::make_shared(std::make_shared()), + "attribute.values"} + }; +} + +void OpenTelemetrySpanLogElement::appendToBlock(MutableColumns & columns) const +{ + size_t i = 0; + + columns[i++]->insert(trace_id); + columns[i++]->insert(span_id); + columns[i++]->insert(parent_span_id); + columns[i++]->insert(operation_name); + columns[i++]->insert(DateLUT::instance().toDayNum(start_time)); + columns[i++]->insert(start_time); + columns[i++]->insert(finish_time); + columns[i++]->insert(attribute_names); + columns[i++]->insert(attribute_values); +} + +} + diff 
--git a/src/Interpreters/OpenTelemetryLog.h b/src/Interpreters/OpenTelemetryLog.h
new file mode 100644
index 00000000000..723a4201783
--- /dev/null
+++ b/src/Interpreters/OpenTelemetryLog.h
@@ -0,0 +1,57 @@
+#pragma once
+
+#include
+
+namespace DB
+{
+
+/*
+struct OpenTelemetrySpanContext
+{
+ UInt128 trace_id;
+ UInt64 span_id;
+ UInt8 trace_flags;
+ String trace_state;
+};
+*/
+
+// TODO figure out precisely which part of this is runtime state, and which
+// part we must log.
+struct OpenTelemetrySpan
+{
+ UInt128 trace_id;
+ UInt64 span_id;
+ UInt64 parent_span_id;
+ std::string operation_name;
+ time_t start_time{};
+ time_t finish_time{};
+ Array attribute_names;
+ Array attribute_values;
+ // I don't understand how Links work, namely, which direction they should
+ // point and how they relate to parent_span_id, so no Links for now.
+
+ // The following fields look like something that is runtime-only and doesn't
+ // require logging.
+ UInt8 trace_flags;
+ // Vendor-specific info, key-value pairs. Keep it as a string as described
+ // here: https://w3c.github.io/trace-context/#tracestate-header
+ String trace_state;
+};
+
+struct OpenTelemetrySpanLogElement : public OpenTelemetrySpan
+{
+ static std::string name() { return "OpenTelemetrySpanLog"; }
+ static Block createBlock();
+ void appendToBlock(MutableColumns & columns) const;
+};
+
+// OpenTelemetry standardizes some log data as well, so this class is named
+// OpenTelemetrySpanLog rather than just OpenTelemetryLog, to avoid confusion.
+class OpenTelemetrySpanLog : public SystemLog
+{
+public:
+ using SystemLog::SystemLog;
+};
+
+}
diff --git a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp
index e64444edd67..27236c99a23 100644
--- a/src/Interpreters/SystemLog.cpp
+++ b/src/Interpreters/SystemLog.cpp
@@ -7,6 +7,7 @@
#include
#include
#include
+#include
#include
#include
@@ -88,6 +89,9 @@ SystemLogs::SystemLogs(Context & global_context, const Poco::Util::AbstractConfi
 asynchronous_metric_log = createSystemLog(
 global_context, "system", "asynchronous_metric_log", config,
 "asynchronous_metric_log");
+ opentelemetry_log = createSystemLog(
+ global_context, "system", "opentelemetry_log", config,
+ "opentelemetry_log");
 if (query_log)
 logs.emplace_back(query_log.get());
@@ -105,6 +109,8 @@ SystemLogs::SystemLogs(Context & global_context, const Poco::Util::AbstractConfi
 logs.emplace_back(metric_log.get());
 if (asynchronous_metric_log)
 logs.emplace_back(asynchronous_metric_log.get());
+ if (opentelemetry_log)
+ logs.emplace_back(opentelemetry_log.get());
 try
 {
diff --git a/src/Interpreters/SystemLog.h b/src/Interpreters/SystemLog.h
index a2e97747d12..15ebf04c6ce 100644
--- a/src/Interpreters/SystemLog.h
+++ b/src/Interpreters/SystemLog.h
@@ -71,6 +71,7 @@ class TraceLog;
class CrashLog;
class MetricLog;
class AsynchronousMetricLog;
+class OpenTelemetrySpanLog;
class ISystemLog
@@ -105,6 +106,8 @@ struct SystemLogs
 std::shared_ptr metric_log; /// Used to log all metrics.
 /// Metrics from system.asynchronous_metrics.
std::shared_ptr asynchronous_metric_log; + /// OpenTelemetry trace spans + std::shared_ptr opentelemetry_log; std::vector logs; }; diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index fa5b3e0bcc2..27ac5fda56f 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -584,6 +584,10 @@ static std::tuple executeQueryImpl( if (auto query_log = context.getQueryLog()) query_log->add(elem); } + + if (auto opentelemetry_log = context.getOpenTelemetryLog()) + { + } }; auto exception_callback = [elem, &context, ast, log_queries, log_queries_min_type = settings.log_queries_min_type, quota(quota), diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 558216a6216..d22a0625a67 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -199,6 +199,45 @@ StorageFile::StorageFile(CommonArguments args) setInMemoryMetadata(storage_metadata); } +struct ScopedExceptionHint +{ + std::string hint; + + // Format message with fmt::format + template + ScopedExceptionHint(Fmt&&... fmt) + : hint(fmt::format(std::forward(fmt)...)) + { + fprintf(stderr, "hint %s created\n", hint.c_str()); + } + + ~ScopedExceptionHint() + { + fprintf(stderr, "hint %s is being destroyed\n", hint.c_str()); + std::exception_ptr exception = std::current_exception(); + if (exception) + { + try { + std::rethrow_exception(exception); + } + catch (Exception & e) + { + e.addMessage(hint); + fprintf(stderr, "added hint %s\n", hint.c_str()); + } + catch (...) + { + fprintf(stderr, "unknown exception\n"); + // pass? + } + } + else + { + fprintf(stderr, "no exception\n"); + } + } +}; + class StorageFileSource : public SourceWithProgress { public: @@ -317,10 +356,28 @@ public: if (!column_defaults.empty()) reader = std::make_shared(reader, column_defaults, context); + ScopedExceptionHint("Reading prefix of file '{}'", current_path); reader->readPrefix(); } - if (auto res = reader->read()) + ScopedExceptionHint("Reader->read() {}", reader->getName()); + + Block res; + try + { + res = reader->read(); + } + catch (Exception & e) + { + e.addMessage("Error while reading from '{}'", current_path); + throw; + } + catch (...) 
+ { + throw; + } + + if (res) { Columns columns = res.getColumns(); UInt64 num_rows = res.rows(); From 5d31442fa88271072a5478b95f4903977cef4fb1 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 26 Aug 2020 03:26:54 +0300 Subject: [PATCH 045/432] Delete perf.py.orig --- .../test/performance-comparison/perf.py.orig | 190 ------------------ 1 file changed, 190 deletions(-) delete mode 100755 docker/test/performance-comparison/perf.py.orig diff --git a/docker/test/performance-comparison/perf.py.orig b/docker/test/performance-comparison/perf.py.orig deleted file mode 100755 index c25a3041a67..00000000000 --- a/docker/test/performance-comparison/perf.py.orig +++ /dev/null @@ -1,190 +0,0 @@ -#!/usr/bin/python3 - -import os -import sys -import itertools -import clickhouse_driver -import xml.etree.ElementTree as et -import argparse -import pprint -import string -import time -import traceback - -stage_start_seconds = time.perf_counter() - -def report_stage_end(stage_name): - global stage_start_seconds - print('{}\t{}'.format(stage_name, time.perf_counter() - stage_start_seconds)) - stage_start_seconds = time.perf_counter() - -report_stage_end('start') - -parser = argparse.ArgumentParser(description='Run performance test.') -# Explicitly decode files as UTF-8 because sometimes we have Russian characters in queries, and LANG=C is set. -parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, help='test description file') -parser.add_argument('--host', nargs='*', default=['127.0.0.1', '127.0.0.1'], help="Server hostname. Parallel to '--port'.") -parser.add_argument('--port', nargs='*', default=[9001, 9002], help="Server port. Parallel to '--host'.") -parser.add_argument('--runs', type=int, default=int(os.environ.get('CHPC_RUNS', 7)), help='Number of query runs per server. Defaults to CHPC_RUNS environment variable.') -parser.add_argument('--no-long', type=bool, default=True, help='Skip the tests tagged as long.') -args = parser.parse_args() - -tree = et.parse(args.file[0]) -root = tree.getroot() - -# Skip long tests -for tag in root.findall('.//tag'): - if tag.text == 'long': - print('skipped\tTest is tagged as long.') - sys.exit(0) - -# Check main metric -main_metric_element = root.find('main_metric/*') -if main_metric_element is not None and main_metric_element.tag != 'min_time': - raise Exception('Only the min_time main metric is supported. This test uses \'{}\''.format(main_metric_element.tag)) - -# FIXME another way to detect infinite tests. They should have an appropriate main_metric but sometimes they don't. -infinite_sign = root.find('.//average_speed_not_changing_for_ms') -if infinite_sign is not None: - raise Exception('Looks like the test is infinite (sign 1)') - -# Open connections -servers = [{'host': host, 'port': port} for (host, port) in zip(args.host, args.port)] -connections = [clickhouse_driver.Client(**server) for server in servers] - -for s in servers: - print('server\t{}\t{}'.format(s['host'], s['port'])) - -report_stage_end('connect') - -# Process query parameters -subst_elems = root.findall('substitutions/substitution') -available_parameters = {} # { 'table': ['hits_10m', 'hits_100m'], ... } -for e in subst_elems: - available_parameters[e.find('name').text] = [v.text for v in e.findall('values/value')] - -# Take care to keep the order of queries -- sometimes we have DROP IF EXISTS -# followed by CREATE in create queries section, so the order matters. 
-def substitute_parameters(query_templates): - result = [] - for q in query_templates: - keys = set(n for _, n, _, _ in string.Formatter().parse(q) if n) - values = [available_parameters[k] for k in keys] - result.extend([ - q.format(**dict(zip(keys, values_combo))) - for values_combo in itertools.product(*values)]) - return result - -report_stage_end('substitute') - -# Run drop queries, ignoring errors. Do this before all other activity, because -# clickhouse_driver disconnects on error (this is not configurable), and the new -# connection loses the changes in settings. -drop_query_templates = [q.text for q in root.findall('drop_query')] -drop_queries = substitute_parameters(drop_query_templates) -for c in connections: - for q in drop_queries: - try: - c.execute(q) - except: - traceback.print_exc() - pass - -report_stage_end('drop1') - -# Apply settings -settings = root.findall('settings/*') -for c in connections: - for s in settings: - c.execute("set {} = '{}'".format(s.tag, s.text)) - -report_stage_end('settings') - -# Check tables that should exist. If they don't exist, just skip this test. -tables = [e.text for e in root.findall('preconditions/table_exists')] -for t in tables: - for c in connections: - try: - res = c.execute("show create table {}".format(t)) - except: - print('skipped\t' + traceback.format_exception_only(*sys.exc_info()[:2])[-1]) - traceback.print_exc() - sys.exit(0) - -report_stage_end('preconditions') - -# Run create queries -create_query_templates = [q.text for q in root.findall('create_query')] -create_queries = substitute_parameters(create_query_templates) -for c in connections: - for q in create_queries: - c.execute(q) - -# Run fill queries -fill_query_templates = [q.text for q in root.findall('fill_query')] -fill_queries = substitute_parameters(fill_query_templates) -for c in connections: - for q in fill_queries: - c.execute(q) - -report_stage_end('fill') - -# Run test queries -def tsv_escape(s): - return s.replace('\\', '\\\\').replace('\t', '\\t').replace('\n', '\\n').replace('\r','') - -test_query_templates = [q.text for q in root.findall('query')] -test_queries = substitute_parameters(test_query_templates) - -report_stage_end('substitute2') - -for q in test_queries: - # Prewarm: run once on both servers. Helps to bring the data into memory, - # precompile the queries, etc. -<<<<<<< HEAD - for conn_index, c in enumerate(connections): - res = c.execute(q, query_id = 'prewarm {} {}'.format(0, q)) - print('prewarm\t' + tsv_escape(q) + '\t' + str(conn_index) + '\t' + str(c.last_query.elapsed)) -======= - try: - for conn_index, c in enumerate(connections): - prewarm_id = f'{query_prefix}.prewarm0' - res = c.execute(q, query_id = prewarm_id) - print(f'prewarm\t{query_index}\t{prewarm_id}\t{conn_index}\t{c.last_query.elapsed}') - except KeyboardInterrupt: - raise - except: - # If prewarm fails for some query -- skip it, and try to test the others. - # This might happen if the new test introduces some function that the - # old server doesn't support. Still, report it as an error. - # FIXME the driver reconnects on error and we lose settings, so this might - # lead to further errors or unexpected behavior. - print(traceback.format_exc(), file=sys.stderr) - continue ->>>>>>> 4b1bb43543... Merge pull request #11076 from ClickHouse/aku/join-error-messages - - # Now, perform measured runs. - # Track the time spent by the client to process this query, so that we can notice - # out the queries that take long to process on the client side, e.g. 
by sending - # excessive data. - start_seconds = time.perf_counter() - server_seconds = 0 - for run in range(0, args.runs): - for conn_index, c in enumerate(connections): - res = c.execute(q) - print('query\t' + tsv_escape(q) + '\t' + str(run) + '\t' + str(conn_index) + '\t' + str(c.last_query.elapsed)) - server_seconds += c.last_query.elapsed - - client_seconds = time.perf_counter() - start_seconds - print('client-time\t{}\t{}\t{}'.format(tsv_escape(q), client_seconds, server_seconds)) - -report_stage_end('benchmark') - -# Run drop queries -drop_query_templates = [q.text for q in root.findall('drop_query')] -drop_queries = substitute_parameters(drop_query_templates) -for c in connections: - for q in drop_queries: - c.execute(q) - -report_stage_end('drop2') From 54b3047d19d5aed63c59fe46215c70138052386d Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Thu, 27 Aug 2020 21:44:20 +0300 Subject: [PATCH 046/432] fixup --- src/Interpreters/ClientInfo.h | 3 +- src/Interpreters/Context.cpp | 7 ++-- src/Interpreters/OpenTelemetryLog.cpp | 13 +++++--- src/Interpreters/OpenTelemetryLog.h | 2 +- src/Interpreters/executeQuery.cpp | 48 +++++++++++++++++++++++++++ 5 files changed, 63 insertions(+), 10 deletions(-) diff --git a/src/Interpreters/ClientInfo.h b/src/Interpreters/ClientInfo.h index 575d1521b6f..bc6afec36f4 100644 --- a/src/Interpreters/ClientInfo.h +++ b/src/Interpreters/ClientInfo.h @@ -2,6 +2,7 @@ #include #include +#include namespace DB @@ -59,7 +60,7 @@ public: Poco::Net::SocketAddress initial_address; - UInt128 trace_id; + __uint128_t trace_id; UInt64 span_id; UInt64 parent_span_id; diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 93e8ac49114..c225c332248 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -1087,14 +1087,15 @@ void Context::setCurrentQueryId(const String & query_id) UInt64 a; UInt64 b; } words; - UInt128 uuid; + __uint128_t uuid; } random; random.words.a = thread_local_rng(); //-V656 random.words.b = thread_local_rng(); //-V656 - trace_id = random.uuid; - + client_info.trace_id = random.uuid; + client_info.span_id = 1; + client_info.parent_span_id = 0; String query_id_to_set = query_id; if (query_id_to_set.empty()) /// If the user did not submit his query_id, then we generate it ourselves. diff --git a/src/Interpreters/OpenTelemetryLog.cpp b/src/Interpreters/OpenTelemetryLog.cpp index 511c820797a..f8d7d684478 100644 --- a/src/Interpreters/OpenTelemetryLog.cpp +++ b/src/Interpreters/OpenTelemetryLog.cpp @@ -13,12 +13,15 @@ namespace DB Block OpenTelemetrySpanLogElement::createBlock() { return { + // event_date is the date part of event_time, used for indexing. + {std::make_shared(), "event_date"}, + // event_time is the span start time, named so to be compatible with + // the standard ClickHouse system log column names. 
+ {std::make_shared(), "event_time"}, {std::make_shared(), "trace_id"}, {std::make_shared(), "span_id"}, {std::make_shared(), "parent_span_id"}, {std::make_shared(), "operation_name"}, - {std::make_shared(), "start_date"}, - {std::make_shared(), "start_time"}, {std::make_shared(), "finish_time"}, {std::make_shared(std::make_shared()), "attribute.names"}, @@ -31,12 +34,12 @@ void OpenTelemetrySpanLogElement::appendToBlock(MutableColumns & columns) const { size_t i = 0; - columns[i++]->insert(trace_id); + columns[i++]->insert(DateLUT::instance().toDayNum(start_time)); + columns[i++]->insert(start_time); + columns[i++]->insert(UInt128(Int128(trace_id))); columns[i++]->insert(span_id); columns[i++]->insert(parent_span_id); columns[i++]->insert(operation_name); - columns[i++]->insert(DateLUT::instance().toDayNum(start_time)); - columns[i++]->insert(start_time); columns[i++]->insert(finish_time); columns[i++]->insert(attribute_names); columns[i++]->insert(attribute_values); diff --git a/src/Interpreters/OpenTelemetryLog.h b/src/Interpreters/OpenTelemetryLog.h index 723a4201783..73ad5382c95 100644 --- a/src/Interpreters/OpenTelemetryLog.h +++ b/src/Interpreters/OpenTelemetryLog.h @@ -19,7 +19,7 @@ struct OpenTelemetrySpanContext // must log. struct OpenTelemetrySpan { - UInt128 trace_id; + __uint128_t trace_id; UInt64 span_id; UInt64 parent_span_id; std::string operation_name; diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 27ac5fda56f..e74b24c4aa7 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -146,6 +147,11 @@ static void logQuery(const String & query, const Context & context, bool interna (current_user != "default" ? ", user: " + context.getClientInfo().current_user : ""), (!initial_query_id.empty() && current_query_id != initial_query_id ? 
", initial_query_id: " + initial_query_id : std::string()),
 joinLines(query));
+
+ LOG_TRACE(&Poco::Logger::get("executeQuery"),
+ "OpenTelemetry trace id {:x}, span id {:x}, parent span id {:x}",
+ context.getClientInfo().trace_id, context.getClientInfo().span_id,
+ context.getClientInfo().parent_span_id);
 }
}
@@ -216,6 +222,29 @@ static void onExceptionBeforeStart(const String & query_for_logging, Context & c
 if (auto query_log = context.getQueryLog())
 query_log->add(elem);
+ if (auto opentelemetry_log = context.getOpenTelemetryLog())
+ {
+ OpenTelemetrySpanLogElement span;
+ span.trace_id = context.getClientInfo().trace_id;
+ span.span_id = context.getClientInfo().span_id;
+ span.parent_span_id = context.getClientInfo().parent_span_id;
+ span.operation_name = "query";
+ span.start_time = current_time;
+ span.finish_time = current_time;
+
+ // keep values synchronized to the type enum in QueryLogElement::createBlock
+ span.attribute_names.push_back("status");
+ span.attribute_values.push_back("ExceptionBeforeStart");
+
+ span.attribute_names.push_back("query");
+ span.attribute_values.push_back(elem.query);
+
+ span.attribute_names.push_back("query_id");
+ span.attribute_values.push_back(elem.client_info.current_query_id);
+
+ opentelemetry_log->add(span);
+ }
+
 ProfileEvents::increment(ProfileEvents::FailedQuery);
 if (ast)
@@ -587,6 +616,25 @@ static std::tuple executeQueryImpl(
 if (auto opentelemetry_log = context.getOpenTelemetryLog())
 {
+ OpenTelemetrySpanLogElement span;
+ span.trace_id = context.getClientInfo().trace_id;
+ span.span_id = context.getClientInfo().span_id;
+ span.parent_span_id = context.getClientInfo().parent_span_id;
+ span.operation_name = "query";
+ span.start_time = elem.query_start_time;
+ span.finish_time = time(nullptr); // current time
+
+ // keep values synchronized to the type enum in QueryLogElement::createBlock
+ span.attribute_names.push_back("status");
+ span.attribute_values.push_back("QueryFinish");
+
+ span.attribute_names.push_back("query");
+ span.attribute_values.push_back(elem.query);
+
+ span.attribute_names.push_back("query_id");
+ span.attribute_values.push_back(elem.client_info.current_query_id);
+
+ opentelemetry_log->add(span);
 }
 };
From 2bef406200c9e097cfc42910b9b055e1f350b840 Mon Sep 17 00:00:00 2001
From: Alexander Kuzmenkov
Date: Thu, 27 Aug 2020 21:49:06 +0300
Subject: [PATCH 047/432] remove accidental changes
---
 base/daemon/BaseDaemon.cpp | 2 +-
 src/Interpreters/ClientInfo.h | 1 -
 src/Storages/StorageFile.cpp | 59 +----------------------------------
 3 files changed, 2 insertions(+), 60 deletions(-)
diff --git a/base/daemon/BaseDaemon.cpp b/base/daemon/BaseDaemon.cpp
index cc5a765a52d..78801e71a6f 100644
--- a/base/daemon/BaseDaemon.cpp
+++ b/base/daemon/BaseDaemon.cpp
@@ -153,7 +153,7 @@ static void signalHandler(int sig, siginfo_t * info, void * context)
 if (sig != SIGTSTP) /// This signal is used for debugging.
 {
 /// The time that is usually enough for separate thread to print info into log.
- sleepForSeconds(1); + sleepForSeconds(10); call_default_signal_handler(sig); } diff --git a/src/Interpreters/ClientInfo.h b/src/Interpreters/ClientInfo.h index bc6afec36f4..52391d6cf73 100644 --- a/src/Interpreters/ClientInfo.h +++ b/src/Interpreters/ClientInfo.h @@ -59,7 +59,6 @@ public: String initial_query_id; Poco::Net::SocketAddress initial_address; - __uint128_t trace_id; UInt64 span_id; UInt64 parent_span_id; diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index d22a0625a67..558216a6216 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -199,45 +199,6 @@ StorageFile::StorageFile(CommonArguments args) setInMemoryMetadata(storage_metadata); } -struct ScopedExceptionHint -{ - std::string hint; - - // Format message with fmt::format - template - ScopedExceptionHint(Fmt&&... fmt) - : hint(fmt::format(std::forward(fmt)...)) - { - fprintf(stderr, "hint %s created\n", hint.c_str()); - } - - ~ScopedExceptionHint() - { - fprintf(stderr, "hint %s is being destroyed\n", hint.c_str()); - std::exception_ptr exception = std::current_exception(); - if (exception) - { - try { - std::rethrow_exception(exception); - } - catch (Exception & e) - { - e.addMessage(hint); - fprintf(stderr, "added hint %s\n", hint.c_str()); - } - catch (...) - { - fprintf(stderr, "unknown exception\n"); - // pass? - } - } - else - { - fprintf(stderr, "no exception\n"); - } - } -}; - class StorageFileSource : public SourceWithProgress { public: @@ -356,28 +317,10 @@ public: if (!column_defaults.empty()) reader = std::make_shared(reader, column_defaults, context); - ScopedExceptionHint("Reading prefix of file '{}'", current_path); reader->readPrefix(); } - ScopedExceptionHint("Reader->read() {}", reader->getName()); - - Block res; - try - { - res = reader->read(); - } - catch (Exception & e) - { - e.addMessage("Error while reading from '{}'", current_path); - throw; - } - catch (...) - { - throw; - } - - if (res) + if (auto res = reader->read()) { Columns columns = res.getColumns(); UInt64 num_rows = res.rows(); From 46127946788d0ae89aa8c895b8d31fbb453e04c1 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Fri, 28 Aug 2020 04:21:08 +0300 Subject: [PATCH 048/432] opentelemetry context propagation --- src/Core/Defines.h | 5 +- src/Interpreters/ClientInfo.cpp | 140 ++++++++++++++++++++++++++++++ src/Interpreters/ClientInfo.h | 17 +++- src/Interpreters/Context.cpp | 20 ++++- src/Interpreters/executeQuery.cpp | 33 +++++-- src/Interpreters/ya.make | 1 + src/Server/HTTPHandler.cpp | 23 ++++- src/Server/TCPHandler.cpp | 8 +- src/Storages/StorageURL.cpp | 18 +++- 9 files changed, 244 insertions(+), 21 deletions(-) diff --git a/src/Core/Defines.h b/src/Core/Defines.h index e244581c339..d19513d1434 100644 --- a/src/Core/Defines.h +++ b/src/Core/Defines.h @@ -67,8 +67,11 @@ /// Minimum revision supporting SettingsBinaryFormat::STRINGS. #define DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS 54429 +/// Minimum revision supporting OpenTelemetry +#define DBMS_MIN_REVISION_WITH_OPENTELEMETRY 54227 + /// Version of ClickHouse TCP protocol. Set to git tag with latest protocol change. -#define DBMS_TCP_PROTOCOL_VERSION 54226 +#define DBMS_TCP_PROTOCOL_VERSION 54227 /// The boundary on which the blocks for asynchronous file operations should be aligned. 
#define DEFAULT_AIO_FILE_BLOCK_SIZE 4096 diff --git a/src/Interpreters/ClientInfo.cpp b/src/Interpreters/ClientInfo.cpp index 378375dcc18..9e501ca5c11 100644 --- a/src/Interpreters/ClientInfo.cpp +++ b/src/Interpreters/ClientInfo.cpp @@ -60,6 +60,18 @@ void ClientInfo::write(WriteBuffer & out, const UInt64 server_protocol_revision) if (server_protocol_revision >= DBMS_MIN_REVISION_WITH_VERSION_PATCH) writeVarUInt(client_version_patch, out); } + + if (server_protocol_revision >= DBMS_MIN_REVISION_WITH_OPENTELEMETRY) + { + // No point writing these numbers with variable length, because they + // are random and will probably require the full length anyway. + writeBinary(opentelemetry_trace_id, out); + writeBinary(opentelemetry_span_id, out); + writeBinary(opentelemetry_parent_span_id, out); + writeBinary(opentelemetry_tracestate, out); + writeBinary(opentelemetry_trace_flags, out); + std::cerr << fmt::format("wrote {:x}, {}, {}\n", opentelemetry_trace_id, opentelemetry_span_id, opentelemetry_parent_span_id) << StackTrace().toString() << std::endl; + } } @@ -113,6 +125,17 @@ void ClientInfo::read(ReadBuffer & in, const UInt64 client_protocol_revision) else client_version_patch = client_revision; } + + if (client_protocol_revision >= DBMS_MIN_REVISION_WITH_OPENTELEMETRY) + { + readBinary(opentelemetry_trace_id, in); + readBinary(opentelemetry_span_id, in); + readBinary(opentelemetry_parent_span_id, in); + readBinary(opentelemetry_tracestate, in); + readBinary(opentelemetry_trace_flags, in); + + std::cerr << fmt::format("read {:x}, {}, {}\n", opentelemetry_trace_id, opentelemetry_span_id, opentelemetry_parent_span_id) << StackTrace().toString() << std::endl; + } } @@ -123,6 +146,123 @@ void ClientInfo::setInitialQuery() client_name = (DBMS_NAME " ") + client_name; } +template +bool readLowercaseHexDigits(const char *& begin, const char * end, T & dest_value, std::string & error) +{ + char * dest_begin = reinterpret_cast(&dest_value); + char * dest_end = dest_begin + sizeof(dest_value); + bool odd_character = true; + for (;;) + { + if (begin == end) + { + if (dest_begin == dest_end) + { + return true; + } + error = fmt::format("Not enough charaters in the input, got {}, need {} more", end - begin, dest_end - dest_begin); + return false; + } + + if (dest_begin == dest_end) + { + return true; + } + + int cur = 0; + if (*begin >= '0' && *begin <= '9') + { + cur = *begin - '0'; + } + else if (*begin >= 'a' && *begin <= 'f') + { + cur = 10 + *begin - 'a'; + } + else + { + error = fmt::format("Encountered '{}' which is not a lowercase hexadecimal digit", *begin); + return false; + } + + // Two characters per byte, little-endian. + if (odd_character) + { + *(dest_end - 1) = cur; + } + else + { + *(dest_end - 1) = *(dest_end - 1) << 8 | cur; + --dest_end; + } + + begin++; + odd_character = !odd_character; + } +} + +bool ClientInfo::setOpenTelemetryTraceparent(const std::string & traceparent, + std::string & error) +{ + uint8_t version = -1; + __uint128_t trace_id = 0; + uint64_t trace_parent = 0; + uint8_t trace_flags = 0; + + const char * begin = &traceparent[0]; + const char * end = begin + traceparent.length(); + +#define CHECK_CONDITION(condition, ...) \ + ((condition) || (error = fmt::format(__VA_ARGS__), false)) + +#define CHECK_DELIMITER \ + (begin >= end \ + ? (error = fmt::format( \ + "Expected '-' delimiter, got EOL at position {}", \ + begin - &traceparent[0]), \ + false) \ + : *begin != '-' \ + ? 
(error = fmt::format( \
 "Expected '-' delimiter, got '{}' at position {}", \
 *begin, begin - &traceparent[0]), \
 false) \
 : (++begin, true))
+
+ bool result = readLowercaseHexDigits(begin, end, version, error)
+ && CHECK_CONDITION(version == 0, "Expected version 00, got {}", version)
+ && CHECK_DELIMITER
+ && readLowercaseHexDigits(begin, end, trace_id, error)
+ && CHECK_DELIMITER
+ && readLowercaseHexDigits(begin, end, trace_parent, error)
+ && CHECK_DELIMITER
+ && readLowercaseHexDigits(begin, end, trace_flags, error)
+ && CHECK_CONDITION(begin == end,
+ "Expected end of string, got {} at position {}", *begin, end - begin);
+
+#undef CHECK
+#undef CHECK_DELIMITER
+
+ if (!result)
+ {
+ return false;
+ }
+
+ opentelemetry_trace_id = trace_id;
+ opentelemetry_parent_span_id = trace_parent;
+ opentelemetry_trace_flags = trace_flags;
+ return true;
+}
+
+
+std::string ClientInfo::getOpenTelemetryTraceparentForChild() const
+{
+ // This span is a parent for its children (so deep...), so we specify
+ // this span_id as a parent id.
+ return fmt::format("00-{:032x}-{:016x}-{:02x}", opentelemetry_trace_id,
+ opentelemetry_span_id,
+ // This cast is because fmt is being weird and complaining that
+ // "mixing character types is not allowed".
+ static_cast(opentelemetry_trace_flags));
+}
void ClientInfo::fillOSUserHostNameAndVersionInfo()
{
diff --git a/src/Interpreters/ClientInfo.h b/src/Interpreters/ClientInfo.h
index 52391d6cf73..413e1c42bf7 100644
--- a/src/Interpreters/ClientInfo.h
+++ b/src/Interpreters/ClientInfo.h
@@ -59,9 +59,17 @@ public:
 String initial_query_id;
 Poco::Net::SocketAddress initial_address;

- __uint128_t trace_id;
- UInt64 span_id;
- UInt64 parent_span_id;
+ // OpenTelemetry things
+ __uint128_t opentelemetry_trace_id = 0;
+ // Span ID is not strictly client info, but it is convenient to keep it here.
+ // The span id we get in the incoming client info becomes our parent span
+ // id, and the span id we send becomes the downstream parent span id.
+ UInt64 opentelemetry_span_id = 0;
+ UInt64 opentelemetry_parent_span_id = 0;
+ // The incoming tracestate header; we just pass it downstream.
+ // https://www.w3.org/TR/trace-context/
+ String opentelemetry_tracestate;
+ UInt8 opentelemetry_trace_flags = 0;
 /// All below are parameters related to initial query.
@@ -95,6 +103,9 @@ public:
 /// Initialize parameters on client initiating query.
 void setInitialQuery();
+ bool setOpenTelemetryTraceparent(const std::string & traceparent, std::string & error);
+ std::string getOpenTelemetryTraceparentForChild() const;
+
private:
 void fillOSUserHostNameAndVersionInfo();
};
diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp
index c225c332248..c5fd2d585e1 100644
--- a/src/Interpreters/Context.cpp
+++ b/src/Interpreters/Context.cpp
@@ -1093,9 +1093,23 @@ void Context::setCurrentQueryId(const String & query_id)
 random.words.a = thread_local_rng(); //-V656
 random.words.b = thread_local_rng(); //-V656
- client_info.trace_id = random.uuid;
- client_info.span_id = 1;
- client_info.parent_span_id = 0;
+ fmt::print(stderr, "traceid {}, ==0 {}\n", client_info.opentelemetry_trace_id, client_info.opentelemetry_trace_id == 0);
+ if (client_info.opentelemetry_trace_id == 0)
+ {
+ // If trace_id is not initialized, it means that this is an initial query
+ // without any parent OpenTelemetry trace. Use the randomly generated
+ // default query id as the new trace id.
+ client_info.opentelemetry_trace_id = random.uuid; + client_info.opentelemetry_parent_span_id = 0; + client_info.opentelemetry_span_id = thread_local_rng(); + } + else + { + // The incoming span id becomes our parent span id. + client_info.opentelemetry_parent_span_id = client_info.opentelemetry_span_id; + client_info.opentelemetry_span_id = thread_local_rng(); + } + fmt::print(stderr, "traceid {}, ==0 {}\n{}\n", client_info.opentelemetry_trace_id, client_info.opentelemetry_trace_id == 0, StackTrace().toString()); String query_id_to_set = query_id; if (query_id_to_set.empty()) /// If the user did not submit his query_id, then we generate it ourselves. diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index e74b24c4aa7..2a35bc205fa 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -149,9 +149,11 @@ static void logQuery(const String & query, const Context & context, bool interna joinLines(query)); LOG_TRACE(&Poco::Logger::get("executeQuery"), - "OpenTelemetry trace id {:x}, span id {:x}, parent span id {:x}", - context.getClientInfo().trace_id, context.getClientInfo().span_id, - context.getClientInfo().parent_span_id); + "OpenTelemetry trace id {:x}, span id {}, parent span id {}", + context.getClientInfo().opentelemetry_trace_id, context.getClientInfo().opentelemetry_span_id, + context.getClientInfo().opentelemetry_parent_span_id); + + std::cerr << StackTrace().toString() << std::endl; } } @@ -225,9 +227,9 @@ static void onExceptionBeforeStart(const String & query_for_logging, Context & c if (auto opentelemetry_log = context.getOpenTelemetryLog()) { OpenTelemetrySpanLogElement span; - span.trace_id = context.getClientInfo().trace_id; - span.span_id = context.getClientInfo().span_id; - span.parent_span_id = context.getClientInfo().parent_span_id; + span.trace_id = context.getClientInfo().opentelemetry_trace_id; + span.span_id = context.getClientInfo().opentelemetry_span_id; + span.parent_span_id = context.getClientInfo().opentelemetry_parent_span_id; span.operation_name = "query"; span.start_time = current_time; span.finish_time = current_time; @@ -242,6 +244,13 @@ static void onExceptionBeforeStart(const String & query_for_logging, Context & c span.attribute_names.push_back("query_id"); span.attribute_values.push_back(elem.client_info.current_query_id); + if (!context.getClientInfo().opentelemetry_tracestate.empty()) + { + span.attribute_names.push_back("tracestate"); + span.attribute_values.push_back( + context.getClientInfo().opentelemetry_tracestate); + } + opentelemetry_log->add(span); } @@ -617,9 +626,9 @@ static std::tuple executeQueryImpl( if (auto opentelemetry_log = context.getOpenTelemetryLog()) { OpenTelemetrySpanLogElement span; - span.trace_id = context.getClientInfo().trace_id; - span.span_id = context.getClientInfo().span_id; - span.parent_span_id = context.getClientInfo().parent_span_id; + span.trace_id = context.getClientInfo().opentelemetry_trace_id; + span.span_id = context.getClientInfo().opentelemetry_span_id; + span.parent_span_id = context.getClientInfo().opentelemetry_parent_span_id; span.operation_name = "query"; span.start_time = elem.query_start_time; span.finish_time = time(nullptr); // current time @@ -633,6 +642,12 @@ static std::tuple executeQueryImpl( span.attribute_names.push_back("query_id"); span.attribute_values.push_back(elem.client_info.current_query_id); + if (!context.getClientInfo().opentelemetry_tracestate.empty()) + { + span.attribute_names.push_back("tracestate"); + 
span.attribute_values.push_back( + context.getClientInfo().opentelemetry_tracestate); + } opentelemetry_log->add(span); } diff --git a/src/Interpreters/ya.make b/src/Interpreters/ya.make index 23cde61a744..8c4086722c8 100644 --- a/src/Interpreters/ya.make +++ b/src/Interpreters/ya.make @@ -113,6 +113,7 @@ SRCS( MutationsInterpreter.cpp MySQL/InterpretersMySQLDDLQuery.cpp NullableUtils.cpp + OpenTelemetryLog.cpp OptimizeIfChains.cpp OptimizeIfWithConstantConditionVisitor.cpp PartLog.cpp diff --git a/src/Server/HTTPHandler.cpp b/src/Server/HTTPHandler.cpp index 95f56b715b8..fb630010198 100644 --- a/src/Server/HTTPHandler.cpp +++ b/src/Server/HTTPHandler.cpp @@ -96,6 +96,7 @@ namespace ErrorCodes extern const int WRONG_PASSWORD; extern const int REQUIRED_PASSWORD; + extern const int BAD_REQUEST_PARAMETER; extern const int INVALID_SESSION_TIMEOUT; extern const int HTTP_LENGTH_REQUIRED; } @@ -279,9 +280,7 @@ void HTTPHandler::processQuery( } } - std::string query_id = params.get("query_id", ""); context.setUser(user, password, request.clientAddress()); - context.setCurrentQueryId(query_id); if (!quota_key.empty()) context.setQuotaKey(quota_key); @@ -311,6 +310,26 @@ void HTTPHandler::processQuery( session->release(); }); + std::string query_id = params.get("query_id", ""); + context.setCurrentQueryId(query_id); + + if (request.has("traceparent")) + { + std::string opentelemetry_traceparent = request.get("traceparent"); + std::string error; + if (!context.getClientInfo().setOpenTelemetryTraceparent( + opentelemetry_traceparent, error)) + { + throw Exception(ErrorCodes::BAD_REQUEST_PARAMETER, + "Failed to parse OpenTelemetry traceparent header '{}': {}", + opentelemetry_traceparent, error); + } + + context.getClientInfo().opentelemetry_tracestate = request.get("tracestate", ""); + + + } + /// The client can pass a HTTP header indicating supported compression method (gzip or deflate). String http_response_compression_methods = request.get("Accept-Encoding", ""); CompressionMethod http_response_compression_method = CompressionMethod::None; diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index ab4ce820666..e83bbb02cad 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -845,13 +845,17 @@ void TCPHandler::receiveQuery() state.is_empty = false; readStringBinary(state.query_id, *in); - query_context->setCurrentQueryId(state.query_id); - /// Client info ClientInfo & client_info = query_context->getClientInfo(); if (client_revision >= DBMS_MIN_REVISION_WITH_CLIENT_INFO) client_info.read(*in, client_revision); + // It is convenient to generate default OpenTelemetry trace id and default + // query id together. ClientInfo might contain upstream trace id, so we + // decide whether to use the default ids after we have received the ClientInfo. + // We also set up the parent span id while we're at it. + query_context->setCurrentQueryId(state.query_id); + /// For better support of old clients, that does not send ClientInfo. if (client_info.query_kind == ClientInfo::QueryKind::NO_QUERY) { diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index c2f7bfd18d2..d5e86c08a8b 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -67,6 +67,22 @@ namespace const CompressionMethod compression_method) : SourceWithProgress(sample_block), name(std::move(name_)) { + ReadWriteBufferFromHTTP::HTTPHeaderEntries header; + + // Propagate OpenTelemetry trace context, if any, downstream. 
+ auto & client_info = context.getClientInfo(); + if (client_info.opentelemetry_trace_id) + { + header.emplace_back("traceparent", + client_info.getOpenTelemetryTraceparentForChild()); + + if (!client_info.opentelemetry_tracestate.empty()) + { + header.emplace_back("tracestate", + client_info.opentelemetry_tracestate); + } + } + read_buf = wrapReadBufferWithCompressionMethod( std::make_unique( uri, @@ -76,7 +92,7 @@ namespace context.getSettingsRef().max_http_get_redirects, Poco::Net::HTTPBasicCredentials{}, DBMS_DEFAULT_BUFFER_SIZE, - ReadWriteBufferFromHTTP::HTTPHeaderEntries{}, + header, context.getRemoteHostFilter()), compression_method); From f87db0fbe3a9a32fdb2041ecb822cf304edbcf5e Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Fri, 28 Aug 2020 04:34:06 +0300 Subject: [PATCH 049/432] less crazy parsing --- src/Interpreters/ClientInfo.cpp | 113 +++++++------------------------- 1 file changed, 24 insertions(+), 89 deletions(-) diff --git a/src/Interpreters/ClientInfo.cpp b/src/Interpreters/ClientInfo.cpp index 9e501ca5c11..0e8da221ca9 100644 --- a/src/Interpreters/ClientInfo.cpp +++ b/src/Interpreters/ClientInfo.cpp @@ -146,107 +146,42 @@ void ClientInfo::setInitialQuery() client_name = (DBMS_NAME " ") + client_name; } -template -bool readLowercaseHexDigits(const char *& begin, const char * end, T & dest_value, std::string & error) -{ - char * dest_begin = reinterpret_cast(&dest_value); - char * dest_end = dest_begin + sizeof(dest_value); - bool odd_character = true; - for (;;) - { - if (begin == end) - { - if (dest_begin == dest_end) - { - return true; - } - error = fmt::format("Not enough charaters in the input, got {}, need {} more", end - begin, dest_end - dest_begin); - return false; - } - - if (dest_begin == dest_end) - { - return true; - } - - int cur = 0; - if (*begin >= '0' && *begin <= '9') - { - cur = *begin - '0'; - } - else if (*begin >= 'a' && *begin <= 'f') - { - cur = 10 + *begin - 'a'; - } - else - { - error = fmt::format("Encountered '{}' which is not a lowercase hexadecimal digit", *begin); - return false; - } - - // Two characters per byte, little-endian. - if (odd_character) - { - *(dest_end - 1) = cur; - } - else - { - *(dest_end - 1) = *(dest_end - 1) << 8 | cur; - --dest_end; - } - - begin++; - odd_character = !odd_character; - } -} - bool ClientInfo::setOpenTelemetryTraceparent(const std::string & traceparent, std::string & error) { uint8_t version = -1; - __uint128_t trace_id = 0; + __uint64_t trace_id_high = 0; + __uint64_t trace_id_low = 0; uint64_t trace_parent = 0; uint8_t trace_flags = 0; - const char * begin = &traceparent[0]; - const char * end = begin + traceparent.length(); + int result = sscanf(&traceparent[0], + "%2" SCNx8 "-%16" SCNx64 "%16" SCNx64 "-%16" SCNx64 "-%2" SCNx8, + &version, &trace_id_high, &trace_id_low, &trace_parent, &trace_flags); -#define CHECK_CONDITION(condition, ...) \ - ((condition) || (error = fmt::format(__VA_ARGS__), false)) - -#define CHECK_DELIMITER \ - (begin >= end \ - ? (error = fmt::format( \ - "Expected '-' delimiter, got EOL at position {}", \ - begin - &traceparent[0]), \ - false) \ - : *begin != '-' \ - ? 
(error = fmt::format( \ - "Expected '-' delimiter, got '{}' at position {}", \ - *begin, begin - &traceparent[0]), \ - false) \ - : (++begin, true)) - - bool result = readLowercaseHexDigits(begin, end, version, error) - && CHECK_CONDITION(version == 0, "Expected version 00, got {}", version) - && CHECK_DELIMITER - && readLowercaseHexDigits(begin, end, trace_id, error) - && CHECK_DELIMITER - && readLowercaseHexDigits(begin, end, trace_parent, error) - && CHECK_DELIMITER - && readLowercaseHexDigits(begin, end, trace_flags, error) - && CHECK_CONDITION(begin == end, - "Expected end of string, got {} at position {}", *begin, end - begin); - -#undef CHECK -#undef CHECK_DELIMITER - - if (!result) + if (result == EOF) { + error = "Failed to parse traceparent header (EOF)"; return false; } - opentelemetry_trace_id = trace_id; + if (result != 5) + { + error = fmt::format("Failed to parse traceparent header" + "(could only read {} parts instead of the expected 5)", + result); + return false; + } + + if (version != 0) + { + error = fmt::format("Unexpected version {} of traceparent header:" + "expected 00", version); + return false; + } + + opentelemetry_trace_id = static_cast<__uint128_t>(trace_id_high) << 64 + | trace_id_low; opentelemetry_parent_span_id = trace_parent; opentelemetry_trace_flags = trace_flags; return true; From 063cc386a3508cb8ec857373bfe10ca83f2a4c58 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Fri, 28 Aug 2020 22:02:50 +0300 Subject: [PATCH 050/432] cosmetic fixes --- src/Interpreters/ClientInfo.cpp | 27 +++++++++++++++++++++------ src/Interpreters/ClientInfo.h | 2 +- src/Interpreters/Context.cpp | 4 ++-- src/Interpreters/executeQuery.cpp | 2 +- 4 files changed, 25 insertions(+), 10 deletions(-) diff --git a/src/Interpreters/ClientInfo.cpp b/src/Interpreters/ClientInfo.cpp index 0e8da221ca9..f7ed8eafa46 100644 --- a/src/Interpreters/ClientInfo.cpp +++ b/src/Interpreters/ClientInfo.cpp @@ -70,7 +70,6 @@ void ClientInfo::write(WriteBuffer & out, const UInt64 server_protocol_revision) writeBinary(opentelemetry_parent_span_id, out); writeBinary(opentelemetry_tracestate, out); writeBinary(opentelemetry_trace_flags, out); - std::cerr << fmt::format("wrote {:x}, {}, {}\n", opentelemetry_trace_id, opentelemetry_span_id, opentelemetry_parent_span_id) << StackTrace().toString() << std::endl; } } @@ -155,28 +154,44 @@ bool ClientInfo::setOpenTelemetryTraceparent(const std::string & traceparent, uint64_t trace_parent = 0; uint8_t trace_flags = 0; + // Version 00, which is the only one we can parse, is fixed width. Use this + // fact for an additional sanity check. + const int expected_length = 2 + 1 + 32 + 1 + 16 + 1 + 2; + if (traceparent.length() != expected_length) + { + error = fmt::format("unexpected length {}, expected {}", + traceparent.length(), expected_length); + return false; + } + + // clang-tidy doesn't like sscanf: + // error: 'sscanf' used to convert a string to an unsigned integer value, + // but function will not report conversion errors; consider using 'strtoul' + // instead [cert-err34-c,-warnings-as-errors] + // There is no other ready solution, and hand-rolling a more complicated + // parser for an HTTP header in C++ sounds like RCE. 
+ // NOLINTNEXTLINE(cert-err34-c) int result = sscanf(&traceparent[0], "%2" SCNx8 "-%16" SCNx64 "%16" SCNx64 "-%16" SCNx64 "-%2" SCNx8, &version, &trace_id_high, &trace_id_low, &trace_parent, &trace_flags); if (result == EOF) { - error = "Failed to parse traceparent header (EOF)"; + error = "EOF"; return false; } + // We read uint128 as two uint64, so 5 parts and not 4. if (result != 5) { - error = fmt::format("Failed to parse traceparent header" - "(could only read {} parts instead of the expected 5)", + error = fmt::format("could only read {} parts instead of the expected 5", result); return false; } if (version != 0) { - error = fmt::format("Unexpected version {} of traceparent header:" - "expected 00", version); + error = fmt::format("unexpected version {}, expected 00", version); return false; } diff --git a/src/Interpreters/ClientInfo.h b/src/Interpreters/ClientInfo.h index 413e1c42bf7..604a9771a52 100644 --- a/src/Interpreters/ClientInfo.h +++ b/src/Interpreters/ClientInfo.h @@ -58,7 +58,7 @@ public: String initial_user; String initial_query_id; Poco::Net::SocketAddress initial_address; - + // OpenTelemetry things __uint128_t opentelemetry_trace_id = 0; // Span ID is not strictly the client info, but convenient to keep here. diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index c5fd2d585e1..362e762df6d 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -1092,7 +1092,7 @@ void Context::setCurrentQueryId(const String & query_id) random.words.a = thread_local_rng(); //-V656 random.words.b = thread_local_rng(); //-V656 - + fmt::print(stderr, "traceid {}, ==0 {}\n", client_info.opentelemetry_trace_id, client_info.opentelemetry_trace_id == 0); if (client_info.opentelemetry_trace_id == 0) { @@ -1120,7 +1120,7 @@ void Context::setCurrentQueryId(const String & query_id) QueryUUID(const char * bytes, Poco::UUID::Version version) : Poco::UUID(bytes, version) {} }; - + query_id_to_set = QueryUUID(random.bytes, Poco::UUID::UUID_RANDOM).toString(); } diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 2a35bc205fa..05a0211adce 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -147,7 +147,7 @@ static void logQuery(const String & query, const Context & context, bool interna (current_user != "default" ? ", user: " + context.getClientInfo().current_user : ""), (!initial_query_id.empty() && current_query_id != initial_query_id ? 
", initial_query_id: " + initial_query_id : std::string()), joinLines(query)); - + LOG_TRACE(&Poco::Logger::get("executeQuery"), "OpenTelemetry trace id {:x}, span id {}, parent span id {}", context.getClientInfo().opentelemetry_trace_id, context.getClientInfo().opentelemetry_span_id, From 79e0b184ac16675322f25d1d840c50201a2ab822 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Fri, 28 Aug 2020 23:40:23 +0300 Subject: [PATCH 051/432] A test + reinterpretAsUUID --- src/Functions/reinterpretStringAs.cpp | 8 +++-- src/Storages/StorageURL.cpp | 2 +- .../0_stateless/01455_opentelemetry.reference | 3 ++ .../0_stateless/01455_opentelemetry.sh | 31 +++++++++++++++++++ tests/queries/shell_config.sh | 20 ++++++------ 5 files changed, 52 insertions(+), 12 deletions(-) create mode 100644 tests/queries/0_stateless/01455_opentelemetry.reference create mode 100755 tests/queries/0_stateless/01455_opentelemetry.sh diff --git a/src/Functions/reinterpretStringAs.cpp b/src/Functions/reinterpretStringAs.cpp index bb290b33b6d..34e53f32dd2 100644 --- a/src/Functions/reinterpretStringAs.cpp +++ b/src/Functions/reinterpretStringAs.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -66,7 +67,7 @@ public: size_t offset = 0; for (size_t i = 0; i < size; ++i) { - ToFieldType value = 0; + ToFieldType value{}; memcpy(&value, &data_from[offset], std::min(static_cast(sizeof(ToFieldType)), offsets_from[i] - offset - 1)); vec_res[i] = value; offset = offsets_from[i]; @@ -88,7 +89,7 @@ public: size_t copy_size = std::min(step, sizeof(ToFieldType)); for (size_t i = 0; i < size; ++i) { - ToFieldType value = 0; + ToFieldType value{}; memcpy(&value, &data_from[offset], copy_size); vec_res[i] = value; offset += step; @@ -118,6 +119,7 @@ struct NameReinterpretAsFloat32 { static constexpr auto name = "reinterpretA struct NameReinterpretAsFloat64 { static constexpr auto name = "reinterpretAsFloat64"; }; struct NameReinterpretAsDate { static constexpr auto name = "reinterpretAsDate"; }; struct NameReinterpretAsDateTime { static constexpr auto name = "reinterpretAsDateTime"; }; +struct NameReinterpretAsUUID { static constexpr auto name = "reinterpretAsUUID"; }; using FunctionReinterpretAsUInt8 = FunctionReinterpretStringAs; using FunctionReinterpretAsUInt16 = FunctionReinterpretStringAs; @@ -131,6 +133,7 @@ using FunctionReinterpretAsFloat32 = FunctionReinterpretStringAs; using FunctionReinterpretAsDate = FunctionReinterpretStringAs; using FunctionReinterpretAsDateTime = FunctionReinterpretStringAs; +using FunctionReinterpretAsUUID = FunctionReinterpretStringAs; void registerFunctionsReinterpretStringAs(FunctionFactory & factory) @@ -147,6 +150,7 @@ void registerFunctionsReinterpretStringAs(FunctionFactory & factory) factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); + factory.registerFunction(); } } diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index d5e86c08a8b..cd1b5dd882a 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -70,7 +70,7 @@ namespace ReadWriteBufferFromHTTP::HTTPHeaderEntries header; // Propagate OpenTelemetry trace context, if any, downstream. 
- auto & client_info = context.getClientInfo(); + const auto & client_info = context.getClientInfo(); if (client_info.opentelemetry_trace_id) { header.emplace_back("traceparent", diff --git a/tests/queries/0_stateless/01455_opentelemetry.reference b/tests/queries/0_stateless/01455_opentelemetry.reference new file mode 100644 index 00000000000..b0484d7df0b --- /dev/null +++ b/tests/queries/0_stateless/01455_opentelemetry.reference @@ -0,0 +1,3 @@ +1 +4 +1 diff --git a/tests/queries/0_stateless/01455_opentelemetry.sh b/tests/queries/0_stateless/01455_opentelemetry.sh new file mode 100755 index 00000000000..f537377dfe6 --- /dev/null +++ b/tests/queries/0_stateless/01455_opentelemetry.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +set -ue + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. "$CURDIR"/../shell_config.sh + +# Generate some random trace id so that the prevous runs of the test do not interfere. +trace_id=$(${CLICKHOUSE_CLIENT} -q "select lower(hex(reverse(reinterpretAsString(generateUUIDv4()))))") + +${CLICKHOUSE_CURL} --header "traceparent: 00-$trace_id-0000000000000010-01" --header "tracestate: some custom state" "http://localhost:8123/" --get --data-urlencode "query=select 1 from remote('127.0.0.2', system, one)" + +${CLICKHOUSE_CLIENT} -q "system flush logs" + +# Check that the HTTP traceparent was read, and then passed to `remote` instance. +# We expect 4 queries, because there are two DESC TABLE queries for the shard. +# This is bug-ish, see https://github.com/ClickHouse/ClickHouse/issues/14228 +${CLICKHOUSE_CLIENT} -q "select count(*) from system.opentelemetry_log where trace_id = reinterpretAsUUID(reverse(unhex('$trace_id')))" + +# Check that the tracestate header was read and passed. Must have +# exactly the same value for all "query" spans in this trace. 
+${CLICKHOUSE_CLIENT} -q " + select count(distinct attribute.values) + from system.opentelemetry_log + array join attribute.names, attribute.values + where + trace_id = reinterpretAsUUID(reverse(unhex('$trace_id'))) + and operation_name = 'query' + and attribute.names = 'tracestate' +" + + diff --git a/tests/queries/shell_config.sh b/tests/queries/shell_config.sh index 8c66d79b5b1..cd4fd418c81 100644 --- a/tests/queries/shell_config.sh +++ b/tests/queries/shell_config.sh @@ -1,16 +1,18 @@ +#!/usr/bin/env bash + export CLICKHOUSE_DATABASE=${CLICKHOUSE_DATABASE:="test"} export CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL:="warning"} -[ -n "$CLICKHOUSE_CONFIG_CLIENT" ] && CLICKHOUSE_CLIENT_OPT0+=" --config-file=${CLICKHOUSE_CONFIG_CLIENT} " -[ -n "${CLICKHOUSE_HOST}" ] && CLICKHOUSE_CLIENT_OPT0+=" --host=${CLICKHOUSE_HOST} " -[ -n "${CLICKHOUSE_PORT_TCP}" ] && CLICKHOUSE_CLIENT_OPT0+=" --port=${CLICKHOUSE_PORT_TCP} " -[ -n "${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}" ] && CLICKHOUSE_CLIENT_OPT0+=" --send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL} " -[ -n "${CLICKHOUSE_DATABASE}" ] && CLICKHOUSE_CLIENT_OPT0+=" --database=${CLICKHOUSE_DATABASE} " +[ -v CLICKHOUSE_CONFIG_CLIENT ] && CLICKHOUSE_CLIENT_OPT0+=" --config-file=${CLICKHOUSE_CONFIG_CLIENT} " +[ -v CLICKHOUSE_HOST ] && CLICKHOUSE_CLIENT_OPT0+=" --host=${CLICKHOUSE_HOST} " +[ -v CLICKHOUSE_PORT_TCP ] && CLICKHOUSE_CLIENT_OPT0+=" --port=${CLICKHOUSE_PORT_TCP} " +[ -v CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL ] && CLICKHOUSE_CLIENT_OPT0+=" --send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL} " +[ -v CLICKHOUSE_DATABASE ] && CLICKHOUSE_CLIENT_OPT0+=" --database=${CLICKHOUSE_DATABASE} " export CLICKHOUSE_BINARY=${CLICKHOUSE_BINARY:="clickhouse"} [ -x "$CLICKHOUSE_BINARY-client" ] && CLICKHOUSE_CLIENT_BINARY=${CLICKHOUSE_CLIENT_BINARY:=$CLICKHOUSE_BINARY-client} [ -x "$CLICKHOUSE_BINARY" ] && CLICKHOUSE_CLIENT_BINARY=${CLICKHOUSE_CLIENT_BINARY:=$CLICKHOUSE_BINARY client} export CLICKHOUSE_CLIENT_BINARY=${CLICKHOUSE_CLIENT_BINARY:=$CLICKHOUSE_BINARY-client} -export CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT:="$CLICKHOUSE_CLIENT_BINARY ${CLICKHOUSE_CLIENT_OPT0} ${CLICKHOUSE_CLIENT_OPT}"} +export CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT:="$CLICKHOUSE_CLIENT_BINARY ${CLICKHOUSE_CLIENT_OPT0:-} ${CLICKHOUSE_CLIENT_OPT:-}"} [ -x "${CLICKHOUSE_BINARY}-local" ] && CLICKHOUSE_LOCAL=${CLICKHOUSE_LOCAL:="${CLICKHOUSE_BINARY}-local"} [ -x "${CLICKHOUSE_BINARY}" ] && CLICKHOUSE_LOCAL=${CLICKHOUSE_LOCAL:="${CLICKHOUSE_BINARY} local"} export CLICKHOUSE_LOCAL=${CLICKHOUSE_LOCAL:="${CLICKHOUSE_BINARY}-local"} @@ -42,7 +44,7 @@ export CLICKHOUSE_PORT_HTTPS=${CLICKHOUSE_PORT_HTTPS:="8443"} export CLICKHOUSE_PORT_HTTP_PROTO=${CLICKHOUSE_PORT_HTTP_PROTO:="http"} # Add database to url params -if [ -n "${CLICKHOUSE_URL_PARAMS}" ] +if [ -v CLICKHOUSE_URL_PARAMS ] then export CLICKHOUSE_URL_PARAMS="${CLICKHOUSE_URL_PARAMS}&database=${CLICKHOUSE_DATABASE}" else @@ -53,7 +55,7 @@ export CLICKHOUSE_URL=${CLICKHOUSE_URL:="${CLICKHOUSE_PORT_HTTP_PROTO}://${CLICK export CLICKHOUSE_URL_HTTPS=${CLICKHOUSE_URL_HTTPS:="https://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTPS}/"} # Add url params to url -if [ -n "${CLICKHOUSE_URL_PARAMS}" ] +if [ -v CLICKHOUSE_URL_PARAMS ] then export CLICKHOUSE_URL="${CLICKHOUSE_URL}?${CLICKHOUSE_URL_PARAMS}" export CLICKHOUSE_URL_HTTPS="${CLICKHOUSE_URL_HTTPS}?${CLICKHOUSE_URL_PARAMS}" @@ -65,7 +67,7 @@ export CLICKHOUSE_URL_INTERSERVER=${CLICKHOUSE_URL_INTERSERVER:="${CLICKHOUSE_PO export 
CLICKHOUSE_CURL_COMMAND=${CLICKHOUSE_CURL_COMMAND:="curl"} export CLICKHOUSE_CURL_TIMEOUT=${CLICKHOUSE_CURL_TIMEOUT:="10"} -export CLICKHOUSE_CURL=${CLICKHOUSE_CURL:="${CLICKHOUSE_CURL_COMMAND} -q --max-time ${CLICKHOUSE_CURL_TIMEOUT}"} +export CLICKHOUSE_CURL=${CLICKHOUSE_CURL:="${CLICKHOUSE_CURL_COMMAND} -q -s --max-time ${CLICKHOUSE_CURL_TIMEOUT}"} export CLICKHOUSE_TMP=${CLICKHOUSE_TMP:="."} mkdir -p ${CLICKHOUSE_TMP} From c02f9e1dd1ca825a6dba653b2fb6cf3722ea195d Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Sat, 29 Aug 2020 01:48:48 +0300 Subject: [PATCH 052/432] remove some debug output to pass the tests --- src/Interpreters/Context.cpp | 2 -- src/Interpreters/executeQuery.cpp | 2 -- 2 files changed, 4 deletions(-) diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 362e762df6d..4bc66173a9c 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -1093,7 +1093,6 @@ void Context::setCurrentQueryId(const String & query_id) random.words.a = thread_local_rng(); //-V656 random.words.b = thread_local_rng(); //-V656 - fmt::print(stderr, "traceid {}, ==0 {}\n", client_info.opentelemetry_trace_id, client_info.opentelemetry_trace_id == 0); if (client_info.opentelemetry_trace_id == 0) { // If trace_id is not initialized, it means that this is an initial query @@ -1109,7 +1108,6 @@ void Context::setCurrentQueryId(const String & query_id) client_info.opentelemetry_parent_span_id = client_info.opentelemetry_span_id; client_info.opentelemetry_span_id = thread_local_rng(); } - fmt::print(stderr, "traceid {}, ==0 {}\n{}\n", client_info.opentelemetry_trace_id, client_info.opentelemetry_trace_id == 0, StackTrace().toString()); String query_id_to_set = query_id; if (query_id_to_set.empty()) /// If the user did not submit his query_id, then we generate it ourselves. 
diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 05a0211adce..1da12d993ea 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -152,8 +152,6 @@ static void logQuery(const String & query, const Context & context, bool interna "OpenTelemetry trace id {:x}, span id {}, parent span id {}", context.getClientInfo().opentelemetry_trace_id, context.getClientInfo().opentelemetry_span_id, context.getClientInfo().opentelemetry_parent_span_id); - - std::cerr << StackTrace().toString() << std::endl; } } From fa8eebed780de4b79c000613341da25d4efe6f4a Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Sat, 29 Aug 2020 02:25:30 +0300 Subject: [PATCH 053/432] more tests + clickhouse-client options --- programs/client/Client.cpp | 21 ++++++++ .../0_stateless/01455_opentelemetry.sh | 31 ----------- ...01455_opentelemetry_distributed.reference} | 3 ++ .../01455_opentelemetry_distributed.sh | 52 +++++++++++++++++++ 4 files changed, 76 insertions(+), 31 deletions(-) delete mode 100755 tests/queries/0_stateless/01455_opentelemetry.sh rename tests/queries/0_stateless/{01455_opentelemetry.reference => 01455_opentelemetry_distributed.reference} (50%) create mode 100755 tests/queries/0_stateless/01455_opentelemetry_distributed.sh diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index e3a9b68dc47..fc2adff337e 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -2180,6 +2180,8 @@ public: ("log-level", po::value(), "client log level") ("server_logs_file", po::value(), "put server logs into specified file") ("query-fuzzer-runs", po::value()->default_value(0), "query fuzzer runs") + ("opentelemetry-traceparent", po::value(), "OpenTelemetry traceparent header as described by W3C Trace Context recommendation") + ("opentelemetry-tracestate", po::value(), "OpenTelemetry tracestate header as described by W3C Trace Context recommendation") ; Settings cmd_settings; @@ -2348,6 +2350,25 @@ public: ignore_error = true; } + if (options.count("opentelemetry-traceparent")) + { + std::string traceparent = options["opentelemetry-traceparent"].as(); + std::string error; + if (!context.getClientInfo().setOpenTelemetryTraceparent( + traceparent, error)) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Cannot parse OpenTelemetry traceparent '{}': {}", + traceparent, error); + } + } + + if (options.count("opentelemetry-tracestate")) + { + context.getClientInfo().opentelemetry_tracestate = + options["opentelemetry-tracestate"].as(); + } + argsToConfig(common_arguments, config(), 100); clearPasswordFromCommandLine(argc, argv); diff --git a/tests/queries/0_stateless/01455_opentelemetry.sh b/tests/queries/0_stateless/01455_opentelemetry.sh deleted file mode 100755 index f537377dfe6..00000000000 --- a/tests/queries/0_stateless/01455_opentelemetry.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash -set -ue - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -. "$CURDIR"/../shell_config.sh - -# Generate some random trace id so that the prevous runs of the test do not interfere. 
-trace_id=$(${CLICKHOUSE_CLIENT} -q "select lower(hex(reverse(reinterpretAsString(generateUUIDv4()))))") - -${CLICKHOUSE_CURL} --header "traceparent: 00-$trace_id-0000000000000010-01" --header "tracestate: some custom state" "http://localhost:8123/" --get --data-urlencode "query=select 1 from remote('127.0.0.2', system, one)" - -${CLICKHOUSE_CLIENT} -q "system flush logs" - -# Check that the HTTP traceparent was read, and then passed to `remote` instance. -# We expect 4 queries, because there are two DESC TABLE queries for the shard. -# This is bug-ish, see https://github.com/ClickHouse/ClickHouse/issues/14228 -${CLICKHOUSE_CLIENT} -q "select count(*) from system.opentelemetry_log where trace_id = reinterpretAsUUID(reverse(unhex('$trace_id')))" - -# Check that the tracestate header was read and passed. Must have -# exactly the same value for all "query" spans in this trace. -${CLICKHOUSE_CLIENT} -q " - select count(distinct attribute.values) - from system.opentelemetry_log - array join attribute.names, attribute.values - where - trace_id = reinterpretAsUUID(reverse(unhex('$trace_id'))) - and operation_name = 'query' - and attribute.names = 'tracestate' -" - - diff --git a/tests/queries/0_stateless/01455_opentelemetry.reference b/tests/queries/0_stateless/01455_opentelemetry_distributed.reference similarity index 50% rename from tests/queries/0_stateless/01455_opentelemetry.reference rename to tests/queries/0_stateless/01455_opentelemetry_distributed.reference index b0484d7df0b..5993b628ad4 100644 --- a/tests/queries/0_stateless/01455_opentelemetry.reference +++ b/tests/queries/0_stateless/01455_opentelemetry_distributed.reference @@ -1,3 +1,6 @@ 1 4 1 +1 +2 +1 diff --git a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh new file mode 100755 index 00000000000..df5c194b2be --- /dev/null +++ b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +set -ue + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. "$CURDIR"/../shell_config.sh + +function check_log +{ +${CLICKHOUSE_CLIENT} -nq " +system flush logs; + +-- Check the number of spans with given trace id, to verify it was propagated. +select count(*) + from system.opentelemetry_log + where trace_id = reinterpretAsUUID(reverse(unhex('$trace_id'))) + and operation_name = 'query' + ; + +-- Check that the tracestate header was propagated. It must have exactly the +-- same non-empty value for all 'query' spans in this trace. +select count(distinct value) + from system.opentelemetry_log + array join attribute.names as name, attribute.values as value + where + trace_id = reinterpretAsUUID(reverse(unhex('$trace_id'))) + and operation_name = 'query' + and name = 'tracestate' + and length(value) > 0 + ; +" +} + +# Generate some random trace id so that the previous runs of the test do not interfere. +trace_id=$(${CLICKHOUSE_CLIENT} -q "select lower(hex(reverse(reinterpretAsString(generateUUIDv4()))))") + +# Check that the HTTP traceparent is read, and then passed through `remote` table function. +# We expect 4 queries, because there are two DESC TABLE queries for the shard.
+# This is bug-ish, see https://github.com/ClickHouse/ClickHouse/issues/14228 +${CLICKHOUSE_CURL} --header "traceparent: 00-$trace_id-0000000000000010-01" --header "tracestate: some custom state" "http://localhost:8123/" --get --data-urlencode "query=select 1 from remote('127.0.0.2', system, one)" + +check_log + +# With another trace id, check that clickhouse-client accepts traceparent, and +# that it is passed through URL table function. We expect two query spans, one +# for the initial query, and one for the HTTP query. +trace_id=$(${CLICKHOUSE_CLIENT} -q "select lower(hex(reverse(reinterpretAsString(generateUUIDv4()))))") + +${CLICKHOUSE_CLIENT} --opentelemetry-traceparent "00-$trace_id-0000000000000020-02" --opentelemetry-tracestate "another custom state" --query " + select * from url('http://127.0.0.2:8123/?query=select%201', CSV, 'a int') +" + +check_log From d0a9926e7dba352adfe6e89c3fa8a3bd8f940377 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 8 Sep 2020 16:19:27 +0300 Subject: [PATCH 054/432] fixes and some docs --- docs/en/operations/opentelemetry-draft.md | 61 +++++++++++++++++++ programs/server/config.xml | 15 +++++ src/Core/Settings.h | 1 + src/Interpreters/ClientInfo.cpp | 49 ++++++++++----- src/Interpreters/ClientInfo.h | 2 +- src/Interpreters/Context.cpp | 26 +++++--- src/Interpreters/OpenTelemetryLog.cpp | 11 ++-- src/Interpreters/OpenTelemetryLog.h | 5 +- src/Interpreters/executeQuery.cpp | 54 +++++++++------- src/Server/TCPHandler.cpp | 26 +++++--- .../01455_opentelemetry_distributed.reference | 4 ++ .../01455_opentelemetry_distributed.sh | 41 +++++++++++-- 12 files changed, 226 insertions(+), 69 deletions(-) create mode 100644 docs/en/operations/opentelemetry-draft.md diff --git a/docs/en/operations/opentelemetry-draft.md b/docs/en/operations/opentelemetry-draft.md new file mode 100644 index 00000000000..3363b37b6d6 --- /dev/null +++ b/docs/en/operations/opentelemetry-draft.md @@ -0,0 +1,61 @@ +# [draft] OpenTelemetry support + +[OpenTelemetry](https://opentelemetry.io/) is an open standard for collecting +traces and metrics from distributed applications. ClickHouse has some support +for OpenTelemetry. + + +## Supplying Trace Context to ClickHouse + +ClickHouse accepts trace context HTTP headers, as described by +the [W3C recommendation](https://www.w3.org/TR/trace-context/). +It also accepts trace context over the native protocol that is used for +communication between ClickHouse servers or between the client and server. +For manual testing, trace context headers conforming to the Trace Context +recommendation can be supplied to `clickhouse-client` using the +`--opentelemetry-traceparent` and `--opentelemetry-tracestate` flags. + +If no parent trace context is supplied, ClickHouse can start a new trace, with +probability controlled by the `opentelemetry_start_trace_probability` setting. + + +## Propagating the Trace Context + +The trace context is propagated to downstream services in the following cases: + +* Queries to remote ClickHouse servers, such as when using the `Distributed` table + engine. + +* The `URL` table function. Trace context information is sent in HTTP headers. + + +## Tracing ClickHouse Itself + +ClickHouse creates _trace spans_ for each query and some of the query execution +stages, such as query planning or distributed queries. + +To be useful, the tracing information has to be exported to a monitoring system +that supports OpenTelemetry, such as Jaeger or Prometheus.
ClickHouse avoids +a dependency on a particular monitoring system, instead only +providing the tracing data conforming to the standard. A natural way to do so +in an SQL RDBMS is a system table. OpenTelemetry trace span information +[required by the standard](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/overview.md#span) +is stored in the system table called `system.opentelemetry_log`. + +The table must be enabled in the server configuration; see the `opentelemetry_log` +element in the default config file `config.xml`. It is enabled by default. + +The table has the following columns: + +- `trace_id` +- `span_id` +- `parent_span_id` +- `operation_name` +- `start_time` +- `finish_time` +- `finish_date` +- `attribute.names` +- `attribute.values` + +The tags or attributes are saved as two parallel arrays, containing the keys +and values. Use `ARRAY JOIN` to work with them. diff --git a/programs/server/config.xml b/programs/server/config.xml index 7e88150b95f..913f43fc435 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -567,6 +567,21 @@ OpenTelemetry log contains OpenTelemetry trace spans. --> + <opentelemetry_log> + <engine> + engine MergeTree + partition by toYYYYMM(finish_date) + order by (finish_date, finish_time, trace_id) + </engine> + <database>system</database> + <table>opentelemetry_log</table> + <flush_interval_milliseconds>7500</flush_interval_milliseconds> + </opentelemetry_log> diff --git a/src/Core/Settings.h b/src/Core/Settings.h index f7438b356d5..bb3baae6ba4 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -218,6 +218,7 @@ class IColumn; M(UInt64, query_profiler_cpu_time_period_ns, 1000000000, "Period for CPU clock timer of query profiler (in nanoseconds). Set 0 value to turn off the CPU clock query profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \ M(Bool, metrics_perf_events_enabled, false, "If enabled, some of the perf events will be measured throughout queries' execution.", 0) \ M(String, metrics_perf_events_list, "", "Comma separated list of perf metrics that will be measured throughout queries' execution. Empty means all events. See PerfEventInfo in sources for the available events.", 0) \ + M(Float, opentelemetry_start_trace_probability, 0., "Probability to start an OpenTelemetry trace for an incoming query.", 0) \ \ \ /** Limits during query execution are part of the settings. \ diff --git a/src/Interpreters/ClientInfo.cpp b/src/Interpreters/ClientInfo.cpp index f7ed8eafa46..da9f14ec879 100644 --- a/src/Interpreters/ClientInfo.cpp +++ b/src/Interpreters/ClientInfo.cpp @@ -65,11 +65,19 @@ void ClientInfo::write(WriteBuffer & out, const UInt64 server_protocol_revision) { // No point writing these numbers with variable length, because they // are random and will probably require the full length anyway. - writeBinary(opentelemetry_trace_id, out); - writeBinary(opentelemetry_span_id, out); - writeBinary(opentelemetry_parent_span_id, out); - writeBinary(opentelemetry_tracestate, out); - writeBinary(opentelemetry_trace_flags, out); + if (opentelemetry_trace_id) + { + writeBinary(uint8_t(1), out); + writeBinary(opentelemetry_trace_id, out); + writeBinary(opentelemetry_span_id, out); + writeBinary(opentelemetry_parent_span_id, out); + writeBinary(opentelemetry_tracestate, out); + writeBinary(opentelemetry_trace_flags, out); + } + else + { + writeBinary(uint8_t(0), out); + } } } @@ -125,15 +133,24 @@ void ClientInfo::read(ReadBuffer & in, const UInt64 client_protocol_revision) client_version_patch = client_revision; } + // TODO what does it even mean to read this structure over HTTP? I thought + // this was for native protocol? See interface == Interface::HTTP.
if (client_protocol_revision >= DBMS_MIN_REVISION_WITH_OPENTELEMETRY) { - readBinary(opentelemetry_trace_id, in); - readBinary(opentelemetry_span_id, in); - readBinary(opentelemetry_parent_span_id, in); - readBinary(opentelemetry_tracestate, in); - readBinary(opentelemetry_trace_flags, in); + uint8_t have_trace_id = 0; + readBinary(have_trace_id, in); + if (have_trace_id) + { + readBinary(opentelemetry_trace_id, in); + readBinary(opentelemetry_span_id, in); + readBinary(opentelemetry_parent_span_id, in); + readBinary(opentelemetry_tracestate, in); + readBinary(opentelemetry_trace_flags, in); - std::cerr << fmt::format("read {:x}, {}, {}\n", opentelemetry_trace_id, opentelemetry_span_id, opentelemetry_parent_span_id) << StackTrace().toString() << std::endl; + fmt::print(stderr, "read {:x}, {}, {} at\n{}\n", + opentelemetry_trace_id, opentelemetry_span_id, + opentelemetry_parent_span_id, StackTrace().toString()); + } } } @@ -149,8 +166,8 @@ bool ClientInfo::setOpenTelemetryTraceparent(const std::string & traceparent, std::string & error) { uint8_t version = -1; - __uint64_t trace_id_high = 0; - __uint64_t trace_id_low = 0; + uint64_t trace_id_high = 0; + uint64_t trace_id_low = 0; uint64_t trace_parent = 0; uint8_t trace_flags = 0; @@ -205,11 +222,11 @@ bool ClientInfo::setOpenTelemetryTraceparent(const std::string & traceparent, std::string ClientInfo::getOpenTelemetryTraceparentForChild() const { - // This span is a parent for its children (so deep...), so we specify - // this span_id as a parent id. + // This span is a parent for its children, so we specify this span_id as a + // parent id. return fmt::format("00-{:032x}-{:016x}-{:02x}", opentelemetry_trace_id, opentelemetry_span_id, - // This cast is because fmt is being weird and complaining that + // This cast is needed because fmt is being weird and complaining that // "mixing character types is not allowed". static_cast(opentelemetry_trace_flags)); } diff --git a/src/Interpreters/ClientInfo.h b/src/Interpreters/ClientInfo.h index 604a9771a52..f57d1853b76 100644 --- a/src/Interpreters/ClientInfo.h +++ b/src/Interpreters/ClientInfo.h @@ -69,7 +69,7 @@ public: // the incoming tracestate header, we just pass it downstream. // https://www.w3.org/TR/trace-context/ String opentelemetry_tracestate; - UInt8 opentelemetry_trace_flags; + UInt8 opentelemetry_trace_flags = 0; /// All below are parameters related to initial query. diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 4bc66173a9c..e12747a7f95 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -1093,18 +1093,28 @@ void Context::setCurrentQueryId(const String & query_id) random.words.a = thread_local_rng(); //-V656 random.words.b = thread_local_rng(); //-V656 - if (client_info.opentelemetry_trace_id == 0) + if (client_info.query_kind == ClientInfo::QueryKind::INITIAL_QUERY + && client_info.opentelemetry_trace_id == 0) { - // If trace_id is not initialized, it means that this is an initial query - // without any parent OpenTelemetry trace. Use the randomly generated - // default query id as the new trace id. - client_info.opentelemetry_trace_id = random.uuid; - client_info.opentelemetry_parent_span_id = 0; - client_info.opentelemetry_span_id = thread_local_rng(); + // If this is an initial query without any parent OpenTelemetry trace, we + // might start the trace ourselves, with some configurable probability. 
+ std::bernoulli_distribution should_start_trace{ + settings.opentelemetry_start_trace_probability}; + + if (should_start_trace(thread_local_rng)) + { + // Use the randomly generated default query id as the new trace id. + client_info.opentelemetry_trace_id = random.uuid; + client_info.opentelemetry_parent_span_id = 0; + client_info.opentelemetry_span_id = thread_local_rng(); + // Mark this trace as sampled in the flags. + client_info.opentelemetry_trace_flags = 1; + } } else { - // The incoming span id becomes our parent span id. + // The incoming request has an OpenTelemetry trace context. Its span id + // becomes our parent span id. client_info.opentelemetry_parent_span_id = client_info.opentelemetry_span_id; client_info.opentelemetry_span_id = thread_local_rng(); } diff --git a/src/Interpreters/OpenTelemetryLog.cpp b/src/Interpreters/OpenTelemetryLog.cpp index f8d7d684478..161af01107f 100644 --- a/src/Interpreters/OpenTelemetryLog.cpp +++ b/src/Interpreters/OpenTelemetryLog.cpp @@ -13,16 +13,13 @@ namespace DB Block OpenTelemetrySpanLogElement::createBlock() { return { - // event_date is the date part of event_time, used for indexing. - {std::make_shared<DataTypeDate>(), "event_date"}, - // event_time is the span start time, named so to be compatible with - // the standard ClickHouse system log column names. - {std::make_shared<DataTypeDateTime>(), "event_time"}, {std::make_shared<DataTypeUUID>(), "trace_id"}, {std::make_shared<DataTypeUInt64>(), "span_id"}, {std::make_shared<DataTypeUInt64>(), "parent_span_id"}, {std::make_shared<DataTypeString>(), "operation_name"}, + {std::make_shared<DataTypeDateTime>(), "start_time"}, {std::make_shared<DataTypeDateTime>(), "finish_time"}, + {std::make_shared<DataTypeDate>(), "finish_date"}, {std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "attribute.names"}, {std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "attribute.values"}, @@ -34,13 +31,13 @@ void OpenTelemetrySpanLogElement::appendToBlock(MutableColumns & columns) const { size_t i = 0; - columns[i++]->insert(DateLUT::instance().toDayNum(start_time)); - columns[i++]->insert(start_time); columns[i++]->insert(UInt128(Int128(trace_id))); columns[i++]->insert(span_id); columns[i++]->insert(parent_span_id); columns[i++]->insert(operation_name); + columns[i++]->insert(start_time); columns[i++]->insert(finish_time); + columns[i++]->insert(DateLUT::instance().toDayNum(finish_time)); columns[i++]->insert(attribute_names); columns[i++]->insert(attribute_values); } diff --git a/src/Interpreters/OpenTelemetryLog.h b/src/Interpreters/OpenTelemetryLog.h index 73ad5382c95..e2906a99a02 100644 --- a/src/Interpreters/OpenTelemetryLog.h +++ b/src/Interpreters/OpenTelemetryLog.h @@ -23,8 +23,9 @@ struct OpenTelemetrySpan UInt64 span_id; UInt64 parent_span_id; std::string operation_name; - time_t start_time{}; - time_t finish_time{}; + time_t start_time; + time_t finish_time; + UInt64 duration_ns; Array attribute_names; Array attribute_values; // I don't understand how Links work, namely, which direction should they diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 1da12d993ea..141a9ade57f 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -138,20 +138,26 @@ static void logQuery(const String & query, const Context & context, bool interna } else { - const auto & current_query_id = context.getClientInfo().current_query_id; - const auto & initial_query_id = context.getClientInfo().initial_query_id; - const auto & current_user = context.getClientInfo().current_user; + const auto & client_info = context.getClientInfo(); + + const auto & current_query_id = client_info.current_query_id; + const auto & initial_query_id =
client_info.initial_query_id; + const auto & current_user = client_info.current_user; LOG_DEBUG(&Poco::Logger::get("executeQuery"), "(from {}{}{}) {}", - context.getClientInfo().current_address.toString(), - (current_user != "default" ? ", user: " + context.getClientInfo().current_user : ""), + client_info.current_address.toString(), + (current_user != "default" ? ", user: " + current_user : ""), (!initial_query_id.empty() && current_query_id != initial_query_id ? ", initial_query_id: " + initial_query_id : std::string()), joinLines(query)); - LOG_TRACE(&Poco::Logger::get("executeQuery"), - "OpenTelemetry trace id {:x}, span id {}, parent span id {}", - context.getClientInfo().opentelemetry_trace_id, context.getClientInfo().opentelemetry_span_id, - context.getClientInfo().opentelemetry_parent_span_id); + if (client_info.opentelemetry_trace_id) + { + LOG_TRACE(&Poco::Logger::get("executeQuery"), + "OpenTelemetry trace id {:x}, span id {}, parent span id {}", + client_info.opentelemetry_trace_id, + client_info.opentelemetry_span_id, + client_info.opentelemetry_parent_span_id); + } } } @@ -222,7 +228,9 @@ static void onExceptionBeforeStart(const String & query_for_logging, Context & c if (auto query_log = context.getQueryLog()) query_log->add(elem); - if (auto opentelemetry_log = context.getOpenTelemetryLog()) + if (auto opentelemetry_log = context.getOpenTelemetryLog(); + context.getClientInfo().opentelemetry_trace_id + && opentelemetry_log) { OpenTelemetrySpanLogElement span; span.trace_id = context.getClientInfo().opentelemetry_trace_id; @@ -231,20 +239,21 @@ static void onExceptionBeforeStart(const String & query_for_logging, Context & c span.operation_name = "query"; span.start_time = current_time; span.finish_time = current_time; + span.duration_ns = 0; // keep values synchronized to type enum in QueryLogElement::createBlock - span.attribute_names.push_back("status"); + span.attribute_names.push_back("clickhouse.query_status"); span.attribute_values.push_back("ExceptionBeforeStart"); - span.attribute_names.push_back("query"); + span.attribute_names.push_back("db.statement"); span.attribute_values.push_back(elem.query); - span.attribute_names.push_back("query_id"); + span.attribute_names.push_back("clickhouse.query_id"); span.attribute_values.push_back(elem.client_info.current_query_id); if (!context.getClientInfo().opentelemetry_tracestate.empty()) { - span.attribute_names.push_back("tracestate"); + span.attribute_names.push_back("clickhouse.tracestate"); span.attribute_values.push_back( context.getClientInfo().opentelemetry_tracestate); } @@ -285,7 +294,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl( bool has_query_tail, ReadBuffer * istr) { - time_t current_time = time(nullptr); + const time_t current_time = time(nullptr); /// If we are already executing a query and it requires an internal query to be executed, then /// don't replace the thread context with the given one (it can be temporary). Otherwise, attach the context to the thread.
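The spans written by this code keep their attributes as two parallel arrays, attribute.names and attribute.values, as the draft document above notes. A hedged sketch of reading them back with the same ARRAY JOIN pattern that this patch's tests use (the client invocation and the limit are illustrative, not taken from the patch):

    clickhouse-client -q "
        select operation_name, name, value
        from system.opentelemetry_log
        array join attribute.names as name, attribute.values as value
        where operation_name = 'query'
        limit 10
    "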
@@ -621,7 +630,9 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl( query_log->add(elem); } - if (auto opentelemetry_log = context.getOpenTelemetryLog()) + if (auto opentelemetry_log = context.getOpenTelemetryLog(); + context.getClientInfo().opentelemetry_trace_id + && opentelemetry_log) { OpenTelemetrySpanLogElement span; span.trace_id = context.getClientInfo().opentelemetry_trace_id; @@ -629,20 +640,21 @@ span.parent_span_id = context.getClientInfo().opentelemetry_parent_span_id; span.operation_name = "query"; span.start_time = elem.query_start_time; - span.finish_time = time(nullptr); // current time + span.finish_time = elem.event_time; + span.duration_ns = elapsed_seconds * 1000000000; // keep values synchronized to type enum in QueryLogElement::createBlock - span.attribute_names.push_back("status"); + span.attribute_names.push_back("clickhouse.query_status"); span.attribute_values.push_back("QueryFinish"); - span.attribute_names.push_back("query"); + span.attribute_names.push_back("db.statement"); span.attribute_values.push_back(elem.query); - span.attribute_names.push_back("query_id"); + span.attribute_names.push_back("clickhouse.query_id"); span.attribute_values.push_back(elem.client_info.current_query_id); if (!context.getClientInfo().opentelemetry_tracestate.empty()) { - span.attribute_names.push_back("tracestate"); + span.attribute_names.push_back("clickhouse.tracestate"); span.attribute_values.push_back( context.getClientInfo().opentelemetry_tracestate); } diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index e83bbb02cad..49f737c9550 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -850,12 +850,6 @@ void TCPHandler::receiveQuery() if (client_revision >= DBMS_MIN_REVISION_WITH_CLIENT_INFO) client_info.read(*in, client_revision); - // It is convenient to generate default OpenTelemetry trace id and default - // query id together. ClientInfo might contain upstream trace id, so we - // decide whether to use the default ids after we have received the ClientInfo. - // We also set up the parent span id while we're at it. - query_context->setCurrentQueryId(state.query_id); /// For better support of old clients, that does not send ClientInfo. if (client_info.query_kind == ClientInfo::QueryKind::NO_QUERY) { @@ -884,8 +878,11 @@ void TCPHandler::receiveQuery() /// Per query settings are also passed via TCP. /// We need to check them before applying due to they can violate the settings constraints. - auto settings_format = (client_revision >= DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS) ? SettingsWriteFormat::STRINGS_WITH_FLAGS : SettingsWriteFormat::BINARY; + auto settings_format = + (client_revision >= DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS) + ? SettingsWriteFormat::STRINGS_WITH_FLAGS + : SettingsWriteFormat::BINARY; + Settings passed_settings; passed_settings.read(*in, settings_format); auto settings_changes = passed_settings.changes(); @@ -900,12 +897,23 @@ void TCPHandler::receiveQuery() query_context->clampToSettingsConstraints(settings_changes); } query_context->applySettingsChanges(settings_changes); - const Settings & settings = query_context->getSettingsRef(); + + // Use the received query id, or generate a random default. It is convenient + // to also generate the default OpenTelemetry trace id at the same time, and + // and and set the trace parent.
+ // Why is this done here and not earlier: + // 1) ClientInfo might contain upstream trace id, so we decide whether to use + // the default ids after we have received the ClientInfo. + // 2) There is the opentelemetry_start_trace_probability setting that + // controls when we start a new trace. It can be changed via Native protocol, + // so we have to apply the changes first. + query_context->setCurrentQueryId(state.query_id); /// Sync timeouts on client and server during current query to avoid dangling queries on server /// NOTE: We use settings.send_timeout for the receive timeout and vice versa (change arguments ordering in TimeoutSetter), /// because settings.send_timeout is client-side setting which has opposite meaning on the server side. /// NOTE: these settings are applied only for current connection (not for distributed tables' connections) + const Settings & settings = query_context->getSettingsRef(); state.timeout_setter = std::make_unique<TimeoutSetter>(socket(), settings.receive_timeout, settings.send_timeout); readVarUInt(stage, *in); diff --git a/tests/queries/0_stateless/01455_opentelemetry_distributed.reference b/tests/queries/0_stateless/01455_opentelemetry_distributed.reference index 5993b628ad4..176626befa7 100644 --- a/tests/queries/0_stateless/01455_opentelemetry_distributed.reference +++ b/tests/queries/0_stateless/01455_opentelemetry_distributed.reference @@ -1,6 +1,10 @@ +===http=== 1 4 1 +===native=== 1 2 1 +===sampled=== +1 1 diff --git a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh index df5c194b2be..1cb65113179 100755 --- a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh +++ b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh @@ -24,29 +24,60 @@ select count(distinct value) where trace_id = reinterpretAsUUID(reverse(unhex('$trace_id'))) and operation_name = 'query' - and name = 'tracestate' + and name = 'clickhouse.tracestate' and length(value) > 0 ; " } # Generate some random trace id so that the previous runs of the test do not interfere. +echo "===http===" trace_id=$(${CLICKHOUSE_CLIENT} -q "select lower(hex(reverse(reinterpretAsString(generateUUIDv4()))))") # Check that the HTTP traceparent is read, and then passed through `remote` table function. # We expect 4 queries, because there are two DESC TABLE queries for the shard. # This is bug-ish, see https://github.com/ClickHouse/ClickHouse/issues/14228 -${CLICKHOUSE_CURL} --header "traceparent: 00-$trace_id-0000000000000010-01" --header "tracestate: some custom state" "http://localhost:8123/" --get --data-urlencode "query=select 1 from remote('127.0.0.2', system, one)" +${CLICKHOUSE_CURL} \ + --header "traceparent: 00-$trace_id-0000000000000010-01" \ + --header "tracestate: some custom state" "http://localhost:8123/" \ + --get \ + --data-urlencode "query=select 1 from remote('127.0.0.2', system, one)" check_log # With another trace id, check that clickhouse-client accepts traceparent, and # that it is passed through URL table function. We expect two query spans, one # for the initial query, and one for the HTTP query.
+echo "===native===" trace_id=$(${CLICKHOUSE_CLIENT} -q "select lower(hex(reverse(reinterpretAsString(generateUUIDv4()))))") -${CLICKHOUSE_CLIENT} --opentelemetry-traceparent "00-$trace_id-0000000000000020-02" --opentelemetry-tracestate "another custom state" --query " - select * from url('http://127.0.0.2:8123/?query=select%201', CSV, 'a int') -" +${CLICKHOUSE_CLIENT} \ + --opentelemetry-traceparent "00-$trace_id-0000000000000020-02" \ + --opentelemetry-tracestate "another custom state" \ + --query "select * from url('http://127.0.0.2:8123/?query=select%201', CSV, 'a int')" check_log + +# Test sampled tracing. The traces should be started with the specified probability, +# only for initial queries. +echo "===sampled===" +query_id=$(${CLICKHOUSE_CLIENT} -q "select lower(hex(reverse(reinterpretAsString(generateUUIDv4()))))") + +for _ in {1..200} +do + ${CLICKHOUSE_CLIENT} \ + --opentelemetry_start_trace_probability=0.1 \ + --query_id "$query_id" \ + --query "select 1 from remote('127.0.0.2', system, one) format Null" +done + +${CLICKHOUSE_CLIENT} -q " + with count(*) as c + -- expect 200 * 0.1 = 20 sampled events on average + select c > 10, c < 30 + from system.opentelemetry_log + array join attribute.names as name, attribute.values as value + where name = 'clickhouse.query_id' + and value = '$query_id' + ; +" From f0c5459807baa594c5138a26a0fdd6b3714f6c9c Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 9 Sep 2020 14:05:49 +0300 Subject: [PATCH 055/432] tests --- src/Core/BaseSettings.h | 22 +++++++++++++------ .../queries/0_stateless/00396_uuid.reference | 1 + tests/queries/0_stateless/00396_uuid.sql | 6 +++++ .../01455_opentelemetry_distributed.reference | 2 +- .../01455_opentelemetry_distributed.sh | 2 +- 5 files changed, 24 insertions(+), 9 deletions(-) diff --git a/src/Core/BaseSettings.h b/src/Core/BaseSettings.h index 7de87b345c1..b193fdd4c93 100644 --- a/src/Core/BaseSettings.h +++ b/src/Core/BaseSettings.h @@ -390,13 +390,21 @@ String BaseSettings::valueToStringUtil(const std::string_view & name, c template Field BaseSettings::stringToValueUtil(const std::string_view & name, const String & str) { - const auto & accessor = Traits::Accessor::instance(); - if (size_t index = accessor.find(name); index != static_cast(-1)) - return accessor.stringToValueUtil(index, str); - if constexpr (Traits::allow_custom_settings) - return Field::restoreFromDump(str); - else - BaseSettingsHelpers::throwSettingNotFound(name); + try + { + const auto & accessor = Traits::Accessor::instance(); + if (size_t index = accessor.find(name); index != static_cast(-1)) + return accessor.stringToValueUtil(index, str); + if constexpr (Traits::allow_custom_settings) + return Field::restoreFromDump(str); + else + BaseSettingsHelpers::throwSettingNotFound(name); + } + catch (Exception & e) + { + e.addMessage("while parsing value '{}' for setting '{}'", str, name); + throw; + } } template diff --git a/tests/queries/0_stateless/00396_uuid.reference b/tests/queries/0_stateless/00396_uuid.reference index fe92b3684a6..d70322ec4c1 100644 --- a/tests/queries/0_stateless/00396_uuid.reference +++ b/tests/queries/0_stateless/00396_uuid.reference @@ -5,3 +5,4 @@ 01234567-89ab-cdef-0123-456789abcdef 01234567-89ab-cdef-0123-456789abcdef 01234567-89ab-cdef-0123-456789abcdef 01234567-89ab-cdef-0123-456789abcdef 01234567-89ab-cdef-0123-456789abcdef 01234567-89ab-cdef-0123-456789abcdef 3f1ed72e-f7fe-4459-9cbe-95fe9298f845 +1 diff --git a/tests/queries/0_stateless/00396_uuid.sql b/tests/queries/0_stateless/00396_uuid.sql 
index d671ce844e2..863ff13f5c2 100644 --- a/tests/queries/0_stateless/00396_uuid.sql +++ b/tests/queries/0_stateless/00396_uuid.sql @@ -5,3 +5,9 @@ SELECT hex(UUIDStringToNum(materialize('01234567-89ab-cdef-0123-456789abcdef'))) SELECT '01234567-89ab-cdef-0123-456789abcdef' AS str, UUIDNumToString(UUIDStringToNum(str)), UUIDNumToString(UUIDStringToNum(toFixedString(str, 36))); SELECT materialize('01234567-89ab-cdef-0123-456789abcdef') AS str, UUIDNumToString(UUIDStringToNum(str)), UUIDNumToString(UUIDStringToNum(toFixedString(str, 36))); SELECT toString(toUUID('3f1ed72e-f7fe-4459-9cbe-95fe9298f845')); + +-- conversion back and forth to big-endian hex string +with generateUUIDv4() as uuid, + identity(lower(hex(reverse(reinterpretAsString(uuid))))) as str, + reinterpretAsUUID(reverse(unhex(str))) uuid2 +select uuid = uuid2; diff --git a/tests/queries/0_stateless/01455_opentelemetry_distributed.reference b/tests/queries/0_stateless/01455_opentelemetry_distributed.reference index 176626befa7..e0eadd91a97 100644 --- a/tests/queries/0_stateless/01455_opentelemetry_distributed.reference +++ b/tests/queries/0_stateless/01455_opentelemetry_distributed.reference @@ -7,4 +7,4 @@ 2 1 ===sampled=== -1 1 +OK diff --git a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh index 1cb65113179..d3114ab66ff 100755 --- a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh +++ b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh @@ -74,7 +74,7 @@ done ${CLICKHOUSE_CLIENT} -q " with count(*) as c -- expect 200 * 0.1 = 20 sampled events on average - select c > 10, c < 30 + select if(c > 10 and c < 30, 'OK', 'fail: ' || toString(c)) from system.opentelemetry_log array join attribute.names as name, attribute.values as value where name = 'clickhouse.query_id' From 8212d7fa2838c3f8c5251cc9070ecda6f50b29cb Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Thu, 17 Sep 2020 15:14:30 +0300 Subject: [PATCH 056/432] fix the uuid test --- tests/queries/0_stateless/00396_uuid.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/00396_uuid.sql b/tests/queries/0_stateless/00396_uuid.sql index 863ff13f5c2..9d8b48bddb0 100644 --- a/tests/queries/0_stateless/00396_uuid.sql +++ b/tests/queries/0_stateless/00396_uuid.sql @@ -9,5 +9,5 @@ SELECT toString(toUUID('3f1ed72e-f7fe-4459-9cbe-95fe9298f845')); -- conversion back and forth to big-endian hex string with generateUUIDv4() as uuid, identity(lower(hex(reverse(reinterpretAsString(uuid))))) as str, - reinterpretAsUUID(reverse(unhex(str))) uuid2 + reinterpretAsUUID(reverse(unhex(str))) as uuid2 select uuid = uuid2; From 7b8ad02a124cad4cb90280cac856b60d54551fda Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Thu, 17 Sep 2020 15:14:49 +0300 Subject: [PATCH 057/432] cleanup --- src/Server/HTTPHandler.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Server/HTTPHandler.cpp b/src/Server/HTTPHandler.cpp index fb630010198..2f1e978d48f 100644 --- a/src/Server/HTTPHandler.cpp +++ b/src/Server/HTTPHandler.cpp @@ -326,8 +326,6 @@ void HTTPHandler::processQuery( } context.getClientInfo().opentelemetry_tracestate = request.get("tracestate", ""); - - } /// The client can pass a HTTP header indicating supported compression method (gzip or deflate). 
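The 00396_uuid.sql test added above pins down the endianness contract behind all the trace id handling in this series: ClickHouse keeps UUID bytes in reversed order relative to the canonical big-endian textual form, so a W3C-style hex trace id is produced with reverse(reinterpretAsString(...)) and parsed back with reinterpretAsUUID(reverse(unhex(...))). A minimal sketch of the round trip, using the same functions as the test (the identity() wrapper follows the test; presumably it keeps the alias from being substituted before evaluation):

    clickhouse-client -q "
        with generateUUIDv4() as uuid,
             identity(lower(hex(reverse(reinterpretAsString(uuid))))) as str
        select uuid = reinterpretAsUUID(reverse(unhex(str)))
    "  # expected to print 1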
From a374541214c7a4bece9bf77df132eda23e4c6e85 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Thu, 17 Sep 2020 15:15:05 +0300 Subject: [PATCH 058/432] straighten the protocol version --- base/daemon/BaseDaemon.cpp | 2 +- programs/copier/ClusterCopierApp.cpp | 2 +- programs/server/Server.cpp | 2 +- src/Client/Connection.cpp | 4 +- src/Common/ClickHouseRevision.cpp | 2 +- src/Common/ClickHouseRevision.h | 2 +- src/Common/StatusFile.cpp | 2 +- src/Common/config_version.h.in | 13 +---- src/Core/Defines.h | 4 +- src/DataStreams/TemporaryFileStream.h | 3 +- src/Interpreters/Aggregator.cpp | 2 +- src/Interpreters/ClientInfo.cpp | 9 ++-- src/Interpreters/ClientInfo.h | 2 +- src/Interpreters/CrashLog.cpp | 2 +- src/Interpreters/QueryLog.cpp | 4 +- src/Interpreters/QueryThreadLog.cpp | 2 +- src/Interpreters/TextLog.cpp | 2 +- src/Interpreters/TraceLog.cpp | 2 +- .../Transforms/AggregatingTransform.cpp | 3 +- src/Server/TCPHandler.cpp | 53 +++++++++---------- src/Server/TCPHandler.h | 2 +- src/Storages/Distributed/DirectoryMonitor.cpp | 7 ++- .../DistributedBlockOutputStream.cpp | 7 ++- .../System/StorageSystemProcesses.cpp | 2 +- 24 files changed, 58 insertions(+), 77 deletions(-) diff --git a/base/daemon/BaseDaemon.cpp b/base/daemon/BaseDaemon.cpp index 78801e71a6f..22455d09cf2 100644 --- a/base/daemon/BaseDaemon.cpp +++ b/base/daemon/BaseDaemon.cpp @@ -781,7 +781,7 @@ void BaseDaemon::initializeTerminationAndSignalProcessing() void BaseDaemon::logRevision() const { Poco::Logger::root().information("Starting " + std::string{VERSION_FULL} - + " with revision " + std::to_string(ClickHouseRevision::get()) + + " with revision " + std::to_string(ClickHouseRevision::getVersionRevision()) + ", " + build_id_info + ", PID " + std::to_string(getpid())); } diff --git a/programs/copier/ClusterCopierApp.cpp b/programs/copier/ClusterCopierApp.cpp index ec64e118f45..08a7e50a9d7 100644 --- a/programs/copier/ClusterCopierApp.cpp +++ b/programs/copier/ClusterCopierApp.cpp @@ -105,7 +105,7 @@ void ClusterCopierApp::mainImpl() ThreadStatus thread_status; auto * log = &logger(); - LOG_INFO(log, "Starting clickhouse-copier (id {}, host_id {}, path {}, revision {})", process_id, host_id, process_path, ClickHouseRevision::get()); + LOG_INFO(log, "Starting clickhouse-copier (id {}, host_id {}, path {}, revision {})", process_id, host_id, process_path, ClickHouseRevision::getVersionRevision()); SharedContextHolder shared_context = Context::createShared(); auto context = std::make_unique(Context::createGlobal(shared_context.get())); diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 56778b8dd69..3f3f8ccef3a 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -256,7 +256,7 @@ int Server::main(const std::vector & /*args*/) #endif #endif - CurrentMetrics::set(CurrentMetrics::Revision, ClickHouseRevision::get()); + CurrentMetrics::set(CurrentMetrics::Revision, ClickHouseRevision::getVersionRevision()); CurrentMetrics::set(CurrentMetrics::VersionInteger, ClickHouseRevision::getVersionInteger()); if (ThreadFuzzer::instance().isEffective()) diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index ed27a878b5a..0adfd6a0103 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -162,14 +162,12 @@ void Connection::sendHello() || has_control_character(password)) throw Exception("Parameters 'default_database', 'user' and 'password' must not contain ASCII control characters", ErrorCodes::BAD_ARGUMENTS); - auto client_revision = 
ClickHouseRevision::get(); - writeVarUInt(Protocol::Client::Hello, *out); writeStringBinary((DBMS_NAME " ") + client_name, *out); writeVarUInt(DBMS_VERSION_MAJOR, *out); writeVarUInt(DBMS_VERSION_MINOR, *out); // NOTE For backward compatibility of the protocol, client cannot send its version_patch. - writeVarUInt(client_revision, *out); + writeVarUInt(DBMS_TCP_PROTOCOL_VERSION, *out); writeStringBinary(default_database, *out); writeStringBinary(user, *out); writeStringBinary(password, *out); diff --git a/src/Common/ClickHouseRevision.cpp b/src/Common/ClickHouseRevision.cpp index 0b81026adca..2c52ebb064a 100644 --- a/src/Common/ClickHouseRevision.cpp +++ b/src/Common/ClickHouseRevision.cpp @@ -6,6 +6,6 @@ namespace ClickHouseRevision { - unsigned get() { return VERSION_REVISION; } + unsigned getVersionRevision() { return VERSION_REVISION; } unsigned getVersionInteger() { return VERSION_INTEGER; } } diff --git a/src/Common/ClickHouseRevision.h b/src/Common/ClickHouseRevision.h index 1d097a5bf89..86d1e3db334 100644 --- a/src/Common/ClickHouseRevision.h +++ b/src/Common/ClickHouseRevision.h @@ -2,6 +2,6 @@ namespace ClickHouseRevision { - unsigned get(); + unsigned getVersionRevision(); unsigned getVersionInteger(); } diff --git a/src/Common/StatusFile.cpp b/src/Common/StatusFile.cpp index 7c6bbf814a0..b21454c9ed8 100644 --- a/src/Common/StatusFile.cpp +++ b/src/Common/StatusFile.cpp @@ -37,7 +37,7 @@ StatusFile::FillFunction StatusFile::write_full_info = [](WriteBuffer & out) { out << "PID: " << getpid() << "\n" << "Started at: " << LocalDateTime(time(nullptr)) << "\n" - << "Revision: " << ClickHouseRevision::get() << "\n"; + << "Revision: " << ClickHouseRevision::getVersionRevision() << "\n"; }; diff --git a/src/Common/config_version.h.in b/src/Common/config_version.h.in index c3c0c6df87b..880824f8ad0 100644 --- a/src/Common/config_version.h.in +++ b/src/Common/config_version.h.in @@ -2,18 +2,7 @@ // .h autogenerated by cmake! -#cmakedefine01 USE_DBMS_TCP_PROTOCOL_VERSION - -#if USE_DBMS_TCP_PROTOCOL_VERSION - #include "Core/Defines.h" - #ifndef VERSION_REVISION - #define VERSION_REVISION DBMS_TCP_PROTOCOL_VERSION - #endif -#else - #cmakedefine VERSION_REVISION @VERSION_REVISION@ -#endif - - +#cmakedefine VERSION_REVISION @VERSION_REVISION@ #cmakedefine VERSION_NAME "@VERSION_NAME@" #define DBMS_NAME VERSION_NAME #cmakedefine VERSION_MAJOR @VERSION_MAJOR@ diff --git a/src/Core/Defines.h b/src/Core/Defines.h index d19513d1434..0b8e0b8ae27 100644 --- a/src/Core/Defines.h +++ b/src/Core/Defines.h @@ -68,10 +68,10 @@ #define DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS 54429 /// Minimum revision supporting OpenTelemetry -#define DBMS_MIN_REVISION_WITH_OPENTELEMETRY 54227 +#define DBMS_MIN_REVISION_WITH_OPENTELEMETRY 54441 /// Version of ClickHouse TCP protocol. Set to git tag with latest protocol change. -#define DBMS_TCP_PROTOCOL_VERSION 54227 +#define DBMS_TCP_PROTOCOL_VERSION 54441 /// The boundary on which the blocks for asynchronous file operations should be aligned. 
#define DEFAULT_AIO_FILE_BLOCK_SIZE 4096 diff --git a/src/DataStreams/TemporaryFileStream.h b/src/DataStreams/TemporaryFileStream.h index 6871800a540..b481cef1bb2 100644 --- a/src/DataStreams/TemporaryFileStream.h +++ b/src/DataStreams/TemporaryFileStream.h @@ -1,6 +1,5 @@ #pragma once -#include #include #include #include @@ -23,7 +22,7 @@ struct TemporaryFileStream TemporaryFileStream(const std::string & path) : file_in(path) , compressed_in(file_in) - , block_in(std::make_shared(compressed_in, ClickHouseRevision::get())) + , block_in(std::make_shared(compressed_in, DBMS_TCP_PROTOCOL_VERSION)) {} TemporaryFileStream(const std::string & path, const Block & header_) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 86a33dccb53..466370a22a2 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -844,7 +844,7 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, co const std::string & path = file->path(); WriteBufferFromFile file_buf(path); CompressedWriteBuffer compressed_buf(file_buf); - NativeBlockOutputStream block_out(compressed_buf, ClickHouseRevision::get(), getHeader(false)); + NativeBlockOutputStream block_out(compressed_buf, DBMS_TCP_PROTOCOL_VERSION, getHeader(false)); LOG_DEBUG(log, "Writing part of aggregation data into temporary file {}.", path); ProfileEvents::increment(ProfileEvents::ExternalAggregationWritePart); diff --git a/src/Interpreters/ClientInfo.cpp b/src/Interpreters/ClientInfo.cpp index da9f14ec879..25cf9b8294e 100644 --- a/src/Interpreters/ClientInfo.cpp +++ b/src/Interpreters/ClientInfo.cpp @@ -5,7 +5,6 @@ #include #include #include -#include #include #if !defined(ARCADIA_BUILD) @@ -44,7 +43,7 @@ void ClientInfo::write(WriteBuffer & out, const UInt64 server_protocol_revision) writeBinary(client_name, out); writeVarUInt(client_version_major, out); writeVarUInt(client_version_minor, out); - writeVarUInt(client_revision, out); + writeVarUInt(client_tcp_protocol_version, out); } else if (interface == Interface::HTTP) { @@ -111,7 +110,7 @@ void ClientInfo::read(ReadBuffer & in, const UInt64 client_protocol_revision) readBinary(client_name, in); readVarUInt(client_version_major, in); readVarUInt(client_version_minor, in); - readVarUInt(client_revision, in); + readVarUInt(client_tcp_protocol_version, in); } else if (interface == Interface::HTTP) { @@ -130,7 +129,7 @@ void ClientInfo::read(ReadBuffer & in, const UInt64 client_protocol_revision) if (client_protocol_revision >= DBMS_MIN_REVISION_WITH_VERSION_PATCH) readVarUInt(client_version_patch, in); else - client_version_patch = client_revision; + client_version_patch = client_tcp_protocol_version; } // TODO what does it even mean to read this structure over HTTP? 
I thought @@ -244,7 +243,7 @@ void ClientInfo::fillOSUserHostNameAndVersionInfo() client_version_major = DBMS_VERSION_MAJOR; client_version_minor = DBMS_VERSION_MINOR; client_version_patch = DBMS_VERSION_PATCH; - client_revision = ClickHouseRevision::get(); + client_tcp_protocol_version = DBMS_TCP_PROTOCOL_VERSION; } diff --git a/src/Interpreters/ClientInfo.h b/src/Interpreters/ClientInfo.h index e41956eaee0..5b5c3b400eb 100644 --- a/src/Interpreters/ClientInfo.h +++ b/src/Interpreters/ClientInfo.h @@ -82,7 +82,7 @@ public: UInt64 client_version_major = 0; UInt64 client_version_minor = 0; UInt64 client_version_patch = 0; - unsigned client_revision = 0; + unsigned client_tcp_protocol_version = 0; /// For http HTTPMethod http_method = HTTPMethod::UNKNOWN; diff --git a/src/Interpreters/CrashLog.cpp b/src/Interpreters/CrashLog.cpp index 12fd57c33dc..9d84d5a18e9 100644 --- a/src/Interpreters/CrashLog.cpp +++ b/src/Interpreters/CrashLog.cpp @@ -49,7 +49,7 @@ void CrashLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insert(trace); columns[i++]->insert(trace_full); columns[i++]->insert(VERSION_FULL); - columns[i++]->insert(ClickHouseRevision::get()); + columns[i++]->insert(ClickHouseRevision::getVersionRevision()); String build_id_hex; #if defined(__ELF__) && !defined(__FreeBSD__) diff --git a/src/Interpreters/QueryLog.cpp b/src/Interpreters/QueryLog.cpp index 62dbc114633..75e0fae615a 100644 --- a/src/Interpreters/QueryLog.cpp +++ b/src/Interpreters/QueryLog.cpp @@ -118,7 +118,7 @@ void QueryLogElement::appendToBlock(MutableColumns & columns) const appendClientInfo(client_info, columns, i); - columns[i++]->insert(ClickHouseRevision::get()); + columns[i++]->insert(ClickHouseRevision::getVersionRevision()); { Array threads_array; @@ -172,7 +172,7 @@ void QueryLogElement::appendClientInfo(const ClientInfo & client_info, MutableCo columns[i++]->insert(client_info.os_user); columns[i++]->insert(client_info.client_hostname); columns[i++]->insert(client_info.client_name); - columns[i++]->insert(client_info.client_revision); + columns[i++]->insert(client_info.client_tcp_protocol_version); columns[i++]->insert(client_info.client_version_major); columns[i++]->insert(client_info.client_version_minor); columns[i++]->insert(client_info.client_version_patch); diff --git a/src/Interpreters/QueryThreadLog.cpp b/src/Interpreters/QueryThreadLog.cpp index 22ad60d96b4..e5a8cf7c5cf 100644 --- a/src/Interpreters/QueryThreadLog.cpp +++ b/src/Interpreters/QueryThreadLog.cpp @@ -93,7 +93,7 @@ void QueryThreadLogElement::appendToBlock(MutableColumns & columns) const QueryLogElement::appendClientInfo(client_info, columns, i); - columns[i++]->insert(ClickHouseRevision::get()); + columns[i++]->insert(ClickHouseRevision::getVersionRevision()); if (profile_counters) { diff --git a/src/Interpreters/TextLog.cpp b/src/Interpreters/TextLog.cpp index d166b24ef4f..243bf6d299a 100644 --- a/src/Interpreters/TextLog.cpp +++ b/src/Interpreters/TextLog.cpp @@ -62,7 +62,7 @@ void TextLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insert(logger_name); columns[i++]->insert(message); - columns[i++]->insert(ClickHouseRevision::get()); + columns[i++]->insert(ClickHouseRevision::getVersionRevision()); columns[i++]->insert(source_file); columns[i++]->insert(source_line); diff --git a/src/Interpreters/TraceLog.cpp b/src/Interpreters/TraceLog.cpp index c4fa7307b1a..f7e82032f49 100644 --- a/src/Interpreters/TraceLog.cpp +++ b/src/Interpreters/TraceLog.cpp @@ -43,7 +43,7 @@ void 
TraceLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insert(DateLUT::instance().toDayNum(event_time)); columns[i++]->insert(event_time); columns[i++]->insert(timestamp_ns); - columns[i++]->insert(ClickHouseRevision::get()); + columns[i++]->insert(ClickHouseRevision::getVersionRevision()); columns[i++]->insert(static_cast(trace_type)); columns[i++]->insert(thread_id); columns[i++]->insertData(query_id.data(), query_id.size()); diff --git a/src/Processors/Transforms/AggregatingTransform.cpp b/src/Processors/Transforms/AggregatingTransform.cpp index 42caf4b3446..0a97cc3d4cb 100644 --- a/src/Processors/Transforms/AggregatingTransform.cpp +++ b/src/Processors/Transforms/AggregatingTransform.cpp @@ -1,6 +1,5 @@ #include -#include #include #include #include @@ -56,7 +55,7 @@ namespace public: SourceFromNativeStream(const Block & header, const std::string & path) : ISource(header), file_in(path), compressed_in(file_in), - block_in(std::make_shared(compressed_in, ClickHouseRevision::get())) + block_in(std::make_shared(compressed_in, DBMS_TCP_PROTOCOL_VERSION)) { block_in->readPrefix(); } diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 2c7ca3ac6a4..ca8774c23ca 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -1,7 +1,6 @@ #include #include #include -#include #include #include #include @@ -183,7 +182,7 @@ void TCPHandler::runImpl() /// Should we send internal logs to client? const auto client_logs_level = query_context->getSettingsRef().send_logs_level; - if (client_revision >= DBMS_MIN_REVISION_WITH_SERVER_LOGS + if (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_SERVER_LOGS && client_logs_level != LogsLevel::none) { state.logs_queue = std::make_shared(); @@ -218,7 +217,7 @@ void TCPHandler::runImpl() state.need_receive_data_for_input = true; /// Send ColumnsDescription for input storage. 
- if (client_revision >= DBMS_MIN_REVISION_WITH_COLUMN_DEFAULTS_METADATA + if (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_COLUMN_DEFAULTS_METADATA && query_context->getSettingsRef().input_format_defaults_for_omitted_fields) { sendTableColumns(metadata_snapshot->getColumns()); @@ -248,7 +247,7 @@ void TCPHandler::runImpl() customizeContext(*query_context); - bool may_have_embedded_data = client_revision >= DBMS_MIN_REVISION_WITH_CLIENT_SUPPORT_EMBEDDED_DATA; + bool may_have_embedded_data = client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_CLIENT_SUPPORT_EMBEDDED_DATA; /// Processing Query state.io = executeQuery(state.query, *query_context, false, state.stage, may_have_embedded_data); @@ -482,7 +481,7 @@ void TCPHandler::processInsertQuery(const Settings & connection_settings) state.io.out->writePrefix(); /// Send ColumnsDescription for insertion table - if (client_revision >= DBMS_MIN_REVISION_WITH_COLUMN_DEFAULTS_METADATA) + if (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_COLUMN_DEFAULTS_METADATA) { const auto & table_id = query_context->getInsertionTable(); if (query_context->getSettingsRef().input_format_defaults_for_omitted_fields) @@ -638,7 +637,7 @@ void TCPHandler::processOrdinaryQueryWithProcessors() void TCPHandler::processTablesStatusRequest() { TablesStatusRequest request; - request.read(*in, client_revision); + request.read(*in, client_tcp_protocol_version); TablesStatusResponse response; for (const QualifiedTableName & table_name: request.tables) @@ -661,13 +660,13 @@ void TCPHandler::processTablesStatusRequest() } writeVarUInt(Protocol::Server::TablesStatusResponse, *out); - response.write(*out, client_revision); + response.write(*out, client_tcp_protocol_version); } void TCPHandler::receiveUnexpectedTablesStatusRequest() { TablesStatusRequest skip_request; - skip_request.read(*in, client_revision); + skip_request.read(*in, client_tcp_protocol_version); throw NetException("Unexpected packet TablesStatusRequest received from client", ErrorCodes::UNEXPECTED_PACKET_FROM_CLIENT); } @@ -742,7 +741,7 @@ void TCPHandler::receiveHello() readVarUInt(client_version_major, *in); readVarUInt(client_version_minor, *in); // NOTE For backward compatibility of the protocol, client cannot send its version_patch. - readVarUInt(client_revision, *in); + readVarUInt(client_tcp_protocol_version, *in); readStringBinary(default_database, *in); readStringBinary(user, *in); readStringBinary(password, *in); @@ -750,7 +749,7 @@ void TCPHandler::receiveHello() LOG_DEBUG(log, "Connected {} version {}.{}.{}, revision: {}{}{}.", client_name, client_version_major, client_version_minor, client_version_patch, - client_revision, + client_tcp_protocol_version, (!default_database.empty() ? ", database: " + default_database : ""), (!user.empty() ? 
", user: " + user : "")); @@ -781,12 +780,12 @@ void TCPHandler::sendHello() writeStringBinary(DBMS_NAME, *out); writeVarUInt(DBMS_VERSION_MAJOR, *out); writeVarUInt(DBMS_VERSION_MINOR, *out); - writeVarUInt(ClickHouseRevision::get(), *out); - if (client_revision >= DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE) + writeVarUInt(DBMS_TCP_PROTOCOL_VERSION, *out); + if (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE) writeStringBinary(DateLUT::instance().getTimeZone(), *out); - if (client_revision >= DBMS_MIN_REVISION_WITH_SERVER_DISPLAY_NAME) + if (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_SERVER_DISPLAY_NAME) writeStringBinary(server_display_name, *out); - if (client_revision >= DBMS_MIN_REVISION_WITH_VERSION_PATCH) + if (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_VERSION_PATCH) writeVarUInt(DBMS_VERSION_PATCH, *out); out->next(); } @@ -847,8 +846,8 @@ void TCPHandler::receiveQuery() /// Client info ClientInfo & client_info = query_context->getClientInfo(); - if (client_revision >= DBMS_MIN_REVISION_WITH_CLIENT_INFO) - client_info.read(*in, client_revision); + if (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_CLIENT_INFO) + client_info.read(*in, client_tcp_protocol_version); /// For better support of old clients, that does not send ClientInfo. if (client_info.query_kind == ClientInfo::QueryKind::NO_QUERY) @@ -858,7 +857,7 @@ void TCPHandler::receiveQuery() client_info.client_version_major = client_version_major; client_info.client_version_minor = client_version_minor; client_info.client_version_patch = client_version_patch; - client_info.client_revision = client_revision; + client_info.client_tcp_protocol_version = client_tcp_protocol_version; } /// Set fields, that are known apriori. @@ -879,7 +878,7 @@ void TCPHandler::receiveQuery() /// Per query settings are also passed via TCP. /// We need to check them before applying due to they can violate the settings constraints. auto settings_format = - (client_revision >= DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS) + (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS) ? SettingsWriteFormat::STRINGS_WITH_FLAGS : SettingsWriteFormat::BINARY; @@ -900,7 +899,7 @@ void TCPHandler::receiveQuery() // Use the received query id, or generate a random default. It is convenient // to also generate the default OpenTelemetry trace id at the same time, and - // and and set the trace parent. + // set the trace parent. // Why is this done here and not earlier: // 1) ClientInfo might contain upstream trace id, so we decide whether to use // the default ids after we have received the ClientInfo. @@ -933,11 +932,11 @@ void TCPHandler::receiveUnexpectedQuery() readStringBinary(skip_string, *in); ClientInfo skip_client_info; - if (client_revision >= DBMS_MIN_REVISION_WITH_CLIENT_INFO) - skip_client_info.read(*in, client_revision); + if (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_CLIENT_INFO) + skip_client_info.read(*in, client_tcp_protocol_version); Settings skip_settings; - auto settings_format = (client_revision >= DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS) ? SettingsWriteFormat::STRINGS_WITH_FLAGS + auto settings_format = (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS) ? 
SettingsWriteFormat::STRINGS_WITH_FLAGS : SettingsWriteFormat::BINARY; skip_settings.read(*in, settings_format); @@ -1011,7 +1010,7 @@ void TCPHandler::receiveUnexpectedData() auto skip_block_in = std::make_shared( *maybe_compressed_in, last_block_in.header, - client_revision); + client_tcp_protocol_version); skip_block_in->read(); throw NetException("Unexpected packet Data received from client", ErrorCodes::UNEXPECTED_PACKET_FROM_CLIENT); @@ -1038,7 +1037,7 @@ void TCPHandler::initBlockInput() state.block_in = std::make_shared( *state.maybe_compressed_in, header, - client_revision); + client_tcp_protocol_version); } } @@ -1069,7 +1068,7 @@ void TCPHandler::initBlockOutput(const Block & block) state.block_out = std::make_shared( *state.maybe_compressed_out, - client_revision, + client_tcp_protocol_version, block.cloneEmpty(), !connection_context.getSettingsRef().low_cardinality_allow_in_native_format); } @@ -1082,7 +1081,7 @@ void TCPHandler::initLogsBlockOutput(const Block & block) /// Use uncompressed stream since log blocks usually contain only one row state.logs_block_out = std::make_shared( *out, - client_revision, + client_tcp_protocol_version, block.cloneEmpty(), !connection_context.getSettingsRef().low_cardinality_allow_in_native_format); } @@ -1186,7 +1185,7 @@ void TCPHandler::sendProgress() { writeVarUInt(Protocol::Server::Progress, *out); auto increment = state.progress.fetchAndResetPiecewiseAtomically(); - increment.write(*out, client_revision); + increment.write(*out, client_tcp_protocol_version); out->next(); } diff --git a/src/Server/TCPHandler.h b/src/Server/TCPHandler.h index 3fec89264be..3c7747481f0 100644 --- a/src/Server/TCPHandler.h +++ b/src/Server/TCPHandler.h @@ -124,7 +124,7 @@ private: UInt64 client_version_major = 0; UInt64 client_version_minor = 0; UInt64 client_version_patch = 0; - UInt64 client_revision = 0; + UInt64 client_tcp_protocol_version = 0; Context connection_context; std::optional query_context; diff --git a/src/Storages/Distributed/DirectoryMonitor.cpp b/src/Storages/Distributed/DirectoryMonitor.cpp index b67d3283ac9..61708dce325 100644 --- a/src/Storages/Distributed/DirectoryMonitor.cpp +++ b/src/Storages/Distributed/DirectoryMonitor.cpp @@ -3,7 +3,6 @@ #include #include #include -#include #include #include #include @@ -357,7 +356,7 @@ void StorageDistributedDirectoryMonitor::readHeader( UInt64 initiator_revision; readVarUInt(initiator_revision, header_buf); - if (ClickHouseRevision::get() < initiator_revision) + if (DBMS_TCP_PROTOCOL_VERSION < initiator_revision) { LOG_WARNING(log, "ClickHouse shard version is older than ClickHouse initiator version. 
It may lack support for new features."); } @@ -576,7 +575,7 @@ public: explicit DirectoryMonitorBlockInputStream(const String & file_name) : in(file_name) , decompressing_in(in) - , block_in(decompressing_in, ClickHouseRevision::get()) + , block_in(decompressing_in, DBMS_TCP_PROTOCOL_VERSION) , log{&Poco::Logger::get("DirectoryMonitorBlockInputStream")} { Settings insert_settings; @@ -681,7 +680,7 @@ void StorageDistributedDirectoryMonitor::processFilesWithBatching(const std::map readHeader(in, insert_settings, insert_query, client_info, log); CompressedReadBuffer decompressing_in(in); - NativeBlockInputStream block_in(decompressing_in, ClickHouseRevision::get()); + NativeBlockInputStream block_in(decompressing_in, DBMS_TCP_PROTOCOL_VERSION); block_in.readPrefix(); while (Block block = block_in.read()) diff --git a/src/Storages/Distributed/DistributedBlockOutputStream.cpp b/src/Storages/Distributed/DistributedBlockOutputStream.cpp index 172a398258f..f08cdf76cbf 100644 --- a/src/Storages/Distributed/DistributedBlockOutputStream.cpp +++ b/src/Storages/Distributed/DistributedBlockOutputStream.cpp @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include @@ -583,16 +582,16 @@ void DistributedBlockOutputStream::writeToShard(const Block & block, const std:: { WriteBufferFromFile out{first_file_tmp_path}; CompressedWriteBuffer compress{out}; - NativeBlockOutputStream stream{compress, ClickHouseRevision::get(), block.cloneEmpty()}; + NativeBlockOutputStream stream{compress, DBMS_TCP_PROTOCOL_VERSION, block.cloneEmpty()}; /// Prepare the header. /// We wrap the header into a string for compatibility with older versions: /// a shard will be able to read the header partially and ignore other parts based on its version. WriteBufferFromOwnString header_buf; - writeVarUInt(ClickHouseRevision::get(), header_buf); + writeVarUInt(DBMS_TCP_PROTOCOL_VERSION, header_buf); writeStringBinary(query_string, header_buf); context.getSettingsRef().write(header_buf); - context.getClientInfo().write(header_buf, ClickHouseRevision::get()); + context.getClientInfo().write(header_buf, DBMS_TCP_PROTOCOL_VERSION); /// Add new fields here, for example: /// writeVarUInt(my_new_data, header_buf); diff --git a/src/Storages/System/StorageSystemProcesses.cpp b/src/Storages/System/StorageSystemProcesses.cpp index c65a6b78e41..d899a1708bf 100644 --- a/src/Storages/System/StorageSystemProcesses.cpp +++ b/src/Storages/System/StorageSystemProcesses.cpp @@ -91,7 +91,7 @@ void StorageSystemProcesses::fillData(MutableColumns & res_columns, const Contex res_columns[i++]->insert(process.client_info.os_user); res_columns[i++]->insert(process.client_info.client_hostname); res_columns[i++]->insert(process.client_info.client_name); - res_columns[i++]->insert(process.client_info.client_revision); + res_columns[i++]->insert(process.client_info.client_tcp_protocol_version); res_columns[i++]->insert(process.client_info.client_version_major); res_columns[i++]->insert(process.client_info.client_version_minor); res_columns[i++]->insert(process.client_info.client_version_patch); From ab19bb25fd8c286713580649ad1c183d493ea5dc Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 22 Sep 2020 14:31:33 +0300 Subject: [PATCH 059/432] disable percpu arenas --- contrib/jemalloc-cmake/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/jemalloc-cmake/CMakeLists.txt b/contrib/jemalloc-cmake/CMakeLists.txt index dd7f9f3e2bb..563d41301b1 100644 --- a/contrib/jemalloc-cmake/CMakeLists.txt +++
b/contrib/jemalloc-cmake/CMakeLists.txt @@ -81,7 +81,7 @@ if (NOT EXTERNAL_JEMALLOC_LIBRARY_FOUND OR NOT EXTERNAL_JEMALLOC_LIBRARY_WORKS) # avoid spurious latencies and additional work associated with # MADV_DONTNEED. See # https://github.com/ClickHouse/ClickHouse/issues/11121 for motivation. - set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:10000") + set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:10000") else() set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:10000") endif() From 833c07f1f76b71527262362545fef3973fad686c Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 23 Sep 2020 11:31:18 +0300 Subject: [PATCH 060/432] Update compare.sh --- docker/test/performance-comparison/compare.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 2fd5641b9fd..7851cf8e81d 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -7,6 +7,10 @@ trap 'kill $(jobs -pr) ||:' EXIT stage=${stage:-} script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +# https://github.com/jemalloc/jemalloc/wiki/Getting-Started +export MALLOC_CONF="percpu_arena:disabled" +echo "$MALLOC_CONF" > /etc/malloc.conf ||: + function wait_for_server # port, pid { for _ in {1..60} From bb51aade56d478588a8db60315c30870208aeec3 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 23 Sep 2020 12:02:22 +0300 Subject: [PATCH 061/432] Update docker/test/performance-comparison/compare.sh Co-authored-by: Azat Khuzhin --- docker/test/performance-comparison/compare.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 7851cf8e81d..35bb5890488 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -8,8 +8,7 @@ stage=${stage:-} script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" # https://github.com/jemalloc/jemalloc/wiki/Getting-Started -export MALLOC_CONF="percpu_arena:disabled" -echo "$MALLOC_CONF" > /etc/malloc.conf ||: +ln -s "percpu_arena:disabled" > /etc/malloc.conf function wait_for_server # port, pid { From d96c89972cc9e3ff7e8baca7a3899c1aeffed891 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 23 Sep 2020 12:04:13 +0300 Subject: [PATCH 062/432] Update compare.sh --- docker/test/performance-comparison/compare.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 35bb5890488..ed89e6f875c 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -8,7 +8,8 @@ stage=${stage:-} script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" # https://github.com/jemalloc/jemalloc/wiki/Getting-Started -ln -s "percpu_arena:disabled" > /etc/malloc.conf +export MALLOC_CONF="percpu_arena:disabled" +ln -s "percpu_arena:disabled" /etc/malloc.conf function wait_for_server # port, pid { From 26abe8cb30819eea1c1f3383cc99620ab2e9da9a Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 24 Sep 
2020 12:24:30 +0300 Subject: [PATCH 063/432] Update compare.sh --- docker/test/performance-comparison/compare.sh | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index ed89e6f875c..7708d11d81f 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -8,8 +8,7 @@ stage=${stage:-} script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" # https://github.com/jemalloc/jemalloc/wiki/Getting-Started -export MALLOC_CONF="percpu_arena:disabled" -ln -s "percpu_arena:disabled" /etc/malloc.conf +export MALLOC_CONF="confirm_conf:true" function wait_for_server # port, pid { @@ -83,18 +82,16 @@ function restart set -m # Spawn servers in their own process groups - numactl --cpunodebind=0 --localalloc \ - left/clickhouse-server --config-file=left/config/config.xml \ - -- --path left/db --user_files_path left/db/user_files \ - &>> left-server-log.log & + left/clickhouse-server --config-file=left/config/config.xml \ + -- --path left/db --user_files_path left/db/user_files \ + &>> left-server-log.log & left_pid=$! kill -0 $left_pid disown $left_pid - numactl --cpunodebind=0 --localalloc \ - right/clickhouse-server --config-file=right/config/config.xml \ - -- --path right/db --user_files_path right/db/user_files \ - &>> right-server-log.log & + right/clickhouse-server --config-file=right/config/config.xml \ + -- --path right/db --user_files_path right/db/user_files \ + &>> right-server-log.log & right_pid=$! kill -0 $right_pid disown $right_pid From 425150e78308941c5e7a7c097ac66c84721008df Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 25 Sep 2020 13:19:37 +0300 Subject: [PATCH 064/432] bind to different nodes --- docker/test/performance-comparison/compare.sh | 24 +++++++++++-------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 7708d11d81f..98040b037f6 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -7,9 +7,6 @@ trap 'kill $(jobs -pr) ||:' EXIT stage=${stage:-} script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -# https://github.com/jemalloc/jemalloc/wiki/Getting-Started -export MALLOC_CONF="confirm_conf:true" - function wait_for_server # port, pid { for _ in {1..60} @@ -80,24 +77,31 @@ function restart while killall clickhouse-server; do echo . ; sleep 1 ; done echo all killed - set -m # Spawn servers in their own process groups + # https://github.com/jemalloc/jemalloc/wiki/Getting-Started + export MALLOC_CONF="percpu_arena:disabled,confirm_conf:true" - left/clickhouse-server --config-file=left/config/config.xml \ - -- --path left/db --user_files_path left/db/user_files \ - &>> left-server-log.log & + set -m # Spawn servers in their own process groups + + numactl --cpunodebind=1 --localalloc \ + left/clickhouse-server --config-file=left/config/config.xml \ + -- --path left/db --user_files_path left/db/user_files \ + &>> left-server-log.log & left_pid=$! 
kill -0 $left_pid disown $left_pid - right/clickhouse-server --config-file=right/config/config.xml \ - -- --path right/db --user_files_path right/db/user_files \ - &>> right-server-log.log & + numactl --cpunodebind=0 --localalloc \ + right/clickhouse-server --config-file=right/config/config.xml \ + -- --path right/db --user_files_path right/db/user_files \ + &>> right-server-log.log & right_pid=$! kill -0 $right_pid disown $right_pid set +m + unset MALLOC_CONF + wait_for_server 9001 $left_pid echo left ok From bde19bf240571ca6d05450412a838ae4bea2f782 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Mon, 28 Sep 2020 15:26:51 +0300 Subject: [PATCH 065/432] restart the build From a1f4d38019a408c2a840df1dc877ea04983cb089 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 29 Sep 2020 12:44:28 +0300 Subject: [PATCH 066/432] Add ability to run tests multiple time in stateless image --- docker/test/stateless/Dockerfile | 3 +++ docker/test/stateless/run.sh | 11 ++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile index 516d8d5842b..c7529686cbe 100644 --- a/docker/test/stateless/Dockerfile +++ b/docker/test/stateless/Dockerfile @@ -33,5 +33,8 @@ RUN mkdir -p /tmp/clickhouse-odbc-tmp \ ENV TZ=Europe/Moscow RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone +ENV NUM_TRIES=1 +ENV MAX_RUN_TIME=0 #unlimited + COPY run.sh / CMD ["/bin/bash", "/run.sh"] diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 9f2bb9bf62d..e736ef835c7 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -17,4 +17,13 @@ if cat /usr/bin/clickhouse-test | grep -q -- "--use-skip-list"; then SKIP_LIST_OPT="--use-skip-list" fi -clickhouse-test --testname --shard --zookeeper "$SKIP_LIST_OPT" $ADDITIONAL_OPTIONS $SKIP_TESTS_OPTION 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt +function run_tests() +{ + for i in $(seq 1 $NUM_TRIES); do + clickhouse-test --testname --shard --zookeeper "$SKIP_LIST_OPT" $ADDITIONAL_OPTIONS $SKIP_TESTS_OPTION 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt + done +} + +export -f run_tests + +timeout $MAX_RUN_TIME bash -c run_tests ||: From 4c6da6b70cb4105ab3383c342e4bcb4523215ae0 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 29 Sep 2020 21:01:49 +0300 Subject: [PATCH 067/432] allow fast test to run locally --- docker/test/fasttest/run.sh | 165 +++++++++++++++++++++++++----------- 1 file changed, 114 insertions(+), 51 deletions(-) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index a277ddf9d36..b93aa0ce5cd 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -16,28 +16,55 @@ stage=${stage:-} read -ra FASTTEST_CMAKE_FLAGS <<< "${FASTTEST_CMAKE_FLAGS:-}" -function kill_clickhouse +FASTTEST_WORKSPACE=$(readlink -f "${FASTTEST_WORKSPACE:-.}") +FASTTEST_SOURCE=$(readlink -f "${FASTTEST_SOURCE:-$FASTTEST_WORKSPACE/ch}") +FASTTEST_BUILD=$(readlink -f "${FASTTEST_BUILD:-${BUILD:-$FASTTEST_WORKSPACE/build}}") +FASTTEST_DATA=$(readlink -f "${FASTTEST_DATA:-$FASTTEST_WORKSPACE/db-fasttest}") +FASTTEST_OUTPUT=$(readlink -f "${FASTTEST_OUTPUT:-$FASTTEST_WORKSPACE}") + +server_pid=none + +function stop_server { + if ! kill -- "$server_pid" + then + echo "The server we started ($server_pid) is not present, won't do anything" + return 0 + fi + for _ in {1..60} do - if ! pkill -f clickhouse-server ; then break ; fi + if ! 
kill -- "$server_pid" ; then break ; fi sleep 1 done - if pgrep -f clickhouse-server + if kill -0 -- "$server_pid" then pstree -apgT jobs - echo "Failed to kill the ClickHouse server $(pgrep -f clickhouse-server)" + echo "Failed to kill the ClickHouse server pid '$server_pid'" return 1 fi } -function wait_for_server_start +function start_server { + set -m # Spawn server in its own process groups + clickhouse-server --config-file="$FASTTEST_DATA/config.xml" -- --path "$FASTTEST_DATA" --user_files_path "$FASTTEST_DATA/user_files" &>> "$FASTTEST_OUTPUT/server.log" & + server_pid=$! + set +m + + if [ "$server_pid" == "0" ] + then + echo "Failed to start ClickHouse server" + # Avoid zero PID because `kill` treats it as our process group PID. + server_pid="none" + return 1 + fi + for _ in {1..60} do - if clickhouse-client --query "select 1" || ! pgrep -f clickhouse-server + if clickhouse-client --query "select 1" || ! kill -0 -- "$server_pid" then break fi @@ -47,20 +74,26 @@ function wait_for_server_start if ! clickhouse-client --query "select 1" then echo "Failed to wait until ClickHouse server starts." + server_pid="none" return 1 fi - echo "ClickHouse server pid '$(pgrep -f clickhouse-server)' started and responded" + if ! kill -0 -- "$server_pid" + then + echo "Wrong clickhouse server started: PID '$server_pid' we started is not running, but '$(pgrep -f clickhouse-server)' is running" + server_pid="none" + return 1 + fi + + echo "ClickHouse server pid '$server_pid' started and responded" } function clone_root { -git clone https://github.com/ClickHouse/ClickHouse.git | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/clone_log.txt -cd ClickHouse -CLICKHOUSE_DIR=$(pwd) -export CLICKHOUSE_DIR - +git clone https://github.com/ClickHouse/ClickHouse.git -- "$FASTTEST_SOURCE" | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/clone_log.txt" +( +cd "$FASTTEST_SOURCE" if [ "$PULL_REQUEST_NUMBER" != "0" ]; then if git fetch origin "+refs/pull/$PULL_REQUEST_NUMBER/merge"; then git checkout FETCH_HEAD @@ -71,22 +104,37 @@ if [ "$PULL_REQUEST_NUMBER" != "0" ]; then echo 'Checked out to commit' fi else - if [ "$COMMIT_SHA" != "" ]; then + if [ -v COMMIT_SHA ]; then git checkout "$COMMIT_SHA" fi fi +) } -function run +function clone_submodules { +( +cd "$FASTTEST_SOURCE" + SUBMODULES_TO_UPDATE=(contrib/boost contrib/zlib-ng contrib/libxml2 contrib/poco contrib/libunwind contrib/ryu contrib/fmtlib contrib/base64 contrib/cctz contrib/libcpuid contrib/double-conversion contrib/libcxx contrib/libcxxabi contrib/libc-headers contrib/lz4 contrib/zstd contrib/fastops contrib/rapidjson contrib/re2 contrib/sparsehash-c11) -git submodule update --init --recursive "${SUBMODULES_TO_UPDATE[@]}" | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/submodule_log.txt +git submodule sync +git submodule update --init --recursive "${SUBMODULES_TO_UPDATE[@]}" +git submodule foreach git reset --hard +git submodule foreach git checkout @ -f +git submodule foreach git clean -xfd +) +} + +function build +{ CMAKE_LIBS_CONFIG=(-DENABLE_LIBRARIES=0 -DENABLE_TESTS=0 -DENABLE_UTILS=0 -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_THINLTO=0 -DUSE_UNWIND=1) -export CCACHE_DIR=/ccache -export CCACHE_BASEDIR=/ClickHouse +# TODO remove this? we don't use ccache anyway. An option would be to download it +# from S3 simultaneously with cloning. 
+export CCACHE_DIR="$FASTTEST_WORKSPACE/ccache" +export CCACHE_BASEDIR="$FASTTEST_SOURCE" export CCACHE_NOHASHDIR=true export CCACHE_COMPILERCHECK=content export CCACHE_MAXSIZE=15G @@ -94,34 +142,40 @@ export CCACHE_MAXSIZE=15G ccache --show-stats ||: ccache --zero-stats ||: -mkdir build -cd build -cmake .. -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_CXX_COMPILER=clang++-10 -DCMAKE_C_COMPILER=clang-10 "${CMAKE_LIBS_CONFIG[@]}" "${FASTTEST_CMAKE_FLAGS[@]}" | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/cmake_log.txt -time ninja clickhouse-bundle | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/build_log.txt -ninja install | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/install_log.txt - +mkdir "$FASTTEST_BUILD" ||: +( +cd "$FASTTEST_BUILD" +cmake "$FASTTEST_SOURCE" -DCMAKE_CXX_COMPILER=clang++-10 -DCMAKE_C_COMPILER=clang-10 "${CMAKE_LIBS_CONFIG[@]}" "${FASTTEST_CMAKE_FLAGS[@]}" | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/cmake_log.txt" +time ninja clickhouse-bundle | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/build_log.txt" +) ccache --show-stats ||: +} -mkdir -p /etc/clickhouse-server -mkdir -p /etc/clickhouse-client -mkdir -p /etc/clickhouse-server/config.d -mkdir -p /etc/clickhouse-server/users.d -ln -s /test_output /var/log/clickhouse-server -cp "$CLICKHOUSE_DIR/programs/server/config.xml" /etc/clickhouse-server/ -cp "$CLICKHOUSE_DIR/programs/server/users.xml" /etc/clickhouse-server/ +function configure +{ +export PATH="$FASTTEST_BUILD/programs:$FASTTEST_SOURCE/tests:$PATH" -# install tests config -$CLICKHOUSE_DIR/tests/config/install.sh +clickhouse-client --version +clickhouse-test --help + +mkdir -p "$FASTTEST_DATA"{,/client-config} +cp -a "$FASTTEST_SOURCE/programs/server/"{config,users}.xml "$FASTTEST_DATA" +cp -a "$FASTTEST_SOURCE/programs/server/"{config,users}.xml "$FASTTEST_DATA" +"$FASTTEST_SOURCE/tests/config/install.sh" "$FASTTEST_DATA" "$FASTTEST_DATA/client-config" # doesn't support SSL -rm -f /etc/clickhouse-server/config.d/secure_ports.xml +rm -f "$FASTTEST_DATA/config.d/secure_ports.xml" +} + +function run_tests +{ +clickhouse-server --version +clickhouse-test --help # Kill the server in case we are running locally and not in docker -kill_clickhouse +stop_server ||: -clickhouse-server --config /etc/clickhouse-server/config.xml --daemon - -wait_for_server_start +start_server TESTS_TO_SKIP=( parquet @@ -191,11 +245,10 @@ TESTS_TO_SKIP=( 01460_DistributedFilesToInsert ) -time clickhouse-test -j 8 --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/test_log.txt - +time clickhouse-test -j 8 --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt" # substr is to remove semicolon after test name -readarray -t FAILED_TESTS < <(awk '/FAIL|TIMEOUT|ERROR/ { print substr($3, 1, length($3)-1) }' /test_output/test_log.txt | tee /test_output/failed-parallel-tests.txt) +readarray -t FAILED_TESTS < <(awk '/FAIL|TIMEOUT|ERROR/ { print substr($3, 1, length($3)-1) }' "$FASTTEST_OUTPUT/test_log.txt" | tee "$FASTTEST_OUTPUT/failed-parallel-tests.txt") # We will rerun sequentially any tests that have failed during parallel run. # They might have failed because there was some interference from other tests @@ -206,19 +259,16 @@ readarray -t FAILED_TESTS < <(awk '/FAIL|TIMEOUT|ERROR/ { print substr($3, 1, le # explicit instead of guessing. 
if [[ -n "${FAILED_TESTS[*]}" ]] then - kill_clickhouse + stop_server ||: # Clean the data so that there is no interference from the previous test run. - rm -rf /var/lib/clickhouse ||: - mkdir /var/lib/clickhouse + rm -rf "$FASTTEST_DATA"/{meta,}data ||: - clickhouse-server --config /etc/clickhouse-server/config.xml --daemon - - wait_for_server_start + start_server echo "Going to run again: ${FAILED_TESTS[*]}" - clickhouse-test --no-long --testname --shard --zookeeper "${FAILED_TESTS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a /test_output/test_log.txt + clickhouse-test --no-long --testname --shard --zookeeper "${FAILED_TESTS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a "$FASTTEST_OUTPUT/test_log.txt" else echo "No failed tests" fi @@ -228,20 +278,33 @@ case "$stage" in "") ls -la ;& - "clone_root") clone_root # Pass control to the script from cloned sources, unless asked otherwise. if ! [ -v FASTTEST_LOCAL_SCRIPT ] then - stage=run "$CLICKHOUSE_DIR/docker/test/fasttest/run.sh" + # 'run' is deprecated, used for compatibility with old scripts. + # Replace with 'clone_submodules' after Nov 1, 2020. + stage=run "$FASTTEST_SOURCE/docker/test/fasttest/run.sh" exit $? fi ;& - "run") - run + # A deprecated stage that is called by old script and equivalent to everything + # after cloning root, starting with cloning submodules. + ;& +"clone_submodules") + clone_submodules | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/submodule_log.txt" + ;& +"build") + build + ;& +"configure") + configure + ;& +"run_tests") + run_tests ;& esac From 00f9888c5d3f631f0ac2b9f9ba06098d56bc33a6 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 29 Sep 2020 21:55:06 +0300 Subject: [PATCH 068/432] fasttest fixup --- docker/test/fasttest/run.sh | 27 ++++++++++++-------- tests/config/install.sh | 50 ++++++++++++++++++------------------- 2 files changed, 41 insertions(+), 36 deletions(-) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index b93aa0ce5cd..6a3f16d52f6 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -26,15 +26,9 @@ server_pid=none function stop_server { - if ! kill -- "$server_pid" - then - echo "The server we started ($server_pid) is not present, won't do anything" - return 0 - fi - for _ in {1..60} do - if ! kill -- "$server_pid" ; then break ; fi + if ! pkill -f "clickhouse-server" && ! kill -- "$server_pid" ; then break ; fi sleep 1 done @@ -45,6 +39,8 @@ function stop_server echo "Failed to kill the ClickHouse server pid '$server_pid'" return 1 fi + + server_pid=none } function start_server @@ -126,10 +122,9 @@ git submodule foreach git clean -xfd ) } - -function build +function run_cmake { -CMAKE_LIBS_CONFIG=(-DENABLE_LIBRARIES=0 -DENABLE_TESTS=0 -DENABLE_UTILS=0 -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_THINLTO=0 -DUSE_UNWIND=1) +CMAKE_LIBS_CONFIG=("-DENABLE_LIBRARIES=0" "-DENABLE_TESTS=0" "-DENABLE_UTILS=0" "-DENABLE_EMBEDDED_COMPILER=0" "-DENABLE_THINLTO=0" "-DUSE_UNWIND=1") # TODO remove this? we don't use ccache anyway. An option would be to download it # from S3 simultaneously with cloning. 
@@ -143,13 +138,20 @@ ccache --show-stats ||: ccache --zero-stats ||: mkdir "$FASTTEST_BUILD" ||: + ( cd "$FASTTEST_BUILD" cmake "$FASTTEST_SOURCE" -DCMAKE_CXX_COMPILER=clang++-10 -DCMAKE_C_COMPILER=clang-10 "${CMAKE_LIBS_CONFIG[@]}" "${FASTTEST_CMAKE_FLAGS[@]}" | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/cmake_log.txt" -time ninja clickhouse-bundle | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/build_log.txt" ) +} +function build +{ +( +cd "$FASTTEST_BUILD" +time ninja clickhouse-bundle | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/build_log.txt" ccache --show-stats ||: +) } function configure @@ -297,6 +299,9 @@ case "$stage" in "clone_submodules") clone_submodules | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/submodule_log.txt" ;& +"run_cmake") + run_cmake + ;& "build") build ;& diff --git a/tests/config/install.sh b/tests/config/install.sh index 0f33854ef95..ef9604904e7 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -15,40 +15,40 @@ mkdir -p $DEST_SERVER_PATH/config.d/ mkdir -p $DEST_SERVER_PATH/users.d/ mkdir -p $DEST_CLIENT_PATH -ln -s $SRC_PATH/config.d/zookeeper.xml $DEST_SERVER_PATH/config.d/ -ln -s $SRC_PATH/config.d/listen.xml $DEST_SERVER_PATH/config.d/ -ln -s $SRC_PATH/config.d/part_log.xml $DEST_SERVER_PATH/config.d/ -ln -s $SRC_PATH/config.d/text_log.xml $DEST_SERVER_PATH/config.d/ -ln -s $SRC_PATH/config.d/metric_log.xml $DEST_SERVER_PATH/config.d/ -ln -s $SRC_PATH/config.d/custom_settings_prefixes.xml $DEST_SERVER_PATH/config.d/ -ln -s $SRC_PATH/config.d/macros.xml $DEST_SERVER_PATH/config.d/ -ln -s $SRC_PATH/config.d/disks.xml $DEST_SERVER_PATH/config.d/ -ln -s $SRC_PATH/config.d/secure_ports.xml $DEST_SERVER_PATH/config.d/ -ln -s $SRC_PATH/config.d/clusters.xml $DEST_SERVER_PATH/config.d/ -ln -s $SRC_PATH/config.d/graphite.xml $DEST_SERVER_PATH/config.d/ -ln -s $SRC_PATH/config.d/database_atomic.xml $DEST_SERVER_PATH/config.d/ -ln -s $SRC_PATH/users.d/log_queries.xml $DEST_SERVER_PATH/users.d/ -ln -s $SRC_PATH/users.d/readonly.xml $DEST_SERVER_PATH/users.d/ -ln -s $SRC_PATH/users.d/access_management.xml $DEST_SERVER_PATH/users.d/ +ln -sf $SRC_PATH/config.d/zookeeper.xml $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/config.d/listen.xml $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/config.d/part_log.xml $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/config.d/text_log.xml $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/config.d/metric_log.xml $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/config.d/custom_settings_prefixes.xml $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/config.d/macros.xml $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/config.d/disks.xml $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/config.d/secure_ports.xml $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/config.d/clusters.xml $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/config.d/graphite.xml $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/config.d/database_atomic.xml $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/users.d/log_queries.xml $DEST_SERVER_PATH/users.d/ +ln -sf $SRC_PATH/users.d/readonly.xml $DEST_SERVER_PATH/users.d/ +ln -sf $SRC_PATH/users.d/access_management.xml $DEST_SERVER_PATH/users.d/ -ln -s $SRC_PATH/ints_dictionary.xml $DEST_SERVER_PATH/ -ln -s $SRC_PATH/strings_dictionary.xml $DEST_SERVER_PATH/ -ln -s $SRC_PATH/decimals_dictionary.xml $DEST_SERVER_PATH/ -ln -s $SRC_PATH/executable_dictionary.xml $DEST_SERVER_PATH/ +ln -sf $SRC_PATH/ints_dictionary.xml $DEST_SERVER_PATH/ +ln -sf $SRC_PATH/strings_dictionary.xml $DEST_SERVER_PATH/ +ln -sf 
$SRC_PATH/decimals_dictionary.xml $DEST_SERVER_PATH/ +ln -sf $SRC_PATH/executable_dictionary.xml $DEST_SERVER_PATH/ -ln -s $SRC_PATH/server.key $DEST_SERVER_PATH/ -ln -s $SRC_PATH/server.crt $DEST_SERVER_PATH/ -ln -s $SRC_PATH/dhparam.pem $DEST_SERVER_PATH/ +ln -sf $SRC_PATH/server.key $DEST_SERVER_PATH/ +ln -sf $SRC_PATH/server.crt $DEST_SERVER_PATH/ +ln -sf $SRC_PATH/dhparam.pem $DEST_SERVER_PATH/ # Retain any pre-existing config and allow ClickHouse to load it if required -ln -s --backup=simple --suffix=_original.xml \ +ln -sf --backup=simple --suffix=_original.xml \ $SRC_PATH/config.d/query_masking_rules.xml $DEST_SERVER_PATH/config.d/ if [[ -n "$USE_POLYMORPHIC_PARTS" ]] && [[ "$USE_POLYMORPHIC_PARTS" -eq 1 ]]; then - ln -s $SRC_PATH/config.d/polymorphic_parts.xml $DEST_SERVER_PATH/config.d/ + ln -sf $SRC_PATH/config.d/polymorphic_parts.xml $DEST_SERVER_PATH/config.d/ fi if [[ -n "$USE_DATABASE_ORDINARY" ]] && [[ "$USE_DATABASE_ORDINARY" -eq 1 ]]; then - ln -s $SRC_PATH/users.d/database_ordinary.xml $DEST_SERVER_PATH/users.d/ + ln -sf $SRC_PATH/users.d/database_ordinary.xml $DEST_SERVER_PATH/users.d/ fi ln -sf $SRC_PATH/client_config.xml $DEST_CLIENT_PATH/config.xml From 85ec02dcedcadea5cacd18913722a421100ccade Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 29 Sep 2020 23:09:52 +0300 Subject: [PATCH 069/432] fasttest fixup 2 --- docker/test/fasttest/run.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 6a3f16d52f6..6898000185d 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -306,7 +306,9 @@ case "$stage" in build ;& "configure") - configure + # The `install_log.txt` is also needed for compatibility with old CI task -- + # if there is no log, it will decide that build failed. 
+ configure | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/install_log.txt" ;& "run_tests") run_tests From 13c6fd48296594f8dc705110fd142349a491a624 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 30 Sep 2020 00:05:05 +0300 Subject: [PATCH 070/432] fasttest fixup 3 --- docker/test/fasttest/run.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 6898000185d..a9cbe87ecf9 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -156,7 +156,8 @@ ccache --show-stats ||: function configure { -export PATH="$FASTTEST_BUILD/programs:$FASTTEST_SOURCE/tests:$PATH" +PATH="$FASTTEST_BUILD/programs:$FASTTEST_SOURCE/tests:$PATH" +export PATH clickhouse-client --version clickhouse-test --help @@ -171,6 +172,7 @@ rm -f "$FASTTEST_DATA/config.d/secure_ports.xml" function run_tests { +echo "$PATH" clickhouse-server --version clickhouse-test --help From 824d5b093c453506cb64f2abc67ce3034a184da6 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 30 Sep 2020 14:32:49 +0300 Subject: [PATCH 071/432] bind everything to node 0 --- contrib/jemalloc-cmake/CMakeLists.txt | 2 +- docker/test/performance-comparison/Dockerfile | 9 ++++++++- docker/test/performance-comparison/compare.sh | 18 ++++++++++-------- 3 files changed, 19 insertions(+), 10 deletions(-) diff --git a/contrib/jemalloc-cmake/CMakeLists.txt b/contrib/jemalloc-cmake/CMakeLists.txt index 563d41301b1..dd7f9f3e2bb 100644 --- a/contrib/jemalloc-cmake/CMakeLists.txt +++ b/contrib/jemalloc-cmake/CMakeLists.txt @@ -81,7 +81,7 @@ if (NOT EXTERNAL_JEMALLOC_LIBRARY_FOUND OR NOT EXTERNAL_JEMALLOC_LIBRARY_WORKS) # avoid spurious latencies and additional work associated with # MADV_DONTNEED. See # https://github.com/ClickHouse/ClickHouse/issues/11121 for motivation. - set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:10000") + set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:10000") else() set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:10000") endif() diff --git a/docker/test/performance-comparison/Dockerfile b/docker/test/performance-comparison/Dockerfile index a4f8af2f388..99f2f9b2b4b 100644 --- a/docker/test/performance-comparison/Dockerfile +++ b/docker/test/performance-comparison/Dockerfile @@ -37,7 +37,14 @@ RUN apt-get update \ COPY * / -CMD /entrypoint.sh +# Bind everything to node 0 early. We have to bind both servers and the tmpfs +# on which the database is stored. How to do it through Yandex Sandbox API is +# unclear, but by default tmpfs uses 'process allocation policy', not sure +# which process but hopefully the one that writes to it, so just bind the +# downloader script as well. +# We could also try to remount it with proper options in Sandbox task. +# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt +CMD ['numactl', '--cpunodebind=0', '--localalloc', '/entrypoint.sh'] # docker run --network=host --volume :/workspace --volume=:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> yandex/clickhouse-performance-comparison diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 0134d03aea1..2f03ecc9ad7 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -77,23 +77,25 @@ function restart while killall clickhouse-server; do echo . 
; sleep 1 ; done echo all killed + # Disable percpu arenas because they segfault when the process is bound to + # a particular NUMA node: https://github.com/jemalloc/jemalloc/pull/1939 + # + # About the jemalloc settings: # https://github.com/jemalloc/jemalloc/wiki/Getting-Started export MALLOC_CONF="percpu_arena:disabled,confirm_conf:true" set -m # Spawn servers in their own process groups - numactl --cpunodebind=1 --localalloc \ - left/clickhouse-server --config-file=left/config/config.xml \ - -- --path left/db --user_files_path left/db/user_files \ - &>> left-server-log.log & + left/clickhouse-server --config-file=left/config/config.xml \ + -- --path left/db --user_files_path left/db/user_files \ + &>> left-server-log.log & left_pid=$! kill -0 $left_pid disown $left_pid - numactl --cpunodebind=0 --localalloc \ - right/clickhouse-server --config-file=right/config/config.xml \ - -- --path right/db --user_files_path right/db/user_files \ - &>> right-server-log.log & + right/clickhouse-server --config-file=right/config/config.xml \ + -- --path right/db --user_files_path right/db/user_files \ + &>> right-server-log.log & right_pid=$! kill -0 $right_pid disown $right_pid From edf5797d117467e879783a50a05b840740fd19ee Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 30 Sep 2020 17:21:06 +0300 Subject: [PATCH 072/432] fasttest fixup 3 --- docker/test/fasttest/run.sh | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index a9cbe87ecf9..4a47fcfe4dc 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -156,9 +156,6 @@ ccache --show-stats ||: function configure { -PATH="$FASTTEST_BUILD/programs:$FASTTEST_SOURCE/tests:$PATH" -export PATH - clickhouse-client --version clickhouse-test --help @@ -172,7 +169,6 @@ rm -f "$FASTTEST_DATA/config.d/secure_ports.xml" function run_tests { -echo "$PATH" clickhouse-server --version clickhouse-test --help @@ -306,6 +302,8 @@ case "$stage" in ;& "build") build + PATH="$FASTTEST_BUILD/programs:$FASTTEST_SOURCE/tests:$PATH" + export PATH ;& "configure") # The `install_log.txt` is also needed for compatibility with old CI task -- From aa543a2d3d51f16c6d043a36bb9f4249ba35cd05 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 30 Sep 2020 17:40:24 +0300 Subject: [PATCH 073/432] quotes --- docker/test/performance-comparison/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/performance-comparison/Dockerfile b/docker/test/performance-comparison/Dockerfile index 99f2f9b2b4b..535f7de9e29 100644 --- a/docker/test/performance-comparison/Dockerfile +++ b/docker/test/performance-comparison/Dockerfile @@ -44,7 +44,7 @@ COPY * / # downloader script as well. # We could also try to remount it with proper options in Sandbox task. 
# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt -CMD ['numactl', '--cpunodebind=0', '--localalloc', '/entrypoint.sh'] +CMD ["numactl", "--cpunodebind=0", "--localalloc", "/entrypoint.sh"] # docker run --network=host --volume :/workspace --volume=:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> yandex/clickhouse-performance-comparison From 22a0ec0892c6058acb9b0e09f3f4b57f5b99647e Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 30 Sep 2020 17:55:40 +0300 Subject: [PATCH 074/432] try split debug -Og build in fasttest --- docker/test/fasttest/run.sh | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 4a47fcfe4dc..e16a70fc3b2 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -124,7 +124,20 @@ git submodule foreach git clean -xfd function run_cmake { -CMAKE_LIBS_CONFIG=("-DENABLE_LIBRARIES=0" "-DENABLE_TESTS=0" "-DENABLE_UTILS=0" "-DENABLE_EMBEDDED_COMPILER=0" "-DENABLE_THINLTO=0" "-DUSE_UNWIND=1") +CMAKE_LIBS_CONFIG=( + "-DENABLE_LIBRARIES=0" + "-DENABLE_TESTS=0" + "-DENABLE_UTILS=0" + "-DENABLE_EMBEDDED_COMPILER=0" + "-DENABLE_THINLTO=0" + "-DUSE_UNWIND=1" + "-DUSE_STATIC_LIBRARIES=0" + "-DSPLIT_SHARED_LIBRARIES=1" + "-DCLICKHOUSE_SPLIT_BINARY=1" + "-DCMAKE_BUILD_TYPE=Debug" + "-DCMAKE_C_FLAGS=-Og" + "-DCMAKE_CXX_FLAGS=-Og" +) # TODO remove this? we don't use ccache anyway. An option would be to download it # from S3 simultaneously with cloning. From 222b6555175019c487d9f71196ea462231fa1530 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 30 Sep 2020 18:12:53 +0300 Subject: [PATCH 075/432] fasttest compat with old script --- docker/test/fasttest/run.sh | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 4a47fcfe4dc..c4a9a0ea5f3 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -284,8 +284,14 @@ case "$stage" in # Pass control to the script from cloned sources, unless asked otherwise. if ! [ -v FASTTEST_LOCAL_SCRIPT ] then - # 'run' is deprecated, used for compatibility with old scripts. + # 'run' stage is deprecated, used for compatibility with old scripts. # Replace with 'clone_submodules' after Nov 1, 2020. + # cd and CLICKHOUSE_DIR are also a setup for old scripts, remove as well. + # In modern script we undo it by changing back into workspace dir right + # away, see below. Remove that as well. + cd "$FASTTEST_SOURCE" + CLICKHOUSE_DIR=$(pwd) + export CLICKHOUSE_DIR stage=run "$FASTTEST_SOURCE/docker/test/fasttest/run.sh" exit $? fi @@ -295,6 +301,10 @@ case "$stage" in # after cloning root, starting with cloning submodules. ;& "clone_submodules") + # Recover after being called from the old script that changes into source directory. + # See the compatibility hacks in `clone_root` stage above. Remove at the same time, + # after Nov 1, 2020. 
+ cd "$FASTTEST_WORKSPACE" clone_submodules | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/submodule_log.txt" ;& "run_cmake") From c5d1f51f5836da49a33c531dad6cd46dccc271d5 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 30 Sep 2020 19:14:20 +0300 Subject: [PATCH 076/432] just split --- docker/test/fasttest/run.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index e16a70fc3b2..f769b342846 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -134,9 +134,9 @@ CMAKE_LIBS_CONFIG=( "-DUSE_STATIC_LIBRARIES=0" "-DSPLIT_SHARED_LIBRARIES=1" "-DCLICKHOUSE_SPLIT_BINARY=1" - "-DCMAKE_BUILD_TYPE=Debug" - "-DCMAKE_C_FLAGS=-Og" - "-DCMAKE_CXX_FLAGS=-Og" +# "-DCMAKE_BUILD_TYPE=Debug" +# "-DCMAKE_C_FLAGS=-Og" +# "-DCMAKE_CXX_FLAGS=-Og" ) # TODO remove this? we don't use ccache anyway. An option would be to download it @@ -255,7 +255,7 @@ TESTS_TO_SKIP=( 00974_query_profiler # Look at DistributedFilesToInsert, so cannot run in parallel. - 01460_DistributedFilesToInsert + 01457_DistributedFilesToInsert ) time clickhouse-test -j 8 --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt" From 626c2a3e28e2b179aa78ad363866bc1a53dd90d6 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 30 Sep 2020 19:16:33 +0300 Subject: [PATCH 077/432] microsecond precision for start/finish time --- programs/server/config.xml | 2 +- src/Interpreters/ClientInfo.cpp | 6 +++-- src/Interpreters/OpenTelemetryLog.cpp | 11 ++++---- src/Interpreters/OpenTelemetryLog.h | 8 ++++-- src/Interpreters/executeQuery.cpp | 37 ++++++++++++--------------- 5 files changed, 34 insertions(+), 30 deletions(-) diff --git a/programs/server/config.xml b/programs/server/config.xml index b3269f2e842..9a1b626b26a 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -611,7 +611,7 @@ engine MergeTree partition by toYYYYMM(finish_date) - order by (finish_date, finish_time, trace_id) + order by (finish_date, finish_time_us, trace_id) system opentelemetry_log
diff --git a/src/Interpreters/ClientInfo.cpp b/src/Interpreters/ClientInfo.cpp index 25cf9b8294e..5570a1bc88d 100644 --- a/src/Interpreters/ClientInfo.cpp +++ b/src/Interpreters/ClientInfo.cpp @@ -62,11 +62,12 @@ void ClientInfo::write(WriteBuffer & out, const UInt64 server_protocol_revision) if (server_protocol_revision >= DBMS_MIN_REVISION_WITH_OPENTELEMETRY) { - // No point writing these numbers with variable length, because they - // are random and will probably require the full length anyway. if (opentelemetry_trace_id) { + // Have OpenTelemetry header. writeBinary(uint8_t(1), out); + // No point writing these numbers with variable length, because they + // are random and will probably require the full length anyway. writeBinary(opentelemetry_trace_id, out); writeBinary(opentelemetry_span_id, out); writeBinary(opentelemetry_parent_span_id, out); @@ -75,6 +76,7 @@ void ClientInfo::write(WriteBuffer & out, const UInt64 server_protocol_revision) } else { + // Don't have OpenTelemetry header. writeBinary(uint8_t(0), out); } } diff --git a/src/Interpreters/OpenTelemetryLog.cpp b/src/Interpreters/OpenTelemetryLog.cpp index 161af01107f..a247e557fcc 100644 --- a/src/Interpreters/OpenTelemetryLog.cpp +++ b/src/Interpreters/OpenTelemetryLog.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -17,8 +18,8 @@ Block OpenTelemetrySpanLogElement::createBlock() {std::make_shared(), "span_id"}, {std::make_shared(), "parent_span_id"}, {std::make_shared(), "operation_name"}, - {std::make_shared(), "start_time"}, - {std::make_shared(), "finish_time"}, + {std::make_shared(6), "start_time_us"}, + {std::make_shared(6), "finish_time_us"}, {std::make_shared(), "finish_date"}, {std::make_shared(std::make_shared()), "attribute.names"}, @@ -35,9 +36,9 @@ void OpenTelemetrySpanLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insert(span_id); columns[i++]->insert(parent_span_id); columns[i++]->insert(operation_name); - columns[i++]->insert(start_time); - columns[i++]->insert(finish_time); - columns[i++]->insert(DateLUT::instance().toDayNum(finish_time)); + columns[i++]->insert(start_time_us); + columns[i++]->insert(finish_time_us); + columns[i++]->insert(DateLUT::instance().toDayNum(finish_time_us / 1000000)); columns[i++]->insert(attribute_names); columns[i++]->insert(attribute_values); } diff --git a/src/Interpreters/OpenTelemetryLog.h b/src/Interpreters/OpenTelemetryLog.h index e2906a99a02..2d8536ff47e 100644 --- a/src/Interpreters/OpenTelemetryLog.h +++ b/src/Interpreters/OpenTelemetryLog.h @@ -15,6 +15,10 @@ struct OpenTelemetrySpanContext }; */ +// using TimeMicroseconds = std::chrono::time_point< +// std::chrono::local_t, +// std::chrono::duration>; + // TODO figure out precisely which part of this is run time, and which part we // must log. 
struct OpenTelemetrySpan @@ -23,8 +27,8 @@ struct OpenTelemetrySpan UInt64 trace_id; UInt64 span_id; UInt64 parent_span_id; std::string operation_name; - time_t start_time; - time_t finish_time; + UInt64 start_time_us; + UInt64 finish_time_us; UInt64 duration_ns; Array attribute_names; Array attribute_values; diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 30f3dcd87b1..d89725b30f4 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -205,7 +205,7 @@ inline UInt64 time_in_seconds(std::chrono::time_point<std::chrono::system_clock> timepoint) return std::chrono::duration_cast<std::chrono::seconds>(timepoint.time_since_epoch()).count(); } -static void onExceptionBeforeStart(const String & query_for_logging, Context & context, time_t current_time, UInt64 current_time_microseconds, ASTPtr ast) +static void onExceptionBeforeStart(const String & query_for_logging, Context & context, UInt64 current_time_us, ASTPtr ast) { /// Exception before the query execution. if (auto quota = context.getQuota()) @@ -221,9 +221,9 @@ static void onExceptionBeforeStart(const String & query_for_logging, Context & c // all callers to onExceptionBeforeStart upstream construct the timespec for event_time and // event_time_microseconds from the same timespec. So it can be assumed that both of these // times are equal up to the precision of a second. - elem.event_time = current_time; - elem.query_start_time = current_time; - elem.query_start_time_microseconds = current_time_microseconds; + elem.event_time = current_time_us / 1000000; + elem.query_start_time = current_time_us / 1000000; + elem.query_start_time_microseconds = current_time_us; elem.current_database = context.getCurrentDatabase(); elem.query = query_for_logging; @@ -252,8 +252,8 @@ static void onExceptionBeforeStart(const String & query_for_logging, Context & c span.span_id = context.getClientInfo().opentelemetry_span_id; span.parent_span_id = context.getClientInfo().opentelemetry_parent_span_id; span.operation_name = "query"; - span.start_time = current_time; - span.finish_time = current_time; + span.start_time_us = current_time_us; + span.finish_time_us = current_time_us; span.duration_ns = 0; // keep values synchronized to type enum in QueryLogElement::createBlock @@ -309,12 +309,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl( bool has_query_tail, ReadBuffer * istr) { - // current_time and current_time_microseconds are both constructed from the same time point - // to ensure that both the times are equal up to the precision of a second. - const auto now = std::chrono::system_clock::now(); - - auto current_time = time_in_seconds(now); - auto current_time_microseconds = time_in_microseconds(now); + const auto current_time = std::chrono::system_clock::now(); /// If we are already executing a query and it requires executing an internal query, then /// don't replace the thread context with the given one (it can be temporary). Otherwise, attach the context to the thread.
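// Aside, for illustration only (not part of the patch): the hunk above
// collapses the separate (current_time, current_time_microseconds) pair into
// one std::chrono::system_clock::now() time point. Both resolutions are then
// derived from the same time point, so they agree up to a second:
//
//     const auto now = std::chrono::system_clock::now();
//     assert(time_in_seconds(now) == time_in_microseconds(now) / 1000000);
//
// That invariant is what lets onExceptionBeforeStart() take a single
// current_time_us argument and recover the second-resolution fields as
// current_time_us / 1000000.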
@@ -364,7 +359,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl( logQuery(query_for_logging, context, internal); if (!internal) - onExceptionBeforeStart(query_for_logging, context, current_time, current_time_microseconds, ast); + onExceptionBeforeStart(query_for_logging, context, time_in_microseconds(current_time), ast); throw; } @@ -528,9 +523,9 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl( elem.type = QueryLogElementType::QUERY_START; - elem.event_time = current_time; - elem.query_start_time = current_time; - elem.query_start_time_microseconds = current_time_microseconds; + elem.event_time = time_in_seconds(current_time); + elem.query_start_time = time_in_seconds(current_time); + elem.query_start_time_microseconds = time_in_microseconds(current_time); elem.current_database = context.getCurrentDatabase(); elem.query = query_for_logging; @@ -599,7 +594,9 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl( elem.type = QueryLogElementType::QUERY_FINISH; - elem.event_time = time(nullptr); + const auto current_time = std::chrono::system_clock::now(); + + elem.event_time = time_in_seconds(current_time); status_info_to_query_log(elem, info, ast); @@ -660,8 +657,8 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl( span.span_id = context.getClientInfo().opentelemetry_span_id; span.parent_span_id = context.getClientInfo().opentelemetry_parent_span_id; span.operation_name = "query"; - span.start_time = elem.query_start_time; - span.finish_time = elem.event_time; + span.start_time_us = elem.query_start_time_microseconds; + span.finish_time_us = time_in_microseconds(current_time); span.duration_ns = elapsed_seconds * 1000000000; // keep values synchronized to type enum in QueryLogElement::createBlock @@ -751,7 +748,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl( if (query_for_logging.empty()) query_for_logging = prepareQueryForLogging(query, context); - onExceptionBeforeStart(query_for_logging, context, current_time, current_time_microseconds, ast); + onExceptionBeforeStart(query_for_logging, context, time_in_microseconds(current_time), ast); } throw; From a2cd346303d89043855bd6bf0aa385386d72e2fd Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 30 Sep 2020 20:35:46 +0300 Subject: [PATCH 078/432] fasttest fixup 5 --- docker/test/fasttest/run.sh | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index c4a9a0ea5f3..5e586402bd7 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -15,13 +15,20 @@ stage=${stage:-} # empty parameter. read -ra FASTTEST_CMAKE_FLAGS <<< "${FASTTEST_CMAKE_FLAGS:-}" - FASTTEST_WORKSPACE=$(readlink -f "${FASTTEST_WORKSPACE:-.}") FASTTEST_SOURCE=$(readlink -f "${FASTTEST_SOURCE:-$FASTTEST_WORKSPACE/ch}") FASTTEST_BUILD=$(readlink -f "${FASTTEST_BUILD:-${BUILD:-$FASTTEST_WORKSPACE/build}}") FASTTEST_DATA=$(readlink -f "${FASTTEST_DATA:-$FASTTEST_WORKSPACE/db-fasttest}") FASTTEST_OUTPUT=$(readlink -f "${FASTTEST_OUTPUT:-$FASTTEST_WORKSPACE}") +# Export these variables, so that all subsequent invocations of the script # use them, and do not try to guess them anew, which leads to weird effects.
+export FASTTEST_WORKSPACE +export FASTTEST_SOURCE +export FASTTEST_BUILD +export FASTTEST_DATA +export FASTTEST_OUT + server_pid=none function stop_server From 16ab4d59e6a90360064c765c6c390bb49ffbb764 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 30 Sep 2020 20:36:02 +0300 Subject: [PATCH 079/432] don't shadow local var --- src/Interpreters/executeQuery.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index d89725b30f4..94a58a1bda8 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -594,9 +594,9 @@ static std::tuple executeQueryImpl( elem.type = QueryLogElementType::QUERY_FINISH; - const auto current_time = std::chrono::system_clock::now(); + const auto finish_time = std::chrono::system_clock::now(); - elem.event_time = time_in_seconds(current_time); + elem.event_time = time_in_seconds(finish_time); status_info_to_query_log(elem, info, ast); @@ -658,7 +658,7 @@ static std::tuple executeQueryImpl( span.parent_span_id = context.getClientInfo().opentelemetry_parent_span_id; span.operation_name = "query"; span.start_time_us = elem.query_start_time_microseconds; - span.finish_time_us = time_in_microseconds(current_time); + span.finish_time_us = time_in_microseconds(finish_time); span.duration_ns = elapsed_seconds * 1000000000; // keep values synchonized to type enum in QueryLogElement::createBlock From 74f8e41b55a2504c3419cb33e7c2429a2c1e116a Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Thu, 1 Oct 2020 13:56:56 +0300 Subject: [PATCH 080/432] calculate on all nodes --- docker/test/performance-comparison/compare.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 2f03ecc9ad7..08b18758874 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -462,7 +462,10 @@ wait unset IFS ) -parallel --joblog analyze/parallel-log.txt --null < analyze/commands.txt 2>> analyze/errors.log +# The comparison script might be bound to one NUMA node for better test +# stability, and the calculation runs out of memory because of this. Use +# all nodes. +numactl --all parallel --joblog analyze/parallel-log.txt --null < analyze/commands.txt 2>> analyze/errors.log clickhouse-local --query " -- Join the metric names back to the metric statistics we've calculated, and make From 3a963c988d613d316bfaf9e0d129452586033173 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Fri, 2 Oct 2020 14:38:32 +0300 Subject: [PATCH 081/432] fixup --- docker/test/fasttest/run.sh | 5 ++++- src/Common/Exception.h | 22 ++++++++++++++++------ 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 5e586402bd7..a0830ba5f12 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -329,7 +329,10 @@ case "$stage" in ;& "run_tests") run_tests - ;& + ;; +*) + echo "Unknown test stage '$stage'" + exit 1 esac pstree -apgT diff --git a/src/Common/Exception.h b/src/Common/Exception.h index d0de8d6a3f2..314c59cbf51 100644 --- a/src/Common/Exception.h +++ b/src/Common/Exception.h @@ -22,10 +22,14 @@ public: Exception() = default; Exception(const std::string & msg, int code); + Exception(int code, const std::string & message) + : Exception(message, code) + {} + // Format message with fmt::format, like the logging functions. 
- template - Exception(int code, Fmt&&... fmt) - : Exception(fmt::format(std::forward(fmt)...), code) + template + Exception(int code, const std::string & fmt, Args&&... args) + : Exception(fmt::format(fmt, std::forward(args)...), code) {} struct CreateFromPocoTag {}; @@ -39,10 +43,16 @@ public: const char * name() const throw() override { return "DB::Exception"; } const char * what() const throw() override { return message().data(); } - template - void addMessage(Fmt&&... fmt) + /// Add something to the existing message. + template + void addMessage(const std::string& format, Args&&... args) { - extendedMessage(fmt::format(std::forward(fmt)...)); + extendedMessage(fmt::format(format, std::forward(args)...)); + } + + void addMessage(const std::string& message) + { + extendedMessage(message); } std::string getStackTraceString() const; From 867216103f92fd60f566610e829146c694349147 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Mon, 21 Sep 2020 18:13:01 +0800 Subject: [PATCH 082/432] Extend trivial count optimization. --- src/Interpreters/InterpreterSelectQuery.cpp | 13 +++++- src/Interpreters/TreeRewriter.cpp | 20 ++++++++- src/Storages/IStorage.h | 3 ++ src/Storages/MergeTree/KeyCondition.cpp | 33 +++++++++++--- src/Storages/MergeTree/KeyCondition.h | 12 ++++- src/Storages/StorageMergeTree.cpp | 33 ++++++++++++++ src/Storages/StorageMergeTree.h | 1 + src/Storages/StorageReplicatedMergeTree.cpp | 30 +++++++++++++ src/Storages/StorageReplicatedMergeTree.h | 1 + .../00636_partition_key_parts_pruning.sh | 14 +++--- ...l_count_with_partition_predicate.reference | 9 ++++ ...trivial_count_with_partition_predicate.sql | 45 +++++++++++++++++++ 12 files changed, 196 insertions(+), 18 deletions(-) create mode 100644 tests/queries/0_stateless/01505_trivial_count_with_partition_predicate.reference create mode 100644 tests/queries/0_stateless/01505_trivial_count_with_partition_predicate.sql diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 823808759a2..78223e56d2a 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -1115,6 +1115,7 @@ void InterpreterSelectQuery::executeFetchColumns( /// Optimization for trivial query like SELECT count() FROM table. 
bool optimize_trivial_count = syntax_analyzer_result->optimize_trivial_count + && (settings.max_parallel_replicas <= 1) && storage && !filter_info && processing_stage == QueryProcessingStage::FetchColumns @@ -1126,7 +1127,17 @@ void InterpreterSelectQuery::executeFetchColumns( { const auto & desc = query_analyzer->aggregates()[0]; const auto & func = desc.function; - std::optional num_rows = storage->totalRows(); + std::optional num_rows{}; + if (!query.prewhere() && !query.where()) + num_rows = storage->totalRows(); + else // It's possible to optimize count() given only partition predicates + { + SelectQueryInfo temp_query_info; + temp_query_info.query = query_ptr; + temp_query_info.syntax_analyzer_result = syntax_analyzer_result; + temp_query_info.sets = query_analyzer->getPreparedSets(); + num_rows = storage->totalRowsByPartitionPredicate(temp_query_info, *context); + } if (num_rows) { AggregateFunctionCount & agg_count = static_cast(*func); diff --git a/src/Interpreters/TreeRewriter.cpp b/src/Interpreters/TreeRewriter.cpp index 0b2f8ac3eb7..b45533f7a7b 100644 --- a/src/Interpreters/TreeRewriter.cpp +++ b/src/Interpreters/TreeRewriter.cpp @@ -478,6 +478,24 @@ void TreeRewriterResult::collectUsedColumns(const ASTPtr & query, bool is_select /// If we have no information about columns sizes, choose a column of minimum size of its data type. required.insert(ExpressionActions::getSmallestColumn(source_columns)); } + else if (is_select && metadata_snapshot) + { + const auto & partition_desc = metadata_snapshot->getPartitionKey(); + if (partition_desc.expression) + { + const auto & partition_source_columns = partition_desc.expression->getRequiredColumns(); + optimize_trivial_count = true; + for (const auto & required_column : required) + { + if (std::find(partition_source_columns.begin(), partition_source_columns.end(), required_column) + == partition_source_columns.end()) + { + optimize_trivial_count = false; + break; + } + } + } + } NameSet unknown_required_source_columns = required; @@ -620,7 +638,7 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect( if (result.optimize_trivial_count) result.optimize_trivial_count = settings.optimize_trivial_count_query && - !select_query->where() && !select_query->prewhere() && !select_query->groupBy() && !select_query->having() && + !select_query->groupBy() && !select_query->having() && !select_query->sampleSize() && !select_query->sampleOffset() && !select_query->final() && (tables_with_columns.size() < 2 || isLeft(result.analyzed_join->kind())); diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index 4a2e70aa84b..836e2d7dcf1 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -463,6 +463,9 @@ public: /// Does takes underlying Storage (if any) into account. virtual std::optional totalRows() const { return {}; } + /// Same as above but also take partition predicate into account. 
+ virtual std::optional<UInt64> totalRowsByPartitionPredicate(const SelectQueryInfo &, const Context &) const { return {}; }
+
 /// If it is possible to quickly determine exact number of bytes for the table on storage:
 /// - memory (approximated, resident)
 /// - disk (compressed)

diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp
index bd45d970a7c..aae5cf3becf 100644
--- a/src/Storages/MergeTree/KeyCondition.cpp
+++ b/src/Storages/MergeTree/KeyCondition.cpp
@@ -368,8 +368,10 @@ KeyCondition::KeyCondition(
 const SelectQueryInfo & query_info,
 const Context & context,
 const Names & key_column_names,
- const ExpressionActionsPtr & key_expr_)
- : key_expr(key_expr_), prepared_sets(query_info.sets)
+ const ExpressionActionsPtr & key_expr_,
+ bool single_point_,
+ bool strict_)
+ : key_expr(key_expr_), prepared_sets(query_info.sets), single_point(single_point_), strict(strict_)
 {
 for (size_t i = 0, size = key_column_names.size(); i < size; ++i)
 {
@@ -551,6 +553,18 @@ bool KeyCondition::canConstantBeWrappedByMonotonicFunctions(
 Field & out_value,
 DataTypePtr & out_type)
 {
+ /// We don't look for inverse key transformations when strict is true, which is required for trivial count().
+ /// Consider the following test case:
+ ///
+ /// create table test1(p DateTime, k int) engine MergeTree partition by toDate(p) order by k;
+ /// insert into test1 values ('2020-09-01 00:01:02', 1), ('2020-09-01 20:01:03', 2), ('2020-09-02 00:01:03', 3);
+ /// select count() from test1 where p > toDateTime('2020-09-01 10:00:00');
+ ///
+ /// toDate(DateTime) is always monotonic, but we cannot relax the predicate to be
+ /// >= toDate(toDateTime('2020-09-01 10:00:00')), which would return 3 instead of the correct count: 2.
+ if (strict)
+ return false;
+
 String expr_name = node->getColumnName();
 const auto & sample_block = key_expr->getSampleBlock();
 if (!sample_block.has(expr_name))
@@ -734,7 +748,8 @@ bool KeyCondition::isKeyPossiblyWrappedByMonotonicFunctions(
 arguments.push_back({ nullptr, key_column_type, "" });
 auto func = func_builder->build(arguments);

- if (!func || !func->hasInformationAboutMonotonicity())
+ /// If we know the given range only contains one value, then we treat all functions as positive monotonic.
+ if (!func || (!single_point && !func->hasInformationAboutMonotonicity()))
 return false;

 key_column_type = func->getReturnType();
@@ -1163,13 +1178,16 @@ BoolMask KeyCondition::checkInRange(
 std::optional<Range> KeyCondition::applyMonotonicFunctionsChainToRange(
 Range key_range,
 const MonotonicFunctionsChain & functions,
- DataTypePtr current_type)
+ DataTypePtr current_type,
+ bool single_point)
 {
 for (const auto & func : functions)
 {
 /// We check the monotonicity of each function on a specific range.
- IFunction::Monotonicity monotonicity = func->getMonotonicityForRange(
- *current_type.get(), key_range.left, key_range.right);
+ /// If we know the given range only contains one value, then we treat all functions as positive monotonic.
+ IFunction::Monotonicity monotonicity = single_point
+ ?
IFunction::Monotonicity{true} + : func->getMonotonicityForRange(*current_type.get(), key_range.left, key_range.right); if (!monotonicity.is_monotonic) { @@ -1299,7 +1317,8 @@ BoolMask KeyCondition::checkInHyperrectangle( std::optional new_range = applyMonotonicFunctionsChainToRange( *key_range, element.monotonic_functions_chain, - data_types[element.key_column] + data_types[element.key_column], + single_point ); if (!new_range) diff --git a/src/Storages/MergeTree/KeyCondition.h b/src/Storages/MergeTree/KeyCondition.h index fdae7335646..04591f197bb 100644 --- a/src/Storages/MergeTree/KeyCondition.h +++ b/src/Storages/MergeTree/KeyCondition.h @@ -232,7 +232,9 @@ public: const SelectQueryInfo & query_info, const Context & context, const Names & key_column_names, - const ExpressionActionsPtr & key_expr); + const ExpressionActionsPtr & key_expr, + bool single_point_ = false, + bool strict_ = false); /// Whether the condition and its negation are feasible in the direct product of single column ranges specified by `hyperrectangle`. BoolMask checkInHyperrectangle( @@ -307,7 +309,8 @@ public: static std::optional applyMonotonicFunctionsChainToRange( Range key_range, const MonotonicFunctionsChain & functions, - DataTypePtr current_type); + DataTypePtr current_type, + bool single_point = false); bool matchesExactContinuousRange() const; @@ -413,6 +416,11 @@ private: ColumnIndices key_columns; ExpressionActionsPtr key_expr; PreparedSets prepared_sets; + + // If true, always allow key_expr to be wrapped by function + bool single_point; + // If true, do not use always_monotonic information to transform constants + bool strict; }; } diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 55fb42b550e..e2d5daecada 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -189,6 +189,39 @@ std::optional StorageMergeTree::totalRows() const return getTotalActiveSizeInRows(); } +std::optional StorageMergeTree::totalRowsByPartitionPredicate(const SelectQueryInfo & query_info, const Context & context) const +{ + auto metadata_snapshot = getInMemoryMetadataPtr(); + const auto & partition_key = metadata_snapshot->getPartitionKey(); + Names partition_key_columns = partition_key.column_names; + KeyCondition key_condition( + query_info, context, partition_key_columns, partition_key.expression, true /* single_point */, true /* strict */); + if (key_condition.alwaysUnknownOrTrue()) + return {}; + std::unordered_map partition_filter_map; + size_t res = 0; + auto lock = lockParts(); + for (const auto & part : getDataPartsStateRange(DataPartState::Committed)) + { + if (part->isEmpty()) + continue; + const auto & partition_id = part->info.partition_id; + bool is_valid; + if (auto it = partition_filter_map.find(partition_id); it != partition_filter_map.end()) + is_valid = it->second; + else + { + const auto & partition_value = part->partition.value; + std::vector index_value(partition_value.begin(), partition_value.end()); + is_valid = key_condition.mayBeTrueInRange(partition_value.size(), index_value.data(), index_value.data(), partition_key.data_types); + partition_filter_map.emplace(partition_id, is_valid); + } + if (is_valid) + res += part->rows_count; + } + return res; +} + std::optional StorageMergeTree::totalBytes() const { return getTotalActiveSizeInBytes(); diff --git a/src/Storages/StorageMergeTree.h b/src/Storages/StorageMergeTree.h index 5662f9e0088..7d092602703 100644 --- a/src/Storages/StorageMergeTree.h +++ b/src/Storages/StorageMergeTree.h @@ 
-47,6 +47,7 @@ public: unsigned num_streams) override; std::optional totalRows() const override; + std::optional totalRowsByPartitionPredicate(const SelectQueryInfo &, const Context &) const override; std::optional totalBytes() const override; BlockOutputStreamPtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, const Context & context) override; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 9613bd5111d..9bd749a32ff 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -3563,6 +3563,36 @@ std::optional StorageReplicatedMergeTree::totalRows() const return res; } +std::optional StorageReplicatedMergeTree::totalRowsByPartitionPredicate(const SelectQueryInfo & query_info, const Context & context) const +{ + auto metadata_snapshot = getInMemoryMetadataPtr(); + const auto & partition_key = metadata_snapshot->getPartitionKey(); + Names partition_key_columns = partition_key.column_names; + KeyCondition key_condition( + query_info, context, partition_key_columns, partition_key.expression, true /* single_point */, true /* strict */); + if (key_condition.alwaysUnknownOrTrue()) + return {}; + std::unordered_map partition_filter_map; + size_t res = 0; + foreachCommittedParts([&](auto & part) + { + const auto & partition_id = part->info.partition_id; + bool is_valid; + if (auto it = partition_filter_map.find(partition_id); it != partition_filter_map.end()) + is_valid = it->second; + else + { + const auto & partition_value = part->partition.value; + std::vector index_value(partition_value.begin(), partition_value.end()); + is_valid = key_condition.mayBeTrueInRange(partition_value.size(), index_value.data(), index_value.data(), partition_key.data_types); + partition_filter_map.emplace(partition_id, is_valid); + } + if (is_valid) + res += part->rows_count; + }); + return res; +} + std::optional StorageReplicatedMergeTree::totalBytes() const { UInt64 res = 0; diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index d851082d5c2..1ed2066f221 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -97,6 +97,7 @@ public: unsigned num_streams) override; std::optional totalRows() const override; + std::optional totalRowsByPartitionPredicate(const SelectQueryInfo & query_info, const Context & context) const override; std::optional totalBytes() const override; BlockOutputStreamPtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, const Context & context) override; diff --git a/tests/queries/0_stateless/00636_partition_key_parts_pruning.sh b/tests/queries/0_stateless/00636_partition_key_parts_pruning.sh index aad54fc0a73..8150d52abc9 100755 --- a/tests/queries/0_stateless/00636_partition_key_parts_pruning.sh +++ b/tests/queries/0_stateless/00636_partition_key_parts_pruning.sh @@ -30,14 +30,14 @@ ${CLICKHOUSE_CLIENT} --query="INSERT INTO composite_partition_key VALUES \ ${CLICKHOUSE_CLIENT} --query="INSERT INTO composite_partition_key VALUES \ (301, 20, 3), (302, 21, 3), (303, 22, 3)" -${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE a > 400 FORMAT XML" | grep -F rows_read | sed 's/^[ \t]*//g' -${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE b = 11 FORMAT XML" | grep -F rows_read | sed 's/^[ \t]*//g' -${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE c = 4 
FORMAT XML" | grep -F rows_read | sed 's/^[ \t]*//g' +${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE a > 400 FORMAT XML SETTINGS optimize_trivial_count_query = 0" | grep -F rows_read | sed 's/^[ \t]*//g' +${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE b = 11 FORMAT XML SETTINGS optimize_trivial_count_query = 0" | grep -F rows_read | sed 's/^[ \t]*//g' +${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE c = 4 FORMAT XML SETTINGS optimize_trivial_count_query = 0" | grep -F rows_read | sed 's/^[ \t]*//g' -${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE a < 200 AND c = 2 FORMAT XML" | grep -F rows_read | sed 's/^[ \t]*//g' -${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE a = 301 AND b < 20 FORMAT XML" | grep -F rows_read | sed 's/^[ \t]*//g' -${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE b >= 12 AND c = 2 FORMAT XML" | grep -F rows_read | sed 's/^[ \t]*//g' +${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE a < 200 AND c = 2 FORMAT XML SETTINGS optimize_trivial_count_query = 0" | grep -F rows_read | sed 's/^[ \t]*//g' +${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE a = 301 AND b < 20 FORMAT XML SETTINGS optimize_trivial_count_query = 0" | grep -F rows_read | sed 's/^[ \t]*//g' +${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE b >= 12 AND c = 2 FORMAT XML SETTINGS optimize_trivial_count_query = 0" | grep -F rows_read | sed 's/^[ \t]*//g' -${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE a = 301 AND b = 21 AND c = 3 FORMAT XML" | grep -F rows_read | sed 's/^[ \t]*//g' +${CLICKHOUSE_CLIENT} --query="SELECT count() FROM composite_partition_key WHERE a = 301 AND b = 21 AND c = 3 FORMAT XML SETTINGS optimize_trivial_count_query = 0" | grep -F rows_read | sed 's/^[ \t]*//g' ${CLICKHOUSE_CLIENT} --query="DROP TABLE composite_partition_key" diff --git a/tests/queries/0_stateless/01505_trivial_count_with_partition_predicate.reference b/tests/queries/0_stateless/01505_trivial_count_with_partition_predicate.reference new file mode 100644 index 00000000000..4fe8dbf8cfb --- /dev/null +++ b/tests/queries/0_stateless/01505_trivial_count_with_partition_predicate.reference @@ -0,0 +1,9 @@ +0 +0 +2 +1 +1 +0 +2 +0 +3 diff --git a/tests/queries/0_stateless/01505_trivial_count_with_partition_predicate.sql b/tests/queries/0_stateless/01505_trivial_count_with_partition_predicate.sql new file mode 100644 index 00000000000..110e883803d --- /dev/null +++ b/tests/queries/0_stateless/01505_trivial_count_with_partition_predicate.sql @@ -0,0 +1,45 @@ +drop table if exists test1; + +create table test1(p DateTime, k int) engine MergeTree partition by toDate(p) order by k; +insert into test1 values ('2020-09-01 00:01:02', 1), ('2020-09-01 20:01:03', 2), ('2020-09-02 00:01:03', 3); + +set max_rows_to_read = 1; +-- non-optimized +select count() from test1 settings max_parallel_replicas = 3; -- { serverError 158; } +-- optimized (toYear is monotonic and we provide the partition expr as is) +select count() from test1 where toYear(toDate(p)) = 1999; +-- non-optimized (toDate(DateTime) is always monotonic, but we cannot relaxing the predicates to do trivial count()) +select count() from test1 where p > toDateTime('2020-09-01 10:00:00'); -- { serverError 158; } +-- optimized (partition expr wrapped with non-monotonic 
functions) +select count() FROM test1 where toDate(p) = '2020-09-01' and sipHash64(toString(toDate(p))) % 2 = 1; +select count() FROM test1 where toDate(p) = '2020-09-01' and sipHash64(toString(toDate(p))) % 2 = 0; +-- non-optimized (some predicate depends on non-partition_expr columns) +select count() FROM test1 where toDate(p) = '2020-09-01' and k = 2; -- { serverError 158; } +-- optimized +select count() from test1 where toDate(p) > '2020-09-01'; + +create table test_tuple(p DateTime, i int, j int) engine MergeTree partition by (toDate(p), i) order by j; + +insert into test_tuple values ('2020-09-01 00:01:02', 1, 2), ('2020-09-01 00:01:03', 2, 3), ('2020-09-02 00:01:03', 3, 4); + +-- optimized +select count() from test_tuple where toDate(p) > '2020-09-01'; +-- optimized +select count() from test_tuple where toDate(p) > '2020-09-01' and i = 1; +-- optimized +select count() from test_tuple where i > 1; +-- optimized +select count() from test_tuple where i < 1; + +create table test_two_args(i int, j int, k int) engine MergeTree partition by i + j order by k; + +insert into test_two_args values (1, 2, 3), (2, 1, 3), (0, 3, 4); + +-- optimized +select count() from test_two_args where i + j = 3; +-- non-optimized +select count() from test_two_args where i = 1; -- { serverError 158; } + +drop table test1; +drop table test_tuple; +drop table test_two_args; From 6802db6954e1ce36a6c1eea997f47f7331c139a9 Mon Sep 17 00:00:00 2001 From: Olga Revyakina Date: Mon, 12 Oct 2020 09:59:35 +0300 Subject: [PATCH 083/432] Database or Table Engine descrition template upd --- docs/README.md | 2 +- ...ate-table-engine.md => template-engine.md} | 19 ++++++++++++++++--- 2 files changed, 17 insertions(+), 4 deletions(-) rename docs/_description_templates/{template-table-engine.md => template-engine.md} (59%) diff --git a/docs/README.md b/docs/README.md index c7fa0221726..8b3066501bf 100644 --- a/docs/README.md +++ b/docs/README.md @@ -195,7 +195,7 @@ Templates: - [Function](_description_templates/template-function.md) - [Setting](_description_templates/template-setting.md) -- [Table engine](_description_templates/template-table-engine.md) +- [Database or Table engine](_description_templates/template-engine.md) - [System table](_description_templates/template-system-table.md) diff --git a/docs/_description_templates/template-table-engine.md b/docs/_description_templates/template-engine.md similarity index 59% rename from docs/_description_templates/template-table-engine.md rename to docs/_description_templates/template-engine.md index c1bfcb3ec86..35181881134 100644 --- a/docs/_description_templates/template-table-engine.md +++ b/docs/_description_templates/template-engine.md @@ -1,8 +1,14 @@ # EngineName {#enginename} -- What the engine does. +- What the Database/Table engine does. - Relations with other engines if they exist. +## Creating a Database {#creating-a-database} +``` sql + CREATE DATABASE ... +``` +or + ## Creating a Table {#creating-a-table} ``` sql CREATE TABLE ... @@ -10,12 +16,19 @@ **Engine Parameters** -**Query Clauses** +**Query Clauses** (for Table engines only) -## Virtual columns {#virtual-columns} +## Virtual columns {#virtual-columns} (for Table engines only) List and virtual columns with description, if they exist. 
+## Data Types Support {#data_types-support} (for Database engines only) + +| EngineName | ClickHouse | +|-----------------------|------------------------------------| +| NativeDataTypeName | [ClickHouseDataTypeName](link#) | + + ## Specifics and recommendations {#specifics-and-recommendations} Algorithms From 20ebd4fd5bde96ff52ba2fa680d1b6a12b08191b Mon Sep 17 00:00:00 2001 From: nikitamikhaylov Date: Mon, 12 Oct 2020 19:37:04 +0300 Subject: [PATCH 084/432] better --- .../AggregateFunctionWelchTTest.cpp | 26 +-- .../AggregateFunctionWelchTTest.h | 205 +++++------------- .../0_stateless/01322_welch_ttest.reference | 8 +- .../queries/0_stateless/01322_welch_ttest.sql | 19 +- 4 files changed, 81 insertions(+), 177 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index 3d2e98e2a0e..d9fce97680c 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -21,41 +21,25 @@ namespace DB namespace { -AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, - const DataTypes & argument_types, - const Array & parameters) +AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, const DataTypes & argument_types, const Array & parameters) { assertBinary(name, argument_types); - - // default value - Float64 significance_level = 0.1; - if (parameters.size() > 1) - { - throw Exception("Aggregate function " + name + " requires one parameter or less.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - } - - if (!parameters.empty()) - { - significance_level = applyVisitor(FieldVisitorConvertToNumber(), parameters[0]); - } + assertNoParameters(name, parameters); AggregateFunctionPtr res; if (isDecimal(argument_types[0]) || isDecimal(argument_types[1])) { - throw Exception("Aggregate function " + name + " only supports numerical types.", ErrorCodes::NOT_IMPLEMENTED); + throw Exception("Aggregate function " + name + " only supports numerical types", ErrorCodes::NOT_IMPLEMENTED); } - else { - res.reset(createWithTwoNumericTypes(*argument_types[0], *argument_types[1], significance_level, - argument_types, parameters)); + res.reset(createWithTwoNumericTypes(*argument_types[0], *argument_types[1], argument_types)); } - if (!res) { - throw Exception("Aggregate function " + name + " only supports numerical types.", ErrorCodes::NOT_IMPLEMENTED); + throw Exception("Aggregate function " + name + " only supports numerical types", ErrorCodes::NOT_IMPLEMENTED); } return res; diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index 2f56e5e6b6c..e445278e9e7 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -14,6 +14,7 @@ #include #include #include +#include #include @@ -24,38 +25,10 @@ extern const int BAD_ARGUMENTS; namespace DB { -// hard-codded values - part of the algorithm -#define SIGN_LVL_CNT 6 - -Float64 CriticalValuesTable[SIGN_LVL_CNT][102] = { - // for significance level = 0.2 - {0.2, 3.078, 1.886, 1.638, 1.533, 1.476, 1.44, 1.415, 1.397, 1.383, 1.372, 1.363, 1.356, 1.35, 1.345, 1.341, 1.337, 1.333, 1.33, 1.328, 1.325, 1.323, 1.321, 1.319, 1.318, 1.316, 1.315, 1.314, 1.313, 1.311, 1.31, 1.309, 1.309, 1.308, 1.307, 1.306, 1.306, 1.305, 1.304, 1.304, 1.303, 1.303, 1.302, 1.302, 1.301, 1.301, 1.3, 1.3, 1.299, 1.299, 1.299, 1.298, 1.298, 1.298, 1.297, 1.297, 1.297, 
1.297, 1.296, 1.296, 1.296, 1.296, 1.295, 1.295, 1.295, 1.295, 1.295, 1.294, 1.294, 1.294, 1.294, 1.294, 1.293, 1.293, 1.293, 1.293, 1.293, 1.293, 1.292, 1.292, 1.292, 1.292, 1.292, 1.292, 1.292, 1.292, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.291, 1.29, 1.29, 1.29, 1.29, 1.29, 1.282}, - - // for significance level = 0.1 - {0.1, 6.314, 2.92, 2.353, 2.132, 2.015, 1.943, 1.895, 1.86, 1.833, 1.812, 1.796, 1.782, 1.771, 1.761, 1.753, 1.746, 1.74, 1.734, 1.729, 1.725, 1.721, 1.717, 1.714, 1.711, 1.708, 1.706, 1.703, 1.701, 1.699, 1.697, 1.696, 1.694, 1.692, 1.691, 1.69, 1.688, 1.687, 1.686, 1.685, 1.684, 1.683, 1.682, 1.681, 1.68, 1.679, 1.679, 1.678, 1.677, 1.677, 1.676, 1.675, 1.675, 1.674, 1.674, 1.673, 1.673, 1.672, 1.672, 1.671, 1.671, 1.67, 1.67, 1.669, 1.669, 1.669, 1.668, 1.668, 1.668, 1.667, 1.667, 1.667, 1.666, 1.666, 1.666, 1.665, 1.665, 1.665, 1.665, 1.664, 1.664, 1.664, 1.664, 1.663, 1.663, 1.663, 1.663, 1.663, 1.662, 1.662, 1.662, 1.662, 1.662, 1.661, 1.661, 1.661, 1.661, 1.661, 1.661, 1.66, 1.66, 1.645}, - - // for significance level = 0.05 - {0.05, 12.706, 4.303, 3.182, 2.776, 2.571, 2.447, 2.365, 2.306, 2.262, 2.228, 2.201, 2.179, 2.16, 2.145, 2.131, 2.12, 2.11, 2.101, 2.093, 2.086, 2.08, 2.074, 2.069, 2.064, 2.06, 2.056, 2.052, 2.048, 2.045, 2.042, 2.04, 2.037, 2.035, 2.032, 2.03, 2.028, 2.026, 2.024, 2.023, 2.021, 2.02, 2.018, 2.017, 2.015, 2.014, 2.013, 2.012, 2.011, 2.01, 2.009, 2.008, 2.007, 2.006, 2.005, 2.004, 2.003, 2.002, 2.002, 2.001, 2.0, 2.0, 1.999, 1.998, 1.998, 1.997, 1.997, 1.996, 1.995, 1.995, 1.994, 1.994, 1.993, 1.993, 1.993, 1.992, 1.992, 1.991, 1.991, 1.99, 1.99, 1.99, 1.989, 1.989, 1.989, 1.988, 1.988, 1.988, 1.987, 1.987, 1.987, 1.986, 1.986, 1.986, 1.986, 1.985, 1.985, 1.985, 1.984, 1.984, 1.984, 1.96}, - - // for significance level = 0.02 - {0.02, 31.821, 6.965, 4.541, 3.747, 3.365, 3.143, 2.998, 2.896, 2.821, 2.764, 2.718, 2.681, 2.65, 2.624, 2.602, 2.583, 2.567, 2.552, 2.539, 2.528, 2.518, 2.508, 2.5, 2.492, 2.485, 2.479, 2.473, 2.467, 2.462, 2.457, 2.453, 2.449, 2.445, 2.441, 2.438, 2.434, 2.431, 2.429, 2.426, 2.423, 2.421, 2.418, 2.416, 2.414, 2.412, 2.41, 2.408, 2.407, 2.405, 2.403, 2.402, 2.4, 2.399, 2.397, 2.396, 2.395, 2.394, 2.392, 2.391, 2.39, 2.389, 2.388, 2.387, 2.386, 2.385, 2.384, 2.383, 2.382, 2.382, 2.381, 2.38, 2.379, 2.379, 2.378, 2.377, 2.376, 2.376, 2.375, 2.374, 2.374, 2.373, 2.373, 2.372, 2.372, 2.371, 2.37, 2.37, 2.369, 2.369, 2.368, 2.368, 2.368, 2.367, 2.367, 2.366, 2.366, 2.365, 2.365, 2.365, 2.364, 2.326}, - - // for significance level = 0.01 - {0.01, 63.657, 9.925, 5.841, 4.604, 4.032, 3.707, 3.499, 3.355, 3.25, 3.169, 3.106, 3.055, 3.012, 2.977, 2.947, 2.921, 2.898, 2.878, 2.861, 2.845, 2.831, 2.819, 2.807, 2.797, 2.787, 2.779, 2.771, 2.763, 2.756, 2.75, 2.744, 2.738, 2.733, 2.728, 2.724, 2.719, 2.715, 2.712, 2.708, 2.704, 2.701, 2.698, 2.695, 2.692, 2.69, 2.687, 2.685, 2.682, 2.68, 2.678, 2.676, 2.674, 2.672, 2.67, 2.668, 2.667, 2.665, 2.663, 2.662, 2.66, 2.659, 2.657, 2.656, 2.655, 2.654, 2.652, 2.651, 2.65, 2.649, 2.648, 2.647, 2.646, 2.645, 2.644, 2.643, 2.642, 2.641, 2.64, 2.64, 2.639, 2.638, 2.637, 2.636, 2.636, 2.635, 2.634, 2.634, 2.633, 2.632, 2.632, 2.631, 2.63, 2.63, 2.629, 2.629, 2.628, 2.627, 2.627, 2.626, 2.626, 2.576}, - - // for significance level = 0.002 - {0.002, 318.313, 22.327, 10.215, 7.173, 5.893, 5.208, 4.782, 4.499, 4.296, 4.143, 4.024, 3.929, 3.852, 3.787, 3.733, 3.686, 3.646, 3.61, 3.579, 3.552, 3.527, 3.505, 3.485, 3.467, 3.45, 3.435, 3.421, 3.408, 3.396, 3.385, 3.375, 3.365, 
3.356, 3.348, 3.34, 3.333, 3.326, 3.319, 3.313, 3.307, 3.301, 3.296, 3.291, 3.286, 3.281, 3.277, 3.273, 3.269, 3.265, 3.261, 3.258, 3.255, 3.251, 3.248, 3.245, 3.242, 3.239, 3.237, 3.234, 3.232, 3.229, 3.227, 3.225, 3.223, 3.22, 3.218, 3.216, 3.214, 3.213, 3.211, 3.209, 3.207, 3.206, 3.204, 3.202, 3.201, 3.199, 3.198, 3.197, 3.195, 3.194, 3.193, 3.191, 3.19, 3.189, 3.188, 3.187, 3.185, 3.184, 3.183, 3.182, 3.181, 3.18, 3.179, 3.178, 3.177, 3.176, 3.175, 3.175, 3.174, 3.09} -}; - -// our algorithm implementation via vectors: -// https://gist.github.com/ltybc-coder/792748cfdb2f7cadef424ffb7b011c71 -// col, col, bool template -//template struct AggregateFunctionWelchTTestData final { - size_t size_x = 0; size_t size_y = 0; X sum_x = static_cast(0); @@ -65,25 +38,6 @@ struct AggregateFunctionWelchTTestData final Float64 mean_x = static_cast(0); Float64 mean_y = static_cast(0); - /* - not yet sure how to use them - void add_x(X x) - { - mean_x = (Float64)(sum_x + x) / (size_x + 1); - size_x ++; - sum_x += x; - square_sum_x += x * x; - } - - void add_y(Y y) - { - mean_y = (sum_y + y) / (size_y + 1); - size_y ++; - sum_y += y; - square_sum_y += y * y; - } - */ - void add(X x, Y y) { sum_x += x; @@ -142,100 +96,80 @@ struct AggregateFunctionWelchTTestData final return size_x; } - Float64 getSx() const + Float64 getSxSquared() const { - return static_cast(square_sum_x + size_x * mean_x * mean_x - 2 * mean_x * sum_x) / (size_x - 1); + /// The original formulae looks like \frac{1}{size_x - 1} \sum_{i = 1}^{size_x}{(x_i - \bar{x}) ^ 2} + /// But we made some mathematical transformations not to store original sequences. + /// Also we dropped sqrt, because later it will be squared later. + return static_cast(square_sum_x + size_x * std::pow(mean_x, 2) - 2 * mean_x * sum_x) / (size_x - 1); } - Float64 getSy() const + Float64 getSySquared() const { - return static_cast(square_sum_y + size_y * mean_y * mean_y - 2 * mean_y * sum_y) / (size_y - 1); + /// The original formulae looks like \frac{1}{size_y - 1} \sum_{i = 1}^{size_y}{(y_i - \bar{y}) ^ 2} + /// But we made some mathematical transformations not to store original sequences. + /// Also we dropped sqrt, because later it will be squared later. 
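+ /// [Editorial derivation, not part of the original patch; plain algebra under exact
+ /// arithmetic] The transformation referred to above is
+ /// \sum_i (y_i - \bar{y})^2 = \sum_i y_i^2 - 2 \bar{y} \sum_i y_i + size_y \cdot \bar{y}^2,
+ /// which is why only the running sum, the sum of squares and the mean need to be stored.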
+ return static_cast(square_sum_y + size_y * std::pow(mean_y, 2) - 2 * mean_y * sum_y) / (size_y - 1); } - Float64 getT(Float64 sx, Float64 sy) const + Float64 getTStatisticSquared() const { - if (sx == 0 && sy == 0) + if (size_x == 0 || size_y == 0) { - throw Exception("division by zero encountered in Aggregate function WelchTTest", ErrorCodes::BAD_ARGUMENTS); + throw Exception("Division by zero encountered in Aggregate function WelchTTest", ErrorCodes::BAD_ARGUMENTS); } - if (sx == -sy && size_x == size_y) - { - throw Exception("division by zero encountered in Aggregate function WelchTTest", ErrorCodes::BAD_ARGUMENTS); - } - - return static_cast(mean_x - mean_y) / std::sqrt(sx / size_x + sy / size_y); + return std::pow(mean_x - mean_y, 2) / (getSxSquared() / size_x + getSySquared() / size_y); } - Float64 getDegreesOfFreedom(Float64 sx, Float64 sy) const + Float64 getDegreesOfFreedom() const { - return static_cast(sx / size_x + sy / size_y) * (sx / size_x + sy / size_y) / - ((sx * sx / (size_x * size_x * (size_x - 1))) + (sy * sy / (size_y * size_y * (size_y - 1)))); + auto sx = getSxSquared(); + auto sy = getSySquared(); + Float64 numerator = std::pow(sx / size_x + sy / size_y, 2); + Float64 denominator_first = std::pow(sx, 2) / (std::pow(size_x, 2) * (size_x - 1)); + Float64 denominator_second = std::pow(sy, 2) / (std::pow(size_y, 2) * (size_y - 1)); + return numerator / (denominator_first + denominator_second); } - UInt8 getResult(Float64 t, Float64 dof, Float64 parametr) const + static Float64 integrateSimpson(Float64 a, Float64 b, std::function func, size_t iterations = 1e6) { - //find our table - int table = 0; - for (int i = 0; i < SIGN_LVL_CNT; ++i) - { - if (CriticalValuesTable[i][0] == parametr) - { - table = i; - } - } + double h = (b - a) / iterations; + Float64 sum_odds = 0.0; + for (size_t i = 1; i < iterations; i += 2) + sum_odds += func(a + i * h); + Float64 sum_evens = 0.0; + for (size_t i = 2; i < iterations; i += 2) + sum_evens += func(a + i * h); + return (func(a) + func(b) + 2 * sum_evens + 4 * sum_odds) * h / 3; + } - //round or make infinity dof - int i_dof = static_cast(dof); + Float64 getPValue() const + { + const Float64 v = getDegreesOfFreedom(); + const Float64 t = getTStatisticSquared(); + auto f = [&v] (double x) { return std::pow(x, v/2 - 1) / std::sqrt(1 - x); }; + Float64 numenator = integrateSimpson(0, v / (t + v), f); + Float64 denominator = std::exp(std::lgammal(v/2) + std::lgammal(0.5) - std::lgammal(v/2 + 0.5)); + return numenator / denominator; + } - if (i_dof > 100) - { - i_dof = 101; - } - - if (i_dof < 1) - { - i_dof = 1; - } - - //check if abs of t is greater than table[dof] - t = abs(t); - if (t >= CriticalValuesTable[table][i_dof]) - { - return static_cast(0); - //in this case we reject the null hypothesis - } - else - { - return static_cast(1); - } + Float64 getResult() const + { + return getPValue(); } }; +/// Returns p-value template -class AggregateFunctionWelchTTest : public - IAggregateFunctionDataHelper< - AggregateFunctionWelchTTestData, - AggregateFunctionWelchTTest - > +class AggregateFunctionWelchTTest : + public IAggregateFunctionDataHelper,AggregateFunctionWelchTTest> { -private: - Float64 significance_level; - public: - AggregateFunctionWelchTTest( - Float64 sglvl_, - const DataTypes & arguments, - const Array & params - ): - IAggregateFunctionDataHelper< - AggregateFunctionWelchTTestData, - AggregateFunctionWelchTTest - > ({arguments}, params), significance_level(sglvl_) - { - // notice: arguments has been in factory - } + 
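+ /// [Editorial note on getPValue() above; an identification of a standard identity, not
+ /// text from the original patch] With the squared statistic t^2 and Welch-Satterthwaite
+ /// degrees of freedom v, the two-sided p-value equals the regularized incomplete beta
+ /// function I_{v/(v + t^2)}(v/2, 1/2): the Simpson rule integrates the numerator
+ /// \int_0^{v/(v+t^2)} x^{v/2 - 1} (1 - x)^{-1/2} dx, and the lgamma expression supplies
+ /// the normalizer B(v/2, 1/2).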
AggregateFunctionWelchTTest(const DataTypes & arguments) + : IAggregateFunctionDataHelper, AggregateFunctionWelchTTest> ({arguments}, {}) + {} String getName() const override { @@ -244,15 +178,10 @@ public: DataTypePtr getReturnType() const override { - return std::make_shared>(); + return std::make_shared>(); } - void add( - AggregateDataPtr place, - const IColumn ** columns, - size_t row_num, - Arena * - ) const override + void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena *) const override { auto col_x = assert_cast *>(columns[0]); auto col_y = assert_cast *>(columns[1]); @@ -263,34 +192,22 @@ public: this->data(place).add(x, y); } - void merge( - AggregateDataPtr place, - ConstAggregateDataPtr rhs, Arena * - ) const override + void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena *) const override { this->data(place).merge(this->data(rhs)); } - void serialize( - ConstAggregateDataPtr place, - WriteBuffer & buf - ) const override + void serialize(ConstAggregateDataPtr place, WriteBuffer & buf) const override { this->data(place).serialize(buf); } - void deserialize( - AggregateDataPtr place, - ReadBuffer & buf, Arena * - ) const override + void deserialize(AggregateDataPtr place, ReadBuffer & buf, Arena *) const override { this->data(place).deserialize(buf); } - void insertResultInto( - AggregateDataPtr place, - IColumn & to - ) const override + void insertResultInto(AggregateDataPtr place, IColumn & to, Arena * /*arena*/) const override { size_t size_x = this->data(place).getSizeX(); size_t size_y = this->data(place).getSizeY(); @@ -300,14 +217,8 @@ public: throw Exception("Aggregate function " + getName() + " requires samples to be of size > 1", ErrorCodes::BAD_ARGUMENTS); } - Float64 sx = this->data(place).getSx(); - Float64 sy = this->data(place).getSy(); - Float64 t_value = this->data(place).getT(sx, sy); - Float64 dof = this->data(place).getDegreesOfFreedom(sx, sy); - UInt8 result = this->data(place).getResult(t_value, dof, significance_level); - - auto & column = static_cast &>(to); - column.getData().push_back(result); + auto & column = static_cast &>(to); + column.getData().push_back(this->data(place).getResult()); } }; diff --git a/tests/queries/0_stateless/01322_welch_ttest.reference b/tests/queries/0_stateless/01322_welch_ttest.reference index aa47d0d46d4..015dd503b7e 100644 --- a/tests/queries/0_stateless/01322_welch_ttest.reference +++ b/tests/queries/0_stateless/01322_welch_ttest.reference @@ -1,2 +1,6 @@ -0 -0 +0.021378001462867 +0.021378 +0.090773324285671 +0.09077332 +0.00339907162713746 +0.00339907 diff --git a/tests/queries/0_stateless/01322_welch_ttest.sql b/tests/queries/0_stateless/01322_welch_ttest.sql index b8e881a069b..073e71f69fe 100644 --- a/tests/queries/0_stateless/01322_welch_ttest.sql +++ b/tests/queries/0_stateless/01322_welch_ttest.sql @@ -1,13 +1,18 @@ DROP TABLE IF EXISTS welch_ttest; CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; - -INSERT INTO welch_ttest VALUES (2224.779, 2465.0984), (2588.11, 1909.0328), (1979.625, 1175.8747), (2137.442, 2171.378), (2565.818, 2193.2821), (1754.023, 2854.9475), (1654.947, 2060.1777), (1789.256, 2258.2366), (2320.659, 1856.0535), (2039.532, 1501.8126), (1983.497, 2987.6542), (2232.903, 1681.9778), (2513.93, 2479.6776), (2066.382, 1259.8584), (2492.715, 1120.9043), (1988.287, 1982.1213), (1840.036, 3012.3949), (2249.749, 2252.373), (1766.982, 2591.3122), (1724.84, 1940.589), (0, 1995.185), (0, 2535.1344), (0, 597.3155), (0, 2343.2192), (0, 3154.84), 
(0, 1125.1966), (0, 1227.8842), (0, 1692.805), (0, 2539.6772), (0, 1936.1927), (0, 1783.7795), (0, 1703.4384), (0, 2077.194), (0, 1614.4071), (0, 2360.0365), (0, 1619.2781), (0, 2033.5109), (0, 2333.7834), (0, 2144.0485), (0, 2583.8709), (0, 1116.7213), (0, 1601.9383), (0, 1570.0431), (0, 1963.0777), (0, 1639.2533), (0, 2277.5223), (0, 1991.9286), (0, 2044.3338), (0, 1794.4781), (0, 1597.9119) -SELECT WelchTTest(0.1)(left, right) from welch_ttest; - +INSERT INTO welch_ttest VALUES (27.5,27.1), (21.0,22.0), (19.0,20.8), (23.6,23.4), (17.0,23.4), (17.9,23.5), (16.9,25.8), (20.1,22.0), (21.9,24.8), (22.6,20.2), (23.1,21.9), (19.6,22.1), (19.0,22.9), (21.7,20.5), (21.4,24.4); +SELECT '0.021378001462867'; +SELECT roundBankers(WelchTTest(left, right), 8) from welch_ttest; DROP TABLE IF EXISTS welch_ttest; + CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; +INSERT INTO welch_ttest VALUES (30.02,29.89), (29.99,29.93), (30.11,29.72), (29.97,29.98), (30.01,30.02), (29.99,29.98); +SELECT '0.090773324285671'; +SELECT roundBankers(WelchTTest(left, right), 8) from welch_ttest; +DROP TABLE IF EXISTS welch_ttest; -INSERT INTO welch_ttest VALUES (2224.779, 2465.0984), (2588.11, 1909.0328), (1979.625, 1175.8747), (2137.442, 2171.378), (2565.818, 2193.2821), (1754.023, 2854.9475), (1654.947, 2060.1777), (1789.256, 2258.2366), (2320.659, 1856.0535), (2039.532, 1501.8126), (1983.497, 2987.6542), (2232.903, 1681.9778), (2513.93, 2479.6776), (2066.382, 1259.8584), (2492.715, 1120.9043), (1988.287, 1982.1213), (1840.036, 3012.3949), (2249.749, 2252.373), (1766.982, 2591.3122), (1724.84, 1940.589), (0, 1995.185), (0, 2535.1344), (0, 597.3155), (0, 2343.2192), (0, 3154.84), (0, 1125.1966), (0, 1227.8842), (0, 1692.805), (0, 2539.6772), (0, 1936.1927), (0, 1783.7795), (0, 1703.4384), (0, 2077.194), (0, 1614.4071), (0, 2360.0365), (0, 1619.2781), (0, 2033.5109), (0, 2333.7834), (0, 2144.0485), (0, 2583.8709), (0, 1116.7213), (0, 1601.9383), (0, 1570.0431), (0, 1963.0777), (0, 1639.2533), (0, 2277.5223), (0, 1991.9286), (0, 2044.3338), (0, 1794.4781), (0, 1597.9119) -SELECT WelchTTest(0.02)(left, right) from welch_ttest; - +CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; +INSERT INTO welch_ttest VALUES (0.010268,0.159258), (0.000167,0.136278), (0.000167,0.122389); +SELECT '0.00339907162713746'; +SELECT roundBankers(WelchTTest(left, right), 8) from welch_ttest; DROP TABLE IF EXISTS welch_ttest; \ No newline at end of file From e65a2a1cbd6d784e8a0307f9d84e0dde9c241e16 Mon Sep 17 00:00:00 2001 From: nikitamikhaylov Date: Mon, 12 Oct 2020 21:10:01 +0300 Subject: [PATCH 085/432] add student t test --- .../AggregateFunctionStudentTTest.cpp | 56 +++++ .../AggregateFunctionStudentTTest.h | 230 ++++++++++++++++++ .../registerAggregateFunctions.cpp | 1 + .../registerAggregateFunctions.h | 1 + 4 files changed, 288 insertions(+) create mode 100644 src/AggregateFunctions/AggregateFunctionStudentTTest.cpp create mode 100644 src/AggregateFunctions/AggregateFunctionStudentTTest.h diff --git a/src/AggregateFunctions/AggregateFunctionStudentTTest.cpp b/src/AggregateFunctions/AggregateFunctionStudentTTest.cpp new file mode 100644 index 00000000000..b6f32409946 --- /dev/null +++ b/src/AggregateFunctions/AggregateFunctionStudentTTest.cpp @@ -0,0 +1,56 @@ +#include +#include +#include +#include "registerAggregateFunctions.h" + +#include +#include + + +// the return type is boolean (we use UInt8 as we do not have boolean in clickhouse) + +namespace ErrorCodes +{ +extern const int 
NUMBER_OF_ARGUMENTS_DOESNT_MATCH; +extern const int NOT_IMPLEMENTED; +} + +namespace DB +{ + +namespace +{ + +AggregateFunctionPtr createAggregateFunctionStudentTTest(const std::string & name, const DataTypes & argument_types, const Array & parameters) +{ + assertBinary(name, argument_types); + assertNoParameters(name, parameters); + + AggregateFunctionPtr res; + + if (isDecimal(argument_types[0]) || isDecimal(argument_types[1])) + { + throw Exception("Aggregate function " + name + " only supports numerical types", ErrorCodes::NOT_IMPLEMENTED); + } + else + { + res.reset(createWithTwoNumericTypes(*argument_types[0], *argument_types[1], argument_types)); + } + + if (!res) + { + throw Exception("Aggregate function " + name + " only supports numerical types", ErrorCodes::NOT_IMPLEMENTED); + } + + return res; +} + +} + + +void registerAggregateFunctionStudentTTest(AggregateFunctionFactory & factory) +{ + factory.registerFunction("StudentTTest", createAggregateFunctionStudentTTest, AggregateFunctionFactory::CaseInsensitive); +} + +} diff --git a/src/AggregateFunctions/AggregateFunctionStudentTTest.h b/src/AggregateFunctions/AggregateFunctionStudentTTest.h new file mode 100644 index 00000000000..b03f9178709 --- /dev/null +++ b/src/AggregateFunctions/AggregateFunctionStudentTTest.h @@ -0,0 +1,230 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace ErrorCodes +{ +extern const int BAD_ARGUMENTS; +} + +namespace DB +{ + +template +struct AggregateFunctionStudentTTestData final +{ + size_t size_x = 0; + size_t size_y = 0; + X sum_x = static_cast(0); + Y sum_y = static_cast(0); + X square_sum_x = static_cast(0); + Y square_sum_y = static_cast(0); + Float64 mean_x = static_cast(0); + Float64 mean_y = static_cast(0); + + void add(X x, Y y) + { + sum_x += x; + sum_y += y; + size_x++; + size_y++; + mean_x = static_cast(sum_x) / size_x; + mean_y = static_cast(sum_y) / size_y; + square_sum_x += x * x; + square_sum_y += y * y; + } + + void merge(const AggregateFunctionStudentTTestData &other) + { + sum_x += other.sum_x; + sum_y += other.sum_y; + size_x += other.size_x; + size_y += other.size_y; + mean_x = static_cast(sum_x) / size_x; + mean_y = static_cast(sum_y) / size_y; + square_sum_x += other.square_sum_x; + square_sum_y += other.square_sum_y; + } + + void serialize(WriteBuffer &buf) const + { + writeBinary(mean_x, buf); + writeBinary(mean_y, buf); + writeBinary(sum_x, buf); + writeBinary(sum_y, buf); + writeBinary(square_sum_x, buf); + writeBinary(square_sum_y, buf); + writeBinary(size_x, buf); + writeBinary(size_y, buf); + } + + void deserialize(ReadBuffer &buf) + { + readBinary(mean_x, buf); + readBinary(mean_y, buf); + readBinary(sum_x, buf); + readBinary(sum_y, buf); + readBinary(square_sum_x, buf); + readBinary(square_sum_y, buf); + readBinary(size_x, buf); + readBinary(size_y, buf); + } + + size_t getSizeY() const + { + return size_y; + } + + size_t getSizeX() const + { + return size_x; + } + + Float64 getSSquared() const + { + /// TODO: Update comment with Tex. + /// The original formulae looks like ... + /// But we made some mathematical transformations not to store original sequences. + /// Also we dropped sqrt, because later it will be squared later. 
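+ /// [Editorial TeX for the TODO above, assuming the standard pooled-variance definition;
+ /// not part of the original patch]
+ /// s^2 = \frac{\sum_i (x_i - \bar{x})^2 + \sum_j (y_j - \bar{y})^2}{size_x + size_y - 2},
+ /// where each sum is expanded as \sum z_i^2 - 2 \bar{z} \sum z_i + n \bar{z}^2 so that
+ /// only sums, squared sums and means need to be kept.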
+ const Float64 all_x = square_sum_x + size_x * std::pow(mean_x, 2) - 2 * mean_x * sum_x; + const Float64 all_y = square_sum_y + size_y * std::pow(mean_y, 2) - 2 * mean_y * sum_y; + return static_cast(all_x + all_y) / (size_x + size_y - 2); + } + + + Float64 getTStatisticSquared() const + { + if (size_x == 0 || size_y == 0) + { + throw Exception("Division by zero encountered in Aggregate function StudentTTest", ErrorCodes::BAD_ARGUMENTS); + } + + if (mean_x - mean_y < 1e-8) + { + return static_cast(0.0); + } + + return std::pow(mean_x - mean_y, 2) / getStandartErrorSquared(); + } + + + Float64 getStandartErrorSquared() const + { + return getSSquared() * (1 / size_x + 1 / size_y); + } + + Float64 getDegreesOfFreedom() const + { + return static_cast(size_x + size_y - 2); + } + + static Float64 integrateSimpson(Float64 a, Float64 b, std::function func, size_t iterations = 1e6) + { + double h = (b - a) / iterations; + Float64 sum_odds = 0.0; + for (size_t i = 1; i < iterations; i += 2) + sum_odds += func(a + i * h); + Float64 sum_evens = 0.0; + for (size_t i = 2; i < iterations; i += 2) + sum_evens += func(a + i * h); + return (func(a) + func(b) + 2 * sum_evens + 4 * sum_odds) * h / 3; + } + + Float64 getPValue() const + { + const Float64 v = getDegreesOfFreedom(); + const Float64 t = getTStatisticSquared(); + std::cout << "getDegreesOfFreedom " << v << " getTStatisticSquared " << t << std::endl; + auto f = [&v] (double x) { return std::pow(x, v/2 - 1) / std::sqrt(1 - x); }; + Float64 numenator = integrateSimpson(0, v / (t + v), f); + Float64 denominator = std::exp(std::lgammal(v/2) + std::lgammal(0.5) - std::lgammal(v/2 + 0.5)); + return numenator / denominator; + } + + Float64 getResult() const + { + return getPValue(); + } +}; + +/// Returns p-value +/// https://cpb-us-w2.wpmucdn.com/voices.uchicago.edu/dist/9/1193/files/2016/01/05b-TandP.pdf +template +class AggregateFunctionStudentTTest : + public IAggregateFunctionDataHelper,AggregateFunctionStudentTTest> +{ + +public: + AggregateFunctionStudentTTest(const DataTypes & arguments) + : IAggregateFunctionDataHelper, AggregateFunctionStudentTTest> ({arguments}, {}) + {} + + String getName() const override + { + return "StudentTTest"; + } + + DataTypePtr getReturnType() const override + { + return std::make_shared>(); + } + + void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena *) const override + { + auto col_x = assert_cast *>(columns[0]); + auto col_y = assert_cast *>(columns[1]); + + X x = col_x->getData()[row_num]; + Y y = col_y->getData()[row_num]; + + this->data(place).add(x, y); + } + + void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena *) const override + { + this->data(place).merge(this->data(rhs)); + } + + void serialize(ConstAggregateDataPtr place, WriteBuffer & buf) const override + { + this->data(place).serialize(buf); + } + + void deserialize(AggregateDataPtr place, ReadBuffer & buf, Arena *) const override + { + this->data(place).deserialize(buf); + } + + void insertResultInto(AggregateDataPtr place, IColumn & to, Arena * /*arena*/) const override + { + size_t size_x = this->data(place).getSizeX(); + size_t size_y = this->data(place).getSizeY(); + + if (size_x < 2 || size_y < 2) + { + throw Exception("Aggregate function " + getName() + " requires samples to be of size > 1", ErrorCodes::BAD_ARGUMENTS); + } + + auto & column = static_cast &>(to); + column.getData().push_back(this->data(place).getResult()); + } + +}; + +}; diff --git 
a/src/AggregateFunctions/registerAggregateFunctions.cpp b/src/AggregateFunctions/registerAggregateFunctions.cpp index fd914443ba2..9fd02ba9d6c 100644 --- a/src/AggregateFunctions/registerAggregateFunctions.cpp +++ b/src/AggregateFunctions/registerAggregateFunctions.cpp @@ -46,6 +46,7 @@ void registerAggregateFunctions() registerAggregateFunctionCategoricalIV(factory); registerAggregateFunctionAggThrow(factory); registerAggregateFunctionWelchTTest(factory); + registerAggregateFunctionStudentTTest(factory); registerAggregateFunctionRankCorrelation(factory); } diff --git a/src/AggregateFunctions/registerAggregateFunctions.h b/src/AggregateFunctions/registerAggregateFunctions.h index de239258fa0..abbba56ed32 100644 --- a/src/AggregateFunctions/registerAggregateFunctions.h +++ b/src/AggregateFunctions/registerAggregateFunctions.h @@ -36,6 +36,7 @@ void registerAggregateFunctionMoving(AggregateFunctionFactory &); void registerAggregateFunctionCategoricalIV(AggregateFunctionFactory &); void registerAggregateFunctionAggThrow(AggregateFunctionFactory &); void registerAggregateFunctionWelchTTest(AggregateFunctionFactory &); +void registerAggregateFunctionStudentTTest(AggregateFunctionFactory &); void registerAggregateFunctionRankCorrelation(AggregateFunctionFactory &); class AggregateFunctionCombinatorFactory; From dbaada559782898e6a992b048e9f6fc58df22f2a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 19 Sep 2020 15:15:47 +0300 Subject: [PATCH 086/432] Another test (cherry picked from commit da87861285e63369bd79e176ce375a8d6ea18b85) --- .../01502_log_tinylog_deadlock_race.reference | 6 ++ .../01502_log_tinylog_deadlock_race.sh | 85 +++++++++++++++++++ 2 files changed, 91 insertions(+) create mode 100644 tests/queries/0_stateless/01502_log_tinylog_deadlock_race.reference create mode 100755 tests/queries/0_stateless/01502_log_tinylog_deadlock_race.sh diff --git a/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.reference b/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.reference new file mode 100644 index 00000000000..4bf85ae79f3 --- /dev/null +++ b/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.reference @@ -0,0 +1,6 @@ +Testing TinyLog +Done TinyLog +Testing StripeLog +Done StripeLog +Testing Log +Done Log diff --git a/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.sh b/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.sh new file mode 100755 index 00000000000..a5b2ff6db8f --- /dev/null +++ b/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash + +set -e + +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. 
"$CURDIR"/../shell_config.sh + + +function thread_create { + while true; do + $CLICKHOUSE_CLIENT --query "CREATE TABLE IF NOT EXISTS $1 (x UInt64, s Array(Nullable(String))) ENGINE = $2" + sleep 0.0$RANDOM + done +} + +function thread_drop { + while true; do + $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS $1" + sleep 0.0$RANDOM + done +} + +function thread_rename { + while true; do + $CLICKHOUSE_CLIENT --query "RENAME TABLE $1 TO $2" 2>&1 | grep -v -F 'Received exception from server' | grep -v -P 'Code: (60|57)' + sleep 0.0$RANDOM + done +} + +function thread_select { + while true; do + $CLICKHOUSE_CLIENT --query "SELECT * FROM $1 FORMAT Null" 2>&1 | grep -v -F 'Received exception from server' | grep -v -P 'Code: (60|218)' + sleep 0.0$RANDOM + done +} + +function thread_insert { + while true; do + $CLICKHOUSE_CLIENT --query "INSERT INTO $1 SELECT rand64(1), [toString(rand64(2))] FROM numbers($2)" 2>&1 | grep -v -F 'Received exception from server' | grep -v -P 'Code: (60|218)' + sleep 0.0$RANDOM + done +} + +function thread_insert_select { + while true; do + $CLICKHOUSE_CLIENT --query "INSERT INTO $1 SELECT * FROM $2" 2>&1 | grep -v -F 'Received exception from server' | grep -v -P 'Code: (60|218)' + sleep 0.0$RANDOM + done +} + +export -f thread_create +export -f thread_drop +export -f thread_rename +export -f thread_select +export -f thread_insert +export -f thread_insert_select + + +# Do randomized queries and expect nothing extraordinary happens. + +function test_with_engine { + echo "Testing $1" + + timeout 10 bash -c "thread_create t1 $1" & + timeout 10 bash -c "thread_create t2 $1" & + timeout 10 bash -c 'thread_drop t1' & + timeout 10 bash -c 'thread_drop t2' & + timeout 10 bash -c 'thread_rename t1 t2' & + timeout 10 bash -c 'thread_rename t2 t1' & + timeout 10 bash -c 'thread_select t1' & + timeout 10 bash -c 'thread_select t2' & + timeout 10 bash -c 'thread_insert t1 5' & + timeout 10 bash -c 'thread_insert t2 10' & + timeout 10 bash -c 'thread_insert_select t1 t2' & + timeout 10 bash -c 'thread_insert_select t2 t1' & + + wait + echo "Done $1" +} + +test_with_engine TinyLog +test_with_engine StripeLog +test_with_engine Log From b3fc6d9b3385a47cc9553c32f1181797b64486e2 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 13 Oct 2020 16:31:02 +0300 Subject: [PATCH 087/432] fix IF [NOT] EXISTS failure --- src/Interpreters/InterpreterDropQuery.cpp | 24 ++++++++----------- src/Interpreters/InterpreterDropQuery.h | 2 +- .../01502_log_tinylog_deadlock_race.reference | 8 ++----- .../01502_log_tinylog_deadlock_race.sh | 7 +++--- 4 files changed, 17 insertions(+), 24 deletions(-) diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index c70431e5238..a9bc738f614 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -47,7 +47,7 @@ BlockIO InterpreterDropQuery::execute() if (!drop.table.empty()) { if (!drop.is_dictionary) - return executeToTable({drop.database, drop.table, drop.uuid}, drop); + return executeToTable(drop); else return executeToDictionary(drop.database, drop.table, drop.kind, drop.if_exists, drop.temporary, drop.no_ddl_lock); } @@ -58,29 +58,25 @@ BlockIO InterpreterDropQuery::execute() } -BlockIO InterpreterDropQuery::executeToTable( - const StorageID & table_id_, - const ASTDropQuery & query) +BlockIO InterpreterDropQuery::executeToTable(const ASTDropQuery & query) { - if (query.temporary || table_id_.database_name.empty()) + auto table_id = StorageID(query); + if 
(query.temporary || table_id.database_name.empty()) { - if (context.tryResolveStorageID(table_id_, Context::ResolveExternal)) - return executeToTemporaryTable(table_id_.getTableName(), query.kind); + if (context.tryResolveStorageID(table_id, Context::ResolveExternal)) + return executeToTemporaryTable(table_id.getTableName(), query.kind); + else + table_id.database_name = context.getCurrentDatabase(); } if (query.temporary) { if (query.if_exists) return {}; - throw Exception("Temporary table " + backQuoteIfNeed(table_id_.table_name) + " doesn't exist", + throw Exception("Temporary table " + backQuoteIfNeed(table_id.table_name) + " doesn't exist", ErrorCodes::UNKNOWN_TABLE); } - auto table_id = query.if_exists ? context.tryResolveStorageID(table_id_, Context::ResolveOrdinary) - : context.resolveStorageID(table_id_, Context::ResolveOrdinary); - if (!table_id) - return {}; - auto ddl_guard = (!query.no_ddl_lock ? DatabaseCatalog::instance().getDDLGuard(table_id.database_name, table_id.table_name) : nullptr); /// If table was already dropped by anyone, an exception will be thrown @@ -255,7 +251,7 @@ BlockIO InterpreterDropQuery::executeToDatabase(const String & database_name, AS for (auto iterator = database->getTablesIterator(context); iterator->isValid(); iterator->next()) { query.table = iterator->name(); - executeToTable({query.database, query.table}, query); + executeToTable(query); } } diff --git a/src/Interpreters/InterpreterDropQuery.h b/src/Interpreters/InterpreterDropQuery.h index b54736b5c21..c5d9aacdfd5 100644 --- a/src/Interpreters/InterpreterDropQuery.h +++ b/src/Interpreters/InterpreterDropQuery.h @@ -31,7 +31,7 @@ private: BlockIO executeToDatabase(const String & database_name, ASTDropQuery::Kind kind, bool if_exists); - BlockIO executeToTable(const StorageID & table_id, const ASTDropQuery & query); + BlockIO executeToTable(const ASTDropQuery & query); BlockIO executeToDictionary(const String & database_name, const String & dictionary_name, ASTDropQuery::Kind kind, bool if_exists, bool is_temporary, bool no_ddl_lock); diff --git a/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.reference b/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.reference index 4bf85ae79f3..c62a2b18918 100644 --- a/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.reference +++ b/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.reference @@ -1,6 +1,2 @@ -Testing TinyLog -Done TinyLog -Testing StripeLog -Done StripeLog -Testing Log -Done Log +Testing Memory +Done Memory diff --git a/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.sh b/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.sh index a5b2ff6db8f..f0b5f0a3568 100755 --- a/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.sh +++ b/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.sh @@ -80,6 +80,7 @@ function test_with_engine { echo "Done $1" } -test_with_engine TinyLog -test_with_engine StripeLog -test_with_engine Log +#test_with_engine TinyLog +#test_with_engine StripeLog +#test_with_engine Log +test_with_engine Memory From cb8d132cca22da7531f345371f75ad6d4e793d61 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 13 Oct 2020 18:00:36 +0300 Subject: [PATCH 088/432] fix deadlock with DDLGuard --- src/Interpreters/DatabaseCatalog.cpp | 27 +++++++++--- src/Interpreters/DatabaseCatalog.h | 4 +- src/Interpreters/InterpreterDropQuery.cpp | 1 + .../0_stateless/01150_ddl_guard_rwr.reference | 0 .../0_stateless/01150_ddl_guard_rwr.sh | 43 +++++++++++++++++++ 5 files changed, 
67 insertions(+), 8 deletions(-) create mode 100644 tests/queries/0_stateless/01150_ddl_guard_rwr.reference create mode 100755 tests/queries/0_stateless/01150_ddl_guard_rwr.sh diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index 203e2292c08..03f0e057821 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -530,7 +530,7 @@ std::unique_ptr DatabaseCatalog::getDDLGuard(const String & database, std::unique_lock lock(ddl_guards_mutex); auto db_guard_iter = ddl_guards.try_emplace(database).first; DatabaseGuard & db_guard = db_guard_iter->second; - return std::make_unique(db_guard.first, db_guard.second, std::move(lock), table); + return std::make_unique(db_guard.first, db_guard.second, std::move(lock), table, database); } std::unique_lock DatabaseCatalog::getExclusiveDDLGuardForDatabase(const String & database) @@ -832,7 +832,7 @@ void DatabaseCatalog::waitTableFinallyDropped(const UUID & uuid) } -DDLGuard::DDLGuard(Map & map_, std::shared_mutex & db_mutex_, std::unique_lock guards_lock_, const String & elem) +DDLGuard::DDLGuard(Map & map_, std::shared_mutex & db_mutex_, std::unique_lock guards_lock_, const String & elem, const String & database_name) : map(map_), db_mutex(db_mutex_), guards_lock(std::move(guards_lock_)) { it = map.emplace(elem, Entry{std::make_unique(), 0}).first; @@ -841,14 +841,19 @@ DDLGuard::DDLGuard(Map & map_, std::shared_mutex & db_mutex_, std::unique_locksecond.mutex); bool is_database = elem.empty(); if (!is_database) - db_mutex.lock_shared(); + { + + bool locked_database_for_read = db_mutex.try_lock_shared(); + if (!locked_database_for_read) + { + removeTableLock(); + throw Exception(ErrorCodes::UNKNOWN_DATABASE, "Database {} is currently dropped or renamed", database_name); + } + } } -DDLGuard::~DDLGuard() +void DDLGuard::removeTableLock() { - bool is_database = it->first.empty(); - if (!is_database) - db_mutex.unlock_shared(); guards_lock.lock(); --it->second.counter; if (!it->second.counter) @@ -858,4 +863,12 @@ DDLGuard::~DDLGuard() } } +DDLGuard::~DDLGuard() +{ + bool is_database = it->first.empty(); + if (!is_database) + db_mutex.unlock_shared(); + removeTableLock(); +} + } diff --git a/src/Interpreters/DatabaseCatalog.h b/src/Interpreters/DatabaseCatalog.h index 7bc6923bde4..c6f50117564 100644 --- a/src/Interpreters/DatabaseCatalog.h +++ b/src/Interpreters/DatabaseCatalog.h @@ -51,7 +51,7 @@ public: /// NOTE: using std::map here (and not std::unordered_map) to avoid iterator invalidation on insertion. 
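 /// [Editorial note, inferred from the DDLGuard constructor change above; not part of the
 /// original patch] The resulting acquisition order is: guards_lock (protects this map),
 /// then the per-table Entry::mutex, then a shared lock on the database mutex taken with
 /// try_lock_shared(), so that a concurrent DROP or RENAME DATABASE makes the guard fail
 /// fast with "is currently dropped or renamed" instead of deadlocking.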
using Map = std::map; - DDLGuard(Map & map_, std::shared_mutex & db_mutex_, std::unique_lock guards_lock_, const String & elem); + DDLGuard(Map & map_, std::shared_mutex & db_mutex_, std::unique_lock guards_lock_, const String & elem, const String & database_name); ~DDLGuard(); private: @@ -60,6 +60,8 @@ private: Map::iterator it; std::unique_lock guards_lock; std::unique_lock table_lock; + + void removeTableLock(); }; diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index a9bc738f614..b29f2893db9 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -247,6 +247,7 @@ BlockIO InterpreterDropQuery::executeToDatabase(const String & database_name, AS ASTDropQuery query; query.kind = kind; + query.if_exists = true; query.database = database_name; for (auto iterator = database->getTablesIterator(context); iterator->isValid(); iterator->next()) { diff --git a/tests/queries/0_stateless/01150_ddl_guard_rwr.reference b/tests/queries/0_stateless/01150_ddl_guard_rwr.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01150_ddl_guard_rwr.sh b/tests/queries/0_stateless/01150_ddl_guard_rwr.sh new file mode 100755 index 00000000000..c14e4c38f54 --- /dev/null +++ b/tests/queries/0_stateless/01150_ddl_guard_rwr.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. "$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT --query "DROP DATABASE IF EXISTS test_01150" +$CLICKHOUSE_CLIENT --query "CREATE DATABASE test_01150" + +$CLICKHOUSE_CLIENT --query "CREATE TABLE test_01150.t1 (x UInt64, s Array(Nullable(String))) ENGINE = Memory" +$CLICKHOUSE_CLIENT --query "CREATE TABLE test_01150.t2 (x UInt64, s Array(Nullable(String))) ENGINE = Memory" + +function thread_detach_attach { + while true; do + $CLICKHOUSE_CLIENT --query "DETACH DATABASE test_01150" 2>&1 | grep -v -F 'Received exception from server' | grep -v -P 'Code: (219)' + sleep 0.0$RANDOM + $CLICKHOUSE_CLIENT --query "ATTACH DATABASE test_01150" 2>&1 | grep -v -F 'Received exception from server' | grep -v -P 'Code: (82)' + sleep 0.0$RANDOM + done +} + +function thread_rename { + while true; do + $CLICKHOUSE_CLIENT --query "RENAME TABLE test_01150.t1 TO test_01150.t2_tmp, test_01150.t2 TO test_01150.t1, test_01150.t2_tmp TO test_01150.t2" 2>&1 | grep -v -F 'Received exception from server' | grep -v -P 'Code: (81|60|57|521)' + sleep 0.0$RANDOM + $CLICKHOUSE_CLIENT --query "RENAME TABLE test_01150.t2 TO test_01150.t1, test_01150.t2_tmp TO test_01150.t2" 2>&1 | grep -v -F 'Received exception from server' | grep -v -P 'Code: (81|60|57|521)' + sleep 0.0$RANDOM + $CLICKHOUSE_CLIENT --query "RENAME TABLE test_01150.t2_tmp TO test_01150.t2" 2>&1 | grep -v -F 'Received exception from server' | grep -v -P 'Code: (81|60|57|521)' + sleep 0.0$RANDOM + done +} + +export -f thread_detach_attach +export -f thread_rename + +timeout 20 bash -c "thread_detach_attach" & +timeout 20 bash -c 'thread_rename' & +wait +sleep 1 + +$CLICKHOUSE_CLIENT --query "ATTACH DATABASE IF NOT EXISTS test_01150" +$CLICKHOUSE_CLIENT --query "DROP DATABASE test_01150"; From 8a64b65e51713d429bd744ca7288ef858041a2bd Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 13 Oct 2020 20:45:59 +0300 Subject: [PATCH 089/432] fix --- src/Interpreters/InterpreterDropQuery.cpp | 2 ++ .../0_stateless/01516_drop_table_stress.sh | 18 +++++++----------- 2 files changed, 
From 8a64b65e51713d429bd744ca7288ef858041a2bd Mon Sep 17 00:00:00 2001
From: Alexander Tokmakov
Date: Tue, 13 Oct 2020 20:45:59 +0300
Subject: [PATCH 089/432] fix

---
 src/Interpreters/InterpreterDropQuery.cpp          |  2 ++
 .../0_stateless/01516_drop_table_stress.sh         | 18 +++++++-----------
 2 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp
index b29f2893db9..a250ab1afd4 100644
--- a/src/Interpreters/InterpreterDropQuery.cpp
+++ b/src/Interpreters/InterpreterDropQuery.cpp
@@ -88,6 +88,8 @@ BlockIO InterpreterDropQuery::executeToTable(const ASTDropQuery & query)
         if (query_ptr->as<ASTDropQuery &>().is_view && !table->isView())
             throw Exception("Table " + table_id.getNameForLogs() + " is not a View", ErrorCodes::LOGICAL_ERROR);
 
+        table_id = table->getStorageID();
+
         if (query.kind == ASTDropQuery::Kind::Detach)
         {
             context.checkAccess(table->isView() ? AccessType::DROP_VIEW : AccessType::DROP_TABLE, table_id);
diff --git a/tests/queries/0_stateless/01516_drop_table_stress.sh b/tests/queries/0_stateless/01516_drop_table_stress.sh
index 3d6218c4549..3e2fd613a36 100755
--- a/tests/queries/0_stateless/01516_drop_table_stress.sh
+++ b/tests/queries/0_stateless/01516_drop_table_stress.sh
@@ -12,21 +12,17 @@ function drop_database()
 
 function drop_table()
 {
-    ${CLICKHOUSE_CLIENT} -nm <<EOL 2>&1 | grep -F "Code: " | grep -Fv "is currently dropped or renamed"
-        DROP TABLE IF EXISTS db_01516.data1;
-        DROP TABLE IF EXISTS db_01516.data2;
-EOL
+    ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS db_01516.data1;" 2>&1 | grep -F "Code: " | grep -Fv "is currently dropped or renamed"
+    ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS db_01516.data2;" 2>&1 | grep -F "Code: " | grep -Fv "is currently dropped or renamed"
 }
 
 function create()
 {
-    ${CLICKHOUSE_CLIENT} -nm <<EOL 2>&1 | grep -F "Code: " | grep -Fv "is currently dropped or renamed"
-        CREATE TABLE IF NOT EXISTS db_01516.data2 Engine=MergeTree() ORDER BY number AS SELECT * FROM numbers(1);
-        CREATE TABLE IF NOT EXISTS db_01516.data3 Engine=MergeTree() ORDER BY number AS SELECT * FROM numbers(1);
-EOL
+    ${CLICKHOUSE_CLIENT} -q "CREATE TABLE IF NOT EXISTS db_01516.data2 Engine=MergeTree() ORDER BY number AS SELECT * FROM numbers(1);" 2>&1 | grep -F "Code: " | grep -Fv "is currently dropped or renamed"
+    ${CLICKHOUSE_CLIENT} -q "CREATE TABLE IF NOT EXISTS db_01516.data3 Engine=MergeTree() ORDER BY number AS SELECT * FROM numbers(1);" 2>&1 | grep -F "Code: " | grep -Fv "is currently dropped or renamed"
 }
 
 for _ in {1..100}; do
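The commit below reworks both t-test aggregate functions to return a (t-statistic, p-value) tuple instead of a bare p-value. As a reference while reading the AggregateFunctionStudentTTest.h hunks, this is a standalone sketch of the two-sample Student statistics that the aggregate state computes incrementally; the formulas are the standard pooled-variance ones (equal variances assumed), and the function names are illustrative only.

#include <cmath>

/// Pooled variance from the unbiased per-sample variances sx2 and sy2.
static double pooledVariance(double sx2, double sy2, double nx, double ny)
{
    return ((nx - 1.0) * sx2 + (ny - 1.0) * sy2) / (nx + ny - 2.0);
}

/// t = (mean_x - mean_y) / sqrt(s^2 * (1/nx + 1/ny)), with nx + ny - 2 degrees of freedom.
static double studentTStatistic(double mean_x, double mean_y, double sx2, double sy2, double nx, double ny)
{
    const double standard_error_squared = pooledVariance(sx2, sy2, nx, ny) * (1.0 / nx + 1.0 / ny);
    return (mean_x - mean_y) / std::sqrt(standard_error_squared);
}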
From 744013d4b8e1624c528697b594982d3b696345d6 Mon Sep 17 00:00:00 2001
From: nikitamikhaylov
Date: Tue, 13 Oct 2020 21:46:15 +0300
Subject: [PATCH 090/432] test in comparison with scipy

---
 .../AggregateFunctionStudentTTest.h           | 56 ++++++++++++++-----
 .../AggregateFunctionWelchTTest.h             | 43 ++++++++++++--
 src/AggregateFunctions/ya.make                |  4 +-
 .../0_stateless/01322_student_ttest.reference |  4 ++
 .../0_stateless/01322_student_ttest.sql       | 19 +++++++
 .../0_stateless/01322_welch_ttest.reference   | 10 +++-
 .../queries/0_stateless/01322_welch_ttest.sql | 27 +++++++--
 7 files changed, 137 insertions(+), 26 deletions(-)
 create mode 100644 tests/queries/0_stateless/01322_student_ttest.reference
 create mode 100644 tests/queries/0_stateless/01322_student_ttest.sql

diff --git a/src/AggregateFunctions/AggregateFunctionStudentTTest.h b/src/AggregateFunctions/AggregateFunctionStudentTTest.h
index b03f9178709..ac05a11d334 100644
--- a/src/AggregateFunctions/AggregateFunctionStudentTTest.h
+++ b/src/AggregateFunctions/AggregateFunctionStudentTTest.h
@@ -115,18 +115,22 @@ struct AggregateFunctionStudentTTestData final
             throw Exception("Division by zero encountered in Aggregate function StudentTTest", ErrorCodes::BAD_ARGUMENTS);
         }
 
-        if (mean_x - mean_y < 1e-8)
-        {
-            return static_cast<Float64>(0.0);
-        }
-
         return std::pow(mean_x - mean_y, 2) / getStandartErrorSquared();
     }
 
+    Float64 getTStatistic() const
+    {
+        if (size_x == 0 || size_y == 0)
+        {
+            throw Exception("Division by zero encountered in Aggregate function StudentTTest", ErrorCodes::BAD_ARGUMENTS);
+        }
+
+        return (mean_x - mean_y) / std::sqrt(getStandartErrorSquared());
+    }
 
     Float64 getStandartErrorSquared() const
     {
-        return getSSquared() * (1 / size_x + 1 / size_y);
+        return getSSquared() * (1.0 / static_cast<Float64>(size_x) + 1.0 / static_cast<Float64>(size_y));
     }
 
     Float64 getDegreesOfFreedom() const
@@ -150,20 +154,23 @@ struct AggregateFunctionStudentTTestData final
     {
         const Float64 v = getDegreesOfFreedom();
         const Float64 t = getTStatisticSquared();
-        std::cout << "getDegreesOfFreedom " << v << " getTStatisticSquared " << t << std::endl;
+        std::cout << "getDegreesOfFreedom() " << getDegreesOfFreedom() << std::endl;
+        std::cout << "getTStatisticSquared() " << getTStatisticSquared() << std::endl;
         auto f = [&v] (double x) { return std::pow(x, v/2 - 1) / std::sqrt(1 - x); };
         Float64 numenator = integrateSimpson(0, v / (t + v), f);
         Float64 denominator = std::exp(std::lgammal(v/2) + std::lgammal(0.5) - std::lgammal(v/2 + 0.5));
+        std::cout << "numenator " << numenator << std::endl;
+        std::cout << "denominator " << denominator << std::endl;
         return numenator / denominator;
     }
 
-    Float64 getResult() const
+    std::pair<Float64, Float64> getResult() const
     {
-        return getPValue();
+        return std::make_pair(getTStatistic(), getPValue());
     }
 };
 
-/// Returns p-value
+/// Returns tuple of (t-statistic, p-value)
 /// https://cpb-us-w2.wpmucdn.com/voices.uchicago.edu/dist/9/1193/files/2016/01/05b-TandP.pdf
 template <typename X = Float64, typename Y = Float64>
 class AggregateFunctionStudentTTest :
@@ -182,7 +189,22 @@ public:
 
     DataTypePtr getReturnType() const override
     {
-        return std::make_shared<DataTypeNumber<Float64>>();
+        DataTypes types
+        {
+            std::make_shared<DataTypeNumber<Float64>>(),
+            std::make_shared<DataTypeNumber<Float64>>(),
+        };
+
+        Strings names
+        {
+            "t-statistic",
+            "p-value"
+        };
+
+        return std::make_shared<DataTypeTuple>(
+            std::move(types),
+            std::move(names)
+        );
     }
 
     void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena *) const override
@@ -221,8 +243,16 @@ public:
             throw Exception("Aggregate function " + getName() + " requires samples to be of size > 1", ErrorCodes::BAD_ARGUMENTS);
         }
 
-        auto & column = static_cast<ColumnVector<Float64> &>(to);
-        column.getData().push_back(this->data(place).getResult());
+        Float64 t_statistic = 0.0;
+        Float64 p_value = 0.0;
+        std::tie(t_statistic, p_value) = this->data(place).getResult();
+
+        auto & column_tuple = assert_cast<ColumnTuple &>(to);
+        auto & column_stat = assert_cast<ColumnVector<Float64> &>(column_tuple.getColumn(0));
+        auto & column_value = assert_cast<ColumnVector<Float64> &>(column_tuple.getColumn(1));
+
+        column_stat.getData().push_back(t_statistic);
+        column_value.getData().push_back(p_value);
     }
 };
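The AggregateFunctionWelchTTest.h hunks below mirror the StudentTTest changes, but Welch's test does not pool the variances and takes its degrees of freedom from the Welch-Satterthwaite approximation. A standalone sketch of those statistics (standard formulas; function names are illustrative only):

#include <cmath>

/// t = (mean_x - mean_y) / sqrt(sx2/nx + sy2/ny); the sample variances are not pooled.
static double welchTStatistic(double mean_x, double mean_y, double sx2, double sy2, double nx, double ny)
{
    return (mean_x - mean_y) / std::sqrt(sx2 / nx + sy2 / ny);
}

/// Welch-Satterthwaite approximation of the degrees of freedom.
static double welchDegreesOfFreedom(double sx2, double sy2, double nx, double ny)
{
    const double a = sx2 / nx;
    const double b = sy2 / ny;
    return (a + b) * (a + b) / (a * a / (nx - 1.0) + b * b / (ny - 1.0));
}

For both tests the two-sided p-value is the regularized incomplete beta function I_x(v/2, 1/2) at x = v / (t^2 + v), which both getPValue() implementations approximate numerically with integrateSimpson over f(x) = x^(v/2 - 1) / sqrt(1 - x).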
diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h
index e445278e9e7..36641b826b1 100644
--- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h
+++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h
@@ -122,6 +122,16 @@ struct AggregateFunctionWelchTTestData final
         return std::pow(mean_x - mean_y, 2) / (getSxSquared() / size_x + getSySquared() / size_y);
     }
 
+    Float64 getTStatistic() const
+    {
+        if (size_x == 0 || size_y == 0)
+        {
+            throw Exception("Division by zero encountered in Aggregate function WelchTTest", ErrorCodes::BAD_ARGUMENTS);
+        }
+
+        return (mean_x - mean_y) / std::sqrt(getSxSquared() / size_x + getSySquared() / size_y);
+    }
+
     Float64 getDegreesOfFreedom() const
     {
         auto sx = getSxSquared();
@@ -154,9 +164,9 @@ struct AggregateFunctionWelchTTestData final
         return numenator / denominator;
     }
 
-    Float64 getResult() const
+    std::pair<Float64, Float64> getResult() const
     {
-        return getPValue();
+        return std::make_pair(getTStatistic(), getPValue());
     }
 };
@@ -178,7 +188,22 @@ public:
 
     DataTypePtr getReturnType() const override
     {
-        return std::make_shared<DataTypeNumber<Float64>>();
+        DataTypes types
+        {
+            std::make_shared<DataTypeNumber<Float64>>(),
+            std::make_shared<DataTypeNumber<Float64>>(),
+        };
+
+        Strings names
+        {
+            "t-statistic",
+            "p-value"
+        };
+
+        return std::make_shared<DataTypeTuple>(
+            std::move(types),
+            std::move(names)
+        );
     }
 
     void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena *) const override
@@ -217,8 +242,16 @@ public:
             throw Exception("Aggregate function " + getName() + " requires samples to be of size > 1", ErrorCodes::BAD_ARGUMENTS);
         }
 
-        auto & column = static_cast<ColumnVector<Float64> &>(to);
-        column.getData().push_back(this->data(place).getResult());
+        Float64 t_statistic = 0.0;
+        Float64 p_value = 0.0;
+        std::tie(t_statistic, p_value) = this->data(place).getResult();
+
+        auto & column_tuple = assert_cast<ColumnTuple &>(to);
+        auto & column_stat = assert_cast<ColumnVector<Float64> &>(column_tuple.getColumn(0));
+        auto & column_value = assert_cast<ColumnVector<Float64> &>(column_tuple.getColumn(1));
+
+        column_stat.getData().push_back(t_statistic);
+        column_value.getData().push_back(p_value);
     }
 };
diff --git a/src/AggregateFunctions/ya.make b/src/AggregateFunctions/ya.make
index f5a869b2f78..1578e0c80ea 100644
--- a/src/AggregateFunctions/ya.make
+++ b/src/AggregateFunctions/ya.make
@@ -42,6 +42,7 @@ SRCS(
     AggregateFunctionState.cpp
     AggregateFunctionStatistics.cpp
     AggregateFunctionStatisticsSimple.cpp
+    AggregateFunctionStudentTTest.cpp
     AggregateFunctionSum.cpp
     AggregateFunctionSumMap.cpp
     AggregateFunctionTimeSeriesGroupSum.cpp
@@ -49,12 +50,13 @@ SRCS(
     AggregateFunctionUniqCombined.cpp
     AggregateFunctionUniq.cpp
     AggregateFunctionUniqUpTo.cpp
+    AggregateFunctionWelchTTest.cpp
     AggregateFunctionWindowFunnel.cpp
     parseAggregateFunctionParameters.cpp
     registerAggregateFunctions.cpp
     UniqCombinedBiasData.cpp
     UniqVariadicHash.cpp
-    AggregateFunctionWelchTTest.cpp
+
 )
 
 END()
diff --git a/tests/queries/0_stateless/01322_student_ttest.reference b/tests/queries/0_stateless/01322_student_ttest.reference
new file mode 100644
index 00000000000..02e44744629
--- /dev/null
+++ b/tests/queries/0_stateless/01322_student_ttest.reference
@@ -0,0 +1,4 @@
+-2.610898982580138 0.00916587538237954
+-2.610898982580134 0.0091658753823792
+-28.740781574102936 7.667329672103986e-133
+-28.74078157410298 0
diff --git a/tests/queries/0_stateless/01322_student_ttest.sql b/tests/queries/0_stateless/01322_student_ttest.sql
new file mode 100644
index 00000000000..3636e239fe8
--- /dev/null
+++ b/tests/queries/0_stateless/01322_student_ttest.sql
@@ -0,0 +1,19 @@
+DROP TABLE IF EXISTS student_ttest;
+
+/*Check t-stat and p-value and compare it with scipy.stat implementation
+  First: a=1, sigma (not sigma^2)=5, size=500
+  Second: a=1, sigma = 5, size = 500 */
+CREATE TABLE student_ttest (left Float64, right Float64) ENGINE = Memory;
+INSERT INTO student_ttest VALUES (0.88854,-2.90702), (-5.76966,3.61651), (6.76618,4.27458), (3.55546,4.82133), (-9.76948,9.59483), (4.92323,1.00424), (-0.36352,2.04147), (0.97018,-3.58214), (4.61656,6.59543), (-6.78292,-1.00532), (4.02008,-3.59794), (12.41838,-2.82434), (5.14417,-3.13194), (3.86836,9.90977), (-1.26199,0.523), (12.44106,4.62779), (3.28349,-2.56872), (1.77261,2.25807), (-8.94748,1.04044), (-1.01449,-2.35744), (-1.26377,10.81531), (6.79682,-9.68469), (6.32333,3.80885), (-8.21214,12.70435), (-1.68565,-6.01112), (9.7557,1.89065), (3.66694,5.08892), (1.39967,3.45254), (-5.52035,11.58151), (-10.95601,0.85035), (0.93877,8.38397), (1.45933,1.17169), (-5.40551,4.74621), (-0.83857,-1.66614), (8.50794,4.2414), (-6.68686,1.68765), (5.03099,1.85223), (1.56251,9.10111), (4.17381,-2.38085), (-2.92644,-14.79595), (5.11068,-3.8938),
(2.09617,-3.41864), (11.7787,-3.15282), (6.50336,-0.56684), (0.62098,12.87997), (-7.97121,6.89115), (3.81902,12.921), (0.33151,-7.94908), (10.68584,2.45687), (0.56007,2.14957), (-7.38621,7.55081), (5.05882,-3.71534), (2.34616,-2.41064), (11.3806,-0.80734), (5.95276,-4.75651), (-3.01429,2.05241), (5.98169,-5.44523), (0.96985,-2.75054), (-1.15932,-13.00131), (2.11547,-2.74451), (2.49668,-1.39004), (-12.49569,-3.02854), (-4.94667,7.65112), (-3.64215,1.1245), (-8.35595,6.74117), (3.211,-0.75777), (2.33805,8.93451), (2.38608,-8.85559), (-3.2862,-0.36405), (-0.80454,4.02742), (-0.53483,6.88718), (10.66445,-1.05124), (-0.37619,3.04085), (0.48246,3.32368), (7.41919,1.147), (0.42414,3.41554), (-2.32335,-3.47851), (-0.70223,-0.47684), (-5.9332,-0.55605), (-1.20561,-0.17006), (3.39865,2.26218), (9.61739,12.45494), (-0.78651,-1.84097), (-4.00256,1.64934), (-7.99646,-7.07496), (8.72923,-9.99462), (0.71859,6.09954), (-1.62726,-1.05319), (5.11234,3.04757), (-0.95625,0.93899), (-3.75573,-4.63243), (1.03141,-7.43322), (-3.33588,-7.298), (1.51804,-6.59016), (-3.30935,-6.11649), (-1.97507,0.56682), (4.06456,2.00661), (3.27195,-2.79814), (-7.81761,2.84482), (-3.81785,3.65348), (-4.18311,-4.22807), (-11.33313,-4.54336), (-0.25221,-3.63343), (7.2514,2.96878), (5.30301,6.11661), (2.46762,-1.70919), (4.22716,-4.71133), (0.33916,6.09652), (9.7638,-6.83454), (-7.58684,0.18006), (-4.09888,1.51676), (4.26617,-5.31646), (-0.56744,-3.21215), (4.65125,-5.07599), (-1.30301,-2.36591), (4.53771,3.55724), (9.96929,4.8904), (3.72939,-3.22586), (-2.29818,-1.74928), (3.09417,5.73458), (0.82251,1.41188), (5.29975,2.86255), (2.8685,2.90179), (-5.73321,-2.19949), (-1.85651,1.72727), (-1.07984,1.76939), (9.78342,-0.12848), (-13.49652,-0.52), (3.68791,3.48333), (1.9998,7.8262), (1.11674,0.09099), (9.43869,7.77017), (4.07029,9.49484), (5.32715,1.42825), (7.16504,1.99624), (6.66096,4.00419), (-5.7111,1.07925), (-0.38575,-0.09987), (4.49165,-5.48733), (-3.36489,-1.83517), (7.71814,2.38059), (-1.58966,1.42075), (-1.61063,-1.11968), (-0.91602,-6.46035), (0.73459,7.66576), (-3.24463,4.6307), (6.3947,5.55989), (-2.77845,3.16684), (4.45899,5.07671), (-8.84186,-10.20566), (2.62276,-4.73386), (1.774,1.28353), (4.3692,6.75679), (0.05942,12.09895), (-1.44042,7.0049), (-2.53594,7.16156), (-2.24752,-0.64311), (4.98874,-0.66747), (4.05434,3.99996), (-2.56483,9.07298), (-6.79286,-4.60971), (-2.06165,0.70744), (-0.26056,2.56774), (1.89567,9.32424), (-3.15145,3.95087), (-7.31321,7.11372), (0.28936,-0.89284), (-0.63111,8.6155), (0.22611,-0.14141), (-9.3377,-4.86319), (-5.76638,-6.95801), (3.87306,4.44883), (6.7011,4.6156), (9.03915,-2.3579), (-1.21835,-5.1186), (0.82892,8.12819), (2.80656,2.78392), (-1.34746,-4.30221), (-1.99912,-1.47506), (0.6036,6.8598), (-3.46117,0.47636), (5.23732,0.95383), (-1.86702,7.79779), (-5.86115,-2.61767), (6.48523,-10.5087), (-7.40158,-2.74299), (-1.38913,3.87369), (4.94613,-1.07093), (-2.07818,4.98864), (2.39808,-7.50772), (4.89238,6.41316), (4.39481,1.39061), (5.20425,-3.1747), (13.62598,-2.13621), (-2.86293,-0.02203), (-3.62396,0.89025), (-4.28695,-5.87746), (4.66425,3.60026), (2.20871,-0.23178), (1.60382,-2.1897), (-9.87024,-5.85101), (-7.37302,-1.6053), (-4.17814,3.6184), (2.5148,-8.53795), (3.21708,-0.35987), (-11.48089,2.15301), (1.19821,-6.60692), (-0.07436,9.54341), (-1.10652,1.11511), (4.03395,2.94025), (-4.35883,12.05657), (2.04013,3.75156), (0.52264,7.95597), (8.14004,-0.99449), (-8.86949,0.90597), (-0.35807,-7.90627), (-10.71113,3.50863), (-2.13755,-1.47493), (0.50715,4.11671), (6.30826,10.06325), 
(2.37527,-1.06059), (0.20872,-1.37737), (-5.85729,-0.42542), (-4.97217,-3.90267), (-9.78434,9.35037), (-1.53277,-7.91219), (0.14827,-4.69945), (-1.053,3.63776), (1.74558,3.46492), (11.17194,2.84518), (9.35487,-3.04301), (-9.17209,8.82764), (10.41814,7.80134), (7.41206,7.87755), (3.71775,7.01035), (-2.04674,2.43271), (6.18037,11.36418), (5.6383,-6.92659), (-0.90058,5.95541), (-1.27073,3.59436), (-2.3473,5.18429), (-8.44271,4.20225), (2.75551,0.5029), (-1.15521,4.03074), (4.08722,5.23152), (-1.70399,10.65409), (7.24114,-0.69845), (-8.43976,11.70096), (-1.53052,5.80692), (-0.00526,-8.1819), (-4.04813,4.31485), (-2.84299,5.7227), (-5.201,5.67398), (7.75774,-1.75826), (-2.85791,7.54164), (-3.86071,-1.79026), (-1.80029,-1.7395), (-5.26015,5.65042), (-3.158,0.38765), (7.71014,-4.64719), (-4.84866,-10.22048), (-8.38785,-2.05447), (7.67021,-2.43441), (4.96521,-5.38551), (-0.40919,5.47764), (-3.25711,8.26637), (3.07685,-3.6421), (2.89376,-11.66269), (-10.47331,3.972), (-3.48942,5.46642), (1.13906,-3.72304), (-8.57454,5.75251), (-3.38963,5.12841), (-2.3195,0.59067), (-1.60694,5.21138), (-5.57406,-4.58702), (-0.93075,-8.737), (-11.76579,-2.12737), (10.68283,0.22888), (8.74324,-1.46448), (7.66409,2.40311), (4.76715,-5.21814), (0.44539,13.94749), (-1.35941,-2.77448), (4.18849,-3.7867), (-6.17097,3.4954), (0.27977,3.12586), (-1.45006,-7.01485), (-4.81694,-3.20727), (-3.0297,6.31415), (0.02145,2.37521), (2.46883,8.13787), (9.60317,2.15956), (-9.93898,-0.40842), (1.05549,-7.27283), (5.55366,4.27575), (-3.80722,-2.89126), (-4.18851,6.84344), (1.00351,7.0869), (3.11385,-5.18837), (-5.17623,2.67648), (-3.18396,-6.57021), (-6.65302,0.60429), (-0.50832,-1.04921), (-4.04375,7.12873), (4.52707,1.68973), (6.63124,-2.58404), (-3.72082,-3.83114), (5.79825,-7.26546), (-2.0158,-5.07153), (-2.78369,-0.80395), (-1.91821,2.09455), (6.31714,4.33374), (-1.80869,8.54335), (8.55586,0.80566), (2.40826,-8.38085), (-8.46361,7.54812), (5.04452,8.78007), (-0.84665,1.5857), (2.30903,8.43855), (-3.71837,-1.90846), (-0.69419,-1.2434), (3.6733,7.16172), (-1.96098,-3.44129), (2.36747,-6.37542), (-12.03622,-4.99486), (4.38481,4.99033), (2.93955,-1.83734), (2.16804,-2.83289), (-0.08218,-4.13997), (-3.97934,1.40163), (-7.43985,8.57867), (0.91666,-1.87639), (7.23432,3.41667), (-6.13303,6.31762), (-10.23217,1.58473), (-6.21681,1.63625), (-0.80934,-6.93618), (0.17914,3.58046), (2.13338,-6.8097), (6.97656,4.69978), (6.90455,-1.72912), (6.25943,5.29491), (-6.04019,-1.63062), (-7.30909,5.83818), (1.4589,17.0769), (12.00208,4.54301), (2.22457,-1.33801), (-2.45912,5.64339), (-6.92213,1.26913), (4.05547,-1.01553), (0.04709,4.8316), (-7.70952,3.08635), (-1.47883,-2.27738), (1.3701,-1.13761), (-4.92928,10.08698), (-2.75872,5.33827), (-0.09178,2.84345), (2.62642,-1.51132), (-1.14623,13.46078), (2.76609,8.58965), (4.94404,-2.36683), (-7.01764,-1.8217), (-10.91568,1.96981), (-2.49738,2.31718), (0.73576,3.66493), (2.25436,1.93104), (-1.72956,5.20332), (2.41054,3.20519), (5.72149,3.34631), (-6.41371,7.0087), (3.38217,-7.96126), (1.24133,-0.62182), (10.03634,-4.65227), (-2.37303,10.6572), (-1.35543,4.50891), (-1.4387,9.74298), (-4.0976,3.85707), (-0.82501,6.41144), (-1.93498,1.48649), (5.59955,2.28076), (5.46656,2.75342), (2.43568,-5.40401), (-0.23926,7.11389), (-4.9945,5.74368), (-4.96655,6.78345), (-0.59258,3.83773), (2.02497,0.70959), (0.67583,0.57434), (3.16522,1.5888), (-1.9673,3.94889), (-6.75319,5.8234), (-6.69723,7.78366), (0.81148,9.08354), (4.44531,-7.99182), (-4.43522,-2.77033), (-5.28602,-10.29342), (-3.58829,1.76251), (-7.97395,2.09266), 
(-2.84891,4.20614), (-3.95112,-3.63064), (3.54945,-2.17794), (12.12376,-2.66225), (-3.12347,-2.74707), (3.65209,-1.93431), (9.34031,1.38629), (-0.26348,4.12816), (-5.23968,-1.58902), (2.22336,-5.08864), (-10.70405,-2.30491), (-4.41319,2.64605), (-5.94912,1.16158), (1.8147,2.63534), (7.69287,1.4956), (9.46125,-4.60768), (4.72497,0.60771), (-0.57565,3.29549), (-1.12303,-1.42592), (2.90272,0.8883), (-4.4584,-1.10612), (4.28819,-2.57296), (11.64512,5.88085), (-1.80395,7.40745), (2.51605,13.48116), (-3.18439,5.53539), (-0.70213,-1.46014), (-7.68383,3.73304), (-8.32268,3.5435), (-8.71115,-3.89151), (9.96933,4.16265), (0.95675,2.32663), (3.35114,5.31735), (-2.66008,6.33485), (7.75456,2.1339), (0.73568,0.82708), (0.3483,-2.95155), (-1.09203,-6.76019), (-7.76963,-4.20179), (5.81902,8.78354), (-3.41424,1.41863), (-0.39209,7.65689), (4.67608,-6.52601), (0.68753,-4.4426), (5.17179,-4.49483), (4.98983,-3.91479), (-0.12659,-2.84562), (3.25267,2.58974), (1.50184,2.24424), (2.94507,-4.65846), (-0.42333,8.4062), (-3.66227,8.20262), (8.90812,-8.63752), (4.74411,4.97966), (2.22018,-0.35563), (-2.07976,-4.72116), (4.8711,-2.95997), (0.5023,2.73959), (6.31569,-0.23956), (-4.36903,10.13915), (3.82146,11.83775), (-6.99477,-2.50332), (3.61225,-0.58181), (14.69335,-7.62836), (0.58368,2.26478), (4.65341,-3.50179), (-3.14272,-2.08023), (2.67048,4.07256), (4.64963,-1.40826), (-2.70828,-2.33644), (1.42923,3.00197), (5.84498,4.23668), (-4.76568,-2.24647), (0.19907,1.0445), (1.67486,-0.31901), (5.32145,8.62657), (-8.03477,3.92817), (3.46776,0.08462), (4.66374,10.15884), (-5.37394,0.4113), (5.39045,4.45847), (-1.44756,5.82941), (-1.64419,6.59202), (3.39699,-3.73441), (-2.94659,-5.86969), (-2.38437,-4.56543), (-0.23958,-1.32636), (6.88389,-0.17884), (-2.7172,-3.56181), (-1.53419,-0.66932), (7.38841,6.87538), (-5.44178,0.73527), (-0.89287,-0.24177), (2.93546,-0.8657), (-0.26901,-0.22977), (-4.70044,1.02095), (2.25846,6.16311), (-9.28813,-5.68027), (6.04268,-3.7619), (4.41693,4.22959), (1.75714,-1.5249); +SELECT '-2.610898982580138', '0.00916587538237954'; +SELECT roundBankers(StudentTTest(left, right).1, 16) as t_stat, roundBankers(StudentTTest(left, right).2, 16) as p_value from student_ttest; +DROP TABLE IF EXISTS student_ttest; + +/*Check t-stat and p-value and compare it with scipy.stat implementation + First: a=1, sigma (not sigma^2)=5, size=500 + Second: a=1, sigma = 5, size = 500 */ +CREATE TABLE student_ttest (left Float64, right Float64) ENGINE = Memory; +INSERT INTO student_ttest VALUES (4.52546,8.69444), (3.73628,3.81414), (-0.39478,12.38442), (5.15633,8.9738), (0.50539,9.19594), (-5.34036,7.21009), (0.19336,4.97743), (8.35729,4.94756), (6.95818,19.80911), (-2.93812,13.75358), (8.30807,16.56373), (-3.3517,9.72882), (4.16279,4.64509), (-3.17231,17.76854), (1.93545,4.80693), (11.06606,8.79505), (-4.22678,10.88868), (-1.99975,6.21932), (-4.51178,15.11614), (-4.50711,13.24703), (1.89786,14.76476), (-6.19638,-0.6117), (-3.70188,17.48993), (5.01334,12.11847), (1.79036,4.87439), (2.14435,18.56479), (3.0282,1.23712), (2.35528,5.41596), (-12.18535,4.54994), (5.59709,11.37668), (-12.92336,9.5982), (-0.04281,6.59822), (-0.16923,1.16703), (0.88924,8.88418), (-4.68414,10.95047), (8.01099,5.52787), (2.61686,-1.11647), (-2.76895,14.49946), (3.32165,3.27585), (-0.85135,-0.42025), (1.21368,6.37906), (4.38673,2.5242), (6.20964,8.1405), (-1.23172,6.46732), (4.65516,9.89332), (-1.87143,10.4374), (0.86429,-1.06465), (2.51184,6.84902), (-1.88822,10.96576), (-1.61802,7.83319), (1.93653,14.39823), (-3.66631,7.02594), 
(-1.05294,13.46629), (-10.74718,10.39531), (16.49295,11.27348), (-7.65494,9.32187), (-3.39303,12.32667), (-4.89418,8.98905), (3.2521,9.54757), (0.05831,5.98325), (-3.00409,3.47248), (5.76702,9.26966), (2.67674,5.77816), (10.52623,6.32966), (-0.54501,9.49313), (-4.89835,6.21337), (3.52457,10.00242), (-0.0451,6.25167), (-6.61226,15.64671), (9.02391,2.78968), (5.52571,6.55442), (4.54352,3.68819), (-3.8394,9.55934), (-7.75295,4.166), (5.91167,12.32471), (1.38897,7.10969), (6.24166,16.31723), (5.58536,12.99482), (4.7591,10.11585), (-2.58336,10.29455), (-1.91263,18.27524), (3.31575,12.84435), (5.3507,13.11954), (-15.22081,12.84147), (-0.84775,15.55658), (-4.538,11.45329), (6.71177,7.50912), (0.52882,8.56226), (2.0242,8.63104), (5.69146,15.68026), (4.63328,21.6361), (0.22984,6.23925), (-2.84052,8.65714), (7.91867,9.9423), (1.11001,12.28213), (-0.11251,3.11279), (-0.20905,13.58128), (0.03287,16.51407), (-1.59397,16.60476), (-5.39405,12.02022), (-7.1233,12.11035), (4.51517,9.47832), (-0.70967,6.40742), (5.67299,8.87252), (-0.33835,15.14265), (-1.83047,2.23572), (-0.62877,11.57144), (-7.23148,18.87737), (0.1802,12.1833), (11.73325,11.17519), (2.17603,16.80422), (-0.11683,6.81423), (-1.29102,12.12546), (-0.23201,8.06153), (-6.8643,10.97228), (-6.85153,7.30596), (-4.77163,15.44026), (6.11721,8.00993), (5.96406,12.60196), (3.59135,13.96832), (-0.60095,14.03207), (3.11163,4.53758), (-0.18831,8.08297), (0.67657,4.90451), (-3.16117,8.14253), (0.26957,19.88605), (2.18653,13.85254), (-5.94611,23.01839), (-4.39352,6.02084), (-3.71525,9.60319), (5.11103,1.90511), (1.33998,10.35237), (1.01629,16.27082), (-3.36917,12.52379), (-3.99661,11.37435), (8.19336,13.61823), (2.89168,15.77622), (-11.10373,15.17254), (11.68005,6.711), (3.08282,4.74205), (-6.81506,10.09812), (-2.34587,6.61722), (-2.68725,10.34164), (0.3577,8.96602), (-3.05682,12.32157), (9.08062,11.75711), (-0.77913,13.49499), (10.35215,8.57713), (6.82565,11.50313), (-1.24674,1.13097), (5.18822,7.83205), (-3.70743,5.77957), (1.40319,15.5519), (5.89432,10.82676), (1.43152,11.51218), (6.70638,9.29779), (9.76613,9.77021), (4.27604,9.94114), (-2.63141,15.54513), (-7.8133,19.10736), (-0.06668,15.04205), (1.05391,9.03114), (4.41797,24.0104), (0.09337,9.94205), (6.16075,2.5925), (7.49413,8.82726), (-3.52872,10.0209), (-2.17126,8.1635), (-3.87605,4.24074), (3.26607,7.67291), (-3.28045,5.21642), (2.1429,11.2808), (1.53386,6.88172), (0.21169,5.98743), (-0.63674,17.97249), (5.84893,6.46323), (-0.63498,15.37416), (8.29526,2.89957), (-1.08358,17.13044), (-2.306,11.06355), (2.86991,3.09625), (-0.76074,-2.33019), (5.49191,7.42675), (1.82883,15.06792), (-3.70497,8.81116), (-0.53232,19.17446), (-11.49722,18.77181), (3.44877,14.06443), (-1.8596,12.81241), (-10.34851,2.72299), (1.13093,18.67739), (-10.93389,11.63275), (-3.39703,2.23891), (0.19749,13.01195), (-3.68389,7.43402), (-4.67863,8.14599), (10.78916,16.65328), (0.37675,1.362), (3.98094,3.87957), (-3.64775,11.16134), (-4.8443,6.25357), (1.102,4.21945), (8.72112,12.50047), (-1.47361,6.45486), (6.24183,18.99924), (6.83569,18.09508), (-3.11684,13.59528), (4.91306,3.39681), (-0.03628,13.33157), (5.1282,5.8945), (-2.38558,5.61212), (2.33351,8.41149), (-0.97191,13.78608), (-0.05588,6.08609), (-4.70019,12.76962), (-5.12371,3.26206), (0.65606,0.25528), (-0.11574,11.9083), (4.4238,4.35071), (6.93399,11.19855), (3.68712,13.87404), (-0.01187,6.87986), (1.8332,8.32566), (5.81322,22.51334), (-4.04709,2.5226), (-8.26397,16.84498), (-2.11273,6.26108), (5.28396,13.84824), (0.73054,6.03262), (6.43559,14.12668), (4.35565,16.01939), 
(-1.05545,8.19237), (5.00087,18.01595), (-2.72239,9.45609), (7.32313,6.90459), (2.11548,12.83115), (-3.40953,10.603), (6.97051,13.70439), (-0.45567,6.1633), (1.31699,4.1151), (-1.49871,8.20499), (7.14772,11.67903), (0.79277,7.30851), (6.9698,6.50941), (2.08733,7.3949), (-3.55962,12.80075), (0.75601,5.62043), (1.21,18.2542), (-2.17877,17.9393), (1.83206,16.4569), (5.72463,8.78811), (7.42257,4.85949), (0.97829,-3.36394), (7.54238,5.38683), (9.91081,12.26083), (-4.61743,10.27907), (-4.40799,11.5144), (9.99854,11.57335), (8.53725,1.94203), (3.2905,7.78228), (0.38634,11.79385), (-2.53374,10.18415), (4.94758,14.67613), (4.79624,4.70301), (5.57664,12.72151), (-6.44871,-3.35508), (3.34431,17.63775), (0.14209,2.53883), (10.88431,14.01483), (0.31846,12.4387), (-0.54703,11.15408), (-4.67791,7.74882), (-5.68011,13.60956), (-4.93362,7.81991), (1.2271,10.90969), (5.27512,8.19828), (-3.84611,-1.18523), (6.81706,0.5916), (10.33033,0.35805), (5.13979,12.98364), (3.66534,11.38628), (-2.07219,13.94644), (10.65442,2.03781), (-3.31751,10.74447), (-1.82011,12.35656), (-0.39886,7.08701), (1.77052,2.69871), (1.29049,19.66653), (7.92344,7.88636), (-2.92595,10.36916), (-2.67107,1.632), (5.64708,11.86081), (0.34639,13.47602), (-3.04356,6.60204), (3.98828,7.01303), (-1.36695,20.19992), (-8.48462,18.88249), (-4.04669,11.34367), (9.84561,12.97305), (-6.1537,9.5776), (0.82433,17.91364), (1.92449,18.3247), (2.51288,9.9211), (0.40965,7.14257), (2.89183,6.59133), (3.84347,12.35274), (0.66829,10.57523), (-3.45094,12.12859), (1.3544,9.47177), (-9.85456,0.60659), (5.25689,4.72996), (-5.26018,4.51121), (-6.16912,13.28893), (-1.77163,8.09014), (3.96687,8.02511), (0.70893,13.85406), (-5.45342,1.75412), (-3.89706,6.00641), (3.11868,6.35554), (4.41714,7.11293), (7.64841,8.30442), (0.00489,12.63024), (3.2263,12.38966), (-5.33042,7.6801), (2.52189,11.33744), (-7.40308,4.67713), (0.67891,7.62276), (2.49343,2.14478), (5.43133,15.32988), (-0.67541,1.52299), (-0.60299,17.00017), (-6.32903,8.29701), (-3.44336,10.92961), (-0.23963,6.78449), (6.94686,7.02698), (6.59442,11.51719), (-4.18532,9.97926), (-1.8228,7.44251), (-0.29443,7.58541), (2.99821,4.76058), (2.51942,12.88959), (-3.49176,9.974), (-0.57979,17.03689), (8.69471,11.14554), (-1.19427,11.7392), (-3.17119,11.50029), (-2.99566,19.41759), (-3.34493,9.65127), (-2.33826,9.87673), (-5.04164,14.13485), (-0.48214,9.78034), (7.45097,1.57826), (3.04787,3.72091), (2.92632,9.4054), (1.39694,23.22816), (4.38686,-0.12571), (3.25753,6.97343), (7.14218,10.09049), (-4.04341,11.78393), (-9.19352,3.01909), (2.78473,16.09448), (0.33331,6.25485), (9.89238,7.13164), (6.00566,7.75879), (-1.7511,9.56834), (4.77815,6.14824), (5.07457,13.53454), (2.56132,8.26364), (2.38317,8.7095), (-1.63486,10.61607), (-1.46871,10.64418), (-5.8681,23.9106), (-2.96227,11.38978), (-1.90638,11.4383), (-13.3052,18.41498), (-2.14705,3.70959), (-9.62069,19.95918), (2.29313,9.53847), (0.22162,14.04957), (-1.83956,13.70151), (4.1853,5.45046), (6.05965,10.95061), (-0.23737,9.55156), (6.07452,17.92345), (4.34629,6.23976), (4.02922,8.71029), (3.62622,13.58736), (-3.95825,8.78527), (-1.63412,11.14213), (-1.25727,12.23717), (5.06323,16.44557), (-0.66176,0.47144), (2.36606,9.7198), (-5.77792,13.50981), (4.535,14.27806), (1.02031,13.50793), (4.49345,7.47381), (-4.99791,11.07844), (2.46716,9.89844), (3.65471,21.48548), (11.2283,6.92085), (6.69743,4.44074), (-5.60375,19.98074), (0.28683,7.92826), (-0.85737,16.6313), (4.26726,17.17618), (-3.4322,13.80807), (-2.07039,5.37083), (-2.26798,9.73962), (-0.99818,10.66273), (0.41335,8.90639), 
(5.18124,12.24596), (-5.01858,16.89203), (2.05561,12.69184), (-0.12117,15.59077), (0.99471,6.94287), (6.89979,-0.1801), (-4.18527,3.25318), (-6.35104,8.08804), (3.89734,13.78384), (-1.979,0.46434), (3.15404,7.78224), (3.52672,9.10987), (2.48372,-0.89391), (-6.13089,14.3696), (2.2968,3.01763), (-2.74324,8.03559), (-0.12876,7.24609), (-1.51135,11.86271), (-3.92434,6.28196), (-1.71254,8.9725), (-1.25878,14.46114), (2.03021,9.50216), (4.31726,16.30413), (-3.02908,1.02795), (9.7093,1.88717), (-3.36284,9.80106), (6.70938,4.53487), (0.42762,16.34543), (5.04726,7.71098), (2.78386,2.74639), (6.83022,6.51875), (-3.02109,10.42308), (-0.65382,13.57901), (-15.58675,0.52784), (5.89746,4.4708), (-4.11598,6.39619), (-1.37208,14.57666), (10.08082,2.71602), (5.35686,12.53905), (1.93331,11.4292), (10.47444,12.44641), (-2.36872,14.50894), (6.50752,17.64374), (2.54603,11.03218), (-0.4332,9.82789), (5.26572,10.11104), (2.09016,2.16137), (1.15513,10.24054), (14.95941,12.86909), (-3.85505,15.22845), (-2.36239,5.05411), (1.64338,10.84836), (-4.25074,11.15717), (7.29744,0.91782), (-1.18964,13.29961), (5.60612,15.11314), (-3.77011,11.54004), (6.67642,-0.94238), (-0.06862,19.32581), (5.60514,10.20744), (3.7341,6.54857), (9.59001,8.69108), (3.30093,8.2296), (-2.75658,8.4474), (4.71994,6.81178), (0.74699,5.99415), (2.91095,13.99336), (-7.36829,8.7469), (-5.29487,8.62349), (3.31079,1.84212), (1.06974,4.4762), (-1.18424,9.25421), (-7.415,10.44229), (3.40595,12.21649), (-7.63085,10.45968), (1.13336,15.34722), (-0.0096,5.50868), (0.8928,10.93609), (-0.5943,2.78631), (7.48306,11.86145), (10.11943,18.67385), (5.60459,10.64051), (4.00189,12.75565), (2.35823,6.63666), (0.33475,12.19343), (3.47072,9.08636), (-6.68867,11.67256), (3.31031,20.31392), (2.17159,11.66443); +SELECT -28.740781574102936, 7.667329672103986e-133; +SELECT roundBankers(StudentTTest(left, right).1, 16) as t_stat, roundBankers(StudentTTest(left, right).2, 16) as p_value from student_ttest; +DROP TABLE IF EXISTS student_ttest; diff --git a/tests/queries/0_stateless/01322_welch_ttest.reference b/tests/queries/0_stateless/01322_welch_ttest.reference index 015dd503b7e..d06853a0a5e 100644 --- a/tests/queries/0_stateless/01322_welch_ttest.reference +++ b/tests/queries/0_stateless/01322_welch_ttest.reference @@ -1,6 +1,10 @@ 0.021378001462867 -0.021378 +0.0213780014628671 0.090773324285671 -0.09077332 +0.0907733242891952 0.00339907162713746 -0.00339907 +0.0033990715715539 +-0.5028215369186904 0.6152361677168877 +-0.5028215369187079 0.6152361677170834 +14.971190998235835 5.898143508382202e-44 +14.971190998235837 0 diff --git a/tests/queries/0_stateless/01322_welch_ttest.sql b/tests/queries/0_stateless/01322_welch_ttest.sql index 073e71f69fe..2a045e70b32 100644 --- a/tests/queries/0_stateless/01322_welch_ttest.sql +++ b/tests/queries/0_stateless/01322_welch_ttest.sql @@ -1,18 +1,37 @@ +/*Check only p-value first*/ DROP TABLE IF EXISTS welch_ttest; CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; INSERT INTO welch_ttest VALUES (27.5,27.1), (21.0,22.0), (19.0,20.8), (23.6,23.4), (17.0,23.4), (17.9,23.5), (16.9,25.8), (20.1,22.0), (21.9,24.8), (22.6,20.2), (23.1,21.9), (19.6,22.1), (19.0,22.9), (21.7,20.5), (21.4,24.4); SELECT '0.021378001462867'; -SELECT roundBankers(WelchTTest(left, right), 8) from welch_ttest; +SELECT roundBankers(WelchTTest(left, right).2, 16) from welch_ttest; DROP TABLE IF EXISTS welch_ttest; CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; INSERT INTO welch_ttest VALUES (30.02,29.89), (29.99,29.93), 
(30.11,29.72), (29.97,29.98), (30.01,30.02), (29.99,29.98); SELECT '0.090773324285671'; -SELECT roundBankers(WelchTTest(left, right), 8) from welch_ttest; +SELECT roundBankers(WelchTTest(left, right).2, 16) from welch_ttest; DROP TABLE IF EXISTS welch_ttest; CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; INSERT INTO welch_ttest VALUES (0.010268,0.159258), (0.000167,0.136278), (0.000167,0.122389); SELECT '0.00339907162713746'; -SELECT roundBankers(WelchTTest(left, right), 8) from welch_ttest; -DROP TABLE IF EXISTS welch_ttest; \ No newline at end of file +SELECT roundBankers(WelchTTest(left, right).2, 16) from welch_ttest; +DROP TABLE IF EXISTS welch_ttest; + +/*Check t-stat and p-value and compare it with scipy.stat implementation + First: a=10, sigma (not sigma^2)=5, size=500 + Second: a=10, sigma = 10, size = 500 */ +CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; +INSERT INTO welch_ttest VALUES (14.72789,-8.65656), (9.61661,22.98234), (13.57615,23.80821), (3.98392,13.33939), (11.98889,-4.05537), (10.99422,23.5155), (5.44792,-6.45272), (20.29346,17.7903), (7.05926,11.463), (9.22732,5.28021), (12.06847,8.39157), (13.52612,6.02464), (8.24597,14.43732), (9.35245,15.76584), (10.12297,1.54391), (15.80624,1.24897), (13.68613,27.1507), (10.72729,7.71091), (5.62078,15.71846), (6.12229,32.97808), (6.03801,-1.79334), (8.95585,-9.23439), (24.04613,11.27838), (9.04757,0.72703), (2.68263,18.51557), (15.43935,9.16619), (2.89423,17.29624), (4.01423,-1.30208), (4.30568,-3.48018), (11.99948,10.12082), (8.40574,-8.01318), (10.86642,-14.22264), (9.4266,16.58174), (-8.12752,-0.55975), (7.91634,5.61449), (7.3967,1.44626), (2.26431,7.89158), (14.20118,1.13369), (6.68233,-0.82609), (15.46221,12.23365), (7.88467,12.45443), (11.20011,14.46915), (8.92027,13.72627), (10.27926,18.41459), (5.14395,29.66702), (5.62178,1.51619), (12.84383,10.40078), (9.98009,3.33266), (-0.69789,6.12036), (11.41386,11.86553), (7.76863,6.59422), (7.21743,22.0948), (1.81176,1.79623), (9.43762,14.29513), (19.22117,19.69162), (2.97128,-7.98033), (14.32851,5.48433), (7.54959,-2.28474), (3.81545,9.91876), (10.1281,10.64097), (2.48596,0.22523), (10.0461,17.01773), (3.59714,22.37388), (9.73522,14.04215), (18.8077,23.1244), (3.15148,18.96958), (12.26062,8.42663), (5.66707,3.7165), (6.58623,14.29366), (17.30902,23.50886), (9.91391,26.33722), (5.36946,26.72396), (15.73637,13.26287), (16.96281,12.97607), (11.54063,17.41838), (18.37358,8.63875), (11.38255,17.08943), (10.53256,23.15356), (8.08833,-4.4965), (16.27556,7.58895), (2.42969,26.04074), (9.56127,6.84245), (7.32998,20.56287), (9.19511,3.84735), (9.66903,-2.76304), (4.15029,13.1615), (8.83511,8.21954), (14.60617,-3.49943), (14.06143,22.12419), (5.39556,7.08323), (10.11871,16.12937), (10.56619,-0.32672), (14.4462,16.5942), (10.42106,7.68977), (7.75551,11.39484), (11.00418,-5.11987), (4.47226,20.87404), (16.35461,8.01007), (18.55174,3.26497), (11.82044,5.61253), (7.39454,20.69182), (11.27767,0.0296), (6.83827,21.904), (7.76858,22.46572), (15.97614,3.63685), (14.53781,-5.10846), (12.99546,14.86389), (16.91151,5.47188), (9.65012,18.44095), (14.25487,16.71368), (14.03618,6.36704), (2.57382,8.82663), (2.50779,14.6727), (14.24787,7.98383), (13.34666,2.65568), (7.31102,21.45827), (10.22981,11.77948), (17.4435,4.71979), (21.2074,3.17951), (6.64191,13.90226), (18.7086,15.50578), (14.78686,10.8026), (9.85287,16.91369), (4.48263,9.90552), (14.17469,13.87322), (14.4342,4.12366), (19.2481,-3.78985), (3.47165,1.7599), (8.28712,3.43715), (8.81657,-3.45246), 
(0.92319,23.64571), (20.41106,-4.96877), (6.76127,3.93514), (22.00242,1.49914), (8.66129,12.71519), (10.9929,5.11521), (17.95494,4.79872), (17.20996,20.89391), (12.18888,5.363), (12.14257,8.02765), (15.81243,14.30804), (4.43362,11.49002), (1.17567,14.25281), (15.60881,7.6573), (9.34833,15.49686), (6.33513,3.29327), (-0.83095,2.27236), (12.43268,12.58104), (6.63207,19.19128), (11.96877,15.25901), (14.81029,6.5221), (21.84876,10.10965), (3.75896,12.75249), (6.91307,16.50977), (13.73015,-8.6697), (8.63753,8.28553), (15.71679,1.44315), (1.74565,4.65869), (9.16895,0.98149), (5.70685,0.16623), (5.00117,17.66332), (13.06888,4.35346), (7.51204,6.52742), (15.34885,-1.06631), (5.20264,-5.28454), (8.59043,14.25583), (6.45619,8.74058), (14.61979,1.89553), (11.7075,-0.92959), (14.04901,10.30289), (4.20525,-6.3744), (15.1733,-8.1706), (3.12934,10.95369), (8.08049,4.94384), (15.41273,28.40568), (16.90751,3.7004), (5.86893,2.52363), (7.1086,4.07997), (4.418,7.8849), (12.0614,17.95409), (7.07887,16.67021), (3.61585,11.34377), (11.73001,-0.07446), (10.80449,22.00223), (8.40311,3.31778), (9.91276,18.50719), (16.4164,-3.58655), (5.25034,6.5394), (15.20283,12.40459), (10.42909,16.59866), (9.53888,7.54176), (14.68939,-1.51044), (6.60007,12.69758), (18.31058,2.9842), (7.01885,2.49187), (18.71631,2.04113), (10.50002,-2.46544), (10.7517,15.18368), (4.23224,-0.04058), (2.28924,-0.4127), (8.56059,10.5526), (8.25095,12.03982), (9.15673,12.10923), (13.28409,11.54954), (8.4513,-1.18613), (2.83911,11.30984), (2.79676,23.54105), (9.11055,10.67321), (7.18529,24.09196), (-4.1258,7.5008), (5.28306,12.52233), (6.82757,4.30673), (10.89035,9.35793), (5.24822,4.44472), (11.935,-7.00679), (6.45675,8.56241), (10.18088,23.73891), (4.9932,15.62708), (18.09939,16.09205), (8.11738,12.52074), (5.37883,14.58927), (10.50339,-4.80187), (16.64093,8.47964), (14.77263,7.75477), (13.71385,12.6893), (6.98746,7.14147), (10.74635,12.12654), (5.49432,12.32334), (13.46078,7.98909), (10.67565,3.26652), (9.0291,20.53684), (11.51417,32.3369), (13.07118,19.74911), (9.5049,-4.62897), (8.50611,8.26483), (6.47606,20.88451), (13.06526,-2.12982), (19.08658,25.61459), (9.49741,5.32091), (10.60865,-4.1196), (2.28996,7.57937), (8.12846,21.15847), (5.62241,6.46355), (4.07712,7.74846), (17.98526,19.62636), (9.466,28.34629), (11.38904,26.73919), (5.91826,20.40427), (1.52059,3.03378), (18.79161,10.2537), (18.20669,7.47745), (-1.67829,10.79184), (18.01586,3.91962), (16.31577,19.97973), (7.88281,18.87711), (8.46179,12.56157), (10.31113,11.46033), (14.88377,3.78661), (1.31835,-9.45748), (2.53176,12.06033), (9.48625,-0.74615), (3.97936,13.2815), (11.52319,24.78052), (13.24178,5.83337), (7.58739,17.4111), (10.00959,19.70331), (9.73361,11.78446), (8.35716,-1.366), (1.65491,1.37458), (11.11521,16.31483), (6.08355,32.63464), (10.04582,-3.79736), (11.58237,19.17984), (16.40249,-0.27705), (1.9691,-3.69456), (13.22776,28.38058), (2.67059,-1.36876), (9.83651,-25.63301), (2.12539,3.58644), (9.27114,-6.85667), (9.0699,13.42225), (2.78179,12.04671), (12.49311,28.99468), (12.97662,7.87662), (15.06359,2.61119), (16.91565,-3.56022), (5.92011,1.50022), (5.81304,14.55836), (8.46425,9.35831), (9.48705,16.9366), (4.68191,29.23126), (5.70028,15.31386), (-0.78798,13.46112), (10.03442,7.39667), (15.45433,11.15599), (9.43845,9.80499), (3.05825,22.64923), (6.92126,8.67693), (14.05905,18.67335), (19.71579,-3.19127), (15.0131,22.94716), (4.50386,17.86834), (1.31061,16.98267), (10.81197,15.91653), (14.32942,11.79718), (9.26469,18.50208), (7.27679,8.90755), (22.69295,10.44843), 
(12.03763,4.67433), (7.34876,6.82287), (16.60689,10.82228), (7.48786,-4.18631), (15.78602,20.3872), (17.21048,11.84735), (13.93482,21.25376), (9.69911,10.55032), (12.24315,12.19023), (10.58131,0.63369), (19.57006,7.92381), (9.8856,17.90933), (11.70302,15.30781), (7.89864,10.01877), (12.24831,0.88744), (16.93707,22.20967), (9.65467,-4.23117), (4.221,21.50819), (15.45229,11.27421), (12.83088,-16.23179), (7.58313,33.43085), (12.895,5.15093), (10.02471,1.34505), (13.36059,6.027), (5.07864,-10.43035), (9.72017,27.45998), (11.05809,19.24886), (15.28528,-4.44761), (13.99834,5.453), (19.26989,12.73758), (9.41846,11.2897), (11.65425,31.032), (8.49638,7.39168), (6.38592,11.95245), (-4.69837,26.279), (12.22061,-1.0255), (9.41331,10.36675), (13.2075,11.58439), (12.97005,27.8405), (11.44352,13.1707), (9.79805,31.39133), (6.93116,27.08301), (10.07691,-2.14368), (22.05892,4.08476), (7.80353,21.5573), (-2.17276,16.69822), (0.61509,7.69955), (8.35842,8.32793), (17.77108,6.49235), (14.70841,-7.3284), (1.27992,10.58264), (15.62699,-6.17006), (9.32914,34.55782), (15.41866,10.93221), (10.82009,44.24299), (3.29902,14.6224), (9.21998,-7.42798), (7.93845,15.52351), (10.33344,11.33982), (12.06399,10.46716), (5.5308,13.0986), (8.38727,-4.25988), (18.11104,9.55316), (8.86565,0.75489), (19.41825,25.99212), (9.52376,-0.81401), (3.94552,3.49551), (9.37587,22.99402), (15.44954,10.99628), (15.90527,23.70223), (13.18927,2.71482), (7.01646,22.82309), (9.06005,31.25686), (9.06431,4.86318), (5.76006,-1.06476), (9.18705,15.10298), (-3.48446,-0.61015), (15.89817,17.81246), (12.94719,-1.55788), (23.69426,18.09709), (17.47755,9.11271), (15.61528,9.94682), (0.54832,-7.33194), (14.32916,-4.67293), (9.55305,21.81717), (13.79891,7.16318), (0.82544,13.25649), (13.34875,13.88776), (9.07614,4.95793), (5.19621,17.65303), (2.1451,14.47382), (9.87726,13.19373), (8.45439,31.86093), (-1.41842,5.73161), (7.93598,10.96492), (11.23151,6.97951), (17.84458,1.75136), (7.02237,10.96144), (10.7842,15.08137), (4.42832,9.95311), (4.45044,7.07729), (1.50938,3.08148), (21.21651,22.37954), (6.2097,8.51951), (6.84354,2.88746), (18.53804,26.73509), (12.01072,-2.88939), (4.8345,-2.82367), (20.41587,-0.35783), (14.48353,14.22076), (8.71116,11.50295), (12.42818,7.10171), (14.89244,8.28488), (8.03033,0.54178), (5.25917,13.8022), (2.30092,15.62157), (10.22504,10.79173), (15.37573,28.18946), (7.13666,30.43524), (4.45018,2.54914), (10.18405,9.89421), (3.91025,13.08631), (14.52304,4.68761), (13.14771,5.61516), (11.99219,22.88072), (9.21345,7.4735), (8.85106,11.27382), (12.91887,2.39559), (15.62308,-3.31889), (11.88034,9.61957), (15.12097,23.01381), (11.58168,-1.23467), (16.83051,9.07691), (5.25405,15.78056), (2.19976,12.28421), (4.56716,9.44888), (16.46053,13.16928), (5.61995,4.33357), (8.67704,2.21737), (5.62789,33.17833), (9.84815,13.25407), (13.05834,-2.47961), (11.74205,6.41401), (3.88393,18.8439), (16.15321,-4.63375), (4.83925,-8.2909), (13.00334,12.18221), (4.4028,-2.95356), (4.35794,19.61659), (4.47478,12.45056), (2.38713,-4.17198), (4.25235,21.9641), (10.87509,11.96416), (9.82411,12.74573), (13.61518,10.47873), (10.25507,12.73295), (4.0335,11.31373), (10.69881,9.9827), (5.70321,5.87138), (6.96244,4.24372), (9.35874,-23.72256), (6.28076,28.41337), (8.29015,4.88103), (6.88653,3.61902), (7.70687,8.93586), (8.2001,16.40759), (6.73415,27.84494), (3.82052,5.6001), (3.94469,14.51379), (15.82384,13.5576), (2.54004,12.92213), (10.74876,3.90686), (12.60517,17.07104), (17.7024,15.84268), (4.6722,17.38777), (13.67341,16.54766), (6.4565,5.94487), (12.95699,17.02804), 
(4.56912,7.66386), (5.58464,10.43088), (4.0638,6.16059), (13.05559,20.46178), (5.38269,20.02888), (0.16354,20.95949), (7.23962,6.50808), (7.38577,7.22366), (8.50951,8.06659), (13.72574,16.08241), (17.80421,13.83514), (3.01135,-0.33454), (8.02608,12.98848), (14.23847,12.99024); +SELECT -0.5028215369186904, 0.6152361677168877; +SELECT roundBankers(WelchTTest(left, right).1, 16) as t_stat, roundBankers(WelchTTest(left, right).2, 16) as p_value from welch_ttest; +DROP TABLE IF EXISTS welch_ttest; + +/*Check t-stat and p-value and compare it with scipy.stat implementation + First: a=10, sigma (not sigma^2)=5, size=500 + Second: a=1, sigma = 12, size = 500 */ +CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; +INSERT INTO welch_ttest VALUES (4.82025,-2.69857), (6.13896,15.80943), (15.20277,7.31555), (14.15351,3.96517), (7.21338,4.77809), (8.55506,9.6472), (13.80816,-26.41717), (11.28411,-10.85635), (7.4612,-1.4376), (7.43759,-0.96308), (12.9832,2.84315), (-5.74783,5.79467), (12.47114,-3.06091), (15.14223,-14.62902), (3.40603,22.08022), (9.27323,-2.11982), (7.88547,-4.84824), (8.56456,-10.50447), (4.59731,2.4891), (7.91213,9.90324), (7.33894,-22.66866), (21.74811,-0.97103), (11.92111,-16.57608), (0.18828,-3.78749), (10.47314,25.84511), (20.37396,5.30797), (11.04991,-18.19466), (13.30083,11.72708), (14.28065,0.2891), (2.86942,-9.83474), (24.96072,6.69942), (14.20164,18.09604), (18.28769,18.52651), (10.50949,1.38201), (9.22273,7.64615), (11.77608,17.66598), (8.56872,-2.44141), (13.74535,-9.01598), (11.65209,27.69142), (12.51894,4.06946), (17.76256,-15.0077), (13.52122,-10.49648), (8.70796,-4.88322), (6.04749,-25.09805), (16.33064,-4.64024), (8.35636,20.94434), (14.03496,24.12126), (11.05834,-14.10962), (14.49261,10.6512), (2.59383,14.50687), (8.01022,-19.88081), (4.05458,-11.55271), (13.26384,13.16921), (14.62058,16.63864), (10.52489,-24.08114), (8.46357,-9.09949), (6.4147,-10.54702), (9.70071,0.20813), (12.47581,8.19066), (4.38333,-2.70523), (17.54172,-0.23954), (10.12109,7.19398), (7.73186,-7.1618), (14.0279,-7.44322), (11.6621,-17.92031), (17.47045,-1.58146), (15.50223,9.18338), (15.46034,3.25838), (13.39964,-14.30234), (14.98025,1.84695), (15.87912,31.13794), (17.67374,-0.85067), (9.64073,19.02787), (12.84904,-3.09594), (7.70278,13.45584), (13.03156,-5.48104), (9.04512,-22.74928), (15.97014,-8.03697), (8.96389,17.31143), (11.48009,-16.65231), (9.71153,-18.58713), (13.00084,-16.52641), (12.39803,14.95261), (13.08188,12.56762), (5.82244,15.00188), (10.81871,1.85858), (8.2539,2.1926), (7.52114,-2.4095), (9.11488,21.56873), (8.37482,3.35509), (14.48652,-4.98672), (11.42152,35.08603), (16.03111,-10.01602), (13.14057,-3.85153), (-2.26351,-6.81974), (15.50394,19.56525), (14.88603,-9.35488), (13.37257,0.24268), (11.84026,-3.51488), (7.66558,-0.37066), (6.24584,24.20888), (3.6312,-11.73537), (2.7018,0.01282), (5.63656,0.03963), (5.82643,-9.65589), (10.06745,-0.37429), (-0.5831,5.61255), (14.84202,0.49984), (9.5524,-10.15066), (19.71713,-14.54314), (14.23109,16.56889), (8.69105,-7.73873), (5.33742,-3.76422), (7.30372,1.40722), (7.93342,2.28818), (15.20884,-13.12643), (7.53839,5.17082), (13.45311,4.79089), (11.04473,-17.42643), (10.76673,8.72548), (15.44145,-3.70285), (14.06596,16.77893), (9.14873,13.382), (12.88372,19.98418), (8.74994,0.00483), (10.53263,-4.75951), (16.16694,2.35391), (8.37197,21.65809), (3.43739,-9.2714), (4.72799,-18.38253), (9.08802,7.23097), (11.2531,14.97927), (5.16115,-4.02197), (10.20895,-29.8189), (18.70884,-12.8554), (15.88924,-7.60124), 
(3.38758,-14.90158), (6.46449,-3.31486), (10.21088,31.38144), (14.08458,-8.61288), (15.74508,15.31895), (19.31896,-10.19488), (13.19641,13.796), (11.95409,-0.32912), (10.70718,-0.0684), (1.05245,-30.06834), (10.04772,24.93912), (17.01369,-3.26506), (10.2286,-8.29751), (19.58323,-5.39189), (7.02892,-25.08603), (4.16866,-1.45318), (8.94326,16.72724), (4.99854,-3.38467), (8.88352,-26.00478), (18.65422,7.28369), (17.32328,16.96226), (9.33492,16.5858), (14.94788,10.46583), (8.05863,3.84345), (14.6737,-2.99382), (10.93801,1.42078), (0.54036,-11.0123), (-0.34242,2.09909), (5.89076,1.21064), (3.15189,15.36079), (1.94421,-21.61349), (6.38698,22.7726), (10.50654,10.50512), (8.95362,-6.95825), (6.23711,9.20036), (11.75359,15.66902), (12.42155,3.28098), (-1.55472,-9.05692), (4.6688,0.32882), (10.48087,-1.64934), (11.74615,-4.81406), (9.26822,-5.06006), (7.55517,19.97493), (12.76005,2.88646), (16.47102,-0.34552), (11.31297,7.55186), (14.37437,-22.96115), (2.38799,31.29166), (6.44577,6.18798), (5.07471,-2.52715), (11.55123,-11.58799), (7.76795,14.13596), (10.60116,13.45069), (14.40885,12.15179), (11.58158,3.44491), (8.81648,-8.78006), (12.92299,18.32087), (11.26939,11.91757), (17.95014,-2.00179), (2.95002,10.88411), (17.41959,9.09327), (11.12455,6.62484), (8.78541,8.87178), (14.36413,11.52254), (12.98554,-14.15988), (12.58505,-17.19515), (15.49789,14.03089), (11.70999,-2.4095), (0.65596,-16.83575), (11.08202,2.71469), (14.75752,4.84351), (6.84385,-1.17651), (9.27245,-3.37529), (13.78243,-19.92137), (17.4863,4.48952), (4.01777,-12.4906), (11.82861,-5.65277), (13.86551,8.50819), (6.16591,-19.61261), (8.71589,12.54156), (16.77195,11.06784), (17.23243,-12.59285), (-2.12941,3.43683), (5.66629,-3.00325), (12.45153,12.49082), (1.63971,7.20955), (13.84031,17.6547), (4.6144,15.8619), (5.26169,24.3048), (9.27769,-8.05434), (9.14288,-6.06901), (9.71953,-15.69515), (9.38446,-11.13917), (1.64788,-3.90757), (11.72922,-2.57038), (13.68926,5.14065), (9.42952,17.8497), (12.05574,-8.64665), (9.09148,-18.68331), (5.32273,5.8567), (20.25258,-20.93884), (10.14599,4.40583), (10.82156,14.35985), (5.75736,4.18134), (7.13567,4.3635), (9.29746,9.35428), (5.1618,2.8908), (10.076,16.01017), (21.65669,-1.48499), (13.35486,-9.97949), (6.79957,1.03055), (8.76243,-2.79697), (14.59294,6.85977), (16.90609,4.73213), (10.50337,2.7815), (-0.07923,-2.46866), (13.51648,18.39425), (12.0676,-0.80378), (0.86482,-0.22982), (9.03563,-16.11608), (5.38751,3.0862), (17.16866,3.20779), (2.78702,10.50146), (11.15548,-0.21305), (12.30843,11.21012), (8.04897,-0.99825), (9.95814,18.39633), (11.29308,-3.39003), (14.13032,-0.64411), (21.05877,-1.39932), (3.57386,15.45319), (7.96631,-0.66044), (3.30484,-15.2223), (18.61856,-34.39907), (16.35184,-3.57836), (7.65236,16.82828), (18.02895,1.66624), (9.79458,15.43475), (16.7274,8.17776), (8.84453,5.50486), (13.05709,10.43082), (10.91447,-6.63332), (8.40171,2.28008), (16.95211,16.37203), (11.82194,5.16313), (19.87978,-8.85281), (12.88455,13.26692), (-0.00947,-7.46842), (12.28109,8.43091), (6.96462,-13.18172), (13.75282,-0.72401), (14.39141,22.3881), (11.07193,10.65448), (12.88039,2.81289), (11.38253,10.92405), (21.02707,-8.95358), (7.51955,19.80653), (6.31984,-12.86527), (15.6543,5.38826), (14.80315,-6.83501), (8.38024,-15.7647), (21.7516,-27.67412), (14.31336,8.6499), (15.04703,-4.89542), (5.73787,16.76167), (13.16911,12.84284), (12.40695,-17.27324), (9.88968,-4.18726), (8.46703,-14.62366), (8.70637,-5.49863), (8.03551,-16.22846), (5.9757,10.60329), (12.22951,6.46781), (3.14736,1.70458), (10.51266,10.77448), 
(18.593,0.8463), (10.82213,13.0482), (7.14216,-4.36264), (6.81154,3.22647), (-0.6486,2.38828), (20.56136,6.7946), (11.35367,-0.25254), (11.38205,1.2497), (17.14,1.6544), (14.91215,4.1019), (15.50207,11.27839), (5.93162,-5.04127), (3.74869,18.11674), (14.11532,0.51231), (7.38954,-0.51029), (5.45764,13.52556), (18.33733,16.10171), (9.91923,5.68197), (2.38991,-2.85904), (14.16756,-8.89167), (2.39791,6.24489), (6.92586,10.85319), (5.32474,-0.39816), (2.28812,3.87079), (5.71718,-3.1867), (5.84197,1.55322), (2.76206,16.86779), (19.05928,-14.60321), (11.51788,-1.81952), (6.56648,-3.11624), (3.35735,1.24193), (7.55948,10.18179), (19.99908,4.69796), (13.00634,0.69032), (18.36886,11.7723), (11.14675,7.62896), (16.72931,9.89741), (12.50106,9.11484), (6.00605,-3.84676), (23.06653,-0.4777), (5.39694,0.95958), (9.53167,-7.95056), (12.76944,-10.97474), (7.20604,-6.54861), (13.25391,34.74933), (13.7341,27.39463), (10.85292,4.18299), (-7.75835,6.02476), (10.29728,-1.99397), (13.70099,1.26478), (10.17959,23.37106), (9.98399,10.49682), (12.69389,-11.04354), (-0.28848,-12.22284), (-2.18319,-9.87635), (13.36378,28.90511), (10.09232,6.77613), (5.49489,0.55352), (5.46156,0.37031), (0.94225,7.1418), (12.79205,3.24897), (10.09593,-1.60918), (6.06218,3.1675), (0.89463,-17.97072), (11.88986,-5.61743), (10.79733,14.1422), (1.51371,14.87695), (2.20967,-4.65961), (15.45732,-0.99174), (16.5262,-2.96623), (5.99724,-9.02263), (8.3613,-17.2088), (15.68183,2.78608), (15.32117,6.74239), (14.15674,4.8524), (6.64553,7.46731), (4.20777,1.04894), (-0.10521,-12.8023), (-0.88169,-17.18188), (1.85913,-5.08801), (9.73673,22.13942), (0.30926,-0.36384), (6.17559,17.80564), (11.76602,7.67504), (5.68385,1.59779), (14.57088,4.10942), (12.81509,0.61074), (9.85682,-14.40767), (12.06376,10.59906), (6.08874,16.57017), (11.63921,-15.17526), (14.86722,-6.98549), (10.41035,-0.64548), (2.93794,3.23756), (12.21841,14.65504), (0.23804,4.583), (3.14845,12.72378), (7.29748,5.26547), (3.06134,0.81781), (13.77684,9.38273), (16.21992,10.37636), (5.33511,10.70325), (9.68959,-0.83043), (9.44169,-7.53149), (18.08012,-9.09147), (4.04224,-19.51381), (8.77918,-28.44508), (10.18324,6.44392), (9.38914,11.10201), (11.76995,-2.86184), (14.19963,8.30673), (6.88817,8.8797), (16.56123,10.68053), (15.39885,15.62919), (5.21241,8.00579), (4.44408,6.4651), (17.87587,-4.50029), (12.53337,18.04514), (13.60916,11.12996), (6.60104,-5.14007), (7.35453,9.43857), (18.61572,3.13476), (6.10437,4.9772), (13.08682,-17.45782), (12.15404,0.05552), (4.90789,-1.90283), (2.13353,2.67908), (12.49593,-2.62243), (11.93056,-3.22767), (13.29408,-8.70222), (5.70038,-23.11605), (8.40271,21.6757), (5.19456,12.70076), (-5.51028,4.4322), (14.0329,11.69344), (10.38365,9.18052), (6.56812,-2.2549), (4.21129,-2.15615), (9.7157,20.29765), (9.88553,-0.29536), (13.45346,15.50109), (4.97752,8.79187), (12.77595,5.11533), (8.56465,-20.44436), (4.27703,-3.00909), (18.12502,-4.48291), (12.45735,21.84462), (12.42912,1.94225), (12.08125,-2.81908), (10.85779,17.19418), (4.36013,-9.33528), (11.85062,-0.17346), (8.47776,0.03958), (9.60822,-35.17786), (11.3069,8.36887), (14.25525,-9.02292), (1.55168,-10.98804), (14.57782,0.29335), (7.84786,4.29634), (9.87774,3.87718), (14.75575,-9.08532), (3.68774,7.13922), (9.37667,-7.62463), (20.28676,-10.5666), (12.10027,4.68165), (8.01819,-3.30172), (18.78158,13.04852), (20.85402,13.45616), (18.98069,2.41043), (16.1429,-0.36501), (9.24047,-15.67383), (14.12487,17.92217), (10.18841,8.42106), (-3.04478,3.22063), (5.7552,-7.31753), (9.30376,21.99596), (11.42837,-36.8273), 
(6.02364,-20.46391), (8.86984,5.74179), (10.91177,-15.83178), (10.04418,14.90454), (18.10774,-8.84645), (7.49384,3.72036), (9.11556,4.6877), (9.7051,16.35418), (5.23268,3.15441), (9.04647,2.39907), (8.81547,-17.58664), (2.65098,-13.18269); +SELECT 14.971190998235835, 5.898143508382202e-44; +SELECT roundBankers(WelchTTest(left, right).1, 16) as t_stat, roundBankers(WelchTTest(left, right).2, 16) as p_value from welch_ttest; +DROP TABLE IF EXISTS welch_ttest; From a750b76818b6fc6273a6d7fc8e5785d9ff0068c7 Mon Sep 17 00:00:00 2001 From: nikitamikhaylov Date: Tue, 13 Oct 2020 21:50:04 +0300 Subject: [PATCH 091/432] better --- tests/queries/0_stateless/01322_student_ttest.sql | 2 +- tests/queries/0_stateless/01322_welch_ttest.sql | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01322_student_ttest.sql b/tests/queries/0_stateless/01322_student_ttest.sql index 3636e239fe8..babc8457bc0 100644 --- a/tests/queries/0_stateless/01322_student_ttest.sql +++ b/tests/queries/0_stateless/01322_student_ttest.sql @@ -14,6 +14,6 @@ DROP TABLE IF EXISTS student_ttest; Second: a=1, sigma = 5, size = 500 */ CREATE TABLE student_ttest (left Float64, right Float64) ENGINE = Memory; INSERT INTO student_ttest VALUES (4.52546,8.69444), (3.73628,3.81414), (-0.39478,12.38442), (5.15633,8.9738), (0.50539,9.19594), (-5.34036,7.21009), (0.19336,4.97743), (8.35729,4.94756), (6.95818,19.80911), (-2.93812,13.75358), (8.30807,16.56373), (-3.3517,9.72882), (4.16279,4.64509), (-3.17231,17.76854), (1.93545,4.80693), (11.06606,8.79505), (-4.22678,10.88868), (-1.99975,6.21932), (-4.51178,15.11614), (-4.50711,13.24703), (1.89786,14.76476), (-6.19638,-0.6117), (-3.70188,17.48993), (5.01334,12.11847), (1.79036,4.87439), (2.14435,18.56479), (3.0282,1.23712), (2.35528,5.41596), (-12.18535,4.54994), (5.59709,11.37668), (-12.92336,9.5982), (-0.04281,6.59822), (-0.16923,1.16703), (0.88924,8.88418), (-4.68414,10.95047), (8.01099,5.52787), (2.61686,-1.11647), (-2.76895,14.49946), (3.32165,3.27585), (-0.85135,-0.42025), (1.21368,6.37906), (4.38673,2.5242), (6.20964,8.1405), (-1.23172,6.46732), (4.65516,9.89332), (-1.87143,10.4374), (0.86429,-1.06465), (2.51184,6.84902), (-1.88822,10.96576), (-1.61802,7.83319), (1.93653,14.39823), (-3.66631,7.02594), (-1.05294,13.46629), (-10.74718,10.39531), (16.49295,11.27348), (-7.65494,9.32187), (-3.39303,12.32667), (-4.89418,8.98905), (3.2521,9.54757), (0.05831,5.98325), (-3.00409,3.47248), (5.76702,9.26966), (2.67674,5.77816), (10.52623,6.32966), (-0.54501,9.49313), (-4.89835,6.21337), (3.52457,10.00242), (-0.0451,6.25167), (-6.61226,15.64671), (9.02391,2.78968), (5.52571,6.55442), (4.54352,3.68819), (-3.8394,9.55934), (-7.75295,4.166), (5.91167,12.32471), (1.38897,7.10969), (6.24166,16.31723), (5.58536,12.99482), (4.7591,10.11585), (-2.58336,10.29455), (-1.91263,18.27524), (3.31575,12.84435), (5.3507,13.11954), (-15.22081,12.84147), (-0.84775,15.55658), (-4.538,11.45329), (6.71177,7.50912), (0.52882,8.56226), (2.0242,8.63104), (5.69146,15.68026), (4.63328,21.6361), (0.22984,6.23925), (-2.84052,8.65714), (7.91867,9.9423), (1.11001,12.28213), (-0.11251,3.11279), (-0.20905,13.58128), (0.03287,16.51407), (-1.59397,16.60476), (-5.39405,12.02022), (-7.1233,12.11035), (4.51517,9.47832), (-0.70967,6.40742), (5.67299,8.87252), (-0.33835,15.14265), (-1.83047,2.23572), (-0.62877,11.57144), (-7.23148,18.87737), (0.1802,12.1833), (11.73325,11.17519), (2.17603,16.80422), (-0.11683,6.81423), (-1.29102,12.12546), (-0.23201,8.06153), (-6.8643,10.97228), (-6.85153,7.30596), 
(-4.77163,15.44026), (6.11721,8.00993), (5.96406,12.60196), (3.59135,13.96832), (-0.60095,14.03207), (3.11163,4.53758), (-0.18831,8.08297), (0.67657,4.90451), (-3.16117,8.14253), (0.26957,19.88605), (2.18653,13.85254), (-5.94611,23.01839), (-4.39352,6.02084), (-3.71525,9.60319), (5.11103,1.90511), (1.33998,10.35237), (1.01629,16.27082), (-3.36917,12.52379), (-3.99661,11.37435), (8.19336,13.61823), (2.89168,15.77622), (-11.10373,15.17254), (11.68005,6.711), (3.08282,4.74205), (-6.81506,10.09812), (-2.34587,6.61722), (-2.68725,10.34164), (0.3577,8.96602), (-3.05682,12.32157), (9.08062,11.75711), (-0.77913,13.49499), (10.35215,8.57713), (6.82565,11.50313), (-1.24674,1.13097), (5.18822,7.83205), (-3.70743,5.77957), (1.40319,15.5519), (5.89432,10.82676), (1.43152,11.51218), (6.70638,9.29779), (9.76613,9.77021), (4.27604,9.94114), (-2.63141,15.54513), (-7.8133,19.10736), (-0.06668,15.04205), (1.05391,9.03114), (4.41797,24.0104), (0.09337,9.94205), (6.16075,2.5925), (7.49413,8.82726), (-3.52872,10.0209), (-2.17126,8.1635), (-3.87605,4.24074), (3.26607,7.67291), (-3.28045,5.21642), (2.1429,11.2808), (1.53386,6.88172), (0.21169,5.98743), (-0.63674,17.97249), (5.84893,6.46323), (-0.63498,15.37416), (8.29526,2.89957), (-1.08358,17.13044), (-2.306,11.06355), (2.86991,3.09625), (-0.76074,-2.33019), (5.49191,7.42675), (1.82883,15.06792), (-3.70497,8.81116), (-0.53232,19.17446), (-11.49722,18.77181), (3.44877,14.06443), (-1.8596,12.81241), (-10.34851,2.72299), (1.13093,18.67739), (-10.93389,11.63275), (-3.39703,2.23891), (0.19749,13.01195), (-3.68389,7.43402), (-4.67863,8.14599), (10.78916,16.65328), (0.37675,1.362), (3.98094,3.87957), (-3.64775,11.16134), (-4.8443,6.25357), (1.102,4.21945), (8.72112,12.50047), (-1.47361,6.45486), (6.24183,18.99924), (6.83569,18.09508), (-3.11684,13.59528), (4.91306,3.39681), (-0.03628,13.33157), (5.1282,5.8945), (-2.38558,5.61212), (2.33351,8.41149), (-0.97191,13.78608), (-0.05588,6.08609), (-4.70019,12.76962), (-5.12371,3.26206), (0.65606,0.25528), (-0.11574,11.9083), (4.4238,4.35071), (6.93399,11.19855), (3.68712,13.87404), (-0.01187,6.87986), (1.8332,8.32566), (5.81322,22.51334), (-4.04709,2.5226), (-8.26397,16.84498), (-2.11273,6.26108), (5.28396,13.84824), (0.73054,6.03262), (6.43559,14.12668), (4.35565,16.01939), (-1.05545,8.19237), (5.00087,18.01595), (-2.72239,9.45609), (7.32313,6.90459), (2.11548,12.83115), (-3.40953,10.603), (6.97051,13.70439), (-0.45567,6.1633), (1.31699,4.1151), (-1.49871,8.20499), (7.14772,11.67903), (0.79277,7.30851), (6.9698,6.50941), (2.08733,7.3949), (-3.55962,12.80075), (0.75601,5.62043), (1.21,18.2542), (-2.17877,17.9393), (1.83206,16.4569), (5.72463,8.78811), (7.42257,4.85949), (0.97829,-3.36394), (7.54238,5.38683), (9.91081,12.26083), (-4.61743,10.27907), (-4.40799,11.5144), (9.99854,11.57335), (8.53725,1.94203), (3.2905,7.78228), (0.38634,11.79385), (-2.53374,10.18415), (4.94758,14.67613), (4.79624,4.70301), (5.57664,12.72151), (-6.44871,-3.35508), (3.34431,17.63775), (0.14209,2.53883), (10.88431,14.01483), (0.31846,12.4387), (-0.54703,11.15408), (-4.67791,7.74882), (-5.68011,13.60956), (-4.93362,7.81991), (1.2271,10.90969), (5.27512,8.19828), (-3.84611,-1.18523), (6.81706,0.5916), (10.33033,0.35805), (5.13979,12.98364), (3.66534,11.38628), (-2.07219,13.94644), (10.65442,2.03781), (-3.31751,10.74447), (-1.82011,12.35656), (-0.39886,7.08701), (1.77052,2.69871), (1.29049,19.66653), (7.92344,7.88636), (-2.92595,10.36916), (-2.67107,1.632), (5.64708,11.86081), (0.34639,13.47602), (-3.04356,6.60204), (3.98828,7.01303), 
(-1.36695,20.19992), (-8.48462,18.88249), (-4.04669,11.34367), (9.84561,12.97305), (-6.1537,9.5776), (0.82433,17.91364), (1.92449,18.3247), (2.51288,9.9211), (0.40965,7.14257), (2.89183,6.59133), (3.84347,12.35274), (0.66829,10.57523), (-3.45094,12.12859), (1.3544,9.47177), (-9.85456,0.60659), (5.25689,4.72996), (-5.26018,4.51121), (-6.16912,13.28893), (-1.77163,8.09014), (3.96687,8.02511), (0.70893,13.85406), (-5.45342,1.75412), (-3.89706,6.00641), (3.11868,6.35554), (4.41714,7.11293), (7.64841,8.30442), (0.00489,12.63024), (3.2263,12.38966), (-5.33042,7.6801), (2.52189,11.33744), (-7.40308,4.67713), (0.67891,7.62276), (2.49343,2.14478), (5.43133,15.32988), (-0.67541,1.52299), (-0.60299,17.00017), (-6.32903,8.29701), (-3.44336,10.92961), (-0.23963,6.78449), (6.94686,7.02698), (6.59442,11.51719), (-4.18532,9.97926), (-1.8228,7.44251), (-0.29443,7.58541), (2.99821,4.76058), (2.51942,12.88959), (-3.49176,9.974), (-0.57979,17.03689), (8.69471,11.14554), (-1.19427,11.7392), (-3.17119,11.50029), (-2.99566,19.41759), (-3.34493,9.65127), (-2.33826,9.87673), (-5.04164,14.13485), (-0.48214,9.78034), (7.45097,1.57826), (3.04787,3.72091), (2.92632,9.4054), (1.39694,23.22816), (4.38686,-0.12571), (3.25753,6.97343), (7.14218,10.09049), (-4.04341,11.78393), (-9.19352,3.01909), (2.78473,16.09448), (0.33331,6.25485), (9.89238,7.13164), (6.00566,7.75879), (-1.7511,9.56834), (4.77815,6.14824), (5.07457,13.53454), (2.56132,8.26364), (2.38317,8.7095), (-1.63486,10.61607), (-1.46871,10.64418), (-5.8681,23.9106), (-2.96227,11.38978), (-1.90638,11.4383), (-13.3052,18.41498), (-2.14705,3.70959), (-9.62069,19.95918), (2.29313,9.53847), (0.22162,14.04957), (-1.83956,13.70151), (4.1853,5.45046), (6.05965,10.95061), (-0.23737,9.55156), (6.07452,17.92345), (4.34629,6.23976), (4.02922,8.71029), (3.62622,13.58736), (-3.95825,8.78527), (-1.63412,11.14213), (-1.25727,12.23717), (5.06323,16.44557), (-0.66176,0.47144), (2.36606,9.7198), (-5.77792,13.50981), (4.535,14.27806), (1.02031,13.50793), (4.49345,7.47381), (-4.99791,11.07844), (2.46716,9.89844), (3.65471,21.48548), (11.2283,6.92085), (6.69743,4.44074), (-5.60375,19.98074), (0.28683,7.92826), (-0.85737,16.6313), (4.26726,17.17618), (-3.4322,13.80807), (-2.07039,5.37083), (-2.26798,9.73962), (-0.99818,10.66273), (0.41335,8.90639), (5.18124,12.24596), (-5.01858,16.89203), (2.05561,12.69184), (-0.12117,15.59077), (0.99471,6.94287), (6.89979,-0.1801), (-4.18527,3.25318), (-6.35104,8.08804), (3.89734,13.78384), (-1.979,0.46434), (3.15404,7.78224), (3.52672,9.10987), (2.48372,-0.89391), (-6.13089,14.3696), (2.2968,3.01763), (-2.74324,8.03559), (-0.12876,7.24609), (-1.51135,11.86271), (-3.92434,6.28196), (-1.71254,8.9725), (-1.25878,14.46114), (2.03021,9.50216), (4.31726,16.30413), (-3.02908,1.02795), (9.7093,1.88717), (-3.36284,9.80106), (6.70938,4.53487), (0.42762,16.34543), (5.04726,7.71098), (2.78386,2.74639), (6.83022,6.51875), (-3.02109,10.42308), (-0.65382,13.57901), (-15.58675,0.52784), (5.89746,4.4708), (-4.11598,6.39619), (-1.37208,14.57666), (10.08082,2.71602), (5.35686,12.53905), (1.93331,11.4292), (10.47444,12.44641), (-2.36872,14.50894), (6.50752,17.64374), (2.54603,11.03218), (-0.4332,9.82789), (5.26572,10.11104), (2.09016,2.16137), (1.15513,10.24054), (14.95941,12.86909), (-3.85505,15.22845), (-2.36239,5.05411), (1.64338,10.84836), (-4.25074,11.15717), (7.29744,0.91782), (-1.18964,13.29961), (5.60612,15.11314), (-3.77011,11.54004), (6.67642,-0.94238), (-0.06862,19.32581), (5.60514,10.20744), (3.7341,6.54857), (9.59001,8.69108), (3.30093,8.2296), 
(-2.75658,8.4474), (4.71994,6.81178), (0.74699,5.99415), (2.91095,13.99336), (-7.36829,8.7469), (-5.29487,8.62349), (3.31079,1.84212), (1.06974,4.4762), (-1.18424,9.25421), (-7.415,10.44229), (3.40595,12.21649), (-7.63085,10.45968), (1.13336,15.34722), (-0.0096,5.50868), (0.8928,10.93609), (-0.5943,2.78631), (7.48306,11.86145), (10.11943,18.67385), (5.60459,10.64051), (4.00189,12.75565), (2.35823,6.63666), (0.33475,12.19343), (3.47072,9.08636), (-6.68867,11.67256), (3.31031,20.31392), (2.17159,11.66443); -SELECT -28.740781574102936, 7.667329672103986e-133; +SELECT '-28.740781574102936', '7.667329672103986e-133'; SELECT roundBankers(StudentTTest(left, right).1, 16) as t_stat, roundBankers(StudentTTest(left, right).2, 16) as p_value from student_ttest; DROP TABLE IF EXISTS student_ttest; diff --git a/tests/queries/0_stateless/01322_welch_ttest.sql b/tests/queries/0_stateless/01322_welch_ttest.sql index 2a045e70b32..5a5b52ab612 100644 --- a/tests/queries/0_stateless/01322_welch_ttest.sql +++ b/tests/queries/0_stateless/01322_welch_ttest.sql @@ -23,7 +23,7 @@ DROP TABLE IF EXISTS welch_ttest; Second: a=10, sigma = 10, size = 500 */ CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; INSERT INTO welch_ttest VALUES (14.72789,-8.65656), (9.61661,22.98234), (13.57615,23.80821), (3.98392,13.33939), (11.98889,-4.05537), (10.99422,23.5155), (5.44792,-6.45272), (20.29346,17.7903), (7.05926,11.463), (9.22732,5.28021), (12.06847,8.39157), (13.52612,6.02464), (8.24597,14.43732), (9.35245,15.76584), (10.12297,1.54391), (15.80624,1.24897), (13.68613,27.1507), (10.72729,7.71091), (5.62078,15.71846), (6.12229,32.97808), (6.03801,-1.79334), (8.95585,-9.23439), (24.04613,11.27838), (9.04757,0.72703), (2.68263,18.51557), (15.43935,9.16619), (2.89423,17.29624), (4.01423,-1.30208), (4.30568,-3.48018), (11.99948,10.12082), (8.40574,-8.01318), (10.86642,-14.22264), (9.4266,16.58174), (-8.12752,-0.55975), (7.91634,5.61449), (7.3967,1.44626), (2.26431,7.89158), (14.20118,1.13369), (6.68233,-0.82609), (15.46221,12.23365), (7.88467,12.45443), (11.20011,14.46915), (8.92027,13.72627), (10.27926,18.41459), (5.14395,29.66702), (5.62178,1.51619), (12.84383,10.40078), (9.98009,3.33266), (-0.69789,6.12036), (11.41386,11.86553), (7.76863,6.59422), (7.21743,22.0948), (1.81176,1.79623), (9.43762,14.29513), (19.22117,19.69162), (2.97128,-7.98033), (14.32851,5.48433), (7.54959,-2.28474), (3.81545,9.91876), (10.1281,10.64097), (2.48596,0.22523), (10.0461,17.01773), (3.59714,22.37388), (9.73522,14.04215), (18.8077,23.1244), (3.15148,18.96958), (12.26062,8.42663), (5.66707,3.7165), (6.58623,14.29366), (17.30902,23.50886), (9.91391,26.33722), (5.36946,26.72396), (15.73637,13.26287), (16.96281,12.97607), (11.54063,17.41838), (18.37358,8.63875), (11.38255,17.08943), (10.53256,23.15356), (8.08833,-4.4965), (16.27556,7.58895), (2.42969,26.04074), (9.56127,6.84245), (7.32998,20.56287), (9.19511,3.84735), (9.66903,-2.76304), (4.15029,13.1615), (8.83511,8.21954), (14.60617,-3.49943), (14.06143,22.12419), (5.39556,7.08323), (10.11871,16.12937), (10.56619,-0.32672), (14.4462,16.5942), (10.42106,7.68977), (7.75551,11.39484), (11.00418,-5.11987), (4.47226,20.87404), (16.35461,8.01007), (18.55174,3.26497), (11.82044,5.61253), (7.39454,20.69182), (11.27767,0.0296), (6.83827,21.904), (7.76858,22.46572), (15.97614,3.63685), (14.53781,-5.10846), (12.99546,14.86389), (16.91151,5.47188), (9.65012,18.44095), (14.25487,16.71368), (14.03618,6.36704), (2.57382,8.82663), (2.50779,14.6727), (14.24787,7.98383), (13.34666,2.65568), 
(7.31102,21.45827), (10.22981,11.77948), (17.4435,4.71979), (21.2074,3.17951), (6.64191,13.90226), (18.7086,15.50578), (14.78686,10.8026), (9.85287,16.91369), (4.48263,9.90552), (14.17469,13.87322), (14.4342,4.12366), (19.2481,-3.78985), (3.47165,1.7599), (8.28712,3.43715), (8.81657,-3.45246), (0.92319,23.64571), (20.41106,-4.96877), (6.76127,3.93514), (22.00242,1.49914), (8.66129,12.71519), (10.9929,5.11521), (17.95494,4.79872), (17.20996,20.89391), (12.18888,5.363), (12.14257,8.02765), (15.81243,14.30804), (4.43362,11.49002), (1.17567,14.25281), (15.60881,7.6573), (9.34833,15.49686), (6.33513,3.29327), (-0.83095,2.27236), (12.43268,12.58104), (6.63207,19.19128), (11.96877,15.25901), (14.81029,6.5221), (21.84876,10.10965), (3.75896,12.75249), (6.91307,16.50977), (13.73015,-8.6697), (8.63753,8.28553), (15.71679,1.44315), (1.74565,4.65869), (9.16895,0.98149), (5.70685,0.16623), (5.00117,17.66332), (13.06888,4.35346), (7.51204,6.52742), (15.34885,-1.06631), (5.20264,-5.28454), (8.59043,14.25583), (6.45619,8.74058), (14.61979,1.89553), (11.7075,-0.92959), (14.04901,10.30289), (4.20525,-6.3744), (15.1733,-8.1706), (3.12934,10.95369), (8.08049,4.94384), (15.41273,28.40568), (16.90751,3.7004), (5.86893,2.52363), (7.1086,4.07997), (4.418,7.8849), (12.0614,17.95409), (7.07887,16.67021), (3.61585,11.34377), (11.73001,-0.07446), (10.80449,22.00223), (8.40311,3.31778), (9.91276,18.50719), (16.4164,-3.58655), (5.25034,6.5394), (15.20283,12.40459), (10.42909,16.59866), (9.53888,7.54176), (14.68939,-1.51044), (6.60007,12.69758), (18.31058,2.9842), (7.01885,2.49187), (18.71631,2.04113), (10.50002,-2.46544), (10.7517,15.18368), (4.23224,-0.04058), (2.28924,-0.4127), (8.56059,10.5526), (8.25095,12.03982), (9.15673,12.10923), (13.28409,11.54954), (8.4513,-1.18613), (2.83911,11.30984), (2.79676,23.54105), (9.11055,10.67321), (7.18529,24.09196), (-4.1258,7.5008), (5.28306,12.52233), (6.82757,4.30673), (10.89035,9.35793), (5.24822,4.44472), (11.935,-7.00679), (6.45675,8.56241), (10.18088,23.73891), (4.9932,15.62708), (18.09939,16.09205), (8.11738,12.52074), (5.37883,14.58927), (10.50339,-4.80187), (16.64093,8.47964), (14.77263,7.75477), (13.71385,12.6893), (6.98746,7.14147), (10.74635,12.12654), (5.49432,12.32334), (13.46078,7.98909), (10.67565,3.26652), (9.0291,20.53684), (11.51417,32.3369), (13.07118,19.74911), (9.5049,-4.62897), (8.50611,8.26483), (6.47606,20.88451), (13.06526,-2.12982), (19.08658,25.61459), (9.49741,5.32091), (10.60865,-4.1196), (2.28996,7.57937), (8.12846,21.15847), (5.62241,6.46355), (4.07712,7.74846), (17.98526,19.62636), (9.466,28.34629), (11.38904,26.73919), (5.91826,20.40427), (1.52059,3.03378), (18.79161,10.2537), (18.20669,7.47745), (-1.67829,10.79184), (18.01586,3.91962), (16.31577,19.97973), (7.88281,18.87711), (8.46179,12.56157), (10.31113,11.46033), (14.88377,3.78661), (1.31835,-9.45748), (2.53176,12.06033), (9.48625,-0.74615), (3.97936,13.2815), (11.52319,24.78052), (13.24178,5.83337), (7.58739,17.4111), (10.00959,19.70331), (9.73361,11.78446), (8.35716,-1.366), (1.65491,1.37458), (11.11521,16.31483), (6.08355,32.63464), (10.04582,-3.79736), (11.58237,19.17984), (16.40249,-0.27705), (1.9691,-3.69456), (13.22776,28.38058), (2.67059,-1.36876), (9.83651,-25.63301), (2.12539,3.58644), (9.27114,-6.85667), (9.0699,13.42225), (2.78179,12.04671), (12.49311,28.99468), (12.97662,7.87662), (15.06359,2.61119), (16.91565,-3.56022), (5.92011,1.50022), (5.81304,14.55836), (8.46425,9.35831), (9.48705,16.9366), (4.68191,29.23126), (5.70028,15.31386), (-0.78798,13.46112), (10.03442,7.39667), 
(15.45433,11.15599), (9.43845,9.80499), (3.05825,22.64923), (6.92126,8.67693), (14.05905,18.67335), (19.71579,-3.19127), (15.0131,22.94716), (4.50386,17.86834), (1.31061,16.98267), (10.81197,15.91653), (14.32942,11.79718), (9.26469,18.50208), (7.27679,8.90755), (22.69295,10.44843), (12.03763,4.67433), (7.34876,6.82287), (16.60689,10.82228), (7.48786,-4.18631), (15.78602,20.3872), (17.21048,11.84735), (13.93482,21.25376), (9.69911,10.55032), (12.24315,12.19023), (10.58131,0.63369), (19.57006,7.92381), (9.8856,17.90933), (11.70302,15.30781), (7.89864,10.01877), (12.24831,0.88744), (16.93707,22.20967), (9.65467,-4.23117), (4.221,21.50819), (15.45229,11.27421), (12.83088,-16.23179), (7.58313,33.43085), (12.895,5.15093), (10.02471,1.34505), (13.36059,6.027), (5.07864,-10.43035), (9.72017,27.45998), (11.05809,19.24886), (15.28528,-4.44761), (13.99834,5.453), (19.26989,12.73758), (9.41846,11.2897), (11.65425,31.032), (8.49638,7.39168), (6.38592,11.95245), (-4.69837,26.279), (12.22061,-1.0255), (9.41331,10.36675), (13.2075,11.58439), (12.97005,27.8405), (11.44352,13.1707), (9.79805,31.39133), (6.93116,27.08301), (10.07691,-2.14368), (22.05892,4.08476), (7.80353,21.5573), (-2.17276,16.69822), (0.61509,7.69955), (8.35842,8.32793), (17.77108,6.49235), (14.70841,-7.3284), (1.27992,10.58264), (15.62699,-6.17006), (9.32914,34.55782), (15.41866,10.93221), (10.82009,44.24299), (3.29902,14.6224), (9.21998,-7.42798), (7.93845,15.52351), (10.33344,11.33982), (12.06399,10.46716), (5.5308,13.0986), (8.38727,-4.25988), (18.11104,9.55316), (8.86565,0.75489), (19.41825,25.99212), (9.52376,-0.81401), (3.94552,3.49551), (9.37587,22.99402), (15.44954,10.99628), (15.90527,23.70223), (13.18927,2.71482), (7.01646,22.82309), (9.06005,31.25686), (9.06431,4.86318), (5.76006,-1.06476), (9.18705,15.10298), (-3.48446,-0.61015), (15.89817,17.81246), (12.94719,-1.55788), (23.69426,18.09709), (17.47755,9.11271), (15.61528,9.94682), (0.54832,-7.33194), (14.32916,-4.67293), (9.55305,21.81717), (13.79891,7.16318), (0.82544,13.25649), (13.34875,13.88776), (9.07614,4.95793), (5.19621,17.65303), (2.1451,14.47382), (9.87726,13.19373), (8.45439,31.86093), (-1.41842,5.73161), (7.93598,10.96492), (11.23151,6.97951), (17.84458,1.75136), (7.02237,10.96144), (10.7842,15.08137), (4.42832,9.95311), (4.45044,7.07729), (1.50938,3.08148), (21.21651,22.37954), (6.2097,8.51951), (6.84354,2.88746), (18.53804,26.73509), (12.01072,-2.88939), (4.8345,-2.82367), (20.41587,-0.35783), (14.48353,14.22076), (8.71116,11.50295), (12.42818,7.10171), (14.89244,8.28488), (8.03033,0.54178), (5.25917,13.8022), (2.30092,15.62157), (10.22504,10.79173), (15.37573,28.18946), (7.13666,30.43524), (4.45018,2.54914), (10.18405,9.89421), (3.91025,13.08631), (14.52304,4.68761), (13.14771,5.61516), (11.99219,22.88072), (9.21345,7.4735), (8.85106,11.27382), (12.91887,2.39559), (15.62308,-3.31889), (11.88034,9.61957), (15.12097,23.01381), (11.58168,-1.23467), (16.83051,9.07691), (5.25405,15.78056), (2.19976,12.28421), (4.56716,9.44888), (16.46053,13.16928), (5.61995,4.33357), (8.67704,2.21737), (5.62789,33.17833), (9.84815,13.25407), (13.05834,-2.47961), (11.74205,6.41401), (3.88393,18.8439), (16.15321,-4.63375), (4.83925,-8.2909), (13.00334,12.18221), (4.4028,-2.95356), (4.35794,19.61659), (4.47478,12.45056), (2.38713,-4.17198), (4.25235,21.9641), (10.87509,11.96416), (9.82411,12.74573), (13.61518,10.47873), (10.25507,12.73295), (4.0335,11.31373), (10.69881,9.9827), (5.70321,5.87138), (6.96244,4.24372), (9.35874,-23.72256), (6.28076,28.41337), (8.29015,4.88103), 
(6.88653,3.61902), (7.70687,8.93586), (8.2001,16.40759), (6.73415,27.84494), (3.82052,5.6001), (3.94469,14.51379), (15.82384,13.5576), (2.54004,12.92213), (10.74876,3.90686), (12.60517,17.07104), (17.7024,15.84268), (4.6722,17.38777), (13.67341,16.54766), (6.4565,5.94487), (12.95699,17.02804), (4.56912,7.66386), (5.58464,10.43088), (4.0638,6.16059), (13.05559,20.46178), (5.38269,20.02888), (0.16354,20.95949), (7.23962,6.50808), (7.38577,7.22366), (8.50951,8.06659), (13.72574,16.08241), (17.80421,13.83514), (3.01135,-0.33454), (8.02608,12.98848), (14.23847,12.99024); -SELECT -0.5028215369186904, 0.6152361677168877; +SELECT '-0.5028215369186904', '0.6152361677168877'; SELECT roundBankers(WelchTTest(left, right).1, 16) as t_stat, roundBankers(WelchTTest(left, right).2, 16) as p_value from welch_ttest; DROP TABLE IF EXISTS welch_ttest; @@ -32,6 +32,6 @@ DROP TABLE IF EXISTS welch_ttest; Second: a=1, sigma = 12, size = 500 */ CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; INSERT INTO welch_ttest VALUES (4.82025,-2.69857), (6.13896,15.80943), (15.20277,7.31555), (14.15351,3.96517), (7.21338,4.77809), (8.55506,9.6472), (13.80816,-26.41717), (11.28411,-10.85635), (7.4612,-1.4376), (7.43759,-0.96308), (12.9832,2.84315), (-5.74783,5.79467), (12.47114,-3.06091), (15.14223,-14.62902), (3.40603,22.08022), (9.27323,-2.11982), (7.88547,-4.84824), (8.56456,-10.50447), (4.59731,2.4891), (7.91213,9.90324), (7.33894,-22.66866), (21.74811,-0.97103), (11.92111,-16.57608), (0.18828,-3.78749), (10.47314,25.84511), (20.37396,5.30797), (11.04991,-18.19466), (13.30083,11.72708), (14.28065,0.2891), (2.86942,-9.83474), (24.96072,6.69942), (14.20164,18.09604), (18.28769,18.52651), (10.50949,1.38201), (9.22273,7.64615), (11.77608,17.66598), (8.56872,-2.44141), (13.74535,-9.01598), (11.65209,27.69142), (12.51894,4.06946), (17.76256,-15.0077), (13.52122,-10.49648), (8.70796,-4.88322), (6.04749,-25.09805), (16.33064,-4.64024), (8.35636,20.94434), (14.03496,24.12126), (11.05834,-14.10962), (14.49261,10.6512), (2.59383,14.50687), (8.01022,-19.88081), (4.05458,-11.55271), (13.26384,13.16921), (14.62058,16.63864), (10.52489,-24.08114), (8.46357,-9.09949), (6.4147,-10.54702), (9.70071,0.20813), (12.47581,8.19066), (4.38333,-2.70523), (17.54172,-0.23954), (10.12109,7.19398), (7.73186,-7.1618), (14.0279,-7.44322), (11.6621,-17.92031), (17.47045,-1.58146), (15.50223,9.18338), (15.46034,3.25838), (13.39964,-14.30234), (14.98025,1.84695), (15.87912,31.13794), (17.67374,-0.85067), (9.64073,19.02787), (12.84904,-3.09594), (7.70278,13.45584), (13.03156,-5.48104), (9.04512,-22.74928), (15.97014,-8.03697), (8.96389,17.31143), (11.48009,-16.65231), (9.71153,-18.58713), (13.00084,-16.52641), (12.39803,14.95261), (13.08188,12.56762), (5.82244,15.00188), (10.81871,1.85858), (8.2539,2.1926), (7.52114,-2.4095), (9.11488,21.56873), (8.37482,3.35509), (14.48652,-4.98672), (11.42152,35.08603), (16.03111,-10.01602), (13.14057,-3.85153), (-2.26351,-6.81974), (15.50394,19.56525), (14.88603,-9.35488), (13.37257,0.24268), (11.84026,-3.51488), (7.66558,-0.37066), (6.24584,24.20888), (3.6312,-11.73537), (2.7018,0.01282), (5.63656,0.03963), (5.82643,-9.65589), (10.06745,-0.37429), (-0.5831,5.61255), (14.84202,0.49984), (9.5524,-10.15066), (19.71713,-14.54314), (14.23109,16.56889), (8.69105,-7.73873), (5.33742,-3.76422), (7.30372,1.40722), (7.93342,2.28818), (15.20884,-13.12643), (7.53839,5.17082), (13.45311,4.79089), (11.04473,-17.42643), (10.76673,8.72548), (15.44145,-3.70285), (14.06596,16.77893), (9.14873,13.382), 
(12.88372,19.98418), (8.74994,0.00483), (10.53263,-4.75951), (16.16694,2.35391), (8.37197,21.65809), (3.43739,-9.2714), (4.72799,-18.38253), (9.08802,7.23097), (11.2531,14.97927), (5.16115,-4.02197), (10.20895,-29.8189), (18.70884,-12.8554), (15.88924,-7.60124), (3.38758,-14.90158), (6.46449,-3.31486), (10.21088,31.38144), (14.08458,-8.61288), (15.74508,15.31895), (19.31896,-10.19488), (13.19641,13.796), (11.95409,-0.32912), (10.70718,-0.0684), (1.05245,-30.06834), (10.04772,24.93912), (17.01369,-3.26506), (10.2286,-8.29751), (19.58323,-5.39189), (7.02892,-25.08603), (4.16866,-1.45318), (8.94326,16.72724), (4.99854,-3.38467), (8.88352,-26.00478), (18.65422,7.28369), (17.32328,16.96226), (9.33492,16.5858), (14.94788,10.46583), (8.05863,3.84345), (14.6737,-2.99382), (10.93801,1.42078), (0.54036,-11.0123), (-0.34242,2.09909), (5.89076,1.21064), (3.15189,15.36079), (1.94421,-21.61349), (6.38698,22.7726), (10.50654,10.50512), (8.95362,-6.95825), (6.23711,9.20036), (11.75359,15.66902), (12.42155,3.28098), (-1.55472,-9.05692), (4.6688,0.32882), (10.48087,-1.64934), (11.74615,-4.81406), (9.26822,-5.06006), (7.55517,19.97493), (12.76005,2.88646), (16.47102,-0.34552), (11.31297,7.55186), (14.37437,-22.96115), (2.38799,31.29166), (6.44577,6.18798), (5.07471,-2.52715), (11.55123,-11.58799), (7.76795,14.13596), (10.60116,13.45069), (14.40885,12.15179), (11.58158,3.44491), (8.81648,-8.78006), (12.92299,18.32087), (11.26939,11.91757), (17.95014,-2.00179), (2.95002,10.88411), (17.41959,9.09327), (11.12455,6.62484), (8.78541,8.87178), (14.36413,11.52254), (12.98554,-14.15988), (12.58505,-17.19515), (15.49789,14.03089), (11.70999,-2.4095), (0.65596,-16.83575), (11.08202,2.71469), (14.75752,4.84351), (6.84385,-1.17651), (9.27245,-3.37529), (13.78243,-19.92137), (17.4863,4.48952), (4.01777,-12.4906), (11.82861,-5.65277), (13.86551,8.50819), (6.16591,-19.61261), (8.71589,12.54156), (16.77195,11.06784), (17.23243,-12.59285), (-2.12941,3.43683), (5.66629,-3.00325), (12.45153,12.49082), (1.63971,7.20955), (13.84031,17.6547), (4.6144,15.8619), (5.26169,24.3048), (9.27769,-8.05434), (9.14288,-6.06901), (9.71953,-15.69515), (9.38446,-11.13917), (1.64788,-3.90757), (11.72922,-2.57038), (13.68926,5.14065), (9.42952,17.8497), (12.05574,-8.64665), (9.09148,-18.68331), (5.32273,5.8567), (20.25258,-20.93884), (10.14599,4.40583), (10.82156,14.35985), (5.75736,4.18134), (7.13567,4.3635), (9.29746,9.35428), (5.1618,2.8908), (10.076,16.01017), (21.65669,-1.48499), (13.35486,-9.97949), (6.79957,1.03055), (8.76243,-2.79697), (14.59294,6.85977), (16.90609,4.73213), (10.50337,2.7815), (-0.07923,-2.46866), (13.51648,18.39425), (12.0676,-0.80378), (0.86482,-0.22982), (9.03563,-16.11608), (5.38751,3.0862), (17.16866,3.20779), (2.78702,10.50146), (11.15548,-0.21305), (12.30843,11.21012), (8.04897,-0.99825), (9.95814,18.39633), (11.29308,-3.39003), (14.13032,-0.64411), (21.05877,-1.39932), (3.57386,15.45319), (7.96631,-0.66044), (3.30484,-15.2223), (18.61856,-34.39907), (16.35184,-3.57836), (7.65236,16.82828), (18.02895,1.66624), (9.79458,15.43475), (16.7274,8.17776), (8.84453,5.50486), (13.05709,10.43082), (10.91447,-6.63332), (8.40171,2.28008), (16.95211,16.37203), (11.82194,5.16313), (19.87978,-8.85281), (12.88455,13.26692), (-0.00947,-7.46842), (12.28109,8.43091), (6.96462,-13.18172), (13.75282,-0.72401), (14.39141,22.3881), (11.07193,10.65448), (12.88039,2.81289), (11.38253,10.92405), (21.02707,-8.95358), (7.51955,19.80653), (6.31984,-12.86527), (15.6543,5.38826), (14.80315,-6.83501), (8.38024,-15.7647), (21.7516,-27.67412), 
(14.31336,8.6499), (15.04703,-4.89542), (5.73787,16.76167), (13.16911,12.84284), (12.40695,-17.27324), (9.88968,-4.18726), (8.46703,-14.62366), (8.70637,-5.49863), (8.03551,-16.22846), (5.9757,10.60329), (12.22951,6.46781), (3.14736,1.70458), (10.51266,10.77448), (18.593,0.8463), (10.82213,13.0482), (7.14216,-4.36264), (6.81154,3.22647), (-0.6486,2.38828), (20.56136,6.7946), (11.35367,-0.25254), (11.38205,1.2497), (17.14,1.6544), (14.91215,4.1019), (15.50207,11.27839), (5.93162,-5.04127), (3.74869,18.11674), (14.11532,0.51231), (7.38954,-0.51029), (5.45764,13.52556), (18.33733,16.10171), (9.91923,5.68197), (2.38991,-2.85904), (14.16756,-8.89167), (2.39791,6.24489), (6.92586,10.85319), (5.32474,-0.39816), (2.28812,3.87079), (5.71718,-3.1867), (5.84197,1.55322), (2.76206,16.86779), (19.05928,-14.60321), (11.51788,-1.81952), (6.56648,-3.11624), (3.35735,1.24193), (7.55948,10.18179), (19.99908,4.69796), (13.00634,0.69032), (18.36886,11.7723), (11.14675,7.62896), (16.72931,9.89741), (12.50106,9.11484), (6.00605,-3.84676), (23.06653,-0.4777), (5.39694,0.95958), (9.53167,-7.95056), (12.76944,-10.97474), (7.20604,-6.54861), (13.25391,34.74933), (13.7341,27.39463), (10.85292,4.18299), (-7.75835,6.02476), (10.29728,-1.99397), (13.70099,1.26478), (10.17959,23.37106), (9.98399,10.49682), (12.69389,-11.04354), (-0.28848,-12.22284), (-2.18319,-9.87635), (13.36378,28.90511), (10.09232,6.77613), (5.49489,0.55352), (5.46156,0.37031), (0.94225,7.1418), (12.79205,3.24897), (10.09593,-1.60918), (6.06218,3.1675), (0.89463,-17.97072), (11.88986,-5.61743), (10.79733,14.1422), (1.51371,14.87695), (2.20967,-4.65961), (15.45732,-0.99174), (16.5262,-2.96623), (5.99724,-9.02263), (8.3613,-17.2088), (15.68183,2.78608), (15.32117,6.74239), (14.15674,4.8524), (6.64553,7.46731), (4.20777,1.04894), (-0.10521,-12.8023), (-0.88169,-17.18188), (1.85913,-5.08801), (9.73673,22.13942), (0.30926,-0.36384), (6.17559,17.80564), (11.76602,7.67504), (5.68385,1.59779), (14.57088,4.10942), (12.81509,0.61074), (9.85682,-14.40767), (12.06376,10.59906), (6.08874,16.57017), (11.63921,-15.17526), (14.86722,-6.98549), (10.41035,-0.64548), (2.93794,3.23756), (12.21841,14.65504), (0.23804,4.583), (3.14845,12.72378), (7.29748,5.26547), (3.06134,0.81781), (13.77684,9.38273), (16.21992,10.37636), (5.33511,10.70325), (9.68959,-0.83043), (9.44169,-7.53149), (18.08012,-9.09147), (4.04224,-19.51381), (8.77918,-28.44508), (10.18324,6.44392), (9.38914,11.10201), (11.76995,-2.86184), (14.19963,8.30673), (6.88817,8.8797), (16.56123,10.68053), (15.39885,15.62919), (5.21241,8.00579), (4.44408,6.4651), (17.87587,-4.50029), (12.53337,18.04514), (13.60916,11.12996), (6.60104,-5.14007), (7.35453,9.43857), (18.61572,3.13476), (6.10437,4.9772), (13.08682,-17.45782), (12.15404,0.05552), (4.90789,-1.90283), (2.13353,2.67908), (12.49593,-2.62243), (11.93056,-3.22767), (13.29408,-8.70222), (5.70038,-23.11605), (8.40271,21.6757), (5.19456,12.70076), (-5.51028,4.4322), (14.0329,11.69344), (10.38365,9.18052), (6.56812,-2.2549), (4.21129,-2.15615), (9.7157,20.29765), (9.88553,-0.29536), (13.45346,15.50109), (4.97752,8.79187), (12.77595,5.11533), (8.56465,-20.44436), (4.27703,-3.00909), (18.12502,-4.48291), (12.45735,21.84462), (12.42912,1.94225), (12.08125,-2.81908), (10.85779,17.19418), (4.36013,-9.33528), (11.85062,-0.17346), (8.47776,0.03958), (9.60822,-35.17786), (11.3069,8.36887), (14.25525,-9.02292), (1.55168,-10.98804), (14.57782,0.29335), (7.84786,4.29634), (9.87774,3.87718), (14.75575,-9.08532), (3.68774,7.13922), (9.37667,-7.62463), (20.28676,-10.5666), 
(12.10027,4.68165), (8.01819,-3.30172), (18.78158,13.04852), (20.85402,13.45616), (18.98069,2.41043), (16.1429,-0.36501), (9.24047,-15.67383), (14.12487,17.92217), (10.18841,8.42106), (-3.04478,3.22063), (5.7552,-7.31753), (9.30376,21.99596), (11.42837,-36.8273), (6.02364,-20.46391), (8.86984,5.74179), (10.91177,-15.83178), (10.04418,14.90454), (18.10774,-8.84645), (7.49384,3.72036), (9.11556,4.6877), (9.7051,16.35418), (5.23268,3.15441), (9.04647,2.39907), (8.81547,-17.58664), (2.65098,-13.18269); -SELECT 14.971190998235835, 5.898143508382202e-44; +SELECT '14.971190998235835', '5.898143508382202e-44'; SELECT roundBankers(WelchTTest(left, right).1, 16) as t_stat, roundBankers(WelchTTest(left, right).2, 16) as p_value from welch_ttest; DROP TABLE IF EXISTS welch_ttest; From f9204135d8212640803fc7fdd5ce69e8902fd176 Mon Sep 17 00:00:00 2001 From: nikitamikhaylov Date: Wed, 14 Oct 2020 16:54:02 +0300 Subject: [PATCH 092/432] add lgammal.c --- base/glibc-compatibility/musl/lgammal.c | 340 ++++++++++++++++++++++++ 1 file changed, 340 insertions(+) create mode 100644 base/glibc-compatibility/musl/lgammal.c diff --git a/base/glibc-compatibility/musl/lgammal.c b/base/glibc-compatibility/musl/lgammal.c new file mode 100644 index 00000000000..cc4a5e3b54d --- /dev/null +++ b/base/glibc-compatibility/musl/lgammal.c @@ -0,0 +1,340 @@ +/* origin: OpenBSD /usr/src/lib/libm/src/ld80/e_lgammal.c */ +/* + * ==================================================== + * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. + * + * Developed at SunPro, a Sun Microsystems, Inc. business. + * Permission to use, copy, modify, and distribute this + * software is freely granted, provided that this notice + * is preserved. + * ==================================================== + */ +/* + * Copyright (c) 2008 Stephen L. Moshier + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/* lgammal(x) + * Reentrant version of the logarithm of the Gamma function + * with user provide pointer for the sign of Gamma(x). + * + * Method: + * 1. Argument Reduction for 0 < x <= 8 + * Since gamma(1+s)=s*gamma(s), for x in [0,8], we may + * reduce x to a number in [1.5,2.5] by + * lgamma(1+s) = log(s) + lgamma(s) + * for example, + * lgamma(7.3) = log(6.3) + lgamma(6.3) + * = log(6.3*5.3) + lgamma(5.3) + * = log(6.3*5.3*4.3*3.3*2.3) + lgamma(2.3) + * 2. Polynomial approximation of lgamma around its + * minimun ymin=1.461632144968362245 to maintain monotonicity. + * On [ymin-0.23, ymin+0.27] (i.e., [1.23164,1.73163]), use + * Let z = x-ymin; + * lgamma(x) = -1.214862905358496078218 + z^2*poly(z) + * 2. 
Rational approximation in the primary interval [2,3] + * We use the following approximation: + * s = x-2.0; + * lgamma(x) = 0.5*s + s*P(s)/Q(s) + * Our algorithms are based on the following observation + * + * zeta(2)-1 2 zeta(3)-1 3 + * lgamma(2+s) = s*(1-Euler) + --------- * s - --------- * s + ... + * 2 3 + * + * where Euler = 0.5771... is the Euler constant, which is very + * close to 0.5. + * + * 3. For x>=8, we have + * lgamma(x)~(x-0.5)log(x)-x+0.5*log(2pi)+1/(12x)-1/(360x**3)+.... + * (better formula: + * lgamma(x)~(x-0.5)*(log(x)-1)-.5*(log(2pi)-1) + ...) + * Let z = 1/x, then we approximation + * f(z) = lgamma(x) - (x-0.5)(log(x)-1) + * by + * 3 5 11 + * w = w0 + w1*z + w2*z + w3*z + ... + w6*z + * + * 4. For negative x, since (G is gamma function) + * -x*G(-x)*G(x) = pi/sin(pi*x), + * we have + * G(x) = pi/(sin(pi*x)*(-x)*G(-x)) + * since G(-x) is positive, sign(G(x)) = sign(sin(pi*x)) for x<0 + * Hence, for x<0, signgam = sign(sin(pi*x)) and + * lgamma(x) = log(|Gamma(x)|) + * = log(pi/(|x*sin(pi*x)|)) - lgamma(-x); + * Note: one should avoid compute pi*(-x) directly in the + * computation of sin(pi*(-x)). + * + * 5. Special Cases + * lgamma(2+s) ~ s*(1-Euler) for tiny s + * lgamma(1)=lgamma(2)=0 + * lgamma(x) ~ -log(x) for tiny x + * lgamma(0) = lgamma(inf) = inf + * lgamma(-integer) = +-inf + * + */ + +static const long double pi = 3.14159265358979323846264L, + +/* lgam(1+x) = 0.5 x + x a(x)/b(x) + -0.268402099609375 <= x <= 0 + peak relative error 6.6e-22 */ +a0 = -6.343246574721079391729402781192128239938E2L, +a1 = 1.856560238672465796768677717168371401378E3L, +a2 = 2.404733102163746263689288466865843408429E3L, +a3 = 8.804188795790383497379532868917517596322E2L, +a4 = 1.135361354097447729740103745999661157426E2L, +a5 = 3.766956539107615557608581581190400021285E0L, + +b0 = 8.214973713960928795704317259806842490498E3L, +b1 = 1.026343508841367384879065363925870888012E4L, +b2 = 4.553337477045763320522762343132210919277E3L, +b3 = 8.506975785032585797446253359230031874803E2L, +b4 = 6.042447899703295436820744186992189445813E1L, +/* b5 = 1.000000000000000000000000000000000000000E0 */ + + +tc = 1.4616321449683623412626595423257213284682E0L, +tf = -1.2148629053584961146050602565082954242826E-1, /* double precision */ +/* tt = (tail of tf), i.e. tf + tt has extended precision. 
*/ +tt = 3.3649914684731379602768989080467587736363E-18L, +/* lgam ( 1.4616321449683623412626595423257213284682E0 ) = +-1.2148629053584960809551455717769158215135617312999903886372437313313530E-1 */ + +/* lgam (x + tc) = tf + tt + x g(x)/h(x) + -0.230003726999612341262659542325721328468 <= x + <= 0.2699962730003876587373404576742786715318 + peak relative error 2.1e-21 */ +g0 = 3.645529916721223331888305293534095553827E-18L, +g1 = 5.126654642791082497002594216163574795690E3L, +g2 = 8.828603575854624811911631336122070070327E3L, +g3 = 5.464186426932117031234820886525701595203E3L, +g4 = 1.455427403530884193180776558102868592293E3L, +g5 = 1.541735456969245924860307497029155838446E2L, +g6 = 4.335498275274822298341872707453445815118E0L, + +h0 = 1.059584930106085509696730443974495979641E4L, +h1 = 2.147921653490043010629481226937850618860E4L, +h2 = 1.643014770044524804175197151958100656728E4L, +h3 = 5.869021995186925517228323497501767586078E3L, +h4 = 9.764244777714344488787381271643502742293E2L, +h5 = 6.442485441570592541741092969581997002349E1L, +/* h6 = 1.000000000000000000000000000000000000000E0 */ + + +/* lgam (x+1) = -0.5 x + x u(x)/v(x) + -0.100006103515625 <= x <= 0.231639862060546875 + peak relative error 1.3e-21 */ +u0 = -8.886217500092090678492242071879342025627E1L, +u1 = 6.840109978129177639438792958320783599310E2L, +u2 = 2.042626104514127267855588786511809932433E3L, +u3 = 1.911723903442667422201651063009856064275E3L, +u4 = 7.447065275665887457628865263491667767695E2L, +u5 = 1.132256494121790736268471016493103952637E2L, +u6 = 4.484398885516614191003094714505960972894E0L, + +v0 = 1.150830924194461522996462401210374632929E3L, +v1 = 3.399692260848747447377972081399737098610E3L, +v2 = 3.786631705644460255229513563657226008015E3L, +v3 = 1.966450123004478374557778781564114347876E3L, +v4 = 4.741359068914069299837355438370682773122E2L, +v5 = 4.508989649747184050907206782117647852364E1L, +/* v6 = 1.000000000000000000000000000000000000000E0 */ + + +/* lgam (x+2) = .5 x + x s(x)/r(x) + 0 <= x <= 1 + peak relative error 7.2e-22 */ +s0 = 1.454726263410661942989109455292824853344E6L, +s1 = -3.901428390086348447890408306153378922752E6L, +s2 = -6.573568698209374121847873064292963089438E6L, +s3 = -3.319055881485044417245964508099095984643E6L, +s4 = -7.094891568758439227560184618114707107977E5L, +s5 = -6.263426646464505837422314539808112478303E4L, +s6 = -1.684926520999477529949915657519454051529E3L, + +r0 = -1.883978160734303518163008696712983134698E7L, +r1 = -2.815206082812062064902202753264922306830E7L, +r2 = -1.600245495251915899081846093343626358398E7L, +r3 = -4.310526301881305003489257052083370058799E6L, +r4 = -5.563807682263923279438235987186184968542E5L, +r5 = -3.027734654434169996032905158145259713083E4L, +r6 = -4.501995652861105629217250715790764371267E2L, +/* r6 = 1.000000000000000000000000000000000000000E0 */ + + +/* lgam(x) = ( x - 0.5 ) * log(x) - x + LS2PI + 1/x w(1/x^2) + x >= 8 + Peak relative error 1.51e-21 +w0 = LS2PI - 0.5 */ +w0 = 4.189385332046727417803e-1L, +w1 = 8.333333333333331447505E-2L, +w2 = -2.777777777750349603440E-3L, +w3 = 7.936507795855070755671E-4L, +w4 = -5.952345851765688514613E-4L, +w5 = 8.412723297322498080632E-4L, +w6 = -1.880801938119376907179E-3L, +w7 = 4.885026142432270781165E-3L; + +/* sin(pi*x) assuming x > 2^-1000, if sin(pi*x)==0 the sign is arbitrary */ +static long double sin_pi(long double x) +{ + int n; + + /* spurious inexact if odd int */ + x *= 0.5; + x = 2.0*(x - floorl(x)); /* x mod 2.0 */ + + n = (int)(x*4.0); + n = (n+1)/2; + x -= n*0.5f; + x *= pi; 
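+	/* n now indexes the quarter period, so the switch below recovers
+	   sin(pi*x0) as +-__sinl / +-__cosl of the reduced x in [-pi/4, pi/4] */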
+ + switch (n) { + default: /* case 4: */ + case 0: return __sinl(x, 0.0, 0); + case 1: return __cosl(x, 0.0); + case 2: return __sinl(-x, 0.0, 0); + case 3: return -__cosl(x, 0.0); + } +} + +#include +#include + +long double __lgammal_r(long double x, int *sg) { + long double t, y, z, nadj, p, p1, p2, q, r, w; + union ldshape u = {x}; + uint32_t ix = (u.i.se & 0x7fffU)<<16 | u.i.m>>48; + int sign = u.i.se >> 15; + int i; + + *sg = 1; + + /* purge off +-inf, NaN, +-0, tiny and negative arguments */ + if (ix >= 0x7fff0000) + return x * x; + if (ix < 0x3fc08000) { /* |x|<2**-63, return -log(|x|) */ + if (sign) { + *sg = -1; + x = -x; + } + return -logl(x); + } + if (sign) { + x = -x; + t = sin_pi(x); + if (t == 0.0) + return 1.0 / (x-x); /* -integer */ + if (t > 0.0) + *sg = -1; + else + t = -t; + nadj = logl(pi / (t * x)); + } + + /* purge off 1 and 2 (so the sign is ok with downward rounding) */ + if ((ix == 0x3fff8000 || ix == 0x40008000) && u.i.m == 0) { + r = 0; + } else if (ix < 0x40008000) { /* x < 2.0 */ + if (ix <= 0x3ffee666) { /* 8.99993896484375e-1 */ + /* lgamma(x) = lgamma(x+1) - log(x) */ + r = -logl(x); + if (ix >= 0x3ffebb4a) { /* 7.31597900390625e-1 */ + y = x - 1.0; + i = 0; + } else if (ix >= 0x3ffced33) { /* 2.31639862060546875e-1 */ + y = x - (tc - 1.0); + i = 1; + } else { /* x < 0.23 */ + y = x; + i = 2; + } + } else { + r = 0.0; + if (ix >= 0x3fffdda6) { /* 1.73162841796875 */ + /* [1.7316,2] */ + y = x - 2.0; + i = 0; + } else if (ix >= 0x3fff9da6) { /* 1.23162841796875 */ + /* [1.23,1.73] */ + y = x - tc; + i = 1; + } else { + /* [0.9, 1.23] */ + y = x - 1.0; + i = 2; + } + } + switch (i) { + case 0: + p1 = a0 + y * (a1 + y * (a2 + y * (a3 + y * (a4 + y * a5)))); + p2 = b0 + y * (b1 + y * (b2 + y * (b3 + y * (b4 + y)))); + r += 0.5 * y + y * p1/p2; + break; + case 1: + p1 = g0 + y * (g1 + y * (g2 + y * (g3 + y * (g4 + y * (g5 + y * g6))))); + p2 = h0 + y * (h1 + y * (h2 + y * (h3 + y * (h4 + y * (h5 + y))))); + p = tt + y * p1/p2; + r += (tf + p); + break; + case 2: + p1 = y * (u0 + y * (u1 + y * (u2 + y * (u3 + y * (u4 + y * (u5 + y * u6)))))); + p2 = v0 + y * (v1 + y * (v2 + y * (v3 + y * (v4 + y * (v5 + y))))); + r += (-0.5 * y + p1 / p2); + } + } else if (ix < 0x40028000) { /* 8.0 */ + /* x < 8.0 */ + i = (int)x; + y = x - (double)i; + p = y * (s0 + y * (s1 + y * (s2 + y * (s3 + y * (s4 + y * (s5 + y * s6)))))); + q = r0 + y * (r1 + y * (r2 + y * (r3 + y * (r4 + y * (r5 + y * (r6 + y)))))); + r = 0.5 * y + p / q; + z = 1.0; + /* lgamma(1+s) = log(s) + lgamma(s) */ + switch (i) { + case 7: + z *= (y + 6.0); /* FALLTHRU */ + case 6: + z *= (y + 5.0); /* FALLTHRU */ + case 5: + z *= (y + 4.0); /* FALLTHRU */ + case 4: + z *= (y + 3.0); /* FALLTHRU */ + case 3: + z *= (y + 2.0); /* FALLTHRU */ + r += logl(z); + break; + } + } else if (ix < 0x40418000) { /* 2^66 */ + /* 8.0 <= x < 2**66 */ + t = logl(x); + z = 1.0 / x; + y = z * z; + w = w0 + z * (w1 + y * (w2 + y * (w3 + y * (w4 + y * (w5 + y * (w6 + y * w7)))))); + r = (x - 0.5) * (t - 1.0) + w; + } else /* 2**66 <= x <= inf */ + r = x * (logl(x) - 1.0); + if (sign) + r = nadj - r; + return r; +} + +int signgam; + +long double lgammal(long double x) +{ + return lgammal_r(x, &signgam); +} + From 252b9b2ec1212ca4569f56b2012d69cc83c8606b Mon Sep 17 00:00:00 2001 From: nikitamikhaylov Date: Wed, 14 Oct 2020 16:54:55 +0300 Subject: [PATCH 093/432] better --- base/glibc-compatibility/musl/lgammal.c | 26 ++----------------------- 1 file changed, 2 insertions(+), 24 deletions(-) diff --git 
a/base/glibc-compatibility/musl/lgammal.c b/base/glibc-compatibility/musl/lgammal.c index cc4a5e3b54d..1e720163a5d 100644 --- a/base/glibc-compatibility/musl/lgammal.c +++ b/base/glibc-compatibility/musl/lgammal.c @@ -187,31 +187,9 @@ w5 = 8.412723297322498080632E-4L, w6 = -1.880801938119376907179E-3L, w7 = 4.885026142432270781165E-3L; -/* sin(pi*x) assuming x > 2^-1000, if sin(pi*x)==0 the sign is arbitrary */ -static long double sin_pi(long double x) -{ - int n; - - /* spurious inexact if odd int */ - x *= 0.5; - x = 2.0*(x - floorl(x)); /* x mod 2.0 */ - - n = (int)(x*4.0); - n = (n+1)/2; - x -= n*0.5f; - x *= pi; - - switch (n) { - default: /* case 4: */ - case 0: return __sinl(x, 0.0, 0); - case 1: return __cosl(x, 0.0); - case 2: return __sinl(-x, 0.0, 0); - case 3: return -__cosl(x, 0.0); - } -} - #include #include +#include "libm.h" long double __lgammal_r(long double x, int *sg) { long double t, y, z, nadj, p, p1, p2, q, r, w; @@ -234,7 +212,7 @@ long double __lgammal_r(long double x, int *sg) { } if (sign) { x = -x; - t = sin_pi(x); + t = sin(pi * x); if (t == 0.0) return 1.0 / (x-x); /* -integer */ if (t > 0.0) From 72fddba2b1153598d9cc61c82b1ce49fbafde688 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 14 Oct 2020 17:08:51 +0300 Subject: [PATCH 094/432] more fasttest fixes --- docker/test/fasttest/run.sh | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index a0830ba5f12..d77d7233893 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -33,6 +33,12 @@ server_pid=none function stop_server { + if kill -0 -- "$server_pid" + then + echo "ClickHouse server pid '$server_pid' is not running" + return 0 + fi + for _ in {1..60} do if ! pkill -f "clickhouse-server" && ! 
kill -- "$server_pid" ; then break ; fi @@ -167,8 +173,7 @@ clickhouse-client --version clickhouse-test --help mkdir -p "$FASTTEST_DATA"{,/client-config} -cp -a "$FASTTEST_SOURCE/programs/server/"{config,users}.xml "$FASTTEST_DATA" -cp -a "$FASTTEST_SOURCE/programs/server/"{config,users}.xml "$FASTTEST_DATA" +cp -a "$FASTTEST_SOURCE/programs/server/"{config,users}.{xml,d} "$FASTTEST_DATA" "$FASTTEST_SOURCE/tests/config/install.sh" "$FASTTEST_DATA" "$FASTTEST_DATA/client-config" # doesn't support SSL rm -f "$FASTTEST_DATA/config.d/secure_ports.xml" From eb8d8f6ba40f8968152b7e0a5a451e4978c85e45 Mon Sep 17 00:00:00 2001 From: feng lv Date: Wed, 14 Oct 2020 23:02:51 +0800 Subject: [PATCH 095/432] interval op support string literal --- src/Parsers/ExpressionListParsers.cpp | 42 +++++++++++++++++++++++++-- src/Parsers/ExpressionListParsers.h | 2 ++ 2 files changed, 42 insertions(+), 2 deletions(-) diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp index 26affe020b1..ab1f7abf4a1 100644 --- a/src/Parsers/ExpressionListParsers.cpp +++ b/src/Parsers/ExpressionListParsers.cpp @@ -645,6 +645,14 @@ bool ParserTimestampOperatorExpression::parseImpl(Pos & pos, ASTPtr & node, Expe return true; } +bool ParserIntervalOperatorExpression::stringToIntervalKind(const String & literal, ASTPtr & number, IntervalKind & interval_kind) +{ + Tokens tokens(literal.data(), literal.data() + literal.size()); + Pos pos(tokens, 0); + Expected expected; + return (ParserNumber().parse(pos, number, expected) && parseIntervalKind(pos, expected, interval_kind)); +} + bool ParserIntervalOperatorExpression::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { auto begin = pos; @@ -653,12 +661,41 @@ bool ParserIntervalOperatorExpression::parseImpl(Pos & pos, ASTPtr & node, Expec if (!ParserKeyword("INTERVAL").ignore(pos, expected)) return next_parser.parse(pos, node, expected); + ASTPtr string_literal; + if (ParserStringLiteral().parse(pos, string_literal, expected)) + { + String literal; + if (string_literal->as().value.tryGet(literal)) + { + IntervalKind interval_kind; + ASTPtr number; + + if (!stringToIntervalKind(literal, number, interval_kind)) + return false; + auto function = std::make_shared(); + + /// function arguments + auto exp_list = std::make_shared(); + + /// the first argument of the function is the previous element, the second is the next one + function->name = interval_kind.toNameOfFunctionToIntervalDataType(); + function->arguments = exp_list; + function->children.push_back(exp_list); + + exp_list->children.push_back(number); + + node = function; + return true; + } + } + ASTPtr expr; + /// Any expression can be inside, because operator surrounds it. 
if (!ParserExpressionWithOptionalAlias(false).parse(pos, expr, expected)) { - pos = begin; - return next_parser.parse(pos, node, expected); + pos = begin; + return next_parser.parse(pos, node, expected); } IntervalKind interval_kind; @@ -729,3 +766,4 @@ bool ParserKeyValuePairsList::parseImpl(Pos & pos, ASTPtr & node, Expected & exp } } + diff --git a/src/Parsers/ExpressionListParsers.h b/src/Parsers/ExpressionListParsers.h index 93a47648a0b..72961f700fd 100644 --- a/src/Parsers/ExpressionListParsers.h +++ b/src/Parsers/ExpressionListParsers.h @@ -5,6 +5,7 @@ #include #include +#include namespace DB { @@ -232,6 +233,7 @@ protected: const char * getName() const override { return "INTERVAL operator expression"; } bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; + bool stringToIntervalKind(const String & literal, ASTPtr & number, IntervalKind & interval_kind); }; class ParserAdditiveExpression : public IParserBase From 7b69592e496b2ad8cc5ac75dc408e0030c88f8ed Mon Sep 17 00:00:00 2001 From: feng lv Date: Wed, 14 Oct 2020 23:08:47 +0800 Subject: [PATCH 096/432] add test fix fix --- src/Parsers/ExpressionListParsers.cpp | 9 ++++----- ...23_interval_operator_support_string_literal.reference | 6 ++++++ .../01523_interval_operator_support_string_literal.sql | 6 ++++++ 3 files changed, 16 insertions(+), 5 deletions(-) create mode 100644 tests/queries/0_stateless/01523_interval_operator_support_string_literal.reference create mode 100644 tests/queries/0_stateless/01523_interval_operator_support_string_literal.sql diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp index ab1f7abf4a1..4f4b97eff2d 100644 --- a/src/Parsers/ExpressionListParsers.cpp +++ b/src/Parsers/ExpressionListParsers.cpp @@ -670,14 +670,14 @@ bool ParserIntervalOperatorExpression::parseImpl(Pos & pos, ASTPtr & node, Expec IntervalKind interval_kind; ASTPtr number; + /// parse function arguments and interval kind from string literal if (!stringToIntervalKind(literal, number, interval_kind)) return false; + auto function = std::make_shared(); - /// function arguments auto exp_list = std::make_shared(); - /// the first argument of the function is the previous element, the second is the next one function->name = interval_kind.toNameOfFunctionToIntervalDataType(); function->arguments = exp_list; function->children.push_back(exp_list); @@ -690,12 +690,11 @@ bool ParserIntervalOperatorExpression::parseImpl(Pos & pos, ASTPtr & node, Expec } ASTPtr expr; - /// Any expression can be inside, because operator surrounds it. 
if (!ParserExpressionWithOptionalAlias(false).parse(pos, expr, expected)) { - pos = begin; - return next_parser.parse(pos, node, expected); + pos = begin; + return next_parser.parse(pos, node, expected); } IntervalKind interval_kind; diff --git a/tests/queries/0_stateless/01523_interval_operator_support_string_literal.reference b/tests/queries/0_stateless/01523_interval_operator_support_string_literal.reference new file mode 100644 index 00000000000..5ee4e7592f6 --- /dev/null +++ b/tests/queries/0_stateless/01523_interval_operator_support_string_literal.reference @@ -0,0 +1,6 @@ +2 +2 +2 +2 +2 +2 diff --git a/tests/queries/0_stateless/01523_interval_operator_support_string_literal.sql b/tests/queries/0_stateless/01523_interval_operator_support_string_literal.sql new file mode 100644 index 00000000000..ce418e13e9f --- /dev/null +++ b/tests/queries/0_stateless/01523_interval_operator_support_string_literal.sql @@ -0,0 +1,6 @@ +SELECT INTERVAL 2 day; +SELECT INTERVAL '2 day'; +SELECT INTERVAL 2 hour; +SELECT INTERVAL '2 hour'; +SELECT INTERVAL 2 minute; +SELECT INTERVAL '2 minute'; From ead6d5992c32921da59947ff2a7689ddcc9ca7d1 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 14 Oct 2020 18:53:14 +0300 Subject: [PATCH 097/432] fixup --- docker/test/fasttest/run.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index d77d7233893..299b748e4eb 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -33,7 +33,7 @@ server_pid=none function stop_server { - if kill -0 -- "$server_pid" + if ! kill -0 -- "$server_pid" then echo "ClickHouse server pid '$server_pid' is not running" return 0 @@ -274,7 +274,7 @@ then stop_server ||: # Clean the data so that there is no interference from the previous test run. 
- rm -rf "$FASTTEST_DATA"/{meta,}data ||: + rm -rf "$FASTTEST_DATA"/{{meta,}data,user_files} ||: start_server From f8852fcced01b368dcc6984e610d976494a61fe7 Mon Sep 17 00:00:00 2001 From: tavplubix Date: Wed, 14 Oct 2020 20:01:03 +0300 Subject: [PATCH 098/432] Update 01150_ddl_guard_rwr.sh --- tests/queries/0_stateless/01150_ddl_guard_rwr.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/01150_ddl_guard_rwr.sh b/tests/queries/0_stateless/01150_ddl_guard_rwr.sh index c14e4c38f54..43804075938 100755 --- a/tests/queries/0_stateless/01150_ddl_guard_rwr.sh +++ b/tests/queries/0_stateless/01150_ddl_guard_rwr.sh @@ -39,5 +39,6 @@ timeout 20 bash -c 'thread_rename' & wait sleep 1 +$CLICKHOUSE_CLIENT --query "DETACH DATABASE IF EXISTS test_01150" $CLICKHOUSE_CLIENT --query "ATTACH DATABASE IF NOT EXISTS test_01150" $CLICKHOUSE_CLIENT --query "DROP DATABASE test_01150"; From ff7601a52cea90d5594a35969417747219dd9585 Mon Sep 17 00:00:00 2001 From: nikitamikhaylov Date: Wed, 14 Oct 2020 21:41:35 +0300 Subject: [PATCH 099/432] add python test and fix build --- base/glibc-compatibility/musl/lgammal.c | 4 +- .../AggregateFunctionStudentTTest.cpp | 2 +- .../AggregateFunctionStudentTTest.h | 28 ++--- .../AggregateFunctionWelchTTest.cpp | 2 +- .../AggregateFunctionWelchTTest.h | 8 +- .../queries/0_stateless/01322_ttest_scipy.py | 108 ++++++++++++++++++ .../0_stateless/01322_ttest_scipy.reference | 0 7 files changed, 126 insertions(+), 26 deletions(-) create mode 100644 tests/queries/0_stateless/01322_ttest_scipy.py create mode 100644 tests/queries/0_stateless/01322_ttest_scipy.reference diff --git a/base/glibc-compatibility/musl/lgammal.c b/base/glibc-compatibility/musl/lgammal.c index 1e720163a5d..534abf41894 100644 --- a/base/glibc-compatibility/musl/lgammal.c +++ b/base/glibc-compatibility/musl/lgammal.c @@ -309,10 +309,10 @@ long double __lgammal_r(long double x, int *sg) { return r; } -int signgam; +int signgam_lgammal; long double lgammal(long double x) { - return lgammal_r(x, &signgam); + return lgammal_r(x, &signgam_lgammal); } diff --git a/src/AggregateFunctions/AggregateFunctionStudentTTest.cpp b/src/AggregateFunctions/AggregateFunctionStudentTTest.cpp index b6f32409946..b03e961a24a 100644 --- a/src/AggregateFunctions/AggregateFunctionStudentTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionStudentTTest.cpp @@ -50,7 +50,7 @@ AggregateFunctionPtr createAggregateFunctionStudentTTest(const std::string & nam void registerAggregateFunctionStudentTTest(AggregateFunctionFactory & factory) { - factory.registerFunction("StudentTTest", createAggregateFunctionStudentTTest, AggregateFunctionFactory::CaseInsensitive); + factory.registerFunction("studentTTest", createAggregateFunctionStudentTTest, AggregateFunctionFactory::CaseInsensitive); } } diff --git a/src/AggregateFunctions/AggregateFunctionStudentTTest.h b/src/AggregateFunctions/AggregateFunctionStudentTTest.h index ac05a11d334..2f3d35dbeab 100644 --- a/src/AggregateFunctions/AggregateFunctionStudentTTest.h +++ b/src/AggregateFunctions/AggregateFunctionStudentTTest.h @@ -98,8 +98,8 @@ struct AggregateFunctionStudentTTestData final Float64 getSSquared() const { - /// TODO: Update comment with Tex. - /// The original formulae looks like ... + /// The original formulae looks like + /// \frac{\sum_{i = 1}^{n_x}{(x_i - \bar{x}) ^ 2} + \sum_{i = 1}^{n_y}{(y_i - \bar{y}) ^ 2}}{n_x + n_y - 2} /// But we made some mathematical transformations not to store original sequences. 
         /// Also we dropped sqrt, because it will be squared later.
         const Float64 all_x = square_sum_x + size_x * std::pow(mean_x, 2) - 2 * mean_x * sum_x;
@@ -110,26 +110,19 @@ struct AggregateFunctionStudentTTestData final

     Float64 getTStatisticSquared() const
     {
-        if (size_x == 0 || size_y == 0)
-        {
-            throw Exception("Division by zero encountered in Aggregate function StudentTTest", ErrorCodes::BAD_ARGUMENTS);
-        }
-
         return std::pow(mean_x - mean_y, 2) / getStandartErrorSquared();
     }

     Float64 getTStatistic() const
     {
-        if (size_x == 0 || size_y == 0)
-        {
-            throw Exception("Division by zero encountered in Aggregate function StudentTTest", ErrorCodes::BAD_ARGUMENTS);
-        }
-
         return (mean_x - mean_y) / std::sqrt(getStandartErrorSquared());
     }

     Float64 getStandartErrorSquared() const
     {
+        if (size_x == 0 || size_y == 0)
+            throw Exception("Division by zero encountered in Aggregate function StudentTTest", ErrorCodes::BAD_ARGUMENTS);
+
         return getSSquared() * (1.0 / static_cast<Float64>(size_x) + 1.0 / static_cast<Float64>(size_y));
     }

@@ -138,9 +131,10 @@ struct AggregateFunctionStudentTTestData final
         return static_cast<Float64>(size_x + size_y - 2);
     }

-    static Float64 integrateSimpson(Float64 a, Float64 b, std::function<Float64(Float64)> func, size_t iterations = 1e6)
+    static Float64 integrateSimpson(Float64 a, Float64 b, std::function<Float64(Float64)> func)
     {
-        double h = (b - a) / iterations;
+        const size_t iterations = std::max(1e6, 1e4 * std::abs(std::round(b)));
+        const long double h = (b - a) / iterations;
         Float64 sum_odds = 0.0;
         for (size_t i = 1; i < iterations; i += 2)
             sum_odds += func(a + i * h);
@@ -154,13 +148,9 @@ struct AggregateFunctionStudentTTestData final
     {
         const Float64 v = getDegreesOfFreedom();
         const Float64 t = getTStatisticSquared();
-        std::cout << "getDegreesOfFreedom() " << getDegreesOfFreedom() << std::endl;
-        std::cout << "getTStatisticSquared() " << getTStatisticSquared() << std::endl;
         auto f = [&v] (double x) { return std::pow(x, v/2 - 1) / std::sqrt(1 - x); };
         Float64 numenator = integrateSimpson(0, v / (t + v), f);
         Float64 denominator = std::exp(std::lgammal(v/2) + std::lgammal(0.5) - std::lgammal(v/2 + 0.5));
-        std::cout << "numenator " << numenator << std::endl;
-        std::cout << "denominator " << denominator << std::endl;
         return numenator / denominator;
     }

@@ -184,7 +174,7 @@ public:

     String getName() const override
     {
-        return "StudentTTest";
+        return "studentTTest";
     }

     DataTypePtr getReturnType() const override
diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp
index d9fce97680c..00607171c41 100644
--- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp
+++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp
@@ -50,7 +50,7 @@ AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name,

 void registerAggregateFunctionWelchTTest(AggregateFunctionFactory & factory)
 {
-    factory.registerFunction("WelchTTest", createAggregateFunctionWelchTTest, AggregateFunctionFactory::CaseInsensitive);
+    factory.registerFunction("welchTTest", createAggregateFunctionWelchTTest, AggregateFunctionFactory::CaseInsensitive);
 }

 }
diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h
index 36641b826b1..3332a6c363e 100644
--- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h
+++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h
@@ -142,8 +142,9 @@ struct AggregateFunctionWelchTTestData final
         return numerator / (denominator_first + denominator_second);
     }
-    static Float64 integrateSimpson(Float64 a, Float64 b, std::function<Float64(Float64)> func, size_t iterations = 1e6)
+    static Float64 integrateSimpson(Float64 a, Float64 b, std::function<Float64(Float64)> func)
     {
+        size_t iterations = std::max(1e6, 1e4 * std::abs(std::round(b)));
         double h = (b - a) / iterations;
         Float64 sum_odds = 0.0;
         for (size_t i = 1; i < iterations; i += 2)
             sum_odds += func(a + i * h);
@@ -170,7 +171,8 @@ struct AggregateFunctionWelchTTestData final
     }
 };

-/// Returns p-value
+/// Returns tuple of (t-statistic, p-value)
+/// https://cpb-us-w2.wpmucdn.com/voices.uchicago.edu/dist/9/1193/files/2016/01/05b-TandP.pdf
 template <typename Data>
 class AggregateFunctionWelchTTest : public IAggregateFunctionDataHelper<Data,AggregateFunctionWelchTTest<Data>>
@@ -183,7 +185,7 @@ public:

     String getName() const override
     {
-        return "WelchTTest";
+        return "welchTTest";
     }

     DataTypePtr getReturnType() const override
diff --git a/tests/queries/0_stateless/01322_ttest_scipy.py b/tests/queries/0_stateless/01322_ttest_scipy.py
new file mode 100644
index 00000000000..d8255cd8062
--- /dev/null
+++ b/tests/queries/0_stateless/01322_ttest_scipy.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python3
+import os
+import io
+import sys
+import requests
+import time
+import pandas as pd
+import numpy as np
+from scipy import stats
+
+CLICKHOUSE_HOST = os.environ.get('CLICKHOUSE_HOST', '127.0.0.1')
+CLICKHOUSE_PORT_HTTP = os.environ.get('CLICKHOUSE_PORT_HTTP', '8123')
+CLICKHOUSE_SERVER_URL_STR = 'http://' + ':'.join(str(s) for s in [CLICKHOUSE_HOST, CLICKHOUSE_PORT_HTTP]) + "/"
+
+class ClickHouseClient:
+    def __init__(self, host = CLICKHOUSE_SERVER_URL_STR):
+        self.host = host
+
+    def query(self, query, connection_timeout = 1500):
+        NUMBER_OF_TRIES = 30
+        DELAY = 10
+
+        for i in range(NUMBER_OF_TRIES):
+            r = requests.post(
+                self.host,
+                params = {'timeout_before_checking_execution_speed': 120, 'max_execution_time': 6000},
+                timeout = connection_timeout,
+                data = query)
+            if r.status_code == 200:
+                return r.text
+            else:
+                print('ATTENTION: try #%d failed' % i)
+                if i != (NUMBER_OF_TRIES-1):
+                    print(query)
+                    print(r.text)
+                    time.sleep(DELAY*(i+1))
+                else:
+                    raise ValueError(r.text)
+
+    def query_return_df(self, query, connection_timeout = 1500):
+        data = self.query(query, connection_timeout)
+        df = pd.read_csv(io.StringIO(data), sep = '\t')
+        return df
+
+    def query_with_data(self, query, content):
+        content = content.encode('utf-8')
+        r = requests.post(self.host, data=content)
+        result = r.text
+        if r.status_code == 200:
+            return result
+        else:
+            raise ValueError(r.text)
+
+def test_and_check(name, a, b, t_stat, p_value):
+    client = ClickHouseClient()
+    client.query("DROP TABLE IF EXISTS ttest;")
+    client.query("CREATE TABLE ttest (left Float64, right Float64) ENGINE = Memory;");
+    client.query("INSERT INTO ttest VALUES {};".format(", ".join(['({},{})'.format(i, j) for i,j in zip(a, b)])))
+
+    real = client.query_return_df(
+        "SELECT roundBankers({}(left, right).1, 16) as t_stat, ".format(name) +
+        "roundBankers({}(left, right).2, 16) as p_value ".format(name) +
+        "FROM ttest FORMAT TabSeparatedWithNames;")
+    real_t_stat = real['t_stat'][0]
+    real_p_value = real['p_value'][0]
+    assert(abs(real_t_stat - np.float64(t_stat)) < 1e-4), "clickhouse_t_stat {}, scipy_t_stat {}".format(real_t_stat, t_stat)
+    assert(abs(real_p_value - np.float64(p_value)) < 1e-4), "clickhouse_p_value {}, scipy_p_value {}".format(real_p_value, p_value)
+    client.query("DROP TABLE IF EXISTS ttest;")
+
+
+def test_student():
+    rvs1 = np.round(stats.norm.rvs(loc=1, scale=5,size=500), 5)
+    rvs2 = np.round(stats.norm.rvs(loc=10, scale=5,size=500), 5)
+    s, p = stats.ttest_ind(rvs1, rvs2, equal_var = True)
+    test_and_check("studentTTest", rvs1, rvs2, s, p)
+
+    rvs1 = np.round(stats.norm.rvs(loc=0, scale=5,size=500), 5)
+    rvs2 = np.round(stats.norm.rvs(loc=0, scale=5,size=500), 5)
+    s, p = stats.ttest_ind(rvs1, rvs2, equal_var = True)
+    test_and_check("studentTTest", rvs1, rvs2, s, p)
+
+
+    rvs1 = np.round(stats.norm.rvs(loc=0, scale=10,size=65536), 5)
+    rvs2 = np.round(stats.norm.rvs(loc=5, scale=1,size=65536), 5)
+    s, p = stats.ttest_ind(rvs1, rvs2, equal_var = True)
+    test_and_check("studentTTest", rvs1, rvs2, s, p)
+
+def test_welch():
+    rvs1 = np.round(stats.norm.rvs(loc=1, scale=15,size=500), 5)
+    rvs2 = np.round(stats.norm.rvs(loc=10, scale=5,size=500), 5)
+    s, p = stats.ttest_ind(rvs1, rvs2, equal_var = False)
+    test_and_check("welchTTest", rvs1, rvs2, s, p)
+
+    rvs1 = np.round(stats.norm.rvs(loc=0, scale=7,size=500), 5)
+    rvs2 = np.round(stats.norm.rvs(loc=0, scale=3,size=500), 5)
+    s, p = stats.ttest_ind(rvs1, rvs2, equal_var = False)
+    test_and_check("welchTTest", rvs1, rvs2, s, p)
+
+
+    rvs1 = np.round(stats.norm.rvs(loc=0, scale=10,size=65536), 5)
+    rvs2 = np.round(stats.norm.rvs(loc=5, scale=1,size=65536), 5)
+    s, p = stats.ttest_ind(rvs1, rvs2, equal_var = False)
+    test_and_check("welchTTest", rvs1, rvs2, s, p)
+
+if __name__ == "__main__":
+    test_student()
+    test_welch()
+    print("Ok.")
\ No newline at end of file
diff --git a/tests/queries/0_stateless/01322_ttest_scipy.reference b/tests/queries/0_stateless/01322_ttest_scipy.reference
new file mode 100644
index 00000000000..e69de29bb2d

From 1f200f8bd20a08f7180a00bfc77aaf56a5169aba Mon Sep 17 00:00:00 2001
From: nikitamikhaylov
Date: Wed, 14 Oct 2020 21:43:38 +0300
Subject: [PATCH 100/432] add reference

---
 tests/queries/0_stateless/01322_ttest_scipy.reference | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/queries/0_stateless/01322_ttest_scipy.reference b/tests/queries/0_stateless/01322_ttest_scipy.reference
index e69de29bb2d..587579af915 100644
--- a/tests/queries/0_stateless/01322_ttest_scipy.reference
+++ b/tests/queries/0_stateless/01322_ttest_scipy.reference
@@ -0,0 +1 @@
+Ok.
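Note on the computation being tested above: the C++ side obtains the two-sided p-value by Simpson integration of the Student-t density, scaling the iteration count with the upper integration limit so that large t-statistics still get enough sample points. The following standalone sketch mirrors that computation in Python and cross-checks it against scipy without a running server. It is an illustration, not code from the patches: the function names are invented here, and the Welch-Satterthwaite formulas are the standard textbook ones.

# welch_check.py -- illustrative cross-check, not part of the patch series.
import numpy as np
from scipy import stats

def integrate_simpson(a, b, func, iterations=None):
    # Mirrors the C++ integrateSimpson(): composite Simpson's rule, with the
    # point count scaled to the upper limit so wide ranges stay accurate.
    if iterations is None:
        iterations = int(max(1e6, 1e4 * abs(round(b))))
    h = (b - a) / iterations
    grid = a + h * np.arange(iterations + 1)
    vals = func(grid)
    return (vals[0] + vals[-1] + 4 * vals[1:-1:2].sum() + 2 * vals[2:-1:2].sum()) * h / 3

def welch_t_and_p(x, y):
    x, y = np.asarray(x, dtype=np.float64), np.asarray(y, dtype=np.float64)
    vx, vy = x.var(ddof=1) / len(x), y.var(ddof=1) / len(y)
    t = (x.mean() - y.mean()) / np.sqrt(vx + vy)
    # Welch-Satterthwaite approximation of the degrees of freedom.
    df = (vx + vy) ** 2 / (vx ** 2 / (len(x) - 1) + vy ** 2 / (len(y) - 1))
    # Two-sided p-value: probability mass outside [-|t|, |t|].
    p = 1.0 - integrate_simpson(-abs(t), abs(t), lambda u: stats.t.pdf(u, df))
    return t, p

if __name__ == "__main__":
    rvs1 = stats.norm.rvs(loc=0, scale=7, size=500)
    rvs2 = stats.norm.rvs(loc=0, scale=3, size=500)
    t, p = welch_t_and_p(rvs1, rvs2)
    t_ref, p_ref = stats.ttest_ind(rvs1, rvs2, equal_var=False)
    assert abs(t - t_ref) < 1e-8 and abs(p - p_ref) < 1e-4
    print("Ok.")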
From 575354a6d39c91d37fd8ed3795c78c97985a946c Mon Sep 17 00:00:00 2001 From: nikitamikhaylov Date: Wed, 14 Oct 2020 21:52:23 +0300 Subject: [PATCH 101/432] add scipy to dockerfile --- docker/test/stateless/Dockerfile | 3 +++ docker/test/stateless_unbundled/Dockerfile | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile index 33eb1c29103..ca7ecf9cb9c 100644 --- a/docker/test/stateless/Dockerfile +++ b/docker/test/stateless/Dockerfile @@ -16,6 +16,7 @@ RUN apt-get update -y \ python3-lxml \ python3-requests \ python3-termcolor \ + python3-pip \ qemu-user-static \ sudo \ telnet \ @@ -23,6 +24,8 @@ RUN apt-get update -y \ unixodbc \ wget +RUN pip3 install numpy, scipy, pandas + RUN mkdir -p /tmp/clickhouse-odbc-tmp \ && wget -nv -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \ && cp /tmp/clickhouse-odbc-tmp/lib64/*.so /usr/local/lib/ \ diff --git a/docker/test/stateless_unbundled/Dockerfile b/docker/test/stateless_unbundled/Dockerfile index f2fd28e4078..2f4663fa061 100644 --- a/docker/test/stateless_unbundled/Dockerfile +++ b/docker/test/stateless_unbundled/Dockerfile @@ -58,6 +58,7 @@ RUN apt-get --allow-unauthenticated update -y \ python3-lxml \ python3-requests \ python3-termcolor \ + python3-pip \ qemu-user-static \ sudo \ telnet \ @@ -66,7 +67,9 @@ RUN apt-get --allow-unauthenticated update -y \ unixodbc \ unixodbc-dev \ wget \ - zlib1g-dev + zlib1g-dev\ + +RUN pip3 install numpy, scipy, pandas RUN mkdir -p /tmp/clickhouse-odbc-tmp \ && wget -nv -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \ From 2d29eab934a6543653ebd1381215a9bf762452af Mon Sep 17 00:00:00 2001 From: nikitamikhaylov Date: Wed, 14 Oct 2020 22:00:20 +0300 Subject: [PATCH 102/432] better --- docker/test/stateless_unbundled/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/stateless_unbundled/Dockerfile b/docker/test/stateless_unbundled/Dockerfile index 2f4663fa061..528f1e0520c 100644 --- a/docker/test/stateless_unbundled/Dockerfile +++ b/docker/test/stateless_unbundled/Dockerfile @@ -67,7 +67,7 @@ RUN apt-get --allow-unauthenticated update -y \ unixodbc \ unixodbc-dev \ wget \ - zlib1g-dev\ + zlib1g-dev RUN pip3 install numpy, scipy, pandas From b1e15530614f29320cd0b21541d00e3fc3523515 Mon Sep 17 00:00:00 2001 From: nikitamikhaylov Date: Wed, 14 Oct 2020 22:43:08 +0300 Subject: [PATCH 103/432] style + docker + tests --- docker/test/stateless/Dockerfile | 2 +- docker/test/stateless_unbundled/Dockerfile | 2 +- src/AggregateFunctions/AggregateFunctionStudentTTest.cpp | 4 ---- src/AggregateFunctions/AggregateFunctionStudentTTest.h | 2 +- src/AggregateFunctions/AggregateFunctionWelchTTest.cpp | 7 ------- src/AggregateFunctions/AggregateFunctionWelchTTest.h | 2 +- tests/queries/0_stateless/01322_ttest_scipy.sh | 8 ++++++++ 7 files changed, 12 insertions(+), 15 deletions(-) create mode 100755 tests/queries/0_stateless/01322_ttest_scipy.sh diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile index ca7ecf9cb9c..8f2c7358bb8 100644 --- a/docker/test/stateless/Dockerfile +++ b/docker/test/stateless/Dockerfile @@ -24,7 +24,7 @@ RUN apt-get update -y \ unixodbc \ wget -RUN pip3 install numpy, scipy, pandas +RUN pip3 install numpy scipy pandas RUN mkdir -p /tmp/clickhouse-odbc-tmp \ && wget -nv -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \ diff --git 
a/docker/test/stateless_unbundled/Dockerfile b/docker/test/stateless_unbundled/Dockerfile index 528f1e0520c..345ba905412 100644 --- a/docker/test/stateless_unbundled/Dockerfile +++ b/docker/test/stateless_unbundled/Dockerfile @@ -69,7 +69,7 @@ RUN apt-get --allow-unauthenticated update -y \ wget \ zlib1g-dev -RUN pip3 install numpy, scipy, pandas +RUN pip3 install numpy scipy pandas RUN mkdir -p /tmp/clickhouse-odbc-tmp \ && wget -nv -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \ diff --git a/src/AggregateFunctions/AggregateFunctionStudentTTest.cpp b/src/AggregateFunctions/AggregateFunctionStudentTTest.cpp index b03e961a24a..a2c36e43488 100644 --- a/src/AggregateFunctions/AggregateFunctionStudentTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionStudentTTest.cpp @@ -11,7 +11,6 @@ namespace ErrorCodes { -extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int NOT_IMPLEMENTED; } @@ -44,13 +43,10 @@ AggregateFunctionPtr createAggregateFunctionStudentTTest(const std::string & nam return res; } - } - void registerAggregateFunctionStudentTTest(AggregateFunctionFactory & factory) { factory.registerFunction("studentTTest", createAggregateFunctionStudentTTest, AggregateFunctionFactory::CaseInsensitive); } - } diff --git a/src/AggregateFunctions/AggregateFunctionStudentTTest.h b/src/AggregateFunctions/AggregateFunctionStudentTTest.h index 2f3d35dbeab..2a4ec40e3c1 100644 --- a/src/AggregateFunctions/AggregateFunctionStudentTTest.h +++ b/src/AggregateFunctions/AggregateFunctionStudentTTest.h @@ -163,7 +163,7 @@ struct AggregateFunctionStudentTTestData final /// Returns tuple of (t-statistic, p-value) /// https://cpb-us-w2.wpmucdn.com/voices.uchicago.edu/dist/9/1193/files/2016/01/05b-TandP.pdf template -class AggregateFunctionStudentTTest : +class AggregateFunctionStudentTTest : public IAggregateFunctionDataHelper,AggregateFunctionStudentTTest> { diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index 00607171c41..483c99dde9b 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -6,12 +6,8 @@ #include #include - -// the return type is boolean (we use UInt8 as we do not have boolean in clickhouse) - namespace ErrorCodes { -extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int NOT_IMPLEMENTED; } @@ -44,13 +40,10 @@ AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, return res; } - } - void registerAggregateFunctionWelchTTest(AggregateFunctionFactory & factory) { factory.registerFunction("welchTTest", createAggregateFunctionWelchTTest, AggregateFunctionFactory::CaseInsensitive); } - } diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index 3332a6c363e..b262ebb70af 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -174,7 +174,7 @@ struct AggregateFunctionWelchTTestData final /// Returns tuple of (t-statistic, p-value) /// https://cpb-us-w2.wpmucdn.com/voices.uchicago.edu/dist/9/1193/files/2016/01/05b-TandP.pdf template -class AggregateFunctionWelchTTest : +class AggregateFunctionWelchTTest : public IAggregateFunctionDataHelper,AggregateFunctionWelchTTest> { diff --git a/tests/queries/0_stateless/01322_ttest_scipy.sh b/tests/queries/0_stateless/01322_ttest_scipy.sh new file mode 100755 index 
00000000000..10dc79614d4 --- /dev/null +++ b/tests/queries/0_stateless/01322_ttest_scipy.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. "$CURDIR"/../shell_config.sh + +# We should have correct env vars from shell_config.sh to run this test + +python3 "$CURDIR"/01322_ttest_scipy.py \ No newline at end of file From b931a3c9dad0375415cee6f511af831eb9b198ec Mon Sep 17 00:00:00 2001 From: feng lv Date: Thu, 15 Oct 2020 15:18:38 +0800 Subject: [PATCH 104/432] fix update test fix --- src/Parsers/ExpressionElementParsers.cpp | 24 +++--- src/Parsers/ExpressionListParsers.cpp | 80 +++++++++---------- src/Parsers/ExpressionListParsers.h | 2 +- ..._operator_support_string_literal.reference | 19 +++++ ...terval_operator_support_string_literal.sql | 19 +++++ 5 files changed, 87 insertions(+), 57 deletions(-) diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index 1d861c6d78a..b26e73287d0 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -789,6 +789,7 @@ bool ParserDateAddExpression::parseImpl(Pos & pos, ASTPtr & node, Expected & exp ++pos; IntervalKind interval_kind; + ASTPtr interval_func_node; if (parseIntervalKind(pos, expected, interval_kind)) { /// function(unit, offset, timestamp) @@ -805,6 +806,13 @@ bool ParserDateAddExpression::parseImpl(Pos & pos, ASTPtr & node, Expected & exp if (!ParserExpression().parse(pos, timestamp_node, expected)) return false; + auto interval_expr_list_args = std::make_shared(); + interval_expr_list_args->children = {offset_node}; + + interval_func_node = std::make_shared(); + interval_func_node->as().name = interval_kind.toNameOfFunctionToIntervalDataType(); + interval_func_node->as().arguments = std::move(interval_expr_list_args); + interval_func_node->as().children.push_back(interval_func_node->as().arguments); } else { @@ -816,27 +824,13 @@ bool ParserDateAddExpression::parseImpl(Pos & pos, ASTPtr & node, Expected & exp return false; ++pos; - if (!ParserKeyword("INTERVAL").ignore(pos, expected)) - return false; - - if (!ParserExpression().parse(pos, offset_node, expected)) - return false; - - if (!parseIntervalKind(pos, expected, interval_kind)) + if (!ParserIntervalOperatorExpression{}.parse(pos, interval_func_node, expected)) return false; } if (pos->type != TokenType::ClosingRoundBracket) return false; ++pos; - auto interval_expr_list_args = std::make_shared(); - interval_expr_list_args->children = {offset_node}; - - auto interval_func_node = std::make_shared(); - interval_func_node->name = interval_kind.toNameOfFunctionToIntervalDataType(); - interval_func_node->arguments = std::move(interval_expr_list_args); - interval_func_node->children.push_back(interval_func_node->arguments); - auto expr_list_args = std::make_shared(); expr_list_args->children = {timestamp_node, interval_func_node}; diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp index 4f4b97eff2d..d6678bb9a78 100644 --- a/src/Parsers/ExpressionListParsers.cpp +++ b/src/Parsers/ExpressionListParsers.cpp @@ -645,12 +645,45 @@ bool ParserTimestampOperatorExpression::parseImpl(Pos & pos, ASTPtr & node, Expe return true; } -bool ParserIntervalOperatorExpression::stringToIntervalKind(const String & literal, ASTPtr & number, IntervalKind & interval_kind) +bool ParserIntervalOperatorExpression::parseArgumentAndIntervalKind( + Pos & pos, ASTPtr & expr, IntervalKind & interval_kind, Expected & expected) { - Tokens 
tokens(literal.data(), literal.data() + literal.size());
-    Pos pos(tokens, 0);
-    Expected expected;
-    return (ParserNumber().parse(pos, number, expected) && parseIntervalKind(pos, expected, interval_kind));
+    auto begin = pos;
+    auto init_expected = expected;
+    ASTPtr string_literal;
+    /// A string literal can follow the INTERVAL keyword;
+    /// the literal can be a part of an expression or
+    /// include both the number and the interval type at the same time.
+    if (ParserStringLiteral{}.parse(pos, string_literal, expected))
+    {
+        String literal;
+        if (string_literal->as<ASTLiteral &>().value.tryGet<String>(literal))
+        {
+            Tokens tokens(literal.data(), literal.data() + literal.size());
+            Pos token_pos(tokens, 0);
+            Expected token_expected;
+
+            if (!ParserNumber{}.parse(token_pos, expr, token_expected))
+                return false;
+            else
+            {
+                /// case: INTERVAL '1' HOUR
+                /// back to begin
+                if (!token_pos.isValid())
+                {
+                    pos = begin;
+                    expected = init_expected;
+                }
+                else
+                    /// case: INTERVAL '1 HOUR'
+                    return parseIntervalKind(token_pos, token_expected, interval_kind);
+            }
+        }
+    }
+    // case: INTERVAL expr HOUR
+    if (!ParserExpressionWithOptionalAlias(false).parse(pos, expr, expected))
+        return false;
+    return parseIntervalKind(pos, expected, interval_kind);
 }
 
 bool ParserIntervalOperatorExpression::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
@@ -661,44 +694,9 @@ bool ParserIntervalOperatorExpression::parseImpl(Pos & pos, ASTPtr & node, Expec
     if (!ParserKeyword("INTERVAL").ignore(pos, expected))
         return next_parser.parse(pos, node, expected);
 
-    ASTPtr string_literal;
-    if (ParserStringLiteral().parse(pos, string_literal, expected))
-    {
-        String literal;
-        if (string_literal->as<ASTLiteral &>().value.tryGet<String>(literal))
-        {
-            IntervalKind interval_kind;
-            ASTPtr number;
-
-            /// parse function arguments and interval kind from string literal
-            if (!stringToIntervalKind(literal, number, interval_kind))
-                return false;
-
-            auto function = std::make_shared<ASTFunction>();
-
-            auto exp_list = std::make_shared<ASTExpressionList>();
-
-            function->name = interval_kind.toNameOfFunctionToIntervalDataType();
-            function->arguments = exp_list;
-            function->children.push_back(exp_list);
-
-            exp_list->children.push_back(number);
-
-            node = function;
-            return true;
-        }
-    }
-
     ASTPtr expr;
-    /// Any expression can be inside, because operator surrounds it.
- if (!ParserExpressionWithOptionalAlias(false).parse(pos, expr, expected)) - { - pos = begin; - return next_parser.parse(pos, node, expected); - } - IntervalKind interval_kind; - if (!parseIntervalKind(pos, expected, interval_kind)) + if (!parseArgumentAndIntervalKind(pos, expr, interval_kind, expected)) { pos = begin; return next_parser.parse(pos, node, expected); diff --git a/src/Parsers/ExpressionListParsers.h b/src/Parsers/ExpressionListParsers.h index 72961f700fd..40efd0e02d2 100644 --- a/src/Parsers/ExpressionListParsers.h +++ b/src/Parsers/ExpressionListParsers.h @@ -233,7 +233,7 @@ protected: const char * getName() const override { return "INTERVAL operator expression"; } bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; - bool stringToIntervalKind(const String & literal, ASTPtr & number, IntervalKind & interval_kind); + bool parseArgumentAndIntervalKind(Pos & pos, ASTPtr & expr, IntervalKind & interval_kind, Expected & expected); }; class ParserAdditiveExpression : public IParserBase diff --git a/tests/queries/0_stateless/01523_interval_operator_support_string_literal.reference b/tests/queries/0_stateless/01523_interval_operator_support_string_literal.reference index 5ee4e7592f6..0451ef3afd5 100644 --- a/tests/queries/0_stateless/01523_interval_operator_support_string_literal.reference +++ b/tests/queries/0_stateless/01523_interval_operator_support_string_literal.reference @@ -4,3 +4,22 @@ 2 2 2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2009-02-14 00:31:30 +2009-02-14 00:31:30 +2009-02-14 00:31:30 +2009-02-15 23:31:30 +2009-02-15 23:31:30 +2009-02-15 23:31:30 diff --git a/tests/queries/0_stateless/01523_interval_operator_support_string_literal.sql b/tests/queries/0_stateless/01523_interval_operator_support_string_literal.sql index ce418e13e9f..2af2ba4996e 100644 --- a/tests/queries/0_stateless/01523_interval_operator_support_string_literal.sql +++ b/tests/queries/0_stateless/01523_interval_operator_support_string_literal.sql @@ -1,6 +1,25 @@ +SELECT INTERVAL 2 year; +SELECT INTERVAL '2' year; +SELECT INTERVAL '2 year'; +SELECT INTERVAL 2 month; +SELECT INTERVAL '2' month; +SELECT INTERVAL '2 month'; +SELECT INTERVAL 2 week; +SELECT INTERVAL '2' week; +SELECT INTERVAL '2 week'; SELECT INTERVAL 2 day; +SELECT INTERVAL '2' day; SELECT INTERVAL '2 day'; SELECT INTERVAL 2 hour; +SELECT INTERVAL '2' hour; SELECT INTERVAL '2 hour'; SELECT INTERVAL 2 minute; +SELECT INTERVAL '2' minute; SELECT INTERVAL '2 minute'; +SELECT INTERVAL '2' AS n minute; +SELECT DATE_ADD(hour, '1', toDateTime(1234567890, 'UTC')); +SELECT DATE_ADD(hour, 1, toDateTime(1234567890, 'UTC')); +SELECT DATE_ADD(hour, (SELECT 1), toDateTime(1234567890, 'UTC')); +SELECT DATE_ADD(toDateTime(1234567890, 'UTC'), INTERVAL 2 day); +SELECT DATE_ADD(toDateTime(1234567890, 'UTC'), INTERVAL '2 day'); +SELECT DATE_ADD(toDateTime(1234567890, 'UTC'), INTERVAL '2' day); From fabe86c6d30907bdc2a4e370b4dfcc2c0face7bf Mon Sep 17 00:00:00 2001 From: feng lv Date: Thu, 15 Oct 2020 18:48:39 +0800 Subject: [PATCH 105/432] fix fix --- src/Parsers/ExpressionListParsers.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Parsers/ExpressionListParsers.h b/src/Parsers/ExpressionListParsers.h index 40efd0e02d2..cf77b8b4da4 100644 --- a/src/Parsers/ExpressionListParsers.h +++ b/src/Parsers/ExpressionListParsers.h @@ -233,7 +233,9 @@ protected: const char * getName() const override { return "INTERVAL operator expression"; } bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; - bool 
parseArgumentAndIntervalKind(Pos & pos, ASTPtr & expr, IntervalKind & interval_kind, Expected & expected); + +private: + static bool parseArgumentAndIntervalKind(Pos & pos, ASTPtr & expr, IntervalKind & interval_kind, Expected & expected); }; class ParserAdditiveExpression : public IParserBase From 80f3de1359e119851f1de45e4250f4cf5f87c63d Mon Sep 17 00:00:00 2001 From: zhang2014 Date: Thu, 15 Oct 2020 20:39:04 +0800 Subject: [PATCH 106/432] ISSUES-15883 try fix collate name --- src/Parsers/MySQL/ASTAlterCommand.cpp | 6 +-- src/Parsers/MySQL/ASTDeclareColumn.cpp | 4 +- src/Parsers/MySQL/ASTDeclareOption.cpp | 39 +++++--------------- src/Parsers/MySQL/ASTDeclareOption.h | 4 +- src/Parsers/MySQL/ASTDeclareTableOptions.cpp | 8 ++-- 5 files changed, 21 insertions(+), 40 deletions(-) diff --git a/src/Parsers/MySQL/ASTAlterCommand.cpp b/src/Parsers/MySQL/ASTAlterCommand.cpp index b6f2b925de0..92461635265 100644 --- a/src/Parsers/MySQL/ASTAlterCommand.cpp +++ b/src/Parsers/MySQL/ASTAlterCommand.cpp @@ -303,9 +303,9 @@ static inline bool parseOtherCommand(IParser::Pos & pos, ASTPtr & node, Expected OptionDescribe("ENABLE KEYS", "enable_keys", std::make_shared()), OptionDescribe("DISABLE KEYS", "enable_keys", std::make_shared()), /// TODO: with collate - OptionDescribe("CONVERT TO CHARACTER SET", "charset", std::make_shared()), - OptionDescribe("CHARACTER SET", "charset", std::make_shared()), - OptionDescribe("DEFAULT CHARACTER SET", "charset", std::make_shared()), + OptionDescribe("CONVERT TO CHARACTER SET", "charset", std::make_shared()), + OptionDescribe("CHARACTER SET", "charset", std::make_shared()), + OptionDescribe("DEFAULT CHARACTER SET", "charset", std::make_shared()), OptionDescribe("LOCK", "lock", std::make_shared()) } }; diff --git a/src/Parsers/MySQL/ASTDeclareColumn.cpp b/src/Parsers/MySQL/ASTDeclareColumn.cpp index 6d21f934858..3913c828ec3 100644 --- a/src/Parsers/MySQL/ASTDeclareColumn.cpp +++ b/src/Parsers/MySQL/ASTDeclareColumn.cpp @@ -51,8 +51,8 @@ static inline bool parseColumnDeclareOptions(IParser::Pos & pos, ASTPtr & node, OptionDescribe("UNIQUE", "unique_key", std::make_unique()), OptionDescribe("KEY", "primary_key", std::make_unique()), OptionDescribe("COMMENT", "comment", std::make_unique()), - OptionDescribe("CHARACTER SET", "charset_name", std::make_unique()), - OptionDescribe("COLLATE", "collate", std::make_unique()), + OptionDescribe("CHARACTER SET", "charset_name", std::make_unique()), + OptionDescribe("COLLATE", "collate", std::make_unique()), OptionDescribe("COLUMN_FORMAT", "column_format", std::make_unique()), OptionDescribe("STORAGE", "storage", std::make_unique()), OptionDescribe("AS", "generated", std::make_unique()), diff --git a/src/Parsers/MySQL/ASTDeclareOption.cpp b/src/Parsers/MySQL/ASTDeclareOption.cpp index 92ac5f0343e..17be639b630 100644 --- a/src/Parsers/MySQL/ASTDeclareOption.cpp +++ b/src/Parsers/MySQL/ASTDeclareOption.cpp @@ -4,6 +4,7 @@ #include #include #include +#include namespace DB { @@ -94,41 +95,21 @@ bool ParserAlwaysFalse::parseImpl(IParser::Pos & /*pos*/, ASTPtr & node, Expecte return true; } -bool ParserCharsetName::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &) +bool ParserCharsetOrCollateName::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & expected) { - /// Identifier in backquotes or in double quotes - if (pos->type == TokenType::QuotedIdentifier) - { - ReadBufferFromMemory buf(pos->begin, pos->size()); - String s; + ParserIdentifier p_identifier; + ParserStringLiteral p_string_literal; - if (*pos->begin == '`') - 
readBackQuotedStringWithSQLStyle(s, buf); - else - readDoubleQuotedStringWithSQLStyle(s, buf); - - if (s.empty()) /// Identifiers "empty string" are not allowed. - return false; - - node = std::make_shared(s); - ++pos; + if (p_identifier.parse(pos, node, expected)) return true; - } - else if (pos->type == TokenType::BareWord) + else { - const char * begin = pos->begin; - - while (true) + if (p_string_literal.parse(pos, node, expected)) { - if (!isWhitespaceASCII(*pos->end) && pos->type != TokenType::EndOfStream) - ++pos; - else - break; + const auto & string_value = node->as()->value.safeGet(); + node = std::make_shared(string_value); + return true; } - - node = std::make_shared(String(begin, pos->end)); - ++pos; - return true; } return false; diff --git a/src/Parsers/MySQL/ASTDeclareOption.h b/src/Parsers/MySQL/ASTDeclareOption.h index 24800371061..2502618b209 100644 --- a/src/Parsers/MySQL/ASTDeclareOption.h +++ b/src/Parsers/MySQL/ASTDeclareOption.h @@ -61,10 +61,10 @@ public: /// Copy and paste from ParserIdentifier, /// the difference is that multiple tokens are glued if there is no whitespace ASCII between them -struct ParserCharsetName : public IParserBase +struct ParserCharsetOrCollateName : public IParserBase { protected: - const char * getName() const override { return "charset name"; } + const char * getName() const override { return "charset or collate name"; } bool parseImpl(Pos & pos, ASTPtr & node, Expected &) override; }; diff --git a/src/Parsers/MySQL/ASTDeclareTableOptions.cpp b/src/Parsers/MySQL/ASTDeclareTableOptions.cpp index 87b99cdf1ac..c903c7d2fa7 100644 --- a/src/Parsers/MySQL/ASTDeclareTableOptions.cpp +++ b/src/Parsers/MySQL/ASTDeclareTableOptions.cpp @@ -68,12 +68,12 @@ bool ParserDeclareTableOptions::parseImpl(IParser::Pos & pos, ASTPtr & node, Exp { OptionDescribe("AUTO_INCREMENT", "auto_increment", std::make_shared()), OptionDescribe("AVG_ROW_LENGTH", "avg_row_length", std::make_shared()), - OptionDescribe("CHARSET", "character_set", std::make_shared()), - OptionDescribe("DEFAULT CHARSET", "character_set", std::make_shared()), - OptionDescribe("CHARACTER SET", "character_set", std::make_shared()), + OptionDescribe("CHARSET", "character_set", std::make_shared()), + OptionDescribe("DEFAULT CHARSET", "character_set", std::make_shared()), + OptionDescribe("CHARACTER SET", "character_set", std::make_shared()), OptionDescribe("DEFAULT CHARACTER SET", "character_set", std::make_shared()), OptionDescribe("CHECKSUM", "checksum", std::make_shared>()), - OptionDescribe("COLLATE", "collate", std::make_shared()), + OptionDescribe("COLLATE", "collate", std::make_shared()), OptionDescribe("DEFAULT COLLATE", "collate", std::make_shared()), OptionDescribe("COMMENT", "comment", std::make_shared()), OptionDescribe("COMPRESSION", "compression", std::make_shared()), From c8aa007a455372ab0b71a60f3405aea9beb5c5d6 Mon Sep 17 00:00:00 2001 From: zhang2014 Date: Thu, 15 Oct 2020 20:42:10 +0800 Subject: [PATCH 107/432] ISSUES-15883 modify comment --- src/Parsers/MySQL/ASTDeclareOption.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Parsers/MySQL/ASTDeclareOption.h b/src/Parsers/MySQL/ASTDeclareOption.h index 2502618b209..a9529924567 100644 --- a/src/Parsers/MySQL/ASTDeclareOption.h +++ b/src/Parsers/MySQL/ASTDeclareOption.h @@ -59,8 +59,7 @@ public: bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; }; -/// Copy and paste from ParserIdentifier, -/// the difference is that multiple tokens are glued if there is no whitespace ASCII between 
them +/// identifier, string literal, binary keyword struct ParserCharsetOrCollateName : public IParserBase { protected: From ee8a9d2cf12220fb1420e2186cb8735256ed4fa5 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Thu, 15 Oct 2020 18:57:17 +0300 Subject: [PATCH 108/432] Don't touch MySQL database if it's unnecessary --- src/Databases/DatabaseLazy.h | 4 ++++ src/Databases/IDatabase.h | 4 ++++ src/Databases/MySQL/DatabaseConnectionMySQL.h | 6 ++++++ src/Interpreters/AsynchronousMetrics.cpp | 4 ++-- src/Server/ReplicasStatusHandler.cpp | 4 ++-- src/Storages/System/StorageSystemDistributionQueue.cpp | 4 ++-- src/Storages/System/StorageSystemGraphite.cpp | 4 ++-- src/Storages/System/StorageSystemMutations.cpp | 4 ++-- src/Storages/System/StorageSystemPartsBase.cpp | 6 +++--- src/Storages/System/StorageSystemReplicas.cpp | 4 ++-- src/Storages/System/StorageSystemReplicationQueue.cpp | 4 ++-- 11 files changed, 31 insertions(+), 17 deletions(-) diff --git a/src/Databases/DatabaseLazy.h b/src/Databases/DatabaseLazy.h index 13c14863efb..2d091297c91 100644 --- a/src/Databases/DatabaseLazy.h +++ b/src/Databases/DatabaseLazy.h @@ -22,6 +22,10 @@ public: String getEngineName() const override { return "Lazy"; } + bool canContainMergeTreeTables() const override { return false; } + + bool canContainDistributedTables() const override { return false; } + void loadStoredObjects( Context & context, bool has_force_restore_data_flag, bool force_attach) override; diff --git a/src/Databases/IDatabase.h b/src/Databases/IDatabase.h index b28bd5fd599..fadec5fe7a9 100644 --- a/src/Databases/IDatabase.h +++ b/src/Databases/IDatabase.h @@ -147,6 +147,10 @@ public: /// Get name of database engine. virtual String getEngineName() const = 0; + virtual bool canContainMergeTreeTables() const { return true; } + + virtual bool canContainDistributedTables() const { return true; } + /// Load a set of existing tables. /// You can call only once, right after the object is created. 
virtual void loadStoredObjects(Context & /*context*/, bool /*has_force_restore_data_flag*/, bool /*force_attach*/ = false) {} diff --git a/src/Databases/MySQL/DatabaseConnectionMySQL.h b/src/Databases/MySQL/DatabaseConnectionMySQL.h index 7bf5e8c1d88..d8694e71db2 100644 --- a/src/Databases/MySQL/DatabaseConnectionMySQL.h +++ b/src/Databases/MySQL/DatabaseConnectionMySQL.h @@ -42,6 +42,12 @@ public: String getEngineName() const override { return "MySQL"; } + bool canContainMergeTreeTables() const override { return false; } + + bool canContainDistributedTables() const override { return false; } + + bool shouldBeEmptyOnDetach() const override { return false; } + bool empty() const override; DatabaseTablesIteratorPtr getTablesIterator(const Context & context, const FilterByNameFunction & filter_by_table_name) override; diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index feb2036a0d6..e1a9a820ebb 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -233,8 +233,8 @@ void AsynchronousMetrics::update() for (const auto & db : databases) { - /// Lazy database can not contain MergeTree tables - if (db.second->getEngineName() == "Lazy") + /// Check if database can contain MergeTree tables + if (!db.second->canContainMergeTreeTables()) continue; for (auto iterator = db.second->getTablesIterator(context); iterator->isValid(); iterator->next()) { diff --git a/src/Server/ReplicasStatusHandler.cpp b/src/Server/ReplicasStatusHandler.cpp index bc5436f00ee..1aa5c10afd7 100644 --- a/src/Server/ReplicasStatusHandler.cpp +++ b/src/Server/ReplicasStatusHandler.cpp @@ -43,8 +43,8 @@ void ReplicasStatusHandler::handleRequest(Poco::Net::HTTPServerRequest & request /// Iterate through all the replicated tables. 
     for (const auto & db : databases)
     {
-        /// Lazy database can not contain replicated tables
-        if (db.second->getEngineName() == "Lazy")
+        /// Check if database can contain replicated tables
+        if (!db.second->canContainMergeTreeTables())
             continue;
 
         for (auto iterator = db.second->getTablesIterator(context); iterator->isValid(); iterator->next())
diff --git a/src/Storages/System/StorageSystemDistributionQueue.cpp b/src/Storages/System/StorageSystemDistributionQueue.cpp
index 2459be0ba71..39ccea64e26 100644
--- a/src/Storages/System/StorageSystemDistributionQueue.cpp
+++ b/src/Storages/System/StorageSystemDistributionQueue.cpp
@@ -38,8 +38,8 @@ void StorageSystemDistributionQueue::fillData(MutableColumns & res_columns, cons
     std::map<String, std::map<String, StoragePtr>> tables;
     for (const auto & db : DatabaseCatalog::instance().getDatabases())
     {
-        /// Lazy database can not contain distributed tables
-        if (db.second->getEngineName() == "Lazy")
+        /// Check if database can contain distributed tables
+        if (!db.second->canContainDistributedTables())
             continue;
 
         const bool check_access_for_tables = check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, db.first);
diff --git a/src/Storages/System/StorageSystemGraphite.cpp b/src/Storages/System/StorageSystemGraphite.cpp
index ffa789a4751..93bc16785b2 100644
--- a/src/Storages/System/StorageSystemGraphite.cpp
+++ b/src/Storages/System/StorageSystemGraphite.cpp
@@ -32,8 +32,8 @@ static StorageSystemGraphite::Configs getConfigs(const Context & context)
     for (const auto & db : databases)
     {
-        /// Lazy database can not contain MergeTree tables
-        if (db.second->getEngineName() == "Lazy")
+        /// Check if database can contain MergeTree tables
+        if (!db.second->canContainMergeTreeTables())
             continue;
 
         for (auto iterator = db.second->getTablesIterator(context); iterator->isValid(); iterator->next())
diff --git a/src/Storages/System/StorageSystemMutations.cpp b/src/Storages/System/StorageSystemMutations.cpp
index 32f672b8401..f66f57ef5d1 100644
--- a/src/Storages/System/StorageSystemMutations.cpp
+++ b/src/Storages/System/StorageSystemMutations.cpp
@@ -44,8 +44,8 @@ void StorageSystemMutations::fillData(MutableColumns & res_columns, const Contex
     std::map<String, std::map<String, StoragePtr>> merge_tree_tables;
     for (const auto & db : DatabaseCatalog::instance().getDatabases())
     {
-        /// Lazy database can not contain MergeTree tables
-        if (db.second->getEngineName() == "Lazy")
+        /// Check if database can contain MergeTree tables
+        if (!db.second->canContainMergeTreeTables())
             continue;
 
         const bool check_access_for_tables = check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, db.first);
diff --git a/src/Storages/System/StorageSystemPartsBase.cpp b/src/Storages/System/StorageSystemPartsBase.cpp
index faa2ec0e1c3..d10346af89f 100644
--- a/src/Storages/System/StorageSystemPartsBase.cpp
+++ b/src/Storages/System/StorageSystemPartsBase.cpp
@@ -83,9 +83,9 @@ StoragesInfoStream::StoragesInfoStream(const SelectQueryInfo & query_info, const
     MutableColumnPtr database_column_mut = ColumnString::create();
     for (const auto & database : databases)
     {
-        /// Lazy database can not contain MergeTree tables
-        /// and it's unnecessary to load all tables of Lazy database just to filter all of them.
-        if (database.second->getEngineName() != "Lazy")
+        /// Check if database can contain MergeTree tables,
+        /// if not, it's unnecessary to load all tables of the database just to filter all of them.
+        if (database.second->canContainMergeTreeTables())
             database_column_mut->insert(database.first);
     }
     block_to_filter.insert(ColumnWithTypeAndName(
diff --git a/src/Storages/System/StorageSystemReplicas.cpp b/src/Storages/System/StorageSystemReplicas.cpp
index 7ab6e939815..973ccfbf464 100644
--- a/src/Storages/System/StorageSystemReplicas.cpp
+++ b/src/Storages/System/StorageSystemReplicas.cpp
@@ -74,8 +74,8 @@ Pipe StorageSystemReplicas::read(
     std::map<String, std::map<String, StoragePtr>> replicated_tables;
     for (const auto & db : DatabaseCatalog::instance().getDatabases())
     {
-        /// Lazy database can not contain replicated tables
-        if (db.second->getEngineName() == "Lazy")
+        /// Check if database can contain replicated tables
+        if (!db.second->canContainMergeTreeTables())
             continue;
         const bool check_access_for_tables = check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, db.first);
         for (auto iterator = db.second->getTablesIterator(context); iterator->isValid(); iterator->next())
diff --git a/src/Storages/System/StorageSystemReplicationQueue.cpp b/src/Storages/System/StorageSystemReplicationQueue.cpp
index f04d8759507..9cd5e8b8ff3 100644
--- a/src/Storages/System/StorageSystemReplicationQueue.cpp
+++ b/src/Storages/System/StorageSystemReplicationQueue.cpp
@@ -55,8 +55,8 @@ void StorageSystemReplicationQueue::fillData(MutableColumns & res_columns, const
     std::map<String, std::map<String, StoragePtr>> replicated_tables;
     for (const auto & db : DatabaseCatalog::instance().getDatabases())
     {
-        /// Lazy database can not contain replicated tables
-        if (db.second->getEngineName() == "Lazy")
+        /// Check if database can contain replicated tables
+        if (!db.second->canContainMergeTreeTables())
             continue;
         const bool check_access_for_tables = check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, db.first);
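The hunks of the patch above all make the same substitution: call sites stop comparing engine names ("Lazy", and by extension "MySQL") and instead ask the database object whether it can hold the kind of table being scanned. A rough Python sketch of that pattern, with class and method names invented purely for this illustration (this is not ClickHouse code):

# capability_flags.py -- illustration of the refactoring pattern only.
class Database:
    """Base class: most engines can hold MergeTree and Distributed tables."""
    def tables(self):
        return []  # placeholder; a real engine would enumerate its tables

    def can_contain_merge_tree_tables(self) -> bool:
        return True

    def can_contain_distributed_tables(self) -> bool:
        return True

class LazyDatabase(Database):
    """Lazy loads tables on demand, so system-table scans skip it entirely."""
    def can_contain_merge_tree_tables(self) -> bool:
        return False

    def can_contain_distributed_tables(self) -> bool:
        return False

def iterate_merge_tree_tables(databases):
    # One virtual call replaces string comparisons scattered over call sites,
    # so teaching a new engine (e.g. MySQL) to opt out needs no changes here.
    for db in databases:
        if not db.can_contain_merge_tree_tables():
            continue
        yield from db.tables()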
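The next patch below stabilizes the sampled-tracing test: with opentelemetry_start_trace_probability=0.1 over 200 queries, the number of sampled traces is Binomial(200, 0.1), so the test can only assert a range around the expected 20. A quick check of how tightly that distribution concentrates (illustrative only; the [5, 35] bounds are chosen here for the illustration, not taken from the test):

# sampling_check.py -- illustration: how flaky is "roughly 20 of 200 sampled"?
from scipy import stats

n, p = 200, 0.1
dist = stats.binom(n, p)
print("mean = %.1f, stddev = %.2f" % (n * p, dist.std()))
# Probability that a correctly working 10% sampler lands outside [5, 35]:
print("P(outside [5, 35]) = %.2e" % (dist.cdf(4) + dist.sf(35)))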
From f02840449e6b3c5bfcdb4d7cb03e6489e8e22b1f Mon Sep 17 00:00:00 2001
From: Alexander Kuzmenkov
Date: Thu, 15 Oct 2020 20:27:18 +0300
Subject: [PATCH 109/432] fix the test

---
 .../01455_opentelemetry_distributed.sh        | 22 ++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh
index d3114ab66ff..446f713c11e 100755
--- a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh
+++ b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh
@@ -63,14 +63,24 @@ check_log
 echo "===sampled==="
 query_id=$(${CLICKHOUSE_CLIENT} -q "select lower(hex(reverse(reinterpretAsString(generateUUIDv4()))))")
 
-for _ in {1..200}
+for i in {1..200}
 do
     ${CLICKHOUSE_CLIENT} \
         --opentelemetry_start_trace_probability=0.1 \
-        --query_id "$query_id" \
-        --query "select 1 from remote('127.0.0.2', system, one) format Null"
-done
+        --query_id "$query_id-$i" \
+        --query "select 1 from remote('127.0.0.2', system, one) format Null" \
+        &
+    # clickhouse-client is slow to start, so run them in parallel, but not too
+    # much.
+ if [[ $((i % 10)) -eq 0 ]] + then + wait + fi +done +wait + +${CLICKHOUSE_CLIENT} -q "system flush logs" ${CLICKHOUSE_CLIENT} -q " with count(*) as c -- expect 200 * 0.1 = 20 sampled events on average @@ -78,6 +88,8 @@ ${CLICKHOUSE_CLIENT} -q " from system.opentelemetry_log array join attribute.names as name, attribute.values as value where name = 'clickhouse.query_id' - and value = '$query_id' + and operation_name = 'query' + and parent_span_id = 0 -- only account for the initial queries + and value like '$query_id-%' ; " From dc8e165ce4fa98d715f2d3817057a4eb2d5b47cf Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Thu, 15 Oct 2020 20:27:26 +0300 Subject: [PATCH 110/432] improve fasttest usability --- docker/test/fasttest/Dockerfile | 1 - docker/test/fasttest/run.sh | 100 +++++++++--------- tests/clickhouse-test | 8 +- .../0_stateless/01293_show_clusters.reference | 6 -- .../0_stateless/01293_show_clusters.sql | 4 +- 5 files changed, 58 insertions(+), 61 deletions(-) diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile index 3cfa57bd747..6547a98c58b 100644 --- a/docker/test/fasttest/Dockerfile +++ b/docker/test/fasttest/Dockerfile @@ -56,7 +56,6 @@ RUN apt-get update \ python3-lxml \ python3-requests \ python3-termcolor \ - qemu-user-static \ rename \ software-properties-common \ tzdata \ diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index ad44cfc3cf9..f12ecbb2c9c 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -191,63 +191,65 @@ stop_server ||: start_server TESTS_TO_SKIP=( - parquet - avro - h3 - odbc - mysql - sha256 - _orc_ - arrow - 01098_temporary_and_external_tables - 01083_expressions_in_engine_arguments - hdfs - 00911_tautological_compare - protobuf - capnproto - java_hash - hashing - secure - 00490_special_line_separators_and_characters_outside_of_bmp - 00436_convert_charset 00105_shard_collations - 01354_order_by_tuple_collate_const - 01292_create_user - 01098_msgpack_format - 00929_multi_match_edit_distance - 00926_multimatch - 00834_cancel_http_readonly_queries_on_client_close - brotli - parallel_alter + 00109_shard_totals_after_having + 00110_external_sort 00302_http_compression 00417_kill_query - 01294_lazy_database_concurrent - 01193_metadata_loading - base64 - 01031_mutations_interpreter_and_context - json - client - 01305_replica_create_drop_zookeeper - 01092_memory_profiler - 01355_ilike - 01281_unsucceeded_insert_select_queries_counter - live_view - limit_memory - memory_limit - memory_leak - 00110_external_sort + 00436_convert_charset + 00490_special_line_separators_and_characters_outside_of_bmp + 00652_replicated_mutations_zookeeper 00682_empty_parts_merge 00701_rollup - 00109_shard_totals_after_having - ddl_dictionaries + 00834_cancel_http_readonly_queries_on_client_close + 00911_tautological_compare + 00926_multimatch + 00929_multi_match_edit_distance + 01031_mutations_interpreter_and_context + 01053_ssd_dictionary # this test mistakenly requires acces to /var/lib/clickhouse -- can't run this locally, disabled + 01083_expressions_in_engine_arguments + 01092_memory_profiler + 01098_msgpack_format + 01098_temporary_and_external_tables + 01103_check_cpu_instructions_at_startup # avoid dependency on qemu -- invonvenient when running locally + 01193_metadata_loading + 01238_http_memory_tracking # max_memory_usage_for_user can interfere another queries running concurrently 01251_dict_is_in_infinite_loop 01259_dictionary_custom_settings_ddl 01268_dictionary_direct_layout 
01280_ssd_complex_key_dictionary - 00652_replicated_mutations_zookeeper - 01411_bayesian_ab_testing - 01238_http_memory_tracking # max_memory_usage_for_user can interfere another queries running concurrently 01281_group_by_limit_memory_tracking # max_memory_usage_for_user can interfere another queries running concurrently + 01281_unsucceeded_insert_select_queries_counter + 01292_create_user + 01294_lazy_database_concurrent + 01305_replica_create_drop_zookeeper + 01354_order_by_tuple_collate_const + 01355_ilike + 01411_bayesian_ab_testing + _orc_ + arrow + avro + base64 + brotli + capnproto + client + ddl_dictionaries + h3 + hashing + hdfs + java_hash + json + limit_memory + live_view + memory_leak + memory_limit + mysql + odbc + parallel_alter + parquet + protobuf + secure + sha256 # Not sure why these two fail even in sequential mode. Disabled for now # to make some progress. @@ -258,7 +260,7 @@ TESTS_TO_SKIP=( 01460_DistributedFilesToInsert ) -time clickhouse-test -j 8 --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt" +time clickhouse-test -j 8 --order=random --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt" # substr is to remove semicolon after test name readarray -t FAILED_TESTS < <(awk '/FAIL|TIMEOUT|ERROR/ { print substr($3, 1, length($3)-1) }' "$FASTTEST_OUTPUT/test_log.txt" | tee "$FASTTEST_OUTPUT/failed-parallel-tests.txt") @@ -281,7 +283,7 @@ then echo "Going to run again: ${FAILED_TESTS[*]}" - clickhouse-test --no-long --testname --shard --zookeeper "${FAILED_TESTS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a "$FASTTEST_OUTPUT/test_log.txt" + clickhouse-test --order=random --no-long --testname --shard --zookeeper "${FAILED_TESTS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a "$FASTTEST_OUTPUT/test_log.txt" else echo "No failed tests" fi diff --git a/tests/clickhouse-test b/tests/clickhouse-test index be8f9551db2..d5736001d60 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -353,20 +353,22 @@ def run_tests_array(all_tests_with_params): if os.path.isfile(stdout_file): print(", result:\n") - print(open(stdout_file).read()) + print('\n'.join(open(stdout_file).read().split('\n')[:100])) elif stderr: failures += 1 failures_chain += 1 print(MSG_FAIL, end='') print_test_time(total_time) - print(" - having stderror:\n{}".format(stderr)) + print(" - having stderror:\n{}".format( + '\n'.join(stderr.split('\n')[:100]))) elif 'Exception' in stdout: failures += 1 failures_chain += 1 print(MSG_FAIL, end='') print_test_time(total_time) - print(" - having exception:\n{}".format(stdout)) + print(" - having exception:\n{}".format( + '\n'.join(stdout.split('\n')[:100]))) elif not os.path.isfile(reference_file): print(MSG_UNKNOWN, end='') print_test_time(total_time) diff --git a/tests/queries/0_stateless/01293_show_clusters.reference b/tests/queries/0_stateless/01293_show_clusters.reference index 39e25143131..590ca348458 100644 --- a/tests/queries/0_stateless/01293_show_clusters.reference +++ b/tests/queries/0_stateless/01293_show_clusters.reference @@ -1,8 +1,2 @@ -test_cluster_two_shards -test_cluster_two_shards_different_databases -test_cluster_two_shards_localhost test_shard_localhost -test_shard_localhost_secure -test_unavailable_shard -test_cluster_two_shards test_shard_localhost 1 1 1 localhost ::1 9000 1 default 0 0 diff --git a/tests/queries/0_stateless/01293_show_clusters.sql 
b/tests/queries/0_stateless/01293_show_clusters.sql index af450680dac..ad5e51531e3 100644 --- a/tests/queries/0_stateless/01293_show_clusters.sql +++ b/tests/queries/0_stateless/01293_show_clusters.sql @@ -1,3 +1,3 @@ -show clusters; -show clusters like 'test%' limit 1; +-- don't show all clusters to reduce dependency on the configuration of server +show clusters like 'test_shard%' limit 1; show cluster 'test_shard_localhost'; From a80bbf6e0eff4db2222776b18a5f1a578784ba82 Mon Sep 17 00:00:00 2001 From: nikitamikhaylov Date: Thu, 15 Oct 2020 22:54:17 +0300 Subject: [PATCH 111/432] fasttest + rm .py --- docker/test/fasttest/Dockerfile | 3 +++ ...test_scipy.py => 01322_ttest_scipy.python} | 0 .../queries/0_stateless/01322_ttest_scipy.sh | 2 +- .../01521_skip_unused_shards_bugfix.sql | 25 +++++++++++++++++++ 4 files changed, 29 insertions(+), 1 deletion(-) rename tests/queries/0_stateless/{01322_ttest_scipy.py => 01322_ttest_scipy.python} (100%) create mode 100644 tests/queries/0_stateless/01521_skip_unused_shards_bugfix.sql diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile index 3cfa57bd747..23d7504230a 100644 --- a/docker/test/fasttest/Dockerfile +++ b/docker/test/fasttest/Dockerfile @@ -53,6 +53,7 @@ RUN apt-get update \ ninja-build \ psmisc \ python3 \ + python3-pip \ python3-lxml \ python3-requests \ python3-termcolor \ @@ -63,6 +64,8 @@ RUN apt-get update \ unixodbc \ --yes --no-install-recommends +RUN pip3 install numpy scipy pandas + # This symlink required by gcc to find lld compiler RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld diff --git a/tests/queries/0_stateless/01322_ttest_scipy.py b/tests/queries/0_stateless/01322_ttest_scipy.python similarity index 100% rename from tests/queries/0_stateless/01322_ttest_scipy.py rename to tests/queries/0_stateless/01322_ttest_scipy.python diff --git a/tests/queries/0_stateless/01322_ttest_scipy.sh b/tests/queries/0_stateless/01322_ttest_scipy.sh index 10dc79614d4..31c1acf3e60 100755 --- a/tests/queries/0_stateless/01322_ttest_scipy.sh +++ b/tests/queries/0_stateless/01322_ttest_scipy.sh @@ -5,4 +5,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # We should have correct env vars from shell_config.sh to run this test -python3 "$CURDIR"/01322_ttest_scipy.py \ No newline at end of file +python3 "$CURDIR"/01322_ttest_scipy.python diff --git a/tests/queries/0_stateless/01521_skip_unused_shards_bugfix.sql b/tests/queries/0_stateless/01521_skip_unused_shards_bugfix.sql new file mode 100644 index 00000000000..a6dd118bad3 --- /dev/null +++ b/tests/queries/0_stateless/01521_skip_unused_shards_bugfix.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS mv; +DROP DATABASE IF EXISTS dict_01521; +CREATE DATABASE dict_01521; + +CREATE TABLE dict_01521.sharding_table (key UInt64, val UInt64) Engine=Memory(); + +CREATE DICTIONARY dict_01521.sharding_dict +( + key UInt64 DEFAULT 0, + val UInt8 DEFAULT 1 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'sharding_table' PASSWORD '' DB 'dict_01521')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(HASHED()); + +INSERT INTO dict_01521.sharding_table VALUES (150, 1), (151, 2); + +CREATE TABLE table_first (a UInt64, b UInt64) ENGINE = Memory; +CREATE TABLE table_second (a UInt64, b UInt64) ENGINE = Memory; + +CREATE TABLE table_distr (a Int) ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), 't_local'); + + + From 708fedbcf8bc700d21bdab7ecf66b590abb6fd75 Mon Sep 17 00:00:00 2001 From: MyroTk Date: Thu, 15 Oct 2020 22:23:49 +0200 Subject: [PATCH 
112/432] testflows RBAC tests for views, distributed tables, public tables, and privileges: alter, updated select, updated insert, and show tables --- tests/testflows/example/regression.py | 6 +- tests/testflows/helpers/argparser.py | 15 +- tests/testflows/helpers/cluster.py | 89 +- tests/testflows/ldap/regression.py | 6 +- .../configs/clickhouse/config.d/remote.xml | 73 +- .../rbac/configs/clickhouse/config.d/ssl.xml | 1 + .../rbac/configs/clickhouse/config.xml | 20 +- .../docker-compose/clickhouse-service.yml | 0 .../rbac/docker-compose/docker-compose.yml | 4 +- .../rbac/docker-compose/zookeeper-service.yml | 8 +- tests/testflows/rbac/helper/__init__.py | 0 tests/testflows/rbac/helper/common.py | 124 + tests/testflows/rbac/helper/errors.py | 123 + tests/testflows/rbac/helper/tables.py | 41 + tests/testflows/rbac/regression.py | 118 +- tests/testflows/rbac/requirements/__init__.py | 2 +- .../rbac/requirements/requirements.md | 959 ++++++- .../rbac/requirements/requirements.py | 1332 +++++++++- .../rbac/tests/privileges/__init__.py | 7 + .../rbac/tests/privileges/alter/__init__.py | 0 .../tests/privileges/alter/alter_column.py | 993 ++++++++ .../privileges/alter/alter_constraint.py | 629 +++++ .../tests/privileges/alter/alter_index.py | 815 ++++++ .../tests/privileges/alter/alter_settings.py | 476 ++++ .../rbac/tests/privileges/alter/alter_ttl.py | 605 +++++ .../tests/privileges/distributed_table.py | 1000 ++++++++ .../rbac/tests/privileges/feature.py | 24 +- .../testflows/rbac/tests/privileges/insert.py | 570 +++-- .../rbac/tests/privileges/public_tables.py | 38 + .../testflows/rbac/tests/privileges/select.py | 419 ++- .../rbac/tests/privileges/show_tables.py | 62 + .../rbac/tests/syntax/alter_quota.py | 8 +- .../testflows/rbac/tests/syntax/alter_role.py | 2 +- .../rbac/tests/syntax/alter_row_policy.py | 2 +- .../tests/syntax/alter_settings_profile.py | 10 +- .../testflows/rbac/tests/syntax/alter_user.py | 2 +- .../rbac/tests/syntax/create_quota.py | 6 +- .../rbac/tests/syntax/create_role.py | 6 +- .../rbac/tests/syntax/create_row_policy.py | 8 +- .../tests/syntax/create_settings_profile.py | 4 +- .../rbac/tests/syntax/create_user.py | 2 +- .../testflows/rbac/tests/syntax/drop_quota.py | 2 +- .../testflows/rbac/tests/syntax/drop_role.py | 6 +- .../rbac/tests/syntax/drop_row_policy.py | 2 +- .../tests/syntax/drop_settings_profile.py | 2 +- .../testflows/rbac/tests/syntax/drop_user.py | 2 +- .../rbac/tests/syntax/grant_privilege.py | 6 +- .../testflows/rbac/tests/syntax/grant_role.py | 10 +- .../rbac/tests/syntax/revoke_privilege.py | 52 +- .../rbac/tests/syntax/revoke_role.py | 6 +- .../rbac/tests/syntax/set_default_role.py | 4 +- tests/testflows/rbac/tests/syntax/set_role.py | 2 +- .../rbac/tests/syntax/show_create_role.py | 2 +- tests/testflows/rbac/tests/views/__init__.py | 0 tests/testflows/rbac/tests/views/feature.py | 20 + tests/testflows/rbac/tests/views/live_view.py | 1141 +++++++++ .../rbac/tests/views/materialized_view.py | 2268 +++++++++++++++++ tests/testflows/rbac/tests/views/view.py | 1162 +++++++++ tests/testflows/regression.py | 4 +- 59 files changed, 12433 insertions(+), 867 deletions(-) mode change 100644 => 100755 tests/testflows/helpers/cluster.py mode change 100644 => 100755 tests/testflows/rbac/docker-compose/clickhouse-service.yml mode change 100644 => 100755 tests/testflows/rbac/docker-compose/docker-compose.yml mode change 100644 => 100755 tests/testflows/rbac/docker-compose/zookeeper-service.yml create mode 100644 tests/testflows/rbac/helper/__init__.py create 
mode 100755 tests/testflows/rbac/helper/common.py create mode 100755 tests/testflows/rbac/helper/errors.py create mode 100755 tests/testflows/rbac/helper/tables.py mode change 100644 => 100755 tests/testflows/rbac/requirements/requirements.py create mode 100755 tests/testflows/rbac/tests/privileges/alter/__init__.py create mode 100755 tests/testflows/rbac/tests/privileges/alter/alter_column.py create mode 100755 tests/testflows/rbac/tests/privileges/alter/alter_constraint.py create mode 100755 tests/testflows/rbac/tests/privileges/alter/alter_index.py create mode 100755 tests/testflows/rbac/tests/privileges/alter/alter_settings.py create mode 100755 tests/testflows/rbac/tests/privileges/alter/alter_ttl.py create mode 100755 tests/testflows/rbac/tests/privileges/distributed_table.py create mode 100755 tests/testflows/rbac/tests/privileges/public_tables.py mode change 100644 => 100755 tests/testflows/rbac/tests/privileges/select.py create mode 100755 tests/testflows/rbac/tests/privileges/show_tables.py create mode 100755 tests/testflows/rbac/tests/views/__init__.py create mode 100755 tests/testflows/rbac/tests/views/feature.py create mode 100755 tests/testflows/rbac/tests/views/live_view.py create mode 100755 tests/testflows/rbac/tests/views/materialized_view.py create mode 100755 tests/testflows/rbac/tests/views/view.py diff --git a/tests/testflows/example/regression.py b/tests/testflows/example/regression.py index 2c0a778d39b..cb58b42ba4c 100755 --- a/tests/testflows/example/regression.py +++ b/tests/testflows/example/regression.py @@ -2,7 +2,7 @@ import sys from testflows.core import * -append_path(sys.path, "..") +append_path(sys.path, "..") from helpers.cluster import Cluster from helpers.argparser import argparser @@ -10,13 +10,13 @@ from helpers.argparser import argparser @TestFeature @Name("example") @ArgumentParser(argparser) -def regression(self, local, clickhouse_binary_path): +def regression(self, local, clickhouse_binary_path, stress=None, parallel=None): """Simple example of how you can use TestFlows to test ClickHouse. """ nodes = { "clickhouse": ("clickhouse1",), } - + with Cluster(local, clickhouse_binary_path, nodes=nodes) as cluster: self.context.cluster = cluster diff --git a/tests/testflows/helpers/argparser.py b/tests/testflows/helpers/argparser.py index 033c15a3bfe..03014becb76 100644 --- a/tests/testflows/helpers/argparser.py +++ b/tests/testflows/helpers/argparser.py @@ -1,5 +1,12 @@ import os +def onoff(v): + if v in ["yes", "1", "on"]: + return True + elif v in ["no", "0", "off"]: + return False + raise ValueError(f"invalid {v}") + def argparser(parser): """Default argument parser for regressions. 
""" @@ -10,4 +17,10 @@ def argparser(parser): parser.add_argument("--clickhouse-binary-path", type=str, dest="clickhouse_binary_path", help="path to ClickHouse binary, default: /usr/bin/clickhouse", metavar="path", - default=os.getenv("CLICKHOUSE_TESTS_SERVER_BIN_PATH", "/usr/bin/clickhouse")) \ No newline at end of file + default=os.getenv("CLICKHOUSE_TESTS_SERVER_BIN_PATH", "/usr/bin/clickhouse")) + + parser.add_argument("--stress", action="store_true", default=False, + help="enable stress testing (might take a long time)") + + parser.add_argument("--parallel", type=onoff, default=True, choices=["yes", "no", "on", "off", 0, 1], + help="enable parallelism for tests that support it") \ No newline at end of file diff --git a/tests/testflows/helpers/cluster.py b/tests/testflows/helpers/cluster.py old mode 100644 new mode 100755 index 8fda8ac43d8..490a9f4e17e --- a/tests/testflows/helpers/cluster.py +++ b/tests/testflows/helpers/cluster.py @@ -7,6 +7,7 @@ import tempfile from testflows.core import * from testflows.asserts import error from testflows.connect import Shell +from testflows.uexpect import ExpectTimeoutError class QueryRuntimeException(Exception): """Exception during query execution on the server. @@ -78,32 +79,43 @@ class ClickHouseNode(Node): def query(self, sql, message=None, exitcode=None, steps=True, no_checks=False, raise_on_exception=False, step=By, settings=None, *args, **kwargs): """Execute and check query. - :param sql: sql query :param message: expected message that should be in the output, default: None :param exitcode: expected exitcode, default: None """ + settings = list(settings or []) + + if hasattr(current().context, "default_query_settings"): + settings += current().context.default_query_settings + if len(sql) > 1024: with tempfile.NamedTemporaryFile("w", encoding="utf-8") as query: query.write(sql) query.flush() command = f"cat \"{query.name}\" | {self.cluster.docker_compose} exec -T {self.name} clickhouse client -n" - for setting in settings or []: + for setting in settings: name, value = setting command += f" --{name} \"{value}\"" description = f""" echo -e \"{sql[:100]}...\" > {query.name} {command} """ - with step("executing command", description=description) if steps else NullStep(): - r = self.cluster.bash(None)(command, *args, **kwargs) + with step("executing command", description=description, format_description=False) if steps else NullStep(): + try: + r = self.cluster.bash(None)(command, *args, **kwargs) + except ExpectTimeoutError: + self.cluster.close_bash(None) else: command = f"echo -e \"{sql}\" | clickhouse client -n" - for setting in settings or []: + for setting in settings: name, value = setting command += f" --{name} \"{value}\"" - with step("executing command", description=command) if steps else NullStep(): - r = self.cluster.bash(self.name)(command, *args, **kwargs) + with step("executing command", description=command, format_description=False) if steps else NullStep(): + try: + r = self.cluster.bash(self.name)(command, *args, **kwargs) + except ExpectTimeoutError: + self.cluster.close_bash(self.name) + raise if no_checks: return r @@ -134,6 +146,7 @@ class Cluster(object): docker_compose="docker-compose", docker_compose_project_dir=None, docker_compose_file="docker-compose.yml"): + self.terminating = False self._bash = {} self.clickhouse_binary_path = clickhouse_binary_path self.configs_dir = configs_dir @@ -183,11 +196,19 @@ class Cluster(object): def bash(self, node, timeout=120): """Returns thread-local bash terminal to a specific node. 
- :param node: name of the service """ + test = current() + + if self.terminating: + if test and (test.cflags & MANDATORY): + pass + else: + raise InterruptedError("terminating") + current_thread = threading.current_thread() - id = f"{current_thread.ident}-{node}" + id = f"{current_thread.name}-{node}" + with self.lock: if self._bash.get(id) is None: if node is None: @@ -196,9 +217,30 @@ class Cluster(object): self._bash[id] = Shell(command=[ "/bin/bash", "--noediting", "-c", f"{self.docker_compose} exec {node} bash --noediting" ], name=node).__enter__() + self._bash[id].timeout = timeout + + # clean up any stale open shells for threads that have exited + active_thread_names = {thread.name for thread in threading.enumerate()} + + for bash_id in list(self._bash.keys()): + thread_name, node_name = bash_id.rsplit("-", 1) + if thread_name not in active_thread_names: + self._bash[bash_id].__exit__(None, None, None) + del self._bash[bash_id] + return self._bash[id] + def close_bash(self, node): + current_thread = threading.current_thread() + id = f"{current_thread.name}-{node}" + + with self.lock: + if self._bash.get(id) is None: + return + self._bash[id].__exit__(None, None, None) + del self._bash[id] + def __enter__(self): with Given("docker-compose cluster"): self.up() @@ -210,20 +252,21 @@ class Cluster(object): self.down() finally: with self.lock: - for shell in list(self._bash.values()): + for shell in self._bash.values(): shell.__exit__(type, value, traceback) def node(self, name): """Get object with node bound methods. - :param name: name of service name """ if name.startswith("clickhouse"): return ClickHouseNode(self, name) return Node(self, name) - def down(self, timeout=120): + def down(self, timeout=300): """Bring cluster down by executing docker-compose down.""" + self.terminating = True + try: bash = self.bash(None) with self.lock: @@ -235,7 +278,7 @@ class Cluster(object): else: self._bash[id] = shell finally: - return self.command(None, f"{self.docker_compose} down", timeout=timeout) + return self.command(None, f"{self.docker_compose} down", bash=bash, timeout=timeout) def up(self, timeout=30*60): if self.local: @@ -264,7 +307,7 @@ class Cluster(object): if cmd.exitcode != 0: continue with And("executing docker-compose down just in case it is up"): - cmd = self.command(None, f"{self.docker_compose} down 2>&1 | tee", exitcode=None, timeout=timeout) + cmd = self.command(None, f"{self.docker_compose} down --remove-orphans 2>&1 | tee", exitcode=None, timeout=timeout) if cmd.exitcode != 0: continue with And("executing docker-compose up"): @@ -285,22 +328,26 @@ class Cluster(object): for name in self.nodes["clickhouse"]: self.node(name).wait_healthy() - def command(self, node, command, message=None, exitcode=None, steps=True, *args, **kwargs): + def command(self, node, command, message=None, exitcode=None, steps=True, bash=None, *args, **kwargs): """Execute and check command. 
- :param node: name of the service :param command: command :param message: expected message that should be in the output, default: None :param exitcode: expected exitcode, default: None :param steps: don't break command into steps, default: True """ - debug(f"command() {node}, {command}") - with By("executing command", description=command) if steps else NullStep(): - r = self.bash(node)(command, *args, **kwargs) + with By("executing command", description=command, format_description=False) if steps else NullStep(): + if bash is None: + bash = self.bash(node) + try: + r = bash(command, *args, **kwargs) + except ExpectTimeoutError: + self.close_bash(node) + raise if exitcode is not None: - with Then(f"exitcode should be {exitcode}") if steps else NullStep(): + with Then(f"exitcode should be {exitcode}", format_name=False) if steps else NullStep(): assert r.exitcode == exitcode, error(r.output) if message is not None: - with Then(f"output should contain message", description=message) if steps else NullStep(): + with Then(f"output should contain message", description=message, format_description=False) if steps else NullStep(): assert message in r.output, error(r.output) return r diff --git a/tests/testflows/ldap/regression.py b/tests/testflows/ldap/regression.py index 567807fc0a8..1c0edcb57d9 100755 --- a/tests/testflows/ldap/regression.py +++ b/tests/testflows/ldap/regression.py @@ -2,7 +2,7 @@ import sys from testflows.core import * -append_path(sys.path, "..") +append_path(sys.path, "..") from helpers.cluster import Cluster from helpers.argparser import argparser @@ -33,13 +33,13 @@ xfails = { RQ_SRS_007_LDAP_Authentication("1.0") ) @XFails(xfails) -def regression(self, local, clickhouse_binary_path): +def regression(self, local, clickhouse_binary_path, stress=None, parallel=None): """ClickHouse integration with LDAP regression module. 
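# The stress/parallel parameters threaded through regression() here come from
# the shared argparser shown earlier. The onoff converter that argparser
# references is not part of this patch; a plausible implementation (purely an
# assumption, for illustration) could be:
#
#     def onoff(value):
#         if value in ("yes", "on", "1", 1, True):
#             return True
#         if value in ("no", "off", "0", 0, False):
#             return False
#         raise ValueError(f"invalid on/off value: {value!r}")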
""" nodes = { "clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3"), } - + with Cluster(local, clickhouse_binary_path, nodes=nodes) as cluster: self.context.cluster = cluster diff --git a/tests/testflows/rbac/configs/clickhouse/config.d/remote.xml b/tests/testflows/rbac/configs/clickhouse/config.d/remote.xml index ada8eec5fc9..a7ed0d6e2b4 100644 --- a/tests/testflows/rbac/configs/clickhouse/config.d/remote.xml +++ b/tests/testflows/rbac/configs/clickhouse/config.d/remote.xml @@ -58,9 +58,44 @@ 9440 1 - - - + + + + + + clickhouse1 + 9440 + 1 + + + + + + + clickhouse1 + 9000 + + + + + clickhouse2 + 9000 + + + + + + + clickhouse1 + 9000 + + + clickhouse2 + 9000 + + + + clickhouse2 @@ -73,8 +108,20 @@ 9000 - - + + + + + clickhouse2 + 9000 + + + clickhouse3 + 9000 + + + + clickhouse1 @@ -94,6 +141,22 @@ + + + + clickhouse1 + 9000 + + + clickhouse2 + 9000 + + + clickhouse3 + 9000 + + + diff --git a/tests/testflows/rbac/configs/clickhouse/config.d/ssl.xml b/tests/testflows/rbac/configs/clickhouse/config.d/ssl.xml index ca65ffd5e04..768d2250b79 100644 --- a/tests/testflows/rbac/configs/clickhouse/config.d/ssl.xml +++ b/tests/testflows/rbac/configs/clickhouse/config.d/ssl.xml @@ -3,6 +3,7 @@ /etc/clickhouse-server/ssl/server.crt /etc/clickhouse-server/ssl/server.key + /etc/clickhouse-server/ssl/dhparam.pem none true diff --git a/tests/testflows/rbac/configs/clickhouse/config.xml b/tests/testflows/rbac/configs/clickhouse/config.xml index 65187edf806..4ec12232539 100644 --- a/tests/testflows/rbac/configs/clickhouse/config.xml +++ b/tests/testflows/rbac/configs/clickhouse/config.xml @@ -69,7 +69,7 @@ - + 0.0.0.0 /var/lib/clickhouse/access/ + + + + + users.xml + + + + /var/lib/clickhouse/access/ + + + users.xml @@ -160,7 +172,7 @@ - + @@ -220,7 +232,7 @@ See https://clickhouse.yandex/docs/en/table_engines/replication/ --> - + - + 300 + + + 20 From 8a39b65fa203739734c1151e3336ebf6d122ffa4 Mon Sep 17 00:00:00 2001 From: nikitamikhaylov Date: Mon, 19 Oct 2020 19:15:22 +0300 Subject: [PATCH 134/432] fix build and tests --- base/glibc-compatibility/musl/lgammal.c | 29 ++++++++++++++++--- docker/test/fasttest/run.sh | 3 ++ .../0_stateless/01322_ttest_scipy.python | 4 +-- 3 files changed, 30 insertions(+), 6 deletions(-) diff --git a/base/glibc-compatibility/musl/lgammal.c b/base/glibc-compatibility/musl/lgammal.c index 534abf41894..3b5d94c5051 100644 --- a/base/glibc-compatibility/musl/lgammal.c +++ b/base/glibc-compatibility/musl/lgammal.c @@ -85,6 +85,20 @@ * */ +#include +#include +#include "libm.h" + + +#if LDBL_MANT_DIG == 53 && LDBL_MAX_EXP == 1024 +double lgamma_r(double x, int *sg); + +long double lgammal_r(long double x, int *sg) +{ + return lgamma_r(x, sg); +} +#elif LDBL_MANT_DIG == 64 && LDBL_MAX_EXP == 16384 + static const long double pi = 3.14159265358979323846264L, /* lgam(1+x) = 0.5 x + x a(x)/b(x) @@ -187,11 +201,8 @@ w5 = 8.412723297322498080632E-4L, w6 = -1.880801938119376907179E-3L, w7 = 4.885026142432270781165E-3L; -#include -#include -#include "libm.h" -long double __lgammal_r(long double x, int *sg) { +long double lgammal_r(long double x, int *sg) { long double t, y, z, nadj, p, p1, p2, q, r, w; union ldshape u = {x}; uint32_t ix = (u.i.se & 0x7fffU)<<16 | u.i.m>>48; @@ -308,6 +319,16 @@ long double __lgammal_r(long double x, int *sg) { r = nadj - r; return r; } +#elif LDBL_MANT_DIG == 113 && LDBL_MAX_EXP == 16384 +// TODO: broken implementation to make things compile +double lgamma_r(double x, int *sg); + +long double lgammal_r(long double x, int *sg) +{ + return lgamma_r(x, 
sg); +} +#endif + int signgam_lgammal; diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index f12ecbb2c9c..9c89e9ffb28 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -258,6 +258,9 @@ TESTS_TO_SKIP=( # Look at DistributedFilesToInsert, so cannot run in parallel. 01460_DistributedFilesToInsert + + # Require python libraries like scipy, pandas and numpy + 01322_ttest_scipy ) time clickhouse-test -j 8 --order=random --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt" diff --git a/tests/queries/0_stateless/01322_ttest_scipy.python b/tests/queries/0_stateless/01322_ttest_scipy.python index d8255cd8062..7068b6c4d5a 100644 --- a/tests/queries/0_stateless/01322_ttest_scipy.python +++ b/tests/queries/0_stateless/01322_ttest_scipy.python @@ -63,8 +63,8 @@ def test_and_check(name, a, b, t_stat, p_value): "FROM ttest FORMAT TabSeparatedWithNames;") real_t_stat = real['t_stat'][0] real_p_value = real['p_value'][0] - assert(abs(real_t_stat - np.float64(t_stat) < 1e-4)), "clickhouse_t_stat {}, scipy_t_stat {}".format(real_t_stat, t_stat) - assert(abs(real_p_value - np.float64(p_value)) < 1e-4), "clickhouse_p_value {}, scipy_p_value {}".format(real_p_value, p_value) + assert(abs(real_t_stat - np.float64(t_stat) < 1e-3)), "clickhouse_t_stat {}, scipy_t_stat {}".format(real_t_stat, t_stat) + assert(abs(real_p_value - np.float64(p_value)) < 1e-3), "clickhouse_p_value {}, scipy_p_value {}".format(real_p_value, p_value) client.query("DROP TABLE IF EXISTS ttest;") From 34beb9308b3a8f508643cb4d899bd7fbad1fdd92 Mon Sep 17 00:00:00 2001 From: MyroTk Date: Mon, 19 Oct 2020 18:48:39 +0200 Subject: [PATCH 135/432] increase timeout --- tests/testflows/helpers/cluster.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/testflows/helpers/cluster.py b/tests/testflows/helpers/cluster.py index a971c61a12e..3a6ee201999 100755 --- a/tests/testflows/helpers/cluster.py +++ b/tests/testflows/helpers/cluster.py @@ -26,7 +26,7 @@ class Node(object): def repr(self): return f"Node(name='{self.name}')" - def restart(self, timeout=120, safe=True): + def restart(self, timeout=300, safe=True): """Restart node. """ with self.cluster.lock: @@ -43,18 +43,18 @@ class Node(object): class ClickHouseNode(Node): """Node with ClickHouse server. """ - def wait_healthy(self, timeout=120): + def wait_healthy(self, timeout=300): with By(f"waiting until container {self.name} is healthy"): start_time = time.time() while True: - if self.query("select 1", no_checks=1, timeout=120, steps=False).exitcode == 0: + if self.query("select 1", no_checks=1, timeout=300, steps=False).exitcode == 0: break if time.time() - start_time < timeout: time.sleep(2) continue assert False, "container is not healthy" - def restart(self, timeout=120, safe=True): + def restart(self, timeout=300, safe=True): """Restart node. """ if safe: @@ -180,7 +180,7 @@ class Cluster(object): self.docker_compose += f" --no-ansi --project-directory \"{docker_compose_project_dir}\" --file \"{docker_compose_file_path}\"" self.lock = threading.Lock() - def shell(self, node, timeout=120): + def shell(self, node, timeout=300): """Returns unique shell terminal to be used. """ if node is None: @@ -193,7 +193,7 @@ class Cluster(object): shell.timeout = timeout return shell - def bash(self, node, timeout=120): + def bash(self, node, timeout=300): """Returns thread-local bash terminal to a specific node. 
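# wait_healthy() above (now with timeout=300) is the usual deadline-polling
# loop; distilled to its shape (simplified, not a drop-in replacement --
# node_responds() is a stand-in for the `select 1` probe):
#
#     deadline = time.time() + timeout
#     while time.time() < deadline:
#         if node_responds():
#             return
#         time.sleep(2)
#     assert False, "container is not healthy"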
:param node: name of the service From 70fb557ff83a78fb0db4afcbee6df97be22e6eee Mon Sep 17 00:00:00 2001 From: MyroTk Date: Mon, 19 Oct 2020 21:49:03 +0200 Subject: [PATCH 136/432] xfails change --- tests/testflows/rbac/regression.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/testflows/rbac/regression.py b/tests/testflows/rbac/regression.py index 2a474cfe40b..b25c1f93e34 100755 --- a/tests/testflows/rbac/regression.py +++ b/tests/testflows/rbac/regression.py @@ -76,17 +76,17 @@ xfails = { [(Fail, issue_15165)], "privileges/distributed table/cluster tests/cluster=:/special cases/insert with table on source table of materialized view privilege granted directly or via role/:": [(Fail, issue_14810)], - "/rbac/views/materialized view/select from implicit target table privilege granted directly or via role/select from implicit target table, privilege granted directly": + "views/materialized view/select from implicit target table privilege granted directly or via role/select from implicit target table, privilege granted directly": [(Fail, ".inner table is not created as expected")], - "/rbac/views/materialized view/insert on target table privilege granted directly or via role/insert on target table, privilege granted through a role": + "views/materialized view/insert on target table privilege granted directly or via role/insert on target table, privilege granted through a role": [(Fail, ".inner table is not created as expected")], - "/rbac/views/materialized view/select from implicit target table privilege granted directly or via role/select from implicit target table, privilege granted through a role": + "views/materialized view/select from implicit target table privilege granted directly or via role/select from implicit target table, privilege granted through a role": [(Fail, ".inner table is not created as expected")], - "/rbac/views/materialized view/insert on target table privilege granted directly or via role/insert on target table, privilege granted directly": + "views/materialized view/insert on target table privilege granted directly or via role/insert on target table, privilege granted directly": [(Fail, ".inner table is not created as expected")], - "/rbac/views/materialized view/select from source table privilege granted directly or via role/select from implicit target table, privilege granted directly": + "views/materialized view/select from source table privilege granted directly or via role/select from implicit target table, privilege granted directly": [(Fail, ".inner table is not created as expected")], - "/rbac/views/materialized view/select from source table privilege granted directly or via role/select from implicit target table, privilege granted through a role": + "views/materialized view/select from source table privilege granted directly or via role/select from implicit target table, privilege granted through a role": [(Fail, ".inner table is not created as expected")], } From 41ac15a71ce70a0fd1f644d02d66c87b2b1e652a Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 20 Oct 2020 00:26:10 +0300 Subject: [PATCH 137/432] fix initial query id --- src/Server/HTTPHandler.cpp | 6 +++-- src/Server/TCPHandler.cpp | 17 ++++++------ tests/integration/README.md | 27 ++++++++++++++++++- .../01526_initial_query_id.reference | 2 ++ .../0_stateless/01526_initial_query_id.sh | 25 +++++++++++++++++ 5 files changed, 66 insertions(+), 11 deletions(-) create mode 100644 tests/queries/0_stateless/01526_initial_query_id.reference create mode 
100755 tests/queries/0_stateless/01526_initial_query_id.sh diff --git a/src/Server/HTTPHandler.cpp b/src/Server/HTTPHandler.cpp index 15934c2dc5a..f2afc2c860f 100644 --- a/src/Server/HTTPHandler.cpp +++ b/src/Server/HTTPHandler.cpp @@ -310,9 +310,11 @@ void HTTPHandler::processQuery( session->release(); }); - std::string query_id = params.get("query_id", ""); - context.setCurrentQueryId(query_id); + // Set the query id supplied by the user, if any. + context.setCurrentQueryId(params.get("query_id", + request.get("X-ClickHouse-Query-Id", ""))); + // Parse the OpenTelemetry traceparent header. if (request.has("traceparent")) { std::string opentelemetry_traceparent = request.get("traceparent"); diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index bc3f674bc31..38cc8faad6d 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -903,14 +903,6 @@ void TCPHandler::receiveQuery() /// Set fields, that are known apriori. client_info.interface = ClientInfo::Interface::TCP; - if (client_info.query_kind == ClientInfo::QueryKind::INITIAL_QUERY) - { - /// 'Current' fields was set at receiveHello. - client_info.initial_user = client_info.current_user; - client_info.initial_query_id = client_info.current_query_id; - client_info.initial_address = client_info.current_address; - } - /// Per query settings are also passed via TCP. /// We need to check them before applying due to they can violate the settings constraints. auto settings_format = (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS) ? SettingsWriteFormat::STRINGS_WITH_FLAGS @@ -999,6 +991,15 @@ void TCPHandler::receiveQuery() // so we have to apply the changes first. query_context->setCurrentQueryId(state.query_id); + // Set parameters of initial query. + if (client_info.query_kind == ClientInfo::QueryKind::INITIAL_QUERY) + { + /// 'Current' fields was set at receiveHello. + client_info.initial_user = client_info.current_user; + client_info.initial_query_id = client_info.current_query_id; + client_info.initial_address = client_info.current_address; + } + /// Sync timeouts on client and server during current query to avoid dangling queries on server /// NOTE: We use settings.send_timeout for the receive timeout and vice versa (change arguments ordering in TimeoutSetter), /// because settings.send_timeout is client-side setting which has opposite meaning on the server side. diff --git a/tests/integration/README.md b/tests/integration/README.md index bc64b686782..0886dc2cfac 100644 --- a/tests/integration/README.md +++ b/tests/integration/README.md @@ -14,7 +14,32 @@ Don't use Docker from your system repository. * [pip](https://pypi.python.org/pypi/pip) and `libpq-dev`. To install: `sudo apt-get install python3-pip libpq-dev zlib1g-dev libcrypto++-dev libssl-dev` * [py.test](https://docs.pytest.org/) testing framework. To install: `sudo -H pip install pytest` -* [docker-compose](https://docs.docker.com/compose/) and additional python libraries. To install: `sudo -H pip install urllib3==1.23 pytest docker-compose==1.22.0 docker dicttoxml kazoo PyMySQL psycopg2==2.7.5 pymongo tzlocal kafka-python protobuf redis aerospike pytest-timeout minio confluent-kafka avro +* [docker-compose](https://docs.docker.com/compose/) and additional python libraries. 
To install: + +``` +sudo -H pip install \ + PyMySQL \ + aerospike \ + avro \ + cassandra-driver \ + confluent-kafka \ + dicttoxml \ + docker \ + docker-compose==1.22.0 \ + grpcio \ + grpcio-tools \ + kafka-python \ + kazoo \ + minio \ + protobuf \ + psycopg2-binary==2.7.5 \ + pymongo \ + pytest \ + pytest-timeout \ + redis \ + tzlocal \ + urllib3 +``` (highly not recommended) If you really want to use OS packages on modern debian/ubuntu instead of "pip": `sudo apt install -y docker docker-compose python3-pytest python3-dicttoxml python3-docker python3-pymysql python3-pymongo python3-tzlocal python3-kazoo python3-psycopg2 kafka-python python3-pytest-timeout python3-minio` diff --git a/tests/queries/0_stateless/01526_initial_query_id.reference b/tests/queries/0_stateless/01526_initial_query_id.reference new file mode 100644 index 00000000000..e8d2c31aa17 --- /dev/null +++ b/tests/queries/0_stateless/01526_initial_query_id.reference @@ -0,0 +1,2 @@ +1 1 +2 1 diff --git a/tests/queries/0_stateless/01526_initial_query_id.sh b/tests/queries/0_stateless/01526_initial_query_id.sh new file mode 100755 index 00000000000..c5459625023 --- /dev/null +++ b/tests/queries/0_stateless/01526_initial_query_id.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -ue + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. "$CURDIR"/../shell_config.sh + +query_id=$(${CLICKHOUSE_CLIENT} -q "select lower(hex(reverse(reinterpretAsString(generateUUIDv4()))))") + +${CLICKHOUSE_CLIENT} -q "select 1 format Null" "--query_id=$query_id" + +${CLICKHOUSE_CURL} \ + --header "X-ClickHouse-Query-Id: $query_id" \ + "http://localhost:8123/" \ + --get \ + --data-urlencode "query=select 1 format Null" + +${CLICKHOUSE_CLIENT} -n -q " +system flush logs; +select interface, initial_query_id = query_id + from system.query_log + where query_id = '$query_id' and type = 'QueryFinish' + order by interface + ; +" + From 14d95867355df38b0240a96a31e14397d23024ab Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Tue, 20 Oct 2020 04:18:12 +0300 Subject: [PATCH 138/432] Update AggregateFunctionRankCorrelation.h --- src/AggregateFunctions/AggregateFunctionRankCorrelation.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionRankCorrelation.h b/src/AggregateFunctions/AggregateFunctionRankCorrelation.h index 4fa545e1a4a..4ce4d7199dc 100644 --- a/src/AggregateFunctions/AggregateFunctionRankCorrelation.h +++ b/src/AggregateFunctions/AggregateFunctionRankCorrelation.h @@ -21,10 +21,6 @@ #include -namespace ErrorCodes -{ -extern const int BAD_ARGUMENTS; -} namespace DB { From f3abb76e5775931f44fcba8420a95357b5343b70 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 20 Oct 2020 11:13:21 +0300 Subject: [PATCH 139/432] disable traceparent header in Arcadia it interferes with the test_clickhouse.TestTracing.test_tracing_via_http_proxy[traceparent] test --- src/Server/HTTPHandler.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/Server/HTTPHandler.cpp b/src/Server/HTTPHandler.cpp index f2afc2c860f..5933800d710 100644 --- a/src/Server/HTTPHandler.cpp +++ b/src/Server/HTTPHandler.cpp @@ -315,6 +315,9 @@ void HTTPHandler::processQuery( request.get("X-ClickHouse-Query-Id", ""))); // Parse the OpenTelemetry traceparent header. + // Disable in Arcadia -- it interferes with the + // test_clickhouse.TestTracing.test_tracing_via_http_proxy[traceparent] test. 
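// For reference, the traceparent value handled in this hunk follows the W3C
// Trace Context format -- four dash-separated hex fields, e.g. (example value
// only):
//
//     00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01
//     version, 16-byte trace-id, 8-byte parent span id, trace flags
//
// The validation itself lives in ClientInfo (renamed to
// parseTraceparentHeader in the next commit of this series).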
+#if !defined(ARCADIA_BUILD) if (request.has("traceparent")) { std::string opentelemetry_traceparent = request.get("traceparent"); @@ -329,6 +332,7 @@ void HTTPHandler::processQuery( context.getClientInfo().opentelemetry_tracestate = request.get("tracestate", ""); } +#endif /// The client can pass a HTTP header indicating supported compression method (gzip or deflate). String http_response_compression_methods = request.get("Accept-Encoding", ""); From 84908df6d8f5fb82c1c468ec621357a339f72578 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 20 Oct 2020 14:35:13 +0300 Subject: [PATCH 140/432] fix setting parent from HTTP traceparent --- programs/client/Client.cpp | 2 +- src/Interpreters/ClientInfo.cpp | 8 ++- src/Interpreters/ClientInfo.h | 11 +++- src/Server/HTTPHandler.cpp | 11 ++-- src/Storages/StorageURL.cpp | 2 +- .../01455_opentelemetry_distributed.reference | 12 ++--- .../01455_opentelemetry_distributed.sh | 53 +++++++++++++------ 7 files changed, 63 insertions(+), 36 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 0cbed999382..87d1282310f 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -2498,7 +2498,7 @@ public: { std::string traceparent = options["opentelemetry-traceparent"].as(); std::string error; - if (!context.getClientInfo().setOpenTelemetryTraceparent( + if (!context.getClientInfo().parseTraceparentHeader( traceparent, error)) { throw Exception(ErrorCodes::BAD_ARGUMENTS, diff --git a/src/Interpreters/ClientInfo.cpp b/src/Interpreters/ClientInfo.cpp index 5570a1bc88d..76f0460815d 100644 --- a/src/Interpreters/ClientInfo.cpp +++ b/src/Interpreters/ClientInfo.cpp @@ -70,7 +70,6 @@ void ClientInfo::write(WriteBuffer & out, const UInt64 server_protocol_revision) // are random and will probably require the full length anyway. writeBinary(opentelemetry_trace_id, out); writeBinary(opentelemetry_span_id, out); - writeBinary(opentelemetry_parent_span_id, out); writeBinary(opentelemetry_tracestate, out); writeBinary(opentelemetry_trace_flags, out); } @@ -144,7 +143,6 @@ void ClientInfo::read(ReadBuffer & in, const UInt64 client_protocol_revision) { readBinary(opentelemetry_trace_id, in); readBinary(opentelemetry_span_id, in); - readBinary(opentelemetry_parent_span_id, in); readBinary(opentelemetry_tracestate, in); readBinary(opentelemetry_trace_flags, in); @@ -163,7 +161,7 @@ void ClientInfo::setInitialQuery() client_name = (DBMS_NAME " ") + client_name; } -bool ClientInfo::setOpenTelemetryTraceparent(const std::string & traceparent, +bool ClientInfo::parseTraceparentHeader(const std::string & traceparent, std::string & error) { uint8_t version = -1; @@ -215,13 +213,13 @@ bool ClientInfo::setOpenTelemetryTraceparent(const std::string & traceparent, opentelemetry_trace_id = static_cast<__uint128_t>(trace_id_high) << 64 | trace_id_low; - opentelemetry_parent_span_id = trace_parent; + opentelemetry_span_id = trace_parent; opentelemetry_trace_flags = trace_flags; return true; } -std::string ClientInfo::getOpenTelemetryTraceparentForChild() const +std::string ClientInfo::composeTraceparentHeader() const { // This span is a parent for its children, so we specify this span_id as a // parent id. diff --git a/src/Interpreters/ClientInfo.h b/src/Interpreters/ClientInfo.h index 5b5c3b400eb..53ecab4cd00 100644 --- a/src/Interpreters/ClientInfo.h +++ b/src/Interpreters/ClientInfo.h @@ -103,8 +103,15 @@ public: /// Initialize parameters on client initiating query. 
void setInitialQuery(); - bool setOpenTelemetryTraceparent(const std::string & traceparent, std::string & error); - std::string getOpenTelemetryTraceparentForChild() const; + // Parse/compose OpenTelemetry traceparent header. + // Note that these functions use span_id field, not parent_span_id, same as + // in native protocol. The incoming traceparent corresponds to the upstream + // trace span, and the outgoing traceparent corresponds to our current span. + // We use the same ClientInfo structure first for incoming span, and then + // for our span: when we switch, we use old span_id as parent_span_id, and + // generate a new span_id (currently this happens in Context::setQueryId()). + bool parseTraceparentHeader(const std::string & traceparent, std::string & error); + std::string composeTraceparentHeader() const; private: void fillOSUserHostNameAndVersionInfo(); diff --git a/src/Server/HTTPHandler.cpp b/src/Server/HTTPHandler.cpp index 5933800d710..c2e6c0fdf4b 100644 --- a/src/Server/HTTPHandler.cpp +++ b/src/Server/HTTPHandler.cpp @@ -310,10 +310,6 @@ void HTTPHandler::processQuery( session->release(); }); - // Set the query id supplied by the user, if any. - context.setCurrentQueryId(params.get("query_id", - request.get("X-ClickHouse-Query-Id", ""))); - // Parse the OpenTelemetry traceparent header. // Disable in Arcadia -- it interferes with the // test_clickhouse.TestTracing.test_tracing_via_http_proxy[traceparent] test. @@ -322,7 +318,7 @@ void HTTPHandler::processQuery( { std::string opentelemetry_traceparent = request.get("traceparent"); std::string error; - if (!context.getClientInfo().setOpenTelemetryTraceparent( + if (!context.getClientInfo().parseTraceparentHeader( opentelemetry_traceparent, error)) { throw Exception(ErrorCodes::BAD_REQUEST_PARAMETER, @@ -334,6 +330,11 @@ void HTTPHandler::processQuery( } #endif + // Set the query id supplied by the user, if any, and also update the + // OpenTelemetry fields. + context.setCurrentQueryId(params.get("query_id", + request.get("X-ClickHouse-Query-Id", ""))); + /// The client can pass a HTTP header indicating supported compression method (gzip or deflate). 
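// For example (illustrative only -- the enable_http_compression setting must
// also be on for the response to actually be compressed):
//
//     curl -sS -H 'Accept-Encoding: gzip' --get \
//         --data-urlencode 'query=SELECT 1' \
//         'http://localhost:8123/?enable_http_compression=1' | gunzip
//
// The header is read and mapped to a CompressionMethod just below.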
String http_response_compression_methods = request.get("Accept-Encoding", ""); CompressionMethod http_response_compression_method = CompressionMethod::None; diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index dbe718c1f0c..55c16496ba5 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -74,7 +74,7 @@ namespace if (client_info.opentelemetry_trace_id) { header.emplace_back("traceparent", - client_info.getOpenTelemetryTraceparentForChild()); + client_info.composeTraceparentHeader()); if (!client_info.opentelemetry_tracestate.empty()) { diff --git a/tests/queries/0_stateless/01455_opentelemetry_distributed.reference b/tests/queries/0_stateless/01455_opentelemetry_distributed.reference index e0eadd91a97..420bb17ae8b 100644 --- a/tests/queries/0_stateless/01455_opentelemetry_distributed.reference +++ b/tests/queries/0_stateless/01455_opentelemetry_distributed.reference @@ -1,10 +1,10 @@ ===http=== -1 -4 -1 +{"total spans":"4","unique spans":"4","unique non-zero parent spans":"2"} +{"initial query spans with proper parent":"1"} +{"unique non-empty tracestate values":"1"} ===native=== -1 -2 -1 +{"total spans":"2","unique spans":"2","unique non-zero parent spans":"2"} +{"initial query spans with proper parent":"1"} +{"unique non-empty tracestate values":"1"} ===sampled=== OK diff --git a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh index 446f713c11e..e3740fd690d 100755 --- a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh +++ b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh @@ -6,19 +6,39 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function check_log { -${CLICKHOUSE_CLIENT} -nq " +${CLICKHOUSE_CLIENT} --format=JSONEachRow -nq " system flush logs; --- Check the number of spans with given trace id, to verify it was propagated. -select count(*) +-- Check the number of query spans with given trace id, to verify it was +-- propagated. +select count(*) "'"'"total spans"'"'", + uniqExact(span_id) "'"'"unique spans"'"'", + uniqExactIf(parent_span_id, parent_span_id != 0) + "'"'"unique non-zero parent spans"'"'" from system.opentelemetry_log where trace_id = reinterpretAsUUID(reverse(unhex('$trace_id'))) and operation_name = 'query' ; +-- Also check that the initial query span in ClickHouse has proper parent span. +select count(*) "'"'"initial query spans with proper parent"'"'" + from + (select *, attribute_name, attribute_value + from system.opentelemetry_log + array join attribute.names as attribute_name, + attribute.values as attribute_value) o + join system.query_log on query_id = o.attribute_value + where trace_id = reinterpretAsUUID(reverse(unhex('$trace_id'))) + and operation_name = 'query' + and parent_span_id = reinterpretAsUInt64(unhex('73')) + and o.attribute_name = 'clickhouse.query_id' + and is_initial_query + and type = 'QueryFinish' + ; + -- Check that the tracestate header was propagated. It must have exactly the -- same non-empty value for all 'query' spans in this trace. -select count(distinct value) +select uniqExact(value) "'"'"unique non-empty tracestate values"'"'" from system.opentelemetry_log array join attribute.names as name, attribute.values as value where @@ -34,14 +54,15 @@ select count(distinct value) echo "===http===" trace_id=$(${CLICKHOUSE_CLIENT} -q "select lower(hex(reverse(reinterpretAsString(generateUUIDv4()))))") -# Check that the HTTP traceparent is read, and then passed through `remote` table function. 
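# A note on the trace_id round-trip in check_log above: the test builds the id
# as lower(hex(reverse(reinterpretAsString(generateUUIDv4())))) and the log
# query undoes it with reinterpretAsUUID(reverse(unhex('$trace_id'))), so the
# byte order matches on both sides. Roughly, the identity being relied on is
# (illustrative pseudo-query, not part of the test):
#
#     SELECT reinterpretAsUUID(reverse(unhex(
#         lower(hex(reverse(reinterpretAsString(u))))))) = u
#     -- expected to hold for any UUID u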
-# We expect 4 queries, because there are two DESC TABLE queries for the shard. -# This is bug-ish, see https://github.com/ClickHouse/ClickHouse/issues/14228 +# Check that the HTTP traceparent is read, and then passed through `remote` +# table function. We expect 4 queries -- one initial, one SELECT and two +# DESC TABLE. Two DESC TABLE instead of one looks like a bug, see the issue: +# https://github.com/ClickHouse/ClickHouse/issues/14228 ${CLICKHOUSE_CURL} \ - --header "traceparent: 00-$trace_id-0000000000000010-01" \ + --header "traceparent: 00-$trace_id-0000000000000073-01" \ --header "tracestate: some custom state" "http://localhost:8123/" \ --get \ - --data-urlencode "query=select 1 from remote('127.0.0.2', system, one)" + --data-urlencode "query=select 1 from remote('127.0.0.2', system, one) format Null" check_log @@ -52,14 +73,14 @@ echo "===native===" trace_id=$(${CLICKHOUSE_CLIENT} -q "select lower(hex(reverse(reinterpretAsString(generateUUIDv4()))))") ${CLICKHOUSE_CLIENT} \ - --opentelemetry-traceparent "00-$trace_id-0000000000000020-02" \ + --opentelemetry-traceparent "00-$trace_id-0000000000000073-01" \ --opentelemetry-tracestate "another custom state" \ - --query "select * from url('http://127.0.0.2:8123/?query=select%201', CSV, 'a int')" + --query "select * from url('http://127.0.0.2:8123/?query=select%201%20format%20Null', CSV, 'a int')" check_log -# Test sampled tracing. The traces should be started with the specified probability, -# only for initial queries. +# Test sampled tracing. The traces should be started with the specified +# probability, only for initial queries. echo "===sampled===" query_id=$(${CLICKHOUSE_CLIENT} -q "select lower(hex(reverse(reinterpretAsString(generateUUIDv4()))))") @@ -71,8 +92,8 @@ do --query "select 1 from remote('127.0.0.2', system, one) format Null" \ & - # clickhouse-client is slow to start, so run them in parallel, but not too - # much. + # clickhouse-client is slow to start (initialization of DateLUT), so run + # several clients in parallel, but not too many. 
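# For the sampled check below: with 200 queries at probability 0.1 the number
# of sampled traces is Binomial(200, 0.1) -- mean 20, stddev
# sqrt(200*0.1*0.9) ~= 4.2 -- so the accepted window of (5, 35) sits roughly
# 3.5 sigma either side of the mean.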
if [[ $((i % 10)) -eq 0 ]] then wait @@ -84,7 +105,7 @@ ${CLICKHOUSE_CLIENT} -q "system flush logs" ${CLICKHOUSE_CLIENT} -q " with count(*) as c -- expect 200 * 0.1 = 20 sampled events on average - select if(c > 10 and c < 30, 'OK', 'fail: ' || toString(c)) + select if(c > 5 and c < 35, 'OK', 'fail: ' || toString(c)) from system.opentelemetry_log array join attribute.names as name, attribute.values as value where name = 'clickhouse.query_id' From 242ec7e56c72c44cd26eacf22eb4a902e8b7c0b2 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 20 Oct 2020 15:12:06 +0300 Subject: [PATCH 141/432] make some tests faster --- docker/test/performance-comparison/compare.sh | 1 + tests/performance/constant_column_search.xml | 3 ++- tests/performance/trim_urls.xml | 2 +- tests/performance/website.xml | 6 +++--- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 34ddbc37eec..769a4f8f735 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -1090,6 +1090,7 @@ case "$stage" in "restart") numactl --hardware ||: lscpu ||: + sudo dmidecode -t 4 ||: time restart ;& "run_tests") diff --git a/tests/performance/constant_column_search.xml b/tests/performance/constant_column_search.xml index 62baab14e3c..2f90844cb4a 100644 --- a/tests/performance/constant_column_search.xml +++ b/tests/performance/constant_column_search.xml @@ -5,6 +5,7 @@ hits_100m_single + hits_10m_single @@ -36,7 +37,7 @@ - + diff --git a/tests/performance/trim_urls.xml b/tests/performance/trim_urls.xml index 276a12bc570..075984682f9 100644 --- a/tests/performance/trim_urls.xml +++ b/tests/performance/trim_urls.xml @@ -20,5 +20,5 @@ - SELECT count() FROM hits_100m_single WHERE NOT ignore({func}URL)) + SELECT ignore({func}URL)) FROM hits_100m_single LIMIT 50000000 FORMAT Null
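The trim_urls.xml change above swaps the aggregating form `SELECT count() ... WHERE NOT ignore(...)` for `SELECT ignore(...) ... LIMIT n FORMAT Null`, which still forces evaluation of the function under test but drops the aggregation and result output from the measured time. The `{func}` substitution presumably expands to a function name plus its opening parenthesis, which is why the parentheses in the query look unbalanced. One concrete instantiation of the pattern (cutWWW picked here for illustration, not named by the patch):

    <query>SELECT ignore(cutWWW(URL)) FROM hits_100m_single LIMIT 50000000 FORMAT Null</query>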
diff --git a/tests/performance/website.xml b/tests/performance/website.xml index 65b3d79b5f1..2127a71c55c 100644 --- a/tests/performance/website.xml +++ b/tests/performance/website.xml @@ -37,7 +37,7 @@ SELECT UserID, count() FROM {table} GROUP BY UserID ORDER BY count() DESC LIMIT 10 SELECT UserID, SearchPhrase, count() FROM {table} GROUP BY UserID, SearchPhrase ORDER BY count() DESC LIMIT 10 SELECT UserID, SearchPhrase, count() FROM {table} GROUP BY UserID, SearchPhrase LIMIT 10 -SELECT UserID, toMinute(EventTime) AS m, SearchPhrase, count() FROM {table} GROUP BY UserID, m, SearchPhrase ORDER BY count() DESC LIMIT 10 +SELECT UserID, toMinute(EventTime) AS m, SearchPhrase, count() FROM hits_10m_single GROUP BY UserID, m, SearchPhrase ORDER BY count() DESC LIMIT 10 SELECT count() FROM hits_100m_single WHERE UserID = 12345678901234567890 SELECT count() FROM hits_100m_single WHERE URL LIKE '%metrika%' SELECT SearchPhrase, any(URL), count() AS c FROM hits_100m_single WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10 @@ -52,8 +52,8 @@ SELECT SearchEngineID, ClientIP, count() AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_100m_single WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10 SELECT WatchID, ClientIP, count() AS c, sum(Refresh), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10 SELECT WatchID, ClientIP, count() AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m_single GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10 -SELECT URL, count() AS c FROM {table} GROUP BY URL ORDER BY c DESC LIMIT 10 -SELECT 1, URL, count() AS c FROM {table} GROUP BY 1, URL ORDER BY c DESC LIMIT 10 +SELECT URL, count() AS c FROM hits_10m_single GROUP BY URL ORDER BY c DESC LIMIT 10 +SELECT 1, URL, count() AS c FROM hits_10m_single GROUP BY 1, URL ORDER BY c DESC LIMIT 10 SELECT ClientIP AS x, x - 1, x - 2, x - 3, count() AS c FROM hits_100m_single GROUP BY x, x - 1, x - 2, x - 3 ORDER BY c DESC LIMIT 10 SELECT URL, count() AS PageViews FROM {table} WHERE CounterID = 34 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT DontCountHits AND NOT Refresh AND notEmpty(URL) GROUP BY URL ORDER BY PageViews DESC LIMIT 10 SETTINGS max_threads = 1 SELECT Title, count() AS PageViews FROM {table} WHERE CounterID = 34 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT DontCountHits AND NOT Refresh AND notEmpty(Title) GROUP BY Title ORDER BY PageViews DESC LIMIT 10 From 62a0ee8255ffc3404b2a1865a509646a629a8ed4 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 20 Oct 2020 15:29:46 +0300 Subject: [PATCH 142/432] fix assertion in table function file() --- src/TableFunctions/ITableFunctionFileLike.cpp | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/TableFunctions/ITableFunctionFileLike.cpp b/src/TableFunctions/ITableFunctionFileLike.cpp index f876da02fd1..1349c166474 100644 --- a/src/TableFunctions/ITableFunctionFileLike.cpp +++ b/src/TableFunctions/ITableFunctionFileLike.cpp @@ -23,6 +23,7 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int INCORRECT_FILE_NAME; + extern const int BAD_ARGUMENTS; } void ITableFunctionFileLike::parseArguments(const ASTPtr & ast_function, const Context & context) @@ -46,15 +47,18 @@ void ITableFunctionFileLike::parseArguments(const ASTPtr & ast_function, const C if (args.size() == 2 && getName() == 
"file") { - if (format != "Distributed") - throw Exception("Table function '" + getName() + "' allows 2 arguments only for Distributed format.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + if (format == "Distributed") + return; + throw Exception("Table function '" + getName() + "' allows 2 arguments only for Distributed format.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); } - else if (args.size() != 3 && args.size() != 4) + + if (args.size() != 3 && args.size() != 4) throw Exception("Table function '" + getName() + "' requires 3 or 4 arguments: filename, format, structure and compression method (default auto).", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - if (args.size() > 2) - structure = args[2]->as().value.safeGet(); + structure = args[2]->as().value.safeGet(); + if (structure.empty()) + throw Exception("Table structure is empty", ErrorCodes::BAD_ARGUMENTS); if (args.size() == 4) compression_method = args[3]->as().value.safeGet(); From 439bbd98610983087cdcd324ba9c7cbc9890fef2 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 20 Oct 2020 18:30:46 +0300 Subject: [PATCH 143/432] Blind performance fix --- src/Storages/MergeTree/SimpleMergeSelector.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Storages/MergeTree/SimpleMergeSelector.cpp b/src/Storages/MergeTree/SimpleMergeSelector.cpp index cbb24d1494e..65d9aaecdab 100644 --- a/src/Storages/MergeTree/SimpleMergeSelector.cpp +++ b/src/Storages/MergeTree/SimpleMergeSelector.cpp @@ -90,6 +90,9 @@ double mapPiecewiseLinearToUnit(double value, double min, double max) /** Is allowed to merge parts in range with specific properties. */ +#if defined(__clang__) + ALWAYS_INLINE +#endif bool allow( double sum_size, double max_size, From d7ea9b6d93f95caa2bdd5cd43f6656cc0f8043d3 Mon Sep 17 00:00:00 2001 From: Mikhail Filimonov Date: Tue, 20 Oct 2020 17:57:53 +0200 Subject: [PATCH 144/432] Add setTemporaryStorage to clickhouse-local to make OPTIMIZE work --- programs/local/LocalServer.cpp | 4 ++ .../01527_clickhouse_local_optimize.reference | 16 +++++ .../01527_clickhouse_local_optimize.sh | 60 +++++++++++++++++++ 3 files changed, 80 insertions(+) create mode 100644 tests/queries/0_stateless/01527_clickhouse_local_optimize.reference create mode 100755 tests/queries/0_stateless/01527_clickhouse_local_optimize.sh diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 41da477152c..bfc7cac7fc1 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -152,6 +152,10 @@ void LocalServer::tryInitPath() path += '/'; context->setPath(path); + + context->setTemporaryStorage(path + "tmp"); + context->setFlagsPath(path + "flags"); + context->setUserFilesPath(""); // user's files are everywhere } diff --git a/tests/queries/0_stateless/01527_clickhouse_local_optimize.reference b/tests/queries/0_stateless/01527_clickhouse_local_optimize.reference new file mode 100644 index 00000000000..ea0ec6fe765 --- /dev/null +++ b/tests/queries/0_stateless/01527_clickhouse_local_optimize.reference @@ -0,0 +1,16 @@ +202001_1_1_0 1 2020-01-01 String +202001_4_4_0 1 2020-01-01 String +202002_2_2_0 2 2020-02-02 Another string +202002_5_5_0 2 2020-02-02 Another string +202003_3_3_0 3 2020-03-03 One more string +202003_6_6_0 3 2020-03-03 One more string +202001_1_1_0 4 2020-01-02 String for first partition +202001_4_4_0 4 2020-01-02 String for first partition +202001_1_4_1 1 2020-01-01 String +202001_1_4_1 1 2020-01-01 String +202002_2_5_1 2 2020-02-02 Another string +202002_2_5_1 2 2020-02-02 Another string 
+202003_3_6_1 3 2020-03-03 One more string +202003_3_6_1 3 2020-03-03 One more string +202001_1_4_1 4 2020-01-02 String for first partition +202001_1_4_1 4 2020-01-02 String for first partition diff --git a/tests/queries/0_stateless/01527_clickhouse_local_optimize.sh b/tests/queries/0_stateless/01527_clickhouse_local_optimize.sh new file mode 100755 index 00000000000..e51d53c7524 --- /dev/null +++ b/tests/queries/0_stateless/01527_clickhouse_local_optimize.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. "$CURDIR"/../shell_config.sh + +WORKING_FOLDER="${CLICKHOUSE_TMP}/01527_clickhouse_local_optimize" + +rm -rf "${WORKING_FOLDER}" +mkdir -p "${WORKING_FOLDER}/metadata/local/" + +## 1. Imagine we want to process this file: +cat < "${WORKING_FOLDER}/data.csv" +1,2020-01-01,"String" +2,2020-02-02,"Another string" +3,2020-03-03,"One more string" +4,2020-01-02,"String for first partition" +EOF + +## 2. that is the metadata for the table we want to fill +## schema should match the schema of the table from server +## (the easiest way is just to copy it from the server) + +## I've added sleepEachRow(0.5) here just to mimic slow insert +cat < "${WORKING_FOLDER}/metadata/local/test.sql" +ATTACH TABLE local.test (id UInt64, d Date, s String, x MATERIALIZED sleepEachRow(0.5)) Engine=MergeTree ORDER BY id PARTITION BY toYYYYMM(d); +EOF + +## 3a. that is the metadata for the input file we want to read +## it should match the structure of source file + +## use stdin to read from pipe +cat < "${WORKING_FOLDER}/metadata/local/stdin.sql" +ATTACH TABLE local.stdin (id UInt64, d Date, s String) Engine=File(CSV, stdin); +EOF + +## 3b. Instead of stdin you can use file path +cat < "${WORKING_FOLDER}/metadata/local/data_csv.sql" +ATTACH TABLE local.data_csv (id UInt64, d Date, s String) Engine=File(CSV, '${WORKING_FOLDER}/data.csv'); +EOF + +## All preparations done, +## the rest is simple: + +# option a (if 3a used) with pipe / reading stdin +cat "${WORKING_FOLDER}/data.csv" | ${CLICKHOUSE_LOCAL} --query "INSERT INTO local.test SELECT * FROM local.stdin" -- --path="${WORKING_FOLDER}" + +# option b (if 3b used) 0 with filepath +${CLICKHOUSE_LOCAL} --query "INSERT INTO local.test SELECT * FROM local.data_csv" -- --path="${WORKING_FOLDER}" + +# now you can check what was inserted (i did both options so i have doubled data) +${CLICKHOUSE_LOCAL} --query "SELECT _part,* FROM local.test ORDER BY id, _part" -- --path="${WORKING_FOLDER}" + +# But you can't do OPTIMIZE (local will die with coredump) :) That would be too good +clickhouse-local --query "OPTIMIZE TABLE local.test FINAL" -- --path="${WORKING_FOLDER}" + +# now you can check what was inserted (i did both options so i have doubled data) +${CLICKHOUSE_LOCAL} --query "SELECT _part,* FROM local.test ORDER BY id, _part" -- --path="${WORKING_FOLDER}" + +## now you can upload those parts to a server (in detached subfolder) and attach them. 
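# For instance (hypothetical part name and layout, shown for illustration
# only): copy a merged part such as 202001_1_4_1 from this working folder's
# data/local/test/ directory into the server table's detached/ directory,
# then run on the server:
#     ALTER TABLE local.test ATTACH PART '202001_1_4_1'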
+rm -rf "${WORKING_FOLDER}" \ No newline at end of file From 8a0435e17829efc554d2c3d04620dff9da6ee805 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 20 Oct 2020 20:47:10 +0300 Subject: [PATCH 145/432] some fixes for split build --- .../0_stateless/00965_logs_level_bugfix.sh | 16 ++++++++-------- .../00965_send_logs_level_concurrent_queries.sh | 4 ++-- ...ickhouse_server_start_with_embedded_config.sh | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/queries/0_stateless/00965_logs_level_bugfix.sh b/tests/queries/0_stateless/00965_logs_level_bugfix.sh index 69fc34e6988..d0126c19eb9 100755 --- a/tests/queries/0_stateless/00965_logs_level_bugfix.sh +++ b/tests/queries/0_stateless/00965_logs_level_bugfix.sh @@ -3,18 +3,18 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_BINARY client --send_logs_level="trace" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Trace" | head -n 1 +${CLICKHOUSE_CLIENT} --send_logs_level="trace" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Trace" | head -n 1 echo "." -$CLICKHOUSE_BINARY client --send_logs_level="debug" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Debug" | head -n 1 +${CLICKHOUSE_CLIENT} --send_logs_level="debug" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Debug" | head -n 1 echo "." -$CLICKHOUSE_BINARY client --send_logs_level="information" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Information" | head -n 1 +${CLICKHOUSE_CLIENT} --send_logs_level="information" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Information" | head -n 1 echo "." -$CLICKHOUSE_BINARY client --send_logs_level="error" --query="SELECT throwIf(1)" 2>&1 | awk '{ print $8 }' | grep "Error" | head -n 1 +${CLICKHOUSE_CLIENT} --send_logs_level="error" --query="SELECT throwIf(1)" 2>&1 | awk '{ print $8 }' | grep "Error" | head -n 1 echo "-" -$CLICKHOUSE_BINARY client --send_logs_level="debug" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Trace" | head -n 1 +${CLICKHOUSE_CLIENT} --send_logs_level="debug" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Trace" | head -n 1 echo "." -$CLICKHOUSE_BINARY client --send_logs_level="information" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Debug\|Trace" | head -n 1 +${CLICKHOUSE_CLIENT} --send_logs_level="information" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Debug\|Trace" | head -n 1 echo "." -$CLICKHOUSE_BINARY client --send_logs_level="error" --query="SELECT throwIf(1)" 2>&1 | awk '{ print $8 }' | grep "Debug\|Trace\|Information" | head -n 1 +${CLICKHOUSE_CLIENT} --send_logs_level="error" --query="SELECT throwIf(1)" 2>&1 | awk '{ print $8 }' | grep "Debug\|Trace\|Information" | head -n 1 echo "." -$CLICKHOUSE_BINARY client --send_logs_level="None" --query="SELECT throwIf(1)" 2>&1 | awk '{ print $8 }' | grep "Debug\|Trace\|Information\|Error" | head -n 1 +${CLICKHOUSE_CLIENT} --send_logs_level="None" --query="SELECT throwIf(1)" 2>&1 | awk '{ print $8 }' | grep "Debug\|Trace\|Information\|Error" | head -n 1 diff --git a/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.sh b/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.sh index cd654bd6581..5e24e820995 100755 --- a/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.sh +++ b/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.sh @@ -4,8 +4,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh for _ in {1..10}; do - $CLICKHOUSE_BINARY client --send_logs_level="trace" --query="SELECT * from numbers(1000000);" > /dev/null 2> /dev/null & - $CLICKHOUSE_BINARY client --send_logs_level="information" --query="SELECT * from numbers(1000000);" 2>&1 | awk '{ print $8 }' | grep "Debug\|Trace" & + ${CLICKHOUSE_CLIENT} --send_logs_level="trace" --query="SELECT * from numbers(1000000);" > /dev/null 2> /dev/null & + ${CLICKHOUSE_CLIENT} --send_logs_level="information" --query="SELECT * from numbers(1000000);" 2>&1 | awk '{ print $8 }' | grep "Debug\|Trace" & done wait diff --git a/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh b/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh index 68198ec6e16..945be0a1324 100755 --- a/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh +++ b/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh @@ -10,7 +10,7 @@ echo "Starting clickhouse-server" $PORT -$CLICKHOUSE_BINARY server -- --tcp_port "$CLICKHOUSE_PORT_TCP" > server.log 2>&1 & +$CLICKHOUSE_BINARY-server -- --tcp_port "$CLICKHOUSE_PORT_TCP" > server.log 2>&1 & PID=$! function finish { From 7f7e6e809d7e64a1e448eedb8e1f303e4899ae9c Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Tue, 20 Oct 2020 20:48:55 +0300 Subject: [PATCH 146/432] debug and -Og again --- docker/test/fasttest/run.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 2a04d59252b..f4e5befd52c 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -147,9 +147,9 @@ CMAKE_LIBS_CONFIG=( "-DUSE_STATIC_LIBRARIES=0" "-DSPLIT_SHARED_LIBRARIES=1" "-DCLICKHOUSE_SPLIT_BINARY=1" -# "-DCMAKE_BUILD_TYPE=Debug" -# "-DCMAKE_C_FLAGS=-Og" -# "-DCMAKE_CXX_FLAGS=-Og" + "-DCMAKE_BUILD_TYPE=Debug" + "-DCMAKE_C_FLAGS_ADD=-Og" + "-DCMAKE_CXX_FLAGS_ADD=-Og" ) # TODO remove this? we don't use ccache anyway. 
An option would be to download it From cc91eb474ea92932d0fcdf82dc8ee11ff8c294e9 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Tue, 20 Oct 2020 21:12:37 +0300 Subject: [PATCH 147/432] Add test --- .../configs/remote_servers.xml | 12 ++++ .../test_disabled_mysql_server/test.py | 61 +++++++++++++++++++ 2 files changed, 73 insertions(+) create mode 100644 tests/integration/test_disabled_mysql_server/configs/remote_servers.xml create mode 100644 tests/integration/test_disabled_mysql_server/test.py diff --git a/tests/integration/test_disabled_mysql_server/configs/remote_servers.xml b/tests/integration/test_disabled_mysql_server/configs/remote_servers.xml new file mode 100644 index 00000000000..de8e5865f12 --- /dev/null +++ b/tests/integration/test_disabled_mysql_server/configs/remote_servers.xml @@ -0,0 +1,12 @@ + + + + + + node1 + 9000 + + + + + diff --git a/tests/integration/test_disabled_mysql_server/test.py b/tests/integration/test_disabled_mysql_server/test.py new file mode 100644 index 00000000000..34dd094af3d --- /dev/null +++ b/tests/integration/test_disabled_mysql_server/test.py @@ -0,0 +1,61 @@ +import time +import contextlib +import pymysql.cursors +import pytest +import os +import subprocess + +from helpers.client import QueryRuntimeException +from helpers.cluster import ClickHouseCluster, get_docker_compose_path +from helpers.network import PartitionManager + +cluster = ClickHouseCluster(__file__) +clickhouse_node = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_mysql=True) + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +class MySQLNodeInstance: + def __init__(self, user='root', password='clickhouse', hostname='127.0.0.1', port=3308): + self.user = user + self.port = port + self.hostname = hostname + self.password = password + self.mysql_connection = None # lazy init + + def alloc_connection(self): + if self.mysql_connection is None: + self.mysql_connection = pymysql.connect(user=self.user, password=self.password, host=self.hostname, + port=self.port, autocommit=True) + return self.mysql_connection + + def query(self, execution_query): + with self.alloc_connection().cursor() as cursor: + cursor.execute(execution_query) + + def close(self): + if self.mysql_connection is not None: + self.mysql_connection.close() + + +def test_disabled_mysql_server(started_cluster): + with contextlib.closing(MySQLNodeInstance()) as mysql_node: + mysql_node.query("CREATE DATABASE test_db;") + mysql_node.query("CREATE TABLE test_db.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;") + + with PartitionManager() as pm: + clickhouse_node.query("CREATE DATABASE test_db ENGINE = MySQL('mysql1:3306', 'test_db', 'root', 'clickhouse')") + + + pm._add_rule({'source': clickhouse_node.ip_address, 'destination_port': 3306, 'action': 'DROP'}) + clickhouse_node.query("SELECT * FROM system.parts") + clickhouse_node.query("SELECT * FROM system.mutations") + clickhouse_node.query("SELECT * FROM system.graphite_retentions") + + clickhouse_node.query("DROP DATABASE test_db") From 8097c696de7d1d99904a50a36ea4a587a4b04ccf Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 20 Oct 2020 23:05:20 +0300 Subject: [PATCH 148/432] Trying another fix --- src/Storages/MergeTree/SimpleMergeSelector.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/Storages/MergeTree/SimpleMergeSelector.cpp b/src/Storages/MergeTree/SimpleMergeSelector.cpp index 
65d9aaecdab..ad8328cb7eb 100644 --- a/src/Storages/MergeTree/SimpleMergeSelector.cpp +++ b/src/Storages/MergeTree/SimpleMergeSelector.cpp @@ -90,9 +90,6 @@ double mapPiecewiseLinearToUnit(double value, double min, double max) /** Is allowed to merge parts in range with specific properties. */ -#if defined(__clang__) - ALWAYS_INLINE -#endif bool allow( double sum_size, double max_size, @@ -104,7 +101,9 @@ bool allow( // std::cerr << "sum_size: " << sum_size << "\n"; /// Map size to 0..1 using logarithmic scale - double size_normalized = mapPiecewiseLinearToUnit(log1p(sum_size), log1p(settings.min_size_to_lower_base), log1p(settings.max_size_to_lower_base)); + /// Use log(1 + x) instead of log1p(x) because our x variables (sum_size and settings) are always integer. + /// Also log1p seems to be slow and significantly affect performance of merges assignment. + double size_normalized = mapPiecewiseLinearToUnit(log(1 + sum_size), log(1 + settings.min_size_to_lower_base), log(1 + settings.max_size_to_lower_base)); // std::cerr << "size_normalized: " << size_normalized << "\n"; From 7e4494e2685aec72a84420b7d6aa15f412c9564a Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 17 Oct 2020 01:17:38 +0300 Subject: [PATCH 149/432] Add a test for dictGet in sharding_key after dictionary reload --- ...dist_sharding_key_dictGet_reload.reference | 4 +++ ...01527_dist_sharding_key_dictGet_reload.sql | 26 +++++++++++++++++++ 2 files changed, 30 insertions(+) create mode 100644 tests/queries/0_stateless/01527_dist_sharding_key_dictGet_reload.reference create mode 100644 tests/queries/0_stateless/01527_dist_sharding_key_dictGet_reload.sql diff --git a/tests/queries/0_stateless/01527_dist_sharding_key_dictGet_reload.reference b/tests/queries/0_stateless/01527_dist_sharding_key_dictGet_reload.reference new file mode 100644 index 00000000000..1a3c5705a7e --- /dev/null +++ b/tests/queries/0_stateless/01527_dist_sharding_key_dictGet_reload.reference @@ -0,0 +1,4 @@ +1 +2 +2 +1 diff --git a/tests/queries/0_stateless/01527_dist_sharding_key_dictGet_reload.sql b/tests/queries/0_stateless/01527_dist_sharding_key_dictGet_reload.sql new file mode 100644 index 00000000000..d8f6704b892 --- /dev/null +++ b/tests/queries/0_stateless/01527_dist_sharding_key_dictGet_reload.sql @@ -0,0 +1,26 @@ +set allow_nondeterministic_optimize_skip_unused_shards=1; +set optimize_skip_unused_shards=1; +set force_optimize_skip_unused_shards=2; + +drop database if exists db_01527_ranges; +drop table if exists dist_01527; +drop table if exists data_01527; + +create database db_01527_ranges; + +create table data_01527 engine=Memory() as select toUInt64(number) key from numbers(2); +create table dist_01527 as data_01527 engine=Distributed('test_cluster_two_shards', currentDatabase(), data_01527, dictGetUInt64('db_01527_ranges.dict', 'shard', key)); + +create table db_01527_ranges.data engine=Memory() as select number key, number shard from numbers(100); +create dictionary db_01527_ranges.dict (key UInt64, shard UInt64) primary key key source(clickhouse(host '127.0.0.1' port 9000 table 'data' db 'db_01527_ranges' user 'default' password '')) lifetime(0) layout(hashed()); +system reload dictionary db_01527_ranges.dict; + +select _shard_num from dist_01527 where key=0; +select _shard_num from dist_01527 where key=1; + +drop table db_01527_ranges.data sync; +create table db_01527_ranges.data engine=Memory() as select number key, number+1 shard from numbers(100); +system reload dictionary db_01527_ranges.dict; + +select _shard_num from dist_01527 
where key=0; +select _shard_num from dist_01527 where key=1; From 3559e3355c11f32d699e191c9bbd8105fbae4dd8 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 17 Oct 2020 00:14:49 +0300 Subject: [PATCH 150/432] Do not cache dictionary for dictGet*/dictHas* There are places where ExpressionActionsPtr is cached (StorageDistributed caching it for sharding_key_expr and optimize_skip_unused_shards), and if the dictionary will be cached within "query" then cached ExpressionActionsPtr will always have first version of the query and the dictionary will not be updated after reload. For example this will fix dictGet in sharding_key (and similar places, i.e. when the function context is stored permanently) Fixes: 01527_dist_sharding_key_dictGet_reload --- src/Functions/FunctionsExternalDictionaries.h | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/Functions/FunctionsExternalDictionaries.h b/src/Functions/FunctionsExternalDictionaries.h index 4affcdfa7e0..b5cc1f9c820 100644 --- a/src/Functions/FunctionsExternalDictionaries.h +++ b/src/Functions/FunctionsExternalDictionaries.h @@ -82,13 +82,13 @@ public: std::shared_ptr getDictionary(const String & dictionary_name) { - auto dict = std::atomic_load(&dictionary); - if (dict) - return dict; String resolved_name = DatabaseCatalog::instance().resolveDictionaryName(dictionary_name); - dict = external_loader.getDictionary(resolved_name); - context.checkAccess(AccessType::dictGet, dict->getDatabaseOrNoDatabaseTag(), dict->getDictionaryID().getTableName()); - std::atomic_store(&dictionary, dict); + auto dict = external_loader.getDictionary(resolved_name); + if (!access_checked) + { + context.checkAccess(AccessType::dictGet, dict->getDatabaseOrNoDatabaseTag(), dict->getDictionaryID().getTableName()); + access_checked = true; + } return dict; } @@ -122,6 +122,8 @@ private: const Context & context; const ExternalDictionariesLoader & external_loader; mutable std::shared_ptr dictionary; + /// Access cannot be not granted, since in this case checkAccess() will throw and access_checked will not be updated. + std::atomic access_checked = false; }; From 006855def4a6ae8d98885e3a5db493d8c8cfc2e9 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Wed, 21 Oct 2020 04:07:41 +0300 Subject: [PATCH 151/432] Trigger CI. 
--- tests/queries/0_stateless/01526_max_untracked_memory.sh | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/queries/0_stateless/01526_max_untracked_memory.sh b/tests/queries/0_stateless/01526_max_untracked_memory.sh index e2cd069d303..2623d175d82 100755 --- a/tests/queries/0_stateless/01526_max_untracked_memory.sh +++ b/tests/queries/0_stateless/01526_max_untracked_memory.sh @@ -8,7 +8,6 @@ query="select randomPrintableASCII(number) from numbers(1000)" # (but actually even more) min_trace_entries=2 -# # TCP # do not use _, they should be escaped for LIKE @@ -18,9 +17,7 @@ ${CLICKHOUSE_CLIENT} -q "SYSTEM FLUSH LOGS" query_id_tcp="$(${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT query_id FROM system.query_log WHERE query LIKE '%$query_id_tcp_prefix%'")" ${CLICKHOUSE_CLIENT} -q "SELECT count()>=$min_trace_entries FROM system.trace_log WHERE query_id = '$query_id_tcp' AND abs(size) < 4e6 AND event_time >= now() - interval 1 hour" -# # HTTP -# # query_id cannot be longer then 28 bytes query_id_http="01526_http_${RANDOM}_$$" From 9ffa7bd0b6812c7ca379da53133b2d4033bb1d01 Mon Sep 17 00:00:00 2001 From: Mikhail Filimonov Date: Wed, 21 Oct 2020 09:10:40 +0200 Subject: [PATCH 152/432] remove sleep and make tests more deterministic --- .../01527_clickhouse_local_optimize.reference | 24 +++------ .../01527_clickhouse_local_optimize.sh | 49 ++++++++----------- 2 files changed, 28 insertions(+), 45 deletions(-) diff --git a/tests/queries/0_stateless/01527_clickhouse_local_optimize.reference b/tests/queries/0_stateless/01527_clickhouse_local_optimize.reference index ea0ec6fe765..e7315547841 100644 --- a/tests/queries/0_stateless/01527_clickhouse_local_optimize.reference +++ b/tests/queries/0_stateless/01527_clickhouse_local_optimize.reference @@ -1,16 +1,8 @@ -202001_1_1_0 1 2020-01-01 String -202001_4_4_0 1 2020-01-01 String -202002_2_2_0 2 2020-02-02 Another string -202002_5_5_0 2 2020-02-02 Another string -202003_3_3_0 3 2020-03-03 One more string -202003_6_6_0 3 2020-03-03 One more string -202001_1_1_0 4 2020-01-02 String for first partition -202001_4_4_0 4 2020-01-02 String for first partition -202001_1_4_1 1 2020-01-01 String -202001_1_4_1 1 2020-01-01 String -202002_2_5_1 2 2020-02-02 Another string -202002_2_5_1 2 2020-02-02 Another string -202003_3_6_1 3 2020-03-03 One more string -202003_3_6_1 3 2020-03-03 One more string -202001_1_4_1 4 2020-01-02 String for first partition -202001_1_4_1 4 2020-01-02 String for first partition +1 2020-01-01 String +2 2020-02-02 Another string +3 2020-03-03 One more string +4 2020-01-02 String for first partition +1 2020-01-01 String +2 2020-02-02 Another string +3 2020-03-03 One more string +4 2020-01-02 String for first partition diff --git a/tests/queries/0_stateless/01527_clickhouse_local_optimize.sh b/tests/queries/0_stateless/01527_clickhouse_local_optimize.sh index e51d53c7524..13e8c847e71 100755 --- a/tests/queries/0_stateless/01527_clickhouse_local_optimize.sh +++ b/tests/queries/0_stateless/01527_clickhouse_local_optimize.sh @@ -3,13 +3,18 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh -WORKING_FOLDER="${CLICKHOUSE_TMP}/01527_clickhouse_local_optimize" +WORKING_FOLDER_01527="${CLICKHOUSE_TMP}/01527_clickhouse_local_optimize" -rm -rf "${WORKING_FOLDER}" -mkdir -p "${WORKING_FOLDER}/metadata/local/" +rm -rf "${WORKING_FOLDER_01527}" +mkdir -p "${WORKING_FOLDER_01527}/metadata/local/" + +# OPTIMIZE was crashing due to lack of temporary volume in local +${CLICKHOUSE_LOCAL} --query "drop database if exists d; create database d; create table d.t engine MergeTree order by a as select 1 a; optimize table d.t final" -- --path="${WORKING_FOLDER_01527}" + +# Some extra (unrealted) scenarios of clickhouse-local usage. ## 1. Imagine we want to process this file: -cat < "${WORKING_FOLDER}/data.csv" +cat < "${WORKING_FOLDER_01527}/data.csv" 1,2020-01-01,"String" 2,2020-02-02,"Another string" 3,2020-03-03,"One more string" @@ -19,42 +24,28 @@ EOF ## 2. that is the metadata for the table we want to fill ## schema should match the schema of the table from server ## (the easiest way is just to copy it from the server) - -## I've added sleepEachRow(0.5) here just to mimic slow insert -cat < "${WORKING_FOLDER}/metadata/local/test.sql" -ATTACH TABLE local.test (id UInt64, d Date, s String, x MATERIALIZED sleepEachRow(0.5)) Engine=MergeTree ORDER BY id PARTITION BY toYYYYMM(d); +cat < "${WORKING_FOLDER_01527}/metadata/local/test.sql" +ATTACH TABLE local.test (id UInt64, d Date, s String) Engine=MergeTree ORDER BY id PARTITION BY toYYYYMM(d); EOF ## 3a. that is the metadata for the input file we want to read ## it should match the structure of source file - ## use stdin to read from pipe -cat < "${WORKING_FOLDER}/metadata/local/stdin.sql" +cat < "${WORKING_FOLDER_01527}/metadata/local/stdin.sql" ATTACH TABLE local.stdin (id UInt64, d Date, s String) Engine=File(CSV, stdin); EOF ## 3b. 
Instead of stdin you can use file path -cat < "${WORKING_FOLDER}/metadata/local/data_csv.sql" -ATTACH TABLE local.data_csv (id UInt64, d Date, s String) Engine=File(CSV, '${WORKING_FOLDER}/data.csv'); +cat < "${WORKING_FOLDER_01527}/metadata/local/data_csv.sql" +ATTACH TABLE local.data_csv (id UInt64, d Date, s String) Engine=File(CSV, '${WORKING_FOLDER_01527}/data.csv'); EOF -## All preparations done, -## the rest is simple: +## All preparations done, the rest is simple: -# option a (if 3a used) with pipe / reading stdin -cat "${WORKING_FOLDER}/data.csv" | ${CLICKHOUSE_LOCAL} --query "INSERT INTO local.test SELECT * FROM local.stdin" -- --path="${WORKING_FOLDER}" +# option a (if 3a used) with pipe / reading stdin (truncate was added for the test) +cat "${WORKING_FOLDER_01527}/data.csv" | ${CLICKHOUSE_LOCAL} --query "INSERT INTO local.test SELECT * FROM local.stdin; SELECT * FROM local.test ORDER BY id; TRUNCATE TABLE local.test;" -- --path="${WORKING_FOLDER_01527}" -# option b (if 3b used) 0 with filepath -${CLICKHOUSE_LOCAL} --query "INSERT INTO local.test SELECT * FROM local.data_csv" -- --path="${WORKING_FOLDER}" +# option b (if 3b used) 0 with filepath (truncate was added for the test) +${CLICKHOUSE_LOCAL} --query "INSERT INTO local.test SELECT * FROM local.data_csv; SELECT * FROM local.test ORDER BY id; TRUNCATE TABLE local.test;" -- --path="${WORKING_FOLDER_01527}" -# now you can check what was inserted (i did both options so i have doubled data) -${CLICKHOUSE_LOCAL} --query "SELECT _part,* FROM local.test ORDER BY id, _part" -- --path="${WORKING_FOLDER}" - -# But you can't do OPTIMIZE (local will die with coredump) :) That would be too good -clickhouse-local --query "OPTIMIZE TABLE local.test FINAL" -- --path="${WORKING_FOLDER}" - -# now you can check what was inserted (i did both options so i have doubled data) -${CLICKHOUSE_LOCAL} --query "SELECT _part,* FROM local.test ORDER BY id, _part" -- --path="${WORKING_FOLDER}" - -## now you can upload those parts to a server (in detached subfolder) and attach them. 
-rm -rf "${WORKING_FOLDER}" \ No newline at end of file +rm -rf "${WORKING_FOLDER_01527}" \ No newline at end of file From b45d42bb9c291e284017d9dd6dba331e36457045 Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 21 Oct 2020 11:35:36 +0300 Subject: [PATCH 153/432] Maybe devirtualization can help --- src/Storages/MergeTree/SimpleMergeSelector.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/SimpleMergeSelector.h b/src/Storages/MergeTree/SimpleMergeSelector.h index 9aeb73a40a8..fe57c40320a 100644 --- a/src/Storages/MergeTree/SimpleMergeSelector.h +++ b/src/Storages/MergeTree/SimpleMergeSelector.h @@ -6,7 +6,7 @@ namespace DB { -class SimpleMergeSelector : public IMergeSelector +class SimpleMergeSelector final : public IMergeSelector { public: struct Settings From e090a119fc9961ed9e721d24b0436b43b369a2d4 Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 21 Oct 2020 13:11:26 +0300 Subject: [PATCH 154/432] Simplify TTL move test --- tests/integration/test_ttl_move/test.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/tests/integration/test_ttl_move/test.py b/tests/integration/test_ttl_move/test.py index 751d15b8313..cd906cb04e1 100644 --- a/tests/integration/test_ttl_move/test.py +++ b/tests/integration/test_ttl_move/test.py @@ -900,7 +900,10 @@ def test_concurrent_alter_with_ttl_move(started_cluster, name, engine): def alter_update(num): for i in range(num): - node1.query("ALTER TABLE {} UPDATE number = number + 1 WHERE 1".format(name)) + try: + node1.query("ALTER TABLE {} UPDATE number = number + 1 WHERE 1".format(name)) + except: + pass def alter_modify_ttl(num): for i in range(num): @@ -927,17 +930,17 @@ def test_concurrent_alter_with_ttl_move(started_cluster, name, engine): p = Pool(15) tasks = [] for i in range(5): - tasks.append(p.apply_async(insert, (100,))) - tasks.append(p.apply_async(alter_move, (100,))) - tasks.append(p.apply_async(alter_update, (100,))) - tasks.append(p.apply_async(alter_modify_ttl, (100,))) - tasks.append(p.apply_async(optimize_table, (100,))) + tasks.append(p.apply_async(insert, (30,))) + tasks.append(p.apply_async(alter_move, (30,))) + tasks.append(p.apply_async(alter_update, (30,))) + tasks.append(p.apply_async(alter_modify_ttl, (30,))) + tasks.append(p.apply_async(optimize_table, (30,))) for task in tasks: task.get(timeout=120) assert node1.query("SELECT 1") == "1\n" - assert node1.query("SELECT COUNT() FROM {}".format(name)) == "500\n" + assert node1.query("SELECT COUNT() FROM {}".format(name)) == "150\n" finally: node1.query("DROP TABLE IF EXISTS {name} NO DELAY".format(name=name)) From a104c01cd9e0f9a4ef10b2c06fe6958469924159 Mon Sep 17 00:00:00 2001 From: zhang2014 Date: Wed, 21 Oct 2020 20:17:37 +0800 Subject: [PATCH 155/432] Add mysql binlog file check util --- utils/CMakeLists.txt | 1 + utils/check-mysql-binlog/CMakeLists.txt | 2 + utils/check-mysql-binlog/main.cpp | 162 ++++++++++++++++++++++++ 3 files changed, 165 insertions(+) create mode 100644 utils/check-mysql-binlog/CMakeLists.txt create mode 100644 utils/check-mysql-binlog/main.cpp diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt index b4408a298c3..0987d64abed 100644 --- a/utils/CMakeLists.txt +++ b/utils/CMakeLists.txt @@ -30,6 +30,7 @@ if (NOT DEFINED ENABLE_UTILS OR ENABLE_UTILS) add_subdirectory (checksum-for-compressed-block) add_subdirectory (db-generator) add_subdirectory (wal-dump) + add_subdirectory (check-mysql-binlog) endif () if (ENABLE_CODE_QUALITY) diff --git 
a/utils/check-mysql-binlog/CMakeLists.txt b/utils/check-mysql-binlog/CMakeLists.txt new file mode 100644 index 00000000000..b1a72650ee9 --- /dev/null +++ b/utils/check-mysql-binlog/CMakeLists.txt @@ -0,0 +1,2 @@ +add_executable(check-mysql-binlog main.cpp) +target_link_libraries(check-mysql-binlog PRIVATE dbms boost::program_options) diff --git a/utils/check-mysql-binlog/main.cpp b/utils/check-mysql-binlog/main.cpp new file mode 100644 index 00000000000..4ec40ac41cc --- /dev/null +++ b/utils/check-mysql-binlog/main.cpp @@ -0,0 +1,162 @@ +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +static DB::MySQLReplication::BinlogEventPtr parseSingleEventBody( + DB::MySQLReplication::EventHeader & header, DB::ReadBuffer & payload, + std::shared_ptr & last_table_map_event, bool exist_checksum) +{ + DB::MySQLReplication::BinlogEventPtr event; + DB::ReadBufferPtr limit_read_buffer = std::make_shared(payload, header.event_size - 19, false); + DB::ReadBufferPtr event_payload = limit_read_buffer; + + if (exist_checksum) + event_payload = std::make_shared(*limit_read_buffer); + + switch (header.type) + { + case DB::MySQLReplication::FORMAT_DESCRIPTION_EVENT: + { + event = std::make_shared(std::move(header)); + event->parseEvent(*event_payload); + break; + } + case DB::MySQLReplication::ROTATE_EVENT: + { + event = std::make_shared(std::move(header)); + event->parseEvent(*event_payload); + break; + } + case DB::MySQLReplication::QUERY_EVENT: + { + event = std::make_shared(std::move(header)); + event->parseEvent(*event_payload); + + auto query = std::static_pointer_cast(event); + switch (query->typ) + { + case DB::MySQLReplication::QUERY_EVENT_MULTI_TXN_FLAG: + case DB::MySQLReplication::QUERY_EVENT_XA: + { + event = std::make_shared(std::move(query->header)); + break; + } + default: + break; + } + break; + } + case DB::MySQLReplication::XID_EVENT: + { + event = std::make_shared(std::move(header)); + event->parseEvent(*event_payload); + break; + } + case DB::MySQLReplication::TABLE_MAP_EVENT: + { + event = std::make_shared(std::move(header)); + event->parseEvent(*event_payload); + last_table_map_event = std::static_pointer_cast(event); + break; + } + case DB::MySQLReplication::WRITE_ROWS_EVENT_V1: + case DB::MySQLReplication::WRITE_ROWS_EVENT_V2: + { + event = std::make_shared(last_table_map_event, std::move(header)); + event->parseEvent(*event_payload); + break; + } + case DB::MySQLReplication::DELETE_ROWS_EVENT_V1: + case DB::MySQLReplication::DELETE_ROWS_EVENT_V2: + { + event = std::make_shared(last_table_map_event, std::move(header)); + event->parseEvent(*event_payload); + break; + } + case DB::MySQLReplication::UPDATE_ROWS_EVENT_V1: + case DB::MySQLReplication::UPDATE_ROWS_EVENT_V2: + { + event = std::make_shared(last_table_map_event, std::move(header)); + event->parseEvent(*event_payload); + break; + } + case DB::MySQLReplication::GTID_EVENT: + { + event = std::make_shared(std::move(header)); + event->parseEvent(*event_payload); + break; + } + default: + { + event = std::make_shared(std::move(header)); + event->parseEvent(*event_payload); + break; + } + } + + return event; +} + +static int checkBinLogFile(const std::string & bin_path, bool exist_checksum) +{ + DB::ReadBufferFromFile in(bin_path); + DB::assertString("\xfe\x62\x69\x6e", in); /// magic number + + DB::MySQLReplication::BinlogEventPtr last_event; + std::shared_ptr last_header; + std::shared_ptr table_map; + + try + { + while (!in.eof()) + { + last_header = std::make_shared(); + 
last_header->parse(in); + last_event = parseSingleEventBody(*last_header, in, table_map, exist_checksum); + } + } + catch (...) + { + std::cerr << "Unable to parse MySQL binlog event. Code: " << DB::getCurrentExceptionCode() << ", Exception message: " + << DB::getCurrentExceptionMessage(false) << std::endl << ", Previous event: " << std::endl; + last_event->dump(std::cerr); + std::cerr << std::endl << ", Event header: " << std::endl; + last_header->dump(std::cerr); + std::cerr << std::endl; + return DB::getCurrentExceptionCode(); + } + + std::cout << "Check passed. " << std::endl << "No exception was thrown." << std::endl << "The last binlog event: " << std::endl; + last_event->dump(std::cout); + std::cout << std::endl; + return 0; +} + + +int main(int argc, char ** argv) +{ + boost::program_options::options_description desc("Allowed options"); + desc.add_options()("help,h", "Produce help message"); + desc.add_options()("disable_checksum", "Disable checksums in binlog files."); + + boost::program_options::variables_map options; + boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), options); + + if (options.count("help") || argc < 2) + { + std::cout << "Usage: " << argv[0] << " mysql_binlog_file" << std::endl; + std::cout << desc << std::endl; + return 1; + } + + return checkBinLogFile(argv[argc - 1], !options.count("disable_checksum")); +} From 0a928f25fc9c310406e693dbe22ff4168f14621e Mon Sep 17 00:00:00 2001 From: Vitaliy Zakaznikov Date: Wed, 21 Oct 2020 09:07:04 -0400 Subject: [PATCH 156/432] Converting test tests/queries/0_stateless/01446_json_strings_each_row to a shell script. --- .../01446_json_strings_each_row.reference | 2 - .../01446_json_strings_each_row.sh | 44 +++++++++++++++++++ .../01446_json_strings_each_row.sql | 38 ---------------- 3 files changed, 44 insertions(+), 40 deletions(-) create mode 100755 tests/queries/0_stateless/01446_json_strings_each_row.sh delete mode 100644 tests/queries/0_stateless/01446_json_strings_each_row.sql diff --git a/tests/queries/0_stateless/01446_json_strings_each_row.reference b/tests/queries/0_stateless/01446_json_strings_each_row.reference index 812026534ea..583c6468e2d 100644 --- a/tests/queries/0_stateless/01446_json_strings_each_row.reference +++ b/tests/queries/0_stateless/01446_json_strings_each_row.reference @@ -8,10 +8,8 @@ {"name":"c","c":"1"} 3 {"row":{"a":"1"}} -{"progress":{"read_rows":"1","read_bytes":"1","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}} 4 {"row":{"a":"1"}} -{"progress":{"read_rows":"1","read_bytes":"1","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}} 5 {"v1":"first","v2":"1","v3":"2","v4":"0"} {"v1":"second","v2":"2","v3":"0","v4":"6"} diff --git a/tests/queries/0_stateless/01446_json_strings_each_row.sh b/tests/queries/0_stateless/01446_json_strings_each_row.sh new file mode 100755 index 00000000000..9b6c8d66bc0 --- /dev/null +++ b/tests/queries/0_stateless/01446_json_strings_each_row.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. 
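For context on the checker in PATCH 155 just above: a v4 binlog file is the 4-byte magic "\xfe\x62\x69\x6e" followed by back-to-back events, each opening with a fixed 19-byte header whose event_size covers header, body, and (unless disabled) a trailing CRC32 checksum, hence the `header.event_size - 19` limit buffer in the tool. A self-contained sketch of decoding just that header (field layout per the MySQL replication protocol; the struct name is ours, and a little-endian host is assumed, as the tool itself assumes):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Fixed 19-byte v4 event header; all integers are little-endian on the wire.
    struct BinlogEventHeader
    {
        uint32_t timestamp;   // bytes 0..3
        uint8_t  type;        // byte  4
        uint32_t server_id;   // bytes 5..8
        uint32_t event_size;  // bytes 9..12, includes these 19 header bytes
        uint32_t log_pos;     // bytes 13..16, end position within the file
        uint16_t flags;       // bytes 17..18
    };

    // Returns false if fewer than 19 bytes are available.
    bool parseHeader(const unsigned char * data, size_t size, BinlogEventHeader & out)
    {
        if (size < 19)
            return false;
        std::memcpy(&out.timestamp,  data + 0,  4);
        out.type = data[4];
        std::memcpy(&out.server_id,  data + 5,  4);
        std::memcpy(&out.event_size, data + 9,  4);
        std::memcpy(&out.log_pos,    data + 13, 4);
        std::memcpy(&out.flags,      data + 17, 2);
        return true;
    }

    int main()
    {
        // A fabricated header: type 0x0f (FORMAT_DESCRIPTION_EVENT), size 103.
        const unsigned char raw[19] = {0, 0, 0, 0, 0x0f, 1, 0, 0, 0, 103, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        BinlogEventHeader header{};
        if (parseHeader(raw, sizeof(raw), header))
            std::printf("type=%u event_size=%u\n", header.type, header.event_size);
    }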
"$CURDIR"/../shell_config.sh + +echo "DROP TABLE IF EXISTS test_table;" | ${CLICKHOUSE_CLIENT} +echo "DROP TABLE IF EXISTS test_table_2;" | ${CLICKHOUSE_CLIENT} +echo "SELECT 1;" | ${CLICKHOUSE_CLIENT} +# Check JSONStringsEachRow Output +echo "CREATE TABLE test_table (value UInt8, name String) ENGINE = MergeTree() ORDER BY value;" | ${CLICKHOUSE_CLIENT} +echo "INSERT INTO test_table VALUES (1, 'a'), (2, 'b'), (3, 'c');" | ${CLICKHOUSE_CLIENT} +echo "SELECT * FROM test_table FORMAT JSONStringsEachRow;" | ${CLICKHOUSE_CLIENT} +echo "SELECT 2;" | ${CLICKHOUSE_CLIENT} +# Check Totals +echo "SELECT name, count() AS c FROM test_table GROUP BY name WITH TOTALS ORDER BY name FORMAT JSONStringsEachRow;" | ${CLICKHOUSE_CLIENT} +echo "SELECT 3;" | ${CLICKHOUSE_CLIENT} +# Check JSONStringsEachRowWithProgress Output +echo "SELECT 1 as a FROM system.one FORMAT JSONStringsEachRowWithProgress;" | ${CLICKHOUSE_CLIENT} | grep -v progress +echo "SELECT 4;" | ${CLICKHOUSE_CLIENT} +# Check Totals +echo "SELECT 1 as a FROM system.one GROUP BY a WITH TOTALS ORDER BY a FORMAT JSONStringsEachRowWithProgress;" | ${CLICKHOUSE_CLIENT} | grep -v progress +echo "DROP TABLE IF EXISTS test_table;" | ${CLICKHOUSE_CLIENT} +echo "SELECT 5;" | ${CLICKHOUSE_CLIENT} +# Check JSONStringsEachRow Input +echo "CREATE TABLE test_table (v1 String, v2 UInt8, v3 DEFAULT v2 * 16, v4 UInt8 DEFAULT 8) ENGINE = MergeTree() ORDER BY v2;" | ${CLICKHOUSE_CLIENT} +echo 'INSERT INTO test_table FORMAT JSONStringsEachRow {"v1": "first", "v2": "1", "v3": "2", "v4": "NULL"} {"v1": "second", "v2": "2", "v3": "null", "v4": "6"};' | ${CLICKHOUSE_CLIENT} +echo "SELECT * FROM test_table FORMAT JSONStringsEachRow;" | ${CLICKHOUSE_CLIENT} +echo "TRUNCATE TABLE test_table;" | ${CLICKHOUSE_CLIENT} +echo "SELECT 6;" | ${CLICKHOUSE_CLIENT} +# Check input_format_null_as_default = 1 +echo 'INSERT INTO test_table FORMAT JSONStringsEachRow {"v1": "first", "v2": "1", "v3": "2", "v4": "ᴺᵁᴸᴸ"} {"v1": "second", "v2": "2", "v3": "null", "v4": "6"};' | ${CLICKHOUSE_CLIENT} --input_format_null_as_default=1 +echo "SELECT * FROM test_table FORMAT JSONStringsEachRow;" | ${CLICKHOUSE_CLIENT} +echo "TRUNCATE TABLE test_table;" | ${CLICKHOUSE_CLIENT} +echo "SELECT 7;" | ${CLICKHOUSE_CLIENT} +# Check Nested +echo "CREATE TABLE test_table_2 (v1 UInt8, n Nested(id UInt8, name String)) ENGINE = MergeTree() ORDER BY v1;" | ${CLICKHOUSE_CLIENT} +cat << END | ${CLICKHOUSE_CLIENT} +INSERT INTO test_table_2 FORMAT JSONStringsEachRow {"v1": "16", "n.id": "[15, 16, 17]", "n.name": "['first', 'second', 'third']"}; +END +echo "SELECT * FROM test_table_2 FORMAT JSONStringsEachRow;" | ${CLICKHOUSE_CLIENT} +echo "TRUNCATE TABLE test_table_2;" | ${CLICKHOUSE_CLIENT} + +echo "DROP TABLE IF EXISTS test_table;" | ${CLICKHOUSE_CLIENT} +echo "DROP TABLE IF EXISTS test_table_2;" | ${CLICKHOUSE_CLIENT} diff --git a/tests/queries/0_stateless/01446_json_strings_each_row.sql b/tests/queries/0_stateless/01446_json_strings_each_row.sql deleted file mode 100644 index 98bd3e3ab47..00000000000 --- a/tests/queries/0_stateless/01446_json_strings_each_row.sql +++ /dev/null @@ -1,38 +0,0 @@ -DROP TABLE IF EXISTS test_table; -DROP TABLE IF EXISTS test_table_2; -SELECT 1; -/* Check JSONStringsEachRow Output */ -CREATE TABLE test_table (value UInt8, name String) ENGINE = MergeTree() ORDER BY value; -INSERT INTO test_table VALUES (1, 'a'), (2, 'b'), (3, 'c'); -SELECT * FROM test_table FORMAT JSONStringsEachRow; -SELECT 2; -/* Check Totals */ -SELECT name, count() AS c FROM test_table GROUP BY name WITH TOTALS 
ORDER BY name FORMAT JSONStringsEachRow; -SELECT 3; -/* Check JSONStringsEachRowWithProgress Output */ -SELECT 1 as a FROM system.one FORMAT JSONStringsEachRowWithProgress; -SELECT 4; -/* Check Totals */ -SELECT 1 as a FROM system.one GROUP BY a WITH TOTALS ORDER BY a FORMAT JSONStringsEachRowWithProgress; -DROP TABLE IF EXISTS test_table; -SELECT 5; -/* Check JSONStringsEachRow Input */ -CREATE TABLE test_table (v1 String, v2 UInt8, v3 DEFAULT v2 * 16, v4 UInt8 DEFAULT 8) ENGINE = MergeTree() ORDER BY v2; -INSERT INTO test_table FORMAT JSONStringsEachRow {"v1": "first", "v2": "1", "v3": "2", "v4": "NULL"} {"v1": "second", "v2": "2", "v3": "null", "v4": "6"}; -SELECT * FROM test_table FORMAT JSONStringsEachRow; -TRUNCATE TABLE test_table; -SELECT 6; -/* Check input_format_null_as_default = 1 */ -SET input_format_null_as_default = 1; -INSERT INTO test_table FORMAT JSONStringsEachRow {"v1": "first", "v2": "1", "v3": "2", "v4": "ᴺᵁᴸᴸ"} {"v1": "second", "v2": "2", "v3": "null", "v4": "6"}; -SELECT * FROM test_table FORMAT JSONStringsEachRow; -TRUNCATE TABLE test_table; -SELECT 7; -/* Check Nested */ -CREATE TABLE test_table_2 (v1 UInt8, n Nested(id UInt8, name String)) ENGINE = MergeTree() ORDER BY v1; -INSERT INTO test_table_2 FORMAT JSONStringsEachRow {"v1": "16", "n.id": "[15, 16, 17]", "n.name": "['first', 'second', 'third']"}; -SELECT * FROM test_table_2 FORMAT JSONStringsEachRow; -TRUNCATE TABLE test_table_2; - -DROP TABLE IF EXISTS test_table; -DROP TABLE IF EXISTS test_table_2; From 847e9ca9f6ae597ab4b957fbc380f500e1fc43db Mon Sep 17 00:00:00 2001 From: Vladimir Chebotarev Date: Wed, 21 Oct 2020 16:36:03 +0300 Subject: [PATCH 157/432] Fixed flappy `test_multiple_disks`. --- tests/integration/test_multiple_disks/test.py | 58 +++++++++---------- 1 file changed, 28 insertions(+), 30 deletions(-) diff --git a/tests/integration/test_multiple_disks/test.py b/tests/integration/test_multiple_disks/test.py index 496b34f22f0..209b6539c52 100644 --- a/tests/integration/test_multiple_disks/test.py +++ b/tests/integration/test_multiple_disks/test.py @@ -286,7 +286,7 @@ def test_query_parser(start_cluster): node1.query( "ALTER TABLE table_with_normal_policy MODIFY SETTING storage_policy='moving_jbod_with_external'") finally: - node1.query("DROP TABLE IF EXISTS table_with_normal_policy") + node1.query("DROP TABLE IF EXISTS table_with_normal_policy SYNC") @pytest.mark.parametrize("name,engine", [ @@ -327,7 +327,7 @@ def test_alter_policy(start_cluster, name, engine): name=name)) == "jbods_with_external\n" finally: - node1.query("DROP TABLE IF EXISTS {name}".format(name=name)) + node1.query(f"DROP TABLE IF EXISTS {name} SYNC") def get_random_string(length): @@ -355,9 +355,7 @@ def test_no_warning_about_zero_max_data_part_size(start_cluster): ORDER BY tuple() SETTINGS storage_policy='small_jbod_with_external' """) - node.query(""" - DROP TABLE default.test_warning_table - """) + node.query("DROP TABLE default.test_warning_table SYNC") log = get_log(node) assert not re.search("Warning.*Volume.*special_warning_zero_volume", log) assert not re.search("Warning.*Volume.*special_warning_default_volume", log) @@ -398,7 +396,7 @@ def test_round_robin(start_cluster, name, engine): assert used_disks[0] != used_disks[1] assert used_disks[2] == used_disks[0] finally: - node1.query("DROP TABLE IF EXISTS {}".format(name)) + node1.query(f"DROP TABLE IF EXISTS {name} SYNC") @pytest.mark.parametrize("name,engine", [ @@ -425,7 +423,7 @@ def test_max_data_part_size(start_cluster, name, engine): assert len(used_disks) == 1 
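        # All inserted parts exceed the jbod volume's max_data_part_size, so the single part must have landed on the 'external' volume.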
assert used_disks[0] == 'external' finally: - node1.query("DROP TABLE IF EXISTS {}".format(name)) + node1.query(f"DROP TABLE IF EXISTS {name} SYNC") @pytest.mark.parametrize("name,engine", [ @@ -478,7 +476,7 @@ def test_jbod_overflow(start_cluster, name, engine): assert all(disk == 'external' for disk in disks_for_merges) finally: - node1.query("DROP TABLE IF EXISTS {}".format(name)) + node1.query(f"DROP TABLE IF EXISTS {name} SYNC") @pytest.mark.parametrize("name,engine", [ @@ -524,7 +522,7 @@ def test_background_move(start_cluster, name, engine): assert path.startswith("/external") finally: - node1.query("DROP TABLE IF EXISTS {name}".format(name=name)) + node1.query(f"DROP TABLE IF EXISTS {name} SYNC") @pytest.mark.parametrize("name,engine", [ @@ -611,7 +609,7 @@ def test_start_stop_moves(start_cluster, name, engine): assert used_disks[0] == 'external' finally: - node1.query("DROP TABLE IF EXISTS {name}".format(name=name)) + node1.query(f"DROP TABLE IF EXISTS {name} SYNC") def get_path_for_part_from_part_log(node, table, part_name): @@ -699,7 +697,7 @@ def test_alter_move(start_cluster, name, engine): assert node1.query("SELECT COUNT() FROM {}".format(name)) == "4\n" finally: - node1.query("DROP TABLE IF EXISTS {name}".format(name=name)) + node1.query(f"DROP TABLE IF EXISTS {name} SYNC") @pytest.mark.parametrize("volume_or_disk", [ @@ -748,7 +746,7 @@ def test_alter_move_half_of_partition(start_cluster, volume_or_disk): assert node1.query("SELECT COUNT() FROM {}".format(name)) == "2\n" finally: - node1.query("DROP TABLE IF EXISTS {name}".format(name=name)) + node1.query(f"DROP TABLE IF EXISTS {name} SYNC") @pytest.mark.parametrize("volume_or_disk", [ @@ -792,7 +790,7 @@ def test_alter_double_move_partition(start_cluster, volume_or_disk): volume_or_disk=volume_or_disk)) finally: - node1.query("DROP TABLE IF EXISTS {name}".format(name=name)) + node1.query(f"DROP TABLE IF EXISTS {name} SYNC") def produce_alter_move(node, name): @@ -876,7 +874,7 @@ def test_concurrent_alter_move(start_cluster, name, engine): assert node1.query("SELECT 1") == "1\n" assert node1.query("SELECT COUNT() FROM {}".format(name)) == "500\n" finally: - node1.query("DROP TABLE IF EXISTS {name}".format(name=name)) + node1.query(f"DROP TABLE IF EXISTS {name} SYNC") @pytest.mark.parametrize("name,engine", [ @@ -929,7 +927,7 @@ def test_concurrent_alter_move_and_drop(start_cluster, name, engine): assert node1.query("SELECT 1") == "1\n" finally: - node1.query("DROP TABLE IF EXISTS {name}".format(name=name)) + node1.query(f"DROP TABLE IF EXISTS {name} SYNC") @pytest.mark.parametrize("name,engine", [ @@ -960,7 +958,7 @@ def test_detach_attach(start_cluster, name, engine): assert node1.query("SELECT count() FROM {}".format(name)).strip() == "5" finally: - node1.query("DROP TABLE IF EXISTS {name}".format(name=name)) + node1.query(f"DROP TABLE IF EXISTS {name} SYNC") @pytest.mark.parametrize("name,engine", [ @@ -1006,7 +1004,7 @@ def test_mutate_to_another_disk(start_cluster, name, engine): finally: - node1.query("DROP TABLE IF EXISTS {name}".format(name=name)) + node1.query(f"DROP TABLE IF EXISTS {name} SYNC") @pytest.mark.parametrize("name,engine", [ @@ -1064,7 +1062,7 @@ def test_concurrent_alter_modify(start_cluster, name, engine): assert node1.query("SELECT COUNT() FROM {}".format(name)) == "100\n" finally: - node1.query("DROP TABLE IF EXISTS {name}".format(name=name)) + node1.query(f"DROP TABLE IF EXISTS {name} SYNC") def test_simple_replication_and_moves(start_cluster): @@ -1131,7 +1129,7 @@ def 
test_simple_replication_and_moves(start_cluster): set(disks2) == set(["jbod1", "external"]) finally: for node in [node1, node2]: - node.query("DROP TABLE IF EXISTS replicated_table_for_moves") + node.query("DROP TABLE IF EXISTS replicated_table_for_moves SYNC") def test_download_appropriate_disk(start_cluster): @@ -1165,7 +1163,7 @@ def test_download_appropriate_disk(start_cluster): finally: for node in [node1, node2]: - node.query("DROP TABLE IF EXISTS replicated_table_for_download") + node.query("DROP TABLE IF EXISTS replicated_table_for_download SYNC") def test_rename(start_cluster): @@ -1202,9 +1200,9 @@ def test_rename(start_cluster): node1.query("SELECT COUNT() FROM default.renaming_table1") finally: - node1.query("DROP TABLE IF EXISTS default.renaming_table") - node1.query("DROP TABLE IF EXISTS default.renaming_table1") - node1.query("DROP TABLE IF EXISTS test.renaming_table2") + node1.query("DROP TABLE IF EXISTS default.renaming_table SYNC") + node1.query("DROP TABLE IF EXISTS default.renaming_table1 SYNC") + node1.query("DROP TABLE IF EXISTS test.renaming_table2 SYNC") def test_freeze(start_cluster): @@ -1238,7 +1236,7 @@ def test_freeze(start_cluster): node1.exec_in_container(["bash", "-c", "find /external/shadow -name '*.mrk2' | grep '.*'"]) finally: - node1.query("DROP TABLE IF EXISTS default.freezing_table") + node1.query("DROP TABLE IF EXISTS default.freezing_table SYNC") node1.exec_in_container(["rm", "-rf", "/jbod1/shadow", "/external/shadow"]) @@ -1282,7 +1280,7 @@ def test_kill_while_insert(start_cluster): finally: try: - node1.query("DROP TABLE IF EXISTS {name}".format(name=name)) + node1.query(f"DROP TABLE IF EXISTS {name} SYNC") except: """ClickHouse may be inactive at this moment and we don't want to mask a meaningful exception.""" @@ -1343,7 +1341,7 @@ def test_move_while_merge(start_cluster): assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["2"] finally: - node1.query("DROP TABLE IF EXISTS {name}".format(name=name)) + node1.query(f"DROP TABLE IF EXISTS {name} SYNC") def test_move_across_policies_does_not_work(start_cluster): @@ -1384,8 +1382,8 @@ def test_move_across_policies_does_not_work(start_cluster): assert node1.query("""SELECT * FROM {name}""".format(name=name)).splitlines() == ["1"] finally: - node1.query("DROP TABLE IF EXISTS {name}".format(name=name)) - node1.query("DROP TABLE IF EXISTS {name}2".format(name=name)) + node1.query(f"DROP TABLE IF EXISTS {name} SYNC") + node1.query(f"DROP TABLE IF EXISTS {name}2 SYNC") def _insert_merge_execute(node, name, policy, parts, cmds, parts_before_cmds, parts_after_cmds): @@ -1420,7 +1418,7 @@ def _insert_merge_execute(node, name, policy, parts, cmds, parts_before_cmds, pa assert len(parts) == parts_after_cmds finally: - node.query("DROP TABLE IF EXISTS {name}".format(name=name)) + node.query(f"DROP TABLE IF EXISTS {name} SYNC") def _check_merges_are_working(node, storage_policy, volume, shall_work): @@ -1458,7 +1456,7 @@ def _check_merges_are_working(node, storage_policy, volume, shall_work): assert len(parts) == 1 if shall_work else created_parts finally: - node.query("DROP TABLE IF EXISTS {name}".format(name=name)) + node.query(f"DROP TABLE IF EXISTS {name} SYNC") def _get_prefer_not_to_merge_for_storage_policy(node, storage_policy): From ad655bcfc03ae951fe517f96a077d65fe06f04ad Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 21 Oct 2020 17:23:45 +0300 Subject: [PATCH 158/432] Add stateless flaky check --- docker/test/stateless/run.sh | 2 +- tests/ci/ci_config.json | 12 
++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 3a0842d07b7..d614cb152a8 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -23,7 +23,7 @@ read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}" function run_tests() { for i in $(seq 1 $NUM_TRIES); do - clickhouse-test --testname --shard --zookeeper --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt + clickhouse-test --testname --shard --zookeeper --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt done } diff --git a/tests/ci/ci_config.json b/tests/ci/ci_config.json index 6cc7a4a398a..418b8ce0356 100644 --- a/tests/ci/ci_config.json +++ b/tests/ci/ci_config.json @@ -584,6 +584,18 @@ "clang-tidy": "disable", "with_coverage": false } + }, + "Functional stateless tests flaky check (address)": { + "required_build_properties": { + "compiler": "clang-11", + "package_type": "deb", + "build_type": "relwithdebuginfo", + "sanitizer": "address", + "bundled": "bundled", + "splitted": "unsplitted", + "clang-tidy": "disable", + "with_coverage": false + } } } } From 848de47a2c9758f2c1285b0e257b049d300a2293 Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 21 Oct 2020 17:49:37 +0300 Subject: [PATCH 159/432] Remove checksum check from arrow package --- docker/packager/deb/Dockerfile | 4 ---- 1 file changed, 4 deletions(-) diff --git a/docker/packager/deb/Dockerfile b/docker/packager/deb/Dockerfile index 5bd8a74cb99..4e9259b214a 100644 --- a/docker/packager/deb/Dockerfile +++ b/docker/packager/deb/Dockerfile @@ -31,14 +31,10 @@ RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb \ && chmod +x dpkg-deb \ && cp dpkg-deb /usr/bin -ENV APACHE_PUBKEY_HASH="bba6987b63c63f710fd4ed476121c588bc3812e99659d27a855f8c4d312783ee66ad6adfce238765691b04d62fa3688f" - RUN export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \ && wget -nv -O /tmp/arrow-keyring.deb "https://apache.bintray.com/arrow/ubuntu/apache-arrow-archive-keyring-latest-${CODENAME}.deb" \ - && echo "${APACHE_PUBKEY_HASH} /tmp/arrow-keyring.deb" | sha384sum -c \ && dpkg -i /tmp/arrow-keyring.deb - # Libraries from OS are only needed to test the "unbundled" build (this is not used in production). RUN apt-get update \ && apt-get install \ From c10dada3d42ff0526c9ffbaa7c4beeb4467cde73 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 21 Oct 2020 18:06:42 +0300 Subject: [PATCH 160/432] try node 1 --- docker/test/performance-comparison/Dockerfile | 4 ++-- docker/test/performance-comparison/compare.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/test/performance-comparison/Dockerfile b/docker/test/performance-comparison/Dockerfile index 3b43b68319b..1b4a75f7f5d 100644 --- a/docker/test/performance-comparison/Dockerfile +++ b/docker/test/performance-comparison/Dockerfile @@ -37,14 +37,14 @@ RUN apt-get update \ COPY * / -# Bind everything to node 0 early. We have to bind both servers and the tmpfs +# Bind everything to node 1 early. We have to bind both servers and the tmpfs # on which the database is stored. 
How to do it through Yandex Sandbox API is # unclear, but by default tmpfs uses 'process allocation policy', not sure # which process but hopefully the one that writes to it, so just bind the # downloader script as well. # We could also try to remount it with proper options in Sandbox task. # https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt -CMD ["numactl", "--cpunodebind=0", "--membind=0", "/entrypoint.sh"] +CMD ["numactl", "--cpunodebind=1", "--membind=1", "/entrypoint.sh"] # docker run --network=host --volume :/workspace --volume=:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> yandex/clickhouse-performance-comparison diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 769a4f8f735..8a567d7a11a 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -1090,7 +1090,7 @@ case "$stage" in "restart") numactl --hardware ||: lscpu ||: - sudo dmidecode -t 4 ||: + dmidecode -t 4 ||: time restart ;& "run_tests") From a678f0322999afd4a3c054531220339086f25c6a Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 21 Oct 2020 18:21:54 +0300 Subject: [PATCH 161/432] fixup --- docker/test/performance-comparison/Dockerfile | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/docker/test/performance-comparison/Dockerfile b/docker/test/performance-comparison/Dockerfile index 1b4a75f7f5d..1a904cf73c2 100644 --- a/docker/test/performance-comparison/Dockerfile +++ b/docker/test/performance-comparison/Dockerfile @@ -37,14 +37,15 @@ RUN apt-get update \ COPY * / -# Bind everything to node 1 early. We have to bind both servers and the tmpfs -# on which the database is stored. How to do it through Yandex Sandbox API is -# unclear, but by default tmpfs uses 'process allocation policy', not sure -# which process but hopefully the one that writes to it, so just bind the -# downloader script as well. +# Bind everything to NUMA node #1, if there's more than one. We avoid node #0, +# because it is more susceptible to system interruptions. We have to bind both +# servers and the tmpfs on which the database is stored. How to do it through +# Yandex Sandbox API is unclear, but by default tmpfs uses 'process allocation +# policy', not sure which process but hopefully the one that writes to it, so +# just bind the downloader script as well. # We could also try to remount it with proper options in Sandbox task. # https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt -CMD ["numactl", "--cpunodebind=1", "--membind=1", "/entrypoint.sh"] +CMD ["bash", "-c", "node=$(numactl --hardware | grep -q 'available: 1 nodes' && echo 0 || echo 1); echo Will bind to NUMA node $node; numactl --cpunodebind=$node --membind=$node /entrypoint.sh"] # docker run --network=host --volume :/workspace --volume=:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> yandex/clickhouse-performance-comparison From f9b0ed93256a709268fc5b4be206ec2f188986d0 Mon Sep 17 00:00:00 2001 From: Vladimir Chebotarev Date: Wed, 21 Oct 2020 18:57:14 +0300 Subject: [PATCH 162/432] Empty commit to re-run checks. From 154553e4c046c8991ace2529d3cd8ae1cdcd62ed Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Wed, 21 Oct 2020 19:29:21 +0300 Subject: [PATCH 163/432] fix one more test The CLICKHOUSE_CLIENT has --send_logs_level option, and boost::program_options prefers the first entry, so we can't override it. Use CLICKHOUSE_CLIENT_BINARY instead, which does not contain options. 
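The "prefers the first entry" behavior is easy to reproduce in isolation: po::store() never overwrites a value that an earlier store() call already set, so an option baked into a wrapper command wins over one appended later. A minimal sketch (the option name is only for illustration):

    #include <boost/program_options.hpp>
    #include <iostream>
    #include <string>

    namespace po = boost::program_options;

    int main()
    {
        po::options_description desc("options");
        desc.add_options()("send_logs_level", po::value<std::string>(), "log level");

        // Simulate a wrapper (like $CLICKHOUSE_CLIENT) that already carries the
        // option, followed by a later attempt to override it.
        const char * wrapper_args[] = {"prog", "--send_logs_level=warning"};
        const char * user_args[]    = {"prog", "--send_logs_level=trace"};

        po::variables_map vm;
        po::store(po::parse_command_line(2, wrapper_args, desc), vm);
        po::store(po::parse_command_line(2, user_args, desc), vm);  // ignored
        po::notify(vm);

        // Prints "warning": the first stored value sticks.
        std::cout << vm["send_logs_level"].as<std::string>() << '\n';
    }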
--- .../0_stateless/00965_logs_level_bugfix.sh | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/queries/0_stateless/00965_logs_level_bugfix.sh b/tests/queries/0_stateless/00965_logs_level_bugfix.sh index d0126c19eb9..0c618cf1bf0 100755 --- a/tests/queries/0_stateless/00965_logs_level_bugfix.sh +++ b/tests/queries/0_stateless/00965_logs_level_bugfix.sh @@ -3,18 +3,18 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --send_logs_level="trace" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Trace" | head -n 1 +${CLICKHOUSE_CLIENT_BINARY} --send_logs_level="trace" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Trace" | head -n 1 echo "." -${CLICKHOUSE_CLIENT} --send_logs_level="debug" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Debug" | head -n 1 +${CLICKHOUSE_CLIENT_BINARY} --send_logs_level="debug" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Debug" | head -n 1 echo "." -${CLICKHOUSE_CLIENT} --send_logs_level="information" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Information" | head -n 1 +${CLICKHOUSE_CLIENT_BINARY} --send_logs_level="information" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Information" | head -n 1 echo "." -${CLICKHOUSE_CLIENT} --send_logs_level="error" --query="SELECT throwIf(1)" 2>&1 | awk '{ print $8 }' | grep "Error" | head -n 1 +${CLICKHOUSE_CLIENT_BINARY} --send_logs_level="error" --query="SELECT throwIf(1)" 2>&1 | awk '{ print $8 }' | grep "Error" | head -n 1 echo "-" -${CLICKHOUSE_CLIENT} --send_logs_level="debug" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Trace" | head -n 1 +${CLICKHOUSE_CLIENT_BINARY} --send_logs_level="debug" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Trace" | head -n 1 echo "." -${CLICKHOUSE_CLIENT} --send_logs_level="information" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Debug\|Trace" | head -n 1 +${CLICKHOUSE_CLIENT_BINARY} --send_logs_level="information" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Debug\|Trace" | head -n 1 echo "." -${CLICKHOUSE_CLIENT} --send_logs_level="error" --query="SELECT throwIf(1)" 2>&1 | awk '{ print $8 }' | grep "Debug\|Trace\|Information" | head -n 1 +${CLICKHOUSE_CLIENT_BINARY} --send_logs_level="error" --query="SELECT throwIf(1)" 2>&1 | awk '{ print $8 }' | grep "Debug\|Trace\|Information" | head -n 1 echo "." -${CLICKHOUSE_CLIENT} --send_logs_level="None" --query="SELECT throwIf(1)" 2>&1 | awk '{ print $8 }' | grep "Debug\|Trace\|Information\|Error" | head -n 1 +${CLICKHOUSE_CLIENT_BINARY} --send_logs_level="None" --query="SELECT throwIf(1)" 2>&1 | awk '{ print $8 }' | grep "Debug\|Trace\|Information\|Error" | head -n 1 From 545d187ef7e349d29b9fba5a84d859808ce37f32 Mon Sep 17 00:00:00 2001 From: Olga Revyakina Date: Wed, 21 Oct 2020 19:32:52 +0300 Subject: [PATCH 164/432] Docs for the crash_log table (en) --- docs/en/operations/system-tables/crash_log.md | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 docs/en/operations/system-tables/crash_log.md diff --git a/docs/en/operations/system-tables/crash_log.md b/docs/en/operations/system-tables/crash_log.md new file mode 100644 index 00000000000..d38ce31584f --- /dev/null +++ b/docs/en/operations/system-tables/crash_log.md @@ -0,0 +1,42 @@ +## system.crash_log {#system-tables_crash_log} + +Contains information about stack traces for fatal errors. The table does not exist in the database by default, it is created only when fatal errors occur. 
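+The rows are written by the server's handler for fatal signals, so each entry corresponds to one crash of a server process.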
+ +Columns: + +- `event_date` ([Datetime](../../sql-reference/data-types/datetime.md)) — Date of the event. +- `event_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Time of the event. +- `timestamp_ns` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Timestamp of the event with nanoseconds. +- `signal` ([Int32](../../sql-reference/data-types/int-uint.md)) — Signal number. +- `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Thread ID. +- `query_id` ([String](../../sql-reference/data-types/string.md)) — Query ID. +- `trace` ([Array](../../sql-reference/data-types/array.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Array of traces. +- `trace_full` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of full traces. +- `version` ([String](../../sql-reference/data-types/string.md)) — ClickHouse server version. +- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse server revision. +- `build_id` ([String](../../sql-reference/data-types/string.md)) — ClickHouse server build. + +**Example** + +Query: + +``` sql +SELECT * FROM system.crash_log ORDER BY event_time DESC LIMIT 1; +``` +Result (not full): + +``` text +event_date: 2020-10-14 +event_time: 2020-10-14 15:47:40 +timestamp_ns: 1602679660271312710 +signal: 11 +thread_id: 23624 +query_id: 428aab7c-8f5c-44e9-9607-d16b44467e69 +trace: [188531193,...] +trace_full: ['3. DB::(anonymous namespace)::FunctionFormatReadableTimeDelta::executeImpl(std::__1::vector >&, std::__1::vector > const&, unsigned long, unsigned long) const @ 0xb3cc1f9 in /home/username/work/ClickHouse/build/programs/clickhouse',...] +version: ClickHouse 20.11.1.1 +revision: 54442 +build_id: +``` + +[Original article](https://clickhouse.tech/docs/en/operations/system_tables/crash_log) From dcf9a8cff00d26b0498023560c236be822f3a2a8 Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 21 Oct 2020 19:39:39 +0300 Subject: [PATCH 165/432] Remove comment --- docker/test/stateless/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile index ba348b052f3..355e76aeec8 100644 --- a/docker/test/stateless/Dockerfile +++ b/docker/test/stateless/Dockerfile @@ -34,7 +34,7 @@ ENV TZ=Europe/Moscow RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone ENV NUM_TRIES=1 -ENV MAX_RUN_TIME=0 #unlimited +ENV MAX_RUN_TIME=0 COPY run.sh / CMD ["/bin/bash", "/run.sh"] From 69ae6ace4789cb5a906ecb0a500384326e3965ac Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 21 Oct 2020 21:17:27 +0300 Subject: [PATCH 166/432] Add untuple. 
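Before the implementation diff, the semantics in one line: untuple(t) splices the elements of a tuple-typed expression into the surrounding argument list, and the visitor below realizes this by synthesizing one tupleElement(t, i) call per element. The closest C++ analogy is std::apply flattening a tuple into arguments; a sketch of the analogy only, not ClickHouse code:

    #include <iostream>
    #include <tuple>

    // Stands in for the outer function that receives the untupled columns.
    int sum3(int a, int b, int c) { return a + b + c; }

    int main()
    {
        std::tuple<int, int, int> t{1, 2, 3};

        // Manual expansion: one std::get<i> per element, the way the visitor
        // emits one tupleElement(t, i) per element (1-based in SQL).
        int manual = sum3(std::get<0>(t), std::get<1>(t), std::get<2>(t));

        // std::apply performs the same splice generically.
        int applied = std::apply(sum3, t);

        std::cout << manual << ' ' << applied << '\n';  // 6 6
    }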
--- src/DataTypes/DataTypeTuple.cpp | 34 +++-- src/DataTypes/DataTypeTuple.h | 2 + src/Functions/tuple.cpp | 17 ++- src/Interpreters/ActionsVisitor.cpp | 179 +++++++++++++++++++----- src/Interpreters/ActionsVisitor.h | 4 + src/Interpreters/ExpressionActions.cpp | 1 + src/Interpreters/ExpressionAnalyzer.cpp | 9 +- 7 files changed, 195 insertions(+), 51 deletions(-) diff --git a/src/DataTypes/DataTypeTuple.cpp b/src/DataTypes/DataTypeTuple.cpp index b69c4c31ca4..f78e679ddbe 100644 --- a/src/DataTypes/DataTypeTuple.cpp +++ b/src/DataTypes/DataTypeTuple.cpp @@ -44,6 +44,21 @@ DataTypeTuple::DataTypeTuple(const DataTypes & elems_) names[i] = toString(i + 1); } +static void checkTupleNames(const Strings & names, std::function on_error) +{ + std::unordered_set names_set; + for (const auto & name : names) + { + if (name.empty()) + on_error("Names of tuple elements cannot be empty", ErrorCodes::BAD_ARGUMENTS); + + if (isNumericASCII(name[0])) + on_error("Explicitly specified names of tuple elements cannot start with digit", ErrorCodes::BAD_ARGUMENTS); + + if (!names_set.insert(name).second) + on_error("Names of tuple elements must be unique", ErrorCodes::DUPLICATE_COLUMN); + } +} DataTypeTuple::DataTypeTuple(const DataTypes & elems_, const Strings & names_) : elems(elems_), names(names_), have_explicit_names(true) @@ -52,20 +67,15 @@ DataTypeTuple::DataTypeTuple(const DataTypes & elems_, const Strings & names_) if (names.size() != size) throw Exception("Wrong number of names passed to constructor of DataTypeTuple", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - std::unordered_set names_set; - for (size_t i = 0; i < size; ++i) - { - if (names[i].empty()) - throw Exception("Names of tuple elements cannot be empty", ErrorCodes::BAD_ARGUMENTS); - - if (isNumericASCII(names[i][0])) - throw Exception("Explicitly specified names of tuple elements cannot start with digit", ErrorCodes::BAD_ARGUMENTS); - - if (!names_set.insert(names[i]).second) - throw Exception("Names of tuple elements must be unique", ErrorCodes::DUPLICATE_COLUMN); - } + checkTupleNames(names, [](const char * msg, int code) { throw Exception(msg, code); }); } +bool DataTypeTuple::canBeCreatedWithNames(const Strings & names) +{ + bool has_error = false; + checkTupleNames(names, [&](const char *, int) { has_error = true; }); + return !has_error; +} std::string DataTypeTuple::doGetName() const { diff --git a/src/DataTypes/DataTypeTuple.h b/src/DataTypes/DataTypeTuple.h index a8d16c28fa5..ed16d3c47d5 100644 --- a/src/DataTypes/DataTypeTuple.h +++ b/src/DataTypes/DataTypeTuple.h @@ -28,6 +28,8 @@ public: DataTypeTuple(const DataTypes & elems); DataTypeTuple(const DataTypes & elems, const Strings & names); + static bool canBeCreatedWithNames(const Strings & names); + TypeIndex getTypeId() const override { return TypeIndex::Tuple; } std::string doGetName() const override; const char * getFamilyName() const override { return "Tuple"; } diff --git a/src/Functions/tuple.cpp b/src/Functions/tuple.cpp index 45b208dbdcd..6808eeb4e3a 100644 --- a/src/Functions/tuple.cpp +++ b/src/Functions/tuple.cpp @@ -52,12 +52,25 @@ public: bool useDefaultImplementationForNulls() const override { return false; } bool useDefaultImplementationForConstants() const override { return true; } - DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override + DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { if (arguments.empty()) throw Exception("Function " + getName() + " requires at least one argument.", 
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - return std::make_shared(arguments); + DataTypes types; + Strings names; + + for (const auto & argument : arguments) + { + types.emplace_back(argument.type); + names.emplace_back(argument.name); + } + + /// Create named tuple if possible. + if (DataTypeTuple::canBeCreatedWithNames(names)) + return std::make_shared(types, names); + + return std::make_shared(types); } void executeImpl(ColumnsWithTypeAndName & columns, const ColumnNumbers & arguments, size_t result, size_t /*input_rows_count*/) const override diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index e0e921b003b..a1bde054923 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -524,11 +524,14 @@ const ActionsDAG & ScopeStack::getLastActions() const struct CachedColumnName { + bool & skip_cache; String cached; + CachedColumnName(bool & skip_cache_) : skip_cache(skip_cache_) {} + const String & get(const ASTPtr & ast) { - if (cached.empty()) + if (cached.empty() || skip_cache) cached = ast->getColumnName(); return cached; } @@ -539,7 +542,8 @@ bool ActionsMatcher::needChildVisit(const ASTPtr & node, const ASTPtr & child) /// Visit children themself if (node->as() || node->as() || - node->as()) + node->as() || + node->as()) return false; /// Do not go to FROM, JOIN, UNION. @@ -558,11 +562,115 @@ void ActionsMatcher::visit(const ASTPtr & ast, Data & data) visit(*node, ast, data); else if (const auto * literal = ast->as()) visit(*literal, ast, data); + else if (auto * expression_list = ast->as()) + visit(*expression_list, ast, data); +} + +std::optional ActionsMatcher::getNameAndTypeFromAST(const ASTPtr & ast, Data & data) +{ + // If the argument is a literal, we generated a unique column name for it. + // Use it instead of a generic display name. + auto child_column_name = ast->getColumnName(); + const auto * as_literal = ast->as(); + if (as_literal) + { + assert(!as_literal->unique_column_name.empty()); + child_column_name = as_literal->unique_column_name; + } + + const auto & index = data.actions_stack.getLastActions().getIndex(); + auto it = index.find(child_column_name); + if (it != index.end()) + return NameAndTypePair(child_column_name, it->second->result_type); + + if (!data.only_consts) + throw Exception("Unknown identifier: " + child_column_name + " there are columns: " + data.actions_stack.dumpNames(), + ErrorCodes::UNKNOWN_IDENTIFIER); + + return {}; +} + +ASTs ActionsMatcher::doUntuple(const ASTFunction * function, ActionsMatcher::Data & data) +{ + if (function->arguments->children.size() != 1) + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Number of arguments for function untuple doesn't match. Passed {}, should be 1", + function->arguments->children.size()); + + auto & child = function->arguments->children[0]; + + /// Calculate nested function. 
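+    /// The recursive visit materializes the tuple argument in the actions DAG, so its name and type can be resolved just below.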
+ visit(child, data); + + /// Get type and name for tuple argument + auto tuple_name_type = getNameAndTypeFromAST(child, data); + if (!tuple_name_type) + return {}; + + const auto * tuple_type = typeid_cast(tuple_name_type->type.get()); + + if (!tuple_type) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Function untuple expect tuple argument, got {}", + tuple_name_type->type->getName()); + + ASTs columns; + size_t tid = 0; + for (const auto & name : tuple_type->getElementNames()) + { + auto tuple_ast = function->arguments->children[0]; + if (tid != 0) + tuple_ast = tuple_ast->clone(); + + auto literal = std::make_shared(UInt64(++tid)); + visit(*literal, literal, data); + + auto func = makeASTFunction("tupleElement", tuple_ast, literal); + + if (tuple_type->haveExplicitNames()) + func->setAlias(name); + else + func->setAlias(data.getUniqueName("_ut" + name)); + + auto function_builder = FunctionFactory::instance().get(func->name, data.context); + data.addFunction(function_builder, {tuple_name_type->name, literal->getColumnName()}, func->getColumnName()); + + columns.push_back(std::move(func)); + } + + return columns; +} + +void ActionsMatcher::visit(ASTExpressionList & expression_list, const ASTPtr &, Data & data) +{ + size_t num_children = expression_list.children.size(); + for (size_t i = 0; i < num_children; ++i) + { + if (const auto * function = expression_list.children[i]->as()) + { + if (function->name == "untuple") + { + auto columns = doUntuple(function, data); + + if (columns.empty()) + continue; + + expression_list.children.erase(expression_list.children.begin() + i); + expression_list.children.insert(expression_list.children.begin() + i, columns.begin(), columns.end()); + num_children += columns.size() - 1; + i += columns.size() - 1; + } + else + visit(expression_list.children[i], data); + } + else + visit(expression_list.children[i], data); + } } void ActionsMatcher::visit(const ASTIdentifier & identifier, const ASTPtr & ast, Data & data) { - CachedColumnName column_name; + CachedColumnName column_name(data.has_untuple); if (data.hasColumn(column_name.get(ast))) return; @@ -588,7 +696,7 @@ void ActionsMatcher::visit(const ASTIdentifier & identifier, const ASTPtr & ast, void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & data) { - CachedColumnName column_name; + CachedColumnName column_name(data.has_untuple); if (data.hasColumn(column_name.get(ast))) return; @@ -662,20 +770,20 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & /// If the function has an argument-lambda expression, you need to determine its type before the recursive call. bool has_lambda_arguments = false; - - for (size_t arg = 0; arg < node.arguments->children.size(); ++arg) + size_t num_arguments = node.arguments->children.size(); + for (size_t arg = 0; arg < num_arguments; ++arg) { auto & child = node.arguments->children[arg]; - const auto * lambda = child->as(); + const auto * function = child->as(); const auto * identifier = child->as(); - if (lambda && lambda->name == "lambda") + if (function && function->name == "lambda") { /// If the argument is a lambda expression, just remember its approximate type. 
- if (lambda->arguments->children.size() != 2) + if (function->arguments->children.size() != 2) throw Exception("lambda requires two arguments", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - const auto * lambda_args_tuple = lambda->arguments->children.at(0)->as(); + const auto * lambda_args_tuple = function->arguments->children.at(0)->as(); if (!lambda_args_tuple || lambda_args_tuple->name != "tuple") throw Exception("First argument of lambda must be a tuple", ErrorCodes::TYPE_MISMATCH); @@ -685,6 +793,30 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & /// Select the name in the next cycle. argument_names.emplace_back(); } + else if (function && function->name == "untuple") + { + data.has_untuple = true; + auto columns = doUntuple(function, data); + + if (columns.empty()) + continue; + + for (const auto & column : columns) + { + if (auto name_type = getNameAndTypeFromAST(column, data)) + { + argument_types.push_back(name_type->type); + argument_names.push_back(name_type->name); + } + else + arguments_present = false; + } + + node.arguments->children.erase(node.arguments->children.begin() + arg); + node.arguments->children.insert(node.arguments->children.begin() + arg, columns.begin(), columns.end()); + num_arguments += columns.size() - 1; + arg += columns.size() - 1; + } else if (checkFunctionIsInOrGlobalInOperator(node) && arg == 1 && prepared_set) { ColumnWithTypeAndName column; @@ -731,32 +863,13 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & /// If the argument is not a lambda expression, call it recursively and find out its type. visit(child, data); - // In the above visit() call, if the argument is a literal, we - // generated a unique column name for it. Use it instead of a generic - // display name. 
- auto child_column_name = child->getColumnName(); - const auto * as_literal = child->as(); - if (as_literal) + if (auto name_type = getNameAndTypeFromAST(child, data)) { - assert(!as_literal->unique_column_name.empty()); - child_column_name = as_literal->unique_column_name; - } - - const auto & index = data.actions_stack.getLastActions().getIndex(); - auto it = index.find(child_column_name); - if (it != index.end()) - { - argument_types.push_back(it->second->result_type); - argument_names.push_back(child_column_name); + argument_types.push_back(name_type->type); + argument_names.push_back(name_type->name); } else - { - if (data.only_consts) - arguments_present = false; - else - throw Exception("Unknown identifier: " + child_column_name + " there are columns: " + data.actions_stack.dumpNames(), - ErrorCodes::UNKNOWN_IDENTIFIER); - } + arguments_present = false; } } diff --git a/src/Interpreters/ActionsVisitor.h b/src/Interpreters/ActionsVisitor.h index f4da9932163..3aa60f2e3b5 100644 --- a/src/Interpreters/ActionsVisitor.h +++ b/src/Interpreters/ActionsVisitor.h @@ -118,6 +118,7 @@ public: bool no_makeset; bool only_consts; bool no_storage_or_local; + bool has_untuple = false; size_t visit_depth; ScopeStack actions_stack; @@ -191,8 +192,11 @@ private: static void visit(const ASTIdentifier & identifier, const ASTPtr & ast, Data & data); static void visit(const ASTFunction & node, const ASTPtr & ast, Data & data); static void visit(const ASTLiteral & literal, const ASTPtr & ast, Data & data); + static void visit(ASTExpressionList & expression_list, const ASTPtr & ast, Data & data); static SetPtr makeSet(const ASTFunction & node, Data & data, bool no_subqueries); + static ASTs doUntuple(const ASTFunction * function, ActionsMatcher::Data & data); + static std::optional getNameAndTypeFromAST(const ASTPtr & ast, Data & data); }; using ActionsVisitor = ActionsMatcher::Visitor; diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp index 1bb2fd8e96b..c8a84ea5b55 100644 --- a/src/Interpreters/ExpressionActions.cpp +++ b/src/Interpreters/ExpressionActions.cpp @@ -1559,6 +1559,7 @@ const ActionsDAG::Node & ActionsDAG::addFunction( node.allow_constant_folding = node.allow_constant_folding && child.allow_constant_folding; ColumnWithTypeAndName argument; + argument.name = argument_names[i]; argument.column = child.column; argument.type = child.result_type; diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index f79bb36ec46..ccf10f202d4 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -418,6 +418,8 @@ bool ExpressionAnalyzer::makeAggregateDescriptions(ActionsDAGPtr & actions) for (const ASTFunction * node : aggregates()) { AggregateDescription aggregate; + getRootActionsNoMakeSet(node->arguments, true, actions); + aggregate.column_name = node->getColumnName(); const ASTs & arguments = node->arguments->children; @@ -427,7 +429,6 @@ bool ExpressionAnalyzer::makeAggregateDescriptions(ActionsDAGPtr & actions) const auto & index = actions->getIndex(); for (size_t i = 0; i < arguments.size(); ++i) { - getRootActionsNoMakeSet(arguments[i], true, actions); const std::string & name = arguments[i]->getColumnName(); auto it = index.find(name); @@ -743,12 +744,12 @@ bool SelectQueryExpressionAnalyzer::appendWhere(ExpressionActionsChain & chain, ExpressionActionsChain::Step & step = chain.lastStep(columns_after_join); + getRootActions(select_query->where(), only_types, 
step.actions()); + auto where_column_name = select_query->where()->getColumnName(); step.required_output.push_back(where_column_name); step.can_remove_required_output = {true}; - getRootActions(select_query->where(), only_types, step.actions()); - auto filter_type = step.actions()->getIndex().find(where_column_name)->second->result_type; if (!filter_type->canBeUsedInBooleanContext()) throw Exception("Invalid type for filter in WHERE: " + filter_type->getName(), @@ -824,8 +825,8 @@ bool SelectQueryExpressionAnalyzer::appendHaving(ExpressionActionsChain & chain, ExpressionActionsChain::Step & step = chain.lastStep(aggregated_columns); - step.required_output.push_back(select_query->having()->getColumnName()); getRootActions(select_query->having(), only_types, step.actions()); + step.required_output.push_back(select_query->having()->getColumnName()); return true; } From 5292c8b916e1a7bd3e3ba6968d95b190b17524b3 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 21 Oct 2020 21:22:13 +0300 Subject: [PATCH 167/432] Add test. --- tests/queries/0_stateless/01232_untuple.reference | 13 +++++++++++++ tests/queries/0_stateless/01232_untuple.sql | 10 ++++++++++ 2 files changed, 23 insertions(+) create mode 100644 tests/queries/0_stateless/01232_untuple.reference create mode 100644 tests/queries/0_stateless/01232_untuple.sql diff --git a/tests/queries/0_stateless/01232_untuple.reference b/tests/queries/0_stateless/01232_untuple.reference new file mode 100644 index 00000000000..44f96e1decd --- /dev/null +++ b/tests/queries/0_stateless/01232_untuple.reference @@ -0,0 +1,13 @@ +1 3 +hello 1 3 world +9 +9 (0,1) +key v1 v2 v3 v4 v5 +4 10 20 10 20 30 +3 70 20 10 20 30 +2 11 20 10 20 30 +5 10 20 10 20 30 +1 20 20 10 20 30 +6 10 20 10 20 30 +7 18 20 10 20 30 +8 30 20 10 20 30 diff --git a/tests/queries/0_stateless/01232_untuple.sql b/tests/queries/0_stateless/01232_untuple.sql new file mode 100644 index 00000000000..39ee9e82fa7 --- /dev/null +++ b/tests/queries/0_stateless/01232_untuple.sql @@ -0,0 +1,10 @@ +select untuple((* except (b),)) from (select 1 a, 2 b, 3 c); +select 'hello', untuple((* except (b),)), 'world' from (select 1 a, 2 b, 3 c); +select argMax(untuple(x)) from (select (number, number + 1) as x from numbers(10)); +select argMax(untuple(x)), min(x) from (select (number, number + 1) as x from numbers(10)) having tuple(untuple(min(x))).1 != 42; + +drop table if exists kv; +create table kv (key int, v1 int, v2 int, v3 int, v4 int, v5 int) engine MergeTree order by key; +insert into kv values (1, 10, 20, 10, 20, 30), (2, 11, 20, 10, 20, 30), (1, 18, 20, 10, 20, 30), (1, 20, 20, 10, 20, 30), (3, 70, 20, 10, 20, 30), (4, 10, 20, 10, 20, 30), (1, 10, 20, 10, 20, 30), (5, 10, 20, 10, 20, 30), (1, 10, 20, 10, 20, 30), (8, 30, 20, 10, 20, 30), (1, 10, 20, 10, 20, 30), (6, 10, 20, 10, 20, 30), (1, 10, 20, 10, 20, 30), (7, 18, 20, 10, 20, 30), (1, 10, 20, 10, 20, 30), (7, 10, 20, 10, 20, 30), (1, 10, 20, 10, 20, 30), (8, 10, 20, 10, 20, 30), (1, 10, 20, 10, 20, 30); +select key, untuple(argMax((* except (key),), v1)) from kv group by key format TSVWithNames; +drop table if exists kv; From 748ff404f94e46917c4231adc08fff59e66bfdc6 Mon Sep 17 00:00:00 2001 From: Mikhail Filimonov Date: Wed, 21 Oct 2020 20:36:01 +0200 Subject: [PATCH 168/432] Attempt to fix the race --- programs/local/LocalServer.cpp | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index bfc7cac7fc1..ef96db4e96a 100644 --- 
a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -335,15 +335,19 @@ void LocalServer::processQueries() if (!parse_res.second) throw Exception("Cannot parse and execute the following part of query: " + String(parse_res.first), ErrorCodes::SYNTAX_ERROR); - context->makeSessionContext(); - context->makeQueryContext(); + /// we can't mutate global context (due to possible races), so we can't reuse it safely as a query context + /// so we need a copy here + auto query_context = Context(context); - context->setUser("default", "", Poco::Net::SocketAddress{}); - context->setCurrentQueryId(""); + query_context->makeSessionContext(); + query_context->makeQueryContext(); + + query_context->setUser("default", "", Poco::Net::SocketAddress{}); + query_context->setCurrentQueryId(""); applyCmdSettings(); /// Use the same query_id (and thread group) for all queries - CurrentThread::QueryScope query_scope_holder(*context); + CurrentThread::QueryScope query_scope_holder(*query_context); bool echo_queries = config().hasOption("echo") || config().hasOption("verbose"); std::exception_ptr exception; @@ -362,7 +366,7 @@ void LocalServer::processQueries() try { - executeQuery(read_buf, write_buf, /* allow_into_outfile = */ true, *context, {}); + executeQuery(read_buf, write_buf, /* allow_into_outfile = */ true, *query_context, {}); } catch (...) { From fd77ca82517ab4b6dfd3780b9e9640b9ce967d21 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 21 Oct 2020 22:20:52 +0300 Subject: [PATCH 169/432] Fix tests. --- src/Parsers/ParserDataType.cpp | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/src/Parsers/ParserDataType.cpp b/src/Parsers/ParserDataType.cpp index a0a4eb97efe..acd68e51e42 100644 --- a/src/Parsers/ParserDataType.cpp +++ b/src/Parsers/ParserDataType.cpp @@ -8,6 +8,23 @@ namespace DB { +namespace +{ + class ParserNestedOrExpression : public IParserBase + { + protected: + const char * getName() const override { return "nested or expression"; } + + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override + { + ParserNestedTable nested_parser; + ParserExpression expr_parser; + return nested_parser.parse(pos, node, expected) || expr_parser.parse(pos, node, expected); + } + }; +} + + bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { ParserNestedTable nested; @@ -78,7 +95,7 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ++pos; /// Parse optional parameters - ParserList args_parser(std::make_unique(), std::make_unique(TokenType::Comma)); + ParserList args_parser(std::make_unique(), std::make_unique(TokenType::Comma)); ASTPtr expr_list_args; if (!args_parser.parse(pos, expr_list_args, expected)) From ac42199ae52c5769dc915249d30628b25eb9bb2b Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Wed, 21 Oct 2020 22:24:16 +0300 Subject: [PATCH 170/432] fix race on StorageReplicatedMergeTree::data_parts_exchange_endpoint --- src/Storages/StorageReplicatedMergeTree.cpp | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 4d3a16aef43..5f031d1463a 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -3481,8 +3481,10 @@ void StorageReplicatedMergeTree::startup() { queue.initialize(getDataParts()); - data_parts_exchange_endpoint = std::make_shared(*this); - 
global_context.getInterserverIOHandler().addEndpoint(data_parts_exchange_endpoint->getId(replica_path), data_parts_exchange_endpoint); + InterserverIOEndpointPtr data_parts_exchange_ptr = std::make_shared(*this); + [[maybe_unused]] auto prev_ptr = std::atomic_exchange(&data_parts_exchange_endpoint, data_parts_exchange_ptr); + assert(prev_ptr == nullptr); + global_context.getInterserverIOHandler().addEndpoint(data_parts_exchange_ptr->getId(replica_path), data_parts_exchange_ptr); /// In this thread replica will be activated. restarting_thread.start(); @@ -3549,15 +3551,15 @@ void StorageReplicatedMergeTree::shutdown() global_context.getBackgroundMovePool().removeTask(move_parts_task_handle); move_parts_task_handle.reset(); - if (data_parts_exchange_endpoint) + auto data_parts_exchange_ptr = std::atomic_exchange(&data_parts_exchange_endpoint, InterserverIOEndpointPtr{}); + if (data_parts_exchange_ptr) { - global_context.getInterserverIOHandler().removeEndpointIfExists(data_parts_exchange_endpoint->getId(replica_path)); + global_context.getInterserverIOHandler().removeEndpointIfExists(data_parts_exchange_ptr->getId(replica_path)); /// Ask all parts exchange handlers to finish asap. New ones will fail to start - data_parts_exchange_endpoint->blocker.cancelForever(); + data_parts_exchange_ptr->blocker.cancelForever(); /// Wait for all of them - std::unique_lock lock(data_parts_exchange_endpoint->rwlock); + std::unique_lock lock(data_parts_exchange_ptr->rwlock); } - data_parts_exchange_endpoint.reset(); /// We clear all old parts after stopping all background operations. It's /// important, because background operations can produce temporary parts @@ -5870,7 +5872,10 @@ ActionLock StorageReplicatedMergeTree::getActionLock(StorageActionBlockType acti return fetcher.blocker.cancel(); if (action_type == ActionLocks::PartsSend) - return data_parts_exchange_endpoint ? data_parts_exchange_endpoint->blocker.cancel() : ActionLock(); + { + auto data_parts_exchange_ptr = std::atomic_load(&data_parts_exchange_endpoint); + return data_parts_exchange_ptr ? data_parts_exchange_ptr->blocker.cancel() : ActionLock(); + } if (action_type == ActionLocks::ReplicationQueue) return queue.actions_blocker.cancel(); From 60aae56266e1a20fd4679d874fb488682fc566e8 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 20 Oct 2020 21:12:02 +0300 Subject: [PATCH 171/432] Link dbms w/ atomic to fix undefined reference in unbundled build MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This will fix undefined reference for __atomic_load under clang10 and unbundled build (libstdc++ shared linkage): clickhouse-server: symbol lookup error: /src/ch/clickhouse/.cmake/src/libclickhouse_disksd.so: undefined symbol: __atomic_load From [1]: "libatomic (GNU) libgcc_s does not provide an implementation of an atomics library. Instead, GCC’s libatomic library can be used to supply these when using libgcc_s. Note Clang does not currently automatically link against libatomic when using libgcc_s. You may need to manually add -latomic to support this configuration when using non-native atomic operations (if you see link errors referring to __atomic_* functions)." 
[1]: https://clang.llvm.org/docs/Toolchain.html --- src/CMakeLists.txt | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 0016c51b7f8..085269847e4 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -173,14 +173,20 @@ add_object_library(clickhouse_processors_merges Processors/Merges) add_object_library(clickhouse_processors_merges_algorithms Processors/Merges/Algorithms) add_object_library(clickhouse_processors_queryplan Processors/QueryPlan) +set (DBMS_COMMON_LIBRARIES) +# libgcc_s does not provide an implementation of an atomics library. Instead, +# GCC’s libatomic library can be used to supply these when using libgcc_s. +if ((NOT USE_LIBCXX) AND COMPILER_CLANG AND OS_LINUX) + list (APPEND DBMS_COMMON_LIBRARIES atomic) +endif() if (MAKE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES) add_library (dbms STATIC ${dbms_headers} ${dbms_sources}) - target_link_libraries (dbms PRIVATE jemalloc libdivide) + target_link_libraries (dbms PRIVATE jemalloc libdivide ${DBMS_COMMON_LIBRARIES}) set (all_modules dbms) else() add_library (dbms SHARED ${dbms_headers} ${dbms_sources}) - target_link_libraries (dbms PUBLIC ${all_modules}) + target_link_libraries (dbms PUBLIC ${all_modules} ${DBMS_COMMON_LIBRARIES}) target_link_libraries (clickhouse_interpreters PRIVATE jemalloc libdivide) list (APPEND all_modules dbms) # force all split libs to be linked From 69279e6d764e207e99fd4cefb5e38d1b5f6db4e5 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 21 Oct 2020 23:05:10 +0300 Subject: [PATCH 172/432] Fix tests. --- src/DataTypes/DataTypeTuple.cpp | 6 +++--- src/DataTypes/DataTypeTuple.h | 3 ++- src/Functions/tuple.cpp | 2 +- src/Parsers/ParserDataType.cpp | 19 +------------------ 4 files changed, 7 insertions(+), 23 deletions(-) diff --git a/src/DataTypes/DataTypeTuple.cpp b/src/DataTypes/DataTypeTuple.cpp index f78e679ddbe..0218a7a4e1c 100644 --- a/src/DataTypes/DataTypeTuple.cpp +++ b/src/DataTypes/DataTypeTuple.cpp @@ -60,8 +60,8 @@ static void checkTupleNames(const Strings & names, std::functiongetName(); diff --git a/src/DataTypes/DataTypeTuple.h b/src/DataTypes/DataTypeTuple.h index ed16d3c47d5..da3dfdb1d3d 100644 --- a/src/DataTypes/DataTypeTuple.h +++ b/src/DataTypes/DataTypeTuple.h @@ -22,11 +22,12 @@ private: DataTypes elems; Strings names; bool have_explicit_names; + bool serialize_names; public: static constexpr bool is_parametric = true; DataTypeTuple(const DataTypes & elems); - DataTypeTuple(const DataTypes & elems, const Strings & names); + DataTypeTuple(const DataTypes & elems, const Strings & names, bool serialize_names_ = true); static bool canBeCreatedWithNames(const Strings & names); diff --git a/src/Functions/tuple.cpp b/src/Functions/tuple.cpp index 6808eeb4e3a..024473937eb 100644 --- a/src/Functions/tuple.cpp +++ b/src/Functions/tuple.cpp @@ -68,7 +68,7 @@ public: /// Create named tuple if possible. 
if (DataTypeTuple::canBeCreatedWithNames(names)) - return std::make_shared(types, names); + return std::make_shared(types, names, false); return std::make_shared(types); } diff --git a/src/Parsers/ParserDataType.cpp b/src/Parsers/ParserDataType.cpp index acd68e51e42..a0a4eb97efe 100644 --- a/src/Parsers/ParserDataType.cpp +++ b/src/Parsers/ParserDataType.cpp @@ -8,23 +8,6 @@ namespace DB { -namespace -{ - class ParserNestedOrExpression : public IParserBase - { - protected: - const char * getName() const override { return "nested or expression"; } - - bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override - { - ParserNestedTable nested_parser; - ParserExpression expr_parser; - return nested_parser.parse(pos, node, expected) || expr_parser.parse(pos, node, expected); - } - }; -} - - bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { ParserNestedTable nested; @@ -95,7 +78,7 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ++pos; /// Parse optional parameters - ParserList args_parser(std::make_unique(), std::make_unique(TokenType::Comma)); + ParserList args_parser(std::make_unique(), std::make_unique(TokenType::Comma)); ASTPtr expr_list_args; if (!args_parser.parse(pos, expr_list_args, expected)) From 369365bdb09d21271fdafd35b6267d23ca1dfa31 Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 21 Oct 2020 23:11:35 +0300 Subject: [PATCH 173/432] Fix run.sh --- docker/test/stateless/run.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index d614cb152a8..e69fdc0fce0 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -1,6 +1,7 @@ #!/bin/bash -set -e -x +# fail on errors, verbose and export all env variables +set -e -x -a dpkg -i package_folder/clickhouse-common-static_*.deb dpkg -i package_folder/clickhouse-common-static-dbg_*.deb From 67cbb55d63d81f32bc19d8597ef8c4eefaf10a14 Mon Sep 17 00:00:00 2001 From: Mikhail Filimonov Date: Wed, 21 Oct 2020 22:16:13 +0200 Subject: [PATCH 174/432] Fix compilation --- programs/local/LocalServer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index ef96db4e96a..bb9918d633f 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -337,7 +337,7 @@ void LocalServer::processQueries() /// we can't mutate global context (due to possible races), so we can't reuse it safely as a query context /// so we need a copy here - auto query_context = Context(context); + auto query_context = Context(context.get()); query_context->makeSessionContext(); query_context->makeQueryContext(); From 0da6e855c0c826535eae9ee98b2babbd7f53c2aa Mon Sep 17 00:00:00 2001 From: MyroTk Date: Thu, 22 Oct 2020 00:04:09 +0200 Subject: [PATCH 175/432] getuid changes in common and specification --- docker/test/testflows/runner/Dockerfile | 2 +- tests/testflows/rbac/helper/common.py | 7 +- tests/testflows/rbac/regression.py | 4 + .../rbac/requirements/requirements.md | 5 +- .../rbac/requirements/requirements.py | 5429 ++++++++++++++--- 5 files changed, 4598 insertions(+), 849 deletions(-) diff --git a/docker/test/testflows/runner/Dockerfile b/docker/test/testflows/runner/Dockerfile index 7efee354ce8..63e32fad162 100644 --- a/docker/test/testflows/runner/Dockerfile +++ b/docker/test/testflows/runner/Dockerfile @@ -35,7 +35,7 @@ RUN apt-get update \ ENV TZ=Europe/Moscow RUN ln -snf /usr/share/zoneinfo/$TZ 
/etc/localtime && echo $TZ > /etc/timezone -RUN pip3 install urllib3 testflows==1.6.56 docker-compose docker dicttoxml kazoo tzlocal +RUN pip3 install urllib3 testflows==1.6.58 docker-compose docker dicttoxml kazoo tzlocal ENV DOCKER_CHANNEL stable ENV DOCKER_VERSION 17.09.1-ce diff --git a/tests/testflows/rbac/helper/common.py b/tests/testflows/rbac/helper/common.py index 4a2ae3004bc..1a30e3d22ba 100755 --- a/tests/testflows/rbac/helper/common.py +++ b/tests/testflows/rbac/helper/common.py @@ -1,5 +1,6 @@ import uuid from multiprocessing.dummy import Pool +from testflows.core.name import basename, parentname from contextlib import contextmanager from testflows.core import * @@ -52,7 +53,11 @@ def permutations(table_count=1): return [*range((1 << table_count)-1)] def getuid(): - return str(uuid.uuid1()).replace('-', '_') + if "=" in basename(current().name): + testname = f"{basename(parentname(current().name)).replace(' ', '_').replace(',','')}" + else: + testname = f"{basename(current().name).replace(' ', '_').replace(',','')}" + return testname + "_" + str(uuid.uuid1()).replace('-', '_') @contextmanager def table(node, name, table_type_name="MergeTree"): diff --git a/tests/testflows/rbac/regression.py b/tests/testflows/rbac/regression.py index b25c1f93e34..c0307664061 100755 --- a/tests/testflows/rbac/regression.py +++ b/tests/testflows/rbac/regression.py @@ -7,6 +7,7 @@ append_path(sys.path, "..") from helpers.cluster import Cluster from helpers.argparser import argparser +from rbac.requirements import SRS_006_ClickHouse_Role_Based_Access_Control issue_14091 = "https://github.com/ClickHouse/ClickHouse/issues/14091" issue_14149 = "https://github.com/ClickHouse/ClickHouse/issues/14149" @@ -100,6 +101,9 @@ xflags = { @XFails(xfails) @XFlags(xflags) @Name("rbac") +@Specifications( + SRS_006_ClickHouse_Role_Based_Access_Control +) def regression(self, local, clickhouse_binary_path, stress=None, parallel=None): """RBAC regression. """ diff --git a/tests/testflows/rbac/requirements/requirements.md b/tests/testflows/rbac/requirements/requirements.md index fba161d9e19..10b2d277523 100644 --- a/tests/testflows/rbac/requirements/requirements.md +++ b/tests/testflows/rbac/requirements/requirements.md @@ -1,4 +1,5 @@ -# SRS-006 ClickHouse Role Based Access Control
Software Requirements Specification +# SRS-006 ClickHouse Role Based Access Control +# Software Requirements Specification ## Table of Contents @@ -3713,4 +3714,4 @@ through one of the roles with **admin option** privilege assigned to the user. [Revision history]: https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/rbac/requirements/requirements.md [Git]: https://git-scm.com/ [MySQL]: https://dev.mysql.com/doc/refman/8.0/en/account-management-statements.html -[PostgreSQL]: https://www.postgresql.org/docs/12/user-manag.html \ No newline at end of file +[PostgreSQL]: https://www.postgresql.org/docs/12/user-manag.html diff --git a/tests/testflows/rbac/requirements/requirements.py b/tests/testflows/rbac/requirements/requirements.py index a0cff884164..bce0b3a9fcb 100755 --- a/tests/testflows/rbac/requirements/requirements.py +++ b/tests/testflows/rbac/requirements/requirements.py @@ -1,10 +1,3747 @@ # These requirements were auto generated # from software requirements specification (SRS) -# document by TestFlows v1.6.200917.1194158. +# document by TestFlows v1.6.201021.1163815. # Do not edit by hand but re-generate instead # using 'tfs requirements generate' command. +from testflows.core import Specification from testflows.core import Requirement +SRS_006_ClickHouse_Role_Based_Access_Control = Specification( + name='SRS-006 ClickHouse Role Based Access Control', + description=None, + author=None, + date=None, + status=None, + approved_by=None, + approved_date=None, + approved_version=None, + version=None, + group=None, + type=None, + link=None, + uid=None, + parent=None, + children=None, + content=''' +# SRS-006 ClickHouse Role Based Access Control +# Software Requirements Specification + +## Table of Contents + +* 1 [Revision History](#revision-history) +* 2 [Introduction](#introduction) +* 3 [Terminology](#terminology) +* 4 [Privilege Definitions](#privilege-definitions) +* 5 [Requirements](#requirements) + * 5.1 [Generic](#generic) + * 5.1.1 [RQ.SRS-006.RBAC](#rqsrs-006rbac) + * 5.1.2 [Login](#login) + * 5.1.2.1 [RQ.SRS-006.RBAC.Login](#rqsrs-006rbaclogin) + * 5.1.2.2 [RQ.SRS-006.RBAC.Login.DefaultUser](#rqsrs-006rbaclogindefaultuser) + * 5.1.3 [User](#user) + * 5.1.3.1 [RQ.SRS-006.RBAC.User](#rqsrs-006rbacuser) + * 5.1.3.2 [RQ.SRS-006.RBAC.User.Roles](#rqsrs-006rbacuserroles) + * 5.1.3.3 [RQ.SRS-006.RBAC.User.Privileges](#rqsrs-006rbacuserprivileges) + * 5.1.3.4 [RQ.SRS-006.RBAC.User.Variables](#rqsrs-006rbacuservariables) + * 5.1.3.5 [RQ.SRS-006.RBAC.User.Variables.Constraints](#rqsrs-006rbacuservariablesconstraints) + * 5.1.3.6 [RQ.SRS-006.RBAC.User.SettingsProfile](#rqsrs-006rbacusersettingsprofile) + * 5.1.3.7 [RQ.SRS-006.RBAC.User.Quotas](#rqsrs-006rbacuserquotas) + * 5.1.3.8 [RQ.SRS-006.RBAC.User.RowPolicies](#rqsrs-006rbacuserrowpolicies) + * 5.1.3.9 [RQ.SRS-006.RBAC.User.AccountLock](#rqsrs-006rbacuseraccountlock) + * 5.1.3.10 [RQ.SRS-006.RBAC.User.AccountLock.DenyAccess](#rqsrs-006rbacuseraccountlockdenyaccess) + * 5.1.3.11 [RQ.SRS-006.RBAC.User.DefaultRole](#rqsrs-006rbacuserdefaultrole) + * 5.1.3.12 [RQ.SRS-006.RBAC.User.RoleSelection](#rqsrs-006rbacuserroleselection) + * 5.1.3.13 [RQ.SRS-006.RBAC.User.ShowCreate](#rqsrs-006rbacusershowcreate) + * 5.1.3.14 [RQ.SRS-006.RBAC.User.ShowPrivileges](#rqsrs-006rbacusershowprivileges) + * 5.1.4 [Role](#role) + * 5.1.4.1 [RQ.SRS-006.RBAC.Role](#rqsrs-006rbacrole) + * 5.1.4.2 [RQ.SRS-006.RBAC.Role.Privileges](#rqsrs-006rbacroleprivileges) + * 5.1.4.3 [RQ.SRS-006.RBAC.Role.Variables](#rqsrs-006rbacrolevariables) + * 
5.1.4.4 [RQ.SRS-006.RBAC.Role.SettingsProfile](#rqsrs-006rbacrolesettingsprofile) + * 5.1.4.5 [RQ.SRS-006.RBAC.Role.Quotas](#rqsrs-006rbacrolequotas) + * 5.1.4.6 [RQ.SRS-006.RBAC.Role.RowPolicies](#rqsrs-006rbacrolerowpolicies) + * 5.1.5 [Partial Revokes](#partial-revokes) + * 5.1.5.1 [RQ.SRS-006.RBAC.PartialRevokes](#rqsrs-006rbacpartialrevokes) + * 5.1.6 [Settings Profile](#settings-profile) + * 5.1.6.1 [RQ.SRS-006.RBAC.SettingsProfile](#rqsrs-006rbacsettingsprofile) + * 5.1.6.2 [RQ.SRS-006.RBAC.SettingsProfile.Constraints](#rqsrs-006rbacsettingsprofileconstraints) + * 5.1.6.3 [RQ.SRS-006.RBAC.SettingsProfile.ShowCreate](#rqsrs-006rbacsettingsprofileshowcreate) + * 5.1.7 [Quotas](#quotas) + * 5.1.7.1 [RQ.SRS-006.RBAC.Quotas](#rqsrs-006rbacquotas) + * 5.1.7.2 [RQ.SRS-006.RBAC.Quotas.Keyed](#rqsrs-006rbacquotaskeyed) + * 5.1.7.3 [RQ.SRS-006.RBAC.Quotas.Queries](#rqsrs-006rbacquotasqueries) + * 5.1.7.4 [RQ.SRS-006.RBAC.Quotas.Errors](#rqsrs-006rbacquotaserrors) + * 5.1.7.5 [RQ.SRS-006.RBAC.Quotas.ResultRows](#rqsrs-006rbacquotasresultrows) + * 5.1.7.6 [RQ.SRS-006.RBAC.Quotas.ReadRows](#rqsrs-006rbacquotasreadrows) + * 5.1.7.7 [RQ.SRS-006.RBAC.Quotas.ResultBytes](#rqsrs-006rbacquotasresultbytes) + * 5.1.7.8 [RQ.SRS-006.RBAC.Quotas.ReadBytes](#rqsrs-006rbacquotasreadbytes) + * 5.1.7.9 [RQ.SRS-006.RBAC.Quotas.ExecutionTime](#rqsrs-006rbacquotasexecutiontime) + * 5.1.7.10 [RQ.SRS-006.RBAC.Quotas.ShowCreate](#rqsrs-006rbacquotasshowcreate) + * 5.1.8 [Row Policy](#row-policy) + * 5.1.8.1 [RQ.SRS-006.RBAC.RowPolicy](#rqsrs-006rbacrowpolicy) + * 5.1.8.2 [RQ.SRS-006.RBAC.RowPolicy.Condition](#rqsrs-006rbacrowpolicycondition) + * 5.1.8.3 [RQ.SRS-006.RBAC.RowPolicy.ShowCreate](#rqsrs-006rbacrowpolicyshowcreate) + * 5.2 [Specific](#specific) + * 5.2.8.1 [RQ.SRS-006.RBAC.User.Use.DefaultRole](#rqsrs-006rbacuserusedefaultrole) + * 5.2.8.2 [RQ.SRS-006.RBAC.User.Use.AllRolesWhenNoDefaultRole](#rqsrs-006rbacuseruseallroleswhennodefaultrole) + * 5.2.8.3 [RQ.SRS-006.RBAC.User.Create](#rqsrs-006rbacusercreate) + * 5.2.8.4 [RQ.SRS-006.RBAC.User.Create.IfNotExists](#rqsrs-006rbacusercreateifnotexists) + * 5.2.8.5 [RQ.SRS-006.RBAC.User.Create.Replace](#rqsrs-006rbacusercreatereplace) + * 5.2.8.6 [RQ.SRS-006.RBAC.User.Create.Password.NoPassword](#rqsrs-006rbacusercreatepasswordnopassword) + * 5.2.8.7 [RQ.SRS-006.RBAC.User.Create.Password.NoPassword.Login](#rqsrs-006rbacusercreatepasswordnopasswordlogin) + * 5.2.8.8 [RQ.SRS-006.RBAC.User.Create.Password.PlainText](#rqsrs-006rbacusercreatepasswordplaintext) + * 5.2.8.9 [RQ.SRS-006.RBAC.User.Create.Password.PlainText.Login](#rqsrs-006rbacusercreatepasswordplaintextlogin) + * 5.2.8.10 [RQ.SRS-006.RBAC.User.Create.Password.Sha256Password](#rqsrs-006rbacusercreatepasswordsha256password) + * 5.2.8.11 [RQ.SRS-006.RBAC.User.Create.Password.Sha256Password.Login](#rqsrs-006rbacusercreatepasswordsha256passwordlogin) + * 5.2.8.12 [RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash](#rqsrs-006rbacusercreatepasswordsha256hash) + * 5.2.8.13 [RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash.Login](#rqsrs-006rbacusercreatepasswordsha256hashlogin) + * 5.2.8.14 [RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password](#rqsrs-006rbacusercreatepassworddoublesha1password) + * 5.2.8.15 [RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password.Login](#rqsrs-006rbacusercreatepassworddoublesha1passwordlogin) + * 5.2.8.16 [RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash](#rqsrs-006rbacusercreatepassworddoublesha1hash) + * 5.2.8.17 
[RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash.Login](#rqsrs-006rbacusercreatepassworddoublesha1hashlogin) + * 5.2.8.18 [RQ.SRS-006.RBAC.User.Create.Host.Name](#rqsrs-006rbacusercreatehostname) + * 5.2.8.19 [RQ.SRS-006.RBAC.User.Create.Host.Regexp](#rqsrs-006rbacusercreatehostregexp) + * 5.2.8.20 [RQ.SRS-006.RBAC.User.Create.Host.IP](#rqsrs-006rbacusercreatehostip) + * 5.2.8.21 [RQ.SRS-006.RBAC.User.Create.Host.Any](#rqsrs-006rbacusercreatehostany) + * 5.2.8.22 [RQ.SRS-006.RBAC.User.Create.Host.None](#rqsrs-006rbacusercreatehostnone) + * 5.2.8.23 [RQ.SRS-006.RBAC.User.Create.Host.Local](#rqsrs-006rbacusercreatehostlocal) + * 5.2.8.24 [RQ.SRS-006.RBAC.User.Create.Host.Like](#rqsrs-006rbacusercreatehostlike) + * 5.2.8.25 [RQ.SRS-006.RBAC.User.Create.Host.Default](#rqsrs-006rbacusercreatehostdefault) + * 5.2.8.26 [RQ.SRS-006.RBAC.User.Create.DefaultRole](#rqsrs-006rbacusercreatedefaultrole) + * 5.2.8.27 [RQ.SRS-006.RBAC.User.Create.DefaultRole.None](#rqsrs-006rbacusercreatedefaultrolenone) + * 5.2.8.28 [RQ.SRS-006.RBAC.User.Create.DefaultRole.All](#rqsrs-006rbacusercreatedefaultroleall) + * 5.2.8.29 [RQ.SRS-006.RBAC.User.Create.Settings](#rqsrs-006rbacusercreatesettings) + * 5.2.8.30 [RQ.SRS-006.RBAC.User.Create.OnCluster](#rqsrs-006rbacusercreateoncluster) + * 5.2.8.31 [RQ.SRS-006.RBAC.User.Create.Syntax](#rqsrs-006rbacusercreatesyntax) + * 5.2.8.32 [RQ.SRS-006.RBAC.User.Alter](#rqsrs-006rbacuseralter) + * 5.2.8.33 [RQ.SRS-006.RBAC.User.Alter.OrderOfEvaluation](#rqsrs-006rbacuseralterorderofevaluation) + * 5.2.8.34 [RQ.SRS-006.RBAC.User.Alter.IfExists](#rqsrs-006rbacuseralterifexists) + * 5.2.8.35 [RQ.SRS-006.RBAC.User.Alter.Cluster](#rqsrs-006rbacuseraltercluster) + * 5.2.8.36 [RQ.SRS-006.RBAC.User.Alter.Rename](#rqsrs-006rbacuseralterrename) + * 5.2.8.37 [RQ.SRS-006.RBAC.User.Alter.Password.PlainText](#rqsrs-006rbacuseralterpasswordplaintext) + * 5.2.8.38 [RQ.SRS-006.RBAC.User.Alter.Password.Sha256Password](#rqsrs-006rbacuseralterpasswordsha256password) + * 5.2.8.39 [RQ.SRS-006.RBAC.User.Alter.Password.DoubleSha1Password](#rqsrs-006rbacuseralterpassworddoublesha1password) + * 5.2.8.40 [RQ.SRS-006.RBAC.User.Alter.Host.AddDrop](#rqsrs-006rbacuseralterhostadddrop) + * 5.2.8.41 [RQ.SRS-006.RBAC.User.Alter.Host.Local](#rqsrs-006rbacuseralterhostlocal) + * 5.2.8.42 [RQ.SRS-006.RBAC.User.Alter.Host.Name](#rqsrs-006rbacuseralterhostname) + * 5.2.8.43 [RQ.SRS-006.RBAC.User.Alter.Host.Regexp](#rqsrs-006rbacuseralterhostregexp) + * 5.2.8.44 [RQ.SRS-006.RBAC.User.Alter.Host.IP](#rqsrs-006rbacuseralterhostip) + * 5.2.8.45 [RQ.SRS-006.RBAC.User.Alter.Host.Like](#rqsrs-006rbacuseralterhostlike) + * 5.2.8.46 [RQ.SRS-006.RBAC.User.Alter.Host.Any](#rqsrs-006rbacuseralterhostany) + * 5.2.8.47 [RQ.SRS-006.RBAC.User.Alter.Host.None](#rqsrs-006rbacuseralterhostnone) + * 5.2.8.48 [RQ.SRS-006.RBAC.User.Alter.DefaultRole](#rqsrs-006rbacuseralterdefaultrole) + * 5.2.8.49 [RQ.SRS-006.RBAC.User.Alter.DefaultRole.All](#rqsrs-006rbacuseralterdefaultroleall) + * 5.2.8.50 [RQ.SRS-006.RBAC.User.Alter.DefaultRole.AllExcept](#rqsrs-006rbacuseralterdefaultroleallexcept) + * 5.2.8.51 [RQ.SRS-006.RBAC.User.Alter.Settings](#rqsrs-006rbacuseraltersettings) + * 5.2.8.52 [RQ.SRS-006.RBAC.User.Alter.Settings.Min](#rqsrs-006rbacuseraltersettingsmin) + * 5.2.8.53 [RQ.SRS-006.RBAC.User.Alter.Settings.Max](#rqsrs-006rbacuseraltersettingsmax) + * 5.2.8.54 [RQ.SRS-006.RBAC.User.Alter.Settings.Profile](#rqsrs-006rbacuseraltersettingsprofile) + * 5.2.8.55 [RQ.SRS-006.RBAC.User.Alter.Syntax](#rqsrs-006rbacuseraltersyntax) + * 5.2.8.56 
[RQ.SRS-006.RBAC.SetDefaultRole](#rqsrs-006rbacsetdefaultrole) + * 5.2.8.57 [RQ.SRS-006.RBAC.SetDefaultRole.CurrentUser](#rqsrs-006rbacsetdefaultrolecurrentuser) + * 5.2.8.58 [RQ.SRS-006.RBAC.SetDefaultRole.All](#rqsrs-006rbacsetdefaultroleall) + * 5.2.8.59 [RQ.SRS-006.RBAC.SetDefaultRole.AllExcept](#rqsrs-006rbacsetdefaultroleallexcept) + * 5.2.8.60 [RQ.SRS-006.RBAC.SetDefaultRole.None](#rqsrs-006rbacsetdefaultrolenone) + * 5.2.8.61 [RQ.SRS-006.RBAC.SetDefaultRole.Syntax](#rqsrs-006rbacsetdefaultrolesyntax) + * 5.2.8.62 [RQ.SRS-006.RBAC.SetRole](#rqsrs-006rbacsetrole) + * 5.2.8.63 [RQ.SRS-006.RBAC.SetRole.Default](#rqsrs-006rbacsetroledefault) + * 5.2.8.64 [RQ.SRS-006.RBAC.SetRole.None](#rqsrs-006rbacsetrolenone) + * 5.2.8.65 [RQ.SRS-006.RBAC.SetRole.All](#rqsrs-006rbacsetroleall) + * 5.2.8.66 [RQ.SRS-006.RBAC.SetRole.AllExcept](#rqsrs-006rbacsetroleallexcept) + * 5.2.8.67 [RQ.SRS-006.RBAC.SetRole.Syntax](#rqsrs-006rbacsetrolesyntax) + * 5.2.8.68 [RQ.SRS-006.RBAC.User.ShowCreateUser](#rqsrs-006rbacusershowcreateuser) + * 5.2.8.69 [RQ.SRS-006.RBAC.User.ShowCreateUser.For](#rqsrs-006rbacusershowcreateuserfor) + * 5.2.8.70 [RQ.SRS-006.RBAC.User.ShowCreateUser.Syntax](#rqsrs-006rbacusershowcreateusersyntax) + * 5.2.8.71 [RQ.SRS-006.RBAC.User.Drop](#rqsrs-006rbacuserdrop) + * 5.2.8.72 [RQ.SRS-006.RBAC.User.Drop.IfExists](#rqsrs-006rbacuserdropifexists) + * 5.2.8.73 [RQ.SRS-006.RBAC.User.Drop.OnCluster](#rqsrs-006rbacuserdroponcluster) + * 5.2.8.74 [RQ.SRS-006.RBAC.User.Drop.Syntax](#rqsrs-006rbacuserdropsyntax) + * 5.2.8.75 [RQ.SRS-006.RBAC.Role.Create](#rqsrs-006rbacrolecreate) + * 5.2.8.76 [RQ.SRS-006.RBAC.Role.Create.IfNotExists](#rqsrs-006rbacrolecreateifnotexists) + * 5.2.8.77 [RQ.SRS-006.RBAC.Role.Create.Replace](#rqsrs-006rbacrolecreatereplace) + * 5.2.8.78 [RQ.SRS-006.RBAC.Role.Create.Settings](#rqsrs-006rbacrolecreatesettings) + * 5.2.8.79 [RQ.SRS-006.RBAC.Role.Create.Syntax](#rqsrs-006rbacrolecreatesyntax) + * 5.2.8.80 [RQ.SRS-006.RBAC.Role.Alter](#rqsrs-006rbacrolealter) + * 5.2.8.81 [RQ.SRS-006.RBAC.Role.Alter.IfExists](#rqsrs-006rbacrolealterifexists) + * 5.2.8.82 [RQ.SRS-006.RBAC.Role.Alter.Cluster](#rqsrs-006rbacrolealtercluster) + * 5.2.8.83 [RQ.SRS-006.RBAC.Role.Alter.Rename](#rqsrs-006rbacrolealterrename) + * 5.2.8.84 [RQ.SRS-006.RBAC.Role.Alter.Settings](#rqsrs-006rbacrolealtersettings) + * 5.2.8.85 [RQ.SRS-006.RBAC.Role.Alter.Syntax](#rqsrs-006rbacrolealtersyntax) + * 5.2.8.86 [RQ.SRS-006.RBAC.Role.Drop](#rqsrs-006rbacroledrop) + * 5.2.8.87 [RQ.SRS-006.RBAC.Role.Drop.IfExists](#rqsrs-006rbacroledropifexists) + * 5.2.8.88 [RQ.SRS-006.RBAC.Role.Drop.Cluster](#rqsrs-006rbacroledropcluster) + * 5.2.8.89 [RQ.SRS-006.RBAC.Role.Drop.Syntax](#rqsrs-006rbacroledropsyntax) + * 5.2.8.90 [RQ.SRS-006.RBAC.Role.ShowCreate](#rqsrs-006rbacroleshowcreate) + * 5.2.8.91 [RQ.SRS-006.RBAC.Role.ShowCreate.Syntax](#rqsrs-006rbacroleshowcreatesyntax) + * 5.2.8.92 [RQ.SRS-006.RBAC.Grant.Privilege.To](#rqsrs-006rbacgrantprivilegeto) + * 5.2.8.93 [RQ.SRS-006.RBAC.Grant.Privilege.ToCurrentUser](#rqsrs-006rbacgrantprivilegetocurrentuser) + * 5.2.8.94 [RQ.SRS-006.RBAC.Grant.Privilege.Select](#rqsrs-006rbacgrantprivilegeselect) + * 5.2.8.95 [RQ.SRS-006.RBAC.Grant.Privilege.Insert](#rqsrs-006rbacgrantprivilegeinsert) + * 5.2.8.96 [RQ.SRS-006.RBAC.Grant.Privilege.Alter](#rqsrs-006rbacgrantprivilegealter) + * 5.2.8.97 [RQ.SRS-006.RBAC.Grant.Privilege.Create](#rqsrs-006rbacgrantprivilegecreate) + * 5.2.8.98 [RQ.SRS-006.RBAC.Grant.Privilege.Drop](#rqsrs-006rbacgrantprivilegedrop) + * 5.2.8.99 
[RQ.SRS-006.RBAC.Grant.Privilege.Truncate](#rqsrs-006rbacgrantprivilegetruncate) + * 5.2.8.100 [RQ.SRS-006.RBAC.Grant.Privilege.Optimize](#rqsrs-006rbacgrantprivilegeoptimize) + * 5.2.8.101 [RQ.SRS-006.RBAC.Grant.Privilege.Show](#rqsrs-006rbacgrantprivilegeshow) + * 5.2.8.102 [RQ.SRS-006.RBAC.Grant.Privilege.KillQuery](#rqsrs-006rbacgrantprivilegekillquery) + * 5.2.8.103 [RQ.SRS-006.RBAC.Grant.Privilege.AccessManagement](#rqsrs-006rbacgrantprivilegeaccessmanagement) + * 5.2.8.104 [RQ.SRS-006.RBAC.Grant.Privilege.System](#rqsrs-006rbacgrantprivilegesystem) + * 5.2.8.105 [RQ.SRS-006.RBAC.Grant.Privilege.Introspection](#rqsrs-006rbacgrantprivilegeintrospection) + * 5.2.8.106 [RQ.SRS-006.RBAC.Grant.Privilege.Sources](#rqsrs-006rbacgrantprivilegesources) + * 5.2.8.107 [RQ.SRS-006.RBAC.Grant.Privilege.DictGet](#rqsrs-006rbacgrantprivilegedictget) + * 5.2.8.108 [RQ.SRS-006.RBAC.Grant.Privilege.None](#rqsrs-006rbacgrantprivilegenone) + * 5.2.8.109 [RQ.SRS-006.RBAC.Grant.Privilege.All](#rqsrs-006rbacgrantprivilegeall) + * 5.2.8.110 [RQ.SRS-006.RBAC.Grant.Privilege.GrantOption](#rqsrs-006rbacgrantprivilegegrantoption) + * 5.2.8.111 [RQ.SRS-006.RBAC.Grant.Privilege.On](#rqsrs-006rbacgrantprivilegeon) + * 5.2.8.112 [RQ.SRS-006.RBAC.Grant.Privilege.PrivilegeColumns](#rqsrs-006rbacgrantprivilegeprivilegecolumns) + * 5.2.8.113 [RQ.SRS-006.RBAC.Grant.Privilege.OnCluster](#rqsrs-006rbacgrantprivilegeoncluster) + * 5.2.8.114 [RQ.SRS-006.RBAC.Grant.Privilege.Syntax](#rqsrs-006rbacgrantprivilegesyntax) + * 5.2.8.115 [RQ.SRS-006.RBAC.Revoke.Privilege.Cluster](#rqsrs-006rbacrevokeprivilegecluster) + * 5.2.8.116 [RQ.SRS-006.RBAC.Revoke.Privilege.Any](#rqsrs-006rbacrevokeprivilegeany) + * 5.2.8.117 [RQ.SRS-006.RBAC.Revoke.Privilege.Select](#rqsrs-006rbacrevokeprivilegeselect) + * 5.2.8.118 [RQ.SRS-006.RBAC.Revoke.Privilege.Insert](#rqsrs-006rbacrevokeprivilegeinsert) + * 5.2.8.119 [RQ.SRS-006.RBAC.Revoke.Privilege.Alter](#rqsrs-006rbacrevokeprivilegealter) + * 5.2.8.120 [RQ.SRS-006.RBAC.Revoke.Privilege.Create](#rqsrs-006rbacrevokeprivilegecreate) + * 5.2.8.121 [RQ.SRS-006.RBAC.Revoke.Privilege.Drop](#rqsrs-006rbacrevokeprivilegedrop) + * 5.2.8.122 [RQ.SRS-006.RBAC.Revoke.Privilege.Truncate](#rqsrs-006rbacrevokeprivilegetruncate) + * 5.2.8.123 [RQ.SRS-006.RBAC.Revoke.Privilege.Optimize](#rqsrs-006rbacrevokeprivilegeoptimize) + * 5.2.8.124 [RQ.SRS-006.RBAC.Revoke.Privilege.Show](#rqsrs-006rbacrevokeprivilegeshow) + * 5.2.8.125 [RQ.SRS-006.RBAC.Revoke.Privilege.KillQuery](#rqsrs-006rbacrevokeprivilegekillquery) + * 5.2.8.126 [RQ.SRS-006.RBAC.Revoke.Privilege.AccessManagement](#rqsrs-006rbacrevokeprivilegeaccessmanagement) + * 5.2.8.127 [RQ.SRS-006.RBAC.Revoke.Privilege.System](#rqsrs-006rbacrevokeprivilegesystem) + * 5.2.8.128 [RQ.SRS-006.RBAC.Revoke.Privilege.Introspection](#rqsrs-006rbacrevokeprivilegeintrospection) + * 5.2.8.129 [RQ.SRS-006.RBAC.Revoke.Privilege.Sources](#rqsrs-006rbacrevokeprivilegesources) + * 5.2.8.130 [RQ.SRS-006.RBAC.Revoke.Privilege.DictGet](#rqsrs-006rbacrevokeprivilegedictget) + * 5.2.8.131 [RQ.SRS-006.RBAC.Revoke.Privilege.PrivelegeColumns](#rqsrs-006rbacrevokeprivilegeprivelegecolumns) + * 5.2.8.132 [RQ.SRS-006.RBAC.Revoke.Privilege.Multiple](#rqsrs-006rbacrevokeprivilegemultiple) + * 5.2.8.133 [RQ.SRS-006.RBAC.Revoke.Privilege.All](#rqsrs-006rbacrevokeprivilegeall) + * 5.2.8.134 [RQ.SRS-006.RBAC.Revoke.Privilege.None](#rqsrs-006rbacrevokeprivilegenone) + * 5.2.8.135 [RQ.SRS-006.RBAC.Revoke.Privilege.On](#rqsrs-006rbacrevokeprivilegeon) + * 5.2.8.136 
[RQ.SRS-006.RBAC.Revoke.Privilege.From](#rqsrs-006rbacrevokeprivilegefrom) + * 5.2.8.137 [RQ.SRS-006.RBAC.Revoke.Privilege.Syntax](#rqsrs-006rbacrevokeprivilegesyntax) + * 5.2.8.138 [RQ.SRS-006.RBAC.PartialRevoke.Syntax](#rqsrs-006rbacpartialrevokesyntax) + * 5.2.8.139 [RQ.SRS-006.RBAC.Grant.Role](#rqsrs-006rbacgrantrole) + * 5.2.8.140 [RQ.SRS-006.RBAC.Grant.Role.CurrentUser](#rqsrs-006rbacgrantrolecurrentuser) + * 5.2.8.141 [RQ.SRS-006.RBAC.Grant.Role.AdminOption](#rqsrs-006rbacgrantroleadminoption) + * 5.2.8.142 [RQ.SRS-006.RBAC.Grant.Role.OnCluster](#rqsrs-006rbacgrantroleoncluster) + * 5.2.8.143 [RQ.SRS-006.RBAC.Grant.Role.Syntax](#rqsrs-006rbacgrantrolesyntax) + * 5.2.8.144 [RQ.SRS-006.RBAC.Revoke.Role](#rqsrs-006rbacrevokerole) + * 5.2.8.145 [RQ.SRS-006.RBAC.Revoke.Role.Keywords](#rqsrs-006rbacrevokerolekeywords) + * 5.2.8.146 [RQ.SRS-006.RBAC.Revoke.Role.Cluster](#rqsrs-006rbacrevokerolecluster) + * 5.2.8.147 [RQ.SRS-006.RBAC.Revoke.AdminOption](#rqsrs-006rbacrevokeadminoption) + * 5.2.8.148 [RQ.SRS-006.RBAC.Revoke.Role.Syntax](#rqsrs-006rbacrevokerolesyntax) + * 5.2.8.149 [RQ.SRS-006.RBAC.Show.Grants](#rqsrs-006rbacshowgrants) + * 5.2.8.150 [RQ.SRS-006.RBAC.Show.Grants.For](#rqsrs-006rbacshowgrantsfor) + * 5.2.8.151 [RQ.SRS-006.RBAC.Show.Grants.Syntax](#rqsrs-006rbacshowgrantssyntax) + * 5.2.8.152 [RQ.SRS-006.RBAC.SettingsProfile.Create](#rqsrs-006rbacsettingsprofilecreate) + * 5.2.8.153 [RQ.SRS-006.RBAC.SettingsProfile.Create.IfNotExists](#rqsrs-006rbacsettingsprofilecreateifnotexists) + * 5.2.8.154 [RQ.SRS-006.RBAC.SettingsProfile.Create.Replace](#rqsrs-006rbacsettingsprofilecreatereplace) + * 5.2.8.155 [RQ.SRS-006.RBAC.SettingsProfile.Create.Variables](#rqsrs-006rbacsettingsprofilecreatevariables) + * 5.2.8.156 [RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Value](#rqsrs-006rbacsettingsprofilecreatevariablesvalue) + * 5.2.8.157 [RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Constraints](#rqsrs-006rbacsettingsprofilecreatevariablesconstraints) + * 5.2.8.158 [RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment](#rqsrs-006rbacsettingsprofilecreateassignment) + * 5.2.8.159 [RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.None](#rqsrs-006rbacsettingsprofilecreateassignmentnone) + * 5.2.8.160 [RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.All](#rqsrs-006rbacsettingsprofilecreateassignmentall) + * 5.2.8.161 [RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.AllExcept](#rqsrs-006rbacsettingsprofilecreateassignmentallexcept) + * 5.2.8.162 [RQ.SRS-006.RBAC.SettingsProfile.Create.Inherit](#rqsrs-006rbacsettingsprofilecreateinherit) + * 5.2.8.163 [RQ.SRS-006.RBAC.SettingsProfile.Create.OnCluster](#rqsrs-006rbacsettingsprofilecreateoncluster) + * 5.2.8.164 [RQ.SRS-006.RBAC.SettingsProfile.Create.Syntax](#rqsrs-006rbacsettingsprofilecreatesyntax) + * 5.2.8.165 [RQ.SRS-006.RBAC.SettingsProfile.Alter](#rqsrs-006rbacsettingsprofilealter) + * 5.2.8.166 [RQ.SRS-006.RBAC.SettingsProfile.Alter.IfExists](#rqsrs-006rbacsettingsprofilealterifexists) + * 5.2.8.167 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Rename](#rqsrs-006rbacsettingsprofilealterrename) + * 5.2.8.168 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables](#rqsrs-006rbacsettingsprofilealtervariables) + * 5.2.8.169 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Value](#rqsrs-006rbacsettingsprofilealtervariablesvalue) + * 5.2.8.170 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Constraints](#rqsrs-006rbacsettingsprofilealtervariablesconstraints) + * 5.2.8.171 
[RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment](#rqsrs-006rbacsettingsprofilealterassignment) + * 5.2.8.172 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.None](#rqsrs-006rbacsettingsprofilealterassignmentnone) + * 5.2.8.173 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.All](#rqsrs-006rbacsettingsprofilealterassignmentall) + * 5.2.8.174 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.AllExcept](#rqsrs-006rbacsettingsprofilealterassignmentallexcept) + * 5.2.8.175 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.Inherit](#rqsrs-006rbacsettingsprofilealterassignmentinherit) + * 5.2.8.176 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.OnCluster](#rqsrs-006rbacsettingsprofilealterassignmentoncluster) + * 5.2.8.177 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Syntax](#rqsrs-006rbacsettingsprofilealtersyntax) + * 5.2.8.178 [RQ.SRS-006.RBAC.SettingsProfile.Drop](#rqsrs-006rbacsettingsprofiledrop) + * 5.2.8.179 [RQ.SRS-006.RBAC.SettingsProfile.Drop.IfExists](#rqsrs-006rbacsettingsprofiledropifexists) + * 5.2.8.180 [RQ.SRS-006.RBAC.SettingsProfile.Drop.OnCluster](#rqsrs-006rbacsettingsprofiledroponcluster) + * 5.2.8.181 [RQ.SRS-006.RBAC.SettingsProfile.Drop.Syntax](#rqsrs-006rbacsettingsprofiledropsyntax) + * 5.2.8.182 [RQ.SRS-006.RBAC.SettingsProfile.ShowCreateSettingsProfile](#rqsrs-006rbacsettingsprofileshowcreatesettingsprofile) + * 5.2.8.183 [RQ.SRS-006.RBAC.Quota.Create](#rqsrs-006rbacquotacreate) + * 5.2.8.184 [RQ.SRS-006.RBAC.Quota.Create.IfNotExists](#rqsrs-006rbacquotacreateifnotexists) + * 5.2.8.185 [RQ.SRS-006.RBAC.Quota.Create.Replace](#rqsrs-006rbacquotacreatereplace) + * 5.2.8.186 [RQ.SRS-006.RBAC.Quota.Create.Cluster](#rqsrs-006rbacquotacreatecluster) + * 5.2.8.187 [RQ.SRS-006.RBAC.Quota.Create.Interval](#rqsrs-006rbacquotacreateinterval) + * 5.2.8.188 [RQ.SRS-006.RBAC.Quota.Create.Interval.Randomized](#rqsrs-006rbacquotacreateintervalrandomized) + * 5.2.8.189 [RQ.SRS-006.RBAC.Quota.Create.Queries](#rqsrs-006rbacquotacreatequeries) + * 5.2.8.190 [RQ.SRS-006.RBAC.Quota.Create.Errors](#rqsrs-006rbacquotacreateerrors) + * 5.2.8.191 [RQ.SRS-006.RBAC.Quota.Create.ResultRows](#rqsrs-006rbacquotacreateresultrows) + * 5.2.8.192 [RQ.SRS-006.RBAC.Quota.Create.ReadRows](#rqsrs-006rbacquotacreatereadrows) + * 5.2.8.193 [RQ.SRS-006.RBAC.Quota.Create.ResultBytes](#rqsrs-006rbacquotacreateresultbytes) + * 5.2.8.194 [RQ.SRS-006.RBAC.Quota.Create.ReadBytes](#rqsrs-006rbacquotacreatereadbytes) + * 5.2.8.195 [RQ.SRS-006.RBAC.Quota.Create.ExecutionTime](#rqsrs-006rbacquotacreateexecutiontime) + * 5.2.8.196 [RQ.SRS-006.RBAC.Quota.Create.NoLimits](#rqsrs-006rbacquotacreatenolimits) + * 5.2.8.197 [RQ.SRS-006.RBAC.Quota.Create.TrackingOnly](#rqsrs-006rbacquotacreatetrackingonly) + * 5.2.8.198 [RQ.SRS-006.RBAC.Quota.Create.KeyedBy](#rqsrs-006rbacquotacreatekeyedby) + * 5.2.8.199 [RQ.SRS-006.RBAC.Quota.Create.KeyedByOptions](#rqsrs-006rbacquotacreatekeyedbyoptions) + * 5.2.8.200 [RQ.SRS-006.RBAC.Quota.Create.Assignment](#rqsrs-006rbacquotacreateassignment) + * 5.2.8.201 [RQ.SRS-006.RBAC.Quota.Create.Assignment.None](#rqsrs-006rbacquotacreateassignmentnone) + * 5.2.8.202 [RQ.SRS-006.RBAC.Quota.Create.Assignment.All](#rqsrs-006rbacquotacreateassignmentall) + * 5.2.8.203 [RQ.SRS-006.RBAC.Quota.Create.Assignment.Except](#rqsrs-006rbacquotacreateassignmentexcept) + * 5.2.8.204 [RQ.SRS-006.RBAC.Quota.Create.Syntax](#rqsrs-006rbacquotacreatesyntax) + * 5.2.8.205 [RQ.SRS-006.RBAC.Quota.Alter](#rqsrs-006rbacquotaalter) + * 5.2.8.206 [RQ.SRS-006.RBAC.Quota.Alter.IfExists](#rqsrs-006rbacquotaalterifexists) + * 
5.2.8.207 [RQ.SRS-006.RBAC.Quota.Alter.Rename](#rqsrs-006rbacquotaalterrename) + * 5.2.8.208 [RQ.SRS-006.RBAC.Quota.Alter.Cluster](#rqsrs-006rbacquotaaltercluster) + * 5.2.8.209 [RQ.SRS-006.RBAC.Quota.Alter.Interval](#rqsrs-006rbacquotaalterinterval) + * 5.2.8.210 [RQ.SRS-006.RBAC.Quota.Alter.Interval.Randomized](#rqsrs-006rbacquotaalterintervalrandomized) + * 5.2.8.211 [RQ.SRS-006.RBAC.Quota.Alter.Queries](#rqsrs-006rbacquotaalterqueries) + * 5.2.8.212 [RQ.SRS-006.RBAC.Quota.Alter.Errors](#rqsrs-006rbacquotaaltererrors) + * 5.2.8.213 [RQ.SRS-006.RBAC.Quota.Alter.ResultRows](#rqsrs-006rbacquotaalterresultrows) + * 5.2.8.214 [RQ.SRS-006.RBAC.Quota.Alter.ReadRows](#rqsrs-006rbacquotaalterreadrows) + * 5.2.8.215 [RQ.SRS-006.RBAC.Quota.ALter.ResultBytes](#rqsrs-006rbacquotaalterresultbytes) + * 5.2.8.216 [RQ.SRS-006.RBAC.Quota.Alter.ReadBytes](#rqsrs-006rbacquotaalterreadbytes) + * 5.2.8.217 [RQ.SRS-006.RBAC.Quota.Alter.ExecutionTime](#rqsrs-006rbacquotaalterexecutiontime) + * 5.2.8.218 [RQ.SRS-006.RBAC.Quota.Alter.NoLimits](#rqsrs-006rbacquotaalternolimits) + * 5.2.8.219 [RQ.SRS-006.RBAC.Quota.Alter.TrackingOnly](#rqsrs-006rbacquotaaltertrackingonly) + * 5.2.8.220 [RQ.SRS-006.RBAC.Quota.Alter.KeyedBy](#rqsrs-006rbacquotaalterkeyedby) + * 5.2.8.221 [RQ.SRS-006.RBAC.Quota.Alter.KeyedByOptions](#rqsrs-006rbacquotaalterkeyedbyoptions) + * 5.2.8.222 [RQ.SRS-006.RBAC.Quota.Alter.Assignment](#rqsrs-006rbacquotaalterassignment) + * 5.2.8.223 [RQ.SRS-006.RBAC.Quota.Alter.Assignment.None](#rqsrs-006rbacquotaalterassignmentnone) + * 5.2.8.224 [RQ.SRS-006.RBAC.Quota.Alter.Assignment.All](#rqsrs-006rbacquotaalterassignmentall) + * 5.2.8.225 [RQ.SRS-006.RBAC.Quota.Alter.Assignment.Except](#rqsrs-006rbacquotaalterassignmentexcept) + * 5.2.8.226 [RQ.SRS-006.RBAC.Quota.Alter.Syntax](#rqsrs-006rbacquotaaltersyntax) + * 5.2.8.227 [RQ.SRS-006.RBAC.Quota.Drop](#rqsrs-006rbacquotadrop) + * 5.2.8.228 [RQ.SRS-006.RBAC.Quota.Drop.IfExists](#rqsrs-006rbacquotadropifexists) + * 5.2.8.229 [RQ.SRS-006.RBAC.Quota.Drop.Cluster](#rqsrs-006rbacquotadropcluster) + * 5.2.8.230 [RQ.SRS-006.RBAC.Quota.Drop.Syntax](#rqsrs-006rbacquotadropsyntax) + * 5.2.8.231 [RQ.SRS-006.RBAC.Quota.ShowQuotas](#rqsrs-006rbacquotashowquotas) + * 5.2.8.232 [RQ.SRS-006.RBAC.Quota.ShowQuotas.IntoOutfile](#rqsrs-006rbacquotashowquotasintooutfile) + * 5.2.8.233 [RQ.SRS-006.RBAC.Quota.ShowQuotas.Format](#rqsrs-006rbacquotashowquotasformat) + * 5.2.8.234 [RQ.SRS-006.RBAC.Quota.ShowQuotas.Settings](#rqsrs-006rbacquotashowquotassettings) + * 5.2.8.235 [RQ.SRS-006.RBAC.Quota.ShowQuotas.Syntax](#rqsrs-006rbacquotashowquotassyntax) + * 5.2.8.236 [RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Name](#rqsrs-006rbacquotashowcreatequotaname) + * 5.2.8.237 [RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Current](#rqsrs-006rbacquotashowcreatequotacurrent) + * 5.2.8.238 [RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Syntax](#rqsrs-006rbacquotashowcreatequotasyntax) + * 5.2.8.239 [RQ.SRS-006.RBAC.RowPolicy.Create](#rqsrs-006rbacrowpolicycreate) + * 5.2.8.240 [RQ.SRS-006.RBAC.RowPolicy.Create.IfNotExists](#rqsrs-006rbacrowpolicycreateifnotexists) + * 5.2.8.241 [RQ.SRS-006.RBAC.RowPolicy.Create.Replace](#rqsrs-006rbacrowpolicycreatereplace) + * 5.2.8.242 [RQ.SRS-006.RBAC.RowPolicy.Create.OnCluster](#rqsrs-006rbacrowpolicycreateoncluster) + * 5.2.8.243 [RQ.SRS-006.RBAC.RowPolicy.Create.On](#rqsrs-006rbacrowpolicycreateon) + * 5.2.8.244 [RQ.SRS-006.RBAC.RowPolicy.Create.Access](#rqsrs-006rbacrowpolicycreateaccess) + * 5.2.8.245 
[RQ.SRS-006.RBAC.RowPolicy.Create.Access.Permissive](#rqsrs-006rbacrowpolicycreateaccesspermissive) + * 5.2.8.246 [RQ.SRS-006.RBAC.RowPolicy.Create.Access.Restrictive](#rqsrs-006rbacrowpolicycreateaccessrestrictive) + * 5.2.8.247 [RQ.SRS-006.RBAC.RowPolicy.Create.ForSelect](#rqsrs-006rbacrowpolicycreateforselect) + * 5.2.8.248 [RQ.SRS-006.RBAC.RowPolicy.Create.Condition](#rqsrs-006rbacrowpolicycreatecondition) + * 5.2.8.249 [RQ.SRS-006.RBAC.RowPolicy.Create.Assignment](#rqsrs-006rbacrowpolicycreateassignment) + * 5.2.8.250 [RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.None](#rqsrs-006rbacrowpolicycreateassignmentnone) + * 5.2.8.251 [RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.All](#rqsrs-006rbacrowpolicycreateassignmentall) + * 5.2.8.252 [RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.AllExcept](#rqsrs-006rbacrowpolicycreateassignmentallexcept) + * 5.2.8.253 [RQ.SRS-006.RBAC.RowPolicy.Create.Syntax](#rqsrs-006rbacrowpolicycreatesyntax) + * 5.2.8.254 [RQ.SRS-006.RBAC.RowPolicy.Alter](#rqsrs-006rbacrowpolicyalter) + * 5.2.8.255 [RQ.SRS-006.RBAC.RowPolicy.Alter.IfExists](#rqsrs-006rbacrowpolicyalterifexists) + * 5.2.8.256 [RQ.SRS-006.RBAC.RowPolicy.Alter.ForSelect](#rqsrs-006rbacrowpolicyalterforselect) + * 5.2.8.257 [RQ.SRS-006.RBAC.RowPolicy.Alter.OnCluster](#rqsrs-006rbacrowpolicyalteroncluster) + * 5.2.8.258 [RQ.SRS-006.RBAC.RowPolicy.Alter.On](#rqsrs-006rbacrowpolicyalteron) + * 5.2.8.259 [RQ.SRS-006.RBAC.RowPolicy.Alter.Rename](#rqsrs-006rbacrowpolicyalterrename) + * 5.2.8.260 [RQ.SRS-006.RBAC.RowPolicy.Alter.Access](#rqsrs-006rbacrowpolicyalteraccess) + * 5.2.8.261 [RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Permissive](#rqsrs-006rbacrowpolicyalteraccesspermissive) + * 5.2.8.262 [RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Restrictive](#rqsrs-006rbacrowpolicyalteraccessrestrictive) + * 5.2.8.263 [RQ.SRS-006.RBAC.RowPolicy.Alter.Condition](#rqsrs-006rbacrowpolicyaltercondition) + * 5.2.8.264 [RQ.SRS-006.RBAC.RowPolicy.Alter.Condition.None](#rqsrs-006rbacrowpolicyalterconditionnone) + * 5.2.8.265 [RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment](#rqsrs-006rbacrowpolicyalterassignment) + * 5.2.8.266 [RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.None](#rqsrs-006rbacrowpolicyalterassignmentnone) + * 5.2.8.267 [RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.All](#rqsrs-006rbacrowpolicyalterassignmentall) + * 5.2.8.268 [RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.AllExcept](#rqsrs-006rbacrowpolicyalterassignmentallexcept) + * 5.2.8.269 [RQ.SRS-006.RBAC.RowPolicy.Alter.Syntax](#rqsrs-006rbacrowpolicyaltersyntax) + * 5.2.8.270 [RQ.SRS-006.RBAC.RowPolicy.Drop](#rqsrs-006rbacrowpolicydrop) + * 5.2.8.271 [RQ.SRS-006.RBAC.RowPolicy.Drop.IfExists](#rqsrs-006rbacrowpolicydropifexists) + * 5.2.8.272 [RQ.SRS-006.RBAC.RowPolicy.Drop.On](#rqsrs-006rbacrowpolicydropon) + * 5.2.8.273 [RQ.SRS-006.RBAC.RowPolicy.Drop.OnCluster](#rqsrs-006rbacrowpolicydroponcluster) + * 5.2.8.274 [RQ.SRS-006.RBAC.RowPolicy.Drop.Syntax](#rqsrs-006rbacrowpolicydropsyntax) + * 5.2.8.275 [RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy](#rqsrs-006rbacrowpolicyshowcreaterowpolicy) + * 5.2.8.276 [RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.On](#rqsrs-006rbacrowpolicyshowcreaterowpolicyon) + * 5.2.8.277 [RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.Syntax](#rqsrs-006rbacrowpolicyshowcreaterowpolicysyntax) + * 5.2.8.278 [RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies](#rqsrs-006rbacrowpolicyshowrowpolicies) + * 5.2.8.279 [RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.On](#rqsrs-006rbacrowpolicyshowrowpolicieson) + * 5.2.8.280 
[RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.Syntax](#rqsrs-006rbacrowpolicyshowrowpoliciessyntax) + * 5.2.9 [Table Privileges](#table-privileges) + * 5.2.9.1 [RQ.SRS-006.RBAC.Table.PublicTables](#rqsrs-006rbactablepublictables) + * 5.2.9.2 [RQ.SRS-006.RBAC.Table.ShowTables](#rqsrs-006rbactableshowtables) + * 5.2.9.3 [Distributed Tables](#distributed-tables) + * 5.2.9.3.1 [RQ.SRS-006.RBAC.Table.DistributedTable.Create](#rqsrs-006rbactabledistributedtablecreate) + * 5.2.9.3.2 [RQ.SRS-006.RBAC.Table.DistributedTable.Select](#rqsrs-006rbactabledistributedtableselect) + * 5.2.9.3.3 [RQ.SRS-006.RBAC.Table.DistributedTable.Insert](#rqsrs-006rbactabledistributedtableinsert) + * 5.2.9.3.4 [RQ.SRS-006.RBAC.Table.DistributedTable.SpecialTables](#rqsrs-006rbactabledistributedtablespecialtables) + * 5.2.9.3.5 [RQ.SRS-006.RBAC.Table.DistributedTable.LocalUser](#rqsrs-006rbactabledistributedtablelocaluser) + * 5.2.9.3.6 [RQ.SRS-006.RBAC.Table.DistributedTable.SameUserDifferentNodesDifferentPrivileges](#rqsrs-006rbactabledistributedtablesameuserdifferentnodesdifferentprivileges) + * 5.2.10 [Views](#views) + * 5.2.10.1 [View](#view) + * 5.2.10.1.1 [RQ.SRS-006.RBAC.View](#rqsrs-006rbacview) + * 5.2.10.1.2 [RQ.SRS-006.RBAC.View.Create](#rqsrs-006rbacviewcreate) + * 5.2.10.1.3 [RQ.SRS-006.RBAC.View.Select](#rqsrs-006rbacviewselect) + * 5.2.10.1.4 [RQ.SRS-006.RBAC.View.Drop](#rqsrs-006rbacviewdrop) + * 5.2.10.2 [Materialized View](#materialized-view) + * 5.2.10.2.1 [RQ.SRS-006.RBAC.MaterializedView](#rqsrs-006rbacmaterializedview) + * 5.2.10.2.2 [RQ.SRS-006.RBAC.MaterializedView.Create](#rqsrs-006rbacmaterializedviewcreate) + * 5.2.10.2.3 [RQ.SRS-006.RBAC.MaterializedView.Select](#rqsrs-006rbacmaterializedviewselect) + * 5.2.10.2.4 [RQ.SRS-006.RBAC.MaterializedView.Select.TargetTable](#rqsrs-006rbacmaterializedviewselecttargettable) + * 5.2.10.2.5 [RQ.SRS-006.RBAC.MaterializedView.Select.SourceTable](#rqsrs-006rbacmaterializedviewselectsourcetable) + * 5.2.10.2.6 [RQ.SRS-006.RBAC.MaterializedView.Drop](#rqsrs-006rbacmaterializedviewdrop) + * 5.2.10.2.7 [RQ.SRS-006.RBAC.MaterializedView.ModifyQuery](#rqsrs-006rbacmaterializedviewmodifyquery) + * 5.2.10.2.8 [RQ.SRS-006.RBAC.MaterializedView.Insert](#rqsrs-006rbacmaterializedviewinsert) + * 5.2.10.2.9 [RQ.SRS-006.RBAC.MaterializedView.Insert.SourceTable](#rqsrs-006rbacmaterializedviewinsertsourcetable) + * 5.2.10.2.10 [RQ.SRS-006.RBAC.MaterializedView.Insert.TargetTable](#rqsrs-006rbacmaterializedviewinserttargettable) + * 5.2.10.3 [Live View](#live-view) + * 5.2.10.3.1 [RQ.SRS-006.RBAC.LiveView](#rqsrs-006rbacliveview) + * 5.2.10.3.2 [RQ.SRS-006.RBAC.LiveView.Create](#rqsrs-006rbacliveviewcreate) + * 5.2.10.3.3 [RQ.SRS-006.RBAC.LiveView.Select](#rqsrs-006rbacliveviewselect) + * 5.2.10.3.4 [RQ.SRS-006.RBAC.LiveView.Drop](#rqsrs-006rbacliveviewdrop) + * 5.2.10.3.5 [RQ.SRS-006.RBAC.LiveView.Refresh](#rqsrs-006rbacliveviewrefresh) + * 5.2.11 [Privileges](#privileges) + * 5.2.11.1 [RQ.SRS-006.RBAC.Privileges.Usage](#rqsrs-006rbacprivilegesusage) + * 5.2.11.2 [Select](#select) + * 5.2.11.2.1 [RQ.SRS-006.RBAC.Privileges.Select](#rqsrs-006rbacprivilegesselect) + * 5.2.11.2.2 [RQ.SRS-006.RBAC.Privileges.Select.Grant](#rqsrs-006rbacprivilegesselectgrant) + * 5.2.11.2.3 [RQ.SRS-006.RBAC.Privileges.Select.Revoke](#rqsrs-006rbacprivilegesselectrevoke) + * 5.2.11.2.4 [RQ.SRS-006.RBAC.Privileges.Select.Column](#rqsrs-006rbacprivilegesselectcolumn) + * 5.2.11.2.5 [RQ.SRS-006.RBAC.Privileges.Select.Cluster](#rqsrs-006rbacprivilegesselectcluster) + * 5.2.11.2.6 
[RQ.SRS-006.RBAC.Privileges.Select.GrantOption](#rqsrs-006rbacprivilegesselectgrantoption) + * 5.2.11.2.7 [RQ.SRS-006.RBAC.Privileges.Select.GrantOption.Grant](#rqsrs-006rbacprivilegesselectgrantoptiongrant) + * 5.2.11.2.8 [RQ.SRS-006.RBAC.Privileges.Select.GrantOption.Revoke](#rqsrs-006rbacprivilegesselectgrantoptionrevoke) + * 5.2.11.2.9 [RQ.SRS-006.RBAC.Privileges.Select.TableEngines](#rqsrs-006rbacprivilegesselecttableengines) + * 5.2.11.3 [Insert](#insert) + * 5.2.11.3.1 [RQ.SRS-006.RBAC.Privileges.Insert](#rqsrs-006rbacprivilegesinsert) + * 5.2.11.3.2 [RQ.SRS-006.RBAC.Privileges.Insert.Grant](#rqsrs-006rbacprivilegesinsertgrant) + * 5.2.11.3.3 [RQ.SRS-006.RBAC.Privileges.Insert.Revoke](#rqsrs-006rbacprivilegesinsertrevoke) + * 5.2.11.3.4 [RQ.SRS-006.RBAC.Privileges.Insert.Column](#rqsrs-006rbacprivilegesinsertcolumn) + * 5.2.11.3.5 [RQ.SRS-006.RBAC.Privileges.Insert.Cluster](#rqsrs-006rbacprivilegesinsertcluster) + * 5.2.11.3.6 [RQ.SRS-006.RBAC.Privileges.Insert.GrantOption](#rqsrs-006rbacprivilegesinsertgrantoption) + * 5.2.11.3.7 [RQ.SRS-006.RBAC.Privileges.Insert.GrantOption.Grant](#rqsrs-006rbacprivilegesinsertgrantoptiongrant) + * 5.2.11.3.8 [RQ.SRS-006.RBAC.Privileges.Insert.GrantOption.Revoke](#rqsrs-006rbacprivilegesinsertgrantoptionrevoke) + * 5.2.11.3.9 [RQ.SRS-006.RBAC.Privileges.Insert.TableEngines](#rqsrs-006rbacprivilegesinserttableengines) + * 5.2.11.4 [AlterColumn](#altercolumn) + * 5.2.11.4.1 [RQ.SRS-006.RBAC.Privileges.AlterColumn](#rqsrs-006rbacprivilegesaltercolumn) + * 5.2.11.4.2 [RQ.SRS-006.RBAC.Privileges.AlterColumn.Grant](#rqsrs-006rbacprivilegesaltercolumngrant) + * 5.2.11.4.3 [RQ.SRS-006.RBAC.Privileges.AlterColumn.Revoke](#rqsrs-006rbacprivilegesaltercolumnrevoke) + * 5.2.11.4.4 [RQ.SRS-006.RBAC.Privileges.AlterColumn.Column](#rqsrs-006rbacprivilegesaltercolumncolumn) + * 5.2.11.4.5 [RQ.SRS-006.RBAC.Privileges.AlterColumn.Cluster](#rqsrs-006rbacprivilegesaltercolumncluster) + * 5.2.11.4.6 [RQ.SRS-006.RBAC.Privileges.AlterColumn.GrantOption](#rqsrs-006rbacprivilegesaltercolumngrantoption) + * 5.2.11.4.7 [RQ.SRS-006.RBAC.Privileges.AlterColumn.GrantOption.Grant](#rqsrs-006rbacprivilegesaltercolumngrantoptiongrant) + * 5.2.11.4.8 [RQ.SRS-006.RBAC.Privileges.AlterColumn.GrantOption.Revoke](#rqsrs-006rbacprivilegesaltercolumngrantoptionrevoke) + * 5.2.11.4.9 [RQ.SRS-006.RBAC.Privileges.AlterColumn.TableEngines](#rqsrs-006rbacprivilegesaltercolumntableengines) + * 5.2.11.5 [AlterIndex](#alterindex) + * 5.2.11.5.1 [RQ.SRS-006.RBAC.Privileges.AlterIndex](#rqsrs-006rbacprivilegesalterindex) + * 5.2.11.5.2 [RQ.SRS-006.RBAC.Privileges.AlterIndex.Grant](#rqsrs-006rbacprivilegesalterindexgrant) + * 5.2.11.5.3 [RQ.SRS-006.RBAC.Privileges.AlterIndex.Revoke](#rqsrs-006rbacprivilegesalterindexrevoke) + * 5.2.11.5.4 [RQ.SRS-006.RBAC.Privileges.AlterIndex.Cluster](#rqsrs-006rbacprivilegesalterindexcluster) + * 5.2.11.5.5 [RQ.SRS-006.RBAC.Privileges.AlterIndex.GrantOption](#rqsrs-006rbacprivilegesalterindexgrantoption) + * 5.2.11.5.6 [RQ.SRS-006.RBAC.Privileges.AlterIndex.GrantOption.Grant](#rqsrs-006rbacprivilegesalterindexgrantoptiongrant) + * 5.2.11.5.7 [RQ.SRS-006.RBAC.Privileges.AlterIndex.GrantOption.Revoke](#rqsrs-006rbacprivilegesalterindexgrantoptionrevoke) + * 5.2.11.5.8 [RQ.SRS-006.RBAC.Privileges.AlterIndex.TableEngines](#rqsrs-006rbacprivilegesalterindextableengines) + * 5.2.11.6 [AlterConstraint](#alterconstraint) + * 5.2.11.6.1 [RQ.SRS-006.RBAC.Privileges.AlterConstraint](#rqsrs-006rbacprivilegesalterconstraint) + * 5.2.11.6.2 
[RQ.SRS-006.RBAC.Privileges.AlterConstraint.Grant](#rqsrs-006rbacprivilegesalterconstraintgrant) + * 5.2.11.6.3 [RQ.SRS-006.RBAC.Privileges.AlterConstraint.Revoke](#rqsrs-006rbacprivilegesalterconstraintrevoke) + * 5.2.11.6.4 [RQ.SRS-006.RBAC.Privileges.AlterConstraint.Cluster](#rqsrs-006rbacprivilegesalterconstraintcluster) + * 5.2.11.6.5 [RQ.SRS-006.RBAC.Privileges.AlterConstraint.GrantOption](#rqsrs-006rbacprivilegesalterconstraintgrantoption) + * 5.2.11.6.6 [RQ.SRS-006.RBAC.Privileges.AlterConstraint.GrantOption.Grant](#rqsrs-006rbacprivilegesalterconstraintgrantoptiongrant) + * 5.2.11.6.7 [RQ.SRS-006.RBAC.Privileges.AlterConstraint.GrantOption.Revoke](#rqsrs-006rbacprivilegesalterconstraintgrantoptionrevoke) + * 5.2.11.6.8 [RQ.SRS-006.RBAC.Privileges.AlterConstraint.TableEngines](#rqsrs-006rbacprivilegesalterconstrainttableengines) + * 5.2.11.7 [AlterTTL](#alterttl) + * 5.2.11.7.1 [RQ.SRS-006.RBAC.Privileges.AlterTTL](#rqsrs-006rbacprivilegesalterttl) + * 5.2.11.7.2 [RQ.SRS-006.RBAC.Privileges.AlterTTL.Grant](#rqsrs-006rbacprivilegesalterttlgrant) + * 5.2.11.7.3 [RQ.SRS-006.RBAC.Privileges.AlterTTL.Revoke](#rqsrs-006rbacprivilegesalterttlrevoke) + * 5.2.11.7.4 [RQ.SRS-006.RBAC.Privileges.AlterTTL.Cluster](#rqsrs-006rbacprivilegesalterttlcluster) + * 5.2.11.7.5 [RQ.SRS-006.RBAC.Privileges.AlterTTL.GrantOption](#rqsrs-006rbacprivilegesalterttlgrantoption) + * 5.2.11.7.6 [RQ.SRS-006.RBAC.Privileges.AlterTTL.GrantOption.Grant](#rqsrs-006rbacprivilegesalterttlgrantoptiongrant) + * 5.2.11.7.7 [RQ.SRS-006.RBAC.Privileges.AlterTTL.GrantOption.Revoke](#rqsrs-006rbacprivilegesalterttlgrantoptionrevoke) + * 5.2.11.7.8 [RQ.SRS-006.RBAC.Privileges.AlterTTL.TableEngines](#rqsrs-006rbacprivilegesalterttltableengines) + * 5.2.11.8 [AlterSettings](#altersettings) + * 5.2.11.8.1 [RQ.SRS-006.RBAC.Privileges.AlterSettings](#rqsrs-006rbacprivilegesaltersettings) + * 5.2.11.8.2 [RQ.SRS-006.RBAC.Privileges.AlterSettings.Grant](#rqsrs-006rbacprivilegesaltersettingsgrant) + * 5.2.11.8.3 [RQ.SRS-006.RBAC.Privileges.AlterSettings.Revoke](#rqsrs-006rbacprivilegesaltersettingsrevoke) + * 5.2.11.8.4 [RQ.SRS-006.RBAC.Privileges.AlterSettings.Cluster](#rqsrs-006rbacprivilegesaltersettingscluster) + * 5.2.11.8.5 [RQ.SRS-006.RBAC.Privileges.AlterSettings.GrantOption](#rqsrs-006rbacprivilegesaltersettingsgrantoption) + * 5.2.11.8.6 [RQ.SRS-006.RBAC.Privileges.AlterSettings.GrantOption.Grant](#rqsrs-006rbacprivilegesaltersettingsgrantoptiongrant) + * 5.2.11.8.7 [RQ.SRS-006.RBAC.Privileges.AlterSettings.GrantOption.Revoke](#rqsrs-006rbacprivilegesaltersettingsgrantoptionrevoke) + * 5.2.11.8.8 [RQ.SRS-006.RBAC.Privileges.AlterSettings.TableEngines](#rqsrs-006rbacprivilegesaltersettingstableengines) + * 5.2.11.9 [RQ.SRS-006.RBAC.Privileges.Delete](#rqsrs-006rbacprivilegesdelete) + * 5.2.11.10 [RQ.SRS-006.RBAC.Privileges.Alter](#rqsrs-006rbacprivilegesalter) + * 5.2.11.11 [RQ.SRS-006.RBAC.Privileges.Create](#rqsrs-006rbacprivilegescreate) + * 5.2.11.12 [RQ.SRS-006.RBAC.Privileges.Drop](#rqsrs-006rbacprivilegesdrop) + * 5.2.11.13 [RQ.SRS-006.RBAC.Privileges.All](#rqsrs-006rbacprivilegesall) + * 5.2.11.14 [RQ.SRS-006.RBAC.Privileges.All.GrantRevoke](#rqsrs-006rbacprivilegesallgrantrevoke) + * 5.2.11.15 [RQ.SRS-006.RBAC.Privileges.GrantOption](#rqsrs-006rbacprivilegesgrantoption) + * 5.2.11.16 [RQ.SRS-006.RBAC.Privileges.AdminOption](#rqsrs-006rbacprivilegesadminoption) + * 5.2.12 [Required Privileges](#required-privileges) + * 5.2.12.1 [RQ.SRS-006.RBAC.RequiredPrivileges.Create](#rqsrs-006rbacrequiredprivilegescreate) + * 
5.2.12.2 [RQ.SRS-006.RBAC.RequiredPrivileges.Alter](#rqsrs-006rbacrequiredprivilegesalter)
+ * 5.2.12.3 [RQ.SRS-006.RBAC.RequiredPrivileges.Drop](#rqsrs-006rbacrequiredprivilegesdrop)
+ * 5.2.12.4 [RQ.SRS-006.RBAC.RequiredPrivileges.Drop.Table](#rqsrs-006rbacrequiredprivilegesdroptable)
+ * 5.2.12.5 [RQ.SRS-006.RBAC.RequiredPrivileges.GrantRevoke](#rqsrs-006rbacrequiredprivilegesgrantrevoke)
+ * 5.2.12.6 [RQ.SRS-006.RBAC.RequiredPrivileges.Use](#rqsrs-006rbacrequiredprivilegesuse)
+ * 5.2.12.7 [RQ.SRS-006.RBAC.RequiredPrivileges.Admin](#rqsrs-006rbacrequiredprivilegesadmin)
+* 6 [References](#references)
+
+## Revision History
+
+This document is stored in an electronic form using [Git] source control management software
+hosted in a GitHub repository.
+
+All updates are tracked using [Git]'s revision history.
+
+* GitHub repository: https://github.com/ClickHouse/ClickHouse/blob/master/tests/testflows/rbac/requirements/requirements.md
+* Revision history: https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/rbac/requirements/requirements.md
+
+## Introduction
+
+[ClickHouse] currently has support for only basic access control. Users can be defined to allow
+access to specific databases and dictionaries. A profile can be assigned to a user to specify
+a read-only mode as well as a set of quotas that limit the user's resource consumption.
+Beyond this basic functionality there is no way to control access rights within
+a database. A user can either be denied access, have read-only rights, or have complete access
+to the whole database on the server.
+
+In many cases more granular access control is needed, where a user's access can be
+controlled at a much finer level. A typical solution to this problem in the **SQL** world
+is provided by implementing **RBAC (role-based access control)**.
+For example, a version of **RBAC** is implemented by both [MySQL] and [PostgreSQL].
+
+[ClickHouse] shall implement **RBAC** to meet the growing needs of its users. In order to minimize
+the learning curve the concepts and the syntax of its implementation shall be
+as close as possible to those of [MySQL] and [PostgreSQL]. The goal is to allow for fast
+transition of users who are already familiar with these features in those databases
+to [ClickHouse].
+
+## Terminology
+
+* **RBAC** -
+  role-based access control
+* **quota** -
+  setting that limits specific resource consumption
+
+## Privilege Definitions
+
+* **usage** -
+  privilege to access a database or a table
+* **select** -
+  privilege to read data from a database or a table
+* **insert** -
+  privilege to insert data into a database or a table
+* **delete** -
+  privilege to delete a database or a table
+* **alter** -
+  privilege to alter tables
+* **create** -
+  privilege to create a database or a table
+* **drop** -
+  privilege to drop a database or a table
+* **all** -
+  privilege that includes **usage**, **select**,
+  **insert**, **delete**, **alter**, **create**, and **drop**
+* **grant option** -
+  privilege to grant the same privilege to other users or roles
+* **admin option** -
+  privilege to perform the administrative tasks that are defined in the **system queries**
+
+## Requirements
+
+### Generic
+
+#### RQ.SRS-006.RBAC
+version: 1.0
+
+[ClickHouse] SHALL support role based access control.
+
+#### Login
+
+##### RQ.SRS-006.RBAC.Login
+version: 1.0
+
+[ClickHouse] SHALL allow access to the server for a given
+user only when the correct username and password are used during
+the connection to the server.
+
+##### RQ.SRS-006.RBAC.Login.DefaultUser
+version: 1.0
+
+[ClickHouse] SHALL use the **default user** when no username and password
+are specified during the connection to the server.
+
+#### User
+
+##### RQ.SRS-006.RBAC.User
+version: 1.0
+
+[ClickHouse] SHALL support creation and manipulation of
+one or more **user** accounts to which roles, privileges,
+settings profiles, quotas and row policies can be assigned.
+
+##### RQ.SRS-006.RBAC.User.Roles
+version: 1.0
+
+[ClickHouse] SHALL support assigning one or more **roles**
+to a **user**.
+
+##### RQ.SRS-006.RBAC.User.Privileges
+version: 1.0
+
+[ClickHouse] SHALL support assigning one or more privileges to a **user**.
+
+##### RQ.SRS-006.RBAC.User.Variables
+version: 1.0
+
+[ClickHouse] SHALL support assigning one or more variables to a **user**.
+
+##### RQ.SRS-006.RBAC.User.Variables.Constraints
+version: 1.0
+
+[ClickHouse] SHALL support assigning min, max and read-only constraints
+for the variables that can be set and read by the **user**.
+
+##### RQ.SRS-006.RBAC.User.SettingsProfile
+version: 1.0
+
+[ClickHouse] SHALL support assigning one or more **settings profiles**
+to a **user**.
+
+##### RQ.SRS-006.RBAC.User.Quotas
+version: 1.0
+
+[ClickHouse] SHALL support assigning one or more **quotas** to a **user**.
+
+##### RQ.SRS-006.RBAC.User.RowPolicies
+version: 1.0
+
+[ClickHouse] SHALL support assigning one or more **row policies** to a **user**.
+
+##### RQ.SRS-006.RBAC.User.AccountLock
+version: 1.0
+
+[ClickHouse] SHALL support locking and unlocking of **user** accounts.
+
+##### RQ.SRS-006.RBAC.User.AccountLock.DenyAccess
+version: 1.0
+
+[ClickHouse] SHALL deny access to a user whose account is locked.
+
+##### RQ.SRS-006.RBAC.User.DefaultRole
+version: 1.0
+
+[ClickHouse] SHALL support assigning a default role to a **user**.
+
+##### RQ.SRS-006.RBAC.User.RoleSelection
+version: 1.0
+
+[ClickHouse] SHALL support selection of one or more **roles** from the available roles
+that are assigned to a **user**.
+
+##### RQ.SRS-006.RBAC.User.ShowCreate
+version: 1.0
+
+[ClickHouse] SHALL support showing the command used to create the **user** account.
+
+##### RQ.SRS-006.RBAC.User.ShowPrivileges
+version: 1.0
+
+[ClickHouse] SHALL support listing the privileges of the **user**.
+
+#### Role
+
+##### RQ.SRS-006.RBAC.Role
+version: 1.0
+
+[ClickHouse] SHALL support creation and manipulation of **roles**
+to which privileges, settings profiles, quotas and row policies can be
+assigned.
+
+##### RQ.SRS-006.RBAC.Role.Privileges
+version: 1.0
+
+[ClickHouse] SHALL support assigning one or more privileges to a **role**.
+
+##### RQ.SRS-006.RBAC.Role.Variables
+version: 1.0
+
+[ClickHouse] SHALL support assigning one or more variables to a **role**.
+
+##### RQ.SRS-006.RBAC.Role.SettingsProfile
+version: 1.0
+
+[ClickHouse] SHALL support assigning one or more **settings profiles**
+to a **role**.
+
+##### RQ.SRS-006.RBAC.Role.Quotas
+version: 1.0
+
+[ClickHouse] SHALL support assigning one or more **quotas** to a **role**.
+
+##### RQ.SRS-006.RBAC.Role.RowPolicies
+version: 1.0
+
+[ClickHouse] SHALL support assigning one or more **row policies** to a **role**.
+
+#### Partial Revokes
+
+##### RQ.SRS-006.RBAC.PartialRevokes
+version: 1.0
+
+[ClickHouse] SHALL support partial revoking of privileges granted
+to a **user** or a **role**.
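+
+As an illustration (user and database names here are hypothetical), a broad grant
+followed by a partial revoke could look as follows, using the `GRANT` and `REVOKE`
+syntax defined later in this specification:
+
+```sql
+-- grant SELECT everywhere, then carve out one database
+GRANT SELECT ON *.* TO alice;
+REVOKE SELECT ON private_db.* FROM alice;
+```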
+
+#### Settings Profile
+
+##### RQ.SRS-006.RBAC.SettingsProfile
+version: 1.0
+
+[ClickHouse] SHALL support creation and manipulation of **settings profiles**
+that can include value definitions for one or more variables and can
+be assigned to one or more **users** or **roles**.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Constraints
+version: 1.0
+
+[ClickHouse] SHALL support assigning min, max and read-only constraints
+for the variables specified in the **settings profile**.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.ShowCreate
+version: 1.0
+
+[ClickHouse] SHALL support showing the command used to create the **settings profile**.
+
+#### Quotas
+
+##### RQ.SRS-006.RBAC.Quotas
+version: 1.0
+
+[ClickHouse] SHALL support creation and manipulation of **quotas**
+that can be used to limit resource usage by a **user** or a **role**
+over a period of time.
+
+##### RQ.SRS-006.RBAC.Quotas.Keyed
+version: 1.0
+
+[ClickHouse] SHALL support creating **quotas** that are keyed
+so that a quota is tracked separately for each key value.
+
+##### RQ.SRS-006.RBAC.Quotas.Queries
+version: 1.0
+
+[ClickHouse] SHALL support setting **queries** quota to limit the total number of requests.
+
+##### RQ.SRS-006.RBAC.Quotas.Errors
+version: 1.0
+
+[ClickHouse] SHALL support setting **errors** quota to limit the number of queries that threw an exception.
+
+##### RQ.SRS-006.RBAC.Quotas.ResultRows
+version: 1.0
+
+[ClickHouse] SHALL support setting **result rows** quota to limit the
+total number of rows given as the result.
+
+##### RQ.SRS-006.RBAC.Quotas.ReadRows
+version: 1.0
+
+[ClickHouse] SHALL support setting **read rows** quota to limit the total
+number of source rows read from tables for running the query on all remote servers.
+
+##### RQ.SRS-006.RBAC.Quotas.ResultBytes
+version: 1.0
+
+[ClickHouse] SHALL support setting **result bytes** quota to limit the total number
+of bytes that can be returned as the result.
+
+##### RQ.SRS-006.RBAC.Quotas.ReadBytes
+version: 1.0
+
+[ClickHouse] SHALL support setting **read bytes** quota to limit the total number
+of source bytes read from tables for running the query on all remote servers.
+
+##### RQ.SRS-006.RBAC.Quotas.ExecutionTime
+version: 1.0
+
+[ClickHouse] SHALL support setting **execution time** quota to limit the maximum
+query execution time.
+
+##### RQ.SRS-006.RBAC.Quotas.ShowCreate
+version: 1.0
+
+[ClickHouse] SHALL support showing the command used to create the **quota**.
+
+#### Row Policy
+
+##### RQ.SRS-006.RBAC.RowPolicy
+version: 1.0
+
+[ClickHouse] SHALL support creation and manipulation of table **row policies**
+that can be used to limit access to the table contents for a **user** or a **role**
+using a specified **condition**.
+
+##### RQ.SRS-006.RBAC.RowPolicy.Condition
+version: 1.0
+
+[ClickHouse] SHALL support row policy **conditions** that can be any SQL
+expression that returns a boolean.
+
+##### RQ.SRS-006.RBAC.RowPolicy.ShowCreate
+version: 1.0
+
+[ClickHouse] SHALL support showing the command used to create the **row policy**.
+
+### Specific
+
+##### RQ.SRS-006.RBAC.User.Use.DefaultRole
+version: 1.0
+
+[ClickHouse] SHALL by default use the default role or roles assigned
+to the user, if specified.
+
+##### RQ.SRS-006.RBAC.User.Use.AllRolesWhenNoDefaultRole
+version: 1.0
+
+[ClickHouse] SHALL by default use all the roles assigned to the user
+if no default role or roles are specified for the user.
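+
+For example (hypothetical names), assuming the `CREATE USER` and `GRANT` statements
+described later in this specification:
+
+```sql
+CREATE USER alice DEFAULT ROLE reader;  -- session starts with only `reader` active
+CREATE USER bob;                        -- no default role specified
+GRANT reader, writer TO bob;            -- all granted roles are active by default
+```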
+
+##### RQ.SRS-006.RBAC.User.Create
+version: 1.0
+
+[ClickHouse] SHALL support creating **user** accounts using `CREATE USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Create.IfNotExists
+version: 1.0
+
+[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE USER` statement
+to skip raising an exception if a user with the same **name** already exists.
+If the `IF NOT EXISTS` clause is not specified then an exception SHALL be
+raised if a user with the same **name** already exists.
+
+##### RQ.SRS-006.RBAC.User.Create.Replace
+version: 1.0
+
+[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE USER` statement
+to replace an existing user account if it already exists.
+
+##### RQ.SRS-006.RBAC.User.Create.Password.NoPassword
+version: 1.0
+
+[ClickHouse] SHALL support specifying no password when creating
+user account using `IDENTIFIED WITH NO_PASSWORD` clause.
+
+##### RQ.SRS-006.RBAC.User.Create.Password.NoPassword.Login
+version: 1.0
+
+[ClickHouse] SHALL use no password for the user when connecting to the server
+when an account was created with `IDENTIFIED WITH NO_PASSWORD` clause.
+
+##### RQ.SRS-006.RBAC.User.Create.Password.PlainText
+version: 1.0
+
+[ClickHouse] SHALL support specifying plaintext password when creating
+user account using `IDENTIFIED WITH PLAINTEXT_PASSWORD BY` clause.
+
+##### RQ.SRS-006.RBAC.User.Create.Password.PlainText.Login
+version: 1.0
+
+[ClickHouse] SHALL use the plaintext password passed by the user when connecting to the server
+when an account was created with `IDENTIFIED WITH PLAINTEXT_PASSWORD` clause
+and compare the password with the one used in the `CREATE USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Create.Password.Sha256Password
+version: 1.0
+
+[ClickHouse] SHALL support specifying the result of applying SHA256
+to some password when creating user account using `IDENTIFIED WITH SHA256_PASSWORD BY` or `IDENTIFIED BY`
+clause.
+
+##### RQ.SRS-006.RBAC.User.Create.Password.Sha256Password.Login
+version: 1.0
+
+[ClickHouse] SHALL calculate `SHA256` of the password passed by the user when connecting to the server
+when an account was created with the `IDENTIFIED WITH SHA256_PASSWORD` or the `IDENTIFIED BY` clause
+and compare the calculated hash to the one used in the `CREATE USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash
+version: 1.0
+
+[ClickHouse] SHALL support specifying the result of applying SHA256
+to some already calculated hash when creating user account using `IDENTIFIED WITH SHA256_HASH`
+clause.
+
+##### RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash.Login
+version: 1.0
+
+[ClickHouse] SHALL calculate `SHA256` of the already calculated hash passed by
+the user when connecting to the server
+when an account was created with `IDENTIFIED WITH SHA256_HASH` clause
+and compare the calculated hash to the one used in the `CREATE USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password
+version: 1.0
+
+[ClickHouse] SHALL support specifying the result of applying SHA1 two times
+to a password when creating user account using `IDENTIFIED WITH DOUBLE_SHA1_PASSWORD`
+clause.
+
+##### RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password.Login
+version: 1.0
+
+[ClickHouse] SHALL calculate `SHA1` two times over the password passed by
+the user when connecting to the server
+when an account was created with `IDENTIFIED WITH DOUBLE_SHA1_PASSWORD` clause
+and compare the calculated value to the one used in the `CREATE USER` statement.
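+
+A minimal sketch of the identification clauses above (user names and passwords are hypothetical):
+
+```sql
+CREATE USER u1 IDENTIFIED WITH NO_PASSWORD;              -- no BY clause needed
+CREATE USER u2 IDENTIFIED WITH PLAINTEXT_PASSWORD BY 'secret';
+CREATE USER u3 IDENTIFIED WITH SHA256_PASSWORD BY 'secret';
+CREATE USER u4 IDENTIFIED WITH DOUBLE_SHA1_PASSWORD BY 'secret';
+```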
+
+##### RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash
+version: 1.0
+
+[ClickHouse] SHALL support specifying the result of applying SHA1 two times
+to a hash when creating user account using `IDENTIFIED WITH DOUBLE_SHA1_HASH`
+clause.
+
+##### RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash.Login
+version: 1.0
+
+[ClickHouse] SHALL calculate `SHA1` two times over the hash passed by
+the user when connecting to the server
+when an account was created with `IDENTIFIED WITH DOUBLE_SHA1_HASH` clause
+and compare the calculated value to the one used in the `CREATE USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Create.Host.Name
+version: 1.0
+
+[ClickHouse] SHALL support specifying one or more hostnames from
+which user can access the server using the `HOST NAME` clause
+in the `CREATE USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Create.Host.Regexp
+version: 1.0
+
+[ClickHouse] SHALL support specifying one or more regular expressions
+to match hostnames from which user can access the server
+using the `HOST REGEXP` clause in the `CREATE USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Create.Host.IP
+version: 1.0
+
+[ClickHouse] SHALL support specifying one or more IP addresses or subnets from
+which user can access the server using the `HOST IP` clause in the
+`CREATE USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Create.Host.Any
+version: 1.0
+
+[ClickHouse] SHALL support specifying `HOST ANY` clause in the `CREATE USER` statement
+to indicate that user can access the server from any host.
+
+##### RQ.SRS-006.RBAC.User.Create.Host.None
+version: 1.0
+
+[ClickHouse] SHALL support forbidding access from any host using `HOST NONE` clause in the
+`CREATE USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Create.Host.Local
+version: 1.0
+
+[ClickHouse] SHALL support limiting user access to local only using `HOST LOCAL` clause in the
+`CREATE USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Create.Host.Like
+version: 1.0
+
+[ClickHouse] SHALL support specifying one or more host names using `LIKE` pattern syntax
+with the `HOST LIKE` clause in the `CREATE USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Create.Host.Default
+version: 1.0
+
+[ClickHouse] SHALL support user access to the server from any host
+if no `HOST` clause is specified in the `CREATE USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Create.DefaultRole
+version: 1.0
+
+[ClickHouse] SHALL support specifying one or more default roles
+using `DEFAULT ROLE` clause in the `CREATE USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Create.DefaultRole.None
+version: 1.0
+
+[ClickHouse] SHALL support specifying no default roles
+using `DEFAULT ROLE NONE` clause in the `CREATE USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Create.DefaultRole.All
+version: 1.0
+
+[ClickHouse] SHALL support specifying all roles to be used as default
+using `DEFAULT ROLE ALL` clause in the `CREATE USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Create.Settings
+version: 1.0
+
+[ClickHouse] SHALL support specifying settings and profile
+using `SETTINGS` clause in the `CREATE USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Create.OnCluster
+version: 1.0
+
+[ClickHouse] SHALL support specifying cluster on which the user
+will be created using `ON CLUSTER` clause in the `CREATE USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Create.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support the following syntax for the `CREATE USER` statement.
+
+```sql
+CREATE USER [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]
+    [IDENTIFIED [WITH {NO_PASSWORD|PLAINTEXT_PASSWORD|SHA256_PASSWORD|SHA256_HASH|DOUBLE_SHA1_PASSWORD|DOUBLE_SHA1_HASH}] BY {'password'|'hash'}]
+    [HOST {LOCAL | NAME 'name' | NAME REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
+    [DEFAULT ROLE role [,...]]
+    [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
+```
+
+##### RQ.SRS-006.RBAC.User.Alter
+version: 1.0
+
+[ClickHouse] SHALL support altering **user** accounts using `ALTER USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Alter.OrderOfEvaluation
+version: 1.0
+
+[ClickHouse] SHALL support evaluating `ALTER USER` statement from left to right
+where things defined on the right override anything that was previously defined on
+the left.
+
+##### RQ.SRS-006.RBAC.User.Alter.IfExists
+version: 1.0
+
+[ClickHouse] SHALL support `IF EXISTS` clause in the `ALTER USER` statement
+to skip raising an exception (producing a warning instead) if a user with the
+specified **name** does not exist. If the `IF EXISTS` clause is not specified
+then an exception SHALL be raised if a user with the **name** does not exist.
+
+##### RQ.SRS-006.RBAC.User.Alter.Cluster
+version: 1.0
+
+[ClickHouse] SHALL support specifying the cluster the user is on
+when altering user account using `ON CLUSTER` clause in the `ALTER USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Alter.Rename
+version: 1.0
+
+[ClickHouse] SHALL support specifying a new name for the user when
+altering user account using `RENAME` clause in the `ALTER USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Alter.Password.PlainText
+version: 1.0
+
+[ClickHouse] SHALL support specifying plaintext password when altering
+user account using `IDENTIFIED WITH PLAINTEXT_PASSWORD BY` or
+using shorthand `IDENTIFIED BY` clause in the `ALTER USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Alter.Password.Sha256Password
+version: 1.0
+
+[ClickHouse] SHALL support specifying the result of applying SHA256
+to some password as identification when altering user account using
+`IDENTIFIED WITH SHA256_PASSWORD` clause in the `ALTER USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Alter.Password.DoubleSha1Password
+version: 1.0
+
+[ClickHouse] SHALL support specifying the result of applying Double SHA1
+to some password as identification when altering user account using
+`IDENTIFIED WITH DOUBLE_SHA1_PASSWORD` clause in the `ALTER USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Alter.Host.AddDrop
+version: 1.0
+
+[ClickHouse] SHALL support altering user by adding and dropping access to hosts
+with the `ADD HOST` or the `DROP HOST` clause in the `ALTER USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Alter.Host.Local
+version: 1.0
+
+[ClickHouse] SHALL support limiting user access to local only using `HOST LOCAL` clause in the
+`ALTER USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Alter.Host.Name
+version: 1.0
+
+[ClickHouse] SHALL support specifying one or more hostnames from
+which user can access the server using the `HOST NAME` clause
+in the `ALTER USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Alter.Host.Regexp
+version: 1.0
+
+[ClickHouse] SHALL support specifying one or more regular expressions
+to match hostnames from which user can access the server
+using the `HOST REGEXP` clause in the `ALTER USER` statement.
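+
+A minimal sketch of host-based alterations (hypothetical user and host values), following
+the `ALTER USER` syntax given later in this section:
+
+```sql
+ALTER USER alice ADD HOST NAME 'node1.example.com';  -- allow one more host
+ALTER USER alice DROP HOST IP '10.0.0.0/8';          -- remove a subnet
+ALTER USER bob HOST REGEXP '.*\.example\.com';       -- replace the host list
+```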
+
+##### RQ.SRS-006.RBAC.User.Alter.Host.IP
+version: 1.0
+
+[ClickHouse] SHALL support specifying one or more IP addresses or subnets from
+which user can access the server using the `HOST IP` clause in the
+`ALTER USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Alter.Host.Like
+version: 1.0
+
+[ClickHouse] SHALL support specifying one or more host names using `LIKE` pattern syntax
+with the `HOST LIKE` clause in the `ALTER USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Alter.Host.Any
+version: 1.0
+
+[ClickHouse] SHALL support specifying `HOST ANY` clause in the `ALTER USER` statement
+to indicate that user can access the server from any host.
+
+##### RQ.SRS-006.RBAC.User.Alter.Host.None
+version: 1.0
+
+[ClickHouse] SHALL support forbidding access from any host using `HOST NONE` clause in the
+`ALTER USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Alter.DefaultRole
+version: 1.0
+
+[ClickHouse] SHALL support specifying one or more default roles
+using `DEFAULT ROLE` clause in the `ALTER USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Alter.DefaultRole.All
+version: 1.0
+
+[ClickHouse] SHALL support specifying all roles to be used as default
+using `DEFAULT ROLE ALL` clause in the `ALTER USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Alter.DefaultRole.AllExcept
+version: 1.0
+
+[ClickHouse] SHALL support specifying one or more roles which will not be used as default
+using `DEFAULT ROLE ALL EXCEPT` clause in the `ALTER USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Alter.Settings
+version: 1.0
+
+[ClickHouse] SHALL support specifying one or more variables
+using `SETTINGS` clause in the `ALTER USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Alter.Settings.Min
+version: 1.0
+
+[ClickHouse] SHALL support specifying a minimum value for the variable specified
+using `SETTINGS` with `MIN` clause in the `ALTER USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Alter.Settings.Max
+version: 1.0
+
+[ClickHouse] SHALL support specifying a maximum value for the variable specified
+using `SETTINGS` with `MAX` clause in the `ALTER USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Alter.Settings.Profile
+version: 1.0
+
+[ClickHouse] SHALL support specifying the name of a profile for the variable specified
+using `SETTINGS` with `PROFILE` clause in the `ALTER USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Alter.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support the following syntax for the `ALTER USER` statement.
+
+```sql
+ALTER USER [IF EXISTS] name [ON CLUSTER cluster_name]
+    [RENAME TO new_name]
+    [IDENTIFIED [WITH {PLAINTEXT_PASSWORD|SHA256_PASSWORD|DOUBLE_SHA1_PASSWORD}] BY {'password'|'hash'}]
+    [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
+    [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ]
+    [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
+```
+
+##### RQ.SRS-006.RBAC.SetDefaultRole
+version: 1.0
+
+[ClickHouse] SHALL support setting or changing granted roles to default for one or more
+users using `SET DEFAULT ROLE` statement which
+SHALL permanently change the default roles for the user or users if successful.
+
+##### RQ.SRS-006.RBAC.SetDefaultRole.CurrentUser
+version: 1.0
+
+[ClickHouse] SHALL support setting or changing granted roles to default for
+the current user using `CURRENT_USER` clause in the `SET DEFAULT ROLE` statement.
+
+##### RQ.SRS-006.RBAC.SetDefaultRole.All
+version: 1.0
+
+[ClickHouse] SHALL support setting or changing all granted roles to default
+for one or more users using `ALL` clause in the `SET DEFAULT ROLE` statement.
+
+##### RQ.SRS-006.RBAC.SetDefaultRole.AllExcept
+version: 1.0
+
+[ClickHouse] SHALL support setting or changing all granted roles except those specified
+to default for one or more users using `ALL EXCEPT` clause in the `SET DEFAULT ROLE` statement.
+
+##### RQ.SRS-006.RBAC.SetDefaultRole.None
+version: 1.0
+
+[ClickHouse] SHALL support removing all granted roles from default
+for one or more users using `NONE` clause in the `SET DEFAULT ROLE` statement.
+
+##### RQ.SRS-006.RBAC.SetDefaultRole.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support the following syntax for the `SET DEFAULT ROLE` statement.
+
+```sql
+SET DEFAULT ROLE
+    {NONE | role [,...] | ALL | ALL EXCEPT role [,...]}
+    TO {user|CURRENT_USER} [,...]
+```
+
+##### RQ.SRS-006.RBAC.SetRole
+version: 1.0
+
+[ClickHouse] SHALL support activating role or roles for the current user
+using `SET ROLE` statement.
+
+##### RQ.SRS-006.RBAC.SetRole.Default
+version: 1.0
+
+[ClickHouse] SHALL support activating default roles for the current user
+using `DEFAULT` clause in the `SET ROLE` statement.
+
+##### RQ.SRS-006.RBAC.SetRole.None
+version: 1.0
+
+[ClickHouse] SHALL support activating no roles for the current user
+using `NONE` clause in the `SET ROLE` statement.
+
+##### RQ.SRS-006.RBAC.SetRole.All
+version: 1.0
+
+[ClickHouse] SHALL support activating all roles for the current user
+using `ALL` clause in the `SET ROLE` statement.
+
+##### RQ.SRS-006.RBAC.SetRole.AllExcept
+version: 1.0
+
+[ClickHouse] SHALL support activating all roles except those specified
+for the current user using `ALL EXCEPT` clause in the `SET ROLE` statement.
+
+##### RQ.SRS-006.RBAC.SetRole.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support the following syntax for the `SET ROLE` statement.
+
+```sql
+SET ROLE {DEFAULT | NONE | role [,...] | ALL | ALL EXCEPT role [,...]}
+```
+
+##### RQ.SRS-006.RBAC.User.ShowCreateUser
+version: 1.0
+
+[ClickHouse] SHALL support showing the `CREATE USER` statement used to create the current user object
+using the `SHOW CREATE USER` statement with `CURRENT_USER` or no argument.
+
+##### RQ.SRS-006.RBAC.User.ShowCreateUser.For
+version: 1.0
+
+[ClickHouse] SHALL support showing the `CREATE USER` statement used to create the specified user object
+using the `FOR` clause in the `SHOW CREATE USER` statement.
+
+##### RQ.SRS-006.RBAC.User.ShowCreateUser.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support the following syntax for the `SHOW CREATE USER` statement.
+
+```sql
+SHOW CREATE USER [name | CURRENT_USER]
+```
+
+##### RQ.SRS-006.RBAC.User.Drop
+version: 1.0
+
+[ClickHouse] SHALL support removing a user account using `DROP USER` statement.
+
+##### RQ.SRS-006.RBAC.User.Drop.IfExists
+version: 1.0
+
+[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP USER` statement
+to skip raising an exception if the user account does not exist.
+If the `IF EXISTS` clause is not specified then an exception SHALL be
+raised if a user does not exist.
+
+##### RQ.SRS-006.RBAC.User.Drop.OnCluster
+version: 1.0
+
+[ClickHouse] SHALL support using `ON CLUSTER` clause in the `DROP USER` statement
+to specify the name of the cluster the user should be dropped from.
+
+##### RQ.SRS-006.RBAC.User.Drop.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support the following syntax for the `DROP USER` statement.
+
+```sql
+DROP USER [IF EXISTS] name [,...]
+    [ON CLUSTER cluster_name]
+```
+
+##### RQ.SRS-006.RBAC.Role.Create
+version: 1.0
+
+[ClickHouse] SHALL support creating a **role** using `CREATE ROLE` statement.
+
+##### RQ.SRS-006.RBAC.Role.Create.IfNotExists
+version: 1.0
+
+[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE ROLE` statement
+to skip raising an exception if a role with the same **name** already exists.
+If the `IF NOT EXISTS` clause is not specified then an exception SHALL be
+raised if a role with the same **name** already exists.
+
+##### RQ.SRS-006.RBAC.Role.Create.Replace
+version: 1.0
+
+[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE ROLE` statement
+to replace an existing role if it already exists.
+
+##### RQ.SRS-006.RBAC.Role.Create.Settings
+version: 1.0
+
+[ClickHouse] SHALL support specifying settings and profile using `SETTINGS`
+clause in the `CREATE ROLE` statement.
+
+##### RQ.SRS-006.RBAC.Role.Create.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support the following syntax for the `CREATE ROLE` statement.
+
+``` sql
+CREATE ROLE [IF NOT EXISTS | OR REPLACE] name
+    [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
+```
+
+##### RQ.SRS-006.RBAC.Role.Alter
+version: 1.0
+
+[ClickHouse] SHALL support altering one **role** using `ALTER ROLE` statement.
+
+##### RQ.SRS-006.RBAC.Role.Alter.IfExists
+version: 1.0
+
+[ClickHouse] SHALL support altering one **role** using `ALTER ROLE IF EXISTS` statement, where no exception
+will be thrown if the role does not exist.
+
+##### RQ.SRS-006.RBAC.Role.Alter.Cluster
+version: 1.0
+
+[ClickHouse] SHALL support altering one **role** using `ALTER ROLE role ON CLUSTER` statement to specify the
+cluster location of the specified role.
+
+##### RQ.SRS-006.RBAC.Role.Alter.Rename
+version: 1.0
+
+[ClickHouse] SHALL support altering one **role** using `ALTER ROLE role RENAME TO` statement which renames the
+role to a specified new name. If the new name already exists, an exception SHALL be raised unless the
+`IF EXISTS` clause is specified, in which case no exception will be raised and nothing will change.
+
+##### RQ.SRS-006.RBAC.Role.Alter.Settings
+version: 1.0
+
+[ClickHouse] SHALL support altering the settings of one **role** using `ALTER ROLE role SETTINGS ...` statement.
+Altering variable values, setting min and max values, specifying readonly or writable, and specifying the
+profiles to which this change SHALL apply are all supported, using the following syntax.
+
+```sql
+[SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
+```
+
+One or more variables and profiles may be specified as shown above.
+
+##### RQ.SRS-006.RBAC.Role.Alter.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support the following syntax for the `ALTER ROLE` statement.
+
+```sql
+ALTER ROLE [IF EXISTS] name [ON CLUSTER cluster_name]
+    [RENAME TO new_name]
+    [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]
+```
+
+##### RQ.SRS-006.RBAC.Role.Drop
+version: 1.0
+
+[ClickHouse] SHALL support removing one or more roles using `DROP ROLE` statement.
+
+##### RQ.SRS-006.RBAC.Role.Drop.IfExists
+version: 1.0
+
+[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP ROLE` statement
+to skip raising an exception if the role does not exist.
+If the `IF EXISTS` clause is not specified then an exception SHALL be
+raised if a role does not exist.
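+
+Taken together, a role's lifecycle under the statements above might look like this
+(role name and setting values are hypothetical):
+
+```sql
+CREATE ROLE IF NOT EXISTS accountant
+    SETTINGS max_memory_usage = 10000000 READONLY;
+ALTER ROLE accountant RENAME TO bookkeeper;
+DROP ROLE IF EXISTS bookkeeper;
+```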
+
+##### RQ.SRS-006.RBAC.Role.Drop.Cluster
+version: 1.0
+
+[ClickHouse] SHALL support using `ON CLUSTER` clause in the `DROP ROLE` statement
+to specify the cluster from which to drop the specified role.
+
+##### RQ.SRS-006.RBAC.Role.Drop.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support the following syntax for the `DROP ROLE` statement.
+
+``` sql
+DROP ROLE [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
+```
+
+##### RQ.SRS-006.RBAC.Role.ShowCreate
+version: 1.0
+
+[ClickHouse] SHALL support viewing the settings with which a role was created
+using the `SHOW CREATE ROLE` statement.
+
+##### RQ.SRS-006.RBAC.Role.ShowCreate.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support the following syntax for the `SHOW CREATE ROLE` statement.
+
+```sql
+SHOW CREATE ROLE name
+```
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.To
+version: 1.0
+
+[ClickHouse] SHALL support granting privileges to one or more users or roles using `TO` clause
+in the `GRANT PRIVILEGE` statement.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.ToCurrentUser
+version: 1.0
+
+[ClickHouse] SHALL support granting privileges to the current user using `TO CURRENT_USER` clause
+in the `GRANT PRIVILEGE` statement.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.Select
+version: 1.0
+
+[ClickHouse] SHALL support granting the **select** privilege to one or more users or roles
+for a database or a table using the `GRANT SELECT` statement.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.Insert
+version: 1.0
+
+[ClickHouse] SHALL support granting the **insert** privilege to one or more users or roles
+for a database or a table using the `GRANT INSERT` statement.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.Alter
+version: 1.0
+
+[ClickHouse] SHALL support granting the **alter** privilege to one or more users or roles
+for a database or a table using the `GRANT ALTER` statement.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.Create
+version: 1.0
+
+[ClickHouse] SHALL support granting the **create** privilege to one or more users or roles
+for a database or a table using the `GRANT CREATE` statement.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.Drop
+version: 1.0
+
+[ClickHouse] SHALL support granting the **drop** privilege to one or more users or roles
+for a database or a table using the `GRANT DROP` statement.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.Truncate
+version: 1.0
+
+[ClickHouse] SHALL support granting the **truncate** privilege to one or more users or roles
+for a database or a table using the `GRANT TRUNCATE` statement.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.Optimize
+version: 1.0
+
+[ClickHouse] SHALL support granting the **optimize** privilege to one or more users or roles
+for a database or a table using the `GRANT OPTIMIZE` statement.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.Show
+version: 1.0
+
+[ClickHouse] SHALL support granting the **show** privilege to one or more users or roles
+for a database or a table using the `GRANT SHOW` statement.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.KillQuery
+version: 1.0
+
+[ClickHouse] SHALL support granting the **kill query** privilege to one or more users or roles
+for a database or a table using the `GRANT KILL QUERY` statement.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.AccessManagement
+version: 1.0
+
+[ClickHouse] SHALL support granting the **access management** privileges to one or more users or roles
+for a database or a table using the `GRANT ACCESS MANAGEMENT` statement.
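+
+For example (hypothetical database, table, and grantee names):
+
+```sql
+GRANT SELECT ON db.accounts TO alice;
+GRANT INSERT, ALTER ON db.* TO writer_role;
+GRANT SHOW ON *.* TO CURRENT_USER;
+```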
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.System
+version: 1.0
+
+[ClickHouse] SHALL support granting the **system** privileges to one or more users or roles
+for a database or a table using the `GRANT SYSTEM` statement.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.Introspection
+version: 1.0
+
+[ClickHouse] SHALL support granting the **introspection** privileges to one or more users or roles
+for a database or a table using the `GRANT INTROSPECTION` statement.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.Sources
+version: 1.0
+
+[ClickHouse] SHALL support granting the **sources** privileges to one or more users or roles
+for a database or a table using the `GRANT SOURCES` statement.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.DictGet
+version: 1.0
+
+[ClickHouse] SHALL support granting the **dictGet** privilege to one or more users or roles
+for a database or a table using the `GRANT dictGet` statement.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.None
+version: 1.0
+
+[ClickHouse] SHALL support granting no privileges to one or more users or roles
+for a database or a table using the `GRANT NONE` statement.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.All
+version: 1.0
+
+[ClickHouse] SHALL support granting the **all** privileges to one or more users or roles
+for a database or a table using the `GRANT ALL` or `GRANT ALL PRIVILEGES` statements.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.GrantOption
+version: 1.0
+
+[ClickHouse] SHALL support granting the **grant option** privilege to one or more users or roles
+for a database or a table using the `WITH GRANT OPTION` clause in the `GRANT` statement.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.On
+version: 1.0
+
+[ClickHouse] SHALL support the `ON` clause in the `GRANT` privilege statement
+which SHALL allow specifying one or more tables to which the privilege SHALL
+be granted using the following patterns
+
+* `*.*` any table in any database
+* `database.*` any table in the specified database
+* `database.table` specific table in the specified database
+* `*` any table in the current database
+* `table` specific table in the current database
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.PrivilegeColumns
+version: 1.0
+
+[ClickHouse] SHALL support granting the privilege **some_privilege** to one or more users or roles
+for a database or a table using the `GRANT some_privilege(column)` statement for one column.
+Multiple columns will be supported with the `GRANT some_privilege(column1, column2...)` statement.
+The privileges will be granted for only the specified columns.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.OnCluster
+version: 1.0
+
+[ClickHouse] SHALL support specifying cluster on which to grant privileges using the `ON CLUSTER`
+clause in the `GRANT PRIVILEGE` statement.
+
+##### RQ.SRS-006.RBAC.Grant.Privilege.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support the following syntax for the `GRANT` statement that
+grants explicit privileges to a user or a role.
+
+```sql
+GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...]
+    ON {db.table|db.*|*.*|table|*}
+    TO {user | role | CURRENT_USER} [,...]
+    [WITH GRANT OPTION]
+```
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.Cluster
+version: 1.0
+
+[ClickHouse] SHALL support revoking privileges from one or more users or roles
+for a database or a table on some specific cluster using the `REVOKE ON CLUSTER cluster_name` statement.
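+
+For example (hypothetical cluster, table, and user names):
+
+```sql
+REVOKE ON CLUSTER my_cluster SELECT ON db.accounts FROM alice;
+```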
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.Any
+version: 1.0
+
+[ClickHouse] SHALL support revoking ANY privilege from one or more users or roles
+for a database or a table using the `REVOKE some_privilege` statement.
+**some_privilege** refers to any ClickHouse-defined privilege, whose hierarchy includes
+SELECT, INSERT, ALTER, CREATE, DROP, TRUNCATE, OPTIMIZE, SHOW, KILL QUERY, ACCESS MANAGEMENT,
+SYSTEM, INTROSPECTION, SOURCES, dictGet and all of their sub-privileges.
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.Select
+version: 1.0
+
+[ClickHouse] SHALL support revoking the **select** privilege from one or more users or roles
+for a database or a table using the `REVOKE SELECT` statement.
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.Insert
+version: 1.0
+
+[ClickHouse] SHALL support revoking the **insert** privilege from one or more users or roles
+for a database or a table using the `REVOKE INSERT` statement.
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.Alter
+version: 1.0
+
+[ClickHouse] SHALL support revoking the **alter** privilege from one or more users or roles
+for a database or a table using the `REVOKE ALTER` statement.
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.Create
+version: 1.0
+
+[ClickHouse] SHALL support revoking the **create** privilege from one or more users or roles
+for a database or a table using the `REVOKE CREATE` statement.
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.Drop
+version: 1.0
+
+[ClickHouse] SHALL support revoking the **drop** privilege from one or more users or roles
+for a database or a table using the `REVOKE DROP` statement.
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.Truncate
+version: 1.0
+
+[ClickHouse] SHALL support revoking the **truncate** privilege from one or more users or roles
+for a database or a table using the `REVOKE TRUNCATE` statement.
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.Optimize
+version: 1.0
+
+[ClickHouse] SHALL support revoking the **optimize** privilege from one or more users or roles
+for a database or a table using the `REVOKE OPTIMIZE` statement.
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.Show
+version: 1.0
+
+[ClickHouse] SHALL support revoking the **show** privilege from one or more users or roles
+for a database or a table using the `REVOKE SHOW` statement.
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.KillQuery
+version: 1.0
+
+[ClickHouse] SHALL support revoking the **kill query** privilege from one or more users or roles
+for a database or a table using the `REVOKE KILL QUERY` statement.
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.AccessManagement
+version: 1.0
+
+[ClickHouse] SHALL support revoking the **access management** privilege from one or more users or roles
+for a database or a table using the `REVOKE ACCESS MANAGEMENT` statement.
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.System
+version: 1.0
+
+[ClickHouse] SHALL support revoking the **system** privilege from one or more users or roles
+for a database or a table using the `REVOKE SYSTEM` statement.
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.Introspection
+version: 1.0
+
+[ClickHouse] SHALL support revoking the **introspection** privilege from one or more users or roles
+for a database or a table using the `REVOKE INTROSPECTION` statement.
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.Sources
+version: 1.0
+
+[ClickHouse] SHALL support revoking the **sources** privilege from one or more users or roles
+for a database or a table using the `REVOKE SOURCES` statement.
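+
+For example (hypothetical names):
+
+```sql
+REVOKE INSERT ON db.accounts FROM alice;
+REVOKE SELECT ON db.* FROM ALL EXCEPT bob;
+```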
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.DictGet
+version: 1.0
+
+[ClickHouse] SHALL support revoking the **dictGet** privilege from one or more users or roles
+for a database or a table using the `REVOKE dictGet` statement.
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.PrivelegeColumns
+version: 1.0
+
+[ClickHouse] SHALL support revoking the privilege **some_privilege** from one or more users or roles
+for a database or a table using the `REVOKE some_privilege(column)` statement for one column.
+Multiple columns will be supported with the `REVOKE some_privilege(column1, column2...)` statement.
+The privileges will be revoked for only the specified columns.
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.Multiple
+version: 1.0
+
+[ClickHouse] SHALL support revoking MULTIPLE **privileges** from one or more users or roles
+for a database or a table using the `REVOKE privilege1, privilege2...` statement.
+**privileges** refers to any set of ClickHouse-defined privileges, whose hierarchy includes
+SELECT, INSERT, ALTER, CREATE, DROP, TRUNCATE, OPTIMIZE, SHOW, KILL QUERY, ACCESS MANAGEMENT,
+SYSTEM, INTROSPECTION, SOURCES, dictGet and all of their sub-privileges.
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.All
+version: 1.0
+
+[ClickHouse] SHALL support revoking **all** privileges from one or more users or roles
+for a database or a table using the `REVOKE ALL` or `REVOKE ALL PRIVILEGES` statements.
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.None
+version: 1.0
+
+[ClickHouse] SHALL support revoking **no** privileges from one or more users or roles
+for a database or a table using the `REVOKE NONE` statement.
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.On
+version: 1.0
+
+[ClickHouse] SHALL support the `ON` clause in the `REVOKE` privilege statement
+which SHALL allow specifying one or more tables for which the privilege SHALL
+be revoked using the following patterns
+
+* `db.table` specific table in the specified database
+* `db.*` any table in the specified database
+* `*.*` any table in any database
+* `table` specific table in the current database
+* `*` any table in the current database
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.From
+version: 1.0
+
+[ClickHouse] SHALL support the `FROM` clause in the `REVOKE` privilege statement
+which SHALL allow specifying one or more users from which the privilege SHALL
+be revoked using the following patterns
+
+* `{user | CURRENT_USER} [,...]` some combination of users by name, which may include the current user
+* `ALL` all users
+* `ALL EXCEPT {user | CURRENT_USER} [,...]` the logical reverse of the first pattern
+
+##### RQ.SRS-006.RBAC.Revoke.Privilege.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support the following syntax for the `REVOKE` statement that
+revokes explicit privileges of a user or a role.
+
+```sql
+REVOKE [ON CLUSTER cluster_name] privilege
+    [(column_name [,...])] [,...]
+    ON {db.table|db.*|*.*|table|*}
+    FROM {user | CURRENT_USER} [,...] | ALL | ALL EXCEPT {user | CURRENT_USER} [,...]
+```
+
+##### RQ.SRS-006.RBAC.PartialRevoke.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support partial revokes by using `partial_revokes` variable
+that can be set or unset using the following syntax.
+
+To disable partial revokes the `partial_revokes` variable SHALL be set to `0`
+
+```sql
+SET partial_revokes = 0
+```
+
+To enable partial revokes the `partial_revokes` variable SHALL be set to `1`
+
+```sql
+SET partial_revokes = 1
+```
+
+##### RQ.SRS-006.RBAC.Grant.Role
+version: 1.0
+
+[ClickHouse] SHALL support granting one or more roles to
+one or more users or roles using the `GRANT` role statement.
+
+##### RQ.SRS-006.RBAC.Grant.Role.CurrentUser
+version: 1.0
+
+[ClickHouse] SHALL support granting one or more roles to the current user using
+`TO CURRENT_USER` clause in the `GRANT` role statement.
+
+##### RQ.SRS-006.RBAC.Grant.Role.AdminOption
+version: 1.0
+
+[ClickHouse] SHALL support granting `admin option` privilege
+to one or more users or roles using the `WITH ADMIN OPTION` clause
+in the `GRANT` role statement.
+
+##### RQ.SRS-006.RBAC.Grant.Role.OnCluster
+version: 1.0
+
+[ClickHouse] SHALL support specifying cluster on which the user is to be granted one or more roles
+using `ON CLUSTER` clause in the `GRANT` statement.
+
+##### RQ.SRS-006.RBAC.Grant.Role.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support the following syntax for the `GRANT` role statement.
+
+``` sql
+GRANT [ON CLUSTER cluster_name]
+    role [, role ...]
+    TO {user | role | CURRENT_USER} [,...]
+    [WITH ADMIN OPTION]
+```
+
+##### RQ.SRS-006.RBAC.Revoke.Role
+version: 1.0
+
+[ClickHouse] SHALL support revoking one or more roles from
+one or more users or roles using the `REVOKE` role statement.
+
+##### RQ.SRS-006.RBAC.Revoke.Role.Keywords
+version: 1.0
+
+[ClickHouse] SHALL support revoking one or more roles from
+special groupings of one or more users or roles with the `ALL`, `ALL EXCEPT`,
+and `CURRENT_USER` keywords.
+
+##### RQ.SRS-006.RBAC.Revoke.Role.Cluster
+version: 1.0
+
+[ClickHouse] SHALL support revoking one or more roles from
+one or more users or roles from one or more clusters
+using the `REVOKE ON CLUSTER` role statement.
+
+##### RQ.SRS-006.RBAC.Revoke.AdminOption
+version: 1.0
+
+[ClickHouse] SHALL support revoking `admin option` privilege
+from one or more users or roles using the `ADMIN OPTION FOR` clause
+in the `REVOKE` role statement.
+
+##### RQ.SRS-006.RBAC.Revoke.Role.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support the following syntax for the `REVOKE` role statement.
+
+```sql
+REVOKE [ON CLUSTER cluster_name] [ADMIN OPTION FOR]
+    role [,...]
+    FROM {user | role | CURRENT_USER} [,...] | ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...]
+```
+
+##### RQ.SRS-006.RBAC.Show.Grants
+version: 1.0
+
+[ClickHouse] SHALL support listing all the privileges granted to current user and role
+using the `SHOW GRANTS` statement.
+
+##### RQ.SRS-006.RBAC.Show.Grants.For
+version: 1.0
+
+[ClickHouse] SHALL support listing all the privileges granted to a user or a role
+using the `FOR` clause in the `SHOW GRANTS` statement.
+
+##### RQ.SRS-006.RBAC.Show.Grants.Syntax
+version: 1.0
+
+[ClickHouse] SHALL use the following syntax for the `SHOW GRANTS` statement.
+
+``` sql
+SHOW GRANTS [FOR user_or_role]
+```
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Create
+version: 1.0
+
+[ClickHouse] SHALL support creating settings profile using the `CREATE SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Create.IfNotExists
+version: 1.0
+
+[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE SETTINGS PROFILE` statement
+to skip raising an exception if a settings profile with the same **name** already exists.
+If `IF NOT EXISTS` clause is not specified then an exception SHALL be raised if
+a settings profile with the same **name** already exists.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Create.Replace
+version: 1.0
+
+[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE SETTINGS PROFILE` statement
+to replace an existing settings profile if it already exists.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Create.Variables
+version: 1.0
+
+[ClickHouse] SHALL support assigning values and constraints to one or more
+variables in the `CREATE SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Value
+version: 1.0
+
+[ClickHouse] SHALL support assigning variable value in the `CREATE SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Constraints
+version: 1.0
+
+[ClickHouse] SHALL support setting `MIN`, `MAX`, `READONLY`, and `WRITABLE`
+constraints for the variables in the `CREATE SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment
+version: 1.0
+
+[ClickHouse] SHALL support assigning settings profile to one or more users
+or roles in the `CREATE SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.None
+version: 1.0
+
+[ClickHouse] SHALL support assigning settings profile to no users or roles using
+`TO NONE` clause in the `CREATE SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.All
+version: 1.0
+
+[ClickHouse] SHALL support assigning settings profile to all current users and roles
+using `TO ALL` clause in the `CREATE SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.AllExcept
+version: 1.0
+
+[ClickHouse] SHALL support excluding assignment to one or more users or roles using
+the `ALL EXCEPT` clause in the `CREATE SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Create.Inherit
+version: 1.0
+
+[ClickHouse] SHALL support inheriting profile settings from indicated profile using
+the `INHERIT` clause in the `CREATE SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Create.OnCluster
+version: 1.0
+
+[ClickHouse] SHALL support specifying what cluster to create settings profile on
+using `ON CLUSTER` clause in the `CREATE SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Create.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support the following syntax for the `CREATE SETTINGS PROFILE` statement.
+
+``` sql
+CREATE SETTINGS PROFILE [IF NOT EXISTS | OR REPLACE] name
+    [ON CLUSTER cluster_name]
+    [SET varname [= value] [MIN min] [MAX max] [READONLY|WRITABLE] | [INHERIT 'profile_name'] [,...]]
+    [TO {user_or_role [,...] | NONE | ALL | ALL EXCEPT user_or_role [,...]}]
+```
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Alter
+version: 1.0
+
+[ClickHouse] SHALL support altering settings profile using the `ALTER SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Alter.IfExists
+version: 1.0
+
+[ClickHouse] SHALL support `IF EXISTS` clause in the `ALTER SETTINGS PROFILE` statement
+to skip raising an exception if a settings profile does not exist.
+If the `IF EXISTS` clause is not specified then an exception SHALL be
+raised if a settings profile does not exist.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Rename
+version: 1.0
+
+[ClickHouse] SHALL support renaming settings profile using the `RENAME TO` clause
+in the `ALTER SETTINGS PROFILE` statement.
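+
+A minimal sketch of creating and renaming a settings profile (hypothetical names and values;
+the `SET` form follows the `CREATE SETTINGS PROFILE` syntax given above):
+
+```sql
+CREATE SETTINGS PROFILE IF NOT EXISTS restricted
+    SET max_memory_usage = 10000000 MIN 1000000 MAX 20000000 READONLY
+    TO alice;
+ALTER SETTINGS PROFILE restricted RENAME TO limited;
+```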
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables
+version: 1.0
+
+[ClickHouse] SHALL support altering values and constraints of one or more
+variables in the `ALTER SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Value
+version: 1.0
+
+[ClickHouse] SHALL support altering value of the variable in the `ALTER SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Constraints
+version: 1.0
+
+[ClickHouse] SHALL support altering `MIN`, `MAX`, `READONLY`, and `WRITABLE`
+constraints for the variables in the `ALTER SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment
+version: 1.0
+
+[ClickHouse] SHALL support reassigning settings profile to one or more users
+or roles using the `TO` clause in the `ALTER SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.None
+version: 1.0
+
+[ClickHouse] SHALL support reassigning settings profile to no users or roles using the
+`TO NONE` clause in the `ALTER SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.All
+version: 1.0
+
+[ClickHouse] SHALL support reassigning settings profile to all current users and roles
+using the `TO ALL` clause in the `ALTER SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.AllExcept
+version: 1.0
+
+[ClickHouse] SHALL support excluding assignment to one or more users or roles using
+the `TO ALL EXCEPT` clause in the `ALTER SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.Inherit
+version: 1.0
+
+[ClickHouse] SHALL support altering the settings profile by inheriting settings from
+a specified profile using `INHERIT` clause in the `ALTER SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.OnCluster
+version: 1.0
+
+[ClickHouse] SHALL support altering the settings profile on a specified cluster using
+`ON CLUSTER` clause in the `ALTER SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support the following syntax for the `ALTER SETTINGS PROFILE` statement.
+
+``` sql
+ALTER SETTINGS PROFILE [IF EXISTS] name
+    [ON CLUSTER cluster_name]
+    [RENAME TO new_name]
+    [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | INHERIT 'profile_name'] [,...]
+    [TO {user_or_role [,...] | NONE | ALL | ALL EXCEPT user_or_role [,...]}]
+```
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Drop
+version: 1.0
+
+[ClickHouse] SHALL support removing one or more settings profiles using the `DROP SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Drop.IfExists
+version: 1.0
+
+[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP SETTINGS PROFILE` statement
+to skip raising an exception if the settings profile does not exist.
+If the `IF EXISTS` clause is not specified then an exception SHALL be
+raised if a settings profile does not exist.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Drop.OnCluster
+version: 1.0
+
+[ClickHouse] SHALL support dropping one or more settings profiles on specified cluster using
+`ON CLUSTER` clause in the `DROP SETTINGS PROFILE` statement.
+
+##### RQ.SRS-006.RBAC.SettingsProfile.Drop.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support the following syntax for the `DROP SETTINGS PROFILE` statement.
+
+``` sql
+DROP SETTINGS PROFILE [IF EXISTS] name [,name,...]
+```

##### RQ.SRS-006.RBAC.SettingsProfile.ShowCreateSettingsProfile
version: 1.0

[ClickHouse] SHALL support showing the `CREATE SETTINGS PROFILE` statement used to create the settings profile
using the `SHOW CREATE SETTINGS PROFILE` statement with the following syntax

``` sql
SHOW CREATE SETTINGS PROFILE name
```

##### RQ.SRS-006.RBAC.Quota.Create
version: 1.0

[ClickHouse] SHALL support creating quotas using the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.IfNotExists
version: 1.0

[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE QUOTA` statement
to skip raising an exception if a quota with the same **name** already exists.
If `IF NOT EXISTS` clause is not specified then an exception SHALL be raised if
a quota with the same **name** already exists.

##### RQ.SRS-006.RBAC.Quota.Create.Replace
version: 1.0

[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE QUOTA` statement
to replace existing quota if it already exists.

##### RQ.SRS-006.RBAC.Quota.Create.Cluster
version: 1.0

[ClickHouse] SHALL support creating quotas on a specific cluster with the
`ON CLUSTER` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.Interval
version: 1.0

[ClickHouse] SHALL support defining the quota interval that specifies
a period of time over which the quota SHALL apply using the
`FOR INTERVAL` clause in the `CREATE QUOTA` statement.

This statement SHALL also support a number and a time period which will be one
of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. Thus, the complete syntax SHALL be:

`FOR INTERVAL number {SECOND | MINUTE | HOUR | DAY | MONTH}` where number is some real number
to define the interval.

##### RQ.SRS-006.RBAC.Quota.Create.Interval.Randomized
version: 1.0

[ClickHouse] SHALL support defining the quota randomized interval that specifies
a period of time over which the quota SHALL apply using the
`FOR RANDOMIZED INTERVAL` clause in the `CREATE QUOTA` statement.

This statement SHALL also support a number and a time period which will be one
of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. Thus, the complete syntax SHALL be:

`FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | MONTH}` where number is some
real number to define the interval.

##### RQ.SRS-006.RBAC.Quota.Create.Queries
version: 1.0

[ClickHouse] SHALL support limiting number of requests over a period of time
using the `QUERIES` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.Errors
version: 1.0

[ClickHouse] SHALL support limiting number of queries that threw an exception
using the `ERRORS` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.ResultRows
version: 1.0

[ClickHouse] SHALL support limiting the total number of rows given as the result
using the `RESULT ROWS` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.ReadRows
version: 1.0

[ClickHouse] SHALL support limiting the total number of source rows read from tables
for running the query on all remote servers
using the `READ ROWS` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.ResultBytes
version: 1.0

[ClickHouse] SHALL support limiting the total number of bytes that can be returned as the result
using the `RESULT BYTES` clause in the `CREATE QUOTA` statement.
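
For example, a minimal sketch combining the interval and limit clauses above (the quota name `hourly_quota`, the numbers, and the user `john` are illustrative assumptions):

```sql
-- Allow at most 1000 queries and 100 errors per one-hour interval
-- (quota name, limit values, and user name are hypothetical).
CREATE QUOTA IF NOT EXISTS hourly_quota
    FOR INTERVAL 1 HOUR MAX QUERIES = 1000, ERRORS = 100
    TO john;
```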
+

##### RQ.SRS-006.RBAC.Quota.Create.ReadBytes
version: 1.0

[ClickHouse] SHALL support limiting the total number of source bytes read from tables
for running the query on all remote servers
using the `READ BYTES` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.ExecutionTime
version: 1.0

[ClickHouse] SHALL support limiting the maximum query execution time
using the `EXECUTION TIME` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.NoLimits
version: 1.0

[ClickHouse] SHALL support placing no limits on resource usage over an interval
using the `NO LIMITS` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.TrackingOnly
version: 1.0

[ClickHouse] SHALL support tracking resource usage over an interval without enforcing limits
using the `TRACKING ONLY` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.KeyedBy
version: 1.0

[ClickHouse] SHALL support tracking quota for some key
using the `KEYED BY` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.KeyedByOptions
version: 1.0

[ClickHouse] SHALL support tracking quota separately for some parameter
using the `KEYED BY 'parameter'` clause in the `CREATE QUOTA` statement.

'parameter' can be one of:
`{'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}`

##### RQ.SRS-006.RBAC.Quota.Create.Assignment
version: 1.0

[ClickHouse] SHALL support assigning quota to one or more users
or roles using the `TO` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.Assignment.None
version: 1.0

[ClickHouse] SHALL support assigning quota to no users or roles using
`TO NONE` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.Assignment.All
version: 1.0

[ClickHouse] SHALL support assigning quota to all current users and roles
using `TO ALL` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.Assignment.Except
version: 1.0

[ClickHouse] SHALL support excluding assignment of quota to one or more users or roles using
the `EXCEPT` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for the `CREATE QUOTA` statement.

```sql
CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]
    [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}]
    [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | MONTH}
        {MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] |
         NO LIMITS | TRACKING ONLY} [,...]]
    [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
```

##### RQ.SRS-006.RBAC.Quota.Alter
version: 1.0

[ClickHouse] SHALL support altering quotas using the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.IfExists
version: 1.0

[ClickHouse] SHALL support `IF EXISTS` clause in the `ALTER QUOTA` statement
to skip raising an exception if a quota does not exist.
If the `IF EXISTS` clause is not specified then an exception SHALL be raised if
a quota does not exist.

##### RQ.SRS-006.RBAC.Quota.Alter.Rename
version: 1.0

[ClickHouse] SHALL support `RENAME TO` clause in the `ALTER QUOTA` statement
to rename the quota to the specified name.
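
For example, a minimal sketch of the `KEYED BY`, assignment, and rename clauses above (the quota name `api_quota` and the role `admin` are illustrative assumptions):

```sql
-- Track usage separately per user name and assign the quota to everyone
-- except admin (quota name, limit, and role name are hypothetical).
CREATE QUOTA IF NOT EXISTS api_quota
    KEYED BY 'user name'
    FOR INTERVAL 1 DAY MAX QUERIES = 10000
    TO ALL EXCEPT admin;

-- Rename the quota using the RENAME TO clause.
ALTER QUOTA IF EXISTS api_quota RENAME TO api_quota_v2;
```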
+

##### RQ.SRS-006.RBAC.Quota.Alter.Cluster
version: 1.0

[ClickHouse] SHALL support altering quotas on a specific cluster with the
`ON CLUSTER` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.Interval
version: 1.0

[ClickHouse] SHALL support redefining the quota interval that specifies
a period of time over which the quota SHALL apply using the
`FOR INTERVAL` clause in the `ALTER QUOTA` statement.

This statement SHALL also support a number and a time period which will be one
of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. Thus, the complete syntax SHALL be:

`FOR INTERVAL number {SECOND | MINUTE | HOUR | DAY | MONTH}` where number is some real number
to define the interval.

##### RQ.SRS-006.RBAC.Quota.Alter.Interval.Randomized
version: 1.0

[ClickHouse] SHALL support redefining the quota randomized interval that specifies
a period of time over which the quota SHALL apply using the
`FOR RANDOMIZED INTERVAL` clause in the `ALTER QUOTA` statement.

This statement SHALL also support a number and a time period which will be one
of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. Thus, the complete syntax SHALL be:

`FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | MONTH}` where number is some
real number to define the interval.

##### RQ.SRS-006.RBAC.Quota.Alter.Queries
version: 1.0

[ClickHouse] SHALL support altering the limit of number of requests over a period of time
using the `QUERIES` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.Errors
version: 1.0

[ClickHouse] SHALL support altering the limit of number of queries that threw an exception
using the `ERRORS` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.ResultRows
version: 1.0

[ClickHouse] SHALL support altering the limit of the total number of rows given as the result
using the `RESULT ROWS` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.ReadRows
version: 1.0

[ClickHouse] SHALL support altering the limit of the total number of source rows read from tables
for running the query on all remote servers
using the `READ ROWS` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.ResultBytes
version: 1.0

[ClickHouse] SHALL support altering the limit of the total number of bytes that can be returned as the result
using the `RESULT BYTES` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.ReadBytes
version: 1.0

[ClickHouse] SHALL support altering the limit of the total number of source bytes read from tables
for running the query on all remote servers
using the `READ BYTES` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.ExecutionTime
version: 1.0

[ClickHouse] SHALL support altering the limit of the maximum query execution time
using the `EXECUTION TIME` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.NoLimits
version: 1.0

[ClickHouse] SHALL support placing no limits on resource usage over an interval
using the `NO LIMITS` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.TrackingOnly
version: 1.0

[ClickHouse] SHALL support tracking resource usage over an interval without enforcing limits
using the `TRACKING ONLY` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.KeyedBy
version: 1.0

[ClickHouse] SHALL support altering quota to track quota separately for some key
using the `KEYED BY` clause in the `ALTER QUOTA` statement.
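
For example, a minimal sketch altering limits over two intervals (the quota name `api_quota` and the numbers are illustrative assumptions):

```sql
-- Tighten the per-hour error limit and switch a randomized daily interval
-- to tracking only (quota name and numbers are hypothetical).
ALTER QUOTA IF EXISTS api_quota
    FOR INTERVAL 1 HOUR MAX ERRORS = 10,
    FOR RANDOMIZED INTERVAL 1 DAY TRACKING ONLY;
```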
+

##### RQ.SRS-006.RBAC.Quota.Alter.KeyedByOptions
version: 1.0

[ClickHouse] SHALL support altering quota to track quota separately for some parameter
using the `KEYED BY 'parameter'` clause in the `ALTER QUOTA` statement.

'parameter' can be one of:
`{'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}`

##### RQ.SRS-006.RBAC.Quota.Alter.Assignment
version: 1.0

[ClickHouse] SHALL support reassigning quota to one or more users
or roles using the `TO` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.Assignment.None
version: 1.0

[ClickHouse] SHALL support reassigning quota to no users or roles using
`TO NONE` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.Assignment.All
version: 1.0

[ClickHouse] SHALL support reassigning quota to all current users and roles
using `TO ALL` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.Assignment.Except
version: 1.0

[ClickHouse] SHALL support excluding assignment of quota to one or more users or roles using
the `EXCEPT` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for the `ALTER QUOTA` statement.

``` sql
ALTER QUOTA [IF EXISTS] name [ON CLUSTER cluster_name]
    [RENAME TO new_name]
    [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}]
    [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | MONTH}
        {MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] |
         NO LIMITS | TRACKING ONLY} [,...]]
    [TO {user_or_role [,...] | NONE | ALL | ALL EXCEPT user_or_role [,...]}]
```

##### RQ.SRS-006.RBAC.Quota.Drop
version: 1.0

[ClickHouse] SHALL support removing one or more quotas using the `DROP QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Drop.IfExists
version: 1.0

[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP QUOTA` statement
to skip raising an exception when the quota does not exist.
If the `IF EXISTS` clause is not specified then an exception SHALL be
raised if the quota does not exist.

##### RQ.SRS-006.RBAC.Quota.Drop.Cluster
version: 1.0

[ClickHouse] SHALL support using `ON CLUSTER` clause in the `DROP QUOTA` statement
to indicate the cluster the quota to be dropped is located on.

##### RQ.SRS-006.RBAC.Quota.Drop.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for the `DROP QUOTA` statement.

``` sql
DROP QUOTA [IF EXISTS] name [,name...]
```

##### RQ.SRS-006.RBAC.Quota.ShowQuotas
version: 1.0

[ClickHouse] SHALL support showing all of the current quotas
using the `SHOW QUOTAS` statement.

##### RQ.SRS-006.RBAC.Quota.ShowQuotas.IntoOutfile
version: 1.0

[ClickHouse] SHALL support the `INTO OUTFILE` clause in the `SHOW QUOTAS` statement to define an output file by some given string literal.

##### RQ.SRS-006.RBAC.Quota.ShowQuotas.Format
version: 1.0

[ClickHouse] SHALL support the `FORMAT` clause in the `SHOW QUOTAS` statement to define a format for the output quota list.

The list of valid formats is given in the output column at:
https://clickhouse.tech/docs/en/interfaces/formats/

##### RQ.SRS-006.RBAC.Quota.ShowQuotas.Settings
version: 1.0

[ClickHouse] SHALL support the `SETTINGS` clause in the `SHOW QUOTAS` statement to specify the settings to be used when showing all quotas.
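
For example, a minimal sketch combining the clauses above (the output file name `quotas.tsv` is an illustrative assumption; `TabSeparated` is one of the documented formats):

```sql
-- Write the quota list to a file in TabSeparated format
-- (output file name is hypothetical).
SHOW QUOTAS INTO OUTFILE 'quotas.tsv' FORMAT TabSeparated;
```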
+

##### RQ.SRS-006.RBAC.Quota.ShowQuotas.Syntax
version: 1.0

[ClickHouse] SHALL support using the `SHOW QUOTAS` statement
with the following syntax

``` sql
SHOW QUOTAS
```

##### RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Name
version: 1.0

[ClickHouse] SHALL support showing the `CREATE QUOTA` statement used to create the quota with some given name
using the `SHOW CREATE QUOTA` statement with the following syntax

``` sql
SHOW CREATE QUOTA name
```

##### RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Current
version: 1.0

[ClickHouse] SHALL support showing the `CREATE QUOTA` statement used to create the CURRENT quota
using the `SHOW CREATE QUOTA CURRENT` statement or the shorthand form
`SHOW CREATE QUOTA`.

##### RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax when
using the `SHOW CREATE QUOTA` statement.

```sql
SHOW CREATE QUOTA [name | CURRENT]
```

##### RQ.SRS-006.RBAC.RowPolicy.Create
version: 1.0

[ClickHouse] SHALL support creating row policy using the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.IfNotExists
version: 1.0

[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE ROW POLICY` statement
to skip raising an exception if a row policy with the same **name** already exists.
If the `IF NOT EXISTS` clause is not specified then an exception SHALL be raised if
a row policy with the same **name** already exists.

##### RQ.SRS-006.RBAC.RowPolicy.Create.Replace
version: 1.0

[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE ROW POLICY` statement
to replace existing row policy if it already exists.

##### RQ.SRS-006.RBAC.RowPolicy.Create.OnCluster
version: 1.0

[ClickHouse] SHALL support specifying cluster on which to create the row policy
using the `ON CLUSTER` clause in the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.On
version: 1.0

[ClickHouse] SHALL support specifying table on which to create the row policy
using the `ON` clause in the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.Access
version: 1.0

[ClickHouse] SHALL support allowing or restricting access to rows using the
`AS` clause in the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.Access.Permissive
version: 1.0

[ClickHouse] SHALL support allowing access to rows using the
`AS PERMISSIVE` clause in the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.Access.Restrictive
version: 1.0

[ClickHouse] SHALL support restricting access to rows using the
`AS RESTRICTIVE` clause in the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.ForSelect
version: 1.0

[ClickHouse] SHALL support specifying which rows are affected
using the `FOR SELECT` clause in the `CREATE ROW POLICY` statement.
REQUIRES CONFIRMATION

##### RQ.SRS-006.RBAC.RowPolicy.Create.Condition
version: 1.0

[ClickHouse] SHALL support specifying a condition
that can be any SQL expression which returns a boolean using the `USING`
clause in the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.Assignment
version: 1.0

[ClickHouse] SHALL support assigning row policy to one or more users
or roles using the `TO` clause in the `CREATE ROW POLICY` statement.
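
For example, a minimal sketch of the clauses above (the policy name `region_filter`, the table `sales`, the condition, and the role `analyst` are illustrative assumptions):

```sql
-- A permissive policy on table sales that lets members of role analyst
-- see only rows matching the condition (all identifiers are hypothetical).
CREATE ROW POLICY IF NOT EXISTS region_filter ON sales
    AS PERMISSIVE FOR SELECT
    USING owner = currentUser()
    TO analyst;
```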
+

##### RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.None
version: 1.0

[ClickHouse] SHALL support assigning row policy to no users or roles using
the `TO NONE` clause in the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.All
version: 1.0

[ClickHouse] SHALL support assigning row policy to all current users and roles
using `TO ALL` clause in the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.AllExcept
version: 1.0

[ClickHouse] SHALL support excluding assignment of row policy to one or more users or roles using
the `ALL EXCEPT` clause in the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for the `CREATE ROW POLICY` statement.

``` sql
CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name [ON CLUSTER cluster_name] ON [db.]table
    [AS {PERMISSIVE | RESTRICTIVE}]
    [FOR SELECT]
    [USING condition]
    [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
```

##### RQ.SRS-006.RBAC.RowPolicy.Alter
version: 1.0

[ClickHouse] SHALL support altering row policy using the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.IfExists
version: 1.0

[ClickHouse] SHALL support the `IF EXISTS` clause in the `ALTER ROW POLICY` statement
to skip raising an exception if a row policy does not exist.
If the `IF EXISTS` clause is not specified then an exception SHALL be raised if
a row policy does not exist.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.ForSelect
version: 1.0

[ClickHouse] SHALL support modifying rows on which to apply the row policy
using the `FOR SELECT` clause in the `ALTER ROW POLICY` statement.
REQUIRES FUNCTION CONFIRMATION.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.OnCluster
version: 1.0

[ClickHouse] SHALL support specifying cluster on which to alter the row policy
using the `ON CLUSTER` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.On
version: 1.0

[ClickHouse] SHALL support specifying table on which to alter the row policy
using the `ON` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Rename
version: 1.0

[ClickHouse] SHALL support renaming the row policy using the `RENAME TO` clause
in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Access
version: 1.0

[ClickHouse] SHALL support altering access to rows using the
`AS` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Permissive
version: 1.0

[ClickHouse] SHALL support permitting access to rows using the
`AS PERMISSIVE` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Restrictive
version: 1.0

[ClickHouse] SHALL support restricting access to rows using the
`AS RESTRICTIVE` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Condition
version: 1.0

[ClickHouse] SHALL support re-specifying the row policy condition
using the `USING` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Condition.None
version: 1.0

[ClickHouse] SHALL support removing the row policy condition
using the `USING NONE` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment
version: 1.0

[ClickHouse] SHALL support reassigning row policy to one or more users
or roles using the `TO` clause in the `ALTER ROW POLICY` statement.
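
For example, a minimal sketch of the alter clauses above (the policy name `region_filter`, the table `sales`, and the role `analyst` are illustrative assumptions):

```sql
-- Rename the policy, remove its condition, and reassign it
-- (all identifiers are hypothetical).
ALTER ROW POLICY IF EXISTS region_filter ON sales
    RENAME TO region_filter_v2
    USING NONE
    TO analyst;
```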
+

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.None
version: 1.0

[ClickHouse] SHALL support reassigning row policy to no users or roles using
the `TO NONE` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.All
version: 1.0

[ClickHouse] SHALL support reassigning row policy to all current users and roles
using the `TO ALL` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.AllExcept
version: 1.0

[ClickHouse] SHALL support excluding assignment of row policy to one or more users or roles using
the `ALL EXCEPT` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for the `ALTER ROW POLICY` statement.

``` sql
ALTER [ROW] POLICY [IF EXISTS] name [ON CLUSTER cluster_name] ON [database.]table
    [RENAME TO new_name]
    [AS {PERMISSIVE | RESTRICTIVE}]
    [FOR SELECT]
    [USING {condition | NONE}][,...]
    [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
```

##### RQ.SRS-006.RBAC.RowPolicy.Drop
version: 1.0

[ClickHouse] SHALL support removing one or more row policies using the `DROP ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Drop.IfExists
version: 1.0

[ClickHouse] SHALL support using the `IF EXISTS` clause in the `DROP ROW POLICY` statement
to skip raising an exception when the row policy does not exist.
If the `IF EXISTS` clause is not specified then an exception SHALL be
raised if the row policy does not exist.

##### RQ.SRS-006.RBAC.RowPolicy.Drop.On
version: 1.0

[ClickHouse] SHALL support removing row policy from one or more specified tables
using the `ON` clause in the `DROP ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Drop.OnCluster
version: 1.0

[ClickHouse] SHALL support removing row policy from a specified cluster
using the `ON CLUSTER` clause in the `DROP ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Drop.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for the `DROP ROW POLICY` statement.

``` sql
DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER cluster_name]
```

##### RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy
version: 1.0

[ClickHouse] SHALL support showing the `CREATE ROW POLICY` statement used to create the row policy
using the `SHOW CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.On
version: 1.0

[ClickHouse] SHALL support showing statement used to create row policy on specific table
using the `ON` clause in the `SHOW CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for `SHOW CREATE ROW POLICY`.

``` sql
SHOW CREATE [ROW] POLICY name ON [database.]table
```

##### RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies
version: 1.0

[ClickHouse] SHALL support showing row policies using the `SHOW ROW POLICIES` statement.

##### RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.On
version: 1.0

[ClickHouse] SHALL support showing row policies on a specific table
using the `ON` clause in the `SHOW ROW POLICIES` statement.

##### RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for `SHOW ROW POLICIES`.
+

```sql
SHOW [ROW] POLICIES [ON [database.]table]
```

#### Table Privileges

##### RQ.SRS-006.RBAC.Table.PublicTables
version: 1.0

[ClickHouse] SHALL allow a user without any privileges to access the following tables

* system.one
* system.numbers
* system.contributors
* system.functions

##### RQ.SRS-006.RBAC.Table.ShowTables
version: 1.0

[ClickHouse] SHALL add a table to the list of tables accessible through `SHOW TABLES` by a user if and only if
that user has any privilege on that table, either directly or through a role.

##### Distributed Tables

###### RQ.SRS-006.RBAC.Table.DistributedTable.Create
version: 1.0

[ClickHouse] SHALL successfully `CREATE` a distributed table if and only if
the user has **create table** privilege on the table and **remote** privilege on `*.*`.

###### RQ.SRS-006.RBAC.Table.DistributedTable.Select
version: 1.0

[ClickHouse] SHALL successfully `SELECT` from a distributed table if and only if
the user has **select** privilege on the table and on the remote table specified in the `CREATE` query of the distributed table.

The **select** privilege on the remote table SHALL not be required if the remote table does not exist on the same server as the user.

###### RQ.SRS-006.RBAC.Table.DistributedTable.Insert
version: 1.0

[ClickHouse] SHALL successfully `INSERT` into a distributed table if and only if
the user has **insert** privilege on the table and on the remote table specified in the `CREATE` query of the distributed table.

The **insert** privilege on the remote table SHALL not be required if the remote table does not exist on the same server as the user;
in that case the insert executes into the remote table on a different server.

###### RQ.SRS-006.RBAC.Table.DistributedTable.SpecialTables
version: 1.0

[ClickHouse] SHALL successfully execute a query using a distributed table that uses one of the special tables if and only if
the user has the necessary privileges to interact with that special table, either granted directly or through a role.
Special tables include:
* materialized view
* distributed table
* source table of a materialized view

###### RQ.SRS-006.RBAC.Table.DistributedTable.LocalUser
version: 1.0

[ClickHouse] SHALL successfully execute a query using a distributed table from
a user present locally, but not remotely.

###### RQ.SRS-006.RBAC.Table.DistributedTable.SameUserDifferentNodesDifferentPrivileges
version: 1.0

[ClickHouse] SHALL successfully execute a query using a distributed table by a user that exists on multiple nodes
if and only if the user has the required privileges on the node the query is being executed from.

#### Views

##### View

###### RQ.SRS-006.RBAC.View
version: 1.0

[ClickHouse] SHALL support controlling access to **create**, **select** and **drop**
privileges for a view for users or roles.

###### RQ.SRS-006.RBAC.View.Create
version: 1.0

[ClickHouse] SHALL successfully execute a `CREATE VIEW` command if and only if
the user has **create view** privilege either explicitly or through roles.

If the stored query includes one or more source tables, the user must have **select** privilege
on all the source tables either explicitly or through a role.
+For example,
```sql
CREATE VIEW view AS SELECT * FROM source_table
CREATE VIEW view AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))
CREATE VIEW view AS SELECT * FROM table0 JOIN table1 USING column
CREATE VIEW view AS SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2
CREATE VIEW view AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))
CREATE VIEW view0 AS SELECT column FROM view1 UNION ALL SELECT column FROM view2
```

###### RQ.SRS-006.RBAC.View.Select
version: 1.0

[ClickHouse] SHALL successfully `SELECT` from a view if and only if
the user has **select** privilege for that view either explicitly or through a role.

If the stored query includes one or more source tables, the user must have **select** privilege
on all the source tables either explicitly or through a role.
For example,
```sql
CREATE VIEW view AS SELECT * FROM source_table
CREATE VIEW view AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))
CREATE VIEW view AS SELECT * FROM table0 JOIN table1 USING column
CREATE VIEW view AS SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2
CREATE VIEW view AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))
CREATE VIEW view0 AS SELECT column FROM view1 UNION ALL SELECT column FROM view2

SELECT * FROM view
```

###### RQ.SRS-006.RBAC.View.Drop
version: 1.0

[ClickHouse] SHALL successfully execute a `DROP VIEW` command if and only if
the user has **drop view** privilege on that view either explicitly or through a role.

##### Materialized View

###### RQ.SRS-006.RBAC.MaterializedView
version: 1.0

[ClickHouse] SHALL support controlling access to **create**, **select**, **alter** and **drop**
privileges for a materialized view for users or roles.

###### RQ.SRS-006.RBAC.MaterializedView.Create
version: 1.0

[ClickHouse] SHALL successfully execute a `CREATE MATERIALIZED VIEW` command if and only if
the user has **create view** privilege either explicitly or through roles.

If `POPULATE` is specified, the user must have `INSERT` privilege on the view,
either explicitly or through roles.
For example,
```sql
CREATE MATERIALIZED VIEW view ENGINE = Memory POPULATE AS SELECT * FROM source_table
```

If the stored query includes one or more source tables, the user must have **select** privilege
on all the source tables either explicitly or through a role.
+For example,
```sql
CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM source_table
CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))
CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM table0 JOIN table1 USING column
CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2
CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))
CREATE MATERIALIZED VIEW view0 ENGINE = Memory AS SELECT column FROM view1 UNION ALL SELECT column FROM view2
```

If the materialized view has a target table explicitly declared in the `TO` clause, the user must have
**insert** and **select** privilege on the target table.
For example,
```sql
CREATE MATERIALIZED VIEW view TO target_table AS SELECT * FROM source_table
```

###### RQ.SRS-006.RBAC.MaterializedView.Select
version: 1.0

[ClickHouse] SHALL successfully `SELECT` from a materialized view if and only if
the user has **select** privilege for that view either explicitly or through a role.

If the stored query includes one or more source tables, the user must have **select** privilege
on all the source tables either explicitly or through a role.
For example,
```sql
CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM source_table
CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))
CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM table0 JOIN table1 USING column
CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2
CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))
CREATE MATERIALIZED VIEW view0 ENGINE = Memory AS SELECT column FROM view1 UNION ALL SELECT column FROM view2

SELECT * FROM view
```

###### RQ.SRS-006.RBAC.MaterializedView.Select.TargetTable
version: 1.0

[ClickHouse] SHALL successfully `SELECT` from the target table, implicit or explicit, of a materialized view if and only if
the user has `SELECT` privilege for the table, either explicitly or through a role.

###### RQ.SRS-006.RBAC.MaterializedView.Select.SourceTable
version: 1.0

[ClickHouse] SHALL successfully `SELECT` from the source table of a materialized view if and only if
the user has `SELECT` privilege for the table, either explicitly or through a role.

###### RQ.SRS-006.RBAC.MaterializedView.Drop
version: 1.0

[ClickHouse] SHALL successfully execute a `DROP VIEW` command if and only if
the user has **drop view** privilege on that view either explicitly or through a role.

###### RQ.SRS-006.RBAC.MaterializedView.ModifyQuery
version: 1.0

[ClickHouse] SHALL successfully execute a `MODIFY QUERY` command if and only if
the user has **modify query** privilege on that view either explicitly or through a role.
+

If the new query includes one or more source tables, the user must have **select** privilege
on all the source tables either explicitly or through a role.
For example,
```sql
ALTER TABLE view MODIFY QUERY SELECT * FROM source_table
```

###### RQ.SRS-006.RBAC.MaterializedView.Insert
version: 1.0

[ClickHouse] SHALL successfully `INSERT` into a materialized view if and only if
the user has `INSERT` privilege on the view, either explicitly or through a role.

###### RQ.SRS-006.RBAC.MaterializedView.Insert.SourceTable
version: 1.0

[ClickHouse] SHALL successfully `INSERT` into a source table of a materialized view if and only if
the user has `INSERT` privilege on the source table, either explicitly or through a role.

###### RQ.SRS-006.RBAC.MaterializedView.Insert.TargetTable
version: 1.0

[ClickHouse] SHALL successfully `INSERT` into a target table of a materialized view if and only if
the user has `INSERT` privilege on the target table, either explicitly or through a role.

##### Live View

###### RQ.SRS-006.RBAC.LiveView
version: 1.0

[ClickHouse] SHALL support controlling access to **create**, **select**, **alter** and **drop**
privileges for a live view for users or roles.

###### RQ.SRS-006.RBAC.LiveView.Create
version: 1.0

[ClickHouse] SHALL successfully execute a `CREATE LIVE VIEW` command if and only if
the user has **create view** privilege either explicitly or through roles.

If the stored query includes one or more source tables, the user must have **select** privilege
on all the source tables either explicitly or through a role.
For example,
```sql
CREATE LIVE VIEW view AS SELECT * FROM source_table
CREATE LIVE VIEW view AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))
CREATE LIVE VIEW view AS SELECT * FROM table0 JOIN table1 USING column
CREATE LIVE VIEW view AS SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2
CREATE LIVE VIEW view AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))
CREATE LIVE VIEW view0 AS SELECT column FROM view1 UNION ALL SELECT column FROM view2
```

###### RQ.SRS-006.RBAC.LiveView.Select
version: 1.0

[ClickHouse] SHALL successfully `SELECT` from a live view if and only if
the user has **select** privilege for that view either explicitly or through a role.

If the stored query includes one or more source tables, the user must have **select** privilege
on all the source tables either explicitly or through a role.
+For example,
```sql
CREATE LIVE VIEW view AS SELECT * FROM source_table
CREATE LIVE VIEW view AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))
CREATE LIVE VIEW view AS SELECT * FROM table0 JOIN table1 USING column
CREATE LIVE VIEW view AS SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2
CREATE LIVE VIEW view AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))
CREATE LIVE VIEW view0 AS SELECT column FROM view1 UNION ALL SELECT column FROM view2

SELECT * FROM view
```

###### RQ.SRS-006.RBAC.LiveView.Drop
version: 1.0

[ClickHouse] SHALL successfully execute a `DROP VIEW` command if and only if
the user has **drop view** privilege on that view either explicitly or through a role.

###### RQ.SRS-006.RBAC.LiveView.Refresh
version: 1.0

[ClickHouse] SHALL successfully execute an `ALTER LIVE VIEW REFRESH` command if and only if
the user has **refresh** privilege on that view either explicitly or through a role.

#### Privileges

##### RQ.SRS-006.RBAC.Privileges.Usage
version: 1.0

[ClickHouse] SHALL support granting or revoking **usage** privilege
for a database or a specific table to one or more **users** or **roles**.

##### Select

###### RQ.SRS-006.RBAC.Privileges.Select
version: 1.0

[ClickHouse] SHALL support controlling access to the **select** privilege
for a database or a specific table to one or more **users** or **roles**.
Any `SELECT INTO` statements SHALL not be executed, unless the user
has the **select** privilege for the destination table
either because of the explicit grant or through one of the roles assigned to the user.

###### RQ.SRS-006.RBAC.Privileges.Select.Grant
version: 1.0

[ClickHouse] SHALL support granting **select** privilege
for a database or a specific table to one or more **users** or **roles**.

###### RQ.SRS-006.RBAC.Privileges.Select.Revoke
version: 1.0

[ClickHouse] SHALL support revoking **select** privilege
for a database or a specific table from one or more **users** or **roles**.

###### RQ.SRS-006.RBAC.Privileges.Select.Column
version: 1.0

[ClickHouse] SHALL support granting or revoking **select** privilege
for one or more specified columns in a table to one or more **users** or **roles**.
Any `SELECT INTO` statements SHALL not be executed, unless the user
has the **select** privilege for the destination column
either because of the explicit grant or through one of the roles assigned to the user.

###### RQ.SRS-006.RBAC.Privileges.Select.Cluster
version: 1.0

[ClickHouse] SHALL support granting or revoking **select** privilege
on a specified cluster to one or more **users** or **roles**.
Any `SELECT INTO` statements SHALL succeed only on nodes where
the table exists and privilege was granted.

###### RQ.SRS-006.RBAC.Privileges.Select.GrantOption
version: 1.0

[ClickHouse] SHALL support granting **select** privilege
for a database or a specific table to one or more **users** or **roles**
with a `GRANT OPTION` clause. User with **grant option** privilege SHALL be able to
change access to the **select** privilege for another user or role
on the same or smaller scope that they have access to.
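
For example, a minimal sketch of granting the **select** privilege at table scope, column scope, and with `GRANT OPTION` (the identifiers `db.table`, `id`, `name`, `john`, and `analyst` are illustrative assumptions):

```sql
-- Grant select on a whole table, with the right to pass the privilege on
-- (database, table, and user names are hypothetical).
GRANT SELECT ON db.table TO john WITH GRANT OPTION;

-- Grant select on two columns only.
GRANT SELECT(id, name) ON db.table TO analyst;
```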
+

###### RQ.SRS-006.RBAC.Privileges.Select.GrantOption.Grant
version: 1.0

[ClickHouse] SHALL support a user with **grant option** privilege
granting **select** privilege to other **users** or **roles** on the same
or smaller scope that they have access to. Any `SELECT INTO` statements SHALL succeed
when done by a user with privilege granted by a user with `GRANT OPTION`,
either directly or through an assigned role.

###### RQ.SRS-006.RBAC.Privileges.Select.GrantOption.Revoke
version: 1.0

[ClickHouse] SHALL support a user with **grant option** privilege
revoking **select** privilege from other **users** or **roles** on the same
or smaller scope that they have access to. Any `SELECT INTO` statements SHALL fail
when done by a user with privilege revoked by a user with `GRANT OPTION`,
either directly or through an assigned role, unless they have access otherwise.

###### RQ.SRS-006.RBAC.Privileges.Select.TableEngines
version: 1.0

[ClickHouse] SHALL support controlling access to the **select** privilege
on tables created using the following engines

* MergeTree
* ReplacingMergeTree
* SummingMergeTree
* AggregatingMergeTree
* CollapsingMergeTree
* VersionedCollapsingMergeTree
* GraphiteMergeTree
* ReplicatedMergeTree
* ReplicatedSummingMergeTree
* ReplicatedReplacingMergeTree
* ReplicatedAggregatingMergeTree
* ReplicatedCollapsingMergeTree
* ReplicatedVersionedCollapsingMergeTree
* ReplicatedGraphiteMergeTree

##### Insert

###### RQ.SRS-006.RBAC.Privileges.Insert
version: 1.0

[ClickHouse] SHALL support controlling access to the **insert** privilege
for a database or a specific table to one or more **users** or **roles**.
Any `INSERT INTO` statements SHALL not be executed, unless the user
has the **insert** privilege for the destination table
either because of the explicit grant or through one of the roles assigned to the user.

###### RQ.SRS-006.RBAC.Privileges.Insert.Grant
version: 1.0

[ClickHouse] SHALL support granting **insert** privilege
for a database or a specific table to one or more **users** or **roles**.

###### RQ.SRS-006.RBAC.Privileges.Insert.Revoke
version: 1.0

[ClickHouse] SHALL support revoking **insert** privilege
for a database or a specific table from one or more **users** or **roles**.

###### RQ.SRS-006.RBAC.Privileges.Insert.Column
version: 1.0

[ClickHouse] SHALL support granting or revoking **insert** privilege
for one or more specified columns in a table to one or more **users** or **roles**.
Any `INSERT INTO` statements SHALL not be executed, unless the user
has the **insert** privilege for the destination column
either because of the explicit grant or through one of the roles assigned to the user.

###### RQ.SRS-006.RBAC.Privileges.Insert.Cluster
version: 1.0

[ClickHouse] SHALL support granting or revoking **insert** privilege
on a specified cluster to one or more **users** or **roles**.
Any `INSERT INTO` statements SHALL succeed only on nodes where
the table exists and privilege was granted.

###### RQ.SRS-006.RBAC.Privileges.Insert.GrantOption
version: 1.0

[ClickHouse] SHALL support granting **insert** privilege
for a database or a specific table to one or more **users** or **roles**
with a `GRANT OPTION` clause. User with **grant option** privilege SHALL be able to
change access to the **insert** privilege for another user or role
on the same or smaller scope that they have access to.
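
For example, a minimal sketch of the corresponding grant and revoke statements for the **insert** privilege (the identifiers `db.table` and `etl_role` are illustrative assumptions):

```sql
-- Grant insert with grant option to a role, then take it back
-- (database, table, and role names are hypothetical).
GRANT INSERT ON db.table TO etl_role WITH GRANT OPTION;
REVOKE INSERT ON db.table FROM etl_role;
```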
+

###### RQ.SRS-006.RBAC.Privileges.Insert.GrantOption.Grant
version: 1.0

[ClickHouse] SHALL support a user with **grant option** privilege
granting **insert** privilege to other **users** or **roles** on the same
or smaller scope that they have access to. Any `INSERT INTO` statements SHALL succeed
when done by a user with privilege granted by a user with `GRANT OPTION`,
either directly or through an assigned role.

###### RQ.SRS-006.RBAC.Privileges.Insert.GrantOption.Revoke
version: 1.0

[ClickHouse] SHALL support a user with **grant option** privilege
revoking **insert** privilege from other **users** or **roles** on the same
or smaller scope that they have access to. Any `INSERT INTO` statements SHALL fail
when done by a user with privilege revoked by a user with `GRANT OPTION`,
either directly or through an assigned role, unless they have access otherwise.

###### RQ.SRS-006.RBAC.Privileges.Insert.TableEngines
version: 1.0

[ClickHouse] SHALL support controlling access to the **insert** privilege
on tables created using the following engines

* MergeTree
* ReplacingMergeTree
* SummingMergeTree
* AggregatingMergeTree
* CollapsingMergeTree
* VersionedCollapsingMergeTree
* GraphiteMergeTree
* ReplicatedMergeTree
* ReplicatedSummingMergeTree
* ReplicatedReplacingMergeTree
* ReplicatedAggregatingMergeTree
* ReplicatedCollapsingMergeTree
* ReplicatedVersionedCollapsingMergeTree
* ReplicatedGraphiteMergeTree

##### AlterColumn

###### RQ.SRS-006.RBAC.Privileges.AlterColumn
version: 1.0

[ClickHouse] SHALL support controlling access to the **alter column** privilege
for a database or a specific table to one or more **users** or **roles**.
Any `ALTER TABLE ... ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN` statements SHALL
return an error, unless the user has the **alter column** privilege for
the destination table either because of the explicit grant or through one of
the roles assigned to the user.

###### RQ.SRS-006.RBAC.Privileges.AlterColumn.Grant
version: 1.0

[ClickHouse] SHALL support granting **alter column** privilege
for a database or a specific table to one or more **users** or **roles**.

###### RQ.SRS-006.RBAC.Privileges.AlterColumn.Revoke
version: 1.0

[ClickHouse] SHALL support revoking **alter column** privilege
for a database or a specific table from one or more **users** or **roles**.

###### RQ.SRS-006.RBAC.Privileges.AlterColumn.Column
version: 1.0

[ClickHouse] SHALL support granting or revoking **alter column** privilege
for one or more specified columns in a table to one or more **users** or **roles**.
Any `ALTER TABLE ... ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN` statements SHALL return an error,
unless the user has the **alter column** privilege for the destination column
either because of the explicit grant or through one of the roles assigned to the user.

###### RQ.SRS-006.RBAC.Privileges.AlterColumn.Cluster
version: 1.0

[ClickHouse] SHALL support granting or revoking **alter column** privilege
on a specified cluster to one or more **users** or **roles**.
Any `ALTER TABLE ... ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN`
statements SHALL succeed only on nodes where the table exists and privilege was granted.

###### RQ.SRS-006.RBAC.Privileges.AlterColumn.GrantOption
version: 1.0

[ClickHouse] SHALL support granting **alter column** privilege
for a database or a specific table to one or more **users** or **roles**
with a `GRANT OPTION` clause.
User with **grant option** privilege SHALL be able to
change access to the **alter column** privilege for another user or role
on the same or smaller scope that they have access to.

###### RQ.SRS-006.RBAC.Privileges.AlterColumn.GrantOption.Grant
version: 1.0

[ClickHouse] SHALL support a user with **grant option** privilege
granting **alter column** privilege to other **users** or **roles** on the same
or smaller scope that they have access to.
Any `ALTER TABLE ... ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN` statements SHALL
succeed when done by a user with privilege granted by a user with
`GRANT OPTION`, either directly or through an assigned role.

###### RQ.SRS-006.RBAC.Privileges.AlterColumn.GrantOption.Revoke
version: 1.0

[ClickHouse] SHALL support a user with **grant option** privilege
revoking **alter column** privilege from other **users** or **roles** on the same
or smaller scope that they have access to. Any `ALTER TABLE ... ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN` statements SHALL fail
when done by a user with privilege revoked by a user with `GRANT OPTION`,
either directly or through an assigned role, unless they have access otherwise.

###### RQ.SRS-006.RBAC.Privileges.AlterColumn.TableEngines
version: 1.0

[ClickHouse] SHALL support controlling access to the **alter column** privilege
on tables created using the following engines

* MergeTree
* ReplacingMergeTree
* SummingMergeTree
* AggregatingMergeTree
* CollapsingMergeTree
* VersionedCollapsingMergeTree
* GraphiteMergeTree
* ReplicatedMergeTree
* ReplicatedSummingMergeTree
* ReplicatedReplacingMergeTree
* ReplicatedAggregatingMergeTree
* ReplicatedCollapsingMergeTree
* ReplicatedVersionedCollapsingMergeTree
* ReplicatedGraphiteMergeTree

##### AlterIndex

###### RQ.SRS-006.RBAC.Privileges.AlterIndex
version: 1.0

[ClickHouse] SHALL support controlling access to the **alter index** privilege
for a database or a specific table to one or more **users** or **roles**.
Any `ALTER TABLE ... ORDER BY | ADD|DROP|MATERIALIZE|CLEAR INDEX` statements SHALL
return an error, unless the user has the **alter index** privilege for
the destination table either because of the explicit grant or through one of
the roles assigned to the user.

###### RQ.SRS-006.RBAC.Privileges.AlterIndex.Grant
version: 1.0

[ClickHouse] SHALL support granting **alter index** privilege
for a database or a specific table to one or more **users** or **roles**.

###### RQ.SRS-006.RBAC.Privileges.AlterIndex.Revoke
version: 1.0

[ClickHouse] SHALL support revoking **alter index** privilege
for a database or a specific table from one or more **users** or **roles**.

###### RQ.SRS-006.RBAC.Privileges.AlterIndex.Cluster
version: 1.0

[ClickHouse] SHALL support granting or revoking **alter index** privilege
on a specified cluster to one or more **users** or **roles**.
Any `ALTER TABLE ... ORDER BY | ADD|DROP|MATERIALIZE|CLEAR INDEX`
statements SHALL succeed only on nodes where the table exists and privilege was granted.

###### RQ.SRS-006.RBAC.Privileges.AlterIndex.GrantOption
version: 1.0

[ClickHouse] SHALL support granting **alter index** privilege
for a database or a specific table to one or more **users** or **roles**
with a `GRANT OPTION` clause. User with **grant option** privilege SHALL be able to
change access to the **alter index** privilege for another user or role
on the same or smaller scope that they have access to.
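
For example, a minimal sketch granting two of the `ALTER` sub-privileges discussed in this section (the identifiers `db.table` and `john` are illustrative assumptions); the same pattern applies to the other `ALTER` sub-privileges described below:

```sql
-- Grant the alter column and alter index sub-privileges on one table,
-- with the right to pass them on (identifiers are hypothetical).
GRANT ALTER COLUMN, ALTER INDEX ON db.table TO john WITH GRANT OPTION;
```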
+

###### RQ.SRS-006.RBAC.Privileges.AlterIndex.GrantOption.Grant
version: 1.0

[ClickHouse] SHALL support a user with **grant option** privilege
granting **alter index** privilege to other **users** or **roles** on the same
or smaller scope that they have access to.
Any `ALTER TABLE ... ORDER BY | ADD|DROP|MATERIALIZE|CLEAR INDEX` statements SHALL
succeed when done by a user with privilege granted by a user with
`GRANT OPTION`, either directly or through an assigned role.

###### RQ.SRS-006.RBAC.Privileges.AlterIndex.GrantOption.Revoke
version: 1.0

[ClickHouse] SHALL support a user with **grant option** privilege
revoking **alter index** privilege from other **users** or **roles** on the same
or smaller scope that they have access to.
Any `ALTER TABLE ... ORDER BY | ADD|DROP|MATERIALIZE|CLEAR INDEX` statements
SHALL fail when done by a user with privilege revoked by a user with `GRANT OPTION`,
either directly or through an assigned role, unless they have access otherwise.

###### RQ.SRS-006.RBAC.Privileges.AlterIndex.TableEngines
version: 1.0

[ClickHouse] SHALL support controlling access to the **alter index** privilege
on tables created using the following engines

* MergeTree
* ReplacingMergeTree
* SummingMergeTree
* AggregatingMergeTree
* CollapsingMergeTree
* VersionedCollapsingMergeTree
* GraphiteMergeTree
* ReplicatedMergeTree
* ReplicatedSummingMergeTree
* ReplicatedReplacingMergeTree
* ReplicatedAggregatingMergeTree
* ReplicatedCollapsingMergeTree
* ReplicatedVersionedCollapsingMergeTree
* ReplicatedGraphiteMergeTree

##### AlterConstraint

###### RQ.SRS-006.RBAC.Privileges.AlterConstraint
version: 1.0

[ClickHouse] SHALL support controlling access to the **alter constraint** privilege
for a database or a specific table to one or more **users** or **roles**.
Any `ALTER TABLE ... ADD|DROP CONSTRAINT` statements SHALL
return an error, unless the user has the **alter constraint** privilege for
the destination table either because of the explicit grant or through one of
the roles assigned to the user.

###### RQ.SRS-006.RBAC.Privileges.AlterConstraint.Grant
version: 1.0

[ClickHouse] SHALL support granting **alter constraint** privilege
for a database or a specific table to one or more **users** or **roles**.

###### RQ.SRS-006.RBAC.Privileges.AlterConstraint.Revoke
version: 1.0

[ClickHouse] SHALL support revoking **alter constraint** privilege
for a database or a specific table from one or more **users** or **roles**.

###### RQ.SRS-006.RBAC.Privileges.AlterConstraint.Cluster
version: 1.0

[ClickHouse] SHALL support granting or revoking **alter constraint** privilege
on a specified cluster to one or more **users** or **roles**.
Any `ALTER TABLE ... ADD|DROP CONSTRAINT`
statements SHALL succeed only on nodes where the table exists and privilege was granted.

###### RQ.SRS-006.RBAC.Privileges.AlterConstraint.GrantOption
version: 1.0

[ClickHouse] SHALL support granting **alter constraint** privilege
for a database or a specific table to one or more **users** or **roles**
with a `GRANT OPTION` clause. User with **grant option** privilege SHALL be able to
change access to the **alter constraint** privilege for another user or role
on the same or smaller scope that they have access to.
+

###### RQ.SRS-006.RBAC.Privileges.AlterConstraint.GrantOption.Grant
version: 1.0

[ClickHouse] SHALL support a user with **grant option** privilege
granting **alter constraint** privilege to other **users** or **roles** on the same
or smaller scope that they have access to.
Any `ALTER TABLE ... ADD|DROP CONSTRAINT` statements SHALL
succeed when done by a user with privilege granted by a user with
`GRANT OPTION`, either directly or through an assigned role.

###### RQ.SRS-006.RBAC.Privileges.AlterConstraint.GrantOption.Revoke
version: 1.0

[ClickHouse] SHALL support a user with **grant option** privilege
revoking **alter constraint** privilege from other **users** or **roles** on the same
or smaller scope that they have access to.
Any `ALTER TABLE ... ADD|DROP CONSTRAINT` statements
SHALL fail when done by a user with privilege revoked by a user with `GRANT OPTION`,
either directly or through an assigned role, unless they have access otherwise.

###### RQ.SRS-006.RBAC.Privileges.AlterConstraint.TableEngines
version: 1.0

[ClickHouse] SHALL support controlling access to the **alter constraint** privilege
on tables created using the following engines

* MergeTree
* ReplacingMergeTree
* SummingMergeTree
* AggregatingMergeTree
* CollapsingMergeTree
* VersionedCollapsingMergeTree
* GraphiteMergeTree
* ReplicatedMergeTree
* ReplicatedSummingMergeTree
* ReplicatedReplacingMergeTree
* ReplicatedAggregatingMergeTree
* ReplicatedCollapsingMergeTree
* ReplicatedVersionedCollapsingMergeTree
* ReplicatedGraphiteMergeTree

##### AlterTTL

###### RQ.SRS-006.RBAC.Privileges.AlterTTL
version: 1.0

[ClickHouse] SHALL support controlling access to the **alter ttl** or **alter materialize ttl** privilege
for a database or a specific table to one or more **users** or **roles**.
Any `ALTER TABLE ... ALTER TTL | ALTER MATERIALIZE TTL` statements SHALL
return an error, unless the user has the **alter ttl** or **alter materialize ttl** privilege for
the destination table either because of the explicit grant or through one of
the roles assigned to the user.

###### RQ.SRS-006.RBAC.Privileges.AlterTTL.Grant
version: 1.0

[ClickHouse] SHALL support granting **alter ttl** or **alter materialize ttl** privilege
for a database or a specific table to one or more **users** or **roles**.

###### RQ.SRS-006.RBAC.Privileges.AlterTTL.Revoke
version: 1.0

[ClickHouse] SHALL support revoking **alter ttl** or **alter materialize ttl** privilege
for a database or a specific table from one or more **users** or **roles**.

###### RQ.SRS-006.RBAC.Privileges.AlterTTL.Cluster
version: 1.0

[ClickHouse] SHALL support granting or revoking **alter ttl** or **alter materialize ttl** privilege
on a specified cluster to one or more **users** or **roles**.
Any `ALTER TABLE ... ALTER TTL | ALTER MATERIALIZE TTL`
statements SHALL succeed only on nodes where the table exists and privilege was granted.

###### RQ.SRS-006.RBAC.Privileges.AlterTTL.GrantOption
version: 1.0

[ClickHouse] SHALL support granting **alter ttl** or **alter materialize ttl** privilege
for a database or a specific table to one or more **users** or **roles**
with a `GRANT OPTION` clause. User with **grant option** privilege SHALL be able to
change access to the **alter ttl** or **alter materialize ttl** privilege for another user or role
on the same or smaller scope that they have access to.
+

###### RQ.SRS-006.RBAC.Privileges.AlterTTL.GrantOption.Grant
version: 1.0

[ClickHouse] SHALL support a user with **grant option** privilege
granting **alter ttl** or **alter materialize ttl** privilege to other **users** or **roles** on the same
or smaller scope that they have access to.
Any `ALTER TABLE ... ALTER TTL | ALTER MATERIALIZE TTL` statements SHALL
succeed when done by a user with privilege granted by a user with
`GRANT OPTION`, either directly or through an assigned role.

###### RQ.SRS-006.RBAC.Privileges.AlterTTL.GrantOption.Revoke
version: 1.0

[ClickHouse] SHALL support a user with **grant option** privilege
revoking **alter ttl** or **alter materialize ttl** privilege from other **users** or **roles** on the same
or smaller scope that they have access to.
Any `ALTER TABLE ... ALTER TTL | ALTER MATERIALIZE TTL` statements
SHALL fail when done by a user with privilege revoked by a user with `GRANT OPTION`,
either directly or through an assigned role, unless they have access otherwise.

###### RQ.SRS-006.RBAC.Privileges.AlterTTL.TableEngines
version: 1.0

[ClickHouse] SHALL support controlling access to the **alter ttl** or **alter materialize ttl** privilege
on tables created using the following engines

* MergeTree

##### AlterSettings

###### RQ.SRS-006.RBAC.Privileges.AlterSettings
version: 1.0

[ClickHouse] SHALL support controlling access to the **alter settings** privilege
for a database or a specific table to one or more **users** or **roles**.
Any `ALTER TABLE ... MODIFY SETTING setting` statements SHALL
return an error, unless the user has the **alter settings** privilege for
the destination table either because of the explicit grant or through one of
the roles assigned to the user. The **alter settings** privilege allows
modifying table engine settings. It does not affect session settings or server configuration parameters.

###### RQ.SRS-006.RBAC.Privileges.AlterSettings.Grant
version: 1.0

[ClickHouse] SHALL support granting **alter settings** privilege
for a database or a specific table to one or more **users** or **roles**.

###### RQ.SRS-006.RBAC.Privileges.AlterSettings.Revoke
version: 1.0

[ClickHouse] SHALL support revoking **alter settings** privilege
for a database or a specific table from one or more **users** or **roles**.

###### RQ.SRS-006.RBAC.Privileges.AlterSettings.Cluster
version: 1.0

[ClickHouse] SHALL support granting or revoking **alter settings** privilege
on a specified cluster to one or more **users** or **roles**.
Any `ALTER TABLE ... MODIFY SETTING setting`
statements SHALL succeed only on nodes where the table exists and privilege was granted.

###### RQ.SRS-006.RBAC.Privileges.AlterSettings.GrantOption
version: 1.0

[ClickHouse] SHALL support granting **alter settings** privilege
for a database or a specific table to one or more **users** or **roles**
with a `GRANT OPTION` clause. A user with **grant option** privilege SHALL be able to
change access to the **alter settings** privilege for another user or role
on the same or smaller scope that they have access to.

###### RQ.SRS-006.RBAC.Privileges.AlterSettings.GrantOption.Grant
version: 1.0

[ClickHouse] SHALL support a user with **grant option** privilege
granting **alter settings** privilege to other **users** or **roles** on the same
or smaller scope that they have access to.
Any `ALTER TABLE ...
MODIFY SETTING setting` statements SHALL
succeed when done by a user with privilege granted by a user with
`GRANT OPTION`, either directly or through an assigned role.

###### RQ.SRS-006.RBAC.Privileges.AlterSettings.GrantOption.Revoke
version: 1.0

[ClickHouse] SHALL support a user with **grant option** privilege
revoking **alter settings** privilege from other **users** or **roles** on the same
or smaller scope that they have access to.
Any `ALTER TABLE ... MODIFY SETTING setting` statements
SHALL fail when done by a user with privilege revoked by a user with `GRANT OPTION`,
either directly or through an assigned role, unless they have access otherwise.

###### RQ.SRS-006.RBAC.Privileges.AlterSettings.TableEngines
version: 1.0

[ClickHouse] SHALL support controlling access to the **alter settings** privilege
on tables created using the following engines

* MergeTree
* ReplacingMergeTree
* SummingMergeTree
* AggregatingMergeTree
* CollapsingMergeTree
* VersionedCollapsingMergeTree
* GraphiteMergeTree
* ReplicatedMergeTree
* ReplicatedSummingMergeTree
* ReplicatedReplacingMergeTree
* ReplicatedAggregatingMergeTree
* ReplicatedCollapsingMergeTree
* ReplicatedVersionedCollapsingMergeTree
* ReplicatedGraphiteMergeTree

##### RQ.SRS-006.RBAC.Privileges.Delete
version: 1.0

[ClickHouse] SHALL support granting or revoking **delete** privilege
for a database or a specific table to one or more **users** or **roles**.

##### RQ.SRS-006.RBAC.Privileges.Alter
version: 1.0

[ClickHouse] SHALL support granting or revoking **alter** privilege
for a database or a specific table to one or more **users** or **roles**.

##### RQ.SRS-006.RBAC.Privileges.Create
version: 1.0

[ClickHouse] SHALL support granting or revoking **create** privilege
for a database or a specific table to one or more **users** or **roles**.

##### RQ.SRS-006.RBAC.Privileges.Drop
version: 1.0

[ClickHouse] SHALL support granting or revoking **drop** privilege
for a database or a specific table to one or more **users** or **roles**.

##### RQ.SRS-006.RBAC.Privileges.All
version: 1.0

[ClickHouse] SHALL include in the **all** privilege the same rights
as provided by **usage**, **select**, **select columns**,
**insert**, **delete**, **alter**, **create**, and **drop** privileges.

##### RQ.SRS-006.RBAC.Privileges.All.GrantRevoke
version: 1.0

[ClickHouse] SHALL support granting or revoking **all** privileges
for a database or a specific table to one or more **users** or **roles**.

##### RQ.SRS-006.RBAC.Privileges.GrantOption
version: 1.0

[ClickHouse] SHALL support granting or revoking **grant option** privilege
for a database or a specific table to one or more **users** or **roles**.

##### RQ.SRS-006.RBAC.Privileges.AdminOption
version: 1.0

[ClickHouse] SHALL support granting or revoking **admin option** privilege
to one or more **users** or **roles**.

#### Required Privileges

##### RQ.SRS-006.RBAC.RequiredPrivileges.Create
version: 1.0

[ClickHouse] SHALL not allow any `CREATE` statements
to be executed unless the user has the **create** privilege for the destination database
either because of the explicit grant or through one of the roles assigned to the user.
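For example (a minimal sketch; the names are illustrative and the grant form
follows the `GRANT` statement described earlier in this specification):

```sql
GRANT CREATE ON db.* TO user1;
-- user1 may now create tables in db; without the grant the statement returns an error:
CREATE TABLE db.events (d Date, x UInt32) ENGINE = MergeTree() ORDER BY d;
```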
+

##### RQ.SRS-006.RBAC.RequiredPrivileges.Alter
version: 1.0

[ClickHouse] SHALL not allow any `ALTER` statements
to be executed unless the user has the **alter** privilege for the destination table
either because of the explicit grant or through one of the roles assigned to the user.

##### RQ.SRS-006.RBAC.RequiredPrivileges.Drop
version: 1.0

[ClickHouse] SHALL not allow any `DROP` statements
to be executed unless the user has the **drop** privilege for the destination database
either because of the explicit grant or through one of the roles assigned to the user.

##### RQ.SRS-006.RBAC.RequiredPrivileges.Drop.Table
version: 1.0

[ClickHouse] SHALL not allow any `DROP TABLE` statements
to be executed unless the user has the **drop** privilege for the destination database or the table
either because of the explicit grant or through one of the roles assigned to the user.

##### RQ.SRS-006.RBAC.RequiredPrivileges.GrantRevoke
version: 1.0

[ClickHouse] SHALL not allow any `GRANT` or `REVOKE` statements
to be executed unless the user has the **grant option** privilege
for the privilege being granted or revoked on the destination table
either because of the explicit grant or through one of the roles assigned to the user.

##### RQ.SRS-006.RBAC.RequiredPrivileges.Use
version: 1.0

[ClickHouse] SHALL not allow the `USE` statement to be executed
unless the user has at least one of the privileges for the database
or the table inside that database
either because of the explicit grant or through one of the roles assigned to the user.

##### RQ.SRS-006.RBAC.RequiredPrivileges.Admin
version: 1.0

[ClickHouse] SHALL not allow any of the following statements

* `SYSTEM`
* `SHOW`
* `ATTACH`
* `CHECK TABLE`
* `DESCRIBE TABLE`
* `DETACH`
* `EXISTS`
* `KILL QUERY`
* `KILL MUTATION`
* `OPTIMIZE`
* `RENAME`
* `TRUNCATE`

to be executed unless the user has the **admin option** privilege
through one of the roles with **admin option** privilege assigned to the user.
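For example, the required **admin option** could come through a role (a minimal
sketch; the role name and query id are illustrative only):

```sql
GRANT admin_role TO user1 WITH ADMIN OPTION;
-- user1, with admin_role active, may now run statements such as:
KILL QUERY WHERE query_id = 'query-id-to-kill';
```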
+ +## References + +* **ClickHouse:** https://clickhouse.tech +* **GitHub repository:** https://github.com/ClickHouse/ClickHouse/blob/master/tests/testflows/rbac/requirements/requirements.md +* **Revision history:** https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/rbac/requirements/requirements.md +* **Git:** https://git-scm.com/ +* **MySQL:** https://dev.mysql.com/doc/refman/8.0/en/account-management-statements.html +* **PostgreSQL:** https://www.postgresql.org/docs/12/user-manag.html + +[ClickHouse]: https://clickhouse.tech +[GitHub repository]: https://github.com/ClickHouse/ClickHouse/blob/master/tests/testflows/rbac/requirements/requirements.md +[Revision history]: https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/rbac/requirements/requirements.md +[Git]: https://git-scm.com/ +[MySQL]: https://dev.mysql.com/doc/refman/8.0/en/account-management-statements.html +[PostgreSQL]: https://www.postgresql.org/docs/12/user-manag.html +''') + RQ_SRS_006_RBAC = Requirement( name='RQ.SRS-006.RBAC', version='1.0', @@ -14,9 +3751,9 @@ RQ_SRS_006_RBAC = Requirement( uid=None, description=( '[ClickHouse] SHALL support role based access control.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Login = Requirement( name='RQ.SRS-006.RBAC.Login', @@ -29,9 +3766,9 @@ RQ_SRS_006_RBAC_Login = Requirement( '[ClickHouse] SHALL only allow access to the server for a given\n' 'user only when correct username and password are used during\n' 'the connection to the server.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Login_DefaultUser = Requirement( name='RQ.SRS-006.RBAC.Login.DefaultUser', @@ -43,9 +3780,9 @@ RQ_SRS_006_RBAC_Login_DefaultUser = Requirement( description=( '[ClickHouse] SHALL use the **default user** when no username and password\n' 'are specified during the connection to the server.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User = Requirement( name='RQ.SRS-006.RBAC.User', @@ -58,9 +3795,9 @@ RQ_SRS_006_RBAC_User = Requirement( '[ClickHouse] SHALL support creation and manipulation of\n' 'one or more **user** accounts to which roles, privileges,\n' 'settings profile, quotas and row policies can be assigned.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Roles = Requirement( name='RQ.SRS-006.RBAC.User.Roles', @@ -72,9 +3809,9 @@ RQ_SRS_006_RBAC_User_Roles = Requirement( description=( '[ClickHouse] SHALL support assigning one or more **roles**\n' 'to a **user**.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Privileges = Requirement( name='RQ.SRS-006.RBAC.User.Privileges', @@ -85,9 +3822,9 @@ RQ_SRS_006_RBAC_User_Privileges = Requirement( uid=None, description=( '[ClickHouse] SHALL support assigning one or more privileges to a **user**.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Variables = Requirement( name='RQ.SRS-006.RBAC.User.Variables', @@ -98,9 +3835,9 @@ RQ_SRS_006_RBAC_User_Variables = Requirement( uid=None, description=( '[ClickHouse] SHALL support assigning one or more variables to a **user**.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Variables_Constraints = Requirement( name='RQ.SRS-006.RBAC.User.Variables.Constraints', @@ -112,9 +3849,9 @@ RQ_SRS_006_RBAC_User_Variables_Constraints = Requirement( description=( '[ClickHouse] SHALL support assigning min, max and read-only constraints\n' 'for the variables that can be set and read by the **user**.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_SettingsProfile = Requirement( 
name='RQ.SRS-006.RBAC.User.SettingsProfile',
@@ -126,9 +3863,9 @@ RQ_SRS_006_RBAC_User_SettingsProfile = Requirement(
description=(
'[ClickHouse] SHALL support assigning one or more **settings profiles**\n'
'to a **user**.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Quotas = Requirement(
name='RQ.SRS-006.RBAC.User.Quotas',
@@ -139,9 +3876,9 @@ RQ_SRS_006_RBAC_User_Quotas = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support assigning one or more **quotas** to a **user**.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_RowPolicies = Requirement(
name='RQ.SRS-006.RBAC.User.RowPolicies',
@@ -152,9 +3889,9 @@ RQ_SRS_006_RBAC_User_RowPolicies = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support assigning one or more **row policies** to a **user**.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_AccountLock = Requirement(
name='RQ.SRS-006.RBAC.User.AccountLock',
@@ -165,9 +3902,9 @@ RQ_SRS_006_RBAC_User_AccountLock = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support locking and unlocking of **user** accounts.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_AccountLock_DenyAccess = Requirement(
name='RQ.SRS-006.RBAC.User.AccountLock.DenyAccess',
@@ -178,9 +3915,9 @@ RQ_SRS_006_RBAC_User_AccountLock_DenyAccess = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL deny access to the user whose account is locked.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_DefaultRole = Requirement(
name='RQ.SRS-006.RBAC.User.DefaultRole',
@@ -191,9 +3928,9 @@ RQ_SRS_006_RBAC_User_DefaultRole = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support assigning a default role to a **user**.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_RoleSelection = Requirement(
name='RQ.SRS-006.RBAC.User.RoleSelection',
@@ -205,9 +3942,9 @@ RQ_SRS_006_RBAC_User_RoleSelection = Requirement(
description=(
'[ClickHouse] SHALL support selection of one or more **roles** from the available roles\n'
'that are assigned to a **user**.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_ShowCreate = Requirement(
name='RQ.SRS-006.RBAC.User.ShowCreate',
@@ -218,9 +3955,9 @@ RQ_SRS_006_RBAC_User_ShowCreate = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support showing the command of how **user** account was created.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_ShowPrivileges = Requirement(
name='RQ.SRS-006.RBAC.User.ShowPrivileges',
@@ -231,9 +3968,9 @@ RQ_SRS_006_RBAC_User_ShowPrivileges = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support listing the privileges of the **user**.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role = Requirement(
name='RQ.SRS-006.RBAC.Role',
@@ -246,9 +3983,9 @@ RQ_SRS_006_RBAC_Role = Requirement(
'[ClickHouse] SHALL support creation and manipulation of **roles**\n'
'to which privileges, settings profile, quotas and row policies can be\n'
'assigned.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_Privileges = Requirement(
name='RQ.SRS-006.RBAC.Role.Privileges',
@@ -259,9 +3996,9 @@ RQ_SRS_006_RBAC_Role_Privileges = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support assigning one or more privileges to a **role**.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_Variables = Requirement(
name='RQ.SRS-006.RBAC.Role.Variables',
@@ -272,9 +4009,9 @@ RQ_SRS_006_RBAC_Role_Variables = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support assigning one
or more variables to a **role**.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_SettingsProfile = Requirement(
name='RQ.SRS-006.RBAC.Role.SettingsProfile',
@@ -286,9 +4023,9 @@ RQ_SRS_006_RBAC_Role_SettingsProfile = Requirement(
description=(
'[ClickHouse] SHALL support assigning one or more **settings profiles**\n'
'to a **role**.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_Quotas = Requirement(
name='RQ.SRS-006.RBAC.Role.Quotas',
@@ -299,9 +4036,9 @@ RQ_SRS_006_RBAC_Role_Quotas = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support assigning one or more **quotas** to a **role**.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_RowPolicies = Requirement(
name='RQ.SRS-006.RBAC.Role.RowPolicies',
@@ -312,9 +4049,9 @@ RQ_SRS_006_RBAC_Role_RowPolicies = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support assigning one or more **row policies** to a **role**.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_PartialRevokes = Requirement(
name='RQ.SRS-006.RBAC.PartialRevokes',
@@ -326,9 +4063,9 @@ RQ_SRS_006_RBAC_PartialRevokes = Requirement(
description=(
'[ClickHouse] SHALL support partial revoking of privileges granted\n'
'to a **user** or a **role**.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_SettingsProfile = Requirement(
name='RQ.SRS-006.RBAC.SettingsProfile',
@@ -341,9 +4078,9 @@ RQ_SRS_006_RBAC_SettingsProfile = Requirement(
'[ClickHouse] SHALL support creation and manipulation of **settings profiles**\n'
'that can include value definition for one or more variables and\n'
'can be assigned to one or more **users** or **roles**.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_SettingsProfile_Constraints = Requirement(
name='RQ.SRS-006.RBAC.SettingsProfile.Constraints',
@@ -355,9 +4092,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Constraints = Requirement(
description=(
'[ClickHouse] SHALL support assigning min, max and read-only constraints\n'
'for the variables specified in the **settings profile**.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_SettingsProfile_ShowCreate = Requirement(
name='RQ.SRS-006.RBAC.SettingsProfile.ShowCreate',
@@ -368,9 +4105,9 @@ RQ_SRS_006_RBAC_SettingsProfile_ShowCreate = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support showing the command of how **setting profile** was created.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Quotas = Requirement(
name='RQ.SRS-006.RBAC.Quotas',
@@ -383,9 +4120,9 @@ RQ_SRS_006_RBAC_Quotas = Requirement(
'[ClickHouse] SHALL support creation and manipulation of **quotas**\n'
'that can be used to limit resource usage by a **user** or a **role**\n'
'over a period of time.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Quotas_Keyed = Requirement(
name='RQ.SRS-006.RBAC.Quotas.Keyed',
@@ -397,9 +4134,9 @@ RQ_SRS_006_RBAC_Quotas_Keyed = Requirement(
description=(
'[ClickHouse] SHALL support creating **quotas** that are keyed\n'
'so that a quota is tracked separately for each key value.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Quotas_Queries = Requirement(
name='RQ.SRS-006.RBAC.Quotas.Queries',
@@ -410,9 +4147,9 @@ RQ_SRS_006_RBAC_Quotas_Queries = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support setting **queries** quota to limit the total number of requests.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Quotas_Errors = Requirement(
name='RQ.SRS-006.RBAC.Quotas.Errors',
@@ -423,9 +4160,9 @@
uid=None,
description=(
'[ClickHouse] SHALL support setting **errors** quota to limit the number of queries that threw an exception.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Quotas_ResultRows = Requirement(
name='RQ.SRS-006.RBAC.Quotas.ResultRows',
@@ -437,9 +4174,9 @@ RQ_SRS_006_RBAC_Quotas_ResultRows = Requirement(
description=(
'[ClickHouse] SHALL support setting **result rows** quota to limit the\n'
'total number of rows given as the result.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Quotas_ReadRows = Requirement(
name='RQ.SRS-006.RBAC.Quotas.ReadRows',
@@ -451,9 +4188,9 @@ RQ_SRS_006_RBAC_Quotas_ReadRows = Requirement(
description=(
'[ClickHouse] SHALL support setting **read rows** quota to limit the total\n'
'number of source rows read from tables for running the query on all remote servers.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Quotas_ResultBytes = Requirement(
name='RQ.SRS-006.RBAC.Quotas.ResultBytes',
@@ -465,9 +4202,9 @@ RQ_SRS_006_RBAC_Quotas_ResultBytes = Requirement(
description=(
'[ClickHouse] SHALL support setting **result bytes** quota to limit the total number\n'
'of bytes that can be returned as the result.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Quotas_ReadBytes = Requirement(
name='RQ.SRS-006.RBAC.Quotas.ReadBytes',
@@ -479,9 +4216,9 @@ RQ_SRS_006_RBAC_Quotas_ReadBytes = Requirement(
description=(
'[ClickHouse] SHALL support setting **read bytes** quota to limit the total number\n'
'of source bytes read from tables for running the query on all remote servers.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Quotas_ExecutionTime = Requirement(
name='RQ.SRS-006.RBAC.Quotas.ExecutionTime',
@@ -493,9 +4230,9 @@ RQ_SRS_006_RBAC_Quotas_ExecutionTime = Requirement(
description=(
'[ClickHouse] SHALL support setting **execution time** quota to limit the maximum\n'
'query execution time.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Quotas_ShowCreate = Requirement(
name='RQ.SRS-006.RBAC.Quotas.ShowCreate',
@@ -506,9 +4243,9 @@ RQ_SRS_006_RBAC_Quotas_ShowCreate = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support showing the command of how **quota** was created.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_RowPolicy = Requirement(
name='RQ.SRS-006.RBAC.RowPolicy',
@@ -521,9 +4258,9 @@ RQ_SRS_006_RBAC_RowPolicy = Requirement(
'[ClickHouse] SHALL support creation and manipulation of table **row policies**\n'
'that can be used to limit access to the table contents for a **user** or a **role**\n'
'using a specified **condition**.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_RowPolicy_Condition = Requirement(
name='RQ.SRS-006.RBAC.RowPolicy.Condition',
@@ -535,9 +4272,9 @@ RQ_SRS_006_RBAC_RowPolicy_Condition = Requirement(
description=(
'[ClickHouse] SHALL support row policy **conditions** that can be any SQL\n'
'expression that returns a boolean.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_RowPolicy_ShowCreate = Requirement(
name='RQ.SRS-006.RBAC.RowPolicy.ShowCreate',
@@ -548,9 +4285,9 @@ RQ_SRS_006_RBAC_RowPolicy_ShowCreate = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support showing the command of how **row policy** was created.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Use_DefaultRole = Requirement(
name='RQ.SRS-006.RBAC.User.Use.DefaultRole',
@@ -562,9 +4299,9 @@ RQ_SRS_006_RBAC_User_Use_DefaultRole = Requirement(
description=(
'[ClickHouse] SHALL by default use default role or roles assigned\n'
'to
the user if specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Use_AllRolesWhenNoDefaultRole = Requirement( name='RQ.SRS-006.RBAC.User.Use.AllRolesWhenNoDefaultRole', @@ -576,9 +4313,9 @@ RQ_SRS_006_RBAC_User_Use_AllRolesWhenNoDefaultRole = Requirement( description=( '[ClickHouse] SHALL by default use all the roles assigned to the user\n' 'if no default role or roles are specified for the user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create = Requirement( name='RQ.SRS-006.RBAC.User.Create', @@ -589,9 +4326,9 @@ RQ_SRS_006_RBAC_User_Create = Requirement( uid=None, description=( '[ClickHouse] SHALL support creating **user** accounts using `CREATE USER` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create_IfNotExists = Requirement( name='RQ.SRS-006.RBAC.User.Create.IfNotExists', @@ -605,9 +4342,9 @@ RQ_SRS_006_RBAC_User_Create_IfNotExists = Requirement( 'to skip raising an exception if a user with the same **name** already exists.\n' 'If the `IF NOT EXISTS` clause is not specified then an exception SHALL be\n' 'raised if a user with the same **name** already exists.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create_Replace = Requirement( name='RQ.SRS-006.RBAC.User.Create.Replace', @@ -619,9 +4356,9 @@ RQ_SRS_006_RBAC_User_Create_Replace = Requirement( description=( '[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE USER` statement\n' 'to replace existing user account if already exists.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create_Password_NoPassword = Requirement( name='RQ.SRS-006.RBAC.User.Create.Password.NoPassword', @@ -633,9 +4370,9 @@ RQ_SRS_006_RBAC_User_Create_Password_NoPassword = Requirement( description=( '[ClickHouse] SHALL support specifying no password when creating\n' 'user account using `IDENTIFIED WITH NO_PASSWORD` clause .\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create_Password_NoPassword_Login = Requirement( name='RQ.SRS-006.RBAC.User.Create.Password.NoPassword.Login', @@ -647,9 +4384,9 @@ RQ_SRS_006_RBAC_User_Create_Password_NoPassword_Login = Requirement( description=( '[ClickHouse] SHALL use no password for the user when connecting to the server\n' 'when an account was created with `IDENTIFIED WITH NO_PASSWORD` clause.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create_Password_PlainText = Requirement( name='RQ.SRS-006.RBAC.User.Create.Password.PlainText', @@ -661,9 +4398,9 @@ RQ_SRS_006_RBAC_User_Create_Password_PlainText = Requirement( description=( '[ClickHouse] SHALL support specifying plaintext password when creating\n' 'user account using `IDENTIFIED WITH PLAINTEXT_PASSWORD BY` clause.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create_Password_PlainText_Login = Requirement( name='RQ.SRS-006.RBAC.User.Create.Password.PlainText.Login', @@ -676,9 +4413,9 @@ RQ_SRS_006_RBAC_User_Create_Password_PlainText_Login = Requirement( '[ClickHouse] SHALL use the plaintext password passed by the user when connecting to the server\n' 'when an account was created with `IDENTIFIED WITH PLAINTEXT_PASSWORD` clause\n' 'and compare the password with the one used in the `CREATE USER` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create_Password_Sha256Password = Requirement( name='RQ.SRS-006.RBAC.User.Create.Password.Sha256Password', @@ -691,9 +4428,9 @@ RQ_SRS_006_RBAC_User_Create_Password_Sha256Password = Requirement( '[ClickHouse] SHALL support specifying the result of 
applying SHA256\n' 'to some password when creating user account using `IDENTIFIED WITH SHA256_PASSWORD BY` or `IDENTIFIED BY`\n' 'clause.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create_Password_Sha256Password_Login = Requirement( name='RQ.SRS-006.RBAC.User.Create.Password.Sha256Password.Login', @@ -706,9 +4443,9 @@ RQ_SRS_006_RBAC_User_Create_Password_Sha256Password_Login = Requirement( '[ClickHouse] SHALL calculate `SHA256` of the password passed by the user when connecting to the server\n' "when an account was created with `IDENTIFIED WITH SHA256_PASSWORD` or with 'IDENTIFIED BY' clause\n" 'and compare the calculated hash to the one used in the `CREATE USER` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create_Password_Sha256Hash = Requirement( name='RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash', @@ -721,9 +4458,9 @@ RQ_SRS_006_RBAC_User_Create_Password_Sha256Hash = Requirement( '[ClickHouse] SHALL support specifying the result of applying SHA256\n' 'to some already calculated hash when creating user account using `IDENTIFIED WITH SHA256_HASH`\n' 'clause.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create_Password_Sha256Hash_Login = Requirement( name='RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash.Login', @@ -737,9 +4474,9 @@ RQ_SRS_006_RBAC_User_Create_Password_Sha256Hash_Login = Requirement( 'the user when connecting to the server\n' 'when an account was created with `IDENTIFIED WITH SHA256_HASH` clause\n' 'and compare the calculated hash to the one used in the `CREATE USER` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Password = Requirement( name='RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password', @@ -752,9 +4489,9 @@ RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Password = Requirement( '[ClickHouse] SHALL support specifying the result of applying SHA1 two times\n' 'to a password when creating user account using `IDENTIFIED WITH DOUBLE_SHA1_PASSWORD`\n' 'clause.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Password_Login = Requirement( name='RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password.Login', @@ -768,9 +4505,9 @@ RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Password_Login = Requirement( 'the user when connecting to the server\n' 'when an account was created with `IDENTIFIED WITH DOUBLE_SHA1_PASSWORD` clause\n' 'and compare the calculated value to the one used in the `CREATE USER` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Hash = Requirement( name='RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash', @@ -783,9 +4520,9 @@ RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Hash = Requirement( '[ClickHouse] SHALL support specifying the result of applying SHA1 two times\n' 'to a hash when creating user account using `IDENTIFIED WITH DOUBLE_SHA1_HASH`\n' 'clause.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Hash_Login = Requirement( name='RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash.Login', @@ -799,9 +4536,9 @@ RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Hash_Login = Requirement( 'the user when connecting to the server\n' 'when an account was created with `IDENTIFIED WITH DOUBLE_SHA1_HASH` clause\n' 'and compare the calculated value to the one used in the `CREATE USER` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create_Host_Name = Requirement( 
name='RQ.SRS-006.RBAC.User.Create.Host.Name',
@@ -814,9 +4551,9 @@ RQ_SRS_006_RBAC_User_Create_Host_Name = Requirement(
'[ClickHouse] SHALL support specifying one or more hostnames from\n'
'which user can access the server using the `HOST NAME` clause\n'
'in the `CREATE USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Create_Host_Regexp = Requirement(
name='RQ.SRS-006.RBAC.User.Create.Host.Regexp',
@@ -829,9 +4566,9 @@ RQ_SRS_006_RBAC_User_Create_Host_Regexp = Requirement(
'[ClickHouse] SHALL support specifying one or more regular expressions\n'
'to match hostnames from which user can access the server\n'
'using the `HOST REGEXP` clause in the `CREATE USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Create_Host_IP = Requirement(
name='RQ.SRS-006.RBAC.User.Create.Host.IP',
@@ -844,9 +4581,9 @@ RQ_SRS_006_RBAC_User_Create_Host_IP = Requirement(
'[ClickHouse] SHALL support specifying one or more IP address or subnet from\n'
'which user can access the server using the `HOST IP` clause in the\n'
'`CREATE USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Create_Host_Any = Requirement(
name='RQ.SRS-006.RBAC.User.Create.Host.Any',
@@ -858,9 +4595,9 @@ RQ_SRS_006_RBAC_User_Create_Host_Any = Requirement(
description=(
'[ClickHouse] SHALL support specifying `HOST ANY` clause in the `CREATE USER` statement\n'
'to indicate that user can access the server from any host.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Create_Host_None = Requirement(
name='RQ.SRS-006.RBAC.User.Create.Host.None',
@@ -872,9 +4609,9 @@ RQ_SRS_006_RBAC_User_Create_Host_None = Requirement(
description=(
'[ClickHouse] SHALL support forbidding access from any host using `HOST NONE` clause in the\n'
'`CREATE USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Create_Host_Local = Requirement(
name='RQ.SRS-006.RBAC.User.Create.Host.Local',
@@ -886,9 +4623,9 @@ RQ_SRS_006_RBAC_User_Create_Host_Local = Requirement(
description=(
'[ClickHouse] SHALL support limiting user access to local only using `HOST LOCAL` clause in the\n'
'`CREATE USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Create_Host_Like = Requirement(
name='RQ.SRS-006.RBAC.User.Create.Host.Like',
@@ -900,9 +4637,9 @@ RQ_SRS_006_RBAC_User_Create_Host_Like = Requirement(
description=(
'[ClickHouse] SHALL support specifying host using `LIKE` command syntax using the\n'
'`HOST LIKE` clause in the `CREATE USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Create_Host_Default = Requirement(
name='RQ.SRS-006.RBAC.User.Create.Host.Default',
@@ -914,9 +4651,9 @@ RQ_SRS_006_RBAC_User_Create_Host_Default = Requirement(
description=(
'[ClickHouse] SHALL support user access to server from any host\n'
'if no `HOST` clause is specified in the `CREATE USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Create_DefaultRole = Requirement(
name='RQ.SRS-006.RBAC.User.Create.DefaultRole',
@@ -928,9 +4665,9 @@ RQ_SRS_006_RBAC_User_Create_DefaultRole = Requirement(
description=(
'[ClickHouse] SHALL support specifying one or more default roles\n'
'using `DEFAULT ROLE` clause in the `CREATE USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Create_DefaultRole_None = Requirement(
name='RQ.SRS-006.RBAC.User.Create.DefaultRole.None',
@@ -942,9 +4679,9 @@ RQ_SRS_006_RBAC_User_Create_DefaultRole_None = Requirement(
description=(
'[ClickHouse] SHALL
support specifying no default roles\n' 'using `DEFAULT ROLE NONE` clause in the `CREATE USER` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create_DefaultRole_All = Requirement( name='RQ.SRS-006.RBAC.User.Create.DefaultRole.All', @@ -956,9 +4693,9 @@ RQ_SRS_006_RBAC_User_Create_DefaultRole_All = Requirement( description=( '[ClickHouse] SHALL support specifying all roles to be used as default\n' 'using `DEFAULT ROLE ALL` clause in the `CREATE USER` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create_Settings = Requirement( name='RQ.SRS-006.RBAC.User.Create.Settings', @@ -970,9 +4707,9 @@ RQ_SRS_006_RBAC_User_Create_Settings = Requirement( description=( '[ClickHouse] SHALL support specifying settings and profile\n' 'using `SETTINGS` clause in the `CREATE USER` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create_OnCluster = Requirement( name='RQ.SRS-006.RBAC.User.Create.OnCluster', @@ -984,9 +4721,9 @@ RQ_SRS_006_RBAC_User_Create_OnCluster = Requirement( description=( '[ClickHouse] SHALL support specifying cluster on which the user\n' 'will be created using `ON CLUSTER` clause in the `CREATE USER` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Create_Syntax = Requirement( name='RQ.SRS-006.RBAC.User.Create.Syntax', @@ -1005,9 +4742,9 @@ RQ_SRS_006_RBAC_User_Create_Syntax = Requirement( ' [DEFAULT ROLE role [,...]]\n' " [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]\n" '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Alter = Requirement( name='RQ.SRS-006.RBAC.User.Alter', @@ -1018,9 +4755,9 @@ RQ_SRS_006_RBAC_User_Alter = Requirement( uid=None, description=( '[ClickHouse] SHALL support altering **user** accounts using `ALTER USER` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Alter_OrderOfEvaluation = Requirement( name='RQ.SRS-006.RBAC.User.Alter.OrderOfEvaluation', @@ -1033,9 +4770,9 @@ RQ_SRS_006_RBAC_User_Alter_OrderOfEvaluation = Requirement( '[ClickHouse] SHALL support evaluating `ALTER USER` statement from left to right\n' 'where things defined on the right override anything that was previously defined on\n' 'the left.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Alter_IfExists = Requirement( name='RQ.SRS-006.RBAC.User.Alter.IfExists', @@ -1047,9 +4784,9 @@ RQ_SRS_006_RBAC_User_Alter_IfExists = Requirement( description=( '[ClickHouse] SHALL support `IF EXISTS` clause in the `ALTER USER` statement\n' 'to skip raising an exception (producing a warning instead) if a user with the specified **name** does not exist. 
If the `IF EXISTS` clause is not specified then an exception SHALL be raised if a user with the **name** does not exist.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_Cluster = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.Cluster',
@@ -1061,9 +4798,9 @@ RQ_SRS_006_RBAC_User_Alter_Cluster = Requirement(
description=(
'[ClickHouse] SHALL support specifying the cluster the user is on\n'
'when altering user account using `ON CLUSTER` clause in the `ALTER USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_Rename = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.Rename',
@@ -1075,9 +4812,9 @@ RQ_SRS_006_RBAC_User_Alter_Rename = Requirement(
description=(
'[ClickHouse] SHALL support specifying a new name for the user when\n'
'altering user account using `RENAME` clause in the `ALTER USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_Password_PlainText = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.Password.PlainText',
@@ -1090,9 +4827,9 @@ RQ_SRS_006_RBAC_User_Alter_Password_PlainText = Requirement(
'[ClickHouse] SHALL support specifying plaintext password when altering\n'
'user account using `IDENTIFIED WITH PLAINTEXT_PASSWORD BY` or\n'
'using shorthand `IDENTIFIED BY` clause in the `ALTER USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_Password_Sha256Password = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.Password.Sha256Password',
@@ -1105,9 +4842,9 @@ RQ_SRS_006_RBAC_User_Alter_Password_Sha256Password = Requirement(
'[ClickHouse] SHALL support specifying the result of applying SHA256\n'
'to some password as identification when altering user account using\n'
'`IDENTIFIED WITH SHA256_PASSWORD` clause in the `ALTER USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_Password_DoubleSha1Password = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.Password.DoubleSha1Password',
@@ -1120,9 +4857,9 @@ RQ_SRS_006_RBAC_User_Alter_Password_DoubleSha1Password = Requirement(
'[ClickHouse] SHALL support specifying the result of applying Double SHA1\n'
'to some password as identification when altering user account using\n'
'`IDENTIFIED WITH DOUBLE_SHA1_PASSWORD` clause in the `ALTER USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_Host_AddDrop = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.Host.AddDrop',
@@ -1133,9 +4870,9 @@ RQ_SRS_006_RBAC_User_Alter_Host_AddDrop = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support altering user by adding and dropping access to hosts with the `ADD HOST` or the `DROP HOST` in the `ALTER USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_Host_Local = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.Host.Local',
@@ -1147,9 +4884,9 @@ RQ_SRS_006_RBAC_User_Alter_Host_Local = Requirement(
description=(
'[ClickHouse] SHALL support limiting user access to local only using `HOST LOCAL` clause in the\n'
'`ALTER USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_Host_Name = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.Host.Name',
@@ -1162,9 +4899,9 @@ RQ_SRS_006_RBAC_User_Alter_Host_Name = Requirement(
'[ClickHouse] SHALL support specifying one or more hostnames from\n'
'which user can access the server using the `HOST NAME` clause\n'
'in the `ALTER USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_Host_Regexp = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.Host.Regexp',
@@
-1177,9 +4914,9 @@ RQ_SRS_006_RBAC_User_Alter_Host_Regexp = Requirement(
'[ClickHouse] SHALL support specifying one or more regular expressions\n'
'to match hostnames from which user can access the server\n'
'using the `HOST REGEXP` clause in the `ALTER USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_Host_IP = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.Host.IP',
@@ -1192,9 +4929,9 @@ RQ_SRS_006_RBAC_User_Alter_Host_IP = Requirement(
'[ClickHouse] SHALL support specifying one or more IP address or subnet from\n'
'which user can access the server using the `HOST IP` clause in the\n'
'`ALTER USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_Host_Like = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.Host.Like',
@@ -1205,9 +4942,9 @@ RQ_SRS_006_RBAC_User_Alter_Host_Like = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support specifying one or more similar hosts using `LIKE` command syntax using the `HOST LIKE` clause in the `ALTER USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_Host_Any = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.Host.Any',
@@ -1219,9 +4956,9 @@ RQ_SRS_006_RBAC_User_Alter_Host_Any = Requirement(
description=(
'[ClickHouse] SHALL support specifying `HOST ANY` clause in the `ALTER USER` statement\n'
'to indicate that user can access the server from any host.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_Host_None = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.Host.None',
@@ -1233,9 +4970,9 @@ RQ_SRS_006_RBAC_User_Alter_Host_None = Requirement(
description=(
'[ClickHouse] SHALL support forbidding access from any host using `HOST NONE` clause in the\n'
'`ALTER USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_DefaultRole = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.DefaultRole',
@@ -1247,9 +4984,9 @@ RQ_SRS_006_RBAC_User_Alter_DefaultRole = Requirement(
description=(
'[ClickHouse] SHALL support specifying one or more default roles\n'
'using `DEFAULT ROLE` clause in the `ALTER USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_DefaultRole_All = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.DefaultRole.All',
@@ -1261,9 +4998,9 @@ RQ_SRS_006_RBAC_User_Alter_DefaultRole_All = Requirement(
description=(
'[ClickHouse] SHALL support specifying all roles to be used as default\n'
'using `DEFAULT ROLE ALL` clause in the `ALTER USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_DefaultRole_AllExcept = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.DefaultRole.AllExcept',
@@ -1275,9 +5012,9 @@ RQ_SRS_006_RBAC_User_Alter_DefaultRole_AllExcept = Requirement(
description=(
'[ClickHouse] SHALL support specifying one or more roles which will not be used as default\n'
'using `DEFAULT ROLE ALL EXCEPT` clause in the `ALTER USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_Settings = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.Settings',
@@ -1289,9 +5026,9 @@ RQ_SRS_006_RBAC_User_Alter_Settings = Requirement(
description=(
'[ClickHouse] SHALL support specifying one or more variables\n'
'using `SETTINGS` clause in the `ALTER USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_Settings_Min = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.Settings.Min',
@@ -1302,9 +5039,10 @@ RQ_SRS_006_RBAC_User_Alter_Settings_Min = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL
support specifying a minimum value for the variable specified using `SETTINGS` with `MIN` clause in the `ALTER USER` statement.\n'
+ '\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_Settings_Max = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.Settings.Max',
@@ -1315,9 +5053,9 @@ RQ_SRS_006_RBAC_User_Alter_Settings_Max = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support specifying a maximum value for the variable specified using `SETTINGS` with `MAX` clause in the `ALTER USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_Settings_Profile = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.Settings.Profile',
@@ -1328,9 +5066,9 @@ RQ_SRS_006_RBAC_User_Alter_Settings_Profile = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support specifying the name of a profile for the variable specified using `SETTINGS` with `PROFILE` clause in the `ALTER USER` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_User_Alter_Syntax = Requirement(
name='RQ.SRS-006.RBAC.User.Alter.Syntax',
@@ -1350,9 +5088,9 @@ RQ_SRS_006_RBAC_User_Alter_Syntax = Requirement(
' [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ]\n'
" [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]\n"
'```\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_SetDefaultRole = Requirement(
name='RQ.SRS-006.RBAC.SetDefaultRole',
@@ -1365,9 +5103,9 @@ RQ_SRS_006_RBAC_SetDefaultRole = Requirement(
'[ClickHouse] SHALL support setting or changing granted roles to default for one or more\n'
'users using `SET DEFAULT ROLE` statement which\n'
'SHALL permanently change the default roles for the user or users if successful.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_SetDefaultRole_CurrentUser = Requirement(
name='RQ.SRS-006.RBAC.SetDefaultRole.CurrentUser',
@@ -1379,9 +5117,9 @@ RQ_SRS_006_RBAC_SetDefaultRole_CurrentUser = Requirement(
description=(
'[ClickHouse] SHALL support setting or changing granted roles to default for\n'
'the current user using `CURRENT_USER` clause in the `SET DEFAULT ROLE` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_SetDefaultRole_All = Requirement(
name='RQ.SRS-006.RBAC.SetDefaultRole.All',
@@ -1393,9 +5131,9 @@ RQ_SRS_006_RBAC_SetDefaultRole_All = Requirement(
description=(
'[ClickHouse] SHALL support setting or changing all granted roles to default\n'
'for one or more users using `ALL` clause in the `SET DEFAULT ROLE` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_SetDefaultRole_AllExcept = Requirement(
name='RQ.SRS-006.RBAC.SetDefaultRole.AllExcept',
@@ -1407,9 +5145,9 @@ RQ_SRS_006_RBAC_SetDefaultRole_AllExcept = Requirement(
description=(
'[ClickHouse] SHALL support setting or changing all granted roles except those specified\n'
'to default for one or more users using `ALL EXCEPT` clause in the `SET DEFAULT ROLE` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_SetDefaultRole_None = Requirement(
name='RQ.SRS-006.RBAC.SetDefaultRole.None',
@@ -1421,9 +5159,9 @@ RQ_SRS_006_RBAC_SetDefaultRole_None = Requirement(
description=(
'[ClickHouse] SHALL support removing all granted roles from default\n'
'for one or more users using `NONE` clause in the `SET DEFAULT ROLE` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_SetDefaultRole_Syntax = Requirement(
name='RQ.SRS-006.RBAC.SetDefaultRole.Syntax',
@@ -1441,9 +5179,9 @@ RQ_SRS_006_RBAC_SetDefaultRole_Syntax = 
Requirement( ' TO {user|CURRENT_USER} [,...]\n' '\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SetRole = Requirement( name='RQ.SRS-006.RBAC.SetRole', @@ -1455,9 +5193,9 @@ RQ_SRS_006_RBAC_SetRole = Requirement( description=( '[ClickHouse] SHALL support activating role or roles for the current user\n' 'using `SET ROLE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SetRole_Default = Requirement( name='RQ.SRS-006.RBAC.SetRole.Default', @@ -1469,9 +5207,9 @@ RQ_SRS_006_RBAC_SetRole_Default = Requirement( description=( '[ClickHouse] SHALL support activating default roles for the current user\n' 'using `DEFAULT` clause in the `SET ROLE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SetRole_None = Requirement( name='RQ.SRS-006.RBAC.SetRole.None', @@ -1483,9 +5221,9 @@ RQ_SRS_006_RBAC_SetRole_None = Requirement( description=( '[ClickHouse] SHALL support activating no roles for the current user\n' 'using `NONE` clause in the `SET ROLE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SetRole_All = Requirement( name='RQ.SRS-006.RBAC.SetRole.All', @@ -1497,9 +5235,9 @@ RQ_SRS_006_RBAC_SetRole_All = Requirement( description=( '[ClickHouse] SHALL support activating all roles for the current user\n' 'using `ALL` clause in the `SET ROLE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SetRole_AllExcept = Requirement( name='RQ.SRS-006.RBAC.SetRole.AllExcept', @@ -1511,9 +5249,9 @@ RQ_SRS_006_RBAC_SetRole_AllExcept = Requirement( description=( '[ClickHouse] SHALL support activating all roles except those specified\n' 'for the current user using `ALL EXCEPT` clause in the `SET ROLE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SetRole_Syntax = Requirement( name='RQ.SRS-006.RBAC.SetRole.Syntax', @@ -1526,9 +5264,9 @@ RQ_SRS_006_RBAC_SetRole_Syntax = Requirement( '```sql\n' 'SET ROLE {DEFAULT | NONE | role [,...] 
| ALL | ALL EXCEPT role [,...]}\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_ShowCreateUser = Requirement( name='RQ.SRS-006.RBAC.User.ShowCreateUser', @@ -1540,9 +5278,9 @@ RQ_SRS_006_RBAC_User_ShowCreateUser = Requirement( description=( '[ClickHouse] SHALL support showing the `CREATE USER` statement used to create the current user object\n' 'using the `SHOW CREATE USER` statement with `CURRENT_USER` or no argument.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_ShowCreateUser_For = Requirement( name='RQ.SRS-006.RBAC.User.ShowCreateUser.For', @@ -1554,9 +5292,9 @@ RQ_SRS_006_RBAC_User_ShowCreateUser_For = Requirement( description=( '[ClickHouse] SHALL support showing the `CREATE USER` statement used to create the specified user object\n' 'using the `FOR` clause in the `SHOW CREATE USER` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_ShowCreateUser_Syntax = Requirement( name='RQ.SRS-006.RBAC.User.ShowCreateUser.Syntax', @@ -1571,9 +5309,9 @@ RQ_SRS_006_RBAC_User_ShowCreateUser_Syntax = Requirement( '```sql\n' 'SHOW CREATE USER [name | CURRENT_USER]\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Drop = Requirement( name='RQ.SRS-006.RBAC.User.Drop', @@ -1584,9 +5322,9 @@ RQ_SRS_006_RBAC_User_Drop = Requirement( uid=None, description=( '[ClickHouse] SHALL support removing a user account using `DROP USER` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Drop_IfExists = Requirement( name='RQ.SRS-006.RBAC.User.Drop.IfExists', @@ -1600,9 +5338,9 @@ RQ_SRS_006_RBAC_User_Drop_IfExists = Requirement( 'to skip raising an exception if the user account does not exist.\n' 'If the `IF EXISTS` clause is not specified then an exception SHALL be\n' 'raised if a user does not exist.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Drop_OnCluster = Requirement( name='RQ.SRS-006.RBAC.User.Drop.OnCluster', @@ -1614,9 +5352,9 @@ RQ_SRS_006_RBAC_User_Drop_OnCluster = Requirement( description=( '[ClickHouse] SHALL support using `ON CLUSTER` clause in the `DROP USER` statement\n' 'to specify the name of the cluster the user should be dropped from.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_User_Drop_Syntax = Requirement( name='RQ.SRS-006.RBAC.User.Drop.Syntax', @@ -1631,9 +5369,9 @@ RQ_SRS_006_RBAC_User_Drop_Syntax = Requirement( '```sql\n' 'DROP USER [IF EXISTS] name [,...] 
[ON CLUSTER cluster_name]\n'
'```\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_Create = Requirement(
name='RQ.SRS-006.RBAC.Role.Create',
@@ -1644,9 +5382,9 @@ RQ_SRS_006_RBAC_Role_Create = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support creating a **role** using `CREATE ROLE` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_Create_IfNotExists = Requirement(
name='RQ.SRS-006.RBAC.Role.Create.IfNotExists',
@@ -1660,9 +5398,9 @@ RQ_SRS_006_RBAC_Role_Create_IfNotExists = Requirement(
'to skip raising an exception if a role with the same **name** already exists.\n'
'If the `IF NOT EXISTS` clause is not specified then an exception SHALL be\n'
'raised if a role with the same **name** already exists.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_Create_Replace = Requirement(
name='RQ.SRS-006.RBAC.Role.Create.Replace',
@@ -1674,9 +5412,9 @@ RQ_SRS_006_RBAC_Role_Create_Replace = Requirement(
description=(
'[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE ROLE` statement\n'
'to replace existing role if it already exists.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_Create_Settings = Requirement(
name='RQ.SRS-006.RBAC.Role.Create.Settings',
@@ -1688,9 +5426,9 @@ RQ_SRS_006_RBAC_Role_Create_Settings = Requirement(
description=(
'[ClickHouse] SHALL support specifying settings and profile using `SETTINGS`\n'
'clause in the `CREATE ROLE` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_Create_Syntax = Requirement(
name='RQ.SRS-006.RBAC.Role.Create.Syntax',
@@ -1706,9 +5444,9 @@ RQ_SRS_006_RBAC_Role_Create_Syntax = Requirement(
'CREATE ROLE [IF NOT EXISTS | OR REPLACE] name\n'
" [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]\n"
'```\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_Alter = Requirement(
name='RQ.SRS-006.RBAC.Role.Alter',
@@ -1719,9 +5457,9 @@ RQ_SRS_006_RBAC_Role_Alter = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support altering one **role** using `ALTER ROLE` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_Alter_IfExists = Requirement(
name='RQ.SRS-006.RBAC.Role.Alter.IfExists',
@@ -1733,9 +5471,9 @@ RQ_SRS_006_RBAC_Role_Alter_IfExists = Requirement(
description=(
'[ClickHouse] SHALL support altering one **role** using `ALTER ROLE IF EXISTS` statement, where no exception\n'
'will be thrown if the role does not exist.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_Alter_Cluster = Requirement(
name='RQ.SRS-006.RBAC.Role.Alter.Cluster',
@@ -1747,9 +5485,9 @@ RQ_SRS_006_RBAC_Role_Alter_Cluster = Requirement(
description=(
'[ClickHouse] SHALL support altering one **role** using `ALTER ROLE role ON CLUSTER` statement to specify the\n'
'cluster location of the specified role.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_Alter_Rename = Requirement(
name='RQ.SRS-006.RBAC.Role.Alter.Rename',
@@ -1762,9 +5500,9 @@ RQ_SRS_006_RBAC_Role_Alter_Rename = Requirement(
'[ClickHouse] SHALL support altering one **role** using `ALTER ROLE role RENAME TO` statement which renames the\n'
'role to a specified new name. If the new name already exists, then an exception SHALL be raised unless the\n'
'`IF EXISTS` clause is specified, by which no exception will be raised and nothing will change.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_Alter_Settings = Requirement(
name='RQ.SRS-006.RBAC.Role.Alter.Settings',
@@ -1783,9 +5521,9 @@ RQ_SRS_006_RBAC_Role_Alter_Settings = Requirement(
'```\n'
'\n'
'One or more variables and profiles may be specified as shown above.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_Alter_Syntax = Requirement(
name='RQ.SRS-006.RBAC.Role.Alter.Syntax',
@@ -1800,9 +5538,9 @@ RQ_SRS_006_RBAC_Role_Alter_Syntax = Requirement(
' [RENAME TO new_name]\n'
" [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]\n"
'```\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_Drop = Requirement(
name='RQ.SRS-006.RBAC.Role.Drop',
@@ -1813,9 +5551,9 @@ RQ_SRS_006_RBAC_Role_Drop = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support removing one or more roles using `DROP ROLE` statement.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_Drop_IfExists = Requirement(
name='RQ.SRS-006.RBAC.Role.Drop.IfExists',
@@ -1829,9 +5567,9 @@ RQ_SRS_006_RBAC_Role_Drop_IfExists = Requirement(
'to skip raising an exception if the role does not exist.\n'
'If the `IF EXISTS` clause is not specified then an exception SHALL be\n'
'raised if a role does not exist.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_Drop_Cluster = Requirement(
name='RQ.SRS-006.RBAC.Role.Drop.Cluster',
@@ -1842,9 +5580,9 @@ RQ_SRS_006_RBAC_Role_Drop_Cluster = Requirement(
uid=None,
description=(
'[ClickHouse] SHALL support using `ON CLUSTER` clause in the `DROP ROLE` statement to specify the cluster from which to drop the specified role.\n'
+ '\n'
),
- link=None
- )
+ link=None)

RQ_SRS_006_RBAC_Role_Drop_Syntax = Requirement(
name='RQ.SRS-006.RBAC.Role.Drop.Syntax',
@@ -1859,9 +5597,9 @@ RQ_SRS_006_RBAC_Role_Drop_Syntax = Requirement(
'``` sql\n'
'DROP ROLE [IF EXISTS] name [,...]
[ON CLUSTER cluster_name]\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Role_ShowCreate = Requirement( name='RQ.SRS-006.RBAC.Role.ShowCreate', @@ -1873,9 +5611,9 @@ RQ_SRS_006_RBAC_Role_ShowCreate = Requirement( description=( '[ClickHouse] SHALL support viewing the settings for a role upon creation with the `SHOW CREATE ROLE`\n' 'statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Role_ShowCreate_Syntax = Requirement( name='RQ.SRS-006.RBAC.Role.ShowCreate.Syntax', @@ -1890,9 +5628,9 @@ RQ_SRS_006_RBAC_Role_ShowCreate_Syntax = Requirement( '```sql\n' 'SHOW CREATE ROLE name\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_To = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.To', @@ -1904,9 +5642,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_To = Requirement( description=( '[ClickHouse] SHALL support granting privileges to one or more users or roles using `TO` clause\n' 'in the `GRANT PRIVILEGE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_ToCurrentUser = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.ToCurrentUser', @@ -1918,9 +5656,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_ToCurrentUser = Requirement( description=( '[ClickHouse] SHALL support granting privileges to current user using `TO CURRENT_USER` clause\n' 'in the `GRANT PRIVILEGE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_Select = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.Select', @@ -1932,9 +5670,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_Select = Requirement( description=( '[ClickHouse] SHALL support granting the **select** privilege to one or more users or roles\n' 'for a database or a table using the `GRANT SELECT` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_Insert = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.Insert', @@ -1946,9 +5684,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_Insert = Requirement( description=( '[ClickHouse] SHALL support granting the **insert** privilege to one or more users or roles\n' 'for a database or a table using the `GRANT INSERT` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_Alter = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.Alter', @@ -1960,9 +5698,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_Alter = Requirement( description=( '[ClickHouse] SHALL support granting the **alter** privilege to one or more users or roles\n' 'for a database or a table using the `GRANT ALTER` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_Create = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.Create', @@ -1974,9 +5712,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_Create = Requirement( description=( '[ClickHouse] SHALL support granting the **create** privilege to one or more users or roles\n' 'for a database or a table using the `GRANT CREATE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_Drop = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.Drop', @@ -1988,9 +5726,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_Drop = Requirement( description=( '[ClickHouse] SHALL support granting the **drop** privilege to one or more users or roles\n' 'for a database or a table using the `GRANT DROP` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_Truncate = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.Truncate', @@ -2002,9 +5740,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_Truncate = Requirement( description=( 
'[ClickHouse] SHALL support granting the **truncate** privilege to one or more users or roles\n' 'for a database or a table using `GRANT TRUNCATE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_Optimize = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.Optimize', @@ -2016,9 +5754,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_Optimize = Requirement( description=( '[ClickHouse] SHALL support granting the **optimize** privilege to one or more users or roles\n' 'for a database or a table using `GRANT OPTIMIZE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_Show = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.Show', @@ -2030,9 +5768,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_Show = Requirement( description=( '[ClickHouse] SHALL support granting the **show** privilege to one or more users or roles\n' 'for a database or a table using `GRANT SHOW` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_KillQuery = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.KillQuery', @@ -2044,9 +5782,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_KillQuery = Requirement( description=( '[ClickHouse] SHALL support granting the **kill query** privilege to one or more users or roles\n' 'for a database or a table using `GRANT KILL QUERY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_AccessManagement = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.AccessManagement', @@ -2058,9 +5796,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_AccessManagement = Requirement( description=( '[ClickHouse] SHALL support granting the **access management** privileges to one or more users or roles\n' 'for a database or a table using `GRANT ACCESS MANAGEMENT` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_System = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.System', @@ -2072,9 +5810,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_System = Requirement( description=( '[ClickHouse] SHALL support granting the **system** privileges to one or more users or roles\n' 'for a database or a table using `GRANT SYSTEM` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_Introspection = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.Introspection', @@ -2086,9 +5824,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_Introspection = Requirement( description=( '[ClickHouse] SHALL support granting the **introspection** privileges to one or more users or roles\n' 'for a database or a table using `GRANT INTROSPECTION` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_Sources = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.Sources', @@ -2100,9 +5838,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_Sources = Requirement( description=( '[ClickHouse] SHALL support granting the **sources** privileges to one or more users or roles\n' 'for a database or a table using `GRANT SOURCES` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_DictGet = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.DictGet', @@ -2114,9 +5852,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_DictGet = Requirement( description=( '[ClickHouse] SHALL support granting the **dictGet** privilege to one or more users or roles\n' 'for a database or a table using `GRANT dictGet` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_None = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.None', @@ -2128,9 +5866,9 @@ 
RQ_SRS_006_RBAC_Grant_Privilege_None = Requirement( description=( '[ClickHouse] SHALL support granting no privileges to one or more users or roles\n' 'for a database or a table using `GRANT NONE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_All = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.All', @@ -2142,9 +5880,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_All = Requirement( description=( '[ClickHouse] SHALL support granting the **all** privileges to one or more users or roles\n' 'for a database or a table using the `GRANT ALL` or `GRANT ALL PRIVILEGES` statements.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_GrantOption = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.GrantOption', @@ -2156,9 +5894,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_GrantOption = Requirement( description=( '[ClickHouse] SHALL support granting the **grant option** privilege to one or more users or roles\n' 'for a database or a table using the `WITH GRANT OPTION` clause in the `GRANT` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_On = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.On', @@ -2177,9 +5915,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_On = Requirement( '* `database.table` specific table in the specified database\n' '* `*` any table in the current database\n' '* `table` specific table in the current database\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_PrivilegeColumns = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.PrivilegeColumns', @@ -2193,9 +5931,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_PrivilegeColumns = Requirement( 'for a database or a table using the `GRANT some_privilege(column)` statement for one column.\n' 'Multiple columns will be supported with `GRANT some_privilege(column1, column2...)` statement.\n' 'The privileges will be granted for only the specified columns.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_OnCluster = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.OnCluster', @@ -2207,9 +5945,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_OnCluster = Requirement( description=( '[ClickHouse] SHALL support specifying cluster on which to grant privileges using the `ON CLUSTER`\n' 'clause in the `GRANT PRIVILEGE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Privilege_Syntax = Requirement( name='RQ.SRS-006.RBAC.Grant.Privilege.Syntax', @@ -2228,9 +5966,9 @@ RQ_SRS_006_RBAC_Grant_Privilege_Syntax = Requirement( ' TO {user | role | CURRENT_USER} [,...]\n' ' [WITH GRANT OPTION]\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_Cluster = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.Cluster', @@ -2242,9 +5980,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_Cluster = Requirement( description=( '[ClickHouse] SHALL support revoking privileges to one or more users or roles\n' 'for a database or a table on some specific cluster using the `REVOKE ON CLUSTER cluster_name` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_Any = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.Any', @@ -2259,9 +5997,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_Any = Requirement( '**some_privilege** refers to any Clickhouse defined privilege, whose hierarchy includes\n' 'SELECT, INSERT, ALTER, CREATE, DROP, TRUNCATE, OPTIMIZE, SHOW, KILL QUERY, ACCESS MANAGEMENT,\n' 'SYSTEM, INTROSPECTION, SOURCES, dictGet and all of their sub-privileges.\n' + '\n' ), - link=None - ) + link=None) 
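The grant and revoke requirements above can be read together as a minimal sketch; the user `john`, database `db0`, and table `db0.table0` below are hypothetical names used only for illustration:

```sql
-- hypothetical user and table names, for illustration only
GRANT SELECT ON db0.table0 TO john;
GRANT SELECT(column1, column2) ON db0.table0 TO john; -- privilege on specific columns
GRANT INSERT ON db0.* TO john WITH GRANT OPTION;      -- any table in database db0
REVOKE SELECT(column2) ON db0.table0 FROM john;       -- partial revoke of one column
```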
RQ_SRS_006_RBAC_Revoke_Privilege_Select = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.Select', @@ -2273,9 +6011,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_Select = Requirement( description=( '[ClickHouse] SHALL support revoking the **select** privilege to one or more users or roles\n' 'for a database or a table using the `REVOKE SELECT` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_Insert = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.Insert', @@ -2287,9 +6025,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_Insert = Requirement( description=( '[ClickHouse] SHALL support revoking the **insert** privilege to one or more users or roles\n' 'for a database or a table using the `REVOKE INSERT` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_Alter = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.Alter', @@ -2301,9 +6039,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_Alter = Requirement( description=( '[ClickHouse] SHALL support revoking the **alter** privilege to one or more users or roles\n' 'for a database or a table using the `REVOKE ALTER` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_Create = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.Create', @@ -2315,9 +6053,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_Create = Requirement( description=( '[ClickHouse] SHALL support revoking the **create** privilege to one or more users or roles\n' 'for a database or a table using the `REVOKE CREATE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_Drop = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.Drop', @@ -2329,9 +6067,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_Drop = Requirement( description=( '[ClickHouse] SHALL support revoking the **drop** privilege to one or more users or roles\n' 'for a database or a table using the `REVOKE DROP` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_Truncate = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.Truncate', @@ -2343,9 +6081,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_Truncate = Requirement( description=( '[ClickHouse] SHALL support revoking the **truncate** privilege to one or more users or roles\n' 'for a database or a table using the `REVOKE TRUNCATE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_Optimize = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.Optimize', @@ -2357,9 +6095,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_Optimize = Requirement( description=( '[ClickHouse] SHALL support revoking the **optimize** privilege to one or more users or roles\n' 'for a database or a table using the `REVOKE OPTIMIZE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_Show = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.Show', @@ -2371,9 +6109,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_Show = Requirement( description=( '[ClickHouse] SHALL support revoking the **show** privilege to one or more users or roles\n' 'for a database or a table using the `REVOKE SHOW` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_KillQuery = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.KillQuery', @@ -2385,9 +6123,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_KillQuery = Requirement( description=( '[ClickHouse] SHALL support revoking the **kill query** privilege to one or more users or roles\n' 'for a database or a table using the `REVOKE KILL QUERY` statement.\n' + '\n' ), - link=None - ) 
+ link=None) RQ_SRS_006_RBAC_Revoke_Privilege_AccessManagement = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.AccessManagement', @@ -2399,9 +6137,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_AccessManagement = Requirement( description=( '[ClickHouse] SHALL support revoking the **access management** privilege to one or more users or roles\n' 'for a database or a table using the `REVOKE ACCESS MANAGEMENT` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_System = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.System', @@ -2413,9 +6151,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_System = Requirement( description=( '[ClickHouse] SHALL support revoking the **system** privilege to one or more users or roles\n' 'for a database or a table using the `REVOKE SYSTEM` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_Introspection = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.Introspection', @@ -2427,9 +6165,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_Introspection = Requirement( description=( '[ClickHouse] SHALL support revoking the **introspection** privilege to one or more users or roles\n' 'for a database or a table using the `REVOKE INTROSPECTION` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_Sources = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.Sources', @@ -2441,9 +6179,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_Sources = Requirement( description=( '[ClickHouse] SHALL support revoking the **sources** privilege to one or more users or roles\n' 'for a database or a table using the `REVOKE SOURCES` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_DictGet = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.DictGet', @@ -2455,9 +6193,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_DictGet = Requirement( description=( '[ClickHouse] SHALL support revoking the **dictGet** privilege to one or more users or roles\n' 'for a database or a table using the `REVOKE dictGet` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_PrivelegeColumns = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.PrivelegeColumns', @@ -2471,9 +6209,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_PrivelegeColumns = Requirement( 'for a database or a table using the `REVOKE some_privilege(column)` statement for one column.\n' 'Multiple columns will be supported with `REVOKE some_privilege(column1, column2...)` statement.\n' 'The privileges will be revoked for only the specified columns.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_Multiple = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.Multiple', @@ -2488,9 +6226,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_Multiple = Requirement( '**privileges** refers to any set of Clickhouse defined privilege, whose hierarchy includes\n' 'SELECT, INSERT, ALTER, CREATE, DROP, TRUNCATE, OPTIMIZE, SHOW, KILL QUERY, ACCESS MANAGEMENT,\n' 'SYSTEM, INTROSPECTION, SOURCES, dictGet and all of their sub-privileges.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_All = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.All', @@ -2502,9 +6240,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_All = Requirement( description=( '[ClickHouse] SHALL support revoking **all** privileges to one or more users or roles\n' 'for a database or a table using the `REVOKE ALL` or `REVOKE ALL PRIVILEGES` statements.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_None = Requirement( 
name='RQ.SRS-006.RBAC.Revoke.Privilege.None', @@ -2516,9 +6254,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_None = Requirement( description=( '[ClickHouse] SHALL support revoking **no** privileges to one or more users or roles\n' 'for a database or a table using the `REVOKE NONE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_On = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.On', @@ -2537,9 +6275,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_On = Requirement( '* `*.*` any table in any database\n' '* `table` specific table in the current database\n' '* `*` any table in the current database\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_From = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.From', @@ -2556,9 +6294,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_From = Requirement( '* `{user | CURRENT_USER} [,...]` some combination of users by name, which may include the current user\n' '* `ALL` all users\n' '* `ALL EXCEPT {user | CURRENT_USER} [,...]` the logical reverse of the first pattern\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Privilege_Syntax = Requirement( name='RQ.SRS-006.RBAC.Revoke.Privilege.Syntax', @@ -2577,9 +6315,9 @@ RQ_SRS_006_RBAC_Revoke_Privilege_Syntax = Requirement( ' ON {db.table|db.*|*.*|table|*}\n' ' FROM {user | CURRENT_USER} [,...] | ALL | ALL EXCEPT {user | CURRENT_USER} [,...]\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_PartialRevoke_Syntax = Requirement( name='RQ.SRS-006.RBAC.PartialRevoke.Syntax', @@ -2603,9 +6341,9 @@ RQ_SRS_006_RBAC_PartialRevoke_Syntax = Requirement( '```sql\n' 'SET partial_revokes = 1\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Role = Requirement( name='RQ.SRS-006.RBAC.Grant.Role', @@ -2617,9 +6355,9 @@ RQ_SRS_006_RBAC_Grant_Role = Requirement( description=( '[ClickHouse] SHALL support granting one or more roles to\n' 'one or more users or roles using the `GRANT` role statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Role_CurrentUser = Requirement( name='RQ.SRS-006.RBAC.Grant.Role.CurrentUser', @@ -2631,9 +6369,9 @@ RQ_SRS_006_RBAC_Grant_Role_CurrentUser = Requirement( description=( '[ClickHouse] SHALL support granting one or more roles to current user using\n' '`TO CURRENT_USER` clause in the `GRANT` role statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Role_AdminOption = Requirement( name='RQ.SRS-006.RBAC.Grant.Role.AdminOption', @@ -2646,9 +6384,9 @@ RQ_SRS_006_RBAC_Grant_Role_AdminOption = Requirement( '[ClickHouse] SHALL support granting `admin option` privilege\n' 'to one or more users or roles using the `WITH ADMIN OPTION` clause\n' 'in the `GRANT` role statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Role_OnCluster = Requirement( name='RQ.SRS-006.RBAC.Grant.Role.OnCluster', @@ -2660,9 +6398,9 @@ RQ_SRS_006_RBAC_Grant_Role_OnCluster = Requirement( description=( '[ClickHouse] SHALL support specifying cluster on which the user is to be granted one or more roles\n' 'using `ON CLUSTER` clause in the `GRANT` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Grant_Role_Syntax = Requirement( name='RQ.SRS-006.RBAC.Grant.Role.Syntax', @@ -2681,9 +6419,9 @@ RQ_SRS_006_RBAC_Grant_Role_Syntax = Requirement( ' TO {user | role | CURRENT_USER} [,...]\n' ' [WITH ADMIN OPTION]\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Role = Requirement( name='RQ.SRS-006.RBAC.Revoke.Role', @@ -2695,9 +6433,9 @@ 
RQ_SRS_006_RBAC_Revoke_Role = Requirement( description=( '[ClickHouse] SHALL support revoking one or more roles from\n' 'one or more users or roles using the `REVOKE` role statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Role_Keywords = Requirement( name='RQ.SRS-006.RBAC.Revoke.Role.Keywords', @@ -2710,9 +6448,9 @@ RQ_SRS_006_RBAC_Revoke_Role_Keywords = Requirement( '[ClickHouse] SHALL support revoking one or more roles from\n' 'special groupings of one or more users or roles with the `ALL`, `ALL EXCEPT`,\n' 'and `CURRENT_USER` keywords.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Role_Cluster = Requirement( name='RQ.SRS-006.RBAC.Revoke.Role.Cluster', @@ -2725,9 +6463,9 @@ RQ_SRS_006_RBAC_Revoke_Role_Cluster = Requirement( '[ClickHouse] SHALL support revoking one or more roles from\n' 'one or more users or roles from one or more clusters\n' 'using the `REVOKE ON CLUSTER` role statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_AdminOption = Requirement( name='RQ.SRS-006.RBAC.Revoke.AdminOption', @@ -2740,9 +6478,9 @@ RQ_SRS_006_RBAC_Revoke_AdminOption = Requirement( '[ClickHouse] SHALL support revoking `admin option` privilege\n' 'in one or more users or roles using the `ADMIN OPTION FOR` clause\n' 'in the `REVOKE` role statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Revoke_Role_Syntax = Requirement( name='RQ.SRS-006.RBAC.Revoke.Role.Syntax', @@ -2759,9 +6497,9 @@ RQ_SRS_006_RBAC_Revoke_Role_Syntax = Requirement( ' role [,...]\n' ' FROM {user | role | CURRENT_USER} [,...] | ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...]\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Show_Grants = Requirement( name='RQ.SRS-006.RBAC.Show.Grants', @@ -2773,9 +6511,9 @@ RQ_SRS_006_RBAC_Show_Grants = Requirement( description=( '[ClickHouse] SHALL support listing all the privileges granted to current user and role\n' 'using the `SHOW GRANTS` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Show_Grants_For = Requirement( name='RQ.SRS-006.RBAC.Show.Grants.For', @@ -2787,9 +6525,9 @@ RQ_SRS_006_RBAC_Show_Grants_For = Requirement( description=( '[ClickHouse] SHALL support listing all the privileges granted to a user or a role\n' 'using the `FOR` clause in the `SHOW GRANTS` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Show_Grants_Syntax = Requirement( name='RQ.SRS-006.RBAC.Show.Grants.Syntax', @@ -2804,9 +6542,9 @@ RQ_SRS_006_RBAC_Show_Grants_Syntax = Requirement( '``` sql\n' 'SHOW GRANTS [FOR user_or_role]\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Create = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Create', @@ -2817,9 +6555,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Create = Requirement( uid=None, description=( '[ClickHouse] SHALL support creating settings profile using the `CREATE SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Create_IfNotExists = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Create.IfNotExists', @@ -2833,9 +6571,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Create_IfNotExists = Requirement( 'to skip raising an exception if a settings profile with the same **name** already exists.\n' 'If `IF NOT EXISTS` clause is not specified then an exception SHALL be raised if\n' 'a settings profile with the same **name** already exists.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Create_Replace = Requirement( 
name='RQ.SRS-006.RBAC.SettingsProfile.Create.Replace', @@ -2847,9 +6585,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Create_Replace = Requirement( description=( '[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE SETTINGS PROFILE` statement\n' 'to replace existing settings profile if it already exists.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Create_Variables = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Create.Variables', @@ -2861,9 +6599,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Create_Variables = Requirement( description=( '[ClickHouse] SHALL support assigning values and constraints to one or more\n' 'variables in the `CREATE SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Value = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Value', @@ -2874,9 +6612,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Value = Requirement( uid=None, description=( '[ClickHouse] SHALL support assigning variable value in the `CREATE SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Constraints', @@ -2888,9 +6626,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints = Requirement( description=( '[ClickHouse] SHALL support setting `MIN`, `MAX`, `READONLY`, and `WRITABLE`\n' 'constraints for the variables in the `CREATE SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment', @@ -2902,9 +6640,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment = Requirement( description=( '[ClickHouse] SHALL support assigning settings profile to one or more users\n' 'or roles in the `CREATE SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_None = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.None', @@ -2916,9 +6654,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_None = Requirement( description=( '[ClickHouse] SHALL support assigning settings profile to no users or roles using\n' '`TO NONE` clause in the `CREATE SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_All = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.All', @@ -2930,9 +6668,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_All = Requirement( description=( '[ClickHouse] SHALL support assigning settings profile to all current users and roles\n' 'using `TO ALL` clause in the `CREATE SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_AllExcept = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.AllExcept', @@ -2944,9 +6682,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_AllExcept = Requirement( description=( '[ClickHouse] SHALL support excluding assignment to one or more users or roles using\n' 'the `ALL EXCEPT` clause in the `CREATE SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Create_Inherit = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Create.Inherit', @@ -2958,9 +6696,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Create_Inherit = Requirement( description=( '[ClickHouse] SHALL support 
inheriting profile settings from an indicated profile using\n' 'the `INHERIT` clause in the `CREATE SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Create_OnCluster = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Create.OnCluster', @@ -2972,9 +6710,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Create_OnCluster = Requirement( description=( '[ClickHouse] SHALL support specifying what cluster to create settings profile on\n' 'using `ON CLUSTER` clause in the `CREATE SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Create_Syntax = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Create.Syntax', @@ -2992,9 +6730,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Create_Syntax = Requirement( " [SET varname [= value] [MIN min] [MAX max] [READONLY|WRITABLE] | [INHERIT 'profile_name'] [,...]]\n" ' [TO {user_or_role [,...] | NONE | ALL | ALL EXCEPT user_or_role [,...]}]\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Alter = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Alter', @@ -3005,9 +6743,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Alter = Requirement( uid=None, description=( '[ClickHouse] SHALL support altering settings profile using the `ALTER SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Alter_IfExists = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Alter.IfExists', @@ -3021,9 +6759,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Alter_IfExists = Requirement( 'to skip raising an exception if a settings profile does not exist.\n' 'If the `IF EXISTS` clause is not specified then an exception SHALL be\n' 'raised if a settings profile does not exist.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Alter_Rename = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Rename', @@ -3035,9 +6773,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Alter_Rename = Requirement( description=( '[ClickHouse] SHALL support renaming settings profile using the `RENAME TO` clause\n' 'in the `ALTER SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables', @@ -3049,9 +6787,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables = Requirement( description=( '[ClickHouse] SHALL support altering values and constraints of one or more\n' 'variables in the `ALTER SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Value = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Value', @@ -3062,9 +6800,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Value = Requirement( uid=None, description=( '[ClickHouse] SHALL support altering value of the variable in the `ALTER SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Constraints', @@ -3076,9 +6814,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints = Requirement( description=( '[ClickHouse] SHALL support altering `MIN`, `MAX`, `READONLY`, and `WRITABLE`\n' 'constraints for the variables in the `ALTER SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None)
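A minimal sketch of the create and alter settings profile requirements above, assuming hypothetical names `limits_profile` and `dev_role` (neither is part of the requirements):

```sql
-- hypothetical profile and role names, for illustration only
CREATE SETTINGS PROFILE IF NOT EXISTS limits_profile
    SETTINGS max_memory_usage = 100000000 MIN 90000000 MAX 110000000
    TO dev_role;

ALTER SETTINGS PROFILE IF EXISTS limits_profile
    SETTINGS max_memory_usage = 120000000
    TO ALL EXCEPT dev_role;
```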
RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment', @@ -3090,9 +6828,9 @@
RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment = Requirement( description=( '[ClickHouse] SHALL support reassigning settings profile to one or more users\n' 'or roles using the `TO` clause in the `ALTER SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_None = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.None', @@ -3104,9 +6842,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_None = Requirement( description=( '[ClickHouse] SHALL support reassigning settings profile to no users or roles using the\n' '`TO NONE` clause in the `ALTER SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_All = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.All', @@ -3118,9 +6856,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_All = Requirement( description=( '[ClickHouse] SHALL support reassigning settings profile to all current users and roles\n' 'using the `TO ALL` clause in the `ALTER SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_AllExcept = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.AllExcept', @@ -3132,9 +6870,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_AllExcept = Requirement( description=( '[ClickHouse] SHALL support excluding assignment to one or more users or roles using\n' 'the `TO ALL EXCEPT` clause in the `ALTER SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_Inherit = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.Inherit', @@ -3146,9 +6884,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_Inherit = Requirement( description=( '[ClickHouse] SHALL support altering the settings profile by inheriting settings from\n' 'a specified profile using `INHERIT` clause in the `ALTER SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_OnCluster = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.OnCluster', @@ -3160,9 +6898,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_OnCluster = Requirement( description=( '[ClickHouse] SHALL support altering the settings profile on a specified cluster using\n' '`ON CLUSTER` clause in the `ALTER SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Alter_Syntax = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Syntax', @@ -3181,9 +6919,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Alter_Syntax = Requirement( " [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | INHERIT 'profile_name'] [,...]\n" ' [TO {user_or_role [,...]
| NONE | ALL | ALL EXCEPT user_or_role [,...]]}\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Drop = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Drop', @@ -3194,9 +6932,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Drop = Requirement( uid=None, description=( '[ClickHouse] SHALL support removing one or more settings profiles using the `DROP SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Drop_IfExists = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Drop.IfExists', @@ -3210,9 +6948,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Drop_IfExists = Requirement( 'to skip raising an exception if the settings profile does not exist.\n' 'If the `IF EXISTS` clause is not specified then an exception SHALL be\n' 'raised if a settings profile does not exist.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Drop_OnCluster = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Drop.OnCluster', @@ -3224,9 +6962,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Drop_OnCluster = Requirement( description=( '[ClickHouse] SHALL support dropping one or more settings profiles on specified cluster using\n' '`ON CLUSTER` clause in the `DROP SETTINGS PROFILE` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_Drop_Syntax = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.Drop.Syntax', @@ -3241,9 +6979,9 @@ RQ_SRS_006_RBAC_SettingsProfile_Drop_Syntax = Requirement( '``` sql\n' 'DROP SETTINGS PROFILE [IF EXISTS] name [,name,...]\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_SettingsProfile_ShowCreateSettingsProfile = Requirement( name='RQ.SRS-006.RBAC.SettingsProfile.ShowCreateSettingsProfile', @@ -3259,9 +6997,9 @@ RQ_SRS_006_RBAC_SettingsProfile_ShowCreateSettingsProfile = Requirement( '``` sql\n' 'SHOW CREATE SETTINGS PROFILE name\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create = Requirement( name='RQ.SRS-006.RBAC.Quota.Create', @@ -3272,9 +7010,9 @@ RQ_SRS_006_RBAC_Quota_Create = Requirement( uid=None, description=( '[ClickHouse] SHALL support creating quotas using the `CREATE QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_IfNotExists = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.IfNotExists', @@ -3288,9 +7026,9 @@ RQ_SRS_006_RBAC_Quota_Create_IfNotExists = Requirement( 'to skip raising an exception if a quota with the same **name** already exists.\n' 'If `IF NOT EXISTS` clause is not specified then an exception SHALL be raised if\n' 'a quota with the same **name** already exists.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_Replace = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.Replace', @@ -3302,9 +7040,9 @@ RQ_SRS_006_RBAC_Quota_Create_Replace = Requirement( description=( '[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE QUOTA` statement\n' 'to replace existing quota if it already exists.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_Cluster = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.Cluster', @@ -3316,9 +7054,9 @@ RQ_SRS_006_RBAC_Quota_Create_Cluster = Requirement( description=( '[ClickHouse] SHALL support creating quotas on a specific cluster with the\n' '`ON CLUSTER` clause in the `CREATE QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_Interval = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.Interval', @@ -3337,9 +7075,10 @@ RQ_SRS_006_RBAC_Quota_Create_Interval = 
Requirement( '\n' '`FOR INTERVAL number {SECOND | MINUTE | HOUR | DAY}` where number is some real number\n' 'to define the interval.\n' + '\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_Interval_Randomized = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.Interval.Randomized', @@ -3358,9 +7097,9 @@ RQ_SRS_006_RBAC_Quota_Create_Interval_Randomized = Requirement( '\n' '`FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY}` where number is some\n' 'real number to define the interval.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_Queries = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.Queries', @@ -3372,9 +7111,9 @@ RQ_SRS_006_RBAC_Quota_Create_Queries = Requirement( description=( '[ClickHouse] SHALL support limiting number of requests over a period of time\n' 'using the `QUERIES` clause in the `CREATE QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_Errors = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.Errors', @@ -3386,9 +7125,9 @@ RQ_SRS_006_RBAC_Quota_Create_Errors = Requirement( description=( '[ClickHouse] SHALL support limiting number of queries that threw an exception\n' 'using the `ERRORS` clause in the `CREATE QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_ResultRows = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.ResultRows', @@ -3400,9 +7139,9 @@ RQ_SRS_006_RBAC_Quota_Create_ResultRows = Requirement( description=( '[ClickHouse] SHALL support limiting the total number of rows given as the result\n' 'using the `RESULT ROWS` clause in the `CREATE QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_ReadRows = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.ReadRows', @@ -3415,9 +7154,9 @@ RQ_SRS_006_RBAC_Quota_Create_ReadRows = Requirement( '[ClickHouse] SHALL support limiting the total number of source rows read from tables\n' 'for running the query on all remote servers\n' 'using the `READ ROWS` clause in the `CREATE QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_ResultBytes = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.ResultBytes', @@ -3429,9 +7168,9 @@ RQ_SRS_006_RBAC_Quota_Create_ResultBytes = Requirement( description=( '[ClickHouse] SHALL support limiting the total number of bytes that can be returned as the result\n' 'using the `RESULT BYTES` clause in the `CREATE QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_ReadBytes = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.ReadBytes', @@ -3444,9 +7183,9 @@ RQ_SRS_006_RBAC_Quota_Create_ReadBytes = Requirement( '[ClickHouse] SHALL support limiting the total number of source bytes read from tables\n' 'for running the query on all remote servers\n' 'using the `READ BYTES` clause in the `CREATE QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_ExecutionTime = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.ExecutionTime', @@ -3458,9 +7197,9 @@ RQ_SRS_006_RBAC_Quota_Create_ExecutionTime = Requirement( description=( '[ClickHouse] SHALL support limiting the maximum query execution time\n' 'using the `EXECUTION TIME` clause in the `CREATE QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_NoLimits = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.NoLimits', @@ -3472,9 +7211,9 @@ RQ_SRS_006_RBAC_Quota_Create_NoLimits = Requirement( description=( '[ClickHouse] SHALL support removing any limits on
resource consumption\n' 'using the `NO LIMITS` clause in the `CREATE QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_TrackingOnly = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.TrackingOnly', @@ -3486,9 +7225,9 @@ RQ_SRS_006_RBAC_Quota_Create_TrackingOnly = Requirement( description=( '[ClickHouse] SHALL support tracking resource consumption without enforcing limits\n' 'using the `TRACKING ONLY` clause in the `CREATE QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_KeyedBy = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.KeyedBy', @@ -3500,9 +7239,9 @@ RQ_SRS_006_RBAC_Quota_Create_KeyedBy = Requirement( description=( '[ClickHouse] SHALL support tracking quota separately for some key\n' 'using the `KEYED BY` clause in the `CREATE QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_KeyedByOptions = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.KeyedByOptions', @@ -3517,9 +7256,9 @@ RQ_SRS_006_RBAC_Quota_Create_KeyedByOptions = Requirement( '\n' "'parameter' can be one of:\n" "`{'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}`\n" + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_Assignment = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.Assignment', @@ -3531,9 +7270,9 @@ RQ_SRS_006_RBAC_Quota_Create_Assignment = Requirement( description=( '[ClickHouse] SHALL support assigning quota to one or more users\n' 'or roles using the `TO` clause in the `CREATE QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_Assignment_None = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.Assignment.None', @@ -3545,9 +7284,9 @@ RQ_SRS_006_RBAC_Quota_Create_Assignment_None = Requirement( description=( '[ClickHouse] SHALL support assigning quota to no users or roles using\n' '`TO NONE` clause in the `CREATE QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_Assignment_All = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.Assignment.All', @@ -3559,9 +7298,9 @@ RQ_SRS_006_RBAC_Quota_Create_Assignment_All = Requirement( description=( '[ClickHouse] SHALL support assigning quota to all current users and roles\n' 'using `TO ALL` clause in the `CREATE QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_Assignment_Except = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.Assignment.Except', @@ -3573,9 +7312,9 @@ RQ_SRS_006_RBAC_Quota_Create_Assignment_Except = Requirement( description=( '[ClickHouse] SHALL support excluding assignment of quota to one or more users or roles using\n' 'the `EXCEPT` clause in the `CREATE QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Create_Syntax = Requirement( name='RQ.SRS-006.RBAC.Quota.Create.Syntax', @@ -3595,9 +7334,9 @@ RQ_SRS_006_RBAC_Quota_Create_Syntax = Requirement( ' NO LIMITS | TRACKING ONLY} [,...]]\n' ' [TO {role [,...]
| ALL | ALL EXCEPT role [,...]}]\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter', @@ -3608,9 +7347,9 @@ RQ_SRS_006_RBAC_Quota_Alter = Requirement( uid=None, description=( '[ClickHouse] SHALL support altering quotas using the `ALTER QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_IfExists = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.IfExists', @@ -3624,9 +7363,9 @@ RQ_SRS_006_RBAC_Quota_Alter_IfExists = Requirement( 'to skip raising an exception if a quota does not exist.\n' 'If the `IF EXISTS` clause is not specified then an exception SHALL be raised if\n' 'a quota does not exist.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_Rename = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.Rename', @@ -3638,9 +7377,9 @@ RQ_SRS_006_RBAC_Quota_Alter_Rename = Requirement( description=( '[ClickHouse] SHALL support `RENAME TO` clause in the `ALTER QUOTA` statement\n' 'to rename the quota to the specified name.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_Cluster = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.Cluster', @@ -3652,9 +7391,9 @@ RQ_SRS_006_RBAC_Quota_Alter_Cluster = Requirement( description=( '[ClickHouse] SHALL support altering quotas on a specific cluster with the\n' '`ON CLUSTER` clause in the `ALTER QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_Interval = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.Interval', @@ -3673,9 +7412,9 @@ RQ_SRS_006_RBAC_Quota_Alter_Interval = Requirement( '\n' '`FOR INTERVAL number {SECOND | MINUTE | HOUR | DAY}` where number is some real number\n' 'to define the interval.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_Interval_Randomized = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.Interval.Randomized', @@ -3694,9 +7433,9 @@ RQ_SRS_006_RBAC_Quota_Alter_Interval_Randomized = Requirement( '\n' '`FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY}` where number is some\n' 'real number to define the interval.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_Queries = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.Queries', @@ -3708,9 +7447,9 @@ RQ_SRS_006_RBAC_Quota_Alter_Queries = Requirement( description=( '[ClickHouse] SHALL support altering the limit of number of requests over a period of time\n' 'using the `QUERIES` clause in the `ALTER QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_Errors = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.Errors', @@ -3722,9 +7461,9 @@ RQ_SRS_006_RBAC_Quota_Alter_Errors = Requirement( description=( '[ClickHouse] SHALL support altering the limit of number of queries that threw an exception\n' 'using the `ERRORS` clause in the `ALTER QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_ResultRows = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.ResultRows', @@ -3736,9 +7475,9 @@ RQ_SRS_006_RBAC_Quota_Alter_ResultRows = Requirement( description=( '[ClickHouse] SHALL support altering the limit of the total number of rows given as the result\n' 'using the `RESULT ROWS` clause in the `ALTER QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_ReadRows = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.ReadRows', @@ -3751,9 +7490,9 @@ RQ_SRS_006_RBAC_Quota_Alter_ReadRows = Requirement( '[ClickHouse] SHALL support altering the limit of the total number 
of source rows read from tables\n' 'for running the query on all remote servers\n' 'using the `READ ROWS` clause in the `ALTER QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_ALter_ResultBytes = Requirement( name='RQ.SRS-006.RBAC.Quota.ALter.ResultBytes', @@ -3765,9 +7504,9 @@ RQ_SRS_006_RBAC_Quota_ALter_ResultBytes = Requirement( description=( '[ClickHouse] SHALL support altering the limit of the total number of bytes that can be returned as the result\n' 'using the `RESULT BYTES` clause in the `ALTER QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_ReadBytes = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.ReadBytes', @@ -3780,9 +7519,9 @@ RQ_SRS_006_RBAC_Quota_Alter_ReadBytes = Requirement( '[ClickHouse] SHALL support altering the limit of the total number of source bytes read from tables\n' 'for running the query on all remote servers\n' 'using the `READ BYTES` clause in the `ALTER QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_ExecutionTime = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.ExecutionTime', @@ -3794,9 +7533,9 @@ RQ_SRS_006_RBAC_Quota_Alter_ExecutionTime = Requirement( description=( '[ClickHouse] SHALL support altering the limit of the maximum query execution time\n' 'using the `EXECUTION TIME` clause in the `ALTER QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_NoLimits = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.NoLimits', @@ -3808,9 +7547,9 @@ RQ_SRS_006_RBAC_Quota_Alter_NoLimits = Requirement( description=( '[ClickHouse] SHALL support removing any limits on resource consumption\n' 'using the `NO LIMITS` clause in the `ALTER QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_TrackingOnly = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.TrackingOnly', @@ -3822,9 +7561,9 @@ RQ_SRS_006_RBAC_Quota_Alter_TrackingOnly = Requirement( description=( '[ClickHouse] SHALL support tracking resource consumption without enforcing limits\n' 'using the `TRACKING ONLY` clause in the `ALTER QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_KeyedBy = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.KeyedBy', @@ -3836,9 +7575,9 @@ RQ_SRS_006_RBAC_Quota_Alter_KeyedBy = Requirement( description=( '[ClickHouse] SHALL support altering quota to track quota separately for some key\n' 'using the `KEYED BY` clause in the `ALTER QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_KeyedByOptions = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.KeyedByOptions', @@ -3853,9 +7592,9 @@ RQ_SRS_006_RBAC_Quota_Alter_KeyedByOptions = Requirement( '\n' "'parameter' can be one of:\n" "`{'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}`\n" + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_Assignment = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.Assignment', @@ -3867,9 +7606,9 @@ RQ_SRS_006_RBAC_Quota_Alter_Assignment = Requirement( description=( '[ClickHouse] SHALL support reassigning quota to one or more users\n' 'or roles using the `TO` clause in the `ALTER QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_Assignment_None = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.Assignment.None', @@ -3881,9 +7620,9 @@ RQ_SRS_006_RBAC_Quota_Alter_Assignment_None = Requirement( description=( '[ClickHouse] SHALL support reassigning quota to no users or
roles using\n' '`TO NONE` clause in the `ALTER QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_Assignment_All = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.Assignment.All', @@ -3895,9 +7634,9 @@ RQ_SRS_006_RBAC_Quota_Alter_Assignment_All = Requirement( description=( '[ClickHouse] SHALL support reassigning quota to all current users and roles\n' 'using `TO ALL` clause in the `ALTER QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_Assignment_Except = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.Assignment.Except', @@ -3909,9 +7648,9 @@ RQ_SRS_006_RBAC_Quota_Alter_Assignment_Except = Requirement( description=( '[ClickHouse] SHALL support excluding assignment of quota to one or more users or roles using\n' 'the `EXCEPT` clause in the `ALTER QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Alter_Syntax = Requirement( name='RQ.SRS-006.RBAC.Quota.Alter.Syntax', @@ -3929,9 +7668,9 @@ RQ_SRS_006_RBAC_Quota_Alter_Syntax = Requirement( ' [KEYED BY USERNAME | KEYED BY IP | NOT KEYED] [ALLOW CUSTOM KEY | DISALLOW CUSTOM KEY]\n' ' [TO {user_or_role [,...] | NONE | ALL} [EXCEPT user_or_role [,...]]]\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Drop = Requirement( name='RQ.SRS-006.RBAC.Quota.Drop', @@ -3942,9 +7681,9 @@ RQ_SRS_006_RBAC_Quota_Drop = Requirement( uid=None, description=( '[ClickHouse] SHALL support removing one or more quotas using the `DROP QUOTA` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Drop_IfExists = Requirement( name='RQ.SRS-006.RBAC.Quota.Drop.IfExists', @@ -3958,9 +7697,9 @@ RQ_SRS_006_RBAC_Quota_Drop_IfExists = Requirement( 'to skip raising an exception when the quota does not exist.\n' 'If the `IF EXISTS` clause is not specified then an exception SHALL be\n' 'raised if the quota does not exist.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Drop_Cluster = Requirement( name='RQ.SRS-006.RBAC.Quota.Drop.Cluster', @@ -3972,9 +7711,9 @@ RQ_SRS_006_RBAC_Quota_Drop_Cluster = Requirement( description=( '[ClickHouse] SHALL support using `ON CLUSTER` clause in the `DROP QUOTA` statement\n' 'to indicate the cluster the quota to be dropped is located on.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_Drop_Syntax = Requirement( name='RQ.SRS-006.RBAC.Quota.Drop.Syntax', @@ -3989,9 +7728,9 @@ RQ_SRS_006_RBAC_Quota_Drop_Syntax = Requirement( '``` sql\n' 'DROP QUOTA [IF EXISTS] name [,name...]\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_ShowQuotas = Requirement( name='RQ.SRS-006.RBAC.Quota.ShowQuotas', @@ -4003,9 +7742,9 @@ RQ_SRS_006_RBAC_Quota_ShowQuotas = Requirement( description=( '[ClickHouse] SHALL support showing all of the current quotas\n' 'using the `SHOW QUOTAS` statement with the following syntax\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_ShowQuotas_IntoOutfile = Requirement( name='RQ.SRS-006.RBAC.Quota.ShowQuotas.IntoOutfile', @@ -4016,9 +7755,9 @@ RQ_SRS_006_RBAC_Quota_ShowQuotas_IntoOutfile = Requirement( uid=None, description=( '[ClickHouse] SHALL support the `INTO OUTFILE` clause in the `SHOW QUOTAS` statement to define an outfile by some given string literal.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_ShowQuotas_Format = Requirement( name='RQ.SRS-006.RBAC.Quota.ShowQuotas.Format', @@ -4032,9 +7771,9 @@ RQ_SRS_006_RBAC_Quota_ShowQuotas_Format = Requirement( '\n' 'The types of valid formats are many, listed 
in the output column of:\n' 'https://clickhouse.tech/docs/en/interfaces/formats/\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_ShowQuotas_Settings = Requirement( name='RQ.SRS-006.RBAC.Quota.ShowQuotas.Settings', @@ -4045,9 +7784,10 @@ RQ_SRS_006_RBAC_Quota_ShowQuotas_Settings = Requirement( uid=None, description=( '[ClickHouse] SHALL support the `SETTINGS` clause in the `SHOW QUOTAS` statement to define settings when showing all quotas.\n' + '\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_ShowQuotas_Syntax = Requirement( name='RQ.SRS-006.RBAC.Quota.ShowQuotas.Syntax', @@ -4063,8 +7803,7 @@ RQ_SRS_006_RBAC_Quota_ShowQuotas_Syntax = Requirement( 'SHOW QUOTAS\n' '```\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Name = Requirement( name='RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Name', @@ -4080,9 +7819,9 @@ RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Name = Requirement( '``` sql\n' 'SHOW CREATE QUOTA name\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Current = Requirement( name='RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Current', @@ -4095,9 +7834,9 @@ RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Current = Requirement( '[ClickHouse] SHALL support showing the `CREATE QUOTA` statement used to create the CURRENT quota\n' 'using the `SHOW CREATE QUOTA CURRENT` statement or the shorthand form\n' '`SHOW CREATE QUOTA`\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Syntax = Requirement( name='RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Syntax', @@ -4113,9 +7852,9 @@ RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Syntax = Requirement( '```sql\n' 'SHOW CREATE QUOTA [name | CURRENT]\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Create = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Create', @@ -4126,9 +7865,9 @@ RQ_SRS_006_RBAC_RowPolicy_Create = Requirement( uid=None, description=( '[ClickHouse] SHALL support creating row policy using the `CREATE ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Create_IfNotExists = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Create.IfNotExists', @@ -4142,9 +7881,9 @@ RQ_SRS_006_RBAC_RowPolicy_Create_IfNotExists = Requirement( 'to skip raising an exception if a row policy with the same **name** already exists.\n' 'If the `IF NOT EXISTS` clause is not specified then an exception SHALL be raised if\n' 'a row policy with the same **name** already exists.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Create_Replace = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Create.Replace', @@ -4156,9 +7895,9 @@ RQ_SRS_006_RBAC_RowPolicy_Create_Replace = Requirement( description=( '[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE ROW POLICY` statement\n' 'to replace existing row policy if it already exists.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Create_OnCluster = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Create.OnCluster', @@ -4170,9 +7909,9 @@ RQ_SRS_006_RBAC_RowPolicy_Create_OnCluster = Requirement( description=( '[ClickHouse] SHALL support specifying cluster on which to create the row policy\n' 'using the `ON CLUSTER` clause in the `CREATE ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Create_On = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Create.On', @@ -4184,9 +7923,9 @@ RQ_SRS_006_RBAC_RowPolicy_Create_On = Requirement( description=( '[ClickHouse] SHALL support specifying table on which to create the
row policy\n' 'using the `ON` clause in the `CREATE ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Create_Access = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Create.Access', @@ -4198,9 +7937,9 @@ RQ_SRS_006_RBAC_RowPolicy_Create_Access = Requirement( description=( '[ClickHouse] SHALL support allowing or restricting access to rows using the\n' '`AS` clause in the `CREATE ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Create_Access_Permissive = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Create.Access.Permissive', @@ -4212,9 +7951,9 @@ RQ_SRS_006_RBAC_RowPolicy_Create_Access_Permissive = Requirement( description=( '[ClickHouse] SHALL support allowing access to rows using the\n' '`AS PERMISSIVE` clause in the `CREATE ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Create_Access_Restrictive = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Create.Access.Restrictive', @@ -4226,9 +7965,9 @@ RQ_SRS_006_RBAC_RowPolicy_Create_Access_Restrictive = Requirement( description=( '[ClickHouse] SHALL support restricting access to rows using the\n' '`AS RESTRICTIVE` clause in the `CREATE ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Create_ForSelect = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Create.ForSelect', @@ -4241,9 +7980,9 @@ RQ_SRS_006_RBAC_RowPolicy_Create_ForSelect = Requirement( '[ClickHouse] SHALL support specifying which rows are affected\n' 'using the `FOR SELECT` clause in the `CREATE ROW POLICY` statement.\n' 'REQUIRES CONFIRMATION\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Create_Condition = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Create.Condition', @@ -4256,9 +7995,9 @@ RQ_SRS_006_RBAC_RowPolicy_Create_Condition = Requirement( '[ClickHouse] SHALL support specifying a condition\n' 'that can be any SQL expression which returns a boolean using the `USING`\n' 'clause in the `CREATE ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Create_Assignment = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Create.Assignment', @@ -4270,9 +8009,9 @@ RQ_SRS_006_RBAC_RowPolicy_Create_Assignment = Requirement( description=( '[ClickHouse] SHALL support assigning row policy to one or more users\n' 'or roles using the `TO` clause in the `CREATE ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_None = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.None', @@ -4284,9 +8023,9 @@ RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_None = Requirement( description=( '[ClickHouse] SHALL support assigning row policy to no users or roles using\n' 'the `TO NONE` clause in the `CREATE ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_All = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.All', @@ -4298,9 +8037,9 @@ RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_All = Requirement( description=( '[ClickHouse] SHALL support assigning row policy to all current users and roles\n' 'using `TO ALL` clause in the `CREATE ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_AllExcept = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.AllExcept', @@ -4312,9 +8051,9 @@ RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_AllExcept = Requirement( description=( '[ClickHouse] SHALL support excluding assignment of row policy to one or more users or roles using\n' 'the `ALL EXCEPT` clause in the `CREATE ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None)
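A minimal sketch combining the row policy creation clauses above; the policy `owner_only`, table `db0.docs`, role `untrusted_role`, and the `owner = currentUser()` condition are all hypothetical and serve only to show the clauses together:

```sql
-- hypothetical policy, table, and role names, for illustration only
CREATE ROW POLICY IF NOT EXISTS owner_only ON db0.docs
    AS PERMISSIVE
    FOR SELECT USING owner = currentUser()
    TO ALL EXCEPT untrusted_role;
```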
excluding assignment of row policy to one or more users or roles using\n' 'the `ALL EXCEPT` clause in the `CREATE ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Create_Syntax = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Create.Syntax', @@ -4333,9 +8072,9 @@ RQ_SRS_006_RBAC_RowPolicy_Create_Syntax = Requirement( ' [USING condition]\n' ' [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Alter = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Alter', @@ -4346,9 +8085,9 @@ RQ_SRS_006_RBAC_RowPolicy_Alter = Requirement( uid=None, description=( '[ClickHouse] SHALL support altering row policy using the `ALTER ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Alter_IfExists = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Alter.IfExists', @@ -4362,9 +8101,9 @@ RQ_SRS_006_RBAC_RowPolicy_Alter_IfExists = Requirement( 'to skip raising an exception if a row policy does not exist.\n' 'If the `IF EXISTS` clause is not specified then an exception SHALL be raised if\n' 'a row policy does not exist.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Alter_ForSelect = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Alter.ForSelect', @@ -4377,9 +8116,9 @@ RQ_SRS_006_RBAC_RowPolicy_Alter_ForSelect = Requirement( '[ClickHouse] SHALL support modifying rows on which to apply the row policy\n' 'using the `FOR SELECT` clause in the `ALTER ROW POLICY` statement.\n' 'REQUIRES FUNCTION CONFIRMATION.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Alter_OnCluster = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Alter.OnCluster', @@ -4391,9 +8130,9 @@ RQ_SRS_006_RBAC_RowPolicy_Alter_OnCluster = Requirement( description=( '[ClickHouse] SHALL support specifying cluster on which to alter the row policy\n' 'using the `ON CLUSTER` clause in the `ALTER ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Alter_On = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Alter.On', @@ -4405,9 +8144,9 @@ RQ_SRS_006_RBAC_RowPolicy_Alter_On = Requirement( description=( '[ClickHouse] SHALL support specifying table on which to alter the row policy\n' 'using the `ON` clause in the `ALTER ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Alter_Rename = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Alter.Rename', @@ -4419,9 +8158,9 @@ RQ_SRS_006_RBAC_RowPolicy_Alter_Rename = Requirement( description=( '[ClickHouse] SHALL support renaming the row policy using the `RENAME` clause\n' 'in the `ALTER ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Alter_Access = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Alter.Access', @@ -4433,9 +8172,9 @@ RQ_SRS_006_RBAC_RowPolicy_Alter_Access = Requirement( description=( '[ClickHouse] SHALL support altering access to rows using the\n' '`AS` clause in the `CREATE ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Alter_Access_Permissive = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Permissive', @@ -4447,9 +8186,9 @@ RQ_SRS_006_RBAC_RowPolicy_Alter_Access_Permissive = Requirement( description=( '[ClickHouse] SHALL support permitting access to rows using the\n' '`AS PERMISSIVE` clause in the `CREATE ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Alter_Access_Restrictive = Requirement( 
name='RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Restrictive', @@ -4461,9 +8200,9 @@ RQ_SRS_006_RBAC_RowPolicy_Alter_Access_Restrictive = Requirement( description=( '[ClickHouse] SHALL support restricting access to rows using the\n' '`AS RESTRICTIVE` clause in the `CREATE ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Alter_Condition = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Alter.Condition', @@ -4475,9 +8214,9 @@ RQ_SRS_006_RBAC_RowPolicy_Alter_Condition = Requirement( description=( '[ClickHouse] SHALL support re-specifying the row policy condition\n' 'using the `USING` clause in the `ALTER ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Alter_Condition_None = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Alter.Condition.None', @@ -4489,9 +8228,9 @@ RQ_SRS_006_RBAC_RowPolicy_Alter_Condition_None = Requirement( description=( '[ClickHouse] SHALL support removing the row policy condition\n' 'using the `USING NONE` clause in the `ALTER ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment', @@ -4503,9 +8242,9 @@ RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment = Requirement( description=( '[ClickHouse] SHALL support reassigning row policy to one or more users\n' 'or roles using the `TO` clause in the `ALTER ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_None = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.None', @@ -4517,9 +8256,9 @@ RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_None = Requirement( description=( '[ClickHouse] SHALL support reassigning row policy to no users or roles using\n' 'the `TO NONE` clause in the `ALTER ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_All = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.All', @@ -4531,9 +8270,9 @@ RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_All = Requirement( description=( '[ClickHouse] SHALL support reassigning row policy to all current users and roles\n' 'using the `TO ALL` clause in the `ALTER ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_AllExcept = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.AllExcept', @@ -4545,9 +8284,9 @@ RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_AllExcept = Requirement( description=( '[ClickHouse] SHALL support excluding assignment of row policy to one or more users or roles using\n' 'the `ALL EXCEPT` clause in the `ALTER ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Alter_Syntax = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Alter.Syntax', @@ -4567,9 +8306,9 @@ RQ_SRS_006_RBAC_RowPolicy_Alter_Syntax = Requirement( ' [USING {condition | NONE}][,...]\n' ' [TO {role [,...] 
| ALL | ALL EXCEPT role [,...]}]\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Drop = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Drop', @@ -4580,9 +8319,9 @@ RQ_SRS_006_RBAC_RowPolicy_Drop = Requirement( uid=None, description=( '[ClickHouse] SHALL support removing one or more row policies using the `DROP ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Drop_IfExists = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Drop.IfExists', @@ -4596,9 +8335,9 @@ RQ_SRS_006_RBAC_RowPolicy_Drop_IfExists = Requirement( 'to skip raising an exception when the row policy does not exist.\n' 'If the `IF EXISTS` clause is not specified then an exception SHALL be\n' 'raised if the row policy does not exist.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Drop_On = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Drop.On', @@ -4610,9 +8349,9 @@ RQ_SRS_006_RBAC_RowPolicy_Drop_On = Requirement( description=( '[ClickHouse] SHALL support removing row policy from one or more specified tables\n' 'using the `ON` clause in the `DROP ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Drop_OnCluster = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Drop.OnCluster', @@ -4624,9 +8363,9 @@ RQ_SRS_006_RBAC_RowPolicy_Drop_OnCluster = Requirement( description=( '[ClickHouse] SHALL support removing row policy from specified cluster\n' 'using the `ON CLUSTER` clause in the `DROP ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_Drop_Syntax = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.Drop.Syntax', @@ -4641,9 +8380,9 @@ RQ_SRS_006_RBAC_RowPolicy_Drop_Syntax = Requirement( '``` sql\n' 'DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] 
[ON CLUSTER cluster_name]\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy', @@ -4655,9 +8394,9 @@ RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy = Requirement( description=( '[ClickHouse] SHALL support showing the `CREATE ROW POLICY` statement used to create the row policy\n' 'using the `SHOW CREATE ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy_On = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.On', @@ -4669,9 +8408,9 @@ RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy_On = Requirement( description=( '[ClickHouse] SHALL support showing statement used to create row policy on specific table\n' 'using the `ON` in the `SHOW CREATE ROW POLICY` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy_Syntax = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.Syntax', @@ -4686,9 +8425,9 @@ RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy_Syntax = Requirement( '``` sql\n' 'SHOW CREATE [ROW] POLICY name ON [database.]table\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies', @@ -4699,9 +8438,9 @@ RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies = Requirement( uid=None, description=( '[ClickHouse] SHALL support showing row policies using the `SHOW ROW POLICIES` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies_On = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.On', @@ -4713,9 +8452,9 @@ RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies_On = Requirement( description=( '[ClickHouse] SHALL support showing row policies on a specific table\n' 'using the `ON` clause in the `SHOW ROW POLICIES` statement.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies_Syntax = Requirement( name='RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.Syntax', @@ -4730,9 +8469,9 @@ RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies_Syntax = Requirement( '```sql\n' 'SHOW [ROW] POLICIES [ON [database.]table]\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Table_PublicTables = Requirement( name='RQ.SRS-006.RBAC.Table.PublicTables', @@ -4748,9 +8487,9 @@ RQ_SRS_006_RBAC_Table_PublicTables = Requirement( '* system.numbers\n' '* system.contributors\n' '* system.functions\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Table_ShowTables = Requirement( name='RQ.SRS-006.RBAC.Table.ShowTables', @@ -4762,9 +8501,9 @@ RQ_SRS_006_RBAC_Table_ShowTables = Requirement( description=( '[ClickHouse] SHALL add a table to the list of tables accessible through `SHOW TABLES` by a user if and only if\n' 'that user has any privilege on that table, either directly or through a role.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Table_DistributedTable_Create = Requirement( name='RQ.SRS-006.RBAC.Table.DistributedTable.Create', @@ -4776,9 +8515,9 @@ RQ_SRS_006_RBAC_Table_DistributedTable_Create = Requirement( description=( '[ClickHouse] SHALL successfully `CREATE` a distributed table if and only if\n' 'the user has **create table** privilege on the table and **remote** privilege on *.*.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Table_DistributedTable_Select = Requirement( name='RQ.SRS-006.RBAC.Table.DistributedTable.Select', @@ -4792,9 +8531,9 @@ RQ_SRS_006_RBAC_Table_DistributedTable_Select = 
Requirement( 'the user has **select** privilege on the table and on the remote table specified in the `CREATE` query of the distributed table.\n' '\n' 'Does not require **select** privilege for the remote table if the remote table does not exist on the same server as the user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Table_DistributedTable_Insert = Requirement( name='RQ.SRS-006.RBAC.Table.DistributedTable.Insert', @@ -4809,9 +8548,9 @@ RQ_SRS_006_RBAC_Table_DistributedTable_Insert = Requirement( '\n' 'Does not require **insert** privilege for the remote table if the remote table does not exist on the same server as the user,\n' 'insert executes into the remote table on a different server.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Table_DistributedTable_SpecialTables = Requirement( name='RQ.SRS-006.RBAC.Table.DistributedTable.SpecialTables', @@ -4827,9 +8566,9 @@ RQ_SRS_006_RBAC_Table_DistributedTable_SpecialTables = Requirement( '* materialized view\n' '* distributed table\n' '* source table of a materialized view\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Table_DistributedTable_LocalUser = Requirement( name='RQ.SRS-006.RBAC.Table.DistributedTable.LocalUser', @@ -4841,9 +8580,9 @@ RQ_SRS_006_RBAC_Table_DistributedTable_LocalUser = Requirement( description=( '[ClickHouse] SHALL successfully execute a query using a distributed table from\n' 'a user present locally, but not remotely.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Table_DistributedTable_SameUserDifferentNodesDifferentPrivileges = Requirement( name='RQ.SRS-006.RBAC.Table.DistributedTable.SameUserDifferentNodesDifferentPrivileges', @@ -4855,9 +8594,9 @@ RQ_SRS_006_RBAC_Table_DistributedTable_SameUserDifferentNodesDifferentPrivileges description=( '[ClickHouse] SHALL successfully execute a query using a distributed table by a user that exists on multiple nodes\n' 'if and only if the user has the required privileges on the node the query is being executed from.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_View = Requirement( name='RQ.SRS-006.RBAC.View', @@ -4869,9 +8608,9 @@ RQ_SRS_006_RBAC_View = Requirement( description=( '[ClickHouse] SHALL support controlling access to **create**, **select** and **drop**\n' 'privileges for a view for users or roles.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_View_Create = Requirement( name='RQ.SRS-006.RBAC.View.Create', @@ -4895,9 +8634,9 @@ RQ_SRS_006_RBAC_View_Create = Requirement( 'CREATE VIEW view AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))\n' 'CREATE VIEW view0 AS SELECT column FROM view1 UNION ALL SELECT column FROM view2\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_View_Select = Requirement( name='RQ.SRS-006.RBAC.View.Select', @@ -4923,9 +8662,9 @@ RQ_SRS_006_RBAC_View_Select = Requirement( '\n' 'SELECT * FROM view\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_View_Drop = Requirement( name='RQ.SRS-006.RBAC.View.Drop', @@ -4937,9 +8676,9 @@ RQ_SRS_006_RBAC_View_Drop = Requirement( description=( '[ClickHouse] SHALL only successfully execute a `DROP VIEW` command if and only if\n' 'the user has **drop view** privilege on that view either explicitly or through a role.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_MaterializedView = Requirement( name='RQ.SRS-006.RBAC.MaterializedView', @@ -4951,9 +8690,9 
@@ RQ_SRS_006_RBAC_MaterializedView = Requirement( description=( '[ClickHouse] SHALL support controlling access to **create**, **select**, **alter** and **drop**\n' 'privileges for a materialized view for users or roles.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_MaterializedView_Create = Requirement( name='RQ.SRS-006.RBAC.MaterializedView.Create', @@ -4991,9 +8730,9 @@ RQ_SRS_006_RBAC_MaterializedView_Create = Requirement( '```sql\n' 'CREATE MATERIALIZED VIEW view TO target_table AS SELECT * FROM source_table\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_MaterializedView_Select = Requirement( name='RQ.SRS-006.RBAC.MaterializedView.Select', @@ -5019,9 +8758,9 @@ RQ_SRS_006_RBAC_MaterializedView_Select = Requirement( '\n' 'SELECT * FROM view\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_MaterializedView_Select_TargetTable = Requirement( name='RQ.SRS-006.RBAC.MaterializedView.Select.TargetTable', @@ -5033,9 +8772,9 @@ RQ_SRS_006_RBAC_MaterializedView_Select_TargetTable = Requirement( description=( '[ClickHouse] SHALL only successfully `SELECT` from the target table, implicit or explicit, of a materialized view if and only if\n' 'the user has `SELECT` privilege for the table, either explicitly or through a role.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_MaterializedView_Select_SourceTable = Requirement( name='RQ.SRS-006.RBAC.MaterializedView.Select.SourceTable', @@ -5047,9 +8786,9 @@ RQ_SRS_006_RBAC_MaterializedView_Select_SourceTable = Requirement( description=( '[ClickHouse] SHALL only successfully `SELECT` from the source table of a materialized view if and only if\n' 'the user has `SELECT` privilege for the table, either explicitly or through a role.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_MaterializedView_Drop = Requirement( name='RQ.SRS-006.RBAC.MaterializedView.Drop', @@ -5061,9 +8800,9 @@ RQ_SRS_006_RBAC_MaterializedView_Drop = Requirement( description=( '[ClickHouse] SHALL only successfully execute a `DROP VIEW` command if and only if\n' 'the user has **drop view** privilege on that view either explicitly or through a role.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_MaterializedView_ModifyQuery = Requirement( name='RQ.SRS-006.RBAC.MaterializedView.ModifyQuery', @@ -5082,9 +8821,9 @@ RQ_SRS_006_RBAC_MaterializedView_ModifyQuery = Requirement( '```sql\n' 'ALTER TABLE view MODIFY QUERY SELECT * FROM source_table\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_MaterializedView_Insert = Requirement( name='RQ.SRS-006.RBAC.MaterializedView.Insert', @@ -5096,9 +8835,9 @@ RQ_SRS_006_RBAC_MaterializedView_Insert = Requirement( description=( '[ClickHouse] SHALL only successfully `INSERT` into a materialized view if and only if\n' 'the user has `INSERT` privilege on the view, either explicitly or through a role.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_MaterializedView_Insert_SourceTable = Requirement( name='RQ.SRS-006.RBAC.MaterializedView.Insert.SourceTable', @@ -5110,9 +8849,9 @@ RQ_SRS_006_RBAC_MaterializedView_Insert_SourceTable = Requirement( description=( '[ClickHouse] SHALL only successfully `INSERT` into a source table of a materialized view if and only if\n' 'the user has `INSERT` privilege on the source table, either explicitly or through a role.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_MaterializedView_Insert_TargetTable = Requirement( name='RQ.SRS-006.RBAC.MaterializedView.Insert.TargetTable', @@ -5124,9 +8863,9 @@ 
RQ_SRS_006_RBAC_MaterializedView_Insert_TargetTable = Requirement( description=( '[ClickHouse] SHALL only successfully `INSERT` into a target table of a materialized view if and only if\n' 'the user has `INSERT` privilege on the target table, either explicitly or through a role.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_LiveView = Requirement( name='RQ.SRS-006.RBAC.LiveView', @@ -5138,9 +8877,9 @@ RQ_SRS_006_RBAC_LiveView = Requirement( description=( '[ClickHouse] SHALL support controlling access to **create**, **select**, **alter** and **drop**\n' 'privileges for a live view for users or roles.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_LiveView_Create = Requirement( name='RQ.SRS-006.RBAC.LiveView.Create', @@ -5164,9 +8903,9 @@ RQ_SRS_006_RBAC_LiveView_Create = Requirement( 'CREATE LIVE VIEW view AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))\n' 'CREATE LIVE VIEW view0 AS SELECT column FROM view1 UNION ALL SELECT column FROM view2\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_LiveView_Select = Requirement( name='RQ.SRS-006.RBAC.LiveView.Select', @@ -5192,9 +8931,9 @@ RQ_SRS_006_RBAC_LiveView_Select = Requirement( '\n' 'SELECT * FROM view\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_LiveView_Drop = Requirement( name='RQ.SRS-006.RBAC.LiveView.Drop', @@ -5206,9 +8945,9 @@ RQ_SRS_006_RBAC_LiveView_Drop = Requirement( description=( '[ClickHouse] SHALL only successfully execute a `DROP VIEW` command if and only if\n' 'the user has **drop view** privilege on that view either explicitly or through a role.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_LiveView_Refresh = Requirement( name='RQ.SRS-006.RBAC.LiveView.Refresh', @@ -5220,9 +8959,9 @@ RQ_SRS_006_RBAC_LiveView_Refresh = Requirement( description=( '[ClickHouse] SHALL only successfully execute an `ALTER LIVE VIEW REFRESH` command if and only if\n' 'the user has **refresh** privilege on that view either explicitly or through a role.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Usage = Requirement( name='RQ.SRS-006.RBAC.Privileges.Usage', @@ -5234,9 +8973,9 @@ RQ_SRS_006_RBAC_Privileges_Usage = Requirement( description=( '[ClickHouse] SHALL support granting or revoking **usage** privilege\n' 'for a database or a specific table to one or more **users** or **roles**.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Select = Requirement( name='RQ.SRS-006.RBAC.Privileges.Select', @@ -5251,9 +8990,9 @@ RQ_SRS_006_RBAC_Privileges_Select = Requirement( 'Any `SELECT INTO` statements SHALL not to be executed, unless the user\n' 'has the **select** privilege for the destination table\n' 'either because of the explicit grant or through one of the roles assigned to the user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Select_Grant = Requirement( name='RQ.SRS-006.RBAC.Privileges.Select.Grant', @@ -5265,9 +9004,9 @@ RQ_SRS_006_RBAC_Privileges_Select_Grant = Requirement( description=( '[ClickHouse] SHALL support granting **select** privilege\n' 'for a database or a specific table to one or more **users** or **roles**.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Select_Revoke = Requirement( name='RQ.SRS-006.RBAC.Privileges.Select.Revoke', @@ -5279,9 +9018,9 @@ RQ_SRS_006_RBAC_Privileges_Select_Revoke = Requirement( description=( 
'[ClickHouse] SHALL support revoking **select** privilege\n' 'for a database or a specific table to one or more **users** or **roles**\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Select_Column = Requirement( name='RQ.SRS-006.RBAC.Privileges.Select.Column', @@ -5296,9 +9035,9 @@ RQ_SRS_006_RBAC_Privileges_Select_Column = Requirement( 'Any `SELECT INTO` statements SHALL not to be executed, unless the user\n' 'has the **select** privilege for the destination column\n' 'either because of the explicit grant or through one of the roles assigned to the user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Select_Cluster = Requirement( name='RQ.SRS-006.RBAC.Privileges.Select.Cluster', @@ -5312,9 +9051,9 @@ RQ_SRS_006_RBAC_Privileges_Select_Cluster = Requirement( 'on a specified cluster to one or more **users** or **roles**.\n' 'Any `SELECT INTO` statements SHALL succeed only on nodes where\n' 'the table exists and privilege was granted.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Select_GrantOption = Requirement( name='RQ.SRS-006.RBAC.Privileges.Select.GrantOption', @@ -5329,9 +9068,9 @@ RQ_SRS_006_RBAC_Privileges_Select_GrantOption = Requirement( 'with a `GRANT OPTION` clause. User with **grant option** privilege SHALL be able to\n' 'change access to the **select** privilege by another user or role\n' 'on the same or smaller scope that they have access to.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Select_GrantOption_Grant = Requirement( name='RQ.SRS-006.RBAC.Privileges.Select.GrantOption.Grant', @@ -5346,9 +9085,9 @@ RQ_SRS_006_RBAC_Privileges_Select_GrantOption_Grant = Requirement( 'or smaller scope that they have access to. Any `SELECT INTO` statements SHALL succeed\n' 'when done by a user with privilege granted by a user with `GRANT OPTION`,\n' 'either directly or through an assigned role.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Select_GrantOption_Revoke = Requirement( name='RQ.SRS-006.RBAC.Privileges.Select.GrantOption.Revoke', @@ -5363,9 +9102,9 @@ RQ_SRS_006_RBAC_Privileges_Select_GrantOption_Revoke = Requirement( 'or smaller scope that they have access to. 
Any `SELECT INTO` statements SHALL fail\n' 'when done by a user with privilege revoke by a user with `GRANT OPTION`,\n' 'either directly or through an assigned role, unless they have access otherwise.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Select_TableEngines = Requirement( name='RQ.SRS-006.RBAC.Privileges.Select.TableEngines', @@ -5392,9 +9131,9 @@ RQ_SRS_006_RBAC_Privileges_Select_TableEngines = Requirement( '* ReplicatedCollapsingMergeTree\n' '* ReplicatedVersionedCollapsingMergeTree\n' '* ReplicatedGraphiteMergeTree\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Insert = Requirement( name='RQ.SRS-006.RBAC.Privileges.Insert', @@ -5409,9 +9148,9 @@ RQ_SRS_006_RBAC_Privileges_Insert = Requirement( 'Any `INSERT INTO` statements SHALL not to be executed, unless the user\n' 'has the **insert** privilege for the destination table\n' 'either because of the explicit grant or through one of the roles assigned to the user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Insert_Grant = Requirement( name='RQ.SRS-006.RBAC.Privileges.Insert.Grant', @@ -5423,9 +9162,9 @@ RQ_SRS_006_RBAC_Privileges_Insert_Grant = Requirement( description=( '[ClickHouse] SHALL support granting **insert** privilege\n' 'for a database or a specific table to one or more **users** or **roles**.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Insert_Revoke = Requirement( name='RQ.SRS-006.RBAC.Privileges.Insert.Revoke', @@ -5437,9 +9176,9 @@ RQ_SRS_006_RBAC_Privileges_Insert_Revoke = Requirement( description=( '[ClickHouse] SHALL support revoking **insert** privilege\n' 'for a database or a specific table to one or more **users** or **roles**\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Insert_Column = Requirement( name='RQ.SRS-006.RBAC.Privileges.Insert.Column', @@ -5454,9 +9193,9 @@ RQ_SRS_006_RBAC_Privileges_Insert_Column = Requirement( 'Any `INSERT INTO` statements SHALL not to be executed, unless the user\n' 'has the **insert** privilege for the destination column\n' 'either because of the explicit grant or through one of the roles assigned to the user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Insert_Cluster = Requirement( name='RQ.SRS-006.RBAC.Privileges.Insert.Cluster', @@ -5470,9 +9209,9 @@ RQ_SRS_006_RBAC_Privileges_Insert_Cluster = Requirement( 'on a specified cluster to one or more **users** or **roles**.\n' 'Any `INSERT INTO` statements SHALL succeed only on nodes where\n' 'the table exists and privilege was granted.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Insert_GrantOption = Requirement( name='RQ.SRS-006.RBAC.Privileges.Insert.GrantOption', @@ -5487,9 +9226,9 @@ RQ_SRS_006_RBAC_Privileges_Insert_GrantOption = Requirement( 'with a `GRANT OPTION` clause. User with **grant option** privilege SHALL be able to\n' 'change access to the **insert** privilege by another user or role\n' 'on the same or smaller scope that they have access to.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Insert_GrantOption_Grant = Requirement( name='RQ.SRS-006.RBAC.Privileges.Insert.GrantOption.Grant', @@ -5504,9 +9243,9 @@ RQ_SRS_006_RBAC_Privileges_Insert_GrantOption_Grant = Requirement( 'or smaller scope that they have access to. 
Any `INSERT INTO` statements SHALL succeed\n' 'when done by a user with privilege granted by a user with `GRANT OPTION`,\n' 'either directly or through an assigned role.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Insert_GrantOption_Revoke = Requirement( name='RQ.SRS-006.RBAC.Privileges.Insert.GrantOption.Revoke', @@ -5521,9 +9260,9 @@ RQ_SRS_006_RBAC_Privileges_Insert_GrantOption_Revoke = Requirement( 'or smaller scope that they have access to. Any `INSERT INTO` statements SHALL fail\n' 'when done by a user with privilege revoke by a user with `GRANT OPTION`,\n' 'either directly or through an assigned role, unless they have access otherwise.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Insert_TableEngines = Requirement( name='RQ.SRS-006.RBAC.Privileges.Insert.TableEngines', @@ -5550,9 +9289,9 @@ RQ_SRS_006_RBAC_Privileges_Insert_TableEngines = Requirement( '* ReplicatedCollapsingMergeTree\n' '* ReplicatedVersionedCollapsingMergeTree\n' '* ReplicatedGraphiteMergeTree\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterColumn = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterColumn', @@ -5568,9 +9307,9 @@ RQ_SRS_006_RBAC_Privileges_AlterColumn = Requirement( 'return an error, unless the user has the **alter column** privilege for\n' 'the destination table either because of the explicit grant or through one of\n' 'the roles assigned to the user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterColumn_Grant = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterColumn.Grant', @@ -5582,9 +9321,9 @@ RQ_SRS_006_RBAC_Privileges_AlterColumn_Grant = Requirement( description=( '[ClickHouse] SHALL support granting **alter column** privilege\n' 'for a database or a specific table to one or more **users** or **roles**.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterColumn_Revoke = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterColumn.Revoke', @@ -5596,9 +9335,9 @@ RQ_SRS_006_RBAC_Privileges_AlterColumn_Revoke = Requirement( description=( '[ClickHouse] SHALL support revoking **alter column** privilege\n' 'for a database or a specific table to one or more **users** or **roles**\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterColumn_Column = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterColumn.Column', @@ -5613,9 +9352,9 @@ RQ_SRS_006_RBAC_Privileges_AlterColumn_Column = Requirement( 'Any `ALTER TABLE ... ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN` statements SHALL return an error,\n' ' unless the user has the **alter column** privilege for the destination column\n' 'either because of the explicit grant or through one of the roles assigned to the user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterColumn_Cluster = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterColumn.Cluster', @@ -5629,9 +9368,9 @@ RQ_SRS_006_RBAC_Privileges_AlterColumn_Cluster = Requirement( 'on a specified cluster to one or more **users** or **roles**.\n' 'Any `ALTER TABLE ... ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN`\n' 'statements SHALL succeed only on nodes where the table exists and privilege was granted.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterColumn_GrantOption = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterColumn.GrantOption', @@ -5646,9 +9385,9 @@ RQ_SRS_006_RBAC_Privileges_AlterColumn_GrantOption = Requirement( 'with a `GRANT OPTION` clause. 
User with **grant option** privilege SHALL be able to\n' 'change access to the **alter column** privilege by another user or role\n' 'on the same or smaller scope that they have access to.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterColumn_GrantOption_Grant = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterColumn.GrantOption.Grant', @@ -5664,9 +9403,9 @@ RQ_SRS_006_RBAC_Privileges_AlterColumn_GrantOption_Grant = Requirement( 'Any `ALTER TABLE ... ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN` statements SHALL\n' 'succeed when done by a user with privilege granted by a user with\n' '`GRANT OPTION`, either directly or through an assigned role.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterColumn_GrantOption_Revoke = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterColumn.GrantOption.Revoke', @@ -5681,9 +9420,9 @@ RQ_SRS_006_RBAC_Privileges_AlterColumn_GrantOption_Revoke = Requirement( 'or smaller scope that they have access to. Any `ALTER TABLE ... ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN` statements SHALL fail\n' 'when done by a user with privilege revoke by a user with `GRANT OPTION`,\n' 'either directly or through an assigned role, unless they have access otherwise.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterColumn_TableEngines = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterColumn.TableEngines', @@ -5710,9 +9449,9 @@ RQ_SRS_006_RBAC_Privileges_AlterColumn_TableEngines = Requirement( '* ReplicatedCollapsingMergeTree\n' '* ReplicatedVersionedCollapsingMergeTree\n' '* ReplicatedGraphiteMergeTree\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterIndex = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterIndex', @@ -5728,9 +9467,9 @@ RQ_SRS_006_RBAC_Privileges_AlterIndex = Requirement( 'return an error, unless the user has the **alter index** privilege for\n' 'the destination table either because of the explicit grant or through one of\n' 'the roles assigned to the user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterIndex_Grant = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterIndex.Grant', @@ -5742,9 +9481,9 @@ RQ_SRS_006_RBAC_Privileges_AlterIndex_Grant = Requirement( description=( '[ClickHouse] SHALL support granting **alter index** privilege\n' 'for a database or a specific table to one or more **users** or **roles**.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterIndex_Revoke = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterIndex.Revoke', @@ -5756,9 +9495,9 @@ RQ_SRS_006_RBAC_Privileges_AlterIndex_Revoke = Requirement( description=( '[ClickHouse] SHALL support revoking **alter index** privilege\n' 'for a database or a specific table to one or more **users** or **roles**\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterIndex_Cluster = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterIndex.Cluster', @@ -5772,9 +9511,9 @@ RQ_SRS_006_RBAC_Privileges_AlterIndex_Cluster = Requirement( 'on a specified cluster to one or more **users** or **roles**.\n' 'Any `ALTER TABLE ... ORDER BY | ADD|DROP|MATERIALIZE|CLEAR INDEX`\n' 'statements SHALL succeed only on nodes where the table exists and privilege was granted.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterIndex_GrantOption = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterIndex.GrantOption', @@ -5789,9 +9528,9 @@ RQ_SRS_006_RBAC_Privileges_AlterIndex_GrantOption = Requirement( 'with a `GRANT OPTION` clause. 
User with **grant option** privilege SHALL be able to\n' 'change access to the **alter index** privilege by another user or role\n' 'on the same or smaller scope that they have access to.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterIndex_GrantOption_Grant = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterIndex.GrantOption.Grant', @@ -5807,9 +9546,9 @@ RQ_SRS_006_RBAC_Privileges_AlterIndex_GrantOption_Grant = Requirement( 'Any `ALTER TABLE ... ORDER BY | ADD|DROP|MATERIALIZE|CLEAR INDEX` statements SHALL\n' 'succeed when done by a user with privilege granted by a user with\n' '`GRANT OPTION`, either directly or through an assigned role.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterIndex_GrantOption_Revoke = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterIndex.GrantOption.Revoke', @@ -5825,9 +9564,9 @@ RQ_SRS_006_RBAC_Privileges_AlterIndex_GrantOption_Revoke = Requirement( 'Any `ALTER TABLE ... ORDER BY | ADD|DROP|MATERIALIZE|CLEAR INDEX` statements\n' 'SHALL fail when done by a user with privilege revoke by a user with `GRANT OPTION`,\n' 'either directly or through an assigned role, unless they have access otherwise.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterIndex_TableEngines = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterIndex.TableEngines', @@ -5854,9 +9593,9 @@ RQ_SRS_006_RBAC_Privileges_AlterIndex_TableEngines = Requirement( '* ReplicatedCollapsingMergeTree\n' '* ReplicatedVersionedCollapsingMergeTree\n' '* ReplicatedGraphiteMergeTree\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterConstraint = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterConstraint', @@ -5872,9 +9611,9 @@ RQ_SRS_006_RBAC_Privileges_AlterConstraint = Requirement( 'return an error, unless the user has the **alter constraint** privilege for\n' 'the destination table either because of the explicit grant or through one of\n' 'the roles assigned to the user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterConstraint_Grant = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterConstraint.Grant', @@ -5886,9 +9625,9 @@ RQ_SRS_006_RBAC_Privileges_AlterConstraint_Grant = Requirement( description=( '[ClickHouse] SHALL support granting **alter constraint** privilege\n' 'for a database or a specific table to one or more **users** or **roles**.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterConstraint_Revoke = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterConstraint.Revoke', @@ -5900,9 +9639,9 @@ RQ_SRS_006_RBAC_Privileges_AlterConstraint_Revoke = Requirement( description=( '[ClickHouse] SHALL support revoking **alter constraint** privilege\n' 'for a database or a specific table to one or more **users** or **roles**\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterConstraint_Cluster = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterConstraint.Cluster', @@ -5916,9 +9655,9 @@ RQ_SRS_006_RBAC_Privileges_AlterConstraint_Cluster = Requirement( 'on a specified cluster to one or more **users** or **roles**.\n' 'Any `ALTER TABLE ... 
ADD|DROP CONSTRAINT`\n' 'statements SHALL succeed only on nodes where the table exists and privilege was granted.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterConstraint_GrantOption = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterConstraint.GrantOption', @@ -5933,9 +9672,9 @@ RQ_SRS_006_RBAC_Privileges_AlterConstraint_GrantOption = Requirement( 'with a `GRANT OPTION` clause. User with **grant option** privilege SHALL be able to\n' 'change access to the **alter constraint** privilege by another user or role\n' 'on the same or smaller scope that they have access to.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterConstraint_GrantOption_Grant = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterConstraint.GrantOption.Grant', @@ -5951,9 +9690,9 @@ RQ_SRS_006_RBAC_Privileges_AlterConstraint_GrantOption_Grant = Requirement( 'Any `ALTER TABLE ... ADD|DROP CONSTRAINT` statements SHALL\n' 'succeed when done by a user with privilege granted by a user with\n' '`GRANT OPTION`, either directly or through an assigned role.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterConstraint_GrantOption_Revoke = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterConstraint.GrantOption.Revoke', @@ -5969,9 +9708,9 @@ RQ_SRS_006_RBAC_Privileges_AlterConstraint_GrantOption_Revoke = Requirement( 'Any `ALTER TABLE ... ADD|DROP CONSTRAINT` statements\n' 'SHALL fail when done by a user with privilege revoke by a user with `GRANT OPTION`,\n' 'either directly or through an assigned role, unless they have access otherwise.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterConstraint_TableEngines = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterConstraint.TableEngines', @@ -5998,9 +9737,9 @@ RQ_SRS_006_RBAC_Privileges_AlterConstraint_TableEngines = Requirement( '* ReplicatedCollapsingMergeTree\n' '* ReplicatedVersionedCollapsingMergeTree\n' '* ReplicatedGraphiteMergeTree\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterTTL = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterTTL', @@ -6016,9 +9755,9 @@ RQ_SRS_006_RBAC_Privileges_AlterTTL = Requirement( 'return an error, unless the user has the **alter ttl** or **alter materialize ttl** privilege for\n' 'the destination table either because of the explicit grant or through one of\n' 'the roles assigned to the user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterTTL_Grant = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterTTL.Grant', @@ -6030,9 +9769,9 @@ RQ_SRS_006_RBAC_Privileges_AlterTTL_Grant = Requirement( description=( '[ClickHouse] SHALL support granting **alter ttl** or **alter materialize ttl** privilege\n' 'for a database or a specific table to one or more **users** or **roles**.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterTTL_Revoke = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterTTL.Revoke', @@ -6044,9 +9783,9 @@ RQ_SRS_006_RBAC_Privileges_AlterTTL_Revoke = Requirement( description=( '[ClickHouse] SHALL support revoking **alter ttl** or **alter materialize ttl** privilege\n' 'for a database or a specific table to one or more **users** or **roles**\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterTTL_Cluster = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterTTL.Cluster', @@ -6060,9 +9799,9 @@ RQ_SRS_006_RBAC_Privileges_AlterTTL_Cluster = Requirement( 'on a specified cluster to one or more **users** or **roles**.\n' 'Any `ALTER TABLE ... 
ALTER TTL | ALTER MATERIALIZE TTL`\n' 'statements SHALL succeed only on nodes where the table exists and privilege was granted.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterTTL_GrantOption = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterTTL.GrantOption', @@ -6077,9 +9816,9 @@ RQ_SRS_006_RBAC_Privileges_AlterTTL_GrantOption = Requirement( 'with a `GRANT OPTION` clause. User with **grant option** privilege SHALL be able to\n' 'change access to the **alter ttl** or **alter materialize ttl** privilege by another user or role\n' 'on the same or smaller scope that they have access to.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterTTL_GrantOption_Grant = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterTTL.GrantOption.Grant', @@ -6095,9 +9834,9 @@ RQ_SRS_006_RBAC_Privileges_AlterTTL_GrantOption_Grant = Requirement( 'Any `ALTER TABLE ... ALTER TTL | ALTER MATERIALIZE TTL` statements SHALL\n' 'succeed when done by a user with privilege granted by a user with\n' '`GRANT OPTION`, either directly or through an assigned role.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterTTL_GrantOption_Revoke = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterTTL.GrantOption.Revoke', @@ -6113,9 +9852,9 @@ RQ_SRS_006_RBAC_Privileges_AlterTTL_GrantOption_Revoke = Requirement( 'Any `ALTER TABLE ... ALTER TTL | ALTER MATERIALIZE TTL` statements\n' 'SHALL fail when done by a user with privilege revoke by a user with `GRANT OPTION`,\n' 'either directly or through an assigned role, unless they have access otherwise.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterTTL_TableEngines = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterTTL.TableEngines', @@ -6129,9 +9868,9 @@ RQ_SRS_006_RBAC_Privileges_AlterTTL_TableEngines = Requirement( 'on tables created using the following engines\n' '\n' '* MergeTree\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterSettings = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterSettings', @@ -6148,9 +9887,9 @@ RQ_SRS_006_RBAC_Privileges_AlterSettings = Requirement( 'the destination table either because of the explicit grant or through one of\n' 'the roles assigned to the user. The **alter settings** privilege allows\n' 'modifying table engine settings. It doesn’t affect settings or server configuration parameters.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterSettings_Grant = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterSettings.Grant', @@ -6162,9 +9901,9 @@ RQ_SRS_006_RBAC_Privileges_AlterSettings_Grant = Requirement( description=( '[ClickHouse] SHALL support granting **alter settings** privilege\n' 'for a database or a specific table to one or more **users** or **roles**.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterSettings_Revoke = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterSettings.Revoke', @@ -6176,9 +9915,9 @@ RQ_SRS_006_RBAC_Privileges_AlterSettings_Revoke = Requirement( description=( '[ClickHouse] SHALL support revoking **alter settings** privilege\n' 'for a database or a specific table to one or more **users** or **roles**\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterSettings_Cluster = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterSettings.Cluster', @@ -6192,9 +9931,9 @@ RQ_SRS_006_RBAC_Privileges_AlterSettings_Cluster = Requirement( 'on a specified cluster to one or more **users** or **roles**.\n' 'Any `ALTER TABLE ... 
MODIFY SETTING setting`\n' 'statements SHALL succeed only on nodes where the table exists and privilege was granted.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterSettings_GrantOption = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterSettings.GrantOption', @@ -6209,9 +9948,9 @@ RQ_SRS_006_RBAC_Privileges_AlterSettings_GrantOption = Requirement( 'with a `GRANT OPTION` clause. User with **grant option** privilege SHALL be able to\n' 'change access to the **alter settings** privilege by another user or role\n' 'on the same or smaller scope that they have access to.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterSettings_GrantOption_Grant = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterSettings.GrantOption.Grant', @@ -6227,9 +9966,9 @@ RQ_SRS_006_RBAC_Privileges_AlterSettings_GrantOption_Grant = Requirement( 'Any `ALTER TABLE ... MODIFY SETTING setting` statements SHALL\n' 'succeed when done by a user with privilege granted by a user with\n' '`GRANT OPTION`, either directly or through an assigned role.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterSettings_GrantOption_Revoke = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterSettings.GrantOption.Revoke', @@ -6245,9 +9984,9 @@ RQ_SRS_006_RBAC_Privileges_AlterSettings_GrantOption_Revoke = Requirement( 'Any `ALTER TABLE ... MODIFY SETTING setting` statements\n' 'SHALL fail when done by a user with privilege revoke by a user with `GRANT OPTION`,\n' 'either directly or through an assigned role, unless they have access otherwise.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AlterSettings_TableEngines = Requirement( name='RQ.SRS-006.RBAC.Privileges.AlterSettings.TableEngines', @@ -6274,9 +10013,9 @@ RQ_SRS_006_RBAC_Privileges_AlterSettings_TableEngines = Requirement( '* ReplicatedCollapsingMergeTree\n' '* ReplicatedVersionedCollapsingMergeTree\n' '* ReplicatedGraphiteMergeTree\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Delete = Requirement( name='RQ.SRS-006.RBAC.Privileges.Delete', @@ -6288,9 +10027,9 @@ RQ_SRS_006_RBAC_Privileges_Delete = Requirement( description=( '[ClickHouse] SHALL support granting or revoking **delete** privilege\n' 'for a database or a specific table to one or more **users** or **roles**.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Alter = Requirement( name='RQ.SRS-006.RBAC.Privileges.Alter', @@ -6302,9 +10041,9 @@ RQ_SRS_006_RBAC_Privileges_Alter = Requirement( description=( '[ClickHouse] SHALL support granting or revoking **alter** privilege\n' 'for a database or a specific table to one or more **users** or **roles**.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Create = Requirement( name='RQ.SRS-006.RBAC.Privileges.Create', @@ -6316,9 +10055,9 @@ RQ_SRS_006_RBAC_Privileges_Create = Requirement( description=( '[ClickHouse] SHALL support granting or revoking **create** privilege\n' 'for a database or a specific table to one or more **users** or **roles**.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_Drop = Requirement( name='RQ.SRS-006.RBAC.Privileges.Drop', @@ -6330,9 +10069,9 @@ RQ_SRS_006_RBAC_Privileges_Drop = Requirement( description=( '[ClickHouse] SHALL support granting or revoking **drop** privilege\n' 'for a database or a specific table to one or more **users** or **roles**.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_All = Requirement( name='RQ.SRS-006.RBAC.Privileges.All', @@ 
-6345,9 +10084,9 @@ RQ_SRS_006_RBAC_Privileges_All = Requirement( '[ClickHouse] SHALL include in the **all** privilege the same rights\n' 'as provided by **usage**, **select**, **select columns**,\n' '**insert**, **delete**, **alter**, **create**, and **drop** privileges.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_All_GrantRevoke = Requirement( name='RQ.SRS-006.RBAC.Privileges.All.GrantRevoke', @@ -6359,9 +10098,9 @@ RQ_SRS_006_RBAC_Privileges_All_GrantRevoke = Requirement( description=( '[ClickHouse] SHALL support granting or revoking **all** privileges\n' 'for a database or a specific table to one or more **users** or **roles**.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_GrantOption = Requirement( name='RQ.SRS-006.RBAC.Privileges.GrantOption', @@ -6373,9 +10112,9 @@ RQ_SRS_006_RBAC_Privileges_GrantOption = Requirement( description=( '[ClickHouse] SHALL support granting or revoking **grant option** privilege\n' 'for a database or a specific table to one or more **users** or **roles**.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_Privileges_AdminOption = Requirement( name='RQ.SRS-006.RBAC.Privileges.AdminOption', @@ -6387,9 +10126,9 @@ RQ_SRS_006_RBAC_Privileges_AdminOption = Requirement( description=( '[ClickHouse] SHALL support granting or revoking **admin option** privilege\n' 'to one or more **users** or **roles**.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RequiredPrivileges_Create = Requirement( name='RQ.SRS-006.RBAC.RequiredPrivileges.Create', @@ -6402,9 +10141,9 @@ RQ_SRS_006_RBAC_RequiredPrivileges_Create = Requirement( '[ClickHouse] SHALL not allow any `CREATE` statements\n' 'to be executed unless the user has the **create** privilege for the destination database\n' 'either because of the explicit grant or through one of the roles assigned to the user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RequiredPrivileges_Alter = Requirement( name='RQ.SRS-006.RBAC.RequiredPrivileges.Alter', @@ -6417,9 +10156,9 @@ RQ_SRS_006_RBAC_RequiredPrivileges_Alter = Requirement( '[ClickHouse] SHALL not allow any `ALTER` statements\n' 'to be executed unless the user has the **alter** privilege for the destination table\n' 'either because of the explicit grant or through one of the roles assigned to the user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RequiredPrivileges_Drop = Requirement( name='RQ.SRS-006.RBAC.RequiredPrivileges.Drop', @@ -6432,9 +10171,9 @@ RQ_SRS_006_RBAC_RequiredPrivileges_Drop = Requirement( '[ClickHouse] SHALL not allow any `DROP` statements\n' 'to be executed unless the user has the **drop** privilege for the destination database\n' 'either because of the explicit grant or through one of the roles assigned to the user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RequiredPrivileges_Drop_Table = Requirement( name='RQ.SRS-006.RBAC.RequiredPrivileges.Drop.Table', @@ -6447,9 +10186,9 @@ RQ_SRS_006_RBAC_RequiredPrivileges_Drop_Table = Requirement( '[ClickHouse] SHALL not allow any `DROP TABLE` statements\n' 'to be executed unless the user has the **drop** privilege for the destination database or the table\n' 'either because of the explicit grant or through one of the roles assigned to the user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RequiredPrivileges_GrantRevoke = Requirement( name='RQ.SRS-006.RBAC.RequiredPrivileges.GrantRevoke', @@ -6463,9 +10202,9 @@ RQ_SRS_006_RBAC_RequiredPrivileges_GrantRevoke = Requirement( 'to be executed unless the 
user has the **grant option** privilege\n' 'for the privilege of the destination table\n' 'either because of the explicit grant or through one of the roles assigned to the user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RequiredPrivileges_Use = Requirement( name='RQ.SRS-006.RBAC.RequiredPrivileges.Use', @@ -6479,9 +10218,9 @@ RQ_SRS_006_RBAC_RequiredPrivileges_Use = Requirement( 'unless the user has at least one of the privileges for the database\n' 'or the table inside that database\n' 'either because of the explicit grant or through one of the roles assigned to the user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_006_RBAC_RequiredPrivileges_Admin = Requirement( name='RQ.SRS-006.RBAC.RequiredPrivileges.Admin', @@ -6508,6 +10247,6 @@ RQ_SRS_006_RBAC_RequiredPrivileges_Admin = Requirement( '\n' 'to be executed unless the user has the **admin option** privilege\n' 'through one of the roles with **admin option** privilege assigned to the user.\n' + '\n' ), - link=None - ) + link=None) From 222a4d2e9b85b214351d9a6c3e6db70940fe0458 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Thu, 22 Oct 2020 01:15:24 +0300 Subject: [PATCH 176/432] Update AggregateFunctionRankCorrelation.h --- .../AggregateFunctionRankCorrelation.h | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionRankCorrelation.h b/src/AggregateFunctions/AggregateFunctionRankCorrelation.h index 4ce4d7199dc..75592cf5c9b 100644 --- a/src/AggregateFunctions/AggregateFunctionRankCorrelation.h +++ b/src/AggregateFunctions/AggregateFunctionRankCorrelation.h @@ -134,18 +134,18 @@ public: const auto & value = this->data(place).values; size_t size = this->data(place).size_x; - //create a copy of values not to format data + // create a copy of values not to format data PODArrayWithStackMemory, 32> tmp_values; tmp_values.resize(size); for (size_t j = 0; j < size; ++ j) tmp_values[j] = static_cast>(value[j]); - //sort x_values + // sort x_values std::sort(std::begin(tmp_values), std::end(tmp_values), ComparePairFirst{}); for (size_t j = 0; j < size;) { - //replace x_values with their ranks + // replace x_values with their ranks size_t rank = j + 1; size_t same = 1; size_t cur_sum = rank; @@ -157,9 +157,9 @@ public: { // rank of (j + 1)th number rank += 1; - same++; + ++same; cur_sum += rank; - j++; + ++j; } else break; @@ -169,16 +169,16 @@ public: Float64 insert_rank = static_cast(cur_sum) / same; for (size_t i = cur_start; i <= j; ++i) tmp_values[i].first = insert_rank; - j++; + ++j; } - //sort y_values + // sort y_values std::sort(std::begin(tmp_values), std::end(tmp_values), ComparePairSecond{}); - //replace y_values with their ranks + // replace y_values with their ranks for (size_t j = 0; j < size;) { - //replace x_values with their ranks + // replace x_values with their ranks size_t rank = j + 1; size_t same = 1; size_t cur_sum = rank; @@ -190,9 +190,9 @@ public: { // rank of (j + 1)th number rank += 1; - same++; + ++same; cur_sum += rank; - j++; + ++j; } else { @@ -204,10 +204,10 @@ public: Float64 insert_rank = static_cast(cur_sum) / same; for (size_t i = cur_start; i <= j; ++i) tmp_values[i].second = insert_rank; - j++; + ++j; } - //count d^2 sum + // count d^2 sum Float64 answer = static_cast(0); for (size_t j = 0; j < size; ++ j) answer += (tmp_values[j].first - tmp_values[j].second) * (tmp_values[j].first - tmp_values[j].second); From 6f78243e328ad1da5246b79cfe0dc2294de6fc36 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 22 
Oct 2020 01:20:09 +0300 Subject: [PATCH 177/432] Use std::nullptr_t over nullptr_t --- src/Functions/FunctionsAES.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Functions/FunctionsAES.h b/src/Functions/FunctionsAES.h index 667fd38fab9..6600931118e 100644 --- a/src/Functions/FunctionsAES.h +++ b/src/Functions/FunctionsAES.h @@ -301,14 +301,14 @@ private: { const auto key_value = key_holder.setKey(key_size, key_column->getDataAt(r)); auto iv_value = StringRef{}; - if constexpr (!std::is_same_v>) + if constexpr (!std::is_same_v>) { iv_value = iv_column->getDataAt(r); } const auto input_value = input_column->getDataAt(r); auto aad_value = StringRef{}; - if constexpr (mode == CipherMode::RFC5116_AEAD_AES_GCM && !std::is_same_v>) + if constexpr (mode == CipherMode::RFC5116_AEAD_AES_GCM && !std::is_same_v>) { aad_value = aad_column->getDataAt(r); } @@ -348,7 +348,7 @@ private: onError("Failed to set key and IV"); // 1.a.2 Set AAD - if constexpr (!std::is_same_v>) + if constexpr (!std::is_same_v>) { const auto aad_data = aad_column->getDataAt(r); int tmp_len = 0; @@ -574,7 +574,7 @@ private: // 0: prepare key if required auto key_value = key_holder.setKey(key_size, key_column->getDataAt(r)); auto iv_value = StringRef{}; - if constexpr (!std::is_same_v>) + if constexpr (!std::is_same_v>) { iv_value = iv_column->getDataAt(r); } @@ -626,7 +626,7 @@ private: onError("Failed to set key and IV"); // 1.a.2: Set AAD if present - if constexpr (!std::is_same_v>) + if constexpr (!std::is_same_v>) { const auto aad_data = aad_column->getDataAt(r); int tmp_len = 0; From 691b28e98bbfa1349876fc04200b9e9f63787b33 Mon Sep 17 00:00:00 2001 From: Denis Glazachev Date: Thu, 22 Oct 2020 02:43:02 +0400 Subject: [PATCH 178/432] Add a log message after a storage is added --- src/Access/AccessControlManager.cpp | 4 ++++ src/Access/AccessControlManager.h | 2 ++ 2 files changed, 6 insertions(+) diff --git a/src/Access/AccessControlManager.cpp b/src/Access/AccessControlManager.cpp index 56d225f64f4..0913717808a 100644 --- a/src/Access/AccessControlManager.cpp +++ b/src/Access/AccessControlManager.cpp @@ -137,6 +137,10 @@ AccessControlManager::AccessControlManager() AccessControlManager::~AccessControlManager() = default; +void AccessControlManager::addStorage(const StoragePtr & new_storage) { + MultipleAccessStorage::addStorage(new_storage); + LOG_DEBUG(getLogger(), "Added storage '{}' of type '{}'", new_storage->getStorageName(), String(new_storage->getStorageType())); +} void AccessControlManager::setUsersConfig(const Poco::Util::AbstractConfiguration & users_config_) { diff --git a/src/Access/AccessControlManager.h b/src/Access/AccessControlManager.h index c960b330ee6..dd710614ece 100644 --- a/src/Access/AccessControlManager.h +++ b/src/Access/AccessControlManager.h @@ -149,6 +149,8 @@ public: const ExternalAuthenticators & getExternalAuthenticators() const; private: + void addStorage(const StoragePtr & new_storage); + class ContextAccessCache; class CustomSettingsPrefixes; From fb0c7e80aa2107c3bb1b6f6fdd1b522c29c39ce6 Mon Sep 17 00:00:00 2001 From: Vladimir Chebotarev Date: Thu, 22 Oct 2020 03:22:00 +0300 Subject: [PATCH 179/432] Fixed flappy `test_background_move` test. 
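
The subject alone does not say what was flaky, so a short note on the fix (this is a
reading of the diff that follows, not taken from anywhere else): the parametrized runs
of test_background_move re-create tables under the same names, so merge state left
behind by another test, or by an earlier run that failed before cleanup, could still
apply to the freshly created table and interfere with the background part moves the
test waits for, making the `path.startswith("/external")` assertion race against its
timeout. The two added `SYSTEM START MERGES {name}` calls, one right after the
`CREATE TABLE` and one after the assertions, put each run into a known state and
restore that state for whatever test runs next.
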
--- tests/integration/test_multiple_disks/test.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/integration/test_multiple_disks/test.py b/tests/integration/test_multiple_disks/test.py index 209b6539c52..1a50e12a3f6 100644 --- a/tests/integration/test_multiple_disks/test.py +++ b/tests/integration/test_multiple_disks/test.py @@ -493,6 +493,8 @@ def test_background_move(start_cluster, name, engine): SETTINGS storage_policy='moving_jbod_with_external' """.format(name=name, engine=engine)) + node1.query(f"SYSTEM START MERGES {name}") + for i in range(5): data = [] # 5MB in total for i in range(5): @@ -521,6 +523,8 @@ def test_background_move(start_cluster, name, engine): # first (oldest) part was moved to external assert path.startswith("/external") + node1.query(f"SYSTEM START MERGES {name}") + finally: node1.query(f"DROP TABLE IF EXISTS {name} SYNC") From f351b528512ceefd453048ff8ff3524b5c1f5e06 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Thu, 22 Oct 2020 07:03:17 +0300 Subject: [PATCH 180/432] Update AccessControlManager.cpp --- src/Access/AccessControlManager.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Access/AccessControlManager.cpp b/src/Access/AccessControlManager.cpp index 0913717808a..9bfc54fe69d 100644 --- a/src/Access/AccessControlManager.cpp +++ b/src/Access/AccessControlManager.cpp @@ -137,7 +137,8 @@ AccessControlManager::AccessControlManager() AccessControlManager::~AccessControlManager() = default; -void AccessControlManager::addStorage(const StoragePtr & new_storage) { +void AccessControlManager::addStorage(const StoragePtr & new_storage) +{ MultipleAccessStorage::addStorage(new_storage); LOG_DEBUG(getLogger(), "Added storage '{}' of type '{}'", new_storage->getStorageName(), String(new_storage->getStorageType())); } From 4a250ef9c60ff94ec571844dda85934ca0e8aa0e Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 22 Oct 2020 09:58:20 +0300 Subject: [PATCH 181/432] Fix tests. 
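The change below adds a fallback branch to ActionsMatcher::visit: AST node types without a dedicated handler now have their children visited recursively instead of being silently skipped. A toy illustration of the dispatch-with-fallback pattern (the node kinds here are hypothetical; the real matcher dispatches on concrete AST classes via as<T>()):

    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    struct Node
    {
        std::string kind; /// e.g. "Function", "Literal", "OrderByElement"
        std::vector<std::shared_ptr<Node>> children;
    };

    void visit(const std::shared_ptr<Node> & node)
    {
        if (node->kind == "Function")
            std::cout << "handled Function node\n";
        else if (node->kind == "Literal")
            std::cout << "handled Literal node\n";
        else
        {
            /// No dedicated handler: recurse, so handled node types
            /// nested inside unknown wrappers are still processed.
            for (const auto & child : node->children)
                visit(child);
        }
    }

    int main()
    {
        auto literal = std::make_shared<Node>(Node{"Literal", {}});
        auto wrapper = std::make_shared<Node>(Node{"OrderByElement", {literal}});
        visit(wrapper); /// prints: handled Literal node
    }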
--- src/Interpreters/ActionsVisitor.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index a1bde054923..63799d1ca5a 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -564,6 +564,11 @@ void ActionsMatcher::visit(const ASTPtr & ast, Data & data) visit(*literal, ast, data); else if (auto * expression_list = ast->as()) visit(*expression_list, ast, data); + else + { + for (auto & child : ast->children) + visit(child, data); + } } std::optional ActionsMatcher::getNameAndTypeFromAST(const ASTPtr & ast, Data & data) From 111b553ee559cceca35f71bbb15399cc6fc5063f Mon Sep 17 00:00:00 2001 From: Mikhail Filimonov Date: Thu, 22 Oct 2020 09:37:03 +0200 Subject: [PATCH 182/432] Better --- programs/local/LocalServer.cpp | 86 +++++++++++++++++----------------- programs/local/LocalServer.h | 6 +-- 2 files changed, 46 insertions(+), 46 deletions(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index bb9918d633f..77deb274c65 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -57,8 +57,8 @@ LocalServer::LocalServer() = default; LocalServer::~LocalServer() { - if (context) - context->shutdown(); /// required for properly exception handling + if (global_context) + global_context->shutdown(); /// required for properly exception handling } @@ -95,9 +95,9 @@ void LocalServer::initialize(Poco::Util::Application & self) } } -void LocalServer::applyCmdSettings() +void LocalServer::applyCmdSettings(Context & context) { - context->applySettingsChanges(cmd_settings.changes()); + context.applySettingsChanges(cmd_settings.changes()); } /// If path is specified and not empty, will try to setup server environment and load existing metadata @@ -151,12 +151,12 @@ void LocalServer::tryInitPath() if (path.back() != '/') path += '/'; - context->setPath(path); + global_context->setPath(path); - context->setTemporaryStorage(path + "tmp"); - context->setFlagsPath(path + "flags"); + global_context->setTemporaryStorage(path + "tmp"); + global_context->setFlagsPath(path + "flags"); - context->setUserFilesPath(""); // user's files are everywhere + global_context->setUserFilesPath(""); // user's files are everywhere } @@ -190,9 +190,9 @@ try } shared_context = Context::createShared(); - context = std::make_unique(Context::createGlobal(shared_context.get())); - context->makeGlobalContext(); - context->setApplicationType(Context::ApplicationType::LOCAL); + global_context = std::make_unique(Context::createGlobal(shared_context.get())); + global_context->makeGlobalContext(); + global_context->setApplicationType(Context::ApplicationType::LOCAL); tryInitPath(); std::optional status; @@ -214,32 +214,32 @@ try /// Maybe useless if (config().has("macros")) - context->setMacros(std::make_unique(config(), "macros", log)); + global_context->setMacros(std::make_unique(config(), "macros", log)); /// Skip networking /// Sets external authenticators config (LDAP). - context->setExternalAuthenticatorsConfig(config()); + global_context->setExternalAuthenticatorsConfig(config()); setupUsers(); /// Limit on total number of concurrently executing queries. /// There is no need for concurrent queries, override max_concurrent_queries. - context->getProcessList().setMaxSize(0); + global_context->getProcessList().setMaxSize(0); /// Size of cache for uncompressed blocks. Zero means disabled. 
size_t uncompressed_cache_size = config().getUInt64("uncompressed_cache_size", 0); if (uncompressed_cache_size) - context->setUncompressedCache(uncompressed_cache_size); + global_context->setUncompressedCache(uncompressed_cache_size); /// Size of cache for marks (index of MergeTree family of tables). It is necessary. /// Specify default value for mark_cache_size explicitly! size_t mark_cache_size = config().getUInt64("mark_cache_size", 5368709120); if (mark_cache_size) - context->setMarkCache(mark_cache_size); + global_context->setMarkCache(mark_cache_size); /// Load global settings from default_profile and system_profile. - context->setDefaultProfiles(config()); + global_context->setDefaultProfiles(config()); /** Init dummy default DB * NOTE: We force using isolated default database to avoid conflicts with default database from server environment @@ -247,34 +247,34 @@ try * if such tables will not be dropped, clickhouse-server will not be able to load them due to security reasons. */ std::string default_database = config().getString("default_database", "_local"); - DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared(default_database, *context)); - context->setCurrentDatabase(default_database); - applyCmdOptions(); + DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared(default_database, *global_context)); + global_context->setCurrentDatabase(default_database); + applyCmdOptions(*global_context); - String path = context->getPath(); + String path = global_context->getPath(); if (!path.empty()) { /// Lock path directory before read - status.emplace(context->getPath() + "status", StatusFile::write_full_info); + status.emplace(global_context->getPath() + "status", StatusFile::write_full_info); LOG_DEBUG(log, "Loading metadata from {}", path); Poco::File(path + "data/").createDirectories(); Poco::File(path + "metadata/").createDirectories(); - loadMetadataSystem(*context); - attachSystemTables(*context); - loadMetadata(*context); + loadMetadataSystem(*global_context); + attachSystemTables(*global_context); + loadMetadata(*global_context); DatabaseCatalog::instance().loadDatabases(); LOG_DEBUG(log, "Loaded metadata."); } else { - attachSystemTables(*context); + attachSystemTables(*global_context); } processQueries(); - context->shutdown(); - context.reset(); + global_context->shutdown(); + global_context.reset(); status.reset(); cleanup(); @@ -327,7 +327,7 @@ void LocalServer::processQueries() String initial_create_query = getInitialCreateTableQuery(); String queries_str = initial_create_query + config().getRawString("query"); - const auto & settings = context->getSettingsRef(); + const auto & settings = global_context->getSettingsRef(); std::vector queries; auto parse_res = splitMultipartQuery(queries_str, queries, settings.max_query_size, settings.max_parser_depth); @@ -335,19 +335,19 @@ void LocalServer::processQueries() if (!parse_res.second) throw Exception("Cannot parse and execute the following part of query: " + String(parse_res.first), ErrorCodes::SYNTAX_ERROR); - /// we can't mutate global context (due to possible races), so we can't reuse it safely as a query context - /// so we need a copy here - auto query_context = Context(context.get()); + /// we can't mutate global global_context (can lead to races, as it was already passed to some background threads) + /// so we can't reuse it safely as a query global_context and need a copy here + auto context = Context(*global_context); - query_context->makeSessionContext(); - 
query_context->makeQueryContext(); + context.makeSessionContext(); + context.makeQueryContext(); - query_context->setUser("default", "", Poco::Net::SocketAddress{}); - query_context->setCurrentQueryId(""); - applyCmdSettings(); + context.setUser("default", "", Poco::Net::SocketAddress{}); + context.setCurrentQueryId(""); + applyCmdSettings(context); /// Use the same query_id (and thread group) for all queries - CurrentThread::QueryScope query_scope_holder(*query_context); + CurrentThread::QueryScope query_scope_holder(context); bool echo_queries = config().hasOption("echo") || config().hasOption("verbose"); std::exception_ptr exception; @@ -366,7 +366,7 @@ void LocalServer::processQueries() try { - executeQuery(read_buf, write_buf, /* allow_into_outfile = */ true, *query_context, {}); + executeQuery(read_buf, write_buf, /* allow_into_outfile = */ true, context, {}); } catch (...) { @@ -431,7 +431,7 @@ void LocalServer::setupUsers() } if (users_config) - context->setUsersConfig(users_config); + global_context->setUsersConfig(users_config); else throw Exception("Can't load config for users", ErrorCodes::CANNOT_LOAD_CONFIG); } @@ -585,10 +585,10 @@ void LocalServer::init(int argc, char ** argv) argsToConfig(arguments, config(), 100); } -void LocalServer::applyCmdOptions() +void LocalServer::applyCmdOptions(Context & context) { - context->setDefaultFormat(config().getString("output-format", config().getString("format", "TSV"))); - applyCmdSettings(); + context.setDefaultFormat(config().getString("output-format", config().getString("format", "TSV"))); + applyCmdSettings(context); } } diff --git a/programs/local/LocalServer.h b/programs/local/LocalServer.h index a8908754369..02778bd86cb 100644 --- a/programs/local/LocalServer.h +++ b/programs/local/LocalServer.h @@ -36,15 +36,15 @@ private: std::string getInitialCreateTableQuery(); void tryInitPath(); - void applyCmdOptions(); - void applyCmdSettings(); + void applyCmdOptions(Context & context); + void applyCmdSettings(Context & context); void processQueries(); void setupUsers(); void cleanup(); protected: SharedContextHolder shared_context; - std::unique_ptr context; + std::unique_ptr global_context; /// Settings specified via command line args Settings cmd_settings; From fd48d1002914aa2127217cd9de9552d66dffc1f4 Mon Sep 17 00:00:00 2001 From: Vladimir Chebotarev Date: Thu, 22 Oct 2020 10:37:51 +0300 Subject: [PATCH 183/432] Diagnostics (to be dropped). --- tests/integration/test_multiple_disks/test.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/integration/test_multiple_disks/test.py b/tests/integration/test_multiple_disks/test.py index 1a50e12a3f6..b0159d16501 100644 --- a/tests/integration/test_multiple_disks/test.py +++ b/tests/integration/test_multiple_disks/test.py @@ -459,6 +459,9 @@ def test_jbod_overflow(start_cluster, name, engine): node1.query("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data]))) + for p in ("/jbod1", "/jbod2", "/external"): + print(node1.exec_in_container([f"bash", "-c", f"find {p} | xargs -n1 du -sh"])) + used_disks = get_used_disks_for_table(node1, name) assert used_disks[-1] == 'external' From 48142df26c5ccf0054ffe2b560c359589d76ddce Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 22 Oct 2020 11:25:24 +0300 Subject: [PATCH 184/432] Fix tests. 
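The follow-up below tightens that fallback: children are only descended into when needChildVisit allows it, so subtrees this matcher is not supposed to enter stay untouched. In terms of the toy sketch under PATCH 181 above, the fallback branch becomes the following (needChildVisit here is a hypothetical stand-in; the real conditions live in ActionsMatcher):

    /// Hypothetical guard: returns false for children the matcher
    /// must not traverse (a nested subquery, in this toy model).
    bool needChildVisit(const std::shared_ptr<Node> & /*parent*/,
                        const std::shared_ptr<Node> & child)
    {
        return child->kind != "Subquery";
    }

    /// ...and the fallback loop checks it before recursing:
    ///     for (const auto & child : node->children)
    ///         if (needChildVisit(node, child))
    ///             visit(child);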
--- src/Interpreters/ActionsVisitor.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index 63799d1ca5a..13f8be8a13b 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -567,7 +567,8 @@ void ActionsMatcher::visit(const ASTPtr & ast, Data & data) else { for (auto & child : ast->children) - visit(child, data); + if (needChildVisit(ast, child)) + visit(child, data); } } From 77f66e5a09396286ac8fa5e66ddc4398f27de54e Mon Sep 17 00:00:00 2001 From: Mikhail Filimonov Date: Thu, 22 Oct 2020 11:02:14 +0200 Subject: [PATCH 185/432] Split test into 2 --- .../01527_clickhouse_local_optimize.reference | 8 -- .../01527_clickhouse_local_optimize.sh | 42 +--------- ...8_clickhouse_local_prepare_parts.reference | 19 +++++ .../01528_clickhouse_local_prepare_parts.sh | 83 +++++++++++++++++++ 4 files changed, 104 insertions(+), 48 deletions(-) create mode 100644 tests/queries/0_stateless/01528_clickhouse_local_prepare_parts.reference create mode 100755 tests/queries/0_stateless/01528_clickhouse_local_prepare_parts.sh diff --git a/tests/queries/0_stateless/01527_clickhouse_local_optimize.reference b/tests/queries/0_stateless/01527_clickhouse_local_optimize.reference index e7315547841..e69de29bb2d 100644 --- a/tests/queries/0_stateless/01527_clickhouse_local_optimize.reference +++ b/tests/queries/0_stateless/01527_clickhouse_local_optimize.reference @@ -1,8 +0,0 @@ -1 2020-01-01 String -2 2020-02-02 Another string -3 2020-03-03 One more string -4 2020-01-02 String for first partition -1 2020-01-01 String -2 2020-02-02 Another string -3 2020-03-03 One more string -4 2020-01-02 String for first partition diff --git a/tests/queries/0_stateless/01527_clickhouse_local_optimize.sh b/tests/queries/0_stateless/01527_clickhouse_local_optimize.sh index 13e8c847e71..bbbdf9c65d6 100755 --- a/tests/queries/0_stateless/01527_clickhouse_local_optimize.sh +++ b/tests/queries/0_stateless/01527_clickhouse_local_optimize.sh @@ -4,48 +4,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh WORKING_FOLDER_01527="${CLICKHOUSE_TMP}/01527_clickhouse_local_optimize" - rm -rf "${WORKING_FOLDER_01527}" -mkdir -p "${WORKING_FOLDER_01527}/metadata/local/" +mkdir -p "${WORKING_FOLDER_01527}" # OPTIMIZE was crashing due to lack of temporary volume in local ${CLICKHOUSE_LOCAL} --query "drop database if exists d; create database d; create table d.t engine MergeTree order by a as select 1 a; optimize table d.t final" -- --path="${WORKING_FOLDER_01527}" -# Some extra (unrealted) scenarios of clickhouse-local usage. - -## 1. Imagine we want to process this file: -cat < "${WORKING_FOLDER_01527}/data.csv" -1,2020-01-01,"String" -2,2020-02-02,"Another string" -3,2020-03-03,"One more string" -4,2020-01-02,"String for first partition" -EOF - -## 2. that is the metadata for the table we want to fill -## schema should match the schema of the table from server -## (the easiest way is just to copy it from the server) -cat < "${WORKING_FOLDER_01527}/metadata/local/test.sql" -ATTACH TABLE local.test (id UInt64, d Date, s String) Engine=MergeTree ORDER BY id PARTITION BY toYYYYMM(d); -EOF - -## 3a. that is the metadata for the input file we want to read -## it should match the structure of source file -## use stdin to read from pipe -cat < "${WORKING_FOLDER_01527}/metadata/local/stdin.sql" -ATTACH TABLE local.stdin (id UInt64, d Date, s String) Engine=File(CSV, stdin); -EOF - -## 3b. 
Instead of stdin you can use file path -cat < "${WORKING_FOLDER_01527}/metadata/local/data_csv.sql" -ATTACH TABLE local.data_csv (id UInt64, d Date, s String) Engine=File(CSV, '${WORKING_FOLDER_01527}/data.csv'); -EOF - -## All preparations done, the rest is simple: - -# option a (if 3a used) with pipe / reading stdin (truncate was added for the test) -cat "${WORKING_FOLDER_01527}/data.csv" | ${CLICKHOUSE_LOCAL} --query "INSERT INTO local.test SELECT * FROM local.stdin; SELECT * FROM local.test ORDER BY id; TRUNCATE TABLE local.test;" -- --path="${WORKING_FOLDER_01527}" - -# option b (if 3b used) 0 with filepath (truncate was added for the test) -${CLICKHOUSE_LOCAL} --query "INSERT INTO local.test SELECT * FROM local.data_csv; SELECT * FROM local.test ORDER BY id; TRUNCATE TABLE local.test;" -- --path="${WORKING_FOLDER_01527}" - -rm -rf "${WORKING_FOLDER_01527}" \ No newline at end of file +rm -rf "${WORKING_FOLDER_01527}" diff --git a/tests/queries/0_stateless/01528_clickhouse_local_prepare_parts.reference b/tests/queries/0_stateless/01528_clickhouse_local_prepare_parts.reference new file mode 100644 index 00000000000..64a56d9b949 --- /dev/null +++ b/tests/queries/0_stateless/01528_clickhouse_local_prepare_parts.reference @@ -0,0 +1,19 @@ +Option 1. Prepare parts from from table with Engine=File defined in metadata, read from an arbitrary path +1 2020-01-01 String +2 2020-02-02 Another string +3 2020-03-03 One more string +4 2020-01-02 String for first partition +Option 2. Prepare parts from from table with Engine=File defined in metadata, read from stdin (pipe) +11 2020-01-01 String +12 2020-02-02 Another string +13 2020-03-03 One more string +14 2020-01-02 String for first partition +Option 3. Prepare parts from from table with Engine=File defined via command line, read from stdin (pipe) +21 2020-01-01 String +22 2020-02-02 Another string +23 2020-03-03 One more string +24 2020-01-02 String for first partition +Possibility to run optimize on prepared parts before sending parts to server +202001 1 +202002 1 +202003 1 diff --git a/tests/queries/0_stateless/01528_clickhouse_local_prepare_parts.sh b/tests/queries/0_stateless/01528_clickhouse_local_prepare_parts.sh new file mode 100755 index 00000000000..9c7ad1d9476 --- /dev/null +++ b/tests/queries/0_stateless/01528_clickhouse_local_prepare_parts.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. "$CURDIR"/../shell_config.sh + +WORKING_FOLDER_01528="${CLICKHOUSE_TMP}/01527_clickhouse_local_optimize" +rm -rf "${WORKING_FOLDER_01528}" + +mkdir -p "${WORKING_FOLDER_01528}/metadata/local" + +## Checks scenario of preparing parts offline by clickhouse-local + +## that is the metadata for the table we want to fill +## schema should match the schema of the table from server +## (the easiest way is just to copy it from the server) +cat < "${WORKING_FOLDER_01528}/metadata/local/test.sql" +ATTACH TABLE local.test (id UInt64, d Date, s String) Engine=MergeTree ORDER BY id PARTITION BY toYYYYMM(d); +EOF + +################# + +echo "Option 1. 
Prepare parts from from table with Engine=File defined in metadata, read from an arbitrary path" + +## Source file: +cat < "${WORKING_FOLDER_01528}/data.csv" +1,2020-01-01,"String" +2,2020-02-02,"Another string" +3,2020-03-03,"One more string" +4,2020-01-02,"String for first partition" +EOF + +## metadata written into file +cat < "${WORKING_FOLDER_01528}/metadata/local/data_csv.sql" +ATTACH TABLE local.data_csv (id UInt64, d Date, s String) Engine=File(CSV, '${WORKING_FOLDER_01528}/data.csv'); +EOF + +## feed the table +${CLICKHOUSE_LOCAL} --query "INSERT INTO local.test SELECT * FROM local.data_csv;" -- --path="${WORKING_FOLDER_01528}" + +## check the parts were created +${CLICKHOUSE_LOCAL} --query "SELECT * FROM local.test WHERE id < 10 ORDER BY id;" -- --path="${WORKING_FOLDER_01528}" + +################# + +echo "Option 2. Prepare parts from from table with Engine=File defined in metadata, read from stdin (pipe)" + +cat < "${WORKING_FOLDER_01528}/metadata/local/stdin.sql" +ATTACH TABLE local.stdin (id UInt64, d Date, s String) Engine=File(CSV, stdin); +EOF + +cat < Date: Thu, 22 Oct 2020 12:47:24 +0300 Subject: [PATCH 186/432] Fix tests. --- src/DataTypes/DataTypeLowCardinalityHelpers.cpp | 2 +- src/DataTypes/DataTypeTuple.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/DataTypes/DataTypeLowCardinalityHelpers.cpp b/src/DataTypes/DataTypeLowCardinalityHelpers.cpp index 673253500c4..a68dc30d5c2 100644 --- a/src/DataTypes/DataTypeLowCardinalityHelpers.cpp +++ b/src/DataTypes/DataTypeLowCardinalityHelpers.cpp @@ -34,7 +34,7 @@ DataTypePtr recursiveRemoveLowCardinality(const DataTypePtr & type) element = recursiveRemoveLowCardinality(element); if (tuple_type->haveExplicitNames()) - return std::make_shared(elements, tuple_type->getElementNames()); + return std::make_shared(elements, tuple_type->getElementNames(), tuple_type->serializeNames()); else return std::make_shared(elements); } diff --git a/src/DataTypes/DataTypeTuple.h b/src/DataTypes/DataTypeTuple.h index da3dfdb1d3d..9714cc6ca71 100644 --- a/src/DataTypes/DataTypeTuple.h +++ b/src/DataTypes/DataTypeTuple.h @@ -104,6 +104,7 @@ public: size_t getPositionByName(const String & name) const; bool haveExplicitNames() const { return have_explicit_names; } + bool serializeNames() const { return serialize_names; } }; } From 0902bc96cff2de81fe719a5144a8789c0455de7b Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 22 Oct 2020 14:01:26 +0300 Subject: [PATCH 187/432] Fix build --- src/DataTypes/DataTypeTuple.h | 2 +- src/Interpreters/ActionsVisitor.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/DataTypes/DataTypeTuple.h b/src/DataTypes/DataTypeTuple.h index 9714cc6ca71..7e4e68651f1 100644 --- a/src/DataTypes/DataTypeTuple.h +++ b/src/DataTypes/DataTypeTuple.h @@ -22,7 +22,7 @@ private: DataTypes elems; Strings names; bool have_explicit_names; - bool serialize_names; + bool serialize_names = true; public: static constexpr bool is_parametric = true; diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index 13f8be8a13b..3c9dc6a3ce5 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -527,7 +527,7 @@ struct CachedColumnName bool & skip_cache; String cached; - CachedColumnName(bool & skip_cache_) : skip_cache(skip_cache_) {} + explicit CachedColumnName(bool & skip_cache_) : skip_cache(skip_cache_) {} const String & get(const ASTPtr & ast) { From 4b65d5469f441bac67e7026ffb9a8f16337fcd65 Mon Sep 17 00:00:00 2001 
From: alesapin Date: Thu, 22 Oct 2020 15:41:01 +0300 Subject: [PATCH 188/432] Fix some unrelated performance issues in select parts for merge --- src/Disks/StoragePolicy.cpp | 7 +++++ src/Disks/StoragePolicy.h | 3 ++ .../MergeTree/MergeTreeDataMergerMutator.cpp | 5 +++- .../MergeTree/SimpleMergeSelector.cpp | 28 ++++++++++++------- 4 files changed, 32 insertions(+), 11 deletions(-) diff --git a/src/Disks/StoragePolicy.cpp b/src/Disks/StoragePolicy.cpp index 8a71f4f7a2f..2215615feda 100644 --- a/src/Disks/StoragePolicy.cpp +++ b/src/Disks/StoragePolicy.cpp @@ -307,6 +307,13 @@ void StoragePolicy::buildVolumeIndices() } } +bool StoragePolicy::hasAnyVolumeWithDisabledMerges() const +{ + for (const auto & volume : volumes) + if (volume->areMergesAvoided()) + return true; + return false; +} StoragePolicySelector::StoragePolicySelector( const Poco::Util::AbstractConfiguration & config, diff --git a/src/Disks/StoragePolicy.h b/src/Disks/StoragePolicy.h index f4a4a0070b8..fc45ed3ed06 100644 --- a/src/Disks/StoragePolicy.h +++ b/src/Disks/StoragePolicy.h @@ -88,6 +88,9 @@ public: /// Checks if storage policy can be replaced by another one. void checkCompatibleWith(const StoragePolicyPtr & new_storage_policy) const; + /// Check if we have any volume with stopped merges + bool hasAnyVolumeWithDisabledMerges() const; + private: Volumes volumes; const String name; diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index df42f164e34..b29966751f9 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -227,6 +227,9 @@ bool MergeTreeDataMergerMutator::selectPartsToMerge( IMergeSelector::PartsRanges parts_ranges; StoragePolicyPtr storage_policy = data.getStoragePolicy(); + /// Volumes with stopped merges are extremely rare situation. + /// Check it once and don't check each part (this is bad for performance). + bool has_volumes_with_disabled_merges = storage_policy->hasAnyVolumeWithDisabledMerges(); const String * prev_partition_id = nullptr; /// Previous part only in boundaries of partition frame @@ -277,7 +280,7 @@ bool MergeTreeDataMergerMutator::selectPartsToMerge( part_info.data = ∂ part_info.ttl_infos = &part->ttl_infos; part_info.compression_codec_desc = part->default_codec->getFullCodecDesc(); - part_info.shall_participate_in_merges = part->shallParticipateInMerges(storage_policy); + part_info.shall_participate_in_merges = has_volumes_with_disabled_merges ? part->shallParticipateInMerges(storage_policy) : true; parts_ranges.back().emplace_back(part_info); diff --git a/src/Storages/MergeTree/SimpleMergeSelector.cpp b/src/Storages/MergeTree/SimpleMergeSelector.cpp index 335833998c8..1156c17835b 100644 --- a/src/Storages/MergeTree/SimpleMergeSelector.cpp +++ b/src/Storages/MergeTree/SimpleMergeSelector.cpp @@ -92,19 +92,21 @@ double mapPiecewiseLinearToUnit(double value, double min, double max) /** Is allowed to merge parts in range with specific properties. 
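 *
 * (The size axis is mapped through log(1 + x) onto [0, 1] between two
 * boundary logarithms; after this patch those boundaries are computed
 * once per select() call and passed in, instead of being recomputed for
 * every candidate range, because log() was showing up in the profile of
 * merge assignment.)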
*/ bool allow( - double sum_size, - double max_size, - double min_age, - double range_size, - double partition_size, + size_t sum_size, + size_t max_size, + size_t min_age, + size_t range_size, + size_t partition_size, + double min_size_to_lower_base_log, + double max_size_to_lower_base_log, const SimpleMergeSelector::Settings & settings) { // std::cerr << "sum_size: " << sum_size << "\n"; /// Map size to 0..1 using logarithmic scale - /// Use log(1 + x) instead of log1p(x) because our x variables (sum_size and settings) are always integer. + /// Use log(1 + x) instead of log1p(x) because our sum_size is always integer. /// Also log1p seems to be slow and significantly affect performance of merges assignment. - double size_normalized = mapPiecewiseLinearToUnit(log(1 + sum_size), log(1 + settings.min_size_to_lower_base), log(1 + settings.max_size_to_lower_base)); + double size_normalized = mapPiecewiseLinearToUnit(log(1 + sum_size), min_size_to_lower_base_log, max_size_to_lower_base_log); // std::cerr << "size_normalized: " << size_normalized << "\n"; @@ -143,7 +145,9 @@ void selectWithinPartition( const SimpleMergeSelector::PartsRange & parts, const size_t max_total_size_to_merge, Estimator & estimator, - const SimpleMergeSelector::Settings & settings) + const SimpleMergeSelector::Settings & settings, + double min_size_to_lower_base_log, + double max_size_to_lower_base_log) { size_t parts_count = parts.size(); if (parts_count <= 1) @@ -180,7 +184,7 @@ void selectWithinPartition( if (max_total_size_to_merge && sum_size > max_total_size_to_merge) break; - if (allow(sum_size, max_size, min_age, end - begin, parts_count, settings)) + if (allow(sum_size, max_size, min_age, end - begin, parts_count, min_size_to_lower_base_log, max_size_to_lower_base_log, settings)) estimator.consider( parts.begin() + begin, parts.begin() + end, @@ -200,8 +204,12 @@ SimpleMergeSelector::PartsRange SimpleMergeSelector::select( { Estimator estimator; + /// Precompute logarithm of settings boundaries, because log function is quite expensive in terms of performance + const double min_size_to_lower_base_log = log(1 + settings.min_size_to_lower_base); + const double max_size_to_lower_base_log = log(1 + settings.max_size_to_lower_base); + for (const auto & part_range : parts_ranges) - selectWithinPartition(part_range, max_total_size_to_merge, estimator, settings); + selectWithinPartition(part_range, max_total_size_to_merge, estimator, settings, min_size_to_lower_base_log, max_size_to_lower_base_log); return estimator.getBest(); } From ac7af67ede43e8a1923e1476d91e03c65ce1a16d Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Thu, 22 Oct 2020 15:52:43 +0300 Subject: [PATCH 189/432] fixup --- docker/test/performance-comparison/Dockerfile | 18 ++++++++++-------- docker/test/performance-comparison/compare.sh | 1 + 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/docker/test/performance-comparison/Dockerfile b/docker/test/performance-comparison/Dockerfile index 1a904cf73c2..92c3da4d059 100644 --- a/docker/test/performance-comparison/Dockerfile +++ b/docker/test/performance-comparison/Dockerfile @@ -9,6 +9,7 @@ RUN apt-get update \ && DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \ bash \ curl \ + dmidecode \ g++ \ gdb \ git \ @@ -37,15 +38,16 @@ RUN apt-get update \ COPY * / -# Bind everything to NUMA node #1, if there's more than one. We avoid node #0, -# because it is more susceptible to system interruptions. 
We have to bind both -# servers and the tmpfs on which the database is stored. How to do it through -# Yandex Sandbox API is unclear, but by default tmpfs uses 'process allocation -# policy', not sure which process but hopefully the one that writes to it, so -# just bind the downloader script as well. -# We could also try to remount it with proper options in Sandbox task. +# Bind everything to one NUMA node, if there's more than one. Theoretically the +# node #0 should be less stable because of system interruptions. We bind +# randomly to node 1 or 0 to gather some statistics on that. We have to bind +# both servers and the tmpfs on which the database is stored. How to do it +# through Yandex Sandbox API is unclear, but by default tmpfs uses +# 'process allocation policy', not sure which process but hopefully the one that +# writes to it, so just bind the downloader script as well. We could also try to +# remount it with proper options in Sandbox task. # https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt -CMD ["bash", "-c", "node=$(numactl --hardware | grep -q 'available: 1 nodes' && echo 0 || echo 1); echo Will bind to NUMA node $node; numactl --cpunodebind=$node --membind=$node /entrypoint.sh"] +CMD ["bash", "-c", "node=$((RANDOM % $(numactl --hardware | sed -n 's/^.*available:\(.*\)nodes.*$/\1/p'))); echo Will bind to NUMA node $node; numactl --cpunodebind=$node --membind=$node /entrypoint.sh"] # docker run --network=host --volume :/workspace --volume=:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> yandex/clickhouse-performance-comparison diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 8a567d7a11a..258bc0a95f7 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -1088,6 +1088,7 @@ case "$stage" in time configure ;& "restart") + numactl --show ||: numactl --hardware ||: lscpu ||: dmidecode -t 4 ||: From 5d4ae538fd9ed8e3ff7904b7091af56a6ee9caca Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 22 Oct 2020 16:14:49 +0300 Subject: [PATCH 190/432] Flaky test --- .../0_stateless/00652_replicated_mutations_zookeeper.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh b/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh index 9e4bdba1294..356e97015b3 100755 --- a/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh +++ b/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh @@ -62,8 +62,8 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE mutations_cleaner_r2(x UInt32) ENGINE ${CLICKHOUSE_CLIENT} --query="INSERT INTO mutations_cleaner_r1(x) VALUES (1), (2), (3), (4)" # Add some mutations and wait for their execution -${CLICKHOUSE_CLIENT} --query="ALTER TABLE mutations_cleaner_r1 DELETE WHERE x = 1 SETTINGS mutations_sync = 2" -${CLICKHOUSE_CLIENT} --query="ALTER TABLE mutations_cleaner_r1 DELETE WHERE x = 2 SETTINGS mutations_sync = 2" +${CLICKHOUSE_CLIENT} --query="ALTER TABLE mutations_cleaner_r1 DELETE WHERE x = 1" +${CLICKHOUSE_CLIENT} --query="ALTER TABLE mutations_cleaner_r1 DELETE WHERE x = 2" ${CLICKHOUSE_CLIENT} --query="ALTER TABLE mutations_cleaner_r1 DELETE WHERE x = 3 SETTINGS mutations_sync = 2" # Add another mutation and prevent its execution on the second replica From 1d07ece5d1b309c89a8ece2c48bb2247314cfad1 Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 22 Oct 2020 17:15:56 +0300 Subject: [PATCH 191/432] Fix clang tidy 
warning --- src/Storages/MergeTree/SimpleMergeSelector.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Storages/MergeTree/SimpleMergeSelector.cpp b/src/Storages/MergeTree/SimpleMergeSelector.cpp index 1156c17835b..972c6ea6ecb 100644 --- a/src/Storages/MergeTree/SimpleMergeSelector.cpp +++ b/src/Storages/MergeTree/SimpleMergeSelector.cpp @@ -92,11 +92,11 @@ double mapPiecewiseLinearToUnit(double value, double min, double max) /** Is allowed to merge parts in range with specific properties. */ bool allow( - size_t sum_size, - size_t max_size, - size_t min_age, - size_t range_size, - size_t partition_size, + double sum_size, + double max_size, + double min_age, + double range_size, + double partition_size, double min_size_to_lower_base_log, double max_size_to_lower_base_log, const SimpleMergeSelector::Settings & settings) From d91db2473e2083b7c7a65648e4a64ecabab208b1 Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 22 Oct 2020 17:33:23 +0300 Subject: [PATCH 192/432] Fix bash --- docker/test/stateless/run.sh | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index e69fdc0fce0..f098cab9625 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -17,14 +17,17 @@ service clickhouse-server start && sleep 5 if grep -q -- "--use-skip-list" /usr/bin/clickhouse-test; then SKIP_LIST_OPT="--use-skip-list" fi -# We can have several additional options so we path them as array because it's -# more idiologically correct. -read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}" function run_tests() { + # We can have several additional options so we path them as array because it's + # more idiologically correct. + read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}" for i in $(seq 1 $NUM_TRIES); do - clickhouse-test --testname --shard --zookeeper --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt + clickhouse-test --testname --shard --zookeeper --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt + if [ ${PIPESTATUS[0]} -ne "0" ]; then + break; + fi done } From ccf59c6412772d84def060be399be405756ee740 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Thu, 22 Oct 2020 22:53:46 +0800 Subject: [PATCH 193/432] Fix definitely wrong bug --- src/Common/HashTable/TwoLevelStringHashMap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/HashTable/TwoLevelStringHashMap.h b/src/Common/HashTable/TwoLevelStringHashMap.h index 55d54e51b6a..6bd8f74dbd6 100644 --- a/src/Common/HashTable/TwoLevelStringHashMap.h +++ b/src/Common/HashTable/TwoLevelStringHashMap.h @@ -18,7 +18,7 @@ public: void ALWAYS_INLINE forEachMapped(Func && func) { for (auto i = 0u; i < this->NUM_BUCKETS; ++i) - return this->impls[i].forEachMapped(func); + this->impls[i].forEachMapped(func); } TMapped & ALWAYS_INLINE operator[](const Key & x) From f4f104eeb80154491826ed6abeed4198f52a0ade Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 13 Oct 2020 17:29:24 +0300 Subject: [PATCH 194/432] Database atomic sync drop detach --- tests/config/install.sh | 1 + tests/config/users.d/database_atomic_drop_detach_sync.xml | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 tests/config/users.d/database_atomic_drop_detach_sync.xml diff --git a/tests/config/install.sh 
b/tests/config/install.sh index ef9604904e7..ff96e46c947 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -30,6 +30,7 @@ ln -sf $SRC_PATH/config.d/database_atomic.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/users.d/log_queries.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/users.d/readonly.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/users.d/access_management.xml $DEST_SERVER_PATH/users.d/ +ln -sf $SRC_PATH/users.d/database_atomic_drop_detach_sync.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/ints_dictionary.xml $DEST_SERVER_PATH/ ln -sf $SRC_PATH/strings_dictionary.xml $DEST_SERVER_PATH/ diff --git a/tests/config/users.d/database_atomic_drop_detach_sync.xml b/tests/config/users.d/database_atomic_drop_detach_sync.xml new file mode 100644 index 00000000000..4313edf8be1 --- /dev/null +++ b/tests/config/users.d/database_atomic_drop_detach_sync.xml @@ -0,0 +1,7 @@ + + + + 1 + + + From 0275e9c0104768c229a04004499f8e708a49240e Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Thu, 22 Oct 2020 19:42:31 +0300 Subject: [PATCH 195/432] work around docker weirdness --- docker/test/performance-comparison/Dockerfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docker/test/performance-comparison/Dockerfile b/docker/test/performance-comparison/Dockerfile index 92c3da4d059..76cadc3ce11 100644 --- a/docker/test/performance-comparison/Dockerfile +++ b/docker/test/performance-comparison/Dockerfile @@ -47,7 +47,9 @@ COPY * / # writes to it, so just bind the downloader script as well. We could also try to # remount it with proper options in Sandbox task. # https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt -CMD ["bash", "-c", "node=$((RANDOM % $(numactl --hardware | sed -n 's/^.*available:\(.*\)nodes.*$/\1/p'))); echo Will bind to NUMA node $node; numactl --cpunodebind=$node --membind=$node /entrypoint.sh"] +# Double-escaped backslashes are a tribute to the engineering wonder of docker -- +# it gives '/bin/sh: 1: [bash,: not found' otherwise. 
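# (The shell arithmetic in CMD below picks a random node index:
# `numactl --hardware` reports a line like 'available: 2 nodes', sed
# extracts the count, and RANDOM % count chooses which node to bind to.)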
+CMD ["bash", "-c", "node=$((RANDOM % $(numactl --hardware | sed -n 's/^.*available:\\(.*\\)nodes.*$/\\1/p'))); echo Will bind to NUMA node $node; numactl --cpunodebind=$node --membind=$node /entrypoint.sh"] # docker run --network=host --volume :/workspace --volume=:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> yandex/clickhouse-performance-comparison From 145e2b012f13a9c469d3939f99019d6c457de81d Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Thu, 22 Oct 2020 19:47:20 +0300 Subject: [PATCH 196/432] cleanup --- ...pentelemetry-draft.md => opentelemetry.md} | 14 ++++++++--- programs/server/config.xml | 6 ++--- src/Interpreters/ClientInfo.cpp | 6 ----- src/Interpreters/ClientInfo.h | 7 +++--- src/Interpreters/Context.cpp | 4 ++-- src/Interpreters/Context.h | 2 +- src/Interpreters/InterpreterSystemQuery.cpp | 4 ++-- ...emetryLog.cpp => OpenTelemetrySpanLog.cpp} | 2 +- ...nTelemetryLog.h => OpenTelemetrySpanLog.h} | 23 ------------------- src/Interpreters/SystemLog.cpp | 12 +++++----- src/Interpreters/SystemLog.h | 4 ++-- src/Interpreters/executeQuery.cpp | 14 +++++------ src/Interpreters/ya.make | 2 +- .../01455_opentelemetry_distributed.sh | 8 +++---- 14 files changed, 43 insertions(+), 65 deletions(-) rename docs/en/operations/{opentelemetry-draft.md => opentelemetry.md} (87%) rename src/Interpreters/{OpenTelemetryLog.cpp => OpenTelemetrySpanLog.cpp} (97%) rename src/Interpreters/{OpenTelemetryLog.h => OpenTelemetrySpanLog.h} (59%) diff --git a/docs/en/operations/opentelemetry-draft.md b/docs/en/operations/opentelemetry.md similarity index 87% rename from docs/en/operations/opentelemetry-draft.md rename to docs/en/operations/opentelemetry.md index 3363b37b6d6..7c2de8ed930 100644 --- a/docs/en/operations/opentelemetry-draft.md +++ b/docs/en/operations/opentelemetry.md @@ -1,9 +1,17 @@ -# [draft] OpenTelemetry support +--- +toc_priority: 62 +toc_title: [experimental] OpenTelemetry +--- + +# [experimental] OpenTelemetry Support [OpenTelemetry](https://opentelemetry.io/) is an open standard for collecting traces and metrics from distributed application. ClickHouse has some support for OpenTelemetry. +!!! warning "Warning" +This is an experimental feature that will change in backwards-incompatible ways in the future releases. + ## Supplying Trace Context to ClickHouse @@ -40,9 +48,9 @@ a dependency on a particular monitoring system, instead only providing the tracing data conforming to the standard. A natural way to do so in an SQL RDBMS is a system table. OpenTelemetry trace span information [required by the standard](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/overview.md#span) -is stored in the system table called `system.opentelemetry_log`. +is stored in the system table called `system.opentelemetry_span_log`. -The table must be enabled in the server configuration, see the `opentelemetry_log` +The table must be enabled in the server configuration, see the `opentelemetry_span_log` element in the default config file `config.xml`. It is enabled by default. 
The table has the following columns: diff --git a/programs/server/config.xml b/programs/server/config.xml index 9a1b626b26a..9ba9cf61b87 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -597,7 +597,7 @@ - + + 86400 + diff --git a/tests/integration/test_MemoryTracking/test.py b/tests/integration/test_MemoryTracking/test.py index 6009e8cc953..a0ad8dc519d 100644 --- a/tests/integration/test_MemoryTracking/test.py +++ b/tests/integration/test_MemoryTracking/test.py @@ -8,7 +8,10 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=['configs/no_system_log.xml']) +node = cluster.add_instance('node', main_configs=[ + 'configs/no_system_log.xml', + 'configs/asynchronous_metrics_update_period_s.xml', +]) logging.getLogger().setLevel(logging.INFO) logging.getLogger().addHandler(logging.StreamHandler()) From 21315127a0adfade3050c1d4f1662077beed9068 Mon Sep 17 00:00:00 2001 From: sundy-li <543950155@qq.com> Date: Sat, 24 Oct 2020 10:05:54 +0800 Subject: [PATCH 243/432] remove unused codes in AggregateFunctionGroupBitmapData --- .../AggregateFunctionGroupBitmapData.h | 124 ------------------ 1 file changed, 124 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h b/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h index 20788318b3d..d80e5e81f19 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h +++ b/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h @@ -12,7 +12,6 @@ // TODO: find out what it is. On github, they have proper interface headers like // this one: https://github.com/RoaringBitmap/CRoaring/blob/master/include/roaring/roaring.h #include -void * containerptr_roaring_bitmap_add(roaring_bitmap_t * r, uint32_t val, uint8_t * typecode, int * index); namespace DB { @@ -599,129 +598,6 @@ public: } } -private: - - /// To read and write the DB Buffer directly, migrate code from CRoaring - void db_roaring_bitmap_add_many(DB::ReadBuffer & db_buf, roaring_bitmap_t * r, size_t n_args) - { - void * container = nullptr; // hold value of last container touched - uint8_t typecode = 0; // typecode of last container touched - uint32_t prev = 0; // previous valued inserted - size_t i = 0; // index of value - int containerindex = 0; - if (n_args == 0) - return; - uint32_t val; - readBinary(val, db_buf); - container = containerptr_roaring_bitmap_add(r, val, &typecode, &containerindex); - prev = val; - ++i; - for (; i < n_args; ++i) - { - readBinary(val, db_buf); - if (((prev ^ val) >> 16) == 0) - { // no need to seek the container, it is at hand - // because we already have the container at hand, we can do the - // insertion - // automatically, bypassing the roaring_bitmap_add call - uint8_t newtypecode = typecode; - void * container2 = container_add(container, val & 0xFFFF, typecode, &newtypecode); - // rare instance when we need to - if (container2 != container) - { - // change the container type - container_free(container, typecode); - ra_set_container_at_index(&r->high_low_container, containerindex, container2, newtypecode); - typecode = newtypecode; - container = container2; - } - } - else - { - container = containerptr_roaring_bitmap_add(r, val, &typecode, &containerindex); - } - prev = val; - } - } - - void db_ra_to_uint32_array(DB::WriteBuffer & db_buf, roaring_array_t * ra) const - { - size_t ctr = 0; - for (Int32 i = 0; i < ra->size; ++i) - { - Int32 num_added = db_container_to_uint32_array(db_buf, ra->containers[i], 
ra->typecodes[i], (static_cast(ra->keys[i])) << 16); - ctr += num_added; - } - } - - UInt32 db_container_to_uint32_array(DB::WriteBuffer & db_buf, const void * container, uint8_t typecode, UInt32 base) const - { - container = container_unwrap_shared(container, &typecode); - switch (typecode) - { - case BITSET_CONTAINER_TYPE_CODE: - return db_bitset_container_to_uint32_array(db_buf, static_cast(container), base); - case ARRAY_CONTAINER_TYPE_CODE: - return db_array_container_to_uint32_array(db_buf, static_cast(container), base); - case RUN_CONTAINER_TYPE_CODE: - return db_run_container_to_uint32_array(db_buf, static_cast(container), base); - } - return 0; - } - - UInt32 db_bitset_container_to_uint32_array(DB::WriteBuffer & db_buf, const bitset_container_t * cont, UInt32 base) const - { - return static_cast(db_bitset_extract_setbits(db_buf, cont->array, BITSET_CONTAINER_SIZE_IN_WORDS, base)); - } - - size_t db_bitset_extract_setbits(DB::WriteBuffer & db_buf, UInt64 * bitset, size_t length, UInt32 base) const - { - UInt32 outpos = 0; - for (size_t i = 0; i < length; ++i) - { - UInt64 w = bitset[i]; - while (w != 0) - { - UInt64 t = w & (~w + 1); // on x64, should compile to BLSI (careful: the Intel compiler seems to fail) - UInt32 r = __builtin_ctzll(w); // on x64, should compile to TZCNT - UInt32 val = r + base; - writePODBinary(val, db_buf); - outpos++; - w ^= t; - } - base += 64; - } - return outpos; - } - - int db_array_container_to_uint32_array(DB::WriteBuffer & db_buf, const array_container_t * cont, UInt32 base) const - { - UInt32 outpos = 0; - for (Int32 i = 0; i < cont->cardinality; ++i) - { - const UInt32 val = base + cont->array[i]; - writePODBinary(val, db_buf); - outpos++; - } - return outpos; - } - - int db_run_container_to_uint32_array(DB::WriteBuffer & db_buf, const run_container_t * cont, UInt32 base) const - { - UInt32 outpos = 0; - for (Int32 i = 0; i < cont->n_runs; ++i) - { - UInt32 run_start = base + cont->runs[i].value; - UInt16 le = cont->runs[i].length; - for (Int32 j = 0; j <= le; ++j) - { - UInt32 val = run_start + j; - writePODBinary(val, db_buf); - outpos++; - } - } - return outpos; - } }; template From e354108e532b59c98f61ca4e72415543178a9451 Mon Sep 17 00:00:00 2001 From: zhang2014 Date: Sat, 24 Oct 2020 10:54:02 +0800 Subject: [PATCH 244/432] ISSUES-15883 trigger CI From 74558a4e27d14ff7dc6ae21b5e7dc10cf3f48d06 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 24 Oct 2020 06:41:47 +0300 Subject: [PATCH 245/432] Better diagnostics when client has dropped connection --- src/Server/TCPHandler.cpp | 26 +++++++++++++++++++++++--- src/Server/TCPHandler.h | 1 + 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 406e29ba4ab..a37f88f9306 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -277,6 +277,9 @@ void TCPHandler::runImpl() /// Do it before sending end of stream, to have a chance to show log message in client. query_scope->logPeakMemoryUsage(); + if (state.is_connection_closed) + break; + sendLogs(); sendEndOfStream(); @@ -444,7 +447,11 @@ bool TCPHandler::readDataNext(const size_t & poll_interval, const int & receive_ /// If client disconnected. if (in->eof()) + { + LOG_INFO(log, "Client has dropped the connection, cancel the query."); + state.is_connection_closed = true; return false; + } /// We accept and process data. And if they are over, then we leave. 
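    /// (receivePacket() returning false is the graceful end of data;
    /// a dropped connection is the eof() case handled above, which now
    /// logs the fact and sets state.is_connection_closed.)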
if (!receivePacket()) @@ -477,9 +484,8 @@ void TCPHandler::readData(const Settings & connection_settings) std::tie(poll_interval, receive_timeout) = getReadTimeouts(connection_settings); sendLogs(); - while (true) - if (!readDataNext(poll_interval, receive_timeout)) - return; + while (readDataNext(poll_interval, receive_timeout)) + ; } @@ -567,6 +573,9 @@ void TCPHandler::processOrdinaryQuery() sendProgress(); } + if (state.is_connection_closed) + return; + sendData({}); } @@ -632,6 +641,9 @@ void TCPHandler::processOrdinaryQueryWithProcessors() sendLogs(); } + if (state.is_connection_closed) + return; + sendData({}); } @@ -1179,6 +1191,14 @@ bool TCPHandler::isQueryCancelled() /// During request execution the only packet that can come from the client is stopping the query. if (static_cast(*in).poll(0)) { + if (in->eof()) + { + LOG_INFO(log, "Client has dropped the connection, cancel the query."); + state.is_cancelled = true; + state.is_connection_closed = true; + return true; + } + UInt64 packet_type = 0; readVarUInt(packet_type, *in); diff --git a/src/Server/TCPHandler.h b/src/Server/TCPHandler.h index 12149d9a66f..2f2bf35e59e 100644 --- a/src/Server/TCPHandler.h +++ b/src/Server/TCPHandler.h @@ -57,6 +57,7 @@ struct QueryState /// Is request cancelled bool is_cancelled = false; + bool is_connection_closed = false; /// empty or not bool is_empty = true; /// Data was sent. From 2bbb663fdd2cadc1fae7b3a78767132a1b9363e8 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sat, 24 Oct 2020 07:30:39 +0300 Subject: [PATCH 246/432] Update programs/local/LocalServer.cpp Co-authored-by: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> --- programs/local/LocalServer.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 3a1aecbc1cf..db1838c38f7 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -48,7 +48,6 @@ namespace DB namespace ErrorCodes { extern const int BAD_ARGUMENTS; - extern const int LOGICAL_ERROR; extern const int SYNTAX_ERROR; extern const int CANNOT_LOAD_CONFIG; extern const int FILE_ALREADY_EXISTS; From 0f3caaffe400ef44d82630a65551cd42763872a2 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sat, 24 Oct 2020 07:48:15 +0300 Subject: [PATCH 247/432] Trigger CI --- tests/integration/test_disabled_mysql_server/test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integration/test_disabled_mysql_server/test.py b/tests/integration/test_disabled_mysql_server/test.py index 34dd094af3d..df5f123c6bd 100644 --- a/tests/integration/test_disabled_mysql_server/test.py +++ b/tests/integration/test_disabled_mysql_server/test.py @@ -51,7 +51,6 @@ def test_disabled_mysql_server(started_cluster): with PartitionManager() as pm: clickhouse_node.query("CREATE DATABASE test_db ENGINE = MySQL('mysql1:3306', 'test_db', 'root', 'clickhouse')") - pm._add_rule({'source': clickhouse_node.ip_address, 'destination_port': 3306, 'action': 'DROP'}) clickhouse_node.query("SELECT * FROM system.parts") From 0748377ab17e719caab311a8480bc23c52768df2 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sat, 24 Oct 2020 07:59:08 +0300 Subject: [PATCH 248/432] Revert "Optionally upload clickhouse binary in fast test" --- docker/test/fasttest/Dockerfile | 1 - docker/test/fasttest/run.sh | 3 --- 2 files changed, 4 deletions(-) diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile index 0fff738e718..6547a98c58b 100644 --- a/docker/test/fasttest/Dockerfile +++ 
b/docker/test/fasttest/Dockerfile @@ -79,7 +79,6 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone ENV COMMIT_SHA='' ENV PULL_REQUEST_NUMBER='' -ENV COPY_CLICKHOUSE_BINARY_TO_OUTPUT=0 COPY run.sh / CMD ["/bin/bash", "/run.sh"] diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 74fdbeedc08..91fe84a04cd 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -172,9 +172,6 @@ function build ( cd "$FASTTEST_BUILD" time ninja clickhouse-bundle | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/build_log.txt" -if [ "$COPY_CLICKHOUSE_BINARY_TO_OUTPUT" -eq "1" ]; then - cp programs/clickhouse "$FASTTEST_OUTPUT/clickhouse" -fi ccache --show-stats ||: ) } From c3c6ac39e012fd3eba4bc33925fdfbb39be70984 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 24 Oct 2020 09:06:49 +0300 Subject: [PATCH 249/432] Tune TTL of the background query in 01541_max_memory_usage_for_user --- tests/queries/0_stateless/01541_max_memory_usage_for_user.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01541_max_memory_usage_for_user.sh b/tests/queries/0_stateless/01541_max_memory_usage_for_user.sh index 7544ecfb026..945f202a803 100755 --- a/tests/queries/0_stateless/01541_max_memory_usage_for_user.sh +++ b/tests/queries/0_stateless/01541_max_memory_usage_for_user.sh @@ -35,7 +35,7 @@ function execute_tcp_one_session() # one users query in background (to avoid reseting max_memory_usage_for_user) # --max_block_size=1 to make it killable (check the state each 1 second, 1 row) # (the test takes ~40 seconds in debug build, so 60 seconds is ok) -${CLICKHOUSE_CLIENT} --max_block_size=1 --format Null -q 'SELECT sleepEachRow(1) FROM numbers(60)' & +${CLICKHOUSE_CLIENT} --max_block_size=1 --format Null -q 'SELECT sleepEachRow(1) FROM numbers(600)' & # trap sleep_query_pid=$! function cleanup() From 85c69aad7f3c4fbda047ca2edbead8bae8b0f39a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 24 Oct 2020 09:23:54 +0300 Subject: [PATCH 250/432] Add check for BOM --- utils/check-style/check-style | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/utils/check-style/check-style b/utils/check-style/check-style index a65ffeb8c67..4983782c00d 100755 --- a/utils/check-style/check-style +++ b/utils/check-style/check-style @@ -94,3 +94,8 @@ find $ROOT_PATH/{src,programs,utils} -name '*.h' | while read file; do [[ $(head # Check for executable bit on non-executable files find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} '(' -name '*.cpp' -or -name '*.h' -or -name '*.sql' -or -name '*.xml' -or -name '*.reference' -or -name '*.txt' -or -name '*.md' ')' -and -executable | grep -q '.' && echo "These files should not be executable." + +# Check for BOM +find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' | xargs grep -l -F $'\xEF\xBB\xBF' && echo "Files should not have UTF-8 BOM" +find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' | xargs grep -l -F $'\xFF\xFE' && echo "Files should not have UTF-16LE BOM" +find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' | xargs grep -l -F $'\xFE\xFF' && echo "Files should not have UTF-16BE BOM" From 97d84dd85549dc7c6f635d86f70bf2bfa1b1fc89 Mon Sep 17 00:00:00 2001 From: Xianda Ke Date: Sat, 24 Oct 2020 17:37:51 +0800 Subject: [PATCH 251/432] minor fix. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit redundant variable, duplicated defination of the variable aad_data compiler error: variable ‘aad_value’ set but not used [-Werror=unused-but-set-variable] --- src/Functions/FunctionsAES.h | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/Functions/FunctionsAES.h b/src/Functions/FunctionsAES.h index 6600931118e..10c4a27e509 100644 --- a/src/Functions/FunctionsAES.h +++ b/src/Functions/FunctionsAES.h @@ -307,11 +307,6 @@ private: } const auto input_value = input_column->getDataAt(r); - auto aad_value = StringRef{}; - if constexpr (mode == CipherMode::RFC5116_AEAD_AES_GCM && !std::is_same_v>) - { - aad_value = aad_column->getDataAt(r); - } if constexpr (mode != CipherMode::MySQLCompatibility) { From 34b9d15b66c1b468fb2d84f7097076bef8d17a63 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sat, 24 Oct 2020 21:34:54 +0300 Subject: [PATCH 252/432] Update ThreadStatusExt.cpp --- src/Interpreters/ThreadStatusExt.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/ThreadStatusExt.cpp b/src/Interpreters/ThreadStatusExt.cpp index d324307b487..2166c78ef7c 100644 --- a/src/Interpreters/ThreadStatusExt.cpp +++ b/src/Interpreters/ThreadStatusExt.cpp @@ -300,7 +300,7 @@ void ThreadStatus::detachQuery(bool exit_if_already_detached, bool thread_exits) performance_counters.setParent(&ProfileEvents::global_counters); memory_tracker.reset(); - /// Must reset pointer to thread_group's memory_tracker, because it will be destroyed two lines below. + /// Must reset pointer to thread_group's memory_tracker, because it will be destroyed two lines below (will reset to its parent). memory_tracker.setParent(thread_group->memory_tracker.getParent()); query_id.clear(); From 1d170f57457fd575714797615b65f2304746e06c Mon Sep 17 00:00:00 2001 From: Ivan <5627721+abyss7@users.noreply.github.com> Date: Sat, 24 Oct 2020 21:46:10 +0300 Subject: [PATCH 253/432] ASTTableIdentifier Part #1: improve internal representation of ASTIdentifier name (#16149) * Use only |name_parts| as primary name source * Restore legacy logic for table restoration * Fix build * Fix tests * Add pytest server config * Fix tests * Fixes due to review --- programs/client/Client.cpp | 2 +- programs/copier/Internals.cpp | 6 +- src/Compression/CompressionFactory.cpp | 4 +- src/DataTypes/DataTypeFactory.cpp | 2 +- .../getDictionaryConfigurationFromAST.cpp | 4 +- src/Interpreters/ActionsVisitor.cpp | 2 +- src/Interpreters/AddDefaultDatabaseVisitor.h | 15 +-- src/Interpreters/ArrayJoinedColumnsVisitor.h | 14 +-- src/Interpreters/CollectJoinOnKeysVisitor.cpp | 12 +-- .../ExtractExpressionInfoVisitor.cpp | 2 +- src/Interpreters/IdentifierSemantic.cpp | 31 +++--- src/Interpreters/IdentifierSemantic.h | 10 +- .../JoinToSubqueryTransformVisitor.cpp | 28 +++--- src/Interpreters/JoinedTables.cpp | 8 +- src/Interpreters/QueryNormalizer.cpp | 8 +- src/Interpreters/RenameColumnVisitor.cpp | 5 +- .../RequiredSourceColumnsData.cpp | 4 +- .../RequiredSourceColumnsVisitor.cpp | 7 +- .../TranslateQualifiedNamesVisitor.cpp | 11 ++- .../evaluateConstantExpression.cpp | 4 +- src/Interpreters/getClusterName.cpp | 2 +- src/Parsers/ASTColumnsTransformers.cpp | 2 +- src/Parsers/ASTIdentifier.cpp | 99 +++++++++++-------- src/Parsers/ASTIdentifier.h | 43 ++++---- src/Parsers/ExpressionElementParsers.cpp | 12 +-- src/Parsers/ExpressionListParsers.cpp | 14 ++- src/Parsers/MySQL/ASTDeclareConstraint.cpp | 2 +- src/Parsers/MySQL/ASTDeclareIndex.cpp | 18 
++-- src/Parsers/MySQL/ASTDeclarePartition.cpp | 2 +- src/Parsers/MySQL/ASTDeclareReference.cpp | 2 +- src/Parsers/MySQL/ASTDeclareSubPartition.cpp | 2 +- .../MySQL/tests/gtest_column_parser.cpp | 8 +- .../MySQL/tests/gtest_constraint_parser.cpp | 10 +- .../MySQL/tests/gtest_create_parser.cpp | 2 +- .../MySQL/tests/gtest_index_parser.cpp | 12 +-- .../tests/gtest_partition_options_parser.cpp | 38 +++---- .../MySQL/tests/gtest_partition_parser.cpp | 34 +++---- .../MySQL/tests/gtest_reference_parser.cpp | 22 ++--- .../MySQL/tests/gtest_subpartition_parser.cpp | 8 +- .../tests/gtest_table_options_parser.cpp | 24 ++--- src/Parsers/ParserCreateQuery.cpp | 4 +- src/Parsers/ParserDictionary.cpp | 4 +- src/Parsers/ParserSystemQuery.cpp | 4 +- src/Parsers/tests/gtest_dictionary_parser.cpp | 2 +- src/Storages/AlterCommands.cpp | 10 +- src/Storages/MutationCommands.cpp | 8 +- src/Storages/StorageDistributed.cpp | 2 +- .../System/StorageSystemZooKeeper.cpp | 2 +- tests/queries/server.py | 41 +++++--- utils/db-generator/query_db_generator.cpp | 14 +-- 50 files changed, 322 insertions(+), 304 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 07f2a231afe..3cd584c0e55 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1896,7 +1896,7 @@ private: if (has_vertical_output_suffix) throw Exception("Output format already specified", ErrorCodes::CLIENT_OUTPUT_FORMAT_SPECIFIED); const auto & id = query_with_output->format->as(); - current_format = id.name; + current_format = id.name(); } } diff --git a/programs/copier/Internals.cpp b/programs/copier/Internals.cpp index 24b5d616af4..0f607ea5faf 100644 --- a/programs/copier/Internals.cpp +++ b/programs/copier/Internals.cpp @@ -168,11 +168,11 @@ ASTPtr extractOrderBy(const ASTPtr & storage_ast) throw Exception("ORDER BY cannot be empty", ErrorCodes::BAD_ARGUMENTS); } -/// Wraps only identifiers with backticks. +/// Wraps only identifiers with backticks. std::string wrapIdentifiersWithBackticks(const ASTPtr & root) { if (auto identifier = std::dynamic_pointer_cast(root)) - return backQuote(identifier->name); + return backQuote(identifier->name()); if (auto function = std::dynamic_pointer_cast(root)) return function->name + '(' + wrapIdentifiersWithBackticks(function->arguments) + ')'; @@ -214,7 +214,7 @@ Names extractPrimaryKeyColumnNames(const ASTPtr & storage_ast) for (size_t i = 0; i < sorting_key_size; ++i) { /// Column name could be represented as a f_1(f_2(...f_n(column_name))). - /// Each f_i could take one or more parameters. + /// Each f_i could take one or more parameters. /// We will wrap identifiers with backticks to allow non-standart identifier names. 
String sorting_key_column = sorting_key_expr_list->children[i]->getColumnName(); diff --git a/src/Compression/CompressionFactory.cpp b/src/Compression/CompressionFactory.cpp index 91b4aa4b8de..fe6a5b2dacd 100644 --- a/src/Compression/CompressionFactory.cpp +++ b/src/Compression/CompressionFactory.cpp @@ -76,7 +76,7 @@ ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST(const ASTPtr ASTPtr codec_arguments; if (const auto * family_name = inner_codec_ast->as()) { - codec_family_name = family_name->name; + codec_family_name = family_name->name(); codec_arguments = {}; } else if (const auto * ast_func = inner_codec_ast->as()) @@ -207,7 +207,7 @@ CompressionCodecPtr CompressionCodecFactory::get(const ASTPtr & ast, const IData ASTPtr codec_arguments; if (const auto * family_name = inner_codec_ast->as()) { - codec_family_name = family_name->name; + codec_family_name = family_name->name(); codec_arguments = {}; } else if (const auto * ast_func = inner_codec_ast->as()) diff --git a/src/DataTypes/DataTypeFactory.cpp b/src/DataTypes/DataTypeFactory.cpp index 9386f4b39f1..5052a065163 100644 --- a/src/DataTypes/DataTypeFactory.cpp +++ b/src/DataTypes/DataTypeFactory.cpp @@ -43,7 +43,7 @@ DataTypePtr DataTypeFactory::get(const ASTPtr & ast) const if (const auto * ident = ast->as()) { - return get(ident->name, {}); + return get(ident->name(), {}); } if (const auto * lit = ast->as()) diff --git a/src/Dictionaries/getDictionaryConfigurationFromAST.cpp b/src/Dictionaries/getDictionaryConfigurationFromAST.cpp index b1962e48eea..430c1d591dd 100644 --- a/src/Dictionaries/getDictionaryConfigurationFromAST.cpp +++ b/src/Dictionaries/getDictionaryConfigurationFromAST.cpp @@ -172,7 +172,7 @@ Names getPrimaryKeyColumns(const ASTExpressionList * primary_key) for (size_t index = 0; index != children.size(); ++index) { const ASTIdentifier * key_part = children[index]->as(); - result.push_back(key_part->name); + result.push_back(key_part->name()); } return result; } @@ -367,7 +367,7 @@ void buildConfigurationFromFunctionWithKeyValueArguments( if (const auto * identifier = pair->second->as(); identifier) { - AutoPtr value(doc->createTextNode(identifier->name)); + AutoPtr value(doc->createTextNode(identifier->name())); current_xml_element->appendChild(value); } else if (const auto * literal = pair->second->as(); literal) diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index e0e921b003b..96da40e8f6c 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -582,7 +582,7 @@ void ActionsMatcher::visit(const ASTIdentifier & identifier, const ASTPtr & ast, /// Special check for WITH statement alias. Add alias action to be able to use this alias. 
if (identifier.prefer_alias_to_column_name && !identifier.alias.empty()) - data.addAlias(identifier.name, identifier.alias); + data.addAlias(identifier.name(), identifier.alias); } } diff --git a/src/Interpreters/AddDefaultDatabaseVisitor.h b/src/Interpreters/AddDefaultDatabaseVisitor.h index 9322232c154..bb684c5547a 100644 --- a/src/Interpreters/AddDefaultDatabaseVisitor.h +++ b/src/Interpreters/AddDefaultDatabaseVisitor.h @@ -24,11 +24,12 @@ namespace DB class AddDefaultDatabaseVisitor { public: - AddDefaultDatabaseVisitor(const String & database_name_, bool only_replace_current_database_function_ = false, std::ostream * ostr_ = nullptr) - : database_name(database_name_), - only_replace_current_database_function(only_replace_current_database_function_), - visit_depth(0), - ostr(ostr_) + explicit AddDefaultDatabaseVisitor( + const String & database_name_, bool only_replace_current_database_function_ = false, std::ostream * ostr_ = nullptr) + : database_name(database_name_) + , only_replace_current_database_function(only_replace_current_database_function_) + , visit_depth(0) + , ostr(ostr_) {} void visitDDL(ASTPtr & ast) const @@ -105,7 +106,7 @@ private: void visit(const ASTIdentifier & identifier, ASTPtr & ast) const { if (!identifier.compound()) - ast = createTableIdentifier(database_name, identifier.name); + ast = createTableIdentifier(database_name, identifier.name()); } void visit(ASTSubquery & subquery, ASTPtr &) const @@ -116,7 +117,7 @@ private: void visit(ASTFunction & function, ASTPtr &) const { bool is_operator_in = false; - for (auto name : {"in", "notIn", "globalIn", "globalNotIn"}) + for (const auto * name : {"in", "notIn", "globalIn", "globalNotIn"}) { if (function.name == name) { diff --git a/src/Interpreters/ArrayJoinedColumnsVisitor.h b/src/Interpreters/ArrayJoinedColumnsVisitor.h index 56832914b80..94f6bdaf138 100644 --- a/src/Interpreters/ArrayJoinedColumnsVisitor.h +++ b/src/Interpreters/ArrayJoinedColumnsVisitor.h @@ -98,33 +98,33 @@ private: if (!IdentifierSemantic::getColumnName(node)) return; - auto split = Nested::splitName(node.name); /// ParsedParams, Key1 + auto split = Nested::splitName(node.name()); /// ParsedParams, Key1 - if (array_join_alias_to_name.count(node.name)) + if (array_join_alias_to_name.count(node.name())) { /// ARRAY JOIN was written with an array column. Example: SELECT K1 FROM ... ARRAY JOIN ParsedParams.Key1 AS K1 - array_join_result_to_source[node.name] = array_join_alias_to_name[node.name]; /// K1 -> ParsedParams.Key1 + array_join_result_to_source[node.name()] = array_join_alias_to_name[node.name()]; /// K1 -> ParsedParams.Key1 } else if (array_join_alias_to_name.count(split.first) && !split.second.empty()) { /// ARRAY JOIN was written with a nested table. Example: SELECT PP.KEY1 FROM ... ARRAY JOIN ParsedParams AS PP - array_join_result_to_source[node.name] /// PP.Key1 -> ParsedParams.Key1 + array_join_result_to_source[node.name()] /// PP.Key1 -> ParsedParams.Key1 = Nested::concatenateName(array_join_alias_to_name[split.first], split.second); } - else if (array_join_name_to_alias.count(node.name)) + else if (array_join_name_to_alias.count(node.name())) { /** Example: SELECT ParsedParams.Key1 FROM ... ARRAY JOIN ParsedParams.Key1 AS PP.Key1. * That is, the query uses the original array, replicated by itself. 
*/ array_join_result_to_source[ /// PP.Key1 -> ParsedParams.Key1 - array_join_name_to_alias[node.name]] = node.name; + array_join_name_to_alias[node.name()]] = node.name(); } else if (array_join_name_to_alias.count(split.first) && !split.second.empty()) { /** Example: SELECT ParsedParams.Key1 FROM ... ARRAY JOIN ParsedParams AS PP. */ array_join_result_to_source[ /// PP.Key1 -> ParsedParams.Key1 - Nested::concatenateName(array_join_name_to_alias[split.first], split.second)] = node.name; + Nested::concatenateName(array_join_name_to_alias[split.first], split.second)] = node.name(); } } }; diff --git a/src/Interpreters/CollectJoinOnKeysVisitor.cpp b/src/Interpreters/CollectJoinOnKeysVisitor.cpp index e0fce4854d2..48273e32209 100644 --- a/src/Interpreters/CollectJoinOnKeysVisitor.cpp +++ b/src/Interpreters/CollectJoinOnKeysVisitor.cpp @@ -144,11 +144,11 @@ std::pair CollectJoinOnKeysMatcher::getTableNumbers(const ASTPtr const ASTIdentifier * CollectJoinOnKeysMatcher::unrollAliases(const ASTIdentifier * identifier, const Aliases & aliases) { - if (identifier->compound()) + if (identifier->supposedToBeCompound()) return identifier; UInt32 max_attempts = 100; - for (auto it = aliases.find(identifier->name); it != aliases.end();) + for (auto it = aliases.find(identifier->name()); it != aliases.end();) { const ASTIdentifier * parent = identifier; identifier = it->second->as(); @@ -156,12 +156,12 @@ const ASTIdentifier * CollectJoinOnKeysMatcher::unrollAliases(const ASTIdentifie break; /// not a column alias if (identifier == parent) break; /// alias to itself with the same name: 'a as a' - if (identifier->compound()) + if (identifier->supposedToBeCompound()) break; /// not an alias. Break to prevent cycle through short names: 'a as b, t1.b as a' - it = aliases.find(identifier->name); + it = aliases.find(identifier->name()); if (!max_attempts--) - throw Exception("Cannot unroll aliases for '" + identifier->name + "'", ErrorCodes::LOGICAL_ERROR); + throw Exception("Cannot unroll aliases for '" + identifier->name() + "'", ErrorCodes::LOGICAL_ERROR); } return identifier; @@ -186,7 +186,7 @@ size_t CollectJoinOnKeysMatcher::getTableForIdentifiers(std::vectorname; + const String & name = identifier->name(); bool in_left_table = data.left_table.hasColumn(name); bool in_right_table = data.right_table.hasColumn(name); diff --git a/src/Interpreters/ExtractExpressionInfoVisitor.cpp b/src/Interpreters/ExtractExpressionInfoVisitor.cpp index 5f7754d315a..c730f49fe90 100644 --- a/src/Interpreters/ExtractExpressionInfoVisitor.cpp +++ b/src/Interpreters/ExtractExpressionInfoVisitor.cpp @@ -41,7 +41,7 @@ void ExpressionInfoMatcher::visit(const ASTIdentifier & identifier, const ASTPtr const auto & table = data.tables[index]; // TODO: make sure no collision ever happens - if (table.hasColumn(identifier.name)) + if (table.hasColumn(identifier.name())) { data.unique_reference_tables_pos.emplace(index); break; diff --git a/src/Interpreters/IdentifierSemantic.cpp b/src/Interpreters/IdentifierSemantic.cpp index 256a3784c77..a1fc533eb7f 100644 --- a/src/Interpreters/IdentifierSemantic.cpp +++ b/src/Interpreters/IdentifierSemantic.cpp @@ -51,7 +51,7 @@ std::optional tryChooseTable(const ASTIdentifier & identifier, const std if ((best_match != ColumnMatch::NoMatch) && same_match) { if (!allow_ambiguous) - throw Exception("Ambiguous column '" + identifier.name + "'", ErrorCodes::AMBIGUOUS_COLUMN_NAME); + throw Exception("Ambiguous column '" + identifier.name() + "'", ErrorCodes::AMBIGUOUS_COLUMN_NAME); best_match = 
ColumnMatch::Ambiguous; return {}; } @@ -66,7 +66,7 @@ std::optional tryChooseTable(const ASTIdentifier & identifier, const std std::optional IdentifierSemantic::getColumnName(const ASTIdentifier & node) { if (!node.semantic->special) - return node.name; + return node.name(); return {}; } @@ -75,14 +75,14 @@ std::optional IdentifierSemantic::getColumnName(const ASTPtr & ast) if (ast) if (const auto * id = ast->as()) if (!id->semantic->special) - return id->name; + return id->name(); return {}; } std::optional IdentifierSemantic::getTableName(const ASTIdentifier & node) { if (node.semantic->special) - return node.name; + return node.name(); return {}; } @@ -91,7 +91,7 @@ std::optional IdentifierSemantic::getTableName(const ASTPtr & ast) if (ast) if (const auto * id = ast->as()) if (id->semantic->special) - return id->name; + return id->name(); return {}; } @@ -151,7 +151,7 @@ StorageID IdentifierSemantic::extractDatabaseAndTable(const ASTIdentifier & iden if (identifier.name_parts.size() == 2) return { identifier.name_parts[0], identifier.name_parts[1], identifier.uuid }; - return { "", identifier.name, identifier.uuid }; + return { "", identifier.name_parts[0], identifier.uuid }; } std::optional IdentifierSemantic::extractNestedName(const ASTIdentifier & identifier, const String & table_name) @@ -232,16 +232,8 @@ void IdentifierSemantic::setColumnShortName(ASTIdentifier & identifier, const Da if (!to_strip) return; - std::vector stripped(identifier.name_parts.begin() + to_strip, identifier.name_parts.end()); - - DB::String new_name; - for (const auto & part : stripped) - { - if (!new_name.empty()) - new_name += '.'; - new_name += part; - } - identifier.name.swap(new_name); + identifier.name_parts = std::vector(identifier.name_parts.begin() + to_strip, identifier.name_parts.end()); + identifier.resetFullName(); } void IdentifierSemantic::setColumnLongName(ASTIdentifier & identifier, const DatabaseAndTableWithAlias & db_and_table) @@ -249,10 +241,11 @@ void IdentifierSemantic::setColumnLongName(ASTIdentifier & identifier, const Dat String prefix = db_and_table.getQualifiedNamePrefix(); if (!prefix.empty()) { - String short_name = identifier.shortName(); - identifier.name = prefix + short_name; prefix.resize(prefix.size() - 1); /// crop dot - identifier.name_parts = {prefix, short_name}; + identifier.name_parts = {prefix, identifier.shortName()}; + identifier.resetFullName(); + identifier.semantic->table = prefix; + identifier.semantic->legacy_compound = true; } } diff --git a/src/Interpreters/IdentifierSemantic.h b/src/Interpreters/IdentifierSemantic.h index ca5a923c2ea..80b55ba0537 100644 --- a/src/Interpreters/IdentifierSemantic.h +++ b/src/Interpreters/IdentifierSemantic.h @@ -10,10 +10,12 @@ namespace DB struct IdentifierSemanticImpl { - bool special = false; /// for now it's 'not a column': tables, subselects and some special stuff like FORMAT - bool can_be_alias = true; /// if it's a cropped name it could not be an alias - bool covered = false; /// real (compound) name is hidden by an alias (short name) - std::optional membership; /// table position in join + bool special = false; /// for now it's 'not a column': tables, subselects and some special stuff like FORMAT + bool can_be_alias = true; /// if it's a cropped name it could not be an alias + bool covered = false; /// real (compound) name is hidden by an alias (short name) + std::optional membership; /// table position in join + String table = {}; /// store table name for columns just to support legacy logic. 
+ bool legacy_compound = false; /// true if the identifier is supposed to comply with the legacy |compound()| behavior }; /// Static class to manipulate IdentifierSemanticImpl via ASTIdentifier diff --git a/src/Interpreters/JoinToSubqueryTransformVisitor.cpp b/src/Interpreters/JoinToSubqueryTransformVisitor.cpp index cdd7ec3ebf9..372bbfbe648 100644 --- a/src/Interpreters/JoinToSubqueryTransformVisitor.cpp +++ b/src/Interpreters/JoinToSubqueryTransformVisitor.cpp @@ -117,7 +117,7 @@ private: throw Exception("Logical error: qualified asterisk must have exactly one child", ErrorCodes::LOGICAL_ERROR); ASTIdentifier & identifier = child->children[0]->as(); - data.addTableColumns(identifier.name); + data.addTableColumns(identifier.name()); } else data.new_select_expression_list->children.push_back(child); @@ -228,7 +228,7 @@ struct CollectColumnIdentifiersMatcher void addIdentirier(const ASTIdentifier & ident) { for (const auto & aliases : ignored) - if (aliases.count(ident.name)) + if (aliases.count(ident.name())) return; identifiers.push_back(const_cast(&ident)); } @@ -293,7 +293,7 @@ struct CheckAliasDependencyVisitorData void visit(ASTIdentifier & ident, ASTPtr &) { - if (!dependency && aliases.count(ident.name)) + if (!dependency && aliases.count(ident.name())) dependency = &ident; } }; @@ -467,7 +467,7 @@ std::vector normalizeColumnNamesExtractNeeded( for (ASTIdentifier * ident : identifiers) { - bool got_alias = aliases.count(ident->name); + bool got_alias = aliases.count(ident->name()); bool allow_ambiguous = got_alias; /// allow ambiguous column overridden by an alias if (auto table_pos = IdentifierSemantic::chooseTableColumnMatch(*ident, tables, allow_ambiguous)) @@ -475,12 +475,12 @@ std::vector normalizeColumnNamesExtractNeeded( if (!ident->isShort()) { if (got_alias) - throw Exception("Alias clashes with qualified column '" + ident->name + "'", ErrorCodes::AMBIGUOUS_COLUMN_NAME); + throw Exception("Alias clashes with qualified column '" + ident->name() + "'", ErrorCodes::AMBIGUOUS_COLUMN_NAME); String short_name = ident->shortName(); String original_long_name; if (public_identifiers.count(ident)) - original_long_name = ident->name; + original_long_name = ident->name(); size_t count = countTablesWithColumn(tables, short_name); @@ -488,7 +488,7 @@ std::vector normalizeColumnNamesExtractNeeded( { const auto & table = tables[*table_pos]; IdentifierSemantic::setColumnLongName(*ident, table.table); /// table.column -> table_alias.column - auto & unique_long_name = ident->name; + const auto & unique_long_name = ident->name(); /// For tables moved into subselects we need unique short names for clashed names if (*table_pos != last_table_pos) @@ -512,7 +512,7 @@ std::vector normalizeColumnNamesExtractNeeded( needed_columns[*table_pos].no_clashes.emplace(ident->shortName()); } else if (!got_alias) - throw Exception("Unknown column name '" + ident->name + "'", ErrorCodes::UNKNOWN_IDENTIFIER); + throw Exception("Unknown column name '" + ident->name() + "'", ErrorCodes::UNKNOWN_IDENTIFIER); } return needed_columns; @@ -613,12 +613,12 @@ void JoinToSubqueryTransformMatcher::visit(ASTSelectQuery & select, ASTPtr & ast { for (auto * ident : on_identifiers) { - auto it = data.aliases.find(ident->name); - if (!on_aliases.count(ident->name) && it != data.aliases.end()) + auto it = data.aliases.find(ident->name()); + if (!on_aliases.count(ident->name()) && it != data.aliases.end()) { auto alias_expression = it->second; alias_pushdown[table_pos].push_back(alias_expression); - on_aliases[ident->name] =
alias_expression; + on_aliases[ident->name()] = alias_expression; } } } @@ -638,14 +638,14 @@ void JoinToSubqueryTransformMatcher::visit(ASTSelectQuery & select, ASTPtr & ast CheckAliasDependencyVisitor(check).visit(expr.second); if (check.dependency) throw Exception("Cannot rewrite JOINs. Alias '" + expr.first + - "' used in ON section depends on another alias '" + check.dependency->name + "'", + "' used in ON section depends on another alias '" + check.dependency->name() + "'", ErrorCodes::NOT_IMPLEMENTED); } /// Check same name in aliases, USING and ON sections. Cannot push down alias to ON through USING cause of name masquerading. for (auto * ident : using_identifiers) - if (on_aliases.count(ident->name)) - throw Exception("Cannot rewrite JOINs. Alias '" + ident->name + "' appears both in ON and USING", ErrorCodes::NOT_IMPLEMENTED); + if (on_aliases.count(ident->name())) + throw Exception("Cannot rewrite JOINs. Alias '" + ident->name() + "' appears both in ON and USING", ErrorCodes::NOT_IMPLEMENTED); using_identifiers.clear(); /// Replace pushdowned expressions with aliases names in original expression lists. diff --git a/src/Interpreters/JoinedTables.cpp b/src/Interpreters/JoinedTables.cpp index d38a3fa68dc..c0511122c1e 100644 --- a/src/Interpreters/JoinedTables.cpp +++ b/src/Interpreters/JoinedTables.cpp @@ -49,7 +49,7 @@ void replaceJoinedTable(const ASTSelectQuery & select_query) if (table_expr.database_and_table_name) { const auto & table_id = table_expr.database_and_table_name->as(); - String expr = "(select * from " + table_id.name + ") as " + table_id.shortName(); + String expr = "(select * from " + table_id.name() + ") as " + table_id.shortName(); // FIXME: since the expression "a as b" exposes both "a" and "b" names, which is not equivalent to "(select * from a) as b", // we can't replace aliased tables. @@ -99,7 +99,7 @@ private: match == IdentifierSemantic::ColumnMatch::DbAndTable) { if (rewritten) - throw Exception("Failed to rewrite distributed table names. Ambiguous column '" + identifier.name + "'", + throw Exception("Failed to rewrite distributed table names. Ambiguous column '" + identifier.name() + "'", ErrorCodes::AMBIGUOUS_COLUMN_NAME); /// Table has an alias. So we set a new name qualified by table alias. IdentifierSemantic::setColumnLongName(identifier, table); @@ -114,10 +114,10 @@ private: bool rewritten = false; for (const auto & table : data) { - if (identifier.name == table.table) + if (identifier.name() == table.table) { if (rewritten) - throw Exception("Failed to rewrite distributed table. Ambiguous column '" + identifier.name + "'", + throw Exception("Failed to rewrite distributed table. Ambiguous column '" + identifier.name() + "'", ErrorCodes::AMBIGUOUS_COLUMN_NAME); identifier.setShortName(table.alias); rewritten = true; diff --git a/src/Interpreters/QueryNormalizer.cpp b/src/Interpreters/QueryNormalizer.cpp index 07d4888b555..3252626959d 100644 --- a/src/Interpreters/QueryNormalizer.cpp +++ b/src/Interpreters/QueryNormalizer.cpp @@ -73,8 +73,8 @@ void QueryNormalizer::visit(ASTIdentifier & node, ASTPtr & ast, Data & data) return; /// If it is an alias, but not a parent alias (for constructs like "SELECT column + 1 AS column"). 
- auto it_alias = data.aliases.find(node.name); - if (it_alias != data.aliases.end() && current_alias != node.name) + auto it_alias = data.aliases.find(node.name()); + if (it_alias != data.aliases.end() && current_alias != node.name()) { if (!IdentifierSemantic::canBeAlias(node)) return; @@ -89,7 +89,7 @@ void QueryNormalizer::visit(ASTIdentifier & node, ASTPtr & ast, Data & data) String node_alias = ast->tryGetAlias(); if (current_asts.count(alias_node.get()) /// We have loop of multiple aliases - || (node.name == our_alias_or_name && our_name && node_alias == *our_name)) /// Our alias points to node.name, direct loop + || (node.name() == our_alias_or_name && our_name && node_alias == *our_name)) /// Our alias points to node.name, direct loop throw Exception("Cyclic aliases", ErrorCodes::CYCLIC_ALIASES); /// Let's replace it with the corresponding tree node. @@ -97,7 +97,7 @@ void QueryNormalizer::visit(ASTIdentifier & node, ASTPtr & ast, Data & data) { /// Avoid infinite recursion here auto opt_name = IdentifierSemantic::getColumnName(alias_node); - bool is_cycle = opt_name && *opt_name == node.name; + bool is_cycle = opt_name && *opt_name == node.name(); if (!is_cycle) { diff --git a/src/Interpreters/RenameColumnVisitor.cpp b/src/Interpreters/RenameColumnVisitor.cpp index f94353cf646..22bbfc7a83f 100644 --- a/src/Interpreters/RenameColumnVisitor.cpp +++ b/src/Interpreters/RenameColumnVisitor.cpp @@ -3,10 +3,13 @@ namespace DB { + void RenameColumnData::visit(ASTIdentifier & identifier, ASTPtr &) const { + // TODO(ilezhankin): make proper rename std::optional identifier_column_name = IdentifierSemantic::getColumnName(identifier); if (identifier_column_name && identifier_column_name == column_name) - identifier.name = rename_to; + identifier.setShortName(rename_to); } + } diff --git a/src/Interpreters/RequiredSourceColumnsData.cpp b/src/Interpreters/RequiredSourceColumnsData.cpp index ec9f2ca4817..9118acc38c9 100644 --- a/src/Interpreters/RequiredSourceColumnsData.cpp +++ b/src/Interpreters/RequiredSourceColumnsData.cpp @@ -27,7 +27,7 @@ void RequiredSourceColumnsData::addColumnIdentifier(const ASTIdentifier & node) /// There should be no complex cases after query normalization. Names to aliases: one-to-many. 
String alias = node.tryGetAlias(); - required_names[node.name].addInclusion(alias); + required_names[node.name()].addInclusion(alias); } bool RequiredSourceColumnsData::addArrayJoinAliasIfAny(const IAST & ast) @@ -42,7 +42,7 @@ bool RequiredSourceColumnsData::addArrayJoinAliasIfAny(const IAST & ast) void RequiredSourceColumnsData::addArrayJoinIdentifier(const ASTIdentifier & node) { - array_join_columns.insert(node.name); + array_join_columns.insert(node.name()); } size_t RequiredSourceColumnsData::nameInclusion(const String & name) const diff --git a/src/Interpreters/RequiredSourceColumnsVisitor.cpp b/src/Interpreters/RequiredSourceColumnsVisitor.cpp index e546a40f28d..5a265b59414 100644 --- a/src/Interpreters/RequiredSourceColumnsVisitor.cpp +++ b/src/Interpreters/RequiredSourceColumnsVisitor.cpp @@ -34,7 +34,7 @@ std::vector RequiredSourceColumnsMatcher::extractNamesFromLambda(const A if (!identifier) throw Exception("lambda argument declarations must be identifiers", ErrorCodes::TYPE_MISMATCH); - names.push_back(identifier->name); + names.push_back(identifier->name()); } return names; @@ -132,10 +132,11 @@ void RequiredSourceColumnsMatcher::visit(const ASTSelectQuery & select, const AS void RequiredSourceColumnsMatcher::visit(const ASTIdentifier & node, const ASTPtr &, Data & data) { - if (node.name.empty()) + // FIXME(ilezhankin): shouldn't ever encounter + if (node.name().empty()) throw Exception("Expected not empty name", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - if (!data.private_aliases.count(node.name)) + if (!data.private_aliases.count(node.name())) data.addColumnIdentifier(node); } diff --git a/src/Interpreters/TranslateQualifiedNamesVisitor.cpp b/src/Interpreters/TranslateQualifiedNamesVisitor.cpp index 32d8841d7b4..98ed2166c40 100644 --- a/src/Interpreters/TranslateQualifiedNamesVisitor.cpp +++ b/src/Interpreters/TranslateQualifiedNamesVisitor.cpp @@ -104,7 +104,7 @@ void TranslateQualifiedNamesMatcher::visit(ASTIdentifier & identifier, ASTPtr &, if (data.unknownColumn(table_pos, identifier)) { String table_name = data.tables[table_pos].table.getQualifiedNamePrefix(false); - throw Exception("There's no column '" + identifier.name + "' in table '" + table_name + "'", + throw Exception("There's no column '" + identifier.name() + "' in table '" + table_name + "'", ErrorCodes::UNKNOWN_IDENTIFIER); } @@ -175,9 +175,12 @@ void TranslateQualifiedNamesMatcher::visit(ASTSelectQuery & select, const ASTPtr static void addIdentifier(ASTs & nodes, const DatabaseAndTableWithAlias & table, const String & column_name) { + std::vector parts = {column_name}; + String table_name = table.getQualifiedNamePrefix(false); - auto identifier = std::make_shared(std::vector{table_name, column_name}); - nodes.emplace_back(identifier); + if (!table_name.empty()) parts.insert(parts.begin(), table_name); + + nodes.emplace_back(std::make_shared(std::move(parts))); } /// Replace *, alias.*, database.table.* with a list of columns. @@ -354,7 +357,7 @@ void RestoreQualifiedNamesMatcher::visit(ASTIdentifier & identifier, ASTPtr &, D { if (IdentifierSemantic::getMembership(identifier)) { - identifier.restoreCompoundName(); + identifier.restoreTable(); // TODO(ilezhankin): should restore qualified name here - why exactly here? 
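For orientation, a minimal standalone sketch of the invariant this commit establishes across the hunks above: |name_parts| is the authoritative representation, and the cached full name is always the dot-joined parts. This is illustrative plain C++ under that assumption — the IdentifierSketch type and its members are hypothetical stand-ins, not ClickHouse code:

#include <cassert>
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for the refactored identifier: the vector of parts
// is primary; the concatenated form is derived and kept in sync.
struct IdentifierSketch
{
    std::vector<std::string> name_parts;
    std::string full_name;

    explicit IdentifierSketch(std::vector<std::string> parts)
        : name_parts(std::move(parts))
    {
        assert(!name_parts.empty());
        resetFullName();
    }

    bool compound() const { return name_parts.size() > 1; }
    const std::string & shortName() const { return name_parts.back(); }
    const std::string & name() const { return full_name; }

    // Mirrors the idea of re-deriving the cached name from the parts.
    void resetFullName()
    {
        full_name = name_parts[0];
        for (size_t i = 1; i < name_parts.size(); ++i)
            full_name += '.' + name_parts[i];
    }
};

int main()
{
    IdentifierSketch id({"db", "table", "column"});
    assert(id.name() == "db.table.column");
    assert(id.shortName() == "column");
    assert(id.compound());
    return 0;
}

Keeping the joined form cached while treating the parts as primary is what lets call sites move from the removed public field to a cheap name() accessor throughout the rest of this patch.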
if (data.rename) data.changeTable(identifier); } diff --git a/src/Interpreters/evaluateConstantExpression.cpp b/src/Interpreters/evaluateConstantExpression.cpp index e1f53c72801..02ef3426483 100644 --- a/src/Interpreters/evaluateConstantExpression.cpp +++ b/src/Interpreters/evaluateConstantExpression.cpp @@ -72,7 +72,7 @@ ASTPtr evaluateConstantExpressionAsLiteral(const ASTPtr & node, const Context & ASTPtr evaluateConstantExpressionOrIdentifierAsLiteral(const ASTPtr & node, const Context & context) { if (const auto * id = node->as()) - return std::make_shared(id->name); + return std::make_shared(id->name()); return evaluateConstantExpressionAsLiteral(node, context); } @@ -113,7 +113,7 @@ namespace const auto & name = name_and_type.name; const auto & type = name_and_type.type; - if (name == identifier->name) + if (name == identifier->name()) { ColumnWithTypeAndName column; Field converted = convertFieldToType(value, *type); diff --git a/src/Interpreters/getClusterName.cpp b/src/Interpreters/getClusterName.cpp index 60040ce4cb0..01e45e1d650 100644 --- a/src/Interpreters/getClusterName.cpp +++ b/src/Interpreters/getClusterName.cpp @@ -19,7 +19,7 @@ namespace ErrorCodes std::string getClusterName(const IAST & node) { if (const auto * ast_id = node.as()) - return ast_id->name; + return ast_id->name(); if (const auto * ast_lit = node.as()) return ast_lit->value.safeGet(); diff --git a/src/Parsers/ASTColumnsTransformers.cpp b/src/Parsers/ASTColumnsTransformers.cpp index 43d54f07ab8..fee606aec26 100644 --- a/src/Parsers/ASTColumnsTransformers.cpp +++ b/src/Parsers/ASTColumnsTransformers.cpp @@ -71,7 +71,7 @@ void ASTColumnsExceptTransformer::transform(ASTs & nodes) const { for (const auto & except_child : children) { - if (except_child->as().name == id->shortName()) + if (except_child->as().name() == id->shortName()) return true; } } diff --git a/src/Parsers/ASTIdentifier.cpp b/src/Parsers/ASTIdentifier.cpp index 9117be46e51..d980300a22a 100644 --- a/src/Parsers/ASTIdentifier.cpp +++ b/src/Parsers/ASTIdentifier.cpp @@ -1,10 +1,10 @@ -#include #include -#include + #include #include #include #include +#include namespace DB @@ -16,6 +16,27 @@ namespace ErrorCodes extern const int SYNTAX_ERROR; } +ASTIdentifier::ASTIdentifier(const String & short_name) + : full_name(short_name), name_parts{short_name}, semantic(std::make_shared()) +{ + assert(!full_name.empty()); +} + +ASTIdentifier::ASTIdentifier(std::vector && name_parts_, bool special) + : name_parts(name_parts_), semantic(std::make_shared()) +{ + assert(!name_parts.empty()); + for (const auto & part [[maybe_unused]] : name_parts) + assert(!part.empty()); + + semantic->special = special; + semantic->legacy_compound = true; + + if (!special && name_parts.size() >= 2) + semantic->table = name_parts.end()[-2]; + + resetFullName(); +} ASTPtr ASTIdentifier::clone() const { @@ -24,51 +45,29 @@ ASTPtr ASTIdentifier::clone() const return ret; } -std::shared_ptr ASTIdentifier::createSpecial(const String & name, std::vector && name_parts) +bool ASTIdentifier::supposedToBeCompound() const { - auto ret = std::make_shared(name, std::move(name_parts)); - ret->semantic->special = true; - return ret; + return semantic->legacy_compound; } -ASTIdentifier::ASTIdentifier(const String & name_, std::vector && name_parts_) - : name(name_) - , name_parts(name_parts_) - , semantic(std::make_shared()) -{ - if (!name_parts.empty() && name_parts[0].empty()) - name_parts.erase(name_parts.begin()); - - if (name.empty()) - { - if (name_parts.size() == 2) - name = name_parts[0] 
+ '.' + name_parts[1]; - else if (name_parts.size() == 1) - name = name_parts[0]; - } -} - -ASTIdentifier::ASTIdentifier(std::vector && name_parts_) - : ASTIdentifier("", std::move(name_parts_)) -{} - void ASTIdentifier::setShortName(const String & new_name) { - name = new_name; - name_parts.clear(); + assert(!new_name.empty()); + + full_name = new_name; + name_parts = {new_name}; bool special = semantic->special; *semantic = IdentifierSemanticImpl(); semantic->special = special; } -void ASTIdentifier::restoreCompoundName() +const String & ASTIdentifier::name() const { - if (name_parts.empty()) - return; - name = name_parts[0]; - for (size_t i = 1; i < name_parts.size(); ++i) - name += '.' + name_parts[i]; + assert(!name_parts.empty()); + assert(!full_name.empty()); + + return full_name; } void ASTIdentifier::formatImplWithoutAlias(const FormatSettings & settings, FormatState &, FormatStateStacked) const @@ -93,20 +92,29 @@ void ASTIdentifier::formatImplWithoutAlias(const FormatSettings & settings, Form } else { - format_element(name); + format_element(shortName()); } } void ASTIdentifier::appendColumnNameImpl(WriteBuffer & ostr) const { - writeString(name, ostr); + writeString(name(), ostr); +} + +void ASTIdentifier::restoreTable() +{ + if (!compound()) + { + name_parts.insert(name_parts.begin(), semantic->table); + resetFullName(); + } } void ASTIdentifier::resetTable(const String & database_name, const String & table_name) { auto ast = createTableIdentifier(database_name, table_name); auto & ident = ast->as(); - name.swap(ident.name); + full_name.swap(ident.full_name); name_parts.swap(ident.name_parts); uuid = ident.uuid; } @@ -117,6 +125,13 @@ void ASTIdentifier::updateTreeHashImpl(SipHash & hash_state) const IAST::updateTreeHashImpl(hash_state); } +void ASTIdentifier::resetFullName() +{ + full_name = name_parts[0]; + for (size_t i = 1; i < name_parts.size(); ++i) + full_name += '.' + name_parts[i]; +} + ASTPtr createTableIdentifier(const String & database_name, const String & table_name) { assert(database_name != "_temporary_and_external_tables"); @@ -127,9 +142,9 @@ ASTPtr createTableIdentifier(const StorageID & table_id) { std::shared_ptr res; if (table_id.database_name.empty()) - res = ASTIdentifier::createSpecial(table_id.table_name); + res = std::make_shared(std::vector{table_id.table_name}, true); else - res = ASTIdentifier::createSpecial(table_id.database_name + "." + table_id.table_name, {table_id.database_name, table_id.table_name}); + res = std::make_shared(std::vector{table_id.database_name, table_id.table_name}, true); res->uuid = table_id.uuid; return res; } @@ -156,7 +171,7 @@ bool tryGetIdentifierNameInto(const IAST * ast, String & name) { if (const auto * node = ast->as()) { - name = node->name; + name = node->name(); return true; } } @@ -180,7 +195,7 @@ StorageID getTableIdentifier(const ASTPtr & ast) if (identifier.name_parts.size() == 2) return { identifier.name_parts[0], identifier.name_parts[1], identifier.uuid }; - return { "", identifier.name, identifier.uuid }; + return { "", identifier.name_parts[0], identifier.uuid }; } } diff --git a/src/Parsers/ASTIdentifier.h b/src/Parsers/ASTIdentifier.h index 5c06fa7fa38..59f698eab1c 100644 --- a/src/Parsers/ASTIdentifier.h +++ b/src/Parsers/ASTIdentifier.h @@ -18,59 +18,54 @@ struct StorageID; class ASTIdentifier : public ASTWithAlias { public: - /// The composite identifier will have a concatenated name (of the form a.b.c), - /// and individual components will be available inside the name_parts. 
- String name; UUID uuid = UUIDHelpers::Nil; - ASTIdentifier(const String & name_, std::vector && name_parts_ = {}); - ASTIdentifier(std::vector && name_parts_); + explicit ASTIdentifier(const String & short_name); + explicit ASTIdentifier(std::vector && name_parts, bool special = false); /** Get the text that identifies this element. */ - String getID(char delim) const override { return "Identifier" + (delim + name); } + String getID(char delim) const override { return "Identifier" + (delim + name()); } ASTPtr clone() const override; - void collectIdentifierNames(IdentifierNameSet & set) const override - { - set.insert(name); - } + void collectIdentifierNames(IdentifierNameSet & set) const override { set.insert(name()); } - bool compound() const { return !name_parts.empty(); } - bool isShort() const { return name_parts.empty() || name == name_parts.back(); } + bool compound() const { return name_parts.size() > 1; } + bool isShort() const { return name_parts.size() == 1; } + bool supposedToBeCompound() const; // TODO(ilezhankin): get rid of this void setShortName(const String & new_name); - /// Restore name field from name_parts in case it was cropped by analyzer but we need a full form for future (re)analyze. - void restoreCompoundName(); + /// The composite identifier will have a concatenated name (of the form a.b.c), + /// and individual components will be available inside the name_parts. + const String & shortName() const { return name_parts.back(); } + const String & name() const; - const String & shortName() const - { - if (!name_parts.empty()) - return name_parts.back(); - return name; - } + void restoreTable(); // TODO(ilezhankin): get rid of this - void resetTable(const String & database_name, const String & table_name); + // FIXME: used only when it's needed to rewrite distributed table name to real remote table name. 
+ void resetTable(const String & database_name, const String & table_name); // TODO(ilezhankin): get rid of this void updateTreeHashImpl(SipHash & hash_state) const override; protected: + String full_name; + std::vector name_parts; + void formatImplWithoutAlias(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override; void appendColumnNameImpl(WriteBuffer & ostr) const override; private: using ASTWithAlias::children; /// ASTIdentifier is child free - std::vector name_parts; std::shared_ptr semantic; /// pimpl - static std::shared_ptr createSpecial(const String & name, std::vector && name_parts = {}); - friend struct IdentifierSemantic; friend ASTPtr createTableIdentifier(const StorageID & table_id); friend void setIdentifierSpecial(ASTPtr & ast); friend StorageID getTableIdentifier(const ASTPtr & ast); + + void resetFullName(); }; diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index b26e73287d0..3c45bd005a9 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -184,16 +184,10 @@ bool ParserCompoundIdentifier::parseImpl(Pos & pos, ASTPtr & node, Expected & ex .parse(pos, id_list, expected)) return false; - String name; std::vector parts; const auto & list = id_list->as(); for (const auto & child : list.children) - { - if (!name.empty()) - name += '.'; parts.emplace_back(getIdentifierName(child)); - name += parts.back(); - } ParserKeyword s_uuid("UUID"); UUID uuid = UUIDHelpers::Nil; @@ -207,9 +201,7 @@ bool ParserCompoundIdentifier::parseImpl(Pos & pos, ASTPtr & node, Expected & ex uuid = parseFromString(ast_uuid->as()->value.get()); } - if (parts.size() == 1) - parts.clear(); - node = std::make_shared(name, std::move(parts)); + node = std::make_shared(std::move(parts)); node->as()->uuid = uuid; return true; @@ -1651,7 +1643,7 @@ bool ParserFunctionWithKeyValueArguments::parseImpl(Pos & pos, ASTPtr & node, Ex } auto function = std::make_shared(left_bracket_found); - function->name = Poco::toLower(typeid_cast(*identifier.get()).name); + function->name = Poco::toLower(identifier->as()->name()); function->elements = expr_list_args; function->children.push_back(function->elements); node = function; diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp index d6678bb9a78..ad03d949174 100644 --- a/src/Parsers/ExpressionListParsers.cpp +++ b/src/Parsers/ExpressionListParsers.cpp @@ -1,13 +1,11 @@ -#include +#include + #include #include -#include -#include -#include -#include #include - -#include +#include +#include +#include #include @@ -750,7 +748,7 @@ bool ParserKeyValuePair::parseImpl(Pos & pos, ASTPtr & node, Expected & expected } auto pair = std::make_shared(with_brackets); - pair->first = Poco::toLower(typeid_cast(*identifier.get()).name); + pair->first = Poco::toLower(identifier->as()->name()); pair->set(pair->second, value); node = pair; return true; diff --git a/src/Parsers/MySQL/ASTDeclareConstraint.cpp b/src/Parsers/MySQL/ASTDeclareConstraint.cpp index 0f447fb3b40..96184dfc89d 100644 --- a/src/Parsers/MySQL/ASTDeclareConstraint.cpp +++ b/src/Parsers/MySQL/ASTDeclareConstraint.cpp @@ -63,7 +63,7 @@ bool ParserDeclareConstraint::parseImpl(IParser::Pos & pos, ASTPtr & node, Expec declare_constraint->check_expression = index_check_expression; if (constraint_symbol) - declare_constraint->constraint_name = constraint_symbol->as()->name; + declare_constraint->constraint_name = constraint_symbol->as()->name(); node = 
declare_constraint; return true; diff --git a/src/Parsers/MySQL/ASTDeclareIndex.cpp b/src/Parsers/MySQL/ASTDeclareIndex.cpp index 8e6e9d43793..c5b4686e976 100644 --- a/src/Parsers/MySQL/ASTDeclareIndex.cpp +++ b/src/Parsers/MySQL/ASTDeclareIndex.cpp @@ -73,7 +73,7 @@ static inline bool parseDeclareOrdinaryIndex(IParser::Pos & pos, String & index_ index_type = "SPATIAL"; if (p_identifier.parse(pos, temp_node, expected)) - index_name = temp_node->as()->name; + index_name = temp_node->as()->name(); } else if (ParserKeyword("FULLTEXT").ignore(pos, expected)) { @@ -82,7 +82,7 @@ static inline bool parseDeclareOrdinaryIndex(IParser::Pos & pos, String & index_ index_type = "FULLTEXT"; if (p_identifier.parse(pos, temp_node, expected)) - index_name = temp_node->as()->name; + index_name = temp_node->as()->name(); } else { @@ -94,14 +94,14 @@ static inline bool parseDeclareOrdinaryIndex(IParser::Pos & pos, String & index_ index_type = "KEY_BTREE"; /// default index type if (p_identifier.parse(pos, temp_node, expected)) - index_name = temp_node->as()->name; + index_name = temp_node->as()->name(); if (ParserKeyword("USING").ignore(pos, expected)) { if (!p_identifier.parse(pos, temp_node, expected)) return false; - index_type = "KEY_" + temp_node->as()->name; + index_type = "KEY_" + temp_node->as()->name(); } } @@ -122,7 +122,7 @@ static inline bool parseDeclareConstraintIndex(IParser::Pos & pos, String & inde if (!p_identifier.parse(pos, temp_node, expected)) return false; - index_name = temp_node->as()->name; + index_name = temp_node->as()->name(); } } @@ -132,7 +132,7 @@ static inline bool parseDeclareConstraintIndex(IParser::Pos & pos, String & inde ParserKeyword("INDEX").ignore(pos, expected); if (p_identifier.parse(pos, temp_node, expected)) - index_name = temp_node->as()->name; /// reset index_name + index_name = temp_node->as()->name(); /// reset index_name index_type = "UNIQUE_BTREE"; /// default btree index_type if (ParserKeyword("USING").ignore(pos, expected)) @@ -140,7 +140,7 @@ static inline bool parseDeclareConstraintIndex(IParser::Pos & pos, String & inde if (!p_identifier.parse(pos, temp_node, expected)) return false; - index_type = "UNIQUE_" + temp_node->as()->name; + index_type = "UNIQUE_" + temp_node->as()->name(); } } else if (ParserKeyword("PRIMARY KEY").ignore(pos, expected)) @@ -151,14 +151,14 @@ static inline bool parseDeclareConstraintIndex(IParser::Pos & pos, String & inde if (!p_identifier.parse(pos, temp_node, expected)) return false; - index_type = "PRIMARY_KEY_" + temp_node->as()->name; + index_type = "PRIMARY_KEY_" + temp_node->as()->name(); } } else if (ParserKeyword("FOREIGN KEY").ignore(pos, expected)) { index_type = "FOREIGN"; if (p_identifier.parse(pos, temp_node, expected)) - index_name = temp_node->as()->name; /// reset index_name + index_name = temp_node->as()->name(); /// reset index_name } return true; diff --git a/src/Parsers/MySQL/ASTDeclarePartition.cpp b/src/Parsers/MySQL/ASTDeclarePartition.cpp index 8e1d27778b5..76f864fcc44 100644 --- a/src/Parsers/MySQL/ASTDeclarePartition.cpp +++ b/src/Parsers/MySQL/ASTDeclarePartition.cpp @@ -107,7 +107,7 @@ bool ParserDeclarePartition::parseImpl(IParser::Pos & pos, ASTPtr & node, Expect partition_declare->less_than = less_than; partition_declare->in_expression = in_expression; partition_declare->subpartitions = subpartitions; - partition_declare->partition_name = partition_name->as()->name; + partition_declare->partition_name = partition_name->as()->name(); if (options) { diff --git 
a/src/Parsers/MySQL/ASTDeclareReference.cpp b/src/Parsers/MySQL/ASTDeclareReference.cpp index 434b9561eda..862d35e2b76 100644 --- a/src/Parsers/MySQL/ASTDeclareReference.cpp +++ b/src/Parsers/MySQL/ASTDeclareReference.cpp @@ -95,7 +95,7 @@ bool ParserDeclareReference::parseImpl(IParser::Pos & pos, ASTPtr & node, Expect declare_reference->on_delete_option = delete_option; declare_reference->on_update_option = update_option; declare_reference->reference_expression = expression; - declare_reference->reference_table_name = table_name->as()->name; + declare_reference->reference_table_name = table_name->as()->name(); node = declare_reference; return true; diff --git a/src/Parsers/MySQL/ASTDeclareSubPartition.cpp b/src/Parsers/MySQL/ASTDeclareSubPartition.cpp index 1b2d9c081e6..d77fba271c4 100644 --- a/src/Parsers/MySQL/ASTDeclareSubPartition.cpp +++ b/src/Parsers/MySQL/ASTDeclareSubPartition.cpp @@ -41,7 +41,7 @@ bool ParserDeclareSubPartition::parseImpl(Pos & pos, ASTPtr & node, Expected & e auto subpartition_declare = std::make_shared(); subpartition_declare->options = options; - subpartition_declare->logical_name = logical_name->as()->name; + subpartition_declare->logical_name = logical_name->as()->name(); if (options) { diff --git a/src/Parsers/MySQL/tests/gtest_column_parser.cpp b/src/Parsers/MySQL/tests/gtest_column_parser.cpp index ef6371f71d9..de4c64be817 100644 --- a/src/Parsers/MySQL/tests/gtest_column_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_column_parser.cpp @@ -29,9 +29,9 @@ TEST(ParserColumn, AllNonGeneratedColumnOption) EXPECT_EQ(declare_options->changes["unique_key"]->as()->value.safeGet(), 1); EXPECT_EQ(declare_options->changes["primary_key"]->as()->value.safeGet(), 1); EXPECT_EQ(declare_options->changes["comment"]->as()->value.safeGet(), "column comment"); - EXPECT_EQ(declare_options->changes["collate"]->as()->name, "utf-8"); - EXPECT_EQ(declare_options->changes["column_format"]->as()->name, "FIXED"); - EXPECT_EQ(declare_options->changes["storage"]->as()->name, "MEMORY"); + EXPECT_EQ(declare_options->changes["collate"]->as()->name(), "utf-8"); + EXPECT_EQ(declare_options->changes["column_format"]->as()->name(), "FIXED"); + EXPECT_EQ(declare_options->changes["storage"]->as()->name(), "MEMORY"); EXPECT_TRUE(declare_options->changes["reference"]->as()); EXPECT_TRUE(declare_options->changes["constraint"]->as()); } @@ -52,7 +52,7 @@ TEST(ParserColumn, AllGeneratedColumnOption) EXPECT_EQ(declare_options->changes["unique_key"]->as()->value.safeGet(), 1); EXPECT_EQ(declare_options->changes["primary_key"]->as()->value.safeGet(), 1); EXPECT_EQ(declare_options->changes["comment"]->as()->value.safeGet(), "column comment"); - EXPECT_EQ(declare_options->changes["collate"]->as()->name, "utf-8"); + EXPECT_EQ(declare_options->changes["collate"]->as()->name(), "utf-8"); EXPECT_EQ(declare_options->changes["generated"]->as()->value.safeGet(), 1); EXPECT_EQ(declare_options->changes["is_stored"]->as()->value.safeGet(), 1); EXPECT_TRUE(declare_options->changes["reference"]->as()); diff --git a/src/Parsers/MySQL/tests/gtest_constraint_parser.cpp b/src/Parsers/MySQL/tests/gtest_constraint_parser.cpp index de885bf36c8..9c9124c9f58 100644 --- a/src/Parsers/MySQL/tests/gtest_constraint_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_constraint_parser.cpp @@ -18,7 +18,7 @@ TEST(ParserConstraint, CheckConstraint) EXPECT_EQ(ast_constraint_01->as()->constraint_name, "symbol_name"); auto * check_expression_01 = ast_constraint_01->as()->check_expression->as(); EXPECT_EQ(check_expression_01->name, 
"equals"); - EXPECT_EQ(check_expression_01->arguments->children[0]->as()->name, "col_01"); + EXPECT_EQ(check_expression_01->arguments->children[0]->as()->name(), "col_01"); EXPECT_EQ(check_expression_01->arguments->children[1]->as()->value.safeGet(), 1); String constraint_02 = "CONSTRAINT CHECK col_01 = 1"; @@ -26,7 +26,7 @@ TEST(ParserConstraint, CheckConstraint) EXPECT_EQ(ast_constraint_02->as()->constraint_name, ""); auto * check_expression_02 = ast_constraint_02->as()->check_expression->as(); EXPECT_EQ(check_expression_02->name, "equals"); - EXPECT_EQ(check_expression_02->arguments->children[0]->as()->name, "col_01"); + EXPECT_EQ(check_expression_02->arguments->children[0]->as()->name(), "col_01"); EXPECT_EQ(check_expression_02->arguments->children[1]->as()->value.safeGet(), 1); String constraint_03 = "CHECK col_01 = 1"; @@ -34,7 +34,7 @@ TEST(ParserConstraint, CheckConstraint) EXPECT_EQ(ast_constraint_03->as()->constraint_name, ""); auto * check_expression_03 = ast_constraint_03->as()->check_expression->as(); EXPECT_EQ(check_expression_03->name, "equals"); - EXPECT_EQ(check_expression_03->arguments->children[0]->as()->name, "col_01"); + EXPECT_EQ(check_expression_03->arguments->children[0]->as()->name(), "col_01"); EXPECT_EQ(check_expression_03->arguments->children[1]->as()->value.safeGet(), 1); String constraint_04 = "CONSTRAINT CHECK col_01 = 1 ENFORCED"; @@ -43,7 +43,7 @@ TEST(ParserConstraint, CheckConstraint) EXPECT_EQ(ast_constraint_04->as()->constraint_name, ""); auto * check_expression_04 = ast_constraint_04->as()->check_expression->as(); EXPECT_EQ(check_expression_04->name, "equals"); - EXPECT_EQ(check_expression_04->arguments->children[0]->as()->name, "col_01"); + EXPECT_EQ(check_expression_04->arguments->children[0]->as()->name(), "col_01"); EXPECT_EQ(check_expression_04->arguments->children[1]->as()->value.safeGet(), 1); String constraint_05 = "CONSTRAINT CHECK col_01 = 1 NOT ENFORCED"; @@ -52,6 +52,6 @@ TEST(ParserConstraint, CheckConstraint) EXPECT_EQ(ast_constraint_05->as()->constraint_name, ""); auto * check_expression_05 = ast_constraint_05->as()->check_expression->as(); EXPECT_EQ(check_expression_05->name, "equals"); - EXPECT_EQ(check_expression_05->arguments->children[0]->as()->name, "col_01"); + EXPECT_EQ(check_expression_05->arguments->children[0]->as()->name(), "col_01"); EXPECT_EQ(check_expression_05->arguments->children[1]->as()->value.safeGet(), 1); } diff --git a/src/Parsers/MySQL/tests/gtest_create_parser.cpp b/src/Parsers/MySQL/tests/gtest_create_parser.cpp index 92c0070aa88..1aaba8d67e4 100644 --- a/src/Parsers/MySQL/tests/gtest_create_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_create_parser.cpp @@ -28,7 +28,7 @@ TEST(CreateTableParser, SimpleCreate) EXPECT_EQ(ast->as()->columns_list->as()->columns->children.size(), 1); EXPECT_EQ(ast->as()->columns_list->as()->indices->children.size(), 1); EXPECT_EQ(ast->as()->columns_list->as()->constraints->children.size(), 1); - EXPECT_EQ(ast->as()->table_options->as()->changes["engine"]->as()->name, "INNODB"); + EXPECT_EQ(ast->as()->table_options->as()->changes["engine"]->as()->name(), "INNODB"); EXPECT_TRUE(ast->as()->partition_options->as()); } diff --git a/src/Parsers/MySQL/tests/gtest_index_parser.cpp b/src/Parsers/MySQL/tests/gtest_index_parser.cpp index 02b3b10acff..a8be6787b2c 100644 --- a/src/Parsers/MySQL/tests/gtest_index_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_index_parser.cpp @@ -17,13 +17,13 @@ TEST(ParserIndex, AllIndexOptions) ASTPtr ast = parseQuery(p_index, input.data(), input.data() + 
input.size(), "", 0, 0); ASTDeclareIndex * declare_index = ast->as(); - EXPECT_EQ(declare_index->index_columns->children[0]->as()->name, "col_01"); + EXPECT_EQ(declare_index->index_columns->children[0]->as()->name(), "col_01"); EXPECT_EQ(declare_index->index_columns->children[1]->as()->name, "col_02"); EXPECT_EQ(declare_index->index_columns->children[1]->as()->arguments->children[0]->as()->value.safeGet(), 100); - EXPECT_EQ(declare_index->index_columns->children[2]->as()->name, "col_03"); + EXPECT_EQ(declare_index->index_columns->children[2]->as()->name(), "col_03"); ASTDeclareOptions * declare_options = declare_index->index_options->as(); EXPECT_EQ(declare_options->changes["key_block_size"]->as()->value.safeGet(), 3); - EXPECT_EQ(declare_options->changes["index_type"]->as()->name, "HASH"); + EXPECT_EQ(declare_options->changes["index_type"]->as()->name(), "HASH"); EXPECT_EQ(declare_options->changes["comment"]->as()->value.safeGet(), "index comment"); EXPECT_EQ(declare_options->changes["visible"]->as()->value.safeGet(), 1); } @@ -36,12 +36,12 @@ TEST(ParserIndex, OptionalIndexOptions) ASTPtr ast = parseQuery(p_index, input.data(), input.data() + input.size(), "", 0, 0); ASTDeclareIndex * declare_index = ast->as(); - EXPECT_EQ(declare_index->index_columns->children[0]->as()->name, "col_01"); + EXPECT_EQ(declare_index->index_columns->children[0]->as()->name(), "col_01"); EXPECT_EQ(declare_index->index_columns->children[1]->as()->name, "col_02"); EXPECT_EQ(declare_index->index_columns->children[1]->as()->arguments->children[0]->as()->value.safeGet(), 100); - EXPECT_EQ(declare_index->index_columns->children[2]->as()->name, "col_03"); + EXPECT_EQ(declare_index->index_columns->children[2]->as()->name(), "col_03"); ASTDeclareOptions * declare_options = declare_index->index_options->as(); - EXPECT_EQ(declare_options->changes["index_type"]->as()->name, "HASH"); + EXPECT_EQ(declare_options->changes["index_type"]->as()->name(), "HASH"); EXPECT_EQ(declare_options->changes["visible"]->as()->value.safeGet(), 0); EXPECT_EQ(declare_options->changes["key_block_size"]->as()->value.safeGet(), 3); } diff --git a/src/Parsers/MySQL/tests/gtest_partition_options_parser.cpp b/src/Parsers/MySQL/tests/gtest_partition_options_parser.cpp index 1651efcb966..01b757e5891 100644 --- a/src/Parsers/MySQL/tests/gtest_partition_options_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_partition_options_parser.cpp @@ -18,14 +18,14 @@ TEST(ParserPartitionOptions, HashPatitionOptions) ASTDeclarePartitionOptions * declare_partition_options_01 = ast_01->as(); EXPECT_EQ(declare_partition_options_01->partition_type, "hash"); - EXPECT_EQ(declare_partition_options_01->partition_expression->as()->name, "col_01"); + EXPECT_EQ(declare_partition_options_01->partition_expression->as()->name(), "col_01"); String linear_hash_partition = "PARTITION BY LINEAR HASH(col_01)"; ASTPtr ast_02 = parseQuery(p_partition_options, linear_hash_partition.data(), linear_hash_partition.data() + linear_hash_partition.size(), "", 0, 0); ASTDeclarePartitionOptions * declare_partition_options_02 = ast_02->as(); EXPECT_EQ(declare_partition_options_02->partition_type, "linear_hash"); - EXPECT_EQ(declare_partition_options_02->partition_expression->as()->name, "col_01"); + EXPECT_EQ(declare_partition_options_02->partition_expression->as()->name(), "col_01"); } TEST(ParserPartitionOptions, KeyPatitionOptions) @@ -37,7 +37,7 @@ TEST(ParserPartitionOptions, KeyPatitionOptions) ASTDeclarePartitionOptions * declare_partition_options_01 = ast_01->as(); 
EXPECT_EQ(declare_partition_options_01->partition_type, "key"); - EXPECT_EQ(declare_partition_options_01->partition_expression->as()->name, "col_01"); + EXPECT_EQ(declare_partition_options_01->partition_expression->as()->name(), "col_01"); String linear_key_partition = "PARTITION BY LINEAR KEY(col_01, col_02)"; ASTPtr ast_02 = parseQuery(p_partition_options, linear_key_partition.data(), linear_key_partition.data() + linear_key_partition.size(), "", 0, 0); @@ -45,15 +45,15 @@ TEST(ParserPartitionOptions, KeyPatitionOptions) ASTDeclarePartitionOptions * declare_partition_options_02 = ast_02->as(); EXPECT_EQ(declare_partition_options_02->partition_type, "linear_key"); ASTPtr columns_list = declare_partition_options_02->partition_expression->as()->arguments; - EXPECT_EQ(columns_list->children[0]->as()->name, "col_01"); - EXPECT_EQ(columns_list->children[1]->as()->name, "col_02"); + EXPECT_EQ(columns_list->children[0]->as()->name(), "col_01"); + EXPECT_EQ(columns_list->children[1]->as()->name(), "col_02"); String key_partition_with_algorithm = "PARTITION BY KEY ALGORITHM=1 (col_01)"; ASTPtr ast_03 = parseQuery(p_partition_options, key_partition_with_algorithm.data(), key_partition_with_algorithm.data() + key_partition_with_algorithm.size(), "", 0, 0); ASTDeclarePartitionOptions * declare_partition_options_03 = ast_03->as(); EXPECT_EQ(declare_partition_options_03->partition_type, "key_1"); - EXPECT_EQ(declare_partition_options_03->partition_expression->as()->name, "col_01"); + EXPECT_EQ(declare_partition_options_03->partition_expression->as()->name(), "col_01"); } TEST(ParserPartitionOptions, RangePatitionOptions) @@ -65,7 +65,7 @@ TEST(ParserPartitionOptions, RangePatitionOptions) ASTDeclarePartitionOptions * declare_partition_options_01 = ast_01->as(); EXPECT_EQ(declare_partition_options_01->partition_type, "range"); - EXPECT_EQ(declare_partition_options_01->partition_expression->as()->name, "col_01"); + EXPECT_EQ(declare_partition_options_01->partition_expression->as()->name(), "col_01"); String range_columns_partition = "PARTITION BY RANGE COLUMNS(col_01, col_02)"; ASTPtr ast_02 = parseQuery(p_partition_options, range_columns_partition.data(), range_columns_partition.data() + range_columns_partition.size(), "", 0, 0); @@ -73,8 +73,8 @@ TEST(ParserPartitionOptions, RangePatitionOptions) ASTDeclarePartitionOptions * declare_partition_options_02 = ast_02->as(); EXPECT_EQ(declare_partition_options_02->partition_type, "range"); ASTPtr columns_list = declare_partition_options_02->partition_expression->as()->arguments; - EXPECT_EQ(columns_list->children[0]->as()->name, "col_01"); - EXPECT_EQ(columns_list->children[1]->as()->name, "col_02"); + EXPECT_EQ(columns_list->children[0]->as()->name(), "col_01"); + EXPECT_EQ(columns_list->children[1]->as()->name(), "col_02"); } TEST(ParserPartitionOptions, ListPatitionOptions) @@ -86,7 +86,7 @@ TEST(ParserPartitionOptions, ListPatitionOptions) ASTDeclarePartitionOptions * declare_partition_options_01 = ast_01->as(); EXPECT_EQ(declare_partition_options_01->partition_type, "list"); - EXPECT_EQ(declare_partition_options_01->partition_expression->as()->name, "col_01"); + EXPECT_EQ(declare_partition_options_01->partition_expression->as()->name(), "col_01"); String range_columns_partition = "PARTITION BY LIST COLUMNS(col_01, col_02)"; ASTPtr ast_02 = parseQuery(p_partition_options, range_columns_partition.data(), range_columns_partition.data() + range_columns_partition.size(), "", 0, 0); @@ -94,8 +94,8 @@ TEST(ParserPartitionOptions, ListPatitionOptions) 
ASTDeclarePartitionOptions * declare_partition_options_02 = ast_02->as(); EXPECT_EQ(declare_partition_options_02->partition_type, "list"); ASTPtr columns_list = declare_partition_options_02->partition_expression->as()->arguments; - EXPECT_EQ(columns_list->children[0]->as()->name, "col_01"); - EXPECT_EQ(columns_list->children[1]->as()->name, "col_02"); + EXPECT_EQ(columns_list->children[0]->as()->name(), "col_01"); + EXPECT_EQ(columns_list->children[1]->as()->name(), "col_02"); } TEST(ParserPartitionOptions, PatitionNumberOptions) @@ -107,7 +107,7 @@ TEST(ParserPartitionOptions, PatitionNumberOptions) ASTDeclarePartitionOptions * declare_partition_options = ast->as(); EXPECT_EQ(declare_partition_options->partition_type, "key"); - EXPECT_EQ(declare_partition_options->partition_expression->as()->name, "col_01"); + EXPECT_EQ(declare_partition_options->partition_expression->as()->name(), "col_01"); EXPECT_EQ(declare_partition_options->partition_numbers->as()->value.safeGet(), 2); } @@ -120,10 +120,10 @@ TEST(ParserPartitionOptions, PatitionWithSubpartitionOptions) ASTDeclarePartitionOptions * declare_partition_options = ast->as(); EXPECT_EQ(declare_partition_options->partition_type, "key"); - EXPECT_EQ(declare_partition_options->partition_expression->as()->name, "col_01"); + EXPECT_EQ(declare_partition_options->partition_expression->as()->name(), "col_01"); EXPECT_EQ(declare_partition_options->partition_numbers->as()->value.safeGet(), 3); EXPECT_EQ(declare_partition_options->subpartition_type, "hash"); - EXPECT_EQ(declare_partition_options->subpartition_expression->as()->name, "col_02"); + EXPECT_EQ(declare_partition_options->subpartition_expression->as()->name(), "col_02"); EXPECT_EQ(declare_partition_options->subpartition_numbers->as()->value.safeGet(), 4); } @@ -138,10 +138,10 @@ TEST(ParserPartitionOptions, PatitionOptionsWithDeclarePartition) ASTDeclarePartitionOptions * declare_partition_options = ast->as(); EXPECT_EQ(declare_partition_options->partition_type, "key"); - EXPECT_EQ(declare_partition_options->partition_expression->as()->name, "col_01"); + EXPECT_EQ(declare_partition_options->partition_expression->as()->name(), "col_01"); EXPECT_EQ(declare_partition_options->partition_numbers->as()->value.safeGet(), 3); EXPECT_EQ(declare_partition_options->subpartition_type, "hash"); - EXPECT_EQ(declare_partition_options->subpartition_expression->as()->name, "col_02"); + EXPECT_EQ(declare_partition_options->subpartition_expression->as()->name(), "col_02"); EXPECT_EQ(declare_partition_options->subpartition_numbers->as()->value.safeGet(), 4); EXPECT_TRUE(declare_partition_options->declare_partitions->as()->children[0]->as()); } @@ -157,10 +157,10 @@ TEST(ParserPartitionOptions, PatitionOptionsWithDeclarePartitions) ASTDeclarePartitionOptions * declare_partition_options = ast->as(); EXPECT_EQ(declare_partition_options->partition_type, "key"); - EXPECT_EQ(declare_partition_options->partition_expression->as()->name, "col_01"); + EXPECT_EQ(declare_partition_options->partition_expression->as()->name(), "col_01"); EXPECT_EQ(declare_partition_options->partition_numbers->as()->value.safeGet(), 3); EXPECT_EQ(declare_partition_options->subpartition_type, "hash"); - EXPECT_EQ(declare_partition_options->subpartition_expression->as()->name, "col_02"); + EXPECT_EQ(declare_partition_options->subpartition_expression->as()->name(), "col_02"); EXPECT_EQ(declare_partition_options->subpartition_numbers->as()->value.safeGet(), 4); EXPECT_TRUE(declare_partition_options->declare_partitions->as()->children[0]->as()); 
EXPECT_TRUE(declare_partition_options->declare_partitions->as()->children[1]->as()); diff --git a/src/Parsers/MySQL/tests/gtest_partition_parser.cpp b/src/Parsers/MySQL/tests/gtest_partition_parser.cpp index 48e8a9f53c6..458c7acd553 100644 --- a/src/Parsers/MySQL/tests/gtest_partition_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_partition_parser.cpp @@ -22,13 +22,13 @@ TEST(ParserPartition, AllPatitionOptions) ASTDeclarePartition * declare_partition = ast->as(); EXPECT_EQ(declare_partition->partition_name, "partition_name"); ASTDeclareOptions * declare_options = declare_partition->options->as(); - EXPECT_EQ(declare_options->changes["engine"]->as()->name, "engine_name"); + EXPECT_EQ(declare_options->changes["engine"]->as()->name(), "engine_name"); EXPECT_EQ(declare_options->changes["comment"]->as()->value.safeGet(), "partition comment"); EXPECT_EQ(declare_options->changes["data_directory"]->as()->value.safeGet(), "data_directory"); EXPECT_EQ(declare_options->changes["index_directory"]->as()->value.safeGet(), "index_directory"); EXPECT_EQ(declare_options->changes["min_rows"]->as()->value.safeGet(), 0); EXPECT_EQ(declare_options->changes["max_rows"]->as()->value.safeGet(), 1000); - EXPECT_EQ(declare_options->changes["tablespace"]->as()->name, "table_space_name"); + EXPECT_EQ(declare_options->changes["tablespace"]->as()->name(), "table_space_name"); } TEST(ParserPartition, OptionalPatitionOptions) @@ -40,10 +40,10 @@ TEST(ParserPartition, OptionalPatitionOptions) ASTDeclarePartition * declare_partition = ast->as(); EXPECT_EQ(declare_partition->partition_name, "partition_name"); ASTDeclareOptions * declare_options = declare_partition->options->as(); - EXPECT_EQ(declare_options->changes["engine"]->as()->name, "engine_name"); + EXPECT_EQ(declare_options->changes["engine"]->as()->name(), "engine_name"); EXPECT_EQ(declare_options->changes["min_rows"]->as()->value.safeGet(), 0); EXPECT_EQ(declare_options->changes["max_rows"]->as()->value.safeGet(), 1000); - EXPECT_EQ(declare_options->changes["tablespace"]->as()->name, "table_space_name"); + EXPECT_EQ(declare_options->changes["tablespace"]->as()->name(), "table_space_name"); } TEST(ParserPartition, PatitionOptionsWithLessThan) @@ -56,16 +56,16 @@ TEST(ParserPartition, PatitionOptionsWithLessThan) EXPECT_EQ(declare_partition_01->partition_name, "partition_01"); EXPECT_EQ(declare_partition_01->less_than->as()->value.safeGet(), 1991); ASTDeclareOptions * declare_options_01 = declare_partition_01->options->as(); - EXPECT_EQ(declare_options_01->changes["engine"]->as()->name, "engine_name"); + EXPECT_EQ(declare_options_01->changes["engine"]->as()->name(), "engine_name"); String partition_02 = "PARTITION partition_02 VALUES LESS THAN MAXVALUE STORAGE engine = engine_name"; ASTPtr ast_partition_02 = parseQuery(p_partition, partition_02.data(), partition_02.data() + partition_02.size(), "", 0, 0); ASTDeclarePartition * declare_partition_02 = ast_partition_02->as(); EXPECT_EQ(declare_partition_02->partition_name, "partition_02"); - EXPECT_EQ(declare_partition_02->less_than->as()->name, "MAXVALUE"); + EXPECT_EQ(declare_partition_02->less_than->as()->name(), "MAXVALUE"); ASTDeclareOptions * declare_options_02 = declare_partition_02->options->as(); - EXPECT_EQ(declare_options_02->changes["engine"]->as()->name, "engine_name"); + EXPECT_EQ(declare_options_02->changes["engine"]->as()->name(), "engine_name"); String partition_03 = "PARTITION partition_03 VALUES LESS THAN (50, MAXVALUE) STORAGE engine = engine_name"; ASTPtr ast_partition_03 = parseQuery(p_partition, 
partition_03.data(), partition_03.data() + partition_03.size(), "", 0, 0); @@ -74,9 +74,9 @@ TEST(ParserPartition, PatitionOptionsWithLessThan) EXPECT_EQ(declare_partition_03->partition_name, "partition_03"); ASTPtr declare_partition_03_argument = declare_partition_03->less_than->as()->arguments; EXPECT_EQ(declare_partition_03_argument->children[0]->as()->value.safeGet(), 50); - EXPECT_EQ(declare_partition_03_argument->children[1]->as()->name, "MAXVALUE"); + EXPECT_EQ(declare_partition_03_argument->children[1]->as()->name(), "MAXVALUE"); ASTDeclareOptions * declare_options_03 = declare_partition_03->options->as(); - EXPECT_EQ(declare_options_03->changes["engine"]->as()->name, "engine_name"); + EXPECT_EQ(declare_options_03->changes["engine"]->as()->name(), "engine_name"); String partition_04 = "PARTITION partition_04 VALUES LESS THAN (MAXVALUE, MAXVALUE) STORAGE engine = engine_name"; ASTPtr ast_partition_04 = parseQuery(p_partition, partition_04.data(), partition_04.data() + partition_04.size(), "", 0, 0); @@ -84,10 +84,10 @@ TEST(ParserPartition, PatitionOptionsWithLessThan) ASTDeclarePartition * declare_partition_04 = ast_partition_04->as(); EXPECT_EQ(declare_partition_04->partition_name, "partition_04"); ASTPtr declare_partition_04_argument = declare_partition_04->less_than->as()->arguments; - EXPECT_EQ(declare_partition_04_argument->children[0]->as()->name, "MAXVALUE"); - EXPECT_EQ(declare_partition_04_argument->children[1]->as()->name, "MAXVALUE"); + EXPECT_EQ(declare_partition_04_argument->children[0]->as()->name(), "MAXVALUE"); + EXPECT_EQ(declare_partition_04_argument->children[1]->as()->name(), "MAXVALUE"); ASTDeclareOptions * declare_options_04 = declare_partition_04->options->as(); - EXPECT_EQ(declare_options_04->changes["engine"]->as()->name, "engine_name"); + EXPECT_EQ(declare_options_04->changes["engine"]->as()->name(), "engine_name"); } TEST(ParserPartition, PatitionOptionsWithInExpression) @@ -101,9 +101,9 @@ TEST(ParserPartition, PatitionOptionsWithInExpression) ASTPtr declare_partition_01_argument = declare_partition_01->in_expression->as()->arguments; EXPECT_TRUE(declare_partition_01_argument->children[0]->as()->value.isNull()); EXPECT_EQ(declare_partition_01_argument->children[1]->as()->value.safeGet(), 1991); - EXPECT_EQ(declare_partition_01_argument->children[2]->as()->name, "MAXVALUE"); + EXPECT_EQ(declare_partition_01_argument->children[2]->as()->name(), "MAXVALUE"); ASTDeclareOptions * declare_options_01 = declare_partition_01->options->as(); - EXPECT_EQ(declare_options_01->changes["engine"]->as()->name, "engine_name"); + EXPECT_EQ(declare_options_01->changes["engine"]->as()->name(), "engine_name"); String partition_02 = "PARTITION partition_02 VALUES IN ((NULL, 1991), (1991, NULL), (MAXVALUE, MAXVALUE)) STORAGE engine = engine_name"; ASTPtr ast_partition_02 = parseQuery(p_partition, partition_02.data(), partition_02.data() + partition_02.size(), "", 0, 0); @@ -121,11 +121,11 @@ TEST(ParserPartition, PatitionOptionsWithInExpression) EXPECT_TRUE(argument_02->as()->value.safeGet()[1].isNull()); ASTPtr argument_03 = declare_partition_02_argument->children[2]->as()->arguments; - EXPECT_EQ(argument_03->as()->children[0]->as()->name, "MAXVALUE"); - EXPECT_EQ(argument_03->as()->children[1]->as()->name, "MAXVALUE"); + EXPECT_EQ(argument_03->as()->children[0]->as()->name(), "MAXVALUE"); + EXPECT_EQ(argument_03->as()->children[1]->as()->name(), "MAXVALUE"); ASTDeclareOptions * declare_options_02 = declare_partition_02->options->as(); - 
EXPECT_EQ(declare_options_02->changes["engine"]->as()->name, "engine_name"); + EXPECT_EQ(declare_options_02->changes["engine"]->as()->name(), "engine_name"); } TEST(ParserPartition, PatitionOptionsWithSubpartitions) diff --git a/src/Parsers/MySQL/tests/gtest_reference_parser.cpp b/src/Parsers/MySQL/tests/gtest_reference_parser.cpp index 694558b9cc3..7447f16fc7c 100644 --- a/src/Parsers/MySQL/tests/gtest_reference_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_reference_parser.cpp @@ -14,14 +14,14 @@ TEST(ParserReference, SimpleReference) String reference_01 = "REFERENCES table_name (ref_col_01)"; ASTPtr ast_reference_01 = parseQuery(p_reference, reference_01.data(), reference_01.data() + reference_01.size(), "", 0, 0); EXPECT_EQ(ast_reference_01->as()->reference_table_name, "table_name"); - EXPECT_EQ(ast_reference_01->as()->reference_expression->as()->name, "ref_col_01"); + EXPECT_EQ(ast_reference_01->as()->reference_expression->as()->name(), "ref_col_01"); String reference_02 = "REFERENCES table_name (ref_col_01, ref_col_02)"; ASTPtr ast_reference_02 = parseQuery(p_reference, reference_02.data(), reference_02.data() + reference_02.size(), "", 0, 0); EXPECT_EQ(ast_reference_02->as()->reference_table_name, "table_name"); ASTPtr arguments = ast_reference_02->as()->reference_expression->as()->arguments; - EXPECT_EQ(arguments->children[0]->as()->name, "ref_col_01"); - EXPECT_EQ(arguments->children[1]->as()->name, "ref_col_02"); + EXPECT_EQ(arguments->children[0]->as()->name(), "ref_col_01"); + EXPECT_EQ(arguments->children[1]->as()->name(), "ref_col_02"); } TEST(ParserReference, ReferenceDifferenceKind) @@ -30,19 +30,19 @@ TEST(ParserReference, ReferenceDifferenceKind) String reference_01 = "REFERENCES table_name (ref_col_01) MATCH FULL"; ASTPtr ast_reference_01 = parseQuery(p_reference, reference_01.data(), reference_01.data() + reference_01.size(), "", 0, 0); EXPECT_EQ(ast_reference_01->as()->reference_table_name, "table_name"); - EXPECT_EQ(ast_reference_01->as()->reference_expression->as()->name, "ref_col_01"); + EXPECT_EQ(ast_reference_01->as()->reference_expression->as()->name(), "ref_col_01"); EXPECT_EQ(ast_reference_01->as()->kind, ASTDeclareReference::MATCH_FULL); String reference_02 = "REFERENCES table_name (ref_col_01) MATCH PARTIAL"; ASTPtr ast_reference_02 = parseQuery(p_reference, reference_02.data(), reference_02.data() + reference_02.size(), "", 0, 0); EXPECT_EQ(ast_reference_02->as()->reference_table_name, "table_name"); - EXPECT_EQ(ast_reference_02->as()->reference_expression->as()->name, "ref_col_01"); + EXPECT_EQ(ast_reference_02->as()->reference_expression->as()->name(), "ref_col_01"); EXPECT_EQ(ast_reference_02->as()->kind, ASTDeclareReference::MATCH_PARTIAL); String reference_03 = "REFERENCES table_name (ref_col_01) MATCH SIMPLE"; ASTPtr ast_reference_03 = parseQuery(p_reference, reference_03.data(), reference_03.data() + reference_03.size(), "", 0, 0); EXPECT_EQ(ast_reference_03->as()->reference_table_name, "table_name"); - EXPECT_EQ(ast_reference_03->as()->reference_expression->as()->name, "ref_col_01"); + EXPECT_EQ(ast_reference_03->as()->reference_expression->as()->name(), "ref_col_01"); EXPECT_EQ(ast_reference_03->as()->kind, ASTDeclareReference::MATCH_SIMPLE); } @@ -52,7 +52,7 @@ TEST(ParserReference, ReferenceDifferenceOption) String reference_01 = "REFERENCES table_name (ref_col_01) MATCH FULL ON DELETE RESTRICT ON UPDATE RESTRICT"; ASTPtr ast_reference_01 = parseQuery(p_reference, reference_01.data(), reference_01.data() + reference_01.size(), "", 0, 0); 
EXPECT_EQ(ast_reference_01->as()->reference_table_name, "table_name"); - EXPECT_EQ(ast_reference_01->as()->reference_expression->as()->name, "ref_col_01"); + EXPECT_EQ(ast_reference_01->as()->reference_expression->as()->name(), "ref_col_01"); EXPECT_EQ(ast_reference_01->as()->kind, ASTDeclareReference::MATCH_FULL); EXPECT_EQ(ast_reference_01->as()->on_delete_option, ASTDeclareReference::RESTRICT); EXPECT_EQ(ast_reference_01->as()->on_update_option, ASTDeclareReference::RESTRICT); @@ -60,7 +60,7 @@ TEST(ParserReference, ReferenceDifferenceOption) String reference_02 = "REFERENCES table_name (ref_col_01) MATCH FULL ON DELETE CASCADE ON UPDATE CASCADE"; ASTPtr ast_reference_02 = parseQuery(p_reference, reference_02.data(), reference_02.data() + reference_02.size(), "", 0, 0); EXPECT_EQ(ast_reference_02->as()->reference_table_name, "table_name"); - EXPECT_EQ(ast_reference_02->as()->reference_expression->as()->name, "ref_col_01"); + EXPECT_EQ(ast_reference_02->as()->reference_expression->as()->name(), "ref_col_01"); EXPECT_EQ(ast_reference_02->as()->kind, ASTDeclareReference::MATCH_FULL); EXPECT_EQ(ast_reference_02->as()->on_delete_option, ASTDeclareReference::CASCADE); EXPECT_EQ(ast_reference_02->as()->on_update_option, ASTDeclareReference::CASCADE); @@ -68,7 +68,7 @@ TEST(ParserReference, ReferenceDifferenceOption) String reference_03 = "REFERENCES table_name (ref_col_01) MATCH FULL ON DELETE SET NULL ON UPDATE SET NULL"; ASTPtr ast_reference_03 = parseQuery(p_reference, reference_03.data(), reference_03.data() + reference_03.size(), "", 0, 0); EXPECT_EQ(ast_reference_03->as()->reference_table_name, "table_name"); - EXPECT_EQ(ast_reference_03->as()->reference_expression->as()->name, "ref_col_01"); + EXPECT_EQ(ast_reference_03->as()->reference_expression->as()->name(), "ref_col_01"); EXPECT_EQ(ast_reference_03->as()->kind, ASTDeclareReference::MATCH_FULL); EXPECT_EQ(ast_reference_03->as()->on_delete_option, ASTDeclareReference::SET_NULL); EXPECT_EQ(ast_reference_03->as()->on_update_option, ASTDeclareReference::SET_NULL); @@ -76,7 +76,7 @@ TEST(ParserReference, ReferenceDifferenceOption) String reference_04 = "REFERENCES table_name (ref_col_01) MATCH FULL ON UPDATE NO ACTION ON DELETE NO ACTION"; ASTPtr ast_reference_04 = parseQuery(p_reference, reference_04.data(), reference_04.data() + reference_04.size(), "", 0, 0); EXPECT_EQ(ast_reference_04->as()->reference_table_name, "table_name"); - EXPECT_EQ(ast_reference_04->as()->reference_expression->as()->name, "ref_col_01"); + EXPECT_EQ(ast_reference_04->as()->reference_expression->as()->name(), "ref_col_01"); EXPECT_EQ(ast_reference_04->as()->kind, ASTDeclareReference::MATCH_FULL); EXPECT_EQ(ast_reference_04->as()->on_delete_option, ASTDeclareReference::NO_ACTION); EXPECT_EQ(ast_reference_04->as()->on_update_option, ASTDeclareReference::NO_ACTION); @@ -84,7 +84,7 @@ TEST(ParserReference, ReferenceDifferenceOption) String reference_05 = "REFERENCES table_name (ref_col_01) MATCH FULL ON UPDATE SET DEFAULT ON DELETE SET DEFAULT"; ASTPtr ast_reference_05 = parseQuery(p_reference, reference_05.data(), reference_05.data() + reference_05.size(), "", 0, 0); EXPECT_EQ(ast_reference_05->as()->reference_table_name, "table_name"); - EXPECT_EQ(ast_reference_05->as()->reference_expression->as()->name, "ref_col_01"); + EXPECT_EQ(ast_reference_05->as()->reference_expression->as()->name(), "ref_col_01"); EXPECT_EQ(ast_reference_05->as()->kind, ASTDeclareReference::MATCH_FULL); EXPECT_EQ(ast_reference_05->as()->on_delete_option, ASTDeclareReference::SET_DEFAULT); 
EXPECT_EQ(ast_reference_05->as()->on_update_option, ASTDeclareReference::SET_DEFAULT); diff --git a/src/Parsers/MySQL/tests/gtest_subpartition_parser.cpp b/src/Parsers/MySQL/tests/gtest_subpartition_parser.cpp index 5c1cf3710ab..b375f73c55c 100644 --- a/src/Parsers/MySQL/tests/gtest_subpartition_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_subpartition_parser.cpp @@ -19,13 +19,13 @@ TEST(ParserSubpartition, AllSubpatitionOptions) ASTDeclareSubPartition * declare_subpartition = ast->as(); EXPECT_EQ(declare_subpartition->logical_name, "subpartition_name"); ASTDeclareOptions * declare_options = declare_subpartition->options->as(); - EXPECT_EQ(declare_options->changes["engine"]->as()->name, "engine_name"); + EXPECT_EQ(declare_options->changes["engine"]->as()->name(), "engine_name"); EXPECT_EQ(declare_options->changes["comment"]->as()->value.safeGet(), "subpartition comment"); EXPECT_EQ(declare_options->changes["data_directory"]->as()->value.safeGet(), "data_directory"); EXPECT_EQ(declare_options->changes["index_directory"]->as()->value.safeGet(), "index_directory"); EXPECT_EQ(declare_options->changes["min_rows"]->as()->value.safeGet(), 0); EXPECT_EQ(declare_options->changes["max_rows"]->as()->value.safeGet(), 1000); - EXPECT_EQ(declare_options->changes["tablespace"]->as()->name, "table_space_name"); + EXPECT_EQ(declare_options->changes["tablespace"]->as()->name(), "table_space_name"); } TEST(ParserSubpartition, OptionalSubpatitionOptions) @@ -37,9 +37,9 @@ TEST(ParserSubpartition, OptionalSubpatitionOptions) ASTDeclareSubPartition * declare_subpartition = ast->as(); EXPECT_EQ(declare_subpartition->logical_name, "subpartition_name"); ASTDeclareOptions * declare_options = declare_subpartition->options->as(); - EXPECT_EQ(declare_options->changes["engine"]->as()->name, "engine_name"); + EXPECT_EQ(declare_options->changes["engine"]->as()->name(), "engine_name"); EXPECT_EQ(declare_options->changes["min_rows"]->as()->value.safeGet(), 0); EXPECT_EQ(declare_options->changes["max_rows"]->as()->value.safeGet(), 1000); - EXPECT_EQ(declare_options->changes["tablespace"]->as()->name, "table_space_name"); + EXPECT_EQ(declare_options->changes["tablespace"]->as()->name(), "table_space_name"); } diff --git a/src/Parsers/MySQL/tests/gtest_table_options_parser.cpp b/src/Parsers/MySQL/tests/gtest_table_options_parser.cpp index b051f6149bb..b252ee51ace 100644 --- a/src/Parsers/MySQL/tests/gtest_table_options_parser.cpp +++ b/src/Parsers/MySQL/tests/gtest_table_options_parser.cpp @@ -23,9 +23,9 @@ TEST(ParserTableOptions, AllSubpatitionOptions) ASTDeclareOptions * declare_options = ast->as(); EXPECT_EQ(declare_options->changes["auto_increment"]->as()->value.safeGet(), 1); EXPECT_EQ(declare_options->changes["avg_row_length"]->as()->value.safeGet(), 3); - EXPECT_EQ(declare_options->changes["character_set"]->as()->name, "utf-8"); + EXPECT_EQ(declare_options->changes["character_set"]->as()->name(), "utf-8"); EXPECT_EQ(declare_options->changes["checksum"]->as()->value.safeGet(), 1); - EXPECT_EQ(declare_options->changes["collate"]->as()->name, "utf8_bin"); + EXPECT_EQ(declare_options->changes["collate"]->as()->name(), "utf8_bin"); EXPECT_EQ(declare_options->changes["comment"]->as()->value.safeGet(), "table option comment"); EXPECT_EQ(declare_options->changes["compression"]->as()->value.safeGet(), "LZ4"); EXPECT_EQ(declare_options->changes["connection"]->as()->value.safeGet(), "connect_string"); @@ -33,23 +33,23 @@ TEST(ParserTableOptions, AllSubpatitionOptions) 
EXPECT_EQ(declare_options->changes["index_directory"]->as()->value.safeGet(), "index_directory"); EXPECT_EQ(declare_options->changes["delay_key_write"]->as()->value.safeGet(), 0); EXPECT_EQ(declare_options->changes["encryption"]->as()->value.safeGet(), "Y"); - EXPECT_EQ(declare_options->changes["engine"]->as()->name, "INNODB"); - EXPECT_EQ(declare_options->changes["insert_method"]->as()->name, "NO"); + EXPECT_EQ(declare_options->changes["engine"]->as()->name(), "INNODB"); + EXPECT_EQ(declare_options->changes["insert_method"]->as()->name(), "NO"); EXPECT_EQ(declare_options->changes["key_block_size"]->as()->value.safeGet(), 3); EXPECT_EQ(declare_options->changes["max_rows"]->as()->value.safeGet(), 1000); EXPECT_EQ(declare_options->changes["min_rows"]->as()->value.safeGet(), 0); - EXPECT_EQ(declare_options->changes["pack_keys"]->as()->name, "DEFAULT"); + EXPECT_EQ(declare_options->changes["pack_keys"]->as()->name(), "DEFAULT"); EXPECT_EQ(declare_options->changes["password"]->as()->value.safeGet(), "password"); - EXPECT_EQ(declare_options->changes["row_format"]->as()->name, "DYNAMIC"); - EXPECT_EQ(declare_options->changes["stats_auto_recalc"]->as()->name, "DEFAULT"); - EXPECT_EQ(declare_options->changes["stats_persistent"]->as()->name, "DEFAULT"); + EXPECT_EQ(declare_options->changes["row_format"]->as()->name(), "DYNAMIC"); + EXPECT_EQ(declare_options->changes["stats_auto_recalc"]->as()->name(), "DEFAULT"); + EXPECT_EQ(declare_options->changes["stats_persistent"]->as()->name(), "DEFAULT"); EXPECT_EQ(declare_options->changes["stats_sample_pages"]->as()->value.safeGet(), 3); - EXPECT_EQ(declare_options->changes["tablespace"]->as()->name, "tablespace_name"); + EXPECT_EQ(declare_options->changes["tablespace"]->as()->name(), "tablespace_name"); ASTPtr arguments = declare_options->changes["union"]->as()->arguments; - EXPECT_EQ(arguments->children[0]->as()->name, "table_01"); - EXPECT_EQ(arguments->children[1]->as()->name, "table_02"); + EXPECT_EQ(arguments->children[0]->as()->name(), "table_01"); + EXPECT_EQ(arguments->children[1]->as()->name(), "table_02"); } TEST(ParserTableOptions, OptionalTableOptions) @@ -60,5 +60,5 @@ TEST(ParserTableOptions, OptionalTableOptions) ASTDeclareOptions * declare_options = ast->as(); EXPECT_EQ(declare_options->changes["auto_increment"]->as()->value.safeGet(), 1); - EXPECT_EQ(declare_options->changes["stats_auto_recalc"]->as()->name, "DEFAULT"); + EXPECT_EQ(declare_options->changes["stats_auto_recalc"]->as()->name(), "DEFAULT"); } diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index 1afdfac0461..6416e08d93b 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -114,7 +114,7 @@ bool ParserIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected & expe return false; auto index = std::make_shared(); - index->name = name->as().name; + index->name = name->as().name(); index->granularity = granularity->as().value.safeGet(); index->set(index->expr, expr); index->set(index->type, type); @@ -143,7 +143,7 @@ bool ParserConstraintDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected & return false; auto constraint = std::make_shared(); - constraint->name = name->as().name; + constraint->name = name->as().name(); constraint->set(constraint->expr, expr); node = constraint; diff --git a/src/Parsers/ParserDictionary.cpp b/src/Parsers/ParserDictionary.cpp index d69e4b02aed..77cd480d595 100644 --- a/src/Parsers/ParserDictionary.cpp +++ b/src/Parsers/ParserDictionary.cpp @@ -95,9 +95,9 @@ bool 
ParserDictionaryRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expec return false; if (pair.first == "min") - res->min_attr_name = identifier->name; + res->min_attr_name = identifier->name(); else if (pair.first == "max") - res->max_attr_name = identifier->name; + res->max_attr_name = identifier->name(); else return false; } diff --git a/src/Parsers/ParserSystemQuery.cpp b/src/Parsers/ParserSystemQuery.cpp index 296f4187e3a..020b7993c2d 100644 --- a/src/Parsers/ParserSystemQuery.cpp +++ b/src/Parsers/ParserSystemQuery.cpp @@ -137,7 +137,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & { ASTPtr ast; if (ParserIdentifier{}.parse(pos, ast, expected)) - storage_policy_str = ast->as().name; + storage_policy_str = ast->as().name(); else return false; @@ -145,7 +145,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & return false; if (ParserIdentifier{}.parse(pos, ast, expected)) - volume_str = ast->as().name; + volume_str = ast->as().name(); else return false; } diff --git a/src/Parsers/tests/gtest_dictionary_parser.cpp b/src/Parsers/tests/gtest_dictionary_parser.cpp index 6b777af77a2..c2bde5fa8f1 100644 --- a/src/Parsers/tests/gtest_dictionary_parser.cpp +++ b/src/Parsers/tests/gtest_dictionary_parser.cpp @@ -86,7 +86,7 @@ TEST(ParserDictionaryDDL, SimpleDictionary) auto * primary_key = create->dictionary->primary_key; EXPECT_EQ(primary_key->children.size(), 1); - EXPECT_EQ(primary_key->children[0]->as()->name, "key_column"); + EXPECT_EQ(primary_key->children[0]->as()->name(), "key_column"); /// range test auto * range = create->dictionary->range; diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index 7beb0a4d706..8cae7866748 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -208,7 +208,7 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ command.index_name = ast_index_decl.name; if (command_ast->index) - command.after_index_name = command_ast->index->as().name; + command.after_index_name = command_ast->index->as().name(); command.if_not_exists = command_ast->if_not_exists; @@ -235,7 +235,7 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ command.ast = command_ast->clone(); command.if_exists = command_ast->if_exists; command.type = AlterCommand::DROP_CONSTRAINT; - command.constraint_name = command_ast->constraint->as().name; + command.constraint_name = command_ast->constraint->as().name(); return command; } @@ -244,7 +244,7 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ AlterCommand command; command.ast = command_ast->clone(); command.type = AlterCommand::DROP_INDEX; - command.index_name = command_ast->index->as().name; + command.index_name = command_ast->index->as().name(); command.if_exists = command_ast->if_exists; if (command_ast->clear_index) command.clear = true; @@ -290,8 +290,8 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ AlterCommand command; command.ast = command_ast->clone(); command.type = AlterCommand::RENAME_COLUMN; - command.column_name = command_ast->column->as().name; - command.rename_to = command_ast->rename_to->as().name; + command.column_name = command_ast->column->as().name(); + command.rename_to = command_ast->rename_to->as().name(); command.if_exists = command_ast->if_exists; return command; } diff --git a/src/Storages/MutationCommands.cpp b/src/Storages/MutationCommands.cpp index f3569c344d9..ba998dd5951 100644 --- 
a/src/Storages/MutationCommands.cpp +++ b/src/Storages/MutationCommands.cpp @@ -57,7 +57,7 @@ std::optional MutationCommand::parse(ASTAlterCommand * command, res.type = MATERIALIZE_INDEX; res.partition = command->partition; res.predicate = nullptr; - res.index_name = command->index->as().name; + res.index_name = command->index->as().name(); return res; } else if (parse_alter_commands && command->type == ASTAlterCommand::MODIFY_COLUMN) @@ -88,7 +88,7 @@ std::optional MutationCommand::parse(ASTAlterCommand * command, MutationCommand res; res.ast = command->ptr(); res.type = MutationCommand::Type::DROP_INDEX; - res.column_name = command->index->as().name; + res.column_name = command->index->as().name(); if (command->partition) res.partition = command->partition; if (command->clear_index) @@ -100,8 +100,8 @@ std::optional MutationCommand::parse(ASTAlterCommand * command, MutationCommand res; res.ast = command->ptr(); res.type = MutationCommand::Type::RENAME_COLUMN; - res.column_name = command->column->as().name; - res.rename_to = command->rename_to->as().name; + res.column_name = command->column->as().name(); + res.rename_to = command->rename_to->as().name(); return res; } else if (command->type == ASTAlterCommand::MATERIALIZE_TTL) diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index b858239d637..9046940b3f7 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -271,7 +271,7 @@ std::optional getOptimizedQueryProcessingStage(const if (!id) return false; /// TODO: if GROUP BY contains multiIf()/if() it should contain only columns from sharding_key - if (!sharding_key_block.has(id->name)) + if (!sharding_key_block.has(id->name())) return false; } return true; diff --git a/src/Storages/System/StorageSystemZooKeeper.cpp b/src/Storages/System/StorageSystemZooKeeper.cpp index 81a42f1fe63..a3660cf2dec 100644 --- a/src/Storages/System/StorageSystemZooKeeper.cpp +++ b/src/Storages/System/StorageSystemZooKeeper.cpp @@ -74,7 +74,7 @@ static bool extractPathImpl(const IAST & elem, String & res, const Context & con else return false; - if (ident->name != "path") + if (ident->name() != "path") return false; auto evaluated = evaluateConstantExpressionAsLiteral(value, context); diff --git a/tests/queries/server.py b/tests/queries/server.py index e9f7361a6fe..c4f8968e08a 100644 --- a/tests/queries/server.py +++ b/tests/queries/server.py @@ -137,19 +137,34 @@ ServerThread.DEFAULT_SERVER_CONFIG = \ - - - localhost - {tcp_port} - - - - - localhost - {tcp_port} - - - + + + localhost + {tcp_port} + + + + + localhost + {tcp_port} + + + + + + + + 127.0.0.1 + {tcp_port} + + + + + 127.0.0.2 + {tcp_port} + + + diff --git a/utils/db-generator/query_db_generator.cpp b/utils/db-generator/query_db_generator.cpp index 84ad07056b9..c8aae4a56f3 100644 --- a/utils/db-generator/query_db_generator.cpp +++ b/utils/db-generator/query_db_generator.cpp @@ -622,7 +622,7 @@ FuncRet arrayJoinFunc(DB::ASTPtr ch, std::map & columns) { auto ident = std::dynamic_pointer_cast(arg); if (ident) - indents.insert(ident->name); + indents.insert(ident->name()); } for (const auto & indent : indents) { @@ -654,7 +654,7 @@ FuncRet inFunc(DB::ASTPtr ch, std::map & columns) auto ident = std::dynamic_pointer_cast(arg); if (ident) { - indents.insert(ident->name); + indents.insert(ident->name()); } auto literal = std::dynamic_pointer_cast(arg); if (literal) @@ -734,7 +734,7 @@ FuncRet arrayFunc(DB::ASTPtr ch, std::map & columns) if (ident) { no_indent = false; - 
indents.insert(ident->name); + indents.insert(ident->name()); } auto literal = std::dynamic_pointer_cast(arg); if (literal) @@ -784,7 +784,7 @@ FuncRet arithmeticFunc(DB::ASTPtr ch, std::map & columns) if (ident) { no_indent = false; - indents.insert(ident->name); + indents.insert(ident->name()); } auto literal = std::dynamic_pointer_cast(arg); if (literal) @@ -848,7 +848,7 @@ FuncRet likeFunc(DB::ASTPtr ch, std::map & columns) { auto ident = std::dynamic_pointer_cast(arg); if (ident) - indents.insert(ident->name); + indents.insert(ident->name()); auto literal = std::dynamic_pointer_cast(arg); if (literal) { @@ -905,7 +905,7 @@ FuncRet simpleFunc(DB::ASTPtr ch, std::map & columns) if (ident) { no_indent = false; - indents.insert(ident->name); + indents.insert(ident->name()); } auto literal = std::dynamic_pointer_cast(arg); if (literal) @@ -1046,7 +1046,7 @@ std::set getIndent(DB::ASTPtr ch) std::set ret = {}; auto x = std::dynamic_pointer_cast(ch); if (x) - ret.insert(x->name); + ret.insert(x->name()); for (const auto & child : (*ch).children) { auto child_ind = getIndent(child); From 2e23fc242fe3cc295571c126f487a874b8e8fb8f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 24 Oct 2020 22:48:16 +0300 Subject: [PATCH 254/432] Remove flaky LIVE VIEW test --- .../__init__.py | 0 .../configs/remote_servers.xml | 18 -- .../configs/set_distributed_defaults.xml | 35 --- .../test_distributed_over_live_view/test.py | 276 ------------------ 4 files changed, 329 deletions(-) delete mode 100644 tests/integration/test_distributed_over_live_view/__init__.py delete mode 100644 tests/integration/test_distributed_over_live_view/configs/remote_servers.xml delete mode 100644 tests/integration/test_distributed_over_live_view/configs/set_distributed_defaults.xml delete mode 100644 tests/integration/test_distributed_over_live_view/test.py diff --git a/tests/integration/test_distributed_over_live_view/__init__.py b/tests/integration/test_distributed_over_live_view/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/integration/test_distributed_over_live_view/configs/remote_servers.xml b/tests/integration/test_distributed_over_live_view/configs/remote_servers.xml deleted file mode 100644 index ebce4697529..00000000000 --- a/tests/integration/test_distributed_over_live_view/configs/remote_servers.xml +++ /dev/null @@ -1,18 +0,0 @@ - - - - - - node1 - 9000 - - - - - node2 - 9000 - - - - - diff --git a/tests/integration/test_distributed_over_live_view/configs/set_distributed_defaults.xml b/tests/integration/test_distributed_over_live_view/configs/set_distributed_defaults.xml deleted file mode 100644 index 194eb1ebb87..00000000000 --- a/tests/integration/test_distributed_over_live_view/configs/set_distributed_defaults.xml +++ /dev/null @@ -1,35 +0,0 @@ - - - - 3 - 1000 - 1 - - - 5 - 3000 - 1 - - - - - - - - ::/0 - - default - default - - - - - ::/0 - - delays - default - - - - - diff --git a/tests/integration/test_distributed_over_live_view/test.py b/tests/integration/test_distributed_over_live_view/test.py deleted file mode 100644 index 78b90024ebf..00000000000 --- a/tests/integration/test_distributed_over_live_view/test.py +++ /dev/null @@ -1,276 +0,0 @@ - - -import sys -import time - -import pytest -from helpers.cluster import ClickHouseCluster -from helpers.uclient import client, prompt, end_of_block - -cluster = ClickHouseCluster(__file__) - -# log = sys.stdout -log = None - -NODES = {'node' + str(i): cluster.add_instance( - 'node' + str(i), - 
main_configs=['configs/remote_servers.xml'], - user_configs=['configs/set_distributed_defaults.xml'], -) for i in (1, 2)} - -CREATE_TABLES_SQL = ''' -DROP TABLE IF EXISTS lv_over_base_table; -DROP TABLE IF EXISTS distributed_table; -DROP TABLE IF EXISTS base_table; - -SET allow_experimental_live_view = 1; - -CREATE TABLE - base_table( - node String, - key Int32, - value Int32 - ) -ENGINE = Memory; - -CREATE LIVE VIEW lv_over_base_table AS SELECT * FROM base_table; - -CREATE TABLE - distributed_table -AS base_table -ENGINE = Distributed(test_cluster, default, base_table, rand()); -''' - -INSERT_SQL_TEMPLATE = "INSERT INTO base_table VALUES ('{node_id}', {key}, {value})" - - -@pytest.fixture(scope="function") -def started_cluster(): - try: - cluster.start() - for node_index, (node_name, node) in enumerate(NODES.items()): - node.query(CREATE_TABLES_SQL) - for i in range(0, 2): - sql = INSERT_SQL_TEMPLATE.format(node_id=node_name, key=i, value=i + (node_index * 10)) - node.query(sql) - yield cluster - - finally: - cluster.shutdown() - -def poll_query(node, query, expected, timeout): - """Repeatedly execute query until either expected result is returned or timeout occurs. - """ - start_time = time.time() - while node.query(query) != expected and time.time() - start_time < timeout: - pass - assert node.query(query) == expected - -@pytest.mark.parametrize("node", list(NODES.values())[:1]) -@pytest.mark.parametrize("source", ["lv_over_distributed_table"]) -class TestLiveViewOverDistributedSuite: - def test_distributed_over_live_view_order_by_node(self, started_cluster, node, source): - node0, node1 = list(NODES.values()) - - select_query = "SELECT * FROM distributed_over_lv ORDER BY node, key FORMAT CSV" - select_query_dist_table = "SELECT * FROM distributed_table ORDER BY node, key FORMAT CSV" - select_count_query = "SELECT count() FROM distributed_over_lv" - - with client(name="client1> ", log=log, command=" ".join(node0.client.command)) as client1, \ - client(name="client2> ", log=log, command=" ".join(node1.client.command)) as client2: - client1.expect(prompt) - client2.expect(prompt) - - client1.send("DROP TABLE IF EXISTS distributed_over_lv") - client1.expect(prompt) - client1.send( - "CREATE TABLE distributed_over_lv AS lv_over_base_table ENGINE = Distributed(test_cluster, default, lv_over_base_table)") - client1.expect(prompt) - - client1.send(select_query) - client1.expect('"node1",0,0') - client1.expect('"node1",1,1') - client1.expect('"node2",0,10') - client1.expect('"node2",1,11') - client1.expect(prompt) - - client1.send("INSERT INTO distributed_table VALUES ('node1', 1, 3), ('node1', 2, 3)") - client1.expect(prompt) - client2.send("INSERT INTO distributed_table VALUES ('node1', 3, 3)") - client2.expect(prompt) - - poll_query(node0, select_count_query, "7\n", timeout=60) - print("\n--DEBUG1--") - print(select_query) - print(node0.query(select_query)) - print("---------") - print("\n--DEBUG2--") - print(select_query_dist_table) - print(node0.query(select_query_dist_table)) - print("---------") - - client1.send(select_query) - client1.expect('"node1",0,0') - client1.expect('"node1",1,1') - client1.expect('"node1",1,3') - client1.expect('"node1",2,3') - client1.expect('"node1",3,3') - client1.expect('"node2",0,10') - client1.expect('"node2",1,11') - client1.expect(prompt) - - def test_distributed_over_live_view_order_by_key(self, started_cluster, node, source): - node0, node1 = list(NODES.values()) - - select_query = "SELECT * FROM distributed_over_lv ORDER BY key, node FORMAT CSV" - 
select_count_query = "SELECT count() FROM distributed_over_lv" - - with client(name="client1> ", log=log, command=" ".join(node0.client.command)) as client1, \ - client(name="client2> ", log=log, command=" ".join(node1.client.command)) as client2: - client1.expect(prompt) - client2.expect(prompt) - - client1.send("DROP TABLE IF EXISTS distributed_over_lv") - client1.expect(prompt) - client1.send( - "CREATE TABLE distributed_over_lv AS lv_over_base_table ENGINE = Distributed(test_cluster, default, lv_over_base_table)") - client1.expect(prompt) - - client1.send(select_query) - client1.expect('"node1",0,0') - client1.expect('"node2",0,10') - client1.expect('"node1",1,1') - client1.expect('"node2",1,11') - client1.expect(prompt) - - client1.send("INSERT INTO distributed_table VALUES ('node1', 1, 3), ('node1', 2, 3)") - client1.expect(prompt) - client2.send("INSERT INTO distributed_table VALUES ('node1', 3, 3)") - client2.expect(prompt) - - poll_query(node0, select_count_query, "7\n", timeout=60) - - client1.send(select_query) - client1.expect('"node1",0,0') - client1.expect('"node2",0,10') - client1.expect('"node1",1,1') - client1.expect('"node1",1,3') - client1.expect('"node2",1,11') - client1.expect('"node1",2,3') - client1.expect('"node1",3,3') - client1.expect(prompt) - - def test_distributed_over_live_view_group_by_node(self, started_cluster, node, source): - node0, node1 = list(NODES.values()) - - select_query = "SELECT node, SUM(value) FROM distributed_over_lv GROUP BY node ORDER BY node FORMAT CSV" - - with client(name="client1> ", log=log, command=" ".join(node0.client.command)) as client1, \ - client(name="client2> ", log=log, command=" ".join(node1.client.command)) as client2: - client1.expect(prompt) - client2.expect(prompt) - - client1.send("DROP TABLE IF EXISTS distributed_over_lv") - client1.expect(prompt) - client1.send( - "CREATE TABLE distributed_over_lv AS lv_over_base_table ENGINE = Distributed(test_cluster, default, lv_over_base_table)") - client1.expect(prompt) - - client1.send(select_query) - client1.expect('"node1",1') - client1.expect('"node2",21') - client1.expect(prompt) - - client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)") - client2.expect(prompt) - - poll_query(node0, select_query, '"node1",3\n"node2",21\n', timeout=60) - - client1.send(select_query) - client1.expect('"node1",3') - client1.expect('"node2",21') - client1.expect(prompt) - - client1.send("INSERT INTO distributed_table VALUES ('node1', 1, 3), ('node1', 3, 3)") - client1.expect(prompt) - client2.send("INSERT INTO distributed_table VALUES ('node1', 3, 3)") - client2.expect(prompt) - - poll_query(node0, select_query, '"node1",12\n"node2",21\n', timeout=60) - - client1.send(select_query) - client1.expect('"node1",12') - client1.expect('"node2",21') - client1.expect(prompt) - - def test_distributed_over_live_view_group_by_key(self, started_cluster, node, source): - node0, node1 = list(NODES.values()) - - select_query = "SELECT key, SUM(value) FROM distributed_over_lv GROUP BY key ORDER BY key FORMAT CSV" - - with client(name="client1> ", log=log, command=" ".join(node0.client.command)) as client1, \ - client(name="client2> ", log=log, command=" ".join(node1.client.command)) as client2: - client1.expect(prompt) - client2.expect(prompt) - - client1.send("DROP TABLE IF EXISTS distributed_over_lv") - client1.expect(prompt) - client1.send( - "CREATE TABLE distributed_over_lv AS lv_over_base_table ENGINE = Distributed(test_cluster, default, lv_over_base_table)") - client1.expect(prompt) - - 
client1.send(select_query) - client1.expect('0,10') - client1.expect('1,12') - client1.expect(prompt) - - client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)") - client2.expect(prompt) - - poll_query(node0, "SELECT count() FROM (%s)" % select_query.rsplit("FORMAT")[0], "3\n", timeout=60) - - client1.send(select_query) - client1.expect('0,10') - client1.expect('1,12') - client1.expect('2,2') - client1.expect(prompt) - - client2.send("INSERT INTO distributed_table VALUES ('node1', 1, 3), ('node1', 3, 3)") - client2.expect(prompt) - - poll_query(node0, "SELECT count() FROM (%s)" % select_query.rsplit("FORMAT")[0], "4\n", timeout=60) - - client1.send(select_query) - client1.expect('0,10') - client1.expect('1,15') - client1.expect('2,2') - client1.expect('3,3') - client1.expect(prompt) - - def test_distributed_over_live_view_sum(self, started_cluster, node, source): - node0, node1 = list(NODES.values()) - - with client(name="client1> ", log=log, command=" ".join(node0.client.command)) as client1, \ - client(name="client2> ", log=log, command=" ".join(node1.client.command)) as client2: - client1.expect(prompt) - client2.expect(prompt) - - client1.send("DROP TABLE IF EXISTS distributed_over_lv") - client1.expect(prompt) - client1.send( - "CREATE TABLE distributed_over_lv AS lv_over_base_table ENGINE = Distributed(test_cluster, default, lv_over_base_table)") - client1.expect(prompt) - - client1.send("SELECT sum(value) FROM distributed_over_lv") - client1.expect(r"22" + end_of_block) - client1.expect(prompt) - - client2.send("INSERT INTO distributed_table VALUES ('node1', 2, 2)") - client2.expect(prompt) - - poll_query(node0, "SELECT sum(value) FROM distributed_over_lv", "24\n", timeout=60) - - client2.send("INSERT INTO distributed_table VALUES ('node1', 3, 3), ('node1', 4, 4)") - client2.expect(prompt) - - poll_query(node0, "SELECT sum(value) FROM distributed_over_lv", "31\n", timeout=60) From 9d50921e529402ec7dbd76b61506fa40df84e2f2 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 24 Oct 2020 23:52:32 +0300 Subject: [PATCH 255/432] Fix performance test "functions logical" after move to clang --- src/Functions/FunctionsLogical.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/Functions/FunctionsLogical.cpp b/src/Functions/FunctionsLogical.cpp index 58e1c52a60c..defa4f4493b 100644 --- a/src/Functions/FunctionsLogical.cpp +++ b/src/Functions/FunctionsLogical.cpp @@ -290,10 +290,9 @@ private: /// Apply target function by feeding it "batches" of N columns -/// Combining 10 columns per pass is the fastest for large columns sizes. -/// For small columns sizes - more columns is faster. +/// Combining 8 columns per pass is the fastest method, because it's the maximum when clang vectorizes a loop. 
template < - typename Op, template typename OperationApplierImpl, size_t N = 10> + typename Op, template typename OperationApplierImpl, size_t N = 8> struct OperationApplier { template From 8b21ef5d4fd6c9d2e3a49135d819676ed09fffd0 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 25 Oct 2020 00:50:52 +0300 Subject: [PATCH 256/432] Remove excessive exclamation marks --- .../AggregateFunctionTimeSeriesGroupSum.h | 2 +- src/Core/tests/CMakeLists.txt | 3 --- src/Core/tests/gtest_move_field.cpp | 22 ++++++++++++++++ src/Core/tests/move_field.cpp | 25 ------------------- src/Interpreters/tests/two_level_hash_map.cpp | 2 +- 5 files changed, 24 insertions(+), 30 deletions(-) create mode 100644 src/Core/tests/gtest_move_field.cpp delete mode 100644 src/Core/tests/move_field.cpp diff --git a/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.h b/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.h index be0a3eb4af5..b755fbf081b 100644 --- a/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.h +++ b/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.h @@ -92,7 +92,7 @@ struct AggregateFunctionTimeSeriesGroupSumData it_ss->second.add(t, v); } if (result.size() > 0 && t < result.back().first) - throw Exception{"timeSeriesGroupSum or timeSeriesGroupRateSum must order by timestamp asc!!!", ErrorCodes::LOGICAL_ERROR}; + throw Exception{"timeSeriesGroupSum or timeSeriesGroupRateSum must order by timestamp asc.", ErrorCodes::LOGICAL_ERROR}; if (result.size() > 0 && t == result.back().first) { //do not add new point diff --git a/src/Core/tests/CMakeLists.txt b/src/Core/tests/CMakeLists.txt index d609e49f247..cd6450633ff 100644 --- a/src/Core/tests/CMakeLists.txt +++ b/src/Core/tests/CMakeLists.txt @@ -5,9 +5,6 @@ target_include_directories (string_pool SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLU add_executable (field field.cpp) target_link_libraries (field PRIVATE dbms) -add_executable (move_field move_field.cpp) -target_link_libraries (move_field PRIVATE clickhouse_common_io) - add_executable (string_ref_hash string_ref_hash.cpp) target_link_libraries (string_ref_hash PRIVATE clickhouse_common_io) diff --git a/src/Core/tests/gtest_move_field.cpp b/src/Core/tests/gtest_move_field.cpp new file mode 100644 index 00000000000..9c807039c6a --- /dev/null +++ b/src/Core/tests/gtest_move_field.cpp @@ -0,0 +1,22 @@ +#include +#include + +using namespace DB; + +GTEST_TEST(Field, Move) +{ + Field f; + + f = Field{String{"Hello, world (1)"}}; + ASSERT_EQ(f.get(), "Hello, world (1)"); + f = Field{String{"Hello, world (2)"}}; + ASSERT_EQ(f.get(), "Hello, world (2)"); + f = Field{Array{Field{String{"Hello, world (3)"}}}}; + ASSERT_EQ(f.get()[0].get(), "Hello, world (3)"); + f = String{"Hello, world (4)"}; + ASSERT_EQ(f.get(), "Hello, world (4)"); + f = Array{Field{String{"Hello, world (5)"}}}; + ASSERT_EQ(f.get()[0].get(), "Hello, world (5)"); + f = Array{String{"Hello, world (6)"}}; + ASSERT_EQ(f.get()[0].get(), "Hello, world (6)"); +} diff --git a/src/Core/tests/move_field.cpp b/src/Core/tests/move_field.cpp deleted file mode 100644 index 2780abffc40..00000000000 --- a/src/Core/tests/move_field.cpp +++ /dev/null @@ -1,25 +0,0 @@ -#include -#include - - -int main(int, char **) -{ - using namespace DB; - - Field f; - - f = Field{String{"Hello, world"}}; - std::cerr << f.get() << "\n"; - f = Field{String{"Hello, world!"}}; - std::cerr << f.get() << "\n"; - f = Field{Array{Field{String{"Hello, world!!"}}}}; - std::cerr << f.get()[0].get() << "\n"; - f = String{"Hello, world!!!"}; - 
std::cerr << f.get() << "\n"; - f = Array{Field{String{"Hello, world!!!!"}}}; - std::cerr << f.get()[0].get() << "\n"; - f = Array{String{"Hello, world!!!!!"}}; - std::cerr << f.get()[0].get() << "\n"; - - return 0; -} diff --git a/src/Interpreters/tests/two_level_hash_map.cpp b/src/Interpreters/tests/two_level_hash_map.cpp index f79be16e095..33fd5ee8305 100644 --- a/src/Interpreters/tests/two_level_hash_map.cpp +++ b/src/Interpreters/tests/two_level_hash_map.cpp @@ -128,7 +128,7 @@ int main(int argc, char ** argv) std::cerr << "sum_counts: " << sum_counts << ", elems: " << elems << std::endl; if (sum_counts != n) - std::cerr << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" << std::endl; + std::cerr << "Error!" << std::endl; } return 0; From 942828f4d5c810235cdcd281de90ffb4350ef07b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 25 Oct 2020 00:54:17 +0300 Subject: [PATCH 257/432] Check style --- utils/check-style/check-style | 3 +++ 1 file changed, 3 insertions(+) diff --git a/utils/check-style/check-style b/utils/check-style/check-style index 4983782c00d..ef569c9f73e 100755 --- a/utils/check-style/check-style +++ b/utils/check-style/check-style @@ -99,3 +99,6 @@ find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} '(' -name '*. find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' | xargs grep -l -F $'\xEF\xBB\xBF' && echo "Files should not have UTF-8 BOM" find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' | xargs grep -l -F $'\xFF\xFE' && echo "Files should not have UTF-16LE BOM" find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' | xargs grep -l -F $'\xFE\xFF' && echo "Files should not have UTF-16BE BOM" + +# Too many exclamation marks +find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -F '!!!' && echo "Too many exclamation marks (looks dirty, unconfident)." 
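A note on the mechanics of the style check added above, since it relies entirely on shell exit codes: `grep -F '!!!'` exits with status 0 only when it finds a match, so the `&& echo` warning fires exactly when some source file still contains a run of exclamation marks, and stays silent otherwise. A minimal, self-contained sketch of the same grep-and-warn idiom (the scratch directory and file are hypothetical, not part of the patch):

```bash
#!/usr/bin/env bash
# Sketch only: reproduce the check-style idiom on a throwaway file.
# grep prints the offending lines and exits 0 iff a match exists,
# so the message after && is printed only when a violation is found.
mkdir -p /tmp/style-demo
echo 'throw Exception("Something is terribly wrong!!!");' > /tmp/style-demo/bad.cpp
find /tmp/style-demo -name '*.cpp' | xargs grep -F '!!!' && echo "Too many exclamation marks (looks dirty, unconfident)."
```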
From 2613012fd1f888f5ec72f1ee0b4296e7a672596a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 25 Oct 2020 04:43:06 +0300 Subject: [PATCH 258/432] Improve performance of FunctionsLogical a little by adding "restrict" --- src/Functions/FunctionsLogical.cpp | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/Functions/FunctionsLogical.cpp b/src/Functions/FunctionsLogical.cpp index defa4f4493b..3e19516daaa 100644 --- a/src/Functions/FunctionsLogical.cpp +++ b/src/Functions/FunctionsLogical.cpp @@ -299,28 +299,29 @@ struct OperationApplier static void apply(Columns & in, ResultData & result_data, bool use_result_data_as_input = false) { if (!use_result_data_as_input) - doBatchedApply(in, result_data); + doBatchedApply(in, result_data.data(), result_data.size()); while (!in.empty()) - doBatchedApply(in, result_data); + doBatchedApply(in, result_data.data(), result_data.size()); } - template - static void NO_INLINE doBatchedApply(Columns & in, ResultData & result_data) + template + static void NO_INLINE doBatchedApply(Columns & in, Result * __restrict result_data, size_t size) { if (N > in.size()) { OperationApplier - ::template doBatchedApply(in, result_data); + ::template doBatchedApply(in, result_data, size); return; } const OperationApplierImpl operation_applier_impl(in); - size_t i = 0; - for (auto & res : result_data) + for (size_t i = 0; i < size; ++i) + { if constexpr (CarryResult) - res = Op::apply(res, operation_applier_impl.apply(i++)); + result_data[i] = Op::apply(result_data[i], operation_applier_impl.apply(i)); else - res = operation_applier_impl.apply(i++); + result_data[i] = operation_applier_impl.apply(i); + } in.erase(in.end() - N, in.end()); } @@ -331,7 +332,7 @@ template < struct OperationApplier { template - static void NO_INLINE doBatchedApply(Columns &, Result &) + static void NO_INLINE doBatchedApply(Columns &, Result &, size_t) { throw Exception( "OperationApplier<...>::apply(...): not enough arguments to run this method", From 4bf32cb15bf78827bd86c1310741ebe005acc8b9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 16 Oct 2020 02:46:36 +0300 Subject: [PATCH 259/432] Fix destruction order of Suggest #16035 --- programs/client/Client.cpp | 9 ++++++--- programs/client/Suggest.h | 13 ++++--------- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 3cd584c0e55..8ccdfd8af65 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -218,6 +218,8 @@ private: QueryFuzzer fuzzer; int query_fuzzer_runs = 0; + std::optional suggest; + /// We will format query_id in interactive mode in various ways, the default is just to print Query id: ... std::vector> query_id_formats; @@ -577,10 +579,11 @@ private: if (print_time_to_stderr) throw Exception("time option could be specified only in non-interactive mode", ErrorCodes::BAD_ARGUMENTS); + suggest.emplace(); if (server_revision >= Suggest::MIN_SERVER_REVISION && !config().getBool("disable_suggestion", false)) { /// Load suggestion data from the server. - Suggest::instance().load(connection_parameters, config().getInt("suggestion_limit")); + suggest->load(connection_parameters, config().getInt("suggestion_limit")); } /// Load command history if present. 
@@ -607,7 +610,7 @@ private: highlight_callback = highlight; ReplxxLineReader lr( - Suggest::instance(), + *suggest, history_file, config().has("multiline"), query_extenders, @@ -615,7 +618,7 @@ private: highlight_callback); #elif defined(USE_READLINE) && USE_READLINE - ReadlineLineReader lr(Suggest::instance(), history_file, config().has("multiline"), query_extenders, query_delimiters); + ReadlineLineReader lr(*suggest, history_file, config().has("multiline"), query_extenders, query_delimiters); #else LineReader lr(history_file, config().has("multiline"), query_extenders, query_delimiters); #endif diff --git a/programs/client/Suggest.h b/programs/client/Suggest.h index b13289ac322..03332088cbe 100644 --- a/programs/client/Suggest.h +++ b/programs/client/Suggest.h @@ -18,10 +18,11 @@ namespace ErrorCodes class Suggest : public LineReader::Suggest, boost::noncopyable { public: - static Suggest & instance() + Suggest(); + ~Suggest() { - static Suggest instance; - return instance; + if (loading_thread.joinable()) + loading_thread.join(); } void load(const ConnectionParameters & connection_parameters, size_t suggestion_limit); @@ -30,12 +31,6 @@ public: static constexpr int MIN_SERVER_REVISION = 54406; private: - Suggest(); - ~Suggest() - { - if (loading_thread.joinable()) - loading_thread.join(); - } void loadImpl(Connection & connection, const ConnectionTimeouts & timeouts, size_t suggestion_limit); void fetch(Connection & connection, const ConnectionTimeouts & timeouts, const std::string & query); From 2efd740dc7cbd3159e269eab5e03e1a48fbfb8c6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 16 Oct 2020 03:07:56 +0300 Subject: [PATCH 260/432] Added a test --- .../01526_client_start_and_exit.reference | 1 + .../01526_client_start_and_exit.sh | 25 +++++++++++++++++++ 2 files changed, 26 insertions(+) create mode 100644 tests/queries/0_stateless/01526_client_start_and_exit.reference create mode 100755 tests/queries/0_stateless/01526_client_start_and_exit.sh diff --git a/tests/queries/0_stateless/01526_client_start_and_exit.reference b/tests/queries/0_stateless/01526_client_start_and_exit.reference new file mode 100644 index 00000000000..7326d960397 --- /dev/null +++ b/tests/queries/0_stateless/01526_client_start_and_exit.reference @@ -0,0 +1 @@ +Ok diff --git a/tests/queries/0_stateless/01526_client_start_and_exit.sh b/tests/queries/0_stateless/01526_client_start_and_exit.sh new file mode 100755 index 00000000000..d3fd2cb73b2 --- /dev/null +++ b/tests/queries/0_stateless/01526_client_start_and_exit.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. 
"$CURDIR"/../shell_config.sh + +# Create a huge amount of tables, so Suggest will take a time to load +seq 1 1000 | sed -r -e 's/(.+)/CREATE TABLE IF NOT EXISTS test\1 (x UInt8) ENGINE = Memory;/' | ${CLICKHOUSE_CLIENT} -n + +function stress() +{ + while true; do + ./"$CURDIR"/01526_client_start_and_exit.expect + done +} + +export -f stress + +for _ in {1..10}; do + timeout 3 bash -c stress & +done + +wait +echo 'Ok' + +seq 1 1000 | sed -r -e 's/(.+)/DROP TABLE test\1;/' | ${CLICKHOUSE_CLIENT} -n From 90a3e95aabac19ab4dae30819dc579a88428c297 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Fri, 16 Oct 2020 03:38:32 +0300 Subject: [PATCH 261/432] Add missing file --- .../0_stateless/01526_client_start_and_exit.expect | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100755 tests/queries/0_stateless/01526_client_start_and_exit.expect diff --git a/tests/queries/0_stateless/01526_client_start_and_exit.expect b/tests/queries/0_stateless/01526_client_start_and_exit.expect new file mode 100755 index 00000000000..4ad0eea5d97 --- /dev/null +++ b/tests/queries/0_stateless/01526_client_start_and_exit.expect @@ -0,0 +1,12 @@ +#!/usr/bin/expect -f + +log_user 0 +set timeout 5 +match_max 100000 + +if ![info exists env(CLICKHOUSE_PORT_TCP)] {set env(CLICKHOUSE_PORT_TCP) 9000} + +spawn clickhouse-client --port "$env(CLICKHOUSE_PORT_TCP)" +expect ":) " +send -- "\4" +expect eof From 8cc463f549d29e5c11ea8664e5c17eb05ab50033 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sun, 25 Oct 2020 05:31:58 +0300 Subject: [PATCH 262/432] Better test --- .../queries/0_stateless/01526_client_start_and_exit.expect | 4 ++-- .../0_stateless/01526_client_start_and_exit.reference | 2 +- tests/queries/0_stateless/01526_client_start_and_exit.sh | 7 ++----- 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/tests/queries/0_stateless/01526_client_start_and_exit.expect b/tests/queries/0_stateless/01526_client_start_and_exit.expect index 4ad0eea5d97..003439ffa54 100755 --- a/tests/queries/0_stateless/01526_client_start_and_exit.expect +++ b/tests/queries/0_stateless/01526_client_start_and_exit.expect @@ -1,12 +1,12 @@ #!/usr/bin/expect -f -log_user 0 +log_user 1 set timeout 5 match_max 100000 if ![info exists env(CLICKHOUSE_PORT_TCP)] {set env(CLICKHOUSE_PORT_TCP) 9000} -spawn clickhouse-client --port "$env(CLICKHOUSE_PORT_TCP)" +spawn bash -c "clickhouse-client --port $env(CLICKHOUSE_PORT_TCP) && echo $?" expect ":) " send -- "\4" expect eof diff --git a/tests/queries/0_stateless/01526_client_start_and_exit.reference b/tests/queries/0_stateless/01526_client_start_and_exit.reference index 7326d960397..e3e2e7b22af 100644 --- a/tests/queries/0_stateless/01526_client_start_and_exit.reference +++ b/tests/queries/0_stateless/01526_client_start_and_exit.reference @@ -1 +1 @@ -Ok +Loaded 10000 queries. diff --git a/tests/queries/0_stateless/01526_client_start_and_exit.sh b/tests/queries/0_stateless/01526_client_start_and_exit.sh index d3fd2cb73b2..abcf51d8c50 100755 --- a/tests/queries/0_stateless/01526_client_start_and_exit.sh +++ b/tests/queries/0_stateless/01526_client_start_and_exit.sh @@ -4,12 +4,12 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh # Create a huge amount of tables, so Suggest will take a time to load -seq 1 1000 | sed -r -e 's/(.+)/CREATE TABLE IF NOT EXISTS test\1 (x UInt8) ENGINE = Memory;/' | ${CLICKHOUSE_CLIENT} -n +${CLICKHOUSE_CLIENT} -q "SELECT 'CREATE TABLE test_' || hex(randomPrintableASCII(40)) || '(x UInt8) Engine=Memory;' FROM numbers(10000)" --format=TSVRaw | ${CLICKHOUSE_BENCHMARK} -c32 -i 10000 -d 0 2>&1 | grep -F 'Loaded 10000 queries' function stress() { while true; do - ./"$CURDIR"/01526_client_start_and_exit.expect + ./"$CURDIR"/01526_client_start_and_exit.expect | grep -v -P 'ClickHouse client|Connecting|Connected|:\) Bye\.|^\s*$|spawn bash|^0\s*$' done } @@ -20,6 +20,3 @@ for _ in {1..10}; do done wait -echo 'Ok' - -seq 1 1000 | sed -r -e 's/(.+)/DROP TABLE test\1;/' | ${CLICKHOUSE_CLIENT} -n From 98f073a3a541db7e9bed9ed4d78057bde3eef4ac Mon Sep 17 00:00:00 2001 From: Olga Revyakina Date: Sun, 25 Oct 2020 05:38:54 +0300 Subject: [PATCH 263/432] Text updated ant translated to Russian --- .../{crash_log.md => crash-log.md} | 16 +++++-- docs/ru/operations/system-tables/crash-log.md | 48 +++++++++++++++++++ 2 files changed, 59 insertions(+), 5 deletions(-) rename docs/en/operations/system-tables/{crash_log.md => crash-log.md} (77%) create mode 100644 docs/ru/operations/system-tables/crash-log.md diff --git a/docs/en/operations/system-tables/crash_log.md b/docs/en/operations/system-tables/crash-log.md similarity index 77% rename from docs/en/operations/system-tables/crash_log.md rename to docs/en/operations/system-tables/crash-log.md index d38ce31584f..5bdf402513a 100644 --- a/docs/en/operations/system-tables/crash_log.md +++ b/docs/en/operations/system-tables/crash-log.md @@ -1,4 +1,4 @@ -## system.crash_log {#system-tables_crash_log} +# system.crash_log {#system-tables_crash_log} Contains information about stack traces for fatal errors. The table does not exist in the database by default, it is created only when fatal errors occur. @@ -10,11 +10,11 @@ Columns: - `signal` ([Int32](../../sql-reference/data-types/int-uint.md)) — Signal number. - `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Thread ID. - `query_id` ([String](../../sql-reference/data-types/string.md)) — Query ID. -- `trace` ([Array](../../sql-reference/data-types/array.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Array of traces. -- `trace_full` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Array of full traces. +- `trace` ([Array](../../sql-reference/data-types/array.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Stack trace at the moment of crash. Each element is a virtual memory address inside ClickHouse server process. +- `trace_full` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Stack trace at the moment of crash. Each element contains a called method inside ClickHouse server process. - `version` ([String](../../sql-reference/data-types/string.md)) — ClickHouse server version. - `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse server revision. -- `build_id` ([String](../../sql-reference/data-types/string.md)) — ClickHouse server build. +- `build_id` ([String](../../sql-reference/data-types/string.md)) — BuildID that is generated by compiler. 
**Example** @@ -23,9 +23,12 @@ Query: ``` sql SELECT * FROM system.crash_log ORDER BY event_time DESC LIMIT 1; ``` + Result (not full): ``` text +Row 1: +────── event_date: 2020-10-14 event_time: 2020-10-14 15:47:40 timestamp_ns: 1602679660271312710 @@ -39,4 +42,7 @@ revision: 54442 build_id: ``` -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/crash_log) +**See also** +- [trace_log](../../operations/system_tables/trace_log.md) system table + +[Original article](https://clickhouse.tech/docs/en/operations/system_tables/crash-log) diff --git a/docs/ru/operations/system-tables/crash-log.md b/docs/ru/operations/system-tables/crash-log.md new file mode 100644 index 00000000000..19e9d19b198 --- /dev/null +++ b/docs/ru/operations/system-tables/crash-log.md @@ -0,0 +1,48 @@ +# system.crash_log {#system-tables_crash_log} + +Содержит информацию о трассировках стека для фатальных ошибок. Таблица не содержится в базе данных по умолчанию, а создается только при возникновении фатальных ошибок. + +Колонки: + +- `event_date` ([Datetime](../../sql-reference/data-types/datetime.md)) — Дата события. +- `event_time` ([Datetime](../../sql-reference/data-types/datetime.md)) — Время события. +- `timestamp_ns` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Время события с наносекундами. +- `signal` ([Int32](../../sql-reference/data-types/int-uint.md)) — Номер сигнала, пришедшего в поток. +- `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Идентификатор треда. +- `query_id` ([String](../../sql-reference/data-types/string.md)) — Идентификатор запроса. +- `trace` ([Array](../../sql-reference/data-types/array.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Трассировка стека в момент ошибки. Представляет собой список физических адресов, по которым расположены вызываемые методы. +- `trace_full` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) — Трассировка стека в момент ошибки. Содержит вызываемые методы. +- `version` ([String](../../sql-reference/data-types/string.md)) — Версия сервера ClickHouse. +- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Ревизия сборки сервера ClickHouse. +- `build_id` ([String](../../sql-reference/data-types/string.md)) — BuildID, сгенерированный компилятором. + +**Пример** + +Запрос: + +``` sql +SELECT * FROM system.crash_log ORDER BY event_time DESC LIMIT 1; +``` + +Результат (приведён не полностью): + +``` text +Row 1: +────── +event_date: 2020-10-14 +event_time: 2020-10-14 15:47:40 +timestamp_ns: 1602679660271312710 +signal: 11 +thread_id: 23624 +query_id: 428aab7c-8f5c-44e9-9607-d16b44467e69 +trace: [188531193,...] +trace_full: ['3. DB::(anonymous namespace)::FunctionFormatReadableTimeDelta::executeImpl(std::__1::vector >&, std::__1::vector > const&, unsigned long, unsigned long) const @ 0xb3cc1f9 in /home/username/work/ClickHouse/build/programs/clickhouse',...] +version: ClickHouse 20.11.1.1 +revision: 54442 +build_id: +``` + +**См. 
также**
+- Системная таблица [trace_log](../../operations/system_tables/trace_log.md)
+
+[Original article](https://clickhouse.tech/docs/en/operations/system_tables/crash-log)

From a2d888f19fb29abb7556ce799b55b11a31fbc6d8 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sat, 24 Oct 2020 07:13:17 +0300
Subject: [PATCH 264/432] Fix processing very large entries in queue

---
 .../MergeTree/ReplicatedMergeTreeQueue.cpp    | 16 +++++++---------
 .../MergeTree/ReplicatedMergeTreeQueue.h      | 13 +++++++++++++
 2 files changed, 20 insertions(+), 9 deletions(-)

diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
index 45e16e81208..1d4f6b38be8 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
@@ -486,20 +486,22 @@ int32_t ReplicatedMergeTreeQueue::pullLogsToQueue(zkutil::ZooKeeperPtr zookeeper
 {
     std::sort(log_entries.begin(), log_entries.end());
 
-        /// ZK contains a limit on the number or total size of operations in a multi-request.
-        /// If the limit is exceeded, the connection is simply closed.
-        /// The constant is selected with a margin. The default limit in ZK is 1 MB of data in total.
-        /// The average size of the node value in this case is less than 10 kilobytes.
-        static constexpr auto MAX_MULTI_OPS = 100;
-
-        for (size_t entry_idx = 0, num_entries = log_entries.size(); entry_idx < num_entries; entry_idx += MAX_MULTI_OPS)
+        for (size_t entry_idx = 0, num_entries = log_entries.size(); entry_idx < num_entries;)
         {
             auto begin = log_entries.begin() + entry_idx;
-            auto end = entry_idx + MAX_MULTI_OPS >= log_entries.size()
+            auto end = entry_idx + current_multi_batch_size >= log_entries.size()
                 ? log_entries.end()
-                : (begin + MAX_MULTI_OPS);
+                : (begin + current_multi_batch_size);
             auto last = end - 1;
 
+            /// Increment entry_idx before increasing the batch size, because exactly
+            /// current_multi_batch_size entries were taken for this iteration.
+            entry_idx += current_multi_batch_size;
+
+            /// Increase the batch size exponentially, so it will saturate to MAX_MULTI_OPS.
+            if (current_multi_batch_size < MAX_MULTI_OPS)
+                current_multi_batch_size = std::min(MAX_MULTI_OPS, current_multi_batch_size * 2);
+
             String last_entry = *last;
             if (!startsWith(last_entry, "log-"))
                 throw Exception("Error in zookeeper data: unexpected node " + last_entry + " in " + zookeeper_path + "/log",
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
index 88a61f50225..06dcddedd3e 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
@@ -259,6 +259,19 @@ private:
         ~CurrentlyExecuting();
     };
 
+    /// ZK contains a limit on the number or total size of operations in a multi-request.
+    /// If the limit is exceeded, the connection is simply closed.
+    /// The constant is selected with a margin. The default limit in ZK is 1 MB of data in total.
+    /// The average size of the node value in this case is less than 10 kilobytes.
+    static constexpr auto MAX_MULTI_OPS = 100;
+
+    /// Very large queue entries may appear occasionally.
+    /// We cannot process MAX_MULTI_OPS at once because it will fail.
+    /// But we have to process more than one entry at once because otherwise lagged replicas would catch up too slowly.
+    /// Let's start with one entry per transaction and increase it exponentially towards MAX_MULTI_OPS.
+    /// It will allow the replica to make some progress before failing and to remain operational even in extreme cases.
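+    /// For example, with MAX_MULTI_OPS = 100 the successive batch sizes are
+    /// 1, 2, 4, ..., 64, 100, 100, ... so right after a (re)start the transactions
+    /// are small, and the queue advances even when some entries are close to the limit.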
+    size_t current_multi_batch_size = 1;
+
 public:
     ReplicatedMergeTreeQueue(StorageReplicatedMergeTree & storage_);
     ~ReplicatedMergeTreeQueue();

From 22a89fca010d879310dd6cbbf7b172f0a918d4e0 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Sat, 24 Oct 2020 07:21:46 +0300
Subject: [PATCH 265/432] Fix processing very large entries in queue

---
 src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp | 6 +++++-
 src/Storages/MergeTree/ReplicatedMergeTreeQueue.h   | 2 +-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
index 1d4f6b38be8..a1f20016cb3 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
@@ -57,6 +57,7 @@ bool ReplicatedMergeTreeQueue::isVirtualPart(const MergeTreeData::DataPartPtr &
     return virtual_parts.getContainingPart(data_part->info) != data_part->name;
 }
 
+
 bool ReplicatedMergeTreeQueue::load(zkutil::ZooKeeperPtr zookeeper)
 {
     auto queue_path = replica_path + "/queue";
@@ -68,6 +69,9 @@ bool ReplicatedMergeTreeQueue::load(zkutil::ZooKeeperPtr zookeeper)
     {
         std::lock_guard pull_logs_lock(pull_logs_to_queue_mutex);
 
+        /// Reset the batch size on initialization to recover from possible errors caused by a batch size that was too large.
+        current_multi_batch_size = 1;
+
         String log_pointer_str = zookeeper->get(replica_path + "/log_pointer");
         log_pointer = log_pointer_str.empty() ? 0 : parse<UInt64>(log_pointer_str);
 
@@ -496,7 +500,7 @@ int32_t ReplicatedMergeTreeQueue::pullLogsToQueue(zkutil::ZooKeeperPtr zookeeper
 
             /// Increase the batch size exponentially, so it will saturate to MAX_MULTI_OPS.
             if (current_multi_batch_size < MAX_MULTI_OPS)
-                current_multi_batch_size = std::min(MAX_MULTI_OPS, current_multi_batch_size * 2);
+                current_multi_batch_size = std::min<size_t>(MAX_MULTI_OPS, current_multi_batch_size * 2);
 
             String last_entry = *last;
             if (!startsWith(last_entry, "log-"))
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
index 06dcddedd3e..93b79c8336c 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
@@ -263,7 +263,7 @@ private:
     /// If the limit is exceeded, the connection is simply closed.
    /// The constant is selected with a margin. The default limit in ZK is 1 MB of data in total.
     /// The average size of the node value in this case is less than 10 kilobytes.
-    static constexpr auto MAX_MULTI_OPS = 100;
+    static constexpr size_t MAX_MULTI_OPS = 100;

From 300f07bdba532ccdd789ce6300cf4388683a36ed Mon Sep 17 00:00:00 2001
From: Olga Revyakina
Date: Sun, 25 Oct 2020 05:58:39 +0300
Subject: [PATCH 266/432] Links fixed.
--- docs/en/operations/system-tables/crash-log.md | 4 ++-- docs/ru/operations/system-tables/crash-log.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en/operations/system-tables/crash-log.md b/docs/en/operations/system-tables/crash-log.md index 5bdf402513a..5b338237b83 100644 --- a/docs/en/operations/system-tables/crash-log.md +++ b/docs/en/operations/system-tables/crash-log.md @@ -43,6 +43,6 @@ build_id: ``` **See also** -- [trace_log](../../operations/system_tables/trace_log.md) system table +- [trace_log](../../operations/system-tables/trace_log.md) system table -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/crash-log) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/crash-log) diff --git a/docs/ru/operations/system-tables/crash-log.md b/docs/ru/operations/system-tables/crash-log.md index 19e9d19b198..7b645a06b2d 100644 --- a/docs/ru/operations/system-tables/crash-log.md +++ b/docs/ru/operations/system-tables/crash-log.md @@ -43,6 +43,6 @@ build_id: ``` **См. также** -- Системная таблица [trace_log](../../operations/system_tables/trace_log.md) +- Системная таблица [trace_log](../../operations/system-tables/trace_log.md) -[Original article](https://clickhouse.tech/docs/en/operations/system_tables/crash-log) +[Original article](https://clickhouse.tech/docs/en/operations/system-tables/crash-log) From 6762f967687d8f0e504f0c8f42d4fe39beefad9f Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Sun, 25 Oct 2020 08:25:21 +0300 Subject: [PATCH 267/432] Update 01526_client_start_and_exit.sh --- tests/queries/0_stateless/01526_client_start_and_exit.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01526_client_start_and_exit.sh b/tests/queries/0_stateless/01526_client_start_and_exit.sh index abcf51d8c50..0b3a2ee6b4f 100755 --- a/tests/queries/0_stateless/01526_client_start_and_exit.sh +++ b/tests/queries/0_stateless/01526_client_start_and_exit.sh @@ -9,7 +9,7 @@ ${CLICKHOUSE_CLIENT} -q "SELECT 'CREATE TABLE test_' || hex(randomPrintableASCII function stress() { while true; do - ./"$CURDIR"/01526_client_start_and_exit.expect | grep -v -P 'ClickHouse client|Connecting|Connected|:\) Bye\.|^\s*$|spawn bash|^0\s*$' + "$CURDIR"/01526_client_start_and_exit.expect | grep -v -P 'ClickHouse client|Connecting|Connected|:\) Bye\.|^\s*$|spawn bash|^0\s*$' done } From 440ae2bc57f29d9a5af3218dacdbc93dc6d8ffc0 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Sun, 25 Oct 2020 11:45:29 +0300 Subject: [PATCH 268/432] UUID safe cast functions added 1. Added readUUIDTextImpl, readUUIDText, tryReadUUIDText functions in ReadHelpers. 2. Added toUUIDOrNull, toUUIDOrZero functions based on ReadHelpers read implementations. 3. Updated documentation. 
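
For reference, a minimal usage sketch (it mirrors the new tests and documentation
below; unlike plain toUUID, invalid input does not throw an exception):

    SELECT
        toUUIDOrNull('61f0c404-5cb3-11e7-907b-a6006ad3dba0') AS parsed,
        toUUIDOrNull('not-a-uuid') AS null_on_error,  -- NULL
        toUUIDOrZero('not-a-uuid') AS zero_on_error;  -- 00000000-0000-0000-0000-000000000000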
--- .../sql-reference/functions/uuid-functions.md | 48 +++++++++++++++++++ src/Functions/FunctionsConversion.cpp | 4 ++ src/Functions/FunctionsConversion.h | 17 ++++++- src/IO/ReadHelpers.h | 38 +++++++++++++-- .../01528_to_uuid_or_null_or_zero.reference | 8 ++++ .../01528_to_uuid_or_null_or_zero.sql | 19 ++++++++ 6 files changed, 129 insertions(+), 5 deletions(-) create mode 100644 tests/queries/0_stateless/01528_to_uuid_or_null_or_zero.reference create mode 100644 tests/queries/0_stateless/01528_to_uuid_or_null_or_zero.sql diff --git a/docs/en/sql-reference/functions/uuid-functions.md b/docs/en/sql-reference/functions/uuid-functions.md index f608c643ee8..b747ac07bb8 100644 --- a/docs/en/sql-reference/functions/uuid-functions.md +++ b/docs/en/sql-reference/functions/uuid-functions.md @@ -61,6 +61,54 @@ SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') AS uuid └──────────────────────────────────────┘ ``` +## toUUIDOrNull (x) {#touuidornull-x} + +It takes an argument of type String and tries to parse it into UUID. If failed, returns NULL. + +``` sql +toUUIDOrNull(String) +``` + +**Returned value** + +The Nullable UUID type value. + +**Usage example** + +``` sql +SELECT toUUIDOrNull('61f0c404-5cb3-11e7-907b-a6006ad3dba0T') AS uuid +``` + +``` text +┌─uuid─┐ +│ ᴺᵁᴸᴸ │ +└──────┘ +``` + +## toUUIDOrZero (x) {#touuidorzero-x} + +It takes an argument of type String and tries to parse it into UUID. If failed, returns zero UUID. + +``` sql +toUUIDOrZero(String) +``` + +**Returned value** + +The UUID type value. + +**Usage example** + +``` sql +SELECT toUUIDOrZero('61f0c404-5cb3-11e7-907b-a6006ad3dba0T') AS uuid +``` + +``` text +┌─────────────────────────────────uuid─┐ +│ 00000000-0000-0000-0000-000000000000 │ +└──────────────────────────────────────┘ +``` + ## UUIDStringToNum {#uuidstringtonum} Accepts a string containing 36 characters in the format `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, and returns it as a set of bytes in a [FixedString(16)](../../sql-reference/data-types/fixedstring.md). diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 3f38614f584..df962800385 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -68,6 +68,8 @@ void registerFunctionsConversion(FunctionFactory & factory) factory.registerFunction(); factory.registerFunction(); + factory.registerFunction(); + factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); @@ -90,6 +92,8 @@ void registerFunctionsConversion(FunctionFactory & factory) factory.registerFunction(); factory.registerFunction(); + factory.registerFunction(); + factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 1cbf8cc3925..6e8847b7f09 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -568,7 +568,7 @@ template <> inline void parseImpl(DataTypeUUID::FieldType & x, ReadBuffer & rb, const DateLUTImpl *) { UUID tmp; - readText(tmp, rb); + readUUIDText(tmp, rb); x = tmp; } @@ -602,6 +602,17 @@ inline bool tryParseImpl(DataTypeDateTime::FieldType & x, Read return true; } +template <> +inline bool tryParseImpl(DataTypeUUID::FieldType & x, ReadBuffer & rb, const DateLUTImpl *) +{ + UUID tmp; + if (!tryReadUUIDText(tmp, rb)) + return false; + + x = tmp; + return true; +} + /** Throw exception with verbose message when string value is not parsed completely. 
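  * For example, converting the string '123abc' to UInt8 parses '123' and leaves 'abc'
  * unread; the leftover bytes must be reported as an error rather than silently dropped.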
*/ @@ -1754,6 +1765,7 @@ struct NameToDecimal32OrZero { static constexpr auto name = "toDecimal32OrZero"; struct NameToDecimal64OrZero { static constexpr auto name = "toDecimal64OrZero"; }; struct NameToDecimal128OrZero { static constexpr auto name = "toDecimal128OrZero"; }; struct NameToDecimal256OrZero { static constexpr auto name = "toDecimal256OrZero"; }; +struct NameToUUIDOrZero { static constexpr auto name = "toUUIDOrZero"; }; using FunctionToUInt8OrZero = FunctionConvertFromString; using FunctionToUInt16OrZero = FunctionConvertFromString; @@ -1775,6 +1787,7 @@ using FunctionToDecimal32OrZero = FunctionConvertFromString, NameToDecimal64OrZero, ConvertFromStringExceptionMode::Zero>; using FunctionToDecimal128OrZero = FunctionConvertFromString, NameToDecimal128OrZero, ConvertFromStringExceptionMode::Zero>; using FunctionToDecimal256OrZero = FunctionConvertFromString, NameToDecimal256OrZero, ConvertFromStringExceptionMode::Zero>; +using FunctionToUUIDOrZero = FunctionConvertFromString; struct NameToUInt8OrNull { static constexpr auto name = "toUInt8OrNull"; }; struct NameToUInt16OrNull { static constexpr auto name = "toUInt16OrNull"; }; @@ -1796,6 +1809,7 @@ struct NameToDecimal32OrNull { static constexpr auto name = "toDecimal32OrNull"; struct NameToDecimal64OrNull { static constexpr auto name = "toDecimal64OrNull"; }; struct NameToDecimal128OrNull { static constexpr auto name = "toDecimal128OrNull"; }; struct NameToDecimal256OrNull { static constexpr auto name = "toDecimal256OrNull"; }; +struct NameToUUIDOrNull { static constexpr auto name = "toUUIDOrNull"; }; using FunctionToUInt8OrNull = FunctionConvertFromString; using FunctionToUInt16OrNull = FunctionConvertFromString; @@ -1817,6 +1831,7 @@ using FunctionToDecimal32OrNull = FunctionConvertFromString, NameToDecimal64OrNull, ConvertFromStringExceptionMode::Null>; using FunctionToDecimal128OrNull = FunctionConvertFromString, NameToDecimal128OrNull, ConvertFromStringExceptionMode::Null>; using FunctionToDecimal256OrNull = FunctionConvertFromString, NameToDecimal256OrNull, ConvertFromStringExceptionMode::Null>; +using FunctionToUUIDOrNull = FunctionConvertFromString; struct NameParseDateTimeBestEffort { static constexpr auto name = "parseDateTimeBestEffort"; }; struct NameParseDateTimeBestEffortUS { static constexpr auto name = "parseDateTimeBestEffortUS"; }; diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index 90a56af3c34..d79328889f1 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -619,9 +619,11 @@ inline bool tryReadDateText(DayNum & date, ReadBuffer & buf) return readDateTextImpl(date, buf); } - -inline void readUUIDText(UUID & uuid, ReadBuffer & buf) +template +inline ReturnType readUUIDTextImpl(UUID & uuid, ReadBuffer & buf) { + static constexpr bool throw_exception = std::is_same_v; + char s[36]; size_t size = buf.read(s, 32); @@ -634,21 +636,49 @@ inline void readUUIDText(UUID & uuid, ReadBuffer & buf) if (size != 36) { s[size] = 0; - throw Exception(std::string("Cannot parse uuid ") + s, ErrorCodes::CANNOT_PARSE_UUID); + + if constexpr (throw_exception) + { + throw Exception(std::string("Cannot parse uuid ") + s, ErrorCodes::CANNOT_PARSE_UUID); + } + else + { + return ReturnType(false); + } } parseUUID(reinterpret_cast(s), std::reverse_iterator(reinterpret_cast(&uuid) + 16)); } else parseUUIDWithoutSeparator(reinterpret_cast(s), std::reverse_iterator(reinterpret_cast(&uuid) + 16)); + + return ReturnType(true); } else { s[size] = 0; - throw Exception(std::string("Cannot parse uuid ") + s, 
ErrorCodes::CANNOT_PARSE_UUID);
+
+        if constexpr (throw_exception)
+        {
+            throw Exception(std::string("Cannot parse uuid ") + s, ErrorCodes::CANNOT_PARSE_UUID);
+        }
+        else
+        {
+            return ReturnType(false);
+        }
     }
 }
 
+inline void readUUIDText(UUID & uuid, ReadBuffer & buf)
+{
+    return readUUIDTextImpl<void>(uuid, buf);
+}
+
+inline bool tryReadUUIDText(UUID & uuid, ReadBuffer & buf)
+{
+    return readUUIDTextImpl<bool>(uuid, buf);
+}
+
 template <typename T>
 inline T parse(const char * data, size_t size);
diff --git a/tests/queries/0_stateless/01528_to_uuid_or_null_or_zero.reference b/tests/queries/0_stateless/01528_to_uuid_or_null_or_zero.reference
new file mode 100644
index 00000000000..041e329748e
--- /dev/null
+++ b/tests/queries/0_stateless/01528_to_uuid_or_null_or_zero.reference
@@ -0,0 +1,8 @@
+61f0c404-5cb3-11e7-907b-a6006ad3dba0
+\N
+00000000-0000-0000-0000-000000000000
+61f0c404-5cb3-11e7-907b-a6006ad3dba0
+61f0c404-5cb3-11e7-907b-a6006ad3dba0
+\N
+61f0c404-5cb3-11e7-907b-a6006ad3dba0
+00000000-0000-0000-0000-000000000000
diff --git a/tests/queries/0_stateless/01528_to_uuid_or_null_or_zero.sql b/tests/queries/0_stateless/01528_to_uuid_or_null_or_zero.sql
new file mode 100644
index 00000000000..ae6a1b2db04
--- /dev/null
+++ b/tests/queries/0_stateless/01528_to_uuid_or_null_or_zero.sql
@@ -0,0 +1,19 @@
+DROP TABLE IF EXISTS to_uuid_test;
+
+SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0');
+SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0T'); --{serverError 6}
+SELECT toUUIDOrNull('61f0c404-5cb3-11e7-907b-a6006ad3dba0T');
+SELECT toUUIDOrZero('59f0c404-5cb3-11e7-907b-a6006ad3dba0T');
+
+CREATE TABLE to_uuid_test (value String) ENGINE = TinyLog();
+
+INSERT INTO to_uuid_test VALUES ('61f0c404-5cb3-11e7-907b-a6006ad3dba0');
+SELECT toUUID(value) FROM to_uuid_test;
+
+INSERT INTO to_uuid_test VALUES ('61f0c404-5cb3-11e7-907b-a6006ad3dba0T');
+SELECT toUUID(value) FROM to_uuid_test; -- {serverError 6}
+SELECT toUUIDOrNull(value) FROM to_uuid_test;
+SELECT toUUIDOrZero(value) FROM to_uuid_test;
+
+DROP TABLE to_uuid_test;
+

From edc8d6e5e76560eca7b59feb62eb1c06c4167d9d Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Sun, 25 Oct 2020 14:14:52 +0300
Subject: [PATCH 269/432] Fix async Distributed INSERT w/ prefer_localhost_replica=0 and internal_replication

---
 programs/server/config.xml                    | 16 +++++++++
 src/Interpreters/Cluster.cpp                  | 11 ++++--
 ..._directory_monitor_batch_inserts.reference | 16 +++++++++
 ...ibuted_directory_monitor_batch_inserts.sql | 36 ++++++++++++++++++-
 4 files changed, 75 insertions(+), 4 deletions(-)

diff --git a/programs/server/config.xml b/programs/server/config.xml
index 9850d77abb7..5bdec5377fd 100644
--- a/programs/server/config.xml
+++ b/programs/server/config.xml
@@ -392,6 +392,22 @@
 
+        <test_cluster_two_shards_internal_replication>
+            <shard>
+                <internal_replication>true</internal_replication>
+                <replica>
+                    <host>127.0.0.1</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+            <shard>
+                <internal_replication>true</internal_replication>
+                <replica>
+                    <host>127.0.0.2</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+        </test_cluster_two_shards_internal_replication>
 
diff --git a/src/Interpreters/Cluster.cpp b/src/Interpreters/Cluster.cpp
index 8a98e8282a6..9c2766ae7d6 100644
--- a/src/Interpreters/Cluster.cpp
+++ b/src/Interpreters/Cluster.cpp
@@ -614,13 +614,18 @@ const std::string & Cluster::ShardInfo::pathForInsert(bool prefer_localhost_repl
     if (!has_internal_replication)
         throw Exception("internal_replication is not set", ErrorCodes::LOGICAL_ERROR);
 
-    if (dir_name_for_internal_replication.empty() || dir_name_for_internal_replication_with_local.empty())
-        throw Exception("Directory name for async inserts is empty", ErrorCodes::LOGICAL_ERROR);
-
     if (prefer_localhost_replica)
+    {
+        if (dir_name_for_internal_replication.empty())
+            throw Exception("Directory name for async inserts is empty", 
ErrorCodes::LOGICAL_ERROR); return dir_name_for_internal_replication; + } else + { + if (dir_name_for_internal_replication_with_local.empty()) + throw Exception("Directory name for async inserts is empty", ErrorCodes::LOGICAL_ERROR); return dir_name_for_internal_replication_with_local; + } } bool Cluster::maybeCrossReplication() const diff --git a/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.reference b/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.reference index 5565ed6787f..03e58c13ff2 100644 --- a/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.reference +++ b/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.reference @@ -1,4 +1,20 @@ +test_cluster_two_shards prefer_localhost_replica=0 +0 0 1 +1 +test_cluster_two_shards prefer_localhost_replica=1 +0 0 1 +1 +test_cluster_two_shards_internal_replication prefer_localhost_replica=0 +0 +0 +1 +1 +test_cluster_two_shards_internal_replication prefer_localhost_replica=1 +0 +0 +1 +1 diff --git a/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.sql b/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.sql index dbec319ab76..dec748789c8 100644 --- a/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.sql +++ b/tests/queries/0_stateless/01040_distributed_directory_monitor_batch_inserts.sql @@ -7,6 +7,40 @@ DROP TABLE IF EXISTS dist_test_01040; CREATE TABLE test_01040 (key UInt64) ENGINE=TinyLog(); CREATE TABLE dist_test_01040 AS test_01040 Engine=Distributed(test_cluster_two_shards, currentDatabase(), test_01040, key); + +-- internal_replication=false +SELECT 'test_cluster_two_shards prefer_localhost_replica=0'; +SET prefer_localhost_replica=0; INSERT INTO dist_test_01040 SELECT toUInt64(number) FROM numbers(2); SYSTEM FLUSH DISTRIBUTED dist_test_01040; -SELECT * FROM dist_test_01040; +SELECT * FROM dist_test_01040 ORDER BY key; +TRUNCATE TABLE test_01040; + +SELECT 'test_cluster_two_shards prefer_localhost_replica=1'; +SET prefer_localhost_replica=1; +INSERT INTO dist_test_01040 SELECT toUInt64(number) FROM numbers(2); +SYSTEM FLUSH DISTRIBUTED dist_test_01040; +SELECT * FROM dist_test_01040 ORDER BY key; +TRUNCATE TABLE test_01040; + +DROP TABLE dist_test_01040; + +-- internal_replication=true +CREATE TABLE dist_test_01040 AS test_01040 Engine=Distributed(test_cluster_two_shards_internal_replication, currentDatabase(), test_01040, key); +SELECT 'test_cluster_two_shards_internal_replication prefer_localhost_replica=0'; +SET prefer_localhost_replica=0; +INSERT INTO dist_test_01040 SELECT toUInt64(number) FROM numbers(2); +SYSTEM FLUSH DISTRIBUTED dist_test_01040; +SELECT * FROM dist_test_01040 ORDER BY key; +TRUNCATE TABLE test_01040; + +SELECT 'test_cluster_two_shards_internal_replication prefer_localhost_replica=1'; +SET prefer_localhost_replica=1; +INSERT INTO dist_test_01040 SELECT toUInt64(number) FROM numbers(2); +SYSTEM FLUSH DISTRIBUTED dist_test_01040; +SELECT * FROM dist_test_01040 ORDER BY key; +TRUNCATE TABLE test_01040; + + +DROP TABLE dist_test_01040; +DROP TABLE test_01040; From f97e6beb70984d65aa705dfe3b4bba467d824d2f Mon Sep 17 00:00:00 2001 From: alesapin Date: Sun, 25 Oct 2020 15:34:18 +0300 Subject: [PATCH 270/432] Remove message broker pool from context --- src/Interpreters/Context.cpp | 13 ------------- src/Interpreters/Context.h | 1 - 2 files changed, 14 deletions(-) diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp 
index 7f2ada8a426..9d2ea6ded86 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -335,7 +335,6 @@ struct ContextShared std::optional background_move_pool; /// The thread pool for the background moves performed by the tables. std::optional schedule_pool; /// A thread pool that can run different jobs in background (used in replicated tables) std::optional distributed_schedule_pool; /// A thread pool that can run different jobs in background (used for distributed sends) - std::optional message_broker_schedule_pool; /// A thread pool that can run different jobs in background (used in kafka streaming) MultiVersion macros; /// Substitutions extracted from config. std::unique_ptr ddl_worker; /// Process ddl commands from zk. /// Rules for selecting the compression settings, depending on the size of the part. @@ -438,7 +437,6 @@ struct ContextShared schedule_pool.reset(); distributed_schedule_pool.reset(); ddl_worker.reset(); - message_broker_schedule_pool.reset(); /// Stop trace collector if any trace_collector.reset(); @@ -1441,17 +1439,6 @@ BackgroundSchedulePool & Context::getDistributedSchedulePool() return *shared->distributed_schedule_pool; } -BackgroundSchedulePool & Context::getMessageBrokerSchedulePool() -{ - auto lock = getLock(); - if (!shared->message_broker_schedule_pool) - shared->message_broker_schedule_pool.emplace( - settings.background_message_broker_schedule_pool_size, - CurrentMetrics::BackgroundMessageBrokerSchedulePoolTask, - "BgMBSchPool"); - return *shared->message_broker_schedule_pool; -} - void Context::setDDLWorker(std::unique_ptr ddl_worker) { auto lock = getLock(); diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index bd5e17fe2e4..075fc3837ef 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -511,7 +511,6 @@ public: BackgroundProcessingPool & getBackgroundPool(); BackgroundProcessingPool & getBackgroundMovePool(); BackgroundSchedulePool & getSchedulePool(); - BackgroundSchedulePool & getMessageBrokerSchedulePool(); BackgroundSchedulePool & getDistributedSchedulePool(); void setDDLWorker(std::unique_ptr ddl_worker); From 4bcbcfed1f243a1759b10b3df58409e23909f82b Mon Sep 17 00:00:00 2001 From: Vitaliy Zakaznikov Date: Sun, 25 Oct 2020 09:40:19 -0400 Subject: [PATCH 271/432] Fixing procedure of getting log file size in LDAP tests. Increasing default timeouts. --- .../ldap/authentication/tests/common.py | 16 ++++++++-------- .../ldap/external_user_directory/tests/common.py | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/testflows/ldap/authentication/tests/common.py b/tests/testflows/ldap/authentication/tests/common.py index 4e3d1e16647..1c662b3898b 100644 --- a/tests/testflows/ldap/authentication/tests/common.py +++ b/tests/testflows/ldap/authentication/tests/common.py @@ -47,7 +47,7 @@ ASCII_CHARS = string.ascii_lowercase + string.ascii_uppercase + string.digits def randomword(length, chars=ASCII_CHARS): return ''.join(random.choice(chars) for i in range(length)) -def restart(node=None, safe=False, timeout=20): +def restart(node=None, safe=False, timeout=60): """Restart ClickHouse server and wait for config to be reloaded. 
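+
+    :param node: node to restart
+    :param safe: whether to restart the server safely
+    :param timeout: time in sec to wait for the config to be reloaded, default: 60 sec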
""" with When("I restart ClickHouse server node"): @@ -62,7 +62,7 @@ def restart(node=None, safe=False, timeout=20): with And("getting current log size"): logsize = \ - node.command("ls -s --block-size=1 /var/log/clickhouse-server/clickhouse-server.log").output.split(" ")[ + node.command("stat --format=%s /var/log/clickhouse-server/clickhouse-server.log").output.split(" ")[ 0].strip() with And("restarting ClickHouse server"): @@ -78,7 +78,7 @@ def restart(node=None, safe=False, timeout=20): f"ConfigReloader: Loaded config '/etc/clickhouse-server/config.xml', performed update on configuration", timeout=timeout) -def add_config(config, timeout=20, restart=False): +def add_config(config, timeout=60, restart=False): """Add dynamic configuration file to ClickHouse. :param node: node @@ -108,7 +108,7 @@ def add_config(config, timeout=20, restart=False): with And("I get the current log size"): logsize = \ - node.command("ls -s --block-size=1 /var/log/clickhouse-server/clickhouse-server.log").output.split(" ")[ + node.command("stat --format=%s /var/log/clickhouse-server/clickhouse-server.log").output.split(" ")[ 0].strip() with And("I restart ClickHouse to apply the config changes"): @@ -189,7 +189,7 @@ def create_ldap_servers_config_content(servers, config_d_dir="/etc/clickhouse-se @contextmanager def ldap_servers(servers, config_d_dir="/etc/clickhouse-server/config.d", config_file="ldap_servers.xml", - timeout=20, restart=False): + timeout=60, restart=False): """Add LDAP servers configuration. """ config = create_ldap_servers_config_content(servers, config_d_dir, config_file) @@ -236,7 +236,7 @@ def add_users_identified_with_ldap(*users): @contextmanager def ldap_authenticated_users(*users, config_d_dir="/etc/clickhouse-server/users.d", - config_file=None, timeout=20, restart=True, config=None, rbac=False): + config_file=None, timeout=60, restart=True, config=None, rbac=False): """Add LDAP authenticated users. """ if rbac: @@ -248,7 +248,7 @@ def ldap_authenticated_users(*users, config_d_dir="/etc/clickhouse-server/users. config = create_ldap_users_config_content(*users, config_d_dir=config_d_dir, config_file=config_file) return add_config(config, restart=restart) -def invalid_server_config(servers, message=None, tail=13, timeout=20): +def invalid_server_config(servers, message=None, tail=13, timeout=60): """Check that ClickHouse errors when trying to load invalid LDAP servers configuration file. """ node = current().context.node @@ -277,7 +277,7 @@ def invalid_server_config(servers, message=None, tail=13, timeout=20): with By("removing the config file", description=config.path): node.command(f"rm -rf {config.path}", exitcode=0) -def invalid_user_config(servers, config, message=None, tail=13, timeout=20): +def invalid_user_config(servers, config, message=None, tail=13, timeout=60): """Check that ClickHouse errors when trying to load invalid LDAP users configuration file. 
""" node = current().context.node diff --git a/tests/testflows/ldap/external_user_directory/tests/common.py b/tests/testflows/ldap/external_user_directory/tests/common.py index d6f414e617a..38b53ca6e9f 100644 --- a/tests/testflows/ldap/external_user_directory/tests/common.py +++ b/tests/testflows/ldap/external_user_directory/tests/common.py @@ -129,7 +129,7 @@ def create_entries_ldap_external_user_directory_config_content(entries, config_d return Config(content, path, name, uid, "config.xml") -def invalid_ldap_external_user_directory_config(server, roles, message, tail=20, timeout=20, config=None): +def invalid_ldap_external_user_directory_config(server, roles, message, tail=20, timeout=60, config=None): """Check that ClickHouse errors when trying to load invalid LDAP external user directory configuration file. """ @@ -181,7 +181,7 @@ def invalid_ldap_external_user_directory_config(server, roles, message, tail=20, @contextmanager def ldap_external_user_directory(server, roles, config_d_dir="/etc/clickhouse-server/config.d", - config_file=None, timeout=20, restart=True, config=None): + config_file=None, timeout=60, restart=True, config=None): """Add LDAP external user directory. """ if config_file is None: From f8f2d62d704c92ef59bb6428b07aa1f2e257902d Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Sun, 25 Oct 2020 20:27:21 +0300 Subject: [PATCH 272/432] minor improvements in DatabaseCatalog --- src/Common/CurrentMetrics.cpp | 1 + src/Databases/DatabaseAtomic.cpp | 1 + src/Databases/DatabaseMemory.cpp | 3 + src/Databases/DatabaseWithDictionaries.cpp | 4 ++ src/Interpreters/DatabaseCatalog.cpp | 72 +++++++++++++++++++--- src/Interpreters/DatabaseCatalog.h | 9 ++- 6 files changed, 79 insertions(+), 11 deletions(-) diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index ef0b82666dd..3d6a2d6f99c 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -54,6 +54,7 @@ M(LocalThread, "Number of threads in local thread pools. Should be similar to GlobalThreadActive.") \ M(LocalThreadActive, "Number of threads in local thread pools running a task.") \ M(DistributedFilesToInsert, "Number of pending files to process for asynchronous insertion into Distributed tables. 
Number of files for every shard is summed.") \ + M(TablesToDropQueueSize, "Number of dropped tables, that are waiting for background data removal.") \ namespace CurrentMetrics { diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index a9dbae8ec92..6c1ca1e8fce 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -269,6 +269,7 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora database_name, query.database); not_in_use = cleanupDetachedTables(); assertDetachedTableNotInUse(query.uuid); + DatabaseCatalog::instance().addUUIDMapping(query.uuid, {}, {}); renameNoReplace(table_metadata_tmp_path, table_metadata_path); attachTableUnlocked(query.table, table, lock); /// Should never throw table_name_to_path.emplace(query.table, table_data_path); diff --git a/src/Databases/DatabaseMemory.cpp b/src/Databases/DatabaseMemory.cpp index 5eacb846d52..357acb32371 100644 --- a/src/Databases/DatabaseMemory.cpp +++ b/src/Databases/DatabaseMemory.cpp @@ -53,6 +53,9 @@ void DatabaseMemory::dropTable( } table->is_dropped = true; create_queries.erase(table_name); + UUID table_uuid = table->getStorageID().uuid; + if (table_uuid != UUIDHelpers::Nil) + DatabaseCatalog::instance().removeUUIDMappingFinally(table_uuid); } ASTPtr DatabaseMemory::getCreateDatabaseQuery() const diff --git a/src/Databases/DatabaseWithDictionaries.cpp b/src/Databases/DatabaseWithDictionaries.cpp index ed85028d04d..6c5173c986f 100644 --- a/src/Databases/DatabaseWithDictionaries.cpp +++ b/src/Databases/DatabaseWithDictionaries.cpp @@ -223,6 +223,10 @@ void DatabaseWithDictionaries::removeDictionary(const Context &, const String & attachDictionary(dictionary_name, attach_info); throw; } + + UUID dict_uuid = attach_info.create_query->as()->uuid; + if (dict_uuid != UUIDHelpers::Nil) + DatabaseCatalog::instance().removeUUIDMappingFinally(dict_uuid); } DatabaseDictionariesIteratorPtr DatabaseWithDictionaries::getDictionariesIterator(const FilterByNameFunction & filter_by_dictionary_name) diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index cfe1046aceb..e130646be11 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -13,9 +13,16 @@ #include #include #include +#include #include + +namespace CurrentMetrics +{ + extern const Metric TablesToDropQueueSize; +} + namespace DB { @@ -411,36 +418,71 @@ DatabasePtr DatabaseCatalog::getSystemDatabase() const return getDatabase(SYSTEM_DATABASE); } -void DatabaseCatalog::addUUIDMapping(const UUID & uuid, DatabasePtr database, StoragePtr table) +void DatabaseCatalog::addUUIDMapping(const UUID & uuid, const DatabasePtr & database, const StoragePtr & table) { assert(uuid != UUIDHelpers::Nil && getFirstLevelIdx(uuid) < uuid_map.size()); + assert((database && table) || (!database && !table)); UUIDToStorageMapPart & map_part = uuid_map[getFirstLevelIdx(uuid)]; std::lock_guard lock{map_part.mutex}; - auto [_, inserted] = map_part.map.try_emplace(uuid, std::move(database), std::move(table)); + auto [it, inserted] = map_part.map.try_emplace(uuid, database, table); + if (inserted) + return; + + auto & prev_database = it->second.first; + auto & prev_table = it->second.second; + assert((prev_database && prev_table) || (!prev_database && !prev_table)); + + if (!prev_table && table) + { + /// It's empty mapping, it was created to "lock" UUID and prevent collision. Just update it. 
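+        /// (For example, DatabaseAtomic::commitCreateTable() above first locks the UUID with
+        /// addUUIDMapping(query.uuid, {}, {}), and the real table pointer is filled in here
+        /// when the table is actually attached.)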
+ prev_database = database; + prev_table = table; + return; + } + + /// We are trying to replace existing mapping (prev_table != nullptr), it's logical error + if (table) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Mapping for table with UUID={} already exists", toString(uuid)); /// Normally this should never happen, but it's possible when the same UUIDs are explicitly specified in different CREATE queries, /// so it's not LOGICAL_ERROR - if (!inserted) - throw Exception("Mapping for table with UUID=" + toString(uuid) + " already exists", ErrorCodes::TABLE_ALREADY_EXISTS); + throw Exception(ErrorCodes::TABLE_ALREADY_EXISTS, "Mapping for table with UUID={} already exists. It happened due to UUID collision, " + "most likely because some not random UUIDs were manually specified in CREATE queries.", toString(uuid)); } void DatabaseCatalog::removeUUIDMapping(const UUID & uuid) +{ + assert(uuid != UUIDHelpers::Nil && getFirstLevelIdx(uuid) < uuid_map.size()); + UUIDToStorageMapPart & map_part = uuid_map[getFirstLevelIdx(uuid)]; + std::lock_guard lock{map_part.mutex}; + auto it = map_part.map.find(uuid); + if (it == map_part.map.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Mapping for table with UUID={} doesn't exist", toString(uuid)); + it->second = {}; +} + +void DatabaseCatalog::removeUUIDMappingFinally(const UUID & uuid) { assert(uuid != UUIDHelpers::Nil && getFirstLevelIdx(uuid) < uuid_map.size()); UUIDToStorageMapPart & map_part = uuid_map[getFirstLevelIdx(uuid)]; std::lock_guard lock{map_part.mutex}; if (!map_part.map.erase(uuid)) - throw Exception("Mapping for table with UUID=" + toString(uuid) + " doesn't exist", ErrorCodes::LOGICAL_ERROR); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Mapping for table with UUID={} doesn't exist", toString(uuid)); } void DatabaseCatalog::updateUUIDMapping(const UUID & uuid, DatabasePtr database, StoragePtr table) { assert(uuid != UUIDHelpers::Nil && getFirstLevelIdx(uuid) < uuid_map.size()); + assert(database && table); UUIDToStorageMapPart & map_part = uuid_map[getFirstLevelIdx(uuid)]; std::lock_guard lock{map_part.mutex}; auto it = map_part.map.find(uuid); if (it == map_part.map.end()) - throw Exception("Mapping for table with UUID=" + toString(uuid) + " doesn't exist", ErrorCodes::LOGICAL_ERROR); - it->second = std::make_pair(std::move(database), std::move(table)); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Mapping for table with UUID={} doesn't exist", toString(uuid)); + auto & prev_database = it->second.first; + auto & prev_table = it->second.second; + assert(prev_database && prev_table); + prev_database = std::move(database); + prev_table = std::move(table); } std::unique_ptr DatabaseCatalog::database_catalog; @@ -631,6 +673,8 @@ void DatabaseCatalog::loadMarkedAsDroppedTables() dropped_metadata.emplace(std::move(full_path), std::move(dropped_id)); } + LOG_INFO(log, "Found {} partially dropped tables. Will load them and retry removal.", dropped_metadata.size()); + ThreadPool pool; for (const auto & elem : dropped_metadata) { @@ -695,6 +739,7 @@ void DatabaseCatalog::enqueueDroppedTableCleanup(StorageID table_id, StoragePtr LOG_WARNING(log, "Cannot parse metadata of partially dropped table {} from {}. Will remove metadata file and data directory. 
Garbage may be left in /store directory and ZooKeeper.", table_id.getNameForLogs(), dropped_metadata_path); } + addUUIDMapping(table_id.uuid, {}, {}); drop_time = Poco::File(dropped_metadata_path).getLastModified().epochTime(); } @@ -704,6 +749,8 @@ void DatabaseCatalog::enqueueDroppedTableCleanup(StorageID table_id, StoragePtr else tables_marked_dropped.push_back({table_id, table, dropped_metadata_path, drop_time}); tables_marked_dropped_ids.insert(table_id.uuid); + CurrentMetrics::add(CurrentMetrics::TablesToDropQueueSize, 1); + /// If list of dropped tables was empty, start a drop task if (drop_task && tables_marked_dropped.size() == 1) (*drop_task)->schedule(); @@ -732,6 +779,10 @@ void DatabaseCatalog::dropTableDataTask() LOG_INFO(log, "Will try drop {}", table.table_id.getNameForLogs()); tables_marked_dropped.erase(it); } + else + { + LOG_TRACE(log, "No tables to drop. Queue size: {}", tables_marked_dropped.size()); + } need_reschedule = !tables_marked_dropped.empty(); } catch (...) @@ -770,7 +821,7 @@ void DatabaseCatalog::dropTableDataTask() (*drop_task)->scheduleAfter(reschedule_time_ms); } -void DatabaseCatalog::dropTableFinally(const TableMarkedAsDropped & table) const +void DatabaseCatalog::dropTableFinally(const TableMarkedAsDropped & table) { if (table.table) { @@ -789,6 +840,9 @@ void DatabaseCatalog::dropTableFinally(const TableMarkedAsDropped & table) const LOG_INFO(log, "Removing metadata {} of dropped table {}", table.metadata_path, table.table_id.getNameForLogs()); Poco::File(table.metadata_path).remove(); + + removeUUIDMappingFinally(table.table_id.uuid); + CurrentMetrics::sub(CurrentMetrics::TablesToDropQueueSize, 1); } String DatabaseCatalog::getPathForUUID(const UUID & uuid) @@ -826,6 +880,8 @@ void DatabaseCatalog::waitTableFinallyDropped(const UUID & uuid) { if (uuid == UUIDHelpers::Nil) return; + + LOG_DEBUG(log, "Waiting for table {} to be finally dropped", toString(uuid)); std::unique_lock lock{tables_marked_dropped_mutex}; wait_table_finally_dropped.wait(lock, [&]() { diff --git a/src/Interpreters/DatabaseCatalog.h b/src/Interpreters/DatabaseCatalog.h index c6f50117564..46646f2ff05 100644 --- a/src/Interpreters/DatabaseCatalog.h +++ b/src/Interpreters/DatabaseCatalog.h @@ -165,10 +165,13 @@ public: void updateDependency(const StorageID & old_from, const StorageID & old_where,const StorageID & new_from, const StorageID & new_where); /// If table has UUID, addUUIDMapping(...) must be called when table attached to some database - /// and removeUUIDMapping(...) must be called when it detached. + /// removeUUIDMapping(...) must be called when it detached, + /// and removeUUIDMappingFinally(...) must be called when table is dropped and its data removed from disk. + /// To "lock" some UUID and prevent collision, addUUIDMapping(...) may be called with nullptr arguments. /// Such tables can be accessed by persistent UUID instead of database and table name. 
- void addUUIDMapping(const UUID & uuid, DatabasePtr database, StoragePtr table); + void addUUIDMapping(const UUID & uuid, const DatabasePtr & database, const StoragePtr & table); void removeUUIDMapping(const UUID & uuid); + void removeUUIDMappingFinally(const UUID & uuid); /// For moving table between databases void updateUUIDMapping(const UUID & uuid, DatabasePtr database, StoragePtr table); @@ -222,7 +225,7 @@ private: void loadMarkedAsDroppedTables(); void dropTableDataTask(); - void dropTableFinally(const TableMarkedAsDropped & table) const; + void dropTableFinally(const TableMarkedAsDropped & table); static constexpr size_t reschedule_time_ms = 100; From 934f64a2fdd8578097eb175f3cebd2ba771764aa Mon Sep 17 00:00:00 2001 From: tavplubix Date: Mon, 26 Oct 2020 00:51:30 +0300 Subject: [PATCH 273/432] Update DatabaseCatalog.cpp --- src/Interpreters/DatabaseCatalog.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index e130646be11..90e1a32827f 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -162,7 +162,12 @@ void DatabaseCatalog::shutdownImpl() tables_marked_dropped.clear(); std::lock_guard lock(databases_mutex); - assert(std::find_if_not(uuid_map.begin(), uuid_map.end(), [](const auto & elem) { return elem.map.empty(); }) == uuid_map.end()); + assert(std::find_if(uuid_map.begin(), uuid_map.end(), [](const auto & elem) + { + const auto & not_empty_mapping = [] (const auto & mapping) { return mapping.second.second; }; + auto it = std::find_if(elem.map.begin(), elem.map.end(), not_empty_mapping); + return it != elem.map.end(); + }) == uuid_map.end()); databases.clear(); db_uuid_map.clear(); view_dependencies.clear(); From be95d3d854dc9bc96995944882482b090d34b44c Mon Sep 17 00:00:00 2001 From: Vitaliy Zakaznikov Date: Sun, 25 Oct 2020 21:09:14 -0400 Subject: [PATCH 274/432] Fixing another issue in LDAP tests. --- .../ldap/authentication/tests/common.py | 20 ++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/tests/testflows/ldap/authentication/tests/common.py b/tests/testflows/ldap/authentication/tests/common.py index 1c662b3898b..0fd9670fae0 100644 --- a/tests/testflows/ldap/authentication/tests/common.py +++ b/tests/testflows/ldap/authentication/tests/common.py @@ -85,19 +85,29 @@ def add_config(config, timeout=60, restart=False): :param config: configuration file description :param timeout: timeout, default: 20 sec """ - def check_preprocessed_config_is_updated(): + def check_preprocessed_config_is_updated(after_removal=False): """Check that preprocessed config is updated. """ started = time.time() command = f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name} | grep {config.uid}{' > /dev/null' if not settings.debug else ''}" + while time.time() - started < timeout: exitcode = node.command(command, steps=False).exitcode - if exitcode == 0: - break + if after_removal: + if exitcode == 1: + break + else: + if exitcode == 0: + break time.sleep(1) + if settings.debug: node.command(f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name}") - assert exitcode == 0, error() + + if after_removal: + assert exitcode == 1, error() + else: + assert exitcode == 0, error() def wait_for_config_to_be_loaded(): """Wait for config to be loaded. 
@@ -160,7 +170,7 @@ def add_config(config, timeout=60, restart=False): node.command(f"rm -rf {config.path}", exitcode=0) with Then(f"{config.preprocessed_name} should be updated", description=f"timeout {timeout}"): - check_preprocessed_config_is_updated() + check_preprocessed_config_is_updated(after_removal=True) with And("I wait for config to be reloaded"): wait_for_config_to_be_loaded() From dd84fb572fa5267472749b2c2f507c907ca6c3d0 Mon Sep 17 00:00:00 2001 From: Vladimir Chebotarev Date: Mon, 26 Oct 2020 04:33:28 +0300 Subject: [PATCH 275/432] More diagnostics. --- tests/integration/test_multiple_disks/test.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_multiple_disks/test.py b/tests/integration/test_multiple_disks/test.py index b0159d16501..07478d99657 100644 --- a/tests/integration/test_multiple_disks/test.py +++ b/tests/integration/test_multiple_disks/test.py @@ -449,8 +449,11 @@ def test_jbod_overflow(start_cluster, name, engine): data.append(get_random_string(1024 * 1024)) # 1MB row node1.query("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data]))) + for p in ("/jbod1", "/jbod2", "/external"): + print(node1.exec_in_container([f"bash", "-c", f"find {p} | xargs -n1 du -sh"])) + used_disks = get_used_disks_for_table(node1, name) - assert all(disk == 'jbod1' for disk in used_disks) + assert set(used_disks) == {'jbod1'} # should go to the external disk (jbod is overflown) data = [] # 10MB in total From d5e277f08026d88c2292faa3d3ce4d2c25373c02 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 26 Oct 2020 06:35:57 +0300 Subject: [PATCH 276/432] Better exceptions rethrow --- src/Databases/MySQL/MaterializeMySQLSettings.cpp | 6 ++---- src/Interpreters/ActionsVisitor.cpp | 4 ++-- src/Storages/JoinSettings.cpp | 6 ++---- src/Storages/Kafka/KafkaSettings.cpp | 6 ++---- src/Storages/MergeTree/MergeTreeSettings.cpp | 12 ++++-------- src/Storages/RabbitMQ/RabbitMQSettings.cpp | 6 ++---- src/Storages/SetSettings.cpp | 6 ++---- 7 files changed, 16 insertions(+), 30 deletions(-) diff --git a/src/Databases/MySQL/MaterializeMySQLSettings.cpp b/src/Databases/MySQL/MaterializeMySQLSettings.cpp index 609ce011f91..a8672bf488e 100644 --- a/src/Databases/MySQL/MaterializeMySQLSettings.cpp +++ b/src/Databases/MySQL/MaterializeMySQLSettings.cpp @@ -8,7 +8,6 @@ namespace DB namespace ErrorCodes { - extern const int BAD_ARGUMENTS; extern const int UNKNOWN_SETTING; } @@ -25,9 +24,8 @@ void MaterializeMySQLSettings::loadFromQuery(ASTStorage & storage_def) catch (Exception & e) { if (e.code() == ErrorCodes::UNKNOWN_SETTING) - throw Exception(e.message() + " for database " + storage_def.engine->name, ErrorCodes::BAD_ARGUMENTS); - else - e.rethrow(); + e.addMessage("for database " + storage_def.engine->name); + throw; } } else diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index 96da40e8f6c..aeb2db0523c 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -648,12 +648,12 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & { function_builder = FunctionFactory::instance().get(node.name, data.context); } - catch (DB::Exception & e) + catch (Exception & e) { auto hints = AggregateFunctionFactory::instance().getHints(node.name); if (!hints.empty()) e.addMessage("Or unknown aggregate function " + node.name + ". 
Maybe you meant: " + toString(hints)); - e.rethrow(); + throw; } Names argument_names; diff --git a/src/Storages/JoinSettings.cpp b/src/Storages/JoinSettings.cpp index 15637d67dea..8a2699746da 100644 --- a/src/Storages/JoinSettings.cpp +++ b/src/Storages/JoinSettings.cpp @@ -10,7 +10,6 @@ namespace DB namespace ErrorCodes { - extern const int BAD_ARGUMENTS; extern const int UNKNOWN_SETTING; } @@ -27,9 +26,8 @@ void JoinSettings::loadFromQuery(ASTStorage & storage_def) catch (Exception & e) { if (e.code() == ErrorCodes::UNKNOWN_SETTING) - throw Exception(e.message() + " for storage " + storage_def.engine->name, ErrorCodes::BAD_ARGUMENTS); - else - e.rethrow(); + e.addMessage("for storage " + storage_def.engine->name); + throw; } } else diff --git a/src/Storages/Kafka/KafkaSettings.cpp b/src/Storages/Kafka/KafkaSettings.cpp index 4d80419af2d..6ef74511d83 100644 --- a/src/Storages/Kafka/KafkaSettings.cpp +++ b/src/Storages/Kafka/KafkaSettings.cpp @@ -10,7 +10,6 @@ namespace DB namespace ErrorCodes { - extern const int BAD_ARGUMENTS; extern const int UNKNOWN_SETTING; } @@ -27,9 +26,8 @@ void KafkaSettings::loadFromQuery(ASTStorage & storage_def) catch (Exception & e) { if (e.code() == ErrorCodes::UNKNOWN_SETTING) - throw Exception(e.message() + " for storage " + storage_def.engine->name, ErrorCodes::BAD_ARGUMENTS); - else - e.rethrow(); + e.addMessage("for storage " + storage_def.engine->name); + throw; } } else diff --git a/src/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp index 00580c8d8bb..f7a9a37e6dd 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeSettings.cpp @@ -11,8 +11,6 @@ namespace DB namespace ErrorCodes { - extern const int INVALID_CONFIG_PARAMETER; - extern const int BAD_ARGUMENTS; extern const int UNKNOWN_SETTING; } @@ -34,9 +32,8 @@ void MergeTreeSettings::loadFromConfig(const String & config_elem, const Poco::U catch (Exception & e) { if (e.code() == ErrorCodes::UNKNOWN_SETTING) - throw Exception(e.message() + " in MergeTree config", ErrorCodes::INVALID_CONFIG_PARAMETER); - else - e.rethrow(); + e.addMessage("in MergeTree config"); + throw; } } @@ -51,9 +48,8 @@ void MergeTreeSettings::loadFromQuery(ASTStorage & storage_def) catch (Exception & e) { if (e.code() == ErrorCodes::UNKNOWN_SETTING) - throw Exception(e.message() + " for storage " + storage_def.engine->name, ErrorCodes::BAD_ARGUMENTS); - else - e.rethrow(); + e.addMessage("for storage " + storage_def.engine->name); + throw; } } else diff --git a/src/Storages/RabbitMQ/RabbitMQSettings.cpp b/src/Storages/RabbitMQ/RabbitMQSettings.cpp index f956c520749..93495cdd8ae 100644 --- a/src/Storages/RabbitMQ/RabbitMQSettings.cpp +++ b/src/Storages/RabbitMQ/RabbitMQSettings.cpp @@ -9,7 +9,6 @@ namespace DB namespace ErrorCodes { - extern const int BAD_ARGUMENTS; extern const int UNKNOWN_SETTING; } @@ -26,9 +25,8 @@ void RabbitMQSettings::loadFromQuery(ASTStorage & storage_def) catch (Exception & e) { if (e.code() == ErrorCodes::UNKNOWN_SETTING) - throw Exception(e.message() + " for storage " + storage_def.engine->name, ErrorCodes::BAD_ARGUMENTS); - else - e.rethrow(); + e.addMessage("for storage " + storage_def.engine->name); + throw; } } else diff --git a/src/Storages/SetSettings.cpp b/src/Storages/SetSettings.cpp index f7ff1c446f2..baa3d231067 100644 --- a/src/Storages/SetSettings.cpp +++ b/src/Storages/SetSettings.cpp @@ -10,7 +10,6 @@ namespace DB namespace ErrorCodes { - extern const int BAD_ARGUMENTS; extern const int UNKNOWN_SETTING; } 
@@ -27,9 +26,8 @@ void SetSettings::loadFromQuery(ASTStorage & storage_def) catch (Exception & e) { if (e.code() == ErrorCodes::UNKNOWN_SETTING) - throw Exception(e.message() + " for storage " + storage_def.engine->name, ErrorCodes::BAD_ARGUMENTS); - else - e.rethrow(); + e.addMessage("for storage " + storage_def.engine->name); + throw; } } else From 3aa49204d628dcd74090f06449e2cec26f8856a3 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Mon, 26 Oct 2020 07:39:20 +0300 Subject: [PATCH 277/432] Update MergeTreeSettings.cpp --- src/Storages/MergeTree/MergeTreeSettings.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp index f7a9a37e6dd..15ff62e0aa6 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeSettings.cpp @@ -12,6 +12,7 @@ namespace DB namespace ErrorCodes { extern const int UNKNOWN_SETTING; + extern const int BAD_ARGUMENTS; } IMPLEMENT_SETTINGS_TRAITS(MergeTreeSettingsTraits, LIST_OF_MERGE_TREE_SETTINGS) From 18458f36e773ae2a04b35bc244d2751308a553f1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 26 Oct 2020 08:20:37 +0300 Subject: [PATCH 278/432] Fix trash --- docs/en/operations/system-tables/crash-log.md | 6 +++--- docs/ru/operations/system-tables/crash-log.md | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/en/operations/system-tables/crash-log.md b/docs/en/operations/system-tables/crash-log.md index 5b338237b83..5e9fec53429 100644 --- a/docs/en/operations/system-tables/crash-log.md +++ b/docs/en/operations/system-tables/crash-log.md @@ -1,6 +1,6 @@ -# system.crash_log {#system-tables_crash_log} +# system.crash_log {#system-tables_crash_log} -Contains information about stack traces for fatal errors. The table does not exist in the database by default, it is created only when fatal errors occur. +Contains information about stack traces for fatal errors. The table does not exist in the database by default, it is created only when fatal errors occur. Columns: @@ -39,7 +39,7 @@ trace: [188531193,...] trace_full: ['3. DB::(anonymous namespace)::FunctionFormatReadableTimeDelta::executeImpl(std::__1::vector >&, std::__1::vector > const&, unsigned long, unsigned long) const @ 0xb3cc1f9 in /home/username/work/ClickHouse/build/programs/clickhouse',...] version: ClickHouse 20.11.1.1 revision: 54442 -build_id: +build_id: ``` **See also** diff --git a/docs/ru/operations/system-tables/crash-log.md b/docs/ru/operations/system-tables/crash-log.md index 7b645a06b2d..d2b3ae5c6f5 100644 --- a/docs/ru/operations/system-tables/crash-log.md +++ b/docs/ru/operations/system-tables/crash-log.md @@ -1,6 +1,6 @@ -# system.crash_log {#system-tables_crash_log} +# system.crash_log {#system-tables_crash_log} -Содержит информацию о трассировках стека для фатальных ошибок. Таблица не содержится в базе данных по умолчанию, а создается только при возникновении фатальных ошибок. +Содержит информацию о трассировках стека для фатальных ошибок. Таблица не содержится в базе данных по умолчанию, а создается только при возникновении фатальных ошибок. Колонки: @@ -39,7 +39,7 @@ trace: [188531193,...] trace_full: ['3. DB::(anonymous namespace)::FunctionFormatReadableTimeDelta::executeImpl(std::__1::vector >&, std::__1::vector > const&, unsigned long, unsigned long) const @ 0xb3cc1f9 in /home/username/work/ClickHouse/build/programs/clickhouse',...] version: ClickHouse 20.11.1.1 revision: 54442 -build_id: +build_id: ``` **См. 
также** From f491903d416fd7d683af36896d9c1105b004048c Mon Sep 17 00:00:00 2001 From: Robert Hodges Date: Sun, 25 Oct 2020 22:23:54 -0700 Subject: [PATCH 279/432] Update cloud.md Adding description of Altinity.Cloud to cloud platforms. --- docs/en/commercial/cloud.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/en/commercial/cloud.md b/docs/en/commercial/cloud.md index 07d843f724a..06216517db8 100644 --- a/docs/en/commercial/cloud.md +++ b/docs/en/commercial/cloud.md @@ -18,4 +18,14 @@ toc_title: Cloud - Encryption and isolation - Automated maintenance +## Altinity.Cloud {#altinity.cloud} + +[Altinity.Cloud](https://altinity.com/cloud-database/) is a fully managed ClickHouse-as-a-Service for the Amazon public cloud. +- Fast deployment of ClickHouse clusters on Amazon resources +- Easy scale-out/scale-in as well as vertical scaling of nodes +- Isolated per-tenant VPCs with public endpoint or VPC peering +- Configurable storage types and volume configurations +- Cross-AZ scaling for performance and high availability +- Built-in monitoring and SQL query editor + {## [Original article](https://clickhouse.tech/docs/en/commercial/cloud/) ##} From a5b56c8b84473cd73caf1ddce0927f2f1a6685d8 Mon Sep 17 00:00:00 2001 From: Vladimir Chebotarev Date: Mon, 26 Oct 2020 10:23:27 +0300 Subject: [PATCH 280/432] Empty commit to re-run checks. From 79895130b75f872346be67603f3f48f09a7cada3 Mon Sep 17 00:00:00 2001 From: filimonov <1549571+filimonov@users.noreply.github.com> Date: Mon, 26 Oct 2020 08:59:15 +0100 Subject: [PATCH 281/432] Better grammar in comments, CI rerun --- programs/local/LocalServer.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index db1838c38f7..f85b5bde965 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -122,8 +122,8 @@ void LocalServer::tryInitPath() } else { - // Path is not provided explicitly - use unique path in the system temporary directory - // (or in current dir if temporary don't exists) + // The path is not provided explicitly - use a unique path in the system temporary directory + // (or in the current dir if the temporary directory doesn't exist) Poco::Logger * log = &logger(); std::filesystem::path parent_folder; std::filesystem::path default_path; @@ -144,7 +144,7 @@ void LocalServer::tryInitPath() LOG_DEBUG(log, "Will create working directory inside current directory: {}", parent_folder.string()); } - /// we can have other clickhouse-local running simultaneously, even with same pid (for ex. - several docker mounting same folder) + /// we can have another clickhouse-local running simultaneously, even with the same PID (for ex.
- several dockers mounting the same folder) /// or it can be some leftovers from other clickhouse-local runs /// as we can't accurately distinguish those situations we don't touch any existent folders /// we just try to pick some free name for our working folder From 54f823aafa3f00c5454a6538200ca5b30d63d70a Mon Sep 17 00:00:00 2001 From: tavplubix Date: Mon, 26 Oct 2020 12:42:54 +0300 Subject: [PATCH 282/432] Update 01114_database_atomic.sh --- tests/queries/0_stateless/01114_database_atomic.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/01114_database_atomic.sh b/tests/queries/0_stateless/01114_database_atomic.sh index f00a1c4a4d8..d220491d152 100755 --- a/tests/queries/0_stateless/01114_database_atomic.sh +++ b/tests/queries/0_stateless/01114_database_atomic.sh @@ -65,9 +65,9 @@ $CLICKHOUSE_CLIENT -q "SELECT count() FROM test_01114_1.mt" # result: 5 $CLICKHOUSE_CLIENT -q "SELECT tuple(s, sleepEachRow(3)) FROM test_01114_1.mt" > /dev/null & # 15s sleep 1 -$CLICKHOUSE_CLIENT -q "DROP DATABASE test_01114_1" && echo "dropped" +$CLICKHOUSE_CLIENT -q "DROP DATABASE test_01114_1" --database_atomic_wait_for_drop_and_detach_synchronously=0 && echo "dropped" wait # for INSERT $CLICKHOUSE_CLIENT -q "SELECT count(n), sum(n) FROM test_01114_2.mt" # result: 30, 435 -$CLICKHOUSE_CLIENT -q "DROP DATABASE test_01114_2" +$CLICKHOUSE_CLIENT -q "DROP DATABASE test_01114_2" --database_atomic_wait_for_drop_and_detach_synchronously=0 From cb943599036c6366fb15ba2ef06ec2a576460af1 Mon Sep 17 00:00:00 2001 From: tavplubix Date: Mon, 26 Oct 2020 12:45:30 +0300 Subject: [PATCH 283/432] Update 01107_atomic_db_detach_attach.sh --- tests/queries/0_stateless/01107_atomic_db_detach_attach.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh b/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh index 7dd3e4e37e6..173bf44e1f1 100755 --- a/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh +++ b/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh @@ -12,16 +12,16 @@ sleep 1 $CLICKHOUSE_CLIENT -q "DETACH TABLE test_01107.mt" --database_atomic_wait_for_drop_and_detach_synchronously=0 $CLICKHOUSE_CLIENT -q "ATTACH TABLE test_01107.mt" 2>&1 | grep -F "Code: 57" > /dev/null && echo "OK" -$CLICKHOUSE_CLIENT -q "DETACH DATABASE test_01107" 2>&1 | grep -F "Code: 219" > /dev/null && echo "OK" +$CLICKHOUSE_CLIENT -q "DETACH DATABASE test_01107" --database_atomic_wait_for_drop_and_detach_synchronously=0 2>&1 | grep -F "Code: 219" > /dev/null && echo "OK" wait $CLICKHOUSE_CLIENT -q "ATTACH TABLE test_01107.mt" $CLICKHOUSE_CLIENT -q "SELECT count(n), sum(n) FROM test_01107.mt" -$CLICKHOUSE_CLIENT -q "DETACH DATABASE test_01107" +$CLICKHOUSE_CLIENT -q "DETACH DATABASE test_01107" --database_atomic_wait_for_drop_and_detach_synchronously=0 $CLICKHOUSE_CLIENT -q "ATTACH DATABASE test_01107" $CLICKHOUSE_CLIENT -q "SELECT count(n), sum(n) FROM test_01107.mt" $CLICKHOUSE_CLIENT -q "INSERT INTO test_01107.mt SELECT number + sleepEachRow(1) FROM numbers(5)" && echo "end" & sleep 1 -$CLICKHOUSE_CLIENT -q "DROP DATABASE test_01107" && sleep 1 && echo "dropped" +$CLICKHOUSE_CLIENT -q "DROP DATABASE test_01107" --database_atomic_wait_for_drop_and_detach_synchronously=0 && sleep 1 && echo "dropped" wait From fe9db9866c3345a512c6993e62ab669257c6d1cf Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 26 Oct 2020 12:56:50 +0300 Subject: [PATCH 284/432] Check exception message is poisoned in 
ExpressionActions. --- src/Interpreters/ExpressionActions.cpp | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp index 8513384d31a..6ce9fe2c793 100644 --- a/src/Interpreters/ExpressionActions.cpp +++ b/src/Interpreters/ExpressionActions.cpp @@ -22,6 +22,15 @@ # include "config_core.h" #endif +#include + +#if defined(MEMORY_SANITIZER) + #include +#endif + +#if defined(ADDRESS_SANITIZER) + #include +#endif namespace ProfileEvents { @@ -623,6 +632,22 @@ void ExpressionActions::execute(Block & block, bool dry_run) const } catch (Exception & e) { +#if defined(MEMORY_SANITIZER) + const auto & msg = e.message(); + if (__msan_test_shadow(msg.data(), msg.size()) != -1) + { + LOG_FATAL(&Poco::Logger::get("ExpressionActions"), "Poisoned exception message (msan): {}", e.getStackTraceString()); + } +#endif + +#if defined(ADDRESS_SANITIZER) + const auto & msg = e.message(); + if (__asan_region_is_poisoned(msg.data(), msg.size())) + { + LOG_FATAL(&Poco::Logger::get("ExpressionActions"), "Poisoned exception message (asan): {}", e.getStackTraceString()); + } +#endif + e.addMessage(fmt::format("while executing '{}'", action.toString())); throw; } From 8f03e526a4b50f61428e0bf7020cb628d99d2668 Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Mon, 26 Oct 2020 13:09:41 +0300 Subject: [PATCH 285/432] DOCSUP-3200: Edit and translate to Russian (#16204) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Edit and translate Отредактировал английскую версию текстов документов и перевел изменения на русский язык. * Update trace_log.md Исправляю битые ссылки. * Fixing broken links Исправил битые ссылки. * Fixing broken links Поправил битую ссылку, добавив раздел в русскую версию. Co-authored-by: Dmitriy --- docs/en/faq/integration/json-import.md | 2 +- docs/en/interfaces/formats.md | 6 +- .../third-party/client-libraries.md | 2 +- docs/en/operations/system-tables/query_log.md | 8 +- .../system-tables/query_thread_log.md | 2 +- docs/en/operations/system-tables/text_log.md | 2 +- docs/en/operations/system-tables/trace_log.md | 2 +- .../statements/alter/sample-by.md | 10 +- docs/en/sql-reference/statements/drop.md | 64 ++-- docs/ru/interfaces/formats.md | 297 ++++++++++++------ docs/ru/operations/system-tables/query_log.md | 100 +++--- .../system-tables/query_thread_log.md | 84 ++--- docs/ru/operations/system-tables/text_log.md | 49 ++- docs/ru/operations/system-tables/trace_log.md | 33 +- .../statements/alter/sample-by.md | 20 ++ docs/ru/sql-reference/statements/drop.md | 64 ++-- docs/ru/sql-reference/statements/grant.md | 2 + 17 files changed, 452 insertions(+), 295 deletions(-) create mode 100644 docs/ru/sql-reference/statements/alter/sample-by.md diff --git a/docs/en/faq/integration/json-import.md b/docs/en/faq/integration/json-import.md index 067b407a079..fb94f226f2b 100644 --- a/docs/en/faq/integration/json-import.md +++ b/docs/en/faq/integration/json-import.md @@ -30,4 +30,4 @@ Instead of inserting data manually, you might consider to use one of [client lib - `input_format_import_nested_json` allows to insert nested JSON objects into columns of [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) type. !!! note "Note" - Settings are specified as `GET` parameters for the HTTP interface or as additional command-line arguments prefixed with `--` for the CLI interface. 
+ Settings are specified as `GET` parameters for the HTTP interface or as additional command-line arguments prefixed with `--` for the `CLI` interface. \ No newline at end of file diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index d96d48bdca3..d310705d1c1 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -460,7 +460,7 @@ See also the [JSONEachRow](#jsoneachrow) format. ## JSONString {#jsonstring} -Differs from JSON only in that data fields are output in strings, not in typed json values. +Differs from JSON only in that data fields are output in strings, not in typed JSON values. Example: @@ -596,7 +596,7 @@ When inserting the data, you should provide a separate JSON value for each row. ## JSONEachRowWithProgress {#jsoneachrowwithprogress} ## JSONStringEachRowWithProgress {#jsonstringeachrowwithprogress} -Differs from JSONEachRow/JSONStringEachRow in that ClickHouse will also yield progress information as JSON objects. +Differs from `JSONEachRow`/`JSONStringEachRow` in that ClickHouse will also yield progress information as JSON values. ```json {"row":{"'hello'":"hello","multiply(42, number)":"0","range(5)":[0,1,2,3,4]}} @@ -608,7 +608,7 @@ Differs from JSONEachRow/JSONStringEachRow in that ClickHouse will also yield pr ## JSONCompactEachRowWithNamesAndTypes {#jsoncompacteachrowwithnamesandtypes} ## JSONCompactStringEachRowWithNamesAndTypes {#jsoncompactstringeachrowwithnamesandtypes} -Differs from JSONCompactEachRow/JSONCompactStringEachRow in that the column names and types are written as the first two rows. +Differs from `JSONCompactEachRow`/`JSONCompactStringEachRow` in that the column names and types are written as the first two rows. ```json ["'hello'", "multiply(42, number)", "range(5)"] diff --git a/docs/en/interfaces/third-party/client-libraries.md b/docs/en/interfaces/third-party/client-libraries.md index bb98c5f936b..4e8347c9a6e 100644 --- a/docs/en/interfaces/third-party/client-libraries.md +++ b/docs/en/interfaces/third-party/client-libraries.md @@ -6,7 +6,7 @@ toc_title: Client Libraries # Client Libraries from Third-party Developers {#client-libraries-from-third-party-developers} !!! warning "Disclaimer" - Yandex does **not** maintain the libraries listed below and haven’t done any extensive testing to ensure their quality. + Yandex does **not** maintain the libraries listed below and hasn’t done any extensive testing to ensure their quality. - Python - [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm) diff --git a/docs/en/operations/system-tables/query_log.md b/docs/en/operations/system-tables/query_log.md index ddd5d327d59..32b2bdf2133 100644 --- a/docs/en/operations/system-tables/query_log.md +++ b/docs/en/operations/system-tables/query_log.md @@ -20,8 +20,8 @@ The `system.query_log` table registers two kinds of queries: Each query creates one or two rows in the `query_log` table, depending on the status (see the `type` column) of the query: -1. If the query execution was successful, two rows with the `QueryStart` and `QueryFinish` types are created . -2. If an error occurred during query processing, two events with the `QueryStart` and `ExceptionWhileProcessing` types are created . +1. If the query execution was successful, two rows with the `QueryStart` and `QueryFinish` types are created. +2. If an error occurred during query processing, two events with the `QueryStart` and `ExceptionWhileProcessing` types are created. 3. 
If an error occurred before launching the query, a single event with the `ExceptionBeforeStart` type is created. Columns: @@ -37,8 +37,8 @@ Columns: - `query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Start time of query execution. - `query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Start time of query execution with microsecond precision. - `query_duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Duration of query execution in milliseconds. -- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number or rows read from all tables and table functions participated in query. It includes usual subqueries, subqueries for `IN` and `JOIN`. For distributed queries `read_rows` includes the total number of rows read at all replicas. Each replica sends it’s `read_rows` value, and the server-initiator of the query summarize all received and local values. The cache volumes doesn’t affect this value. -- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number or bytes read from all tables and table functions participated in query. It includes usual subqueries, subqueries for `IN` and `JOIN`. For distributed queries `read_bytes` includes the total number of rows read at all replicas. Each replica sends it’s `read_bytes` value, and the server-initiator of the query summarize all received and local values. The cache volumes doesn’t affect this value. +- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of rows read from all tables and table functions participated in query. It includes usual subqueries, subqueries for `IN` and `JOIN`. For distributed queries `read_rows` includes the total number of rows read at all replicas. Each replica sends it’s `read_rows` value, and the server-initiator of the query summarizes all received and local values. The cache volumes don’t affect this value. +- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of bytes read from all tables and table functions participated in query. It includes usual subqueries, subqueries for `IN` and `JOIN`. For distributed queries `read_bytes` includes the total number of rows read at all replicas. Each replica sends it’s `read_bytes` value, and the server-initiator of the query summarizes all received and local values. The cache volumes don’t affect this value. - `written_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0. - `written_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0. - `result_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of rows in a result of the `SELECT` query, or a number of rows in the `INSERT` query. diff --git a/docs/en/operations/system-tables/query_thread_log.md b/docs/en/operations/system-tables/query_thread_log.md index a6f090e99f3..0ae2e7d5d3b 100644 --- a/docs/en/operations/system-tables/query_thread_log.md +++ b/docs/en/operations/system-tables/query_thread_log.md @@ -1,6 +1,6 @@ # system.query_thread_log {#system_tables-query_thread_log} -Contains information about threads which execute queries, for example, thread name, thread start time, duration of query processing. 
+Contains information about threads that execute queries, for example, thread name, thread start time, duration of query processing. To start logging: diff --git a/docs/en/operations/system-tables/text_log.md b/docs/en/operations/system-tables/text_log.md index 34f37c2cae7..f5f53c95653 100644 --- a/docs/en/operations/system-tables/text_log.md +++ b/docs/en/operations/system-tables/text_log.md @@ -1,6 +1,6 @@ # system.text_log {#system_tables-text_log} -Contains logging entries. Logging level which goes to this table can be limited with `text_log.level` server setting. +Contains logging entries. The logging level which goes to this table can be limited with the `text_log.level` server setting. Columns: diff --git a/docs/en/operations/system-tables/trace_log.md b/docs/en/operations/system-tables/trace_log.md index bc955ebe4de..8107f60b808 100644 --- a/docs/en/operations/system-tables/trace_log.md +++ b/docs/en/operations/system-tables/trace_log.md @@ -18,7 +18,7 @@ Columns: - `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse server build revision. - When connecting to server by `clickhouse-client`, you see the string similar to `Connected to ClickHouse server version 19.18.1 revision 54429.`. This field contains the `revision`, but not the `version` of a server. + When connecting to the server by `clickhouse-client`, you see a string similar to `Connected to ClickHouse server version 19.18.1 revision 54429.`. This field contains the `revision`, but not the `version` of a server. - `timer_type` ([Enum8](../../sql-reference/data-types/enum.md)) — Timer type: diff --git a/docs/en/sql-reference/statements/alter/sample-by.md b/docs/en/sql-reference/statements/alter/sample-by.md index bea6364e4ea..df8ff90f196 100644 --- a/docs/en/sql-reference/statements/alter/sample-by.md +++ b/docs/en/sql-reference/statements/alter/sample-by.md @@ -5,16 +5,16 @@ toc_title: SAMPLE BY # Manipulating Sampling-Key Expressions {#manipulations-with-sampling-key-expressions} +Syntax: + ``` sql ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY SAMPLE BY new_expression ``` The command changes the [sampling key](../../../engines/table-engines/mergetree-family/mergetree.md) of the table to `new_expression` (an expression or a tuple of expressions). -The command is lightweight in a sense that it only changes metadata. The primary key must contain the new sample key. +The command is lightweight in the sense that it only changes metadata. The primary key must contain the new sample key. !!! note "Note" - It only works for tables in the [`MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) family (including -[replicated](../../../engines/table-engines/mergetree-family/replication.md) tables). - - + It only works for tables in the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) family (including +[replicated](../../../engines/table-engines/mergetree-family/replication.md) tables). \ No newline at end of file diff --git a/docs/en/sql-reference/statements/drop.md b/docs/en/sql-reference/statements/drop.md index de4eda64ef6..07761b6b821 100644 --- a/docs/en/sql-reference/statements/drop.md +++ b/docs/en/sql-reference/statements/drop.md @@ -5,84 +5,96 @@ toc_title: DROP # DROP Statements {#drop} -Deletes existing entity. If `IF EXISTS` clause is specified, these queries doesn't return an error if the entity doesn't exist. +Deletes an existing entity. If the `IF EXISTS` clause is specified, these queries don't return an error if the entity doesn't exist.
## DROP DATABASE {#drop-database} +Deletes all tables inside the `db` database, then deletes the `db` database itself. + +Syntax: + ``` sql DROP DATABASE [IF EXISTS] db [ON CLUSTER cluster] ``` -Deletes all tables inside the `db` database, then deletes the ‘db’ database itself. - ## DROP TABLE {#drop-table} +Deletes the table. + +Syntax: + ``` sql DROP [TEMPORARY] TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] ``` -Deletes the table. - ## DROP DICTIONARY {#drop-dictionary} +Deletes the dictionary. + +Syntax: + ``` sql DROP DICTIONARY [IF EXISTS] [db.]name ``` -Deletes the dictionary. - ## DROP USER {#drop-user-statement} +Deletes a user. + +Syntax: + ``` sql DROP USER [IF EXISTS] name [,...] [ON CLUSTER cluster_name] ``` -Deletes a user. - ## DROP ROLE {#drop-role-statement} +Deletes a role. The deleted role is revoked from all the entities where it was assigned. + +Syntax: + ``` sql DROP ROLE [IF EXISTS] name [,...] [ON CLUSTER cluster_name] ``` -Deletes a role. - -Deleted role is revoked from all the entities where it was assigned. - ## DROP ROW POLICY {#drop-row-policy-statement} +Deletes a row policy. Deleted row policy is revoked from all the entities where it was assigned. + +Syntax: + ``` sql DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER cluster_name] ``` -Deletes a row policy. - -Deleted row policy is revoked from all the entities where it was assigned. - ## DROP QUOTA {#drop-quota-statement} +Deletes a quota. The deleted quota is revoked from all the entities where it was assigned. + +Syntax: + ``` sql DROP QUOTA [IF EXISTS] name [,...] [ON CLUSTER cluster_name] ``` -Deletes a quota. - -Deleted quota is revoked from all the entities where it was assigned. - ## DROP SETTINGS PROFILE {#drop-settings-profile-statement} +Deletes a settings profile. The deleted settings profile is revoked from all the entities where it was assigned. + +Syntax: + ``` sql DROP [SETTINGS] PROFILE [IF EXISTS] name [,...] [ON CLUSTER cluster_name] ``` -Deletes a settings profile. - -Deleted settings profile is revoked from all the entities where it was assigned. - ## DROP VIEW {#drop-view} +Deletes a view. Views can be deleted by a `DROP TABLE` command as well but `DROP VIEW` checks that `[db.]name` is a view. + +Syntax: + ``` sql DROP VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster] ``` -Deletes a view. Views can be deleted by a `DROP TABLE` command as well but `DROP VIEW` checks that `[db.]name` is a view. +[Оriginal article](https://clickhouse.tech/docs/en/sql-reference/statements/drop/) \ No newline at end of file diff --git a/docs/ru/interfaces/formats.md b/docs/ru/interfaces/formats.md index 2745139998f..e94810be8d3 100644 --- a/docs/ru/interfaces/formats.md +++ b/docs/ru/interfaces/formats.md @@ -4,41 +4,53 @@ ClickHouse может принимать (`INSERT`) и отдавать (`SELECT Поддерживаемые форматы и возможность использовать их в запросах `INSERT` и `SELECT` перечислены в таблице ниже. 
-| Формат | INSERT | SELECT | -|-----------------------------------------------------------------|--------|--------| -| [TabSeparated](#tabseparated) | ✔ | ✔ | -| [TabSeparatedRaw](#tabseparatedraw) | ✔ | ✔ | -| [TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ | -| [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔ | ✔ | -| [Template](#format-template) | ✔ | ✔ | -| [TemplateIgnoreSpaces](#templateignorespaces) | ✔ | ✗ | -| [CSV](#csv) | ✔ | ✔ | -| [CSVWithNames](#csvwithnames) | ✔ | ✔ | -| [CustomSeparated](#format-customseparated) | ✔ | ✔ | -| [Values](#data-format-values) | ✔ | ✔ | -| [Vertical](#vertical) | ✗ | ✔ | -| [JSON](#json) | ✗ | ✔ | -| [JSONCompact](#jsoncompact) | ✗ | ✔ | -| [JSONEachRow](#jsoneachrow) | ✔ | ✔ | -| [TSKV](#tskv) | ✔ | ✔ | -| [Pretty](#pretty) | ✗ | ✔ | -| [PrettyCompact](#prettycompact) | ✗ | ✔ | -| [PrettyCompactMonoBlock](#prettycompactmonoblock) | ✗ | ✔ | -| [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ | -| [PrettySpace](#prettyspace) | ✗ | ✔ | -| [Protobuf](#protobuf) | ✔ | ✔ | -| [ProtobufSingle](#protobufsingle) | ✔ | ✔ | -| [Parquet](#data-format-parquet) | ✔ | ✔ | -| [Arrow](#data-format-arrow) | ✔ | ✔ | -| [ArrowStream](#data-format-arrow-stream) | ✔ | ✔ | -| [ORC](#data-format-orc) | ✔ | ✗ | -| [RowBinary](#rowbinary) | ✔ | ✔ | -| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ | -| [Native](#native) | ✔ | ✔ | -| [Null](#null) | ✗ | ✔ | -| [XML](#xml) | ✗ | ✔ | -| [CapnProto](#capnproto) | ✔ | ✗ | -| [LineAsString](#lineasstring) | ✔ | ✗ | +| Формат | INSERT | SELECT | +|-----------------------------------------------------------------------------------------|--------|--------| +| [TabSeparated](#tabseparated) | ✔ | ✔ | +| [TabSeparatedRaw](#tabseparatedraw) | ✔ | ✔ | +| [TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ | +| [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔ | ✔ | +| [Template](#format-template) | ✔ | ✔ | +| [TemplateIgnoreSpaces](#templateignorespaces) | ✔ | ✗ | +| [CSV](#csv) | ✔ | ✔ | +| [CSVWithNames](#csvwithnames) | ✔ | ✔ | +| [CustomSeparated](#format-customseparated) | ✔ | ✔ | +| [Values](#data-format-values) | ✔ | ✔ | +| [Vertical](#vertical) | ✗ | ✔ | +| [VerticalRaw](#verticalraw) | ✗ | ✔ | +| [JSON](#json) | ✗ | ✔ | +| [JSONString](#jsonstring) | ✗ | ✔ | +| [JSONCompact](#jsoncompact) | ✗ | ✔ | +| [JSONCompactString](#jsoncompactstring) | ✗ | ✔ | +| [JSONEachRow](#jsoneachrow) | ✔ | ✔ | +| [JSONEachRowWithProgress](#jsoneachrowwithprogress) | ✗ | ✔ | +| [JSONStringEachRow](#jsonstringeachrow) | ✔ | ✔ | +| [JSONStringEachRowWithProgress](#jsonstringeachrowwithprogress) | ✗ | ✔ | +| [JSONCompactEachRow](#jsoncompacteachrow) | ✔ | ✔ | +| [JSONCompactEachRowWithNamesAndTypes](#jsoncompacteachrowwithnamesandtypes) | ✔ | ✔ | +| [JSONCompactStringEachRow](#jsoncompactstringeachrow) | ✔ | ✔ | +| [JSONCompactStringEachRowWithNamesAndTypes](#jsoncompactstringeachrowwithnamesandtypes) | ✔ | ✔ | +| [TSKV](#tskv) | ✔ | ✔ | +| [Pretty](#pretty) | ✗ | ✔ | +| [PrettyCompact](#prettycompact) | ✗ | ✔ | +| [PrettyCompactMonoBlock](#prettycompactmonoblock) | ✗ | ✔ | +| [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ | +| [PrettySpace](#prettyspace) | ✗ | ✔ | +| [Protobuf](#protobuf) | ✔ | ✔ | +| [ProtobufSingle](#protobufsingle) | ✔ | ✔ | +| [Avro](#data-format-avro) | ✔ | ✔ | +| [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ | +| [Parquet](#data-format-parquet) | ✔ | ✔ | +| [Arrow](#data-format-arrow) | ✔ | ✔ | +| [ArrowStream](#data-format-arrow-stream) | ✔ | ✔ | +| 
[ORC](#data-format-orc) | ✔ | ✗ | +| [RowBinary](#rowbinary) | ✔ | ✔ | +| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ | +| [Native](#native) | ✔ | ✔ | +| [Null](#null) | ✗ | ✔ | +| [XML](#xml) | ✗ | ✔ | +| [CapnProto](#capnproto) | ✔ | ✗ | +| [LineAsString](#lineasstring) | ✔ | ✗ | Вы можете регулировать некоторые параметры работы с форматами с помощью настроек ClickHouse. За дополнительной информацией обращайтесь к разделу [Настройки](../operations/settings/settings.md). @@ -364,62 +376,41 @@ SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase WITH TOTA "meta": [ { - "name": "SearchPhrase", + "name": "'hello'", "type": "String" }, { - "name": "c", + "name": "multiply(42, number)", "type": "UInt64" + }, + { + "name": "range(5)", + "type": "Array(UInt8)" } ], "data": [ { - "SearchPhrase": "", - "c": "8267016" + "'hello'": "hello", + "multiply(42, number)": "0", + "range(5)": [0,1,2,3,4] }, { - "SearchPhrase": "bathroom interior design", - "c": "2166" + "'hello'": "hello", + "multiply(42, number)": "42", + "range(5)": [0,1,2,3,4] }, { - "SearchPhrase": "yandex", - "c": "1655" - }, - { - "SearchPhrase": "spring 2014 fashion", - "c": "1549" - }, - { - "SearchPhrase": "freeform photos", - "c": "1480" + "'hello'": "hello", + "multiply(42, number)": "84", + "range(5)": [0,1,2,3,4] } ], - "totals": - { - "SearchPhrase": "", - "c": "8873898" - }, + "rows": 3, - "extremes": - { - "min": - { - "SearchPhrase": "", - "c": "1480" - }, - "max": - { - "SearchPhrase": "", - "c": "8267016" - } - }, - - "rows": 5, - - "rows_before_limit_at_least": 141137 + "rows_before_limit_at_least": 3 } ``` @@ -438,65 +429,167 @@ JSON совместим с JavaScript. Для этого, дополнитель ClickHouse поддерживает [NULL](../sql-reference/syntax.md), который при выводе JSON будет отображен как `null`. Чтобы включить отображение в результате значений `+nan`, `-nan`, `+inf`, `-inf`, установите параметр [output_format_json_quote_denormals](../operations/settings/settings.md#settings-output_format_json_quote_denormals) равным 1. -Смотрите также формат [JSONEachRow](#jsoneachrow) . +Смотрите также формат [JSONEachRow](#jsoneachrow). + +## JSONString {#jsonstring} + +Отличается от JSON только тем, что поля данных выводятся в строках, а не в типизированных значениях JSON. + +Пример: + +```json +{ + "meta": + [ + { + "name": "'hello'", + "type": "String" + }, + { + "name": "multiply(42, number)", + "type": "UInt64" + }, + { + "name": "range(5)", + "type": "Array(UInt8)" + } + ], + + "data": + [ + { + "'hello'": "hello", + "multiply(42, number)": "0", + "range(5)": "[0,1,2,3,4]" + }, + { + "'hello'": "hello", + "multiply(42, number)": "42", + "range(5)": "[0,1,2,3,4]" + }, + { + "'hello'": "hello", + "multiply(42, number)": "84", + "range(5)": "[0,1,2,3,4]" + } + ], + + "rows": 3, + + "rows_before_limit_at_least": 3 +} +``` ## JSONCompact {#jsoncompact} +## JSONCompactString {#jsoncompactstring} Отличается от JSON только тем, что строчки данных выводятся в массивах, а не в object-ах. 
Пример: ``` json +// JSONCompact { "meta": [ { - "name": "SearchPhrase", + "name": "'hello'", "type": "String" }, { - "name": "c", + "name": "multiply(42, number)", "type": "UInt64" + }, + { + "name": "range(5)", + "type": "Array(UInt8)" } ], "data": [ - ["", "8267016"], - ["интерьер ванной комнаты", "2166"], - ["яндекс", "1655"], - ["весна 2014 мода", "1549"], - ["фриформ фото", "1480"] + ["hello", "0", [0,1,2,3,4]], + ["hello", "42", [0,1,2,3,4]], + ["hello", "84", [0,1,2,3,4]] ], - "totals": ["","8873898"], + "rows": 3, - "extremes": - { - "min": ["","1480"], - "max": ["","8267016"] - }, - - "rows": 5, - - "rows_before_limit_at_least": 141137 + "rows_before_limit_at_least": 3 } ``` -Этот формат подходит только для вывода результата выполнения запроса, но не для парсинга (приёма данных для вставки в таблицу). -Смотрите также формат `JSONEachRow`. +```json +// JSONCompactString +{ + "meta": + [ + { + "name": "'hello'", + "type": "String" + }, + { + "name": "multiply(42, number)", + "type": "UInt64" + }, + { + "name": "range(5)", + "type": "Array(UInt8)" + } + ], -## JSONEachRow {#jsoneachrow} + "data": + [ + ["hello", "0", "[0,1,2,3,4]"], + ["hello", "42", "[0,1,2,3,4]"], + ["hello", "84", "[0,1,2,3,4]"] + ], -При использовании этого формата, ClickHouse выводит каждую запись как объект JSON (каждый объект отдельной строкой), при этом данные в целом — невалидный JSON. + "rows": 3, -``` json -{"SearchPhrase":"дизайн штор","count()":"1064"} -{"SearchPhrase":"баку","count()":"1000"} -{"SearchPhrase":"","count":"8267016"} + "rows_before_limit_at_least": 3 +} ``` -При вставке данных необходимо каждую запись передавать как отдельный объект JSON. +## JSONEachRow {#jsoneachrow} +## JSONStringEachRow {#jsonstringeachrow} +## JSONCompactEachRow {#jsoncompacteachrow} +## JSONCompactStringEachRow {#jsoncompactstringeachrow} + +При использовании этих форматов ClickHouse выводит каждую запись как значения JSON (каждое значение отдельной строкой), при этом данные в целом — невалидный JSON. + +``` json +{"some_int":42,"some_str":"hello","some_tuple":[1,"a"]} // JSONEachRow +[42,"hello",[1,"a"]] // JSONCompactEachRow +["42","hello","(2,'a')"] // JSONCompactStringsEachRow +``` + +При вставке данных вы должны предоставить отдельное значение JSON для каждой строки. + +## JSONEachRowWithProgress {#jsoneachrowwithprogress} +## JSONStringEachRowWithProgress {#jsonstringeachrowwithprogress} + +Отличается от `JSONEachRow`/`JSONStringEachRow` тем, что ClickHouse будет выдавать информацию о ходе выполнения в виде значений JSON. + +```json +{"row":{"'hello'":"hello","multiply(42, number)":"0","range(5)":[0,1,2,3,4]}} +{"row":{"'hello'":"hello","multiply(42, number)":"42","range(5)":[0,1,2,3,4]}} +{"row":{"'hello'":"hello","multiply(42, number)":"84","range(5)":[0,1,2,3,4]}} +{"progress":{"read_rows":"3","read_bytes":"24","written_rows":"0","written_bytes":"0","total_rows_to_read":"3"}} +``` + +## JSONCompactEachRowWithNamesAndTypes {#jsoncompacteachrowwithnamesandtypes} +## JSONCompactStringEachRowWithNamesAndTypes {#jsoncompactstringeachrowwithnamesandtypes} + +Отличается от `JSONCompactEachRow`/`JSONCompactStringEachRow` тем, что имена и типы столбцов записываются как первые две строки. 
+ +```json +["'hello'", "multiply(42, number)", "range(5)"] +["String", "UInt64", "Array(UInt8)"] +["hello", "0", [0,1,2,3,4]] +["hello", "42", [0,1,2,3,4]] +["hello", "84", [0,1,2,3,4]] +``` ### Вставка данных {#vstavka-dannykh} @@ -784,6 +877,10 @@ test: string with 'quotes' and with some special Этот формат подходит только для вывода результата выполнения запроса, но не для парсинга (приёма данных для вставки в таблицу). +## VerticalRaw {#verticalraw} + +Аналогичен [Vertical](#vertical), но с отключенным выходом. Этот формат подходит только для вывода результата выполнения запроса, но не для парсинга (приёма данных для вставки в таблицу). + ## XML {#xml} Формат XML подходит только для вывода данных, не для парсинга. Пример: diff --git a/docs/ru/operations/system-tables/query_log.md b/docs/ru/operations/system-tables/query_log.md index 27ed93e874e..39f685288d8 100644 --- a/docs/ru/operations/system-tables/query_log.md +++ b/docs/ru/operations/system-tables/query_log.md @@ -33,11 +33,12 @@ ClickHouse не удаляет данные из таблица автомати - `'ExceptionWhileProcessing' = 4` — исключение во время обработки запроса. - `event_date` ([Date](../../sql-reference/data-types/date.md)) — дата начала запроса. - `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — время начала запроса. +- `event_time_microseconds` ([DateTime](../../sql-reference/data-types/datetime.md)) — время начала запроса с точностью до микросекунд. - `query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — время начала обработки запроса. - `query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — время начала обработки запроса с точностью до микросекунд. - `query_duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — длительность выполнения запроса в миллисекундах. -- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Общее количество строк, считанных из всех таблиц и табличных функций, участвующих в запросе. Включает в себя обычные подзапросы, подзапросы для `IN` и `JOIN`. Для распределенных запросов `read_rows` включает в себя общее количество строк, прочитанных на всех репликах. Каждая реплика передает собственное значение `read_rows`, а сервер-инициатор запроса суммирует все полученные и локальные значения. Объемы кэша не учитываюся. -- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Общее количество байтов, считанных из всех таблиц и табличных функций, участвующих в запросе. Включает в себя обычные подзапросы, подзапросы для `IN` и `JOIN`. Для распределенных запросов `read_bytes` включает в себя общее количество байтов, прочитанных на всех репликах. Каждая реплика передает собственное значение `read_bytes`, а сервер-инициатор запроса суммирует все полученные и локальные значения. Объемы кэша не учитываюся. +- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — общее количество строк, считанных из всех таблиц и табличных функций, участвующих в запросе. Включает в себя обычные подзапросы, подзапросы для `IN` и `JOIN`. Для распределенных запросов `read_rows` включает в себя общее количество строк, прочитанных на всех репликах. Каждая реплика передает собственное значение `read_rows`, а сервер-инициатор запроса суммирует все полученные и локальные значения. Объемы кэша не учитываюся. 
+- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — общее количество байтов, считанных из всех таблиц и табличных функций, участвующих в запросе. Включает в себя обычные подзапросы, подзапросы для `IN` и `JOIN`. Для распределенных запросов `read_bytes` включает в себя общее количество байтов, прочитанных на всех репликах. Каждая реплика передает собственное значение `read_bytes`, а сервер-инициатор запроса суммирует все полученные и локальные значения. Объемы кэша не учитываюся. - `written_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — количество записанных строк для запросов `INSERT`. Для других запросов, значение столбца 0. - `written_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — объём записанных данных в байтах для запросов `INSERT`. Для других запросов, значение столбца 0. - `result_rows` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — количество строк в результате запроса `SELECT` или количество строк в запросе `INSERT`. @@ -76,64 +77,67 @@ ClickHouse не удаляет данные из таблица автомати - `quota_key` ([String](../../sql-reference/data-types/string.md)) — «ключ квоты» из настроек [квот](quotas.md) (см. `keyed`). - `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ревизия ClickHouse. - `thread_numbers` ([Array(UInt32)](../../sql-reference/data-types/array.md)) — количество потоков, участвующих в обработке запросов. -- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Счетчики для изменения различных метрик. Описание метрик можно получить из таблицы [system.events](#system_tables-events)(#system_tables-events +- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — счетчики для изменения различных метрик. Описание метрик можно получить из таблицы [system.events](#system_tables-events)(#system_tables-events - `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — метрики, перечисленные в столбце `ProfileEvents.Names`. - `Settings.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — имена настроек, которые меняются, когда клиент выполняет запрос. Чтобы разрешить логирование изменений настроек, установите параметр `log_query_settings` равным 1. -- `Settings.Values` ([Array(String)](../../sql-reference/data-types/array.md)) — Значения настроек, которые перечислены в столбце `Settings.Names`. +- `Settings.Values` ([Array(String)](../../sql-reference/data-types/array.md)) — значения настроек, которые перечислены в столбце `Settings.Names`. 
**Пример** ``` sql -SELECT * FROM system.query_log LIMIT 1 FORMAT Vertical; +SELECT * FROM system.query_log LIMIT 1 \G ``` ``` text Row 1: ────── -type: QueryStart -event_date: 2020-05-13 -event_time: 2020-05-13 14:02:28 -query_start_time: 2020-05-13 14:02:28 -query_duration_ms: 0 -read_rows: 0 -read_bytes: 0 -written_rows: 0 -written_bytes: 0 -result_rows: 0 -result_bytes: 0 -memory_usage: 0 -query: SELECT 1 -exception_code: 0 -exception: -stack_trace: -is_initial_query: 1 -user: default -query_id: 5e834082-6f6d-4e34-b47b-cd1934f4002a -address: ::ffff:127.0.0.1 -port: 57720 -initial_user: default -initial_query_id: 5e834082-6f6d-4e34-b47b-cd1934f4002a -initial_address: ::ffff:127.0.0.1 -initial_port: 57720 -interface: 1 -os_user: bayonet -client_hostname: clickhouse.ru-central1.internal -client_name: ClickHouse client -client_revision: 54434 -client_version_major: 20 -client_version_minor: 4 -client_version_patch: 1 -http_method: 0 -http_user_agent: -quota_key: -revision: 54434 -thread_ids: [] -ProfileEvents.Names: [] -ProfileEvents.Values: [] -Settings.Names: ['use_uncompressed_cache','load_balancing','log_queries','max_memory_usage'] -Settings.Values: ['0','random','1','10000000000'] - +type: QueryStart +event_date: 2020-09-11 +event_time: 2020-09-11 10:08:17 +event_time_microseconds: 2020-09-11 10:08:17.063321 +query_start_time: 2020-09-11 10:08:17 +query_start_time_microseconds: 2020-09-11 10:08:17.063321 +query_duration_ms: 0 +read_rows: 0 +read_bytes: 0 +written_rows: 0 +written_bytes: 0 +result_rows: 0 +result_bytes: 0 +memory_usage: 0 +current_database: default +query: INSERT INTO test1 VALUES +exception_code: 0 +exception: +stack_trace: +is_initial_query: 1 +user: default +query_id: 50a320fd-85a8-49b8-8761-98a86bcbacef +address: ::ffff:127.0.0.1 +port: 33452 +initial_user: default +initial_query_id: 50a320fd-85a8-49b8-8761-98a86bcbacef +initial_address: ::ffff:127.0.0.1 +initial_port: 33452 +interface: 1 +os_user: bharatnc +client_hostname: tower +client_name: ClickHouse +client_revision: 54437 +client_version_major: 20 +client_version_minor: 7 +client_version_patch: 2 +http_method: 0 +http_user_agent: +quota_key: +revision: 54440 +thread_ids: [] +ProfileEvents.Names: [] +ProfileEvents.Values: [] +Settings.Names: ['use_uncompressed_cache','load_balancing','log_queries','max_memory_usage','allow_introspection_functions'] +Settings.Values: ['0','random','1','10000000000','1'] ``` + **Смотрите также** - [system.query_thread_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) — в этой таблице содержится информация о цепочке каждого выполненного запроса. diff --git a/docs/ru/operations/system-tables/query_thread_log.md b/docs/ru/operations/system-tables/query_thread_log.md index 11571f594d5..052baf98035 100644 --- a/docs/ru/operations/system-tables/query_thread_log.md +++ b/docs/ru/operations/system-tables/query_thread_log.md @@ -15,6 +15,7 @@ ClickHouse не удаляет данные из таблицы автомати - `event_date` ([Date](../../sql-reference/data-types/date.md)) — дата завершения выполнения запроса потоком. - `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — дата и время завершения выполнения запроса потоком. +- `event_time_microseconds` ([DateTime](../../sql-reference/data-types/datetime.md)) — дата и время завершения выполнения запроса потоком с точностью до микросекунд. - `query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — время начала обработки запроса. 
- `query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — время начала обработки запроса с точностью до микросекунд. - `query_duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — длительность обработки запроса в миллисекундах. @@ -24,7 +25,7 @@ ClickHouse не удаляет данные из таблицы автомати - `written_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — объём записанных данных в байтах для запросов `INSERT`. Для других запросов, значение столбца 0. - `memory_usage` ([Int64](../../sql-reference/data-types/int-uint.md)) — разница между выделенной и освобождённой памятью в контексте потока. - `peak_memory_usage` ([Int64](../../sql-reference/data-types/int-uint.md)) — максимальная разница между выделенной и освобождённой памятью в контексте потока. -- `thread_name` ([String](../../sql-reference/data-types/string.md)) — Имя потока. +- `thread_name` ([String](../../sql-reference/data-types/string.md)) — имя потока. - `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — tid (ID потока операционной системы). - `master_thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — tid (ID потока операционной системы) главного потока. - `query` ([String](../../sql-reference/data-types/string.md)) — текст запроса. @@ -56,56 +57,57 @@ ClickHouse не удаляет данные из таблицы автомати - `http_user_agent` ([String](../../sql-reference/data-types/string.md)) — HTTP заголовок `UserAgent`. - `quota_key` ([String](../../sql-reference/data-types/string.md)) — «ключ квоты» из настроек [квот](quotas.md) (см. `keyed`). - `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ревизия ClickHouse. -- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Счетчики для изменения различных метрик для данного потока. Описание метрик можно получить из таблицы [system.events](#system_tables-events). +- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — счетчики для изменения различных метрик для данного потока. Описание метрик можно получить из таблицы [system.events](#system_tables-events). - `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — метрики для данного потока, перечисленные в столбце `ProfileEvents.Names`. 
**Пример** ``` sql - SELECT * FROM system.query_thread_log LIMIT 1 FORMAT Vertical + SELECT * FROM system.query_thread_log LIMIT 1 \G ``` ``` text Row 1: ────── -event_date: 2020-05-13 -event_time: 2020-05-13 14:02:28 -query_start_time: 2020-05-13 14:02:28 -query_duration_ms: 0 -read_rows: 1 -read_bytes: 1 -written_rows: 0 -written_bytes: 0 -memory_usage: 0 -peak_memory_usage: 0 -thread_name: QueryPipelineEx -thread_id: 28952 -master_thread_id: 28924 -query: SELECT 1 -is_initial_query: 1 -user: default -query_id: 5e834082-6f6d-4e34-b47b-cd1934f4002a -address: ::ffff:127.0.0.1 -port: 57720 -initial_user: default -initial_query_id: 5e834082-6f6d-4e34-b47b-cd1934f4002a -initial_address: ::ffff:127.0.0.1 -initial_port: 57720 -interface: 1 -os_user: bayonet -client_hostname: clickhouse.ru-central1.internal -client_name: ClickHouse client -client_revision: 54434 -client_version_major: 20 -client_version_minor: 4 -client_version_patch: 1 -http_method: 0 -http_user_agent: -quota_key: -revision: 54434 -ProfileEvents.Names: ['ContextLock','RealTimeMicroseconds','UserTimeMicroseconds','OSCPUWaitMicroseconds','OSCPUVirtualTimeMicroseconds'] -ProfileEvents.Values: [1,97,81,5,81] -... +event_date: 2020-09-11 +event_time: 2020-09-11 10:08:17 +event_time_microseconds: 2020-09-11 10:08:17.134042 +query_start_time: 2020-09-11 10:08:17 +query_start_time_microseconds: 2020-09-11 10:08:17.063150 +query_duration_ms: 70 +read_rows: 0 +read_bytes: 0 +written_rows: 1 +written_bytes: 12 +memory_usage: 4300844 +peak_memory_usage: 4300844 +thread_name: TCPHandler +thread_id: 638133 +master_thread_id: 638133 +query: INSERT INTO test1 VALUES +is_initial_query: 1 +user: default +query_id: 50a320fd-85a8-49b8-8761-98a86bcbacef +address: ::ffff:127.0.0.1 +port: 33452 +initial_user: default +initial_query_id: 50a320fd-85a8-49b8-8761-98a86bcbacef +initial_address: ::ffff:127.0.0.1 +initial_port: 33452 +interface: 1 +os_user: bharatnc +client_hostname: tower +client_name: ClickHouse +client_revision: 54437 +client_version_major: 20 +client_version_minor: 7 +client_version_patch: 2 +http_method: 0 +http_user_agent: +quota_key: +revision: 54440 +ProfileEvents.Names: ['Query','InsertQuery','FileOpen','WriteBufferFromFileDescriptorWrite','WriteBufferFromFileDescriptorWriteBytes','ReadCompressedBytes','CompressedReadBufferBlocks','CompressedReadBufferBytes','IOBufferAllocs','IOBufferAllocBytes','FunctionExecute','CreatedWriteBufferOrdinary','DiskWriteElapsedMicroseconds','NetworkReceiveElapsedMicroseconds','NetworkSendElapsedMicroseconds','InsertedRows','InsertedBytes','SelectedRows','SelectedBytes','MergeTreeDataWriterRows','MergeTreeDataWriterUncompressedBytes','MergeTreeDataWriterCompressedBytes','MergeTreeDataWriterBlocks','MergeTreeDataWriterBlocksAlreadySorted','ContextLock','RWLockAcquiredReadLocks','RealTimeMicroseconds','UserTimeMicroseconds','SoftPageFaults','OSCPUVirtualTimeMicroseconds','OSWriteBytes','OSReadChars','OSWriteChars'] +ProfileEvents.Values: [1,1,11,11,591,148,3,71,29,6533808,1,11,72,18,47,1,12,1,12,1,12,189,1,1,10,2,70853,2748,49,2747,45056,422,1520] ``` **Смотрите также** diff --git a/docs/ru/operations/system-tables/text_log.md b/docs/ru/operations/system-tables/text_log.md index 01e34c914e6..141c3680c07 100644 --- a/docs/ru/operations/system-tables/text_log.md +++ b/docs/ru/operations/system-tables/text_log.md @@ -4,12 +4,13 @@ Столбцы: -- `event_date` (Date) — Дата создания записи. -- `event_time` (DateTime) — Время создания записи. -- `microseconds` (UInt32) — Время создания записи в микросекундах. 
-- `thread_name` (String) — Название потока, из которого была сделана запись. -- `thread_id` (UInt64) — Идентификатор потока ОС. -- `level` (Enum8) — Уровень логирования записи. Возможные значения: +- `event_date` (Date) — дата создания записи. +- `event_time` (DateTime) — время создания записи. +- `event_time_microseconds` (DateTime) — время создания записи с точностью до микросекунд. +- `microseconds` (UInt32) — время создания записи в микросекундах. +- `thread_name` (String) — название потока, из которого была сделана запись. +- `thread_id` (UInt64) — идентификатор потока ОС. +- `level` (Enum8) — уровень логирования записи. Возможные значения: - `1` или `'Fatal'`. - `2` или `'Critical'`. - `3` или `'Error'`. @@ -18,11 +19,35 @@ - `6` или `'Information'`. - `7` или `'Debug'`. - `8` или `'Trace'`. -- `query_id` (String) — Идентификатор запроса. -- `logger_name` (LowCardinality(String)) — Название логгера (`DDLWorker`). -- `message` (String) — Само тело записи. -- `revision` (UInt32) — Ревизия ClickHouse. -- `source_file` (LowCardinality(String)) — Исходный файл, из которого была сделана запись. -- `source_line` (UInt64) — Исходная строка, из которой была сделана запись. +- `query_id` (String) — идентификатор запроса. +- `logger_name` (LowCardinality(String)) — название логгера (`DDLWorker`). +- `message` (String) — само тело записи. +- `revision` (UInt32) — ревизия ClickHouse. +- `source_file` (LowCardinality(String)) — исходный файл, из которого была сделана запись. +- `source_line` (UInt64) — исходная строка, из которой была сделана запись. + +**Пример** + +``` sql +SELECT * FROM system.text_log LIMIT 1 \G +``` + +``` text +Row 1: +────── +event_date: 2020-09-10 +event_time: 2020-09-10 11:23:07 +event_time_microseconds: 2020-09-10 11:23:07.871397 +microseconds: 871397 +thread_name: clickhouse-serv +thread_id: 564917 +level: Information +query_id: +logger_name: DNSCacheUpdater +message: Update period 15 seconds +revision: 54440 +source_file: /ClickHouse/src/Interpreters/DNSCacheUpdater.cpp; void DB::DNSCacheUpdater::start() +source_line: 45 +``` [Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/text_log) diff --git a/docs/ru/operations/system-tables/trace_log.md b/docs/ru/operations/system-tables/trace_log.md index 8bab8ff646c..3f0a16199d5 100644 --- a/docs/ru/operations/system-tables/trace_log.md +++ b/docs/ru/operations/system-tables/trace_log.md @@ -6,26 +6,28 @@ ClickHouse создает эту таблицу когда утсановлен Для анализа stack traces, используйте функции интроспекции `addressToLine`, `addressToSymbol` и `demangle`. -Колонки: +Столбцы: -- `event_date`([Date](../../sql-reference/data-types/date.md)) — Дата в момент снятия экземпляра стэка адресов вызова. +- `event_date`([Date](../../sql-reference/data-types/date.md)) — дата в момент снятия экземпляра стэка адресов вызова. -- `event_time`([DateTime](../../sql-reference/data-types/datetime.md)) — Дата и время в момент снятия экземпляра стэка адресов вызова. +- `event_time`([DateTime](../../sql-reference/data-types/datetime.md)) — дата и время в момент снятия экземпляра стэка адресов вызова. + +- `event_time_microseconds` ([DateTime](../../sql-reference/data-types/datetime.md)) — дата и время в момент снятия экземпляра стэка адресов вызова с точностью до микросекунд. - `revision`([UInt32](../../sql-reference/data-types/int-uint.md)) — ревизия сборки сервера ClickHouse. 
Во время соединения с сервером через `clickhouse-client`, вы видите строку похожую на `Connected to ClickHouse server version 19.18.1 revision 54429.`. Это поле содержит номер после `revision`, но не содержит строку после `version`. -- `timer_type`([Enum8](../../sql-reference/data-types/enum.md)) — Тип таймера: +- `timer_type`([Enum8](../../sql-reference/data-types/enum.md)) — тип таймера: - `Real` означает wall-clock время. - `CPU` означает относительное CPU время. -- `thread_number`([UInt32](../../sql-reference/data-types/int-uint.md)) — Идентификатор треда. +- `thread_number`([UInt32](../../sql-reference/data-types/int-uint.md)) — идентификатор треда. -- `query_id`([String](../../sql-reference/data-types/string.md)) — Идентификатор запроса который может быть использован для получения деталей о запросе из таблицы [query_log](query_log.md#system_tables-query_log) system table. +- `query_id`([String](../../sql-reference/data-types/string.md)) — идентификатор запроса который может быть использован для получения деталей о запросе из таблицы [query_log](query_log.md#system_tables-query_log) system table. -- `trace`([Array(UInt64)](../../sql-reference/data-types/array.md)) — Трассировка стека адресов вызова в момент семплирования. Каждый элемент массива это адрес виртуальной памяти внутри процесса сервера ClickHouse. +- `trace`([Array(UInt64)](../../sql-reference/data-types/array.md)) — трассировка стека адресов вызова в момент семплирования. Каждый элемент массива — это адрес виртуальной памяти внутри процесса сервера ClickHouse. **Пример** @@ -36,13 +38,16 @@ SELECT * FROM system.trace_log LIMIT 1 \G ``` text Row 1: ────── -event_date: 2019-11-15 -event_time: 2019-11-15 15:09:38 -revision: 54428 -timer_type: Real -thread_number: 48 -query_id: acc4d61f-5bd1-4a3e-bc91-2180be37c915 -trace: [94222141367858,94222152240175,94222152325351,94222152329944,94222152330796,94222151449980,94222144088167,94222151682763,94222144088167,94222151682763,94222144088167,94222144058283,94222144059248,94222091840750,94222091842302,94222091831228,94222189631488,140509950166747,140509942945935] +event_date: 2020-09-10 +event_time: 2020-09-10 11:23:09 +event_time_microseconds: 2020-09-10 11:23:09.872924 +timestamp_ns: 1599762189872924510 +revision: 54440 +trace_type: Memory +thread_id: 564963 +query_id: +trace: [371912858,371912789,371798468,371799717,371801313,371790250,624462773,566365041,566440261,566445834,566460071,566459914,566459842,566459580,566459469,566459389,566459341,566455774,371993941,371988245,372158848,372187428,372187309,372187093,372185478,140222123165193,140222122205443] +size: 5244400 ``` [Оригинальная статья](https://clickhouse.tech/docs/ru/operations/system_tables/trace_log) diff --git a/docs/ru/sql-reference/statements/alter/sample-by.md b/docs/ru/sql-reference/statements/alter/sample-by.md new file mode 100644 index 00000000000..2235e345a45 --- /dev/null +++ b/docs/ru/sql-reference/statements/alter/sample-by.md @@ -0,0 +1,20 @@ +--- +toc_priority: 41 +toc_title: SAMPLE BY +--- + +# Manipulating Sampling-Key Expressions {#manipulations-with-sampling-key-expressions} + +Синтаксис: + +``` sql +ALTER TABLE [db].name [ON CLUSTER cluster] MODIFY SAMPLE BY new_expression +``` + +Команда меняет [ключ сэмплирования](../../../engines/table-engines/mergetree-family/mergetree.md) таблицы на `new_expression` (выражение или ряд выражений). + +Эта команда является упрощенной в том смысле, что она изменяет только метаданные. Первичный ключ должен содержать новый ключ сэмплирования. + +!!! 
+ +!!! note "Note" + Это работает только для таблиц в семействе [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) (включая +[реплицируемые](../../../engines/table-engines/mergetree-family/replication.md) таблицы). \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/drop.md b/docs/ru/sql-reference/statements/drop.md index 22e553cfdac..3a6ac22b071 100644 --- a/docs/ru/sql-reference/statements/drop.md +++ b/docs/ru/sql-reference/statements/drop.md @@ -5,106 +5,96 @@ toc_title: DROP # DROP {#drop} -Удаляет существующий объект. -Если указано `IF EXISTS` - не выдавать ошибку, если объекта не существует. +Удаляет существующий объект. Если указано `IF EXISTS` - не выдавать ошибку, если объекта не существует. ## DROP DATABASE {#drop-database} +Удаляет все таблицы в базе данных `db`, затем удаляет саму базу данных `db`. + +Синтаксис: + ``` sql DROP DATABASE [IF EXISTS] db [ON CLUSTER cluster] ``` -Удаляет все таблицы в базе данных db, затем удаляет саму базу данных db. - - ## DROP TABLE {#drop-table} +Удаляет таблицу. + +Синтаксис: + ``` sql DROP [TEMPORARY] TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] ``` -Удаляет таблицу. - - ## DROP DICTIONARY {#drop-dictionary} +Удаляет словарь. + +Синтаксис: + ``` sql DROP DICTIONARY [IF EXISTS] [db.]name ``` -Удаляет словарь. - - ## DROP USER {#drop-user-statement} Удаляет пользователя. -### Синтаксис {#drop-user-syntax} +Синтаксис: ```sql DROP USER [IF EXISTS] name [,...] [ON CLUSTER cluster_name] ``` - ## DROP ROLE {#drop-role-statement} -Удаляет роль. +Удаляет роль. При удалении роль отзывается у всех объектов системы доступа, которым она присвоена. -При удалении роль отзывается у всех объектов системы доступа, которым она присвоена. - -### Синтаксис {#drop-role-syntax} +Синтаксис: ```sql DROP ROLE [IF EXISTS] name [,...] [ON CLUSTER cluster_name] ``` - ## DROP ROW POLICY {#drop-row-policy-statement} -Удаляет политику доступа к строкам. +Удаляет политику доступа к строкам. При удалении политика отзывается у всех объектов системы доступа, которым она присвоена. -При удалении политика отзывается у всех объектов системы доступа, которым она присвоена. - -### Синтаксис {#drop-row-policy-syntax} +Синтаксис: ``` sql DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER cluster_name] ``` - ## DROP QUOTA {#drop-quota-statement} -Удаляет квоту. +Удаляет квоту. При удалении квота отзывается у всех объектов системы доступа, которым она присвоена. -При удалении квота отзывается у всех объектов системы доступа, которым она присвоена. - -### Синтаксис {#drop-quota-syntax} +Синтаксис: ``` sql DROP QUOTA [IF EXISTS] name [,...] [ON CLUSTER cluster_name] ``` - ## DROP SETTINGS PROFILE {#drop-settings-profile-statement} -Удаляет профиль настроек. +Удаляет профиль настроек. При удалении профиль отзывается у всех объектов системы доступа, которым он присвоен. -При удалении профиль отзывается у всех объектов системы доступа, которым он присвоен. - -### Синтаксис {#drop-settings-profile-syntax} +Синтаксис: ``` sql DROP [SETTINGS] PROFILE [IF EXISTS] name [,...] [ON CLUSTER cluster_name] ``` - ## DROP VIEW {#drop-view} +Удаляет представление. Представления могут быть удалены и командой `DROP TABLE`, но команда `DROP VIEW` проверяет, что `[db.]name` является представлением. + +Синтаксис: + ``` sql DROP VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster] ``` -Удаляет представление. Представления могут быть удалены и командой `DROP TABLE`, но команда `DROP VIEW` проверяет, что `[db.]name` является представлением.
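+
+**Пример**
+
+Примерный запрос (представление `hits_view` вымышленное, только для иллюстрации):
+
+``` sql
+DROP VIEW IF EXISTS hits_view
+```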
- - -[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/drop/) +[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/drop/) \ No newline at end of file diff --git a/docs/ru/sql-reference/statements/grant.md b/docs/ru/sql-reference/statements/grant.md index 8eea84ac594..69137095c4d 100644 --- a/docs/ru/sql-reference/statements/grant.md +++ b/docs/ru/sql-reference/statements/grant.md @@ -79,6 +79,7 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION - `ALTER RENAME COLUMN` - `ALTER INDEX` - `ALTER ORDER BY` + - `ALTER SAMPLE BY` - `ALTER ADD INDEX` - `ALTER DROP INDEX` - `ALTER MATERIALIZE INDEX` @@ -264,6 +265,7 @@ GRANT INSERT(x,y) ON db.table TO john - `ALTER RENAME COLUMN`. Уровень: `COLUMN`. Алиасы: `RENAME COLUMN` - `ALTER INDEX`. Уровень: `GROUP`. Алиасы: `INDEX` - `ALTER ORDER BY`. Уровень: `TABLE`. Алиасы: `ALTER MODIFY ORDER BY`, `MODIFY ORDER BY` + - `ALTER SAMPLE BY`. Уровень: `TABLE`. Алиасы: `ALTER MODIFY SAMPLE BY`, `MODIFY SAMPLE BY` - `ALTER ADD INDEX`. Уровень: `TABLE`. Алиасы: `ADD INDEX` - `ALTER DROP INDEX`. Уровень: `TABLE`. Алиасы: `DROP INDEX` - `ALTER MATERIALIZE INDEX`. Уровень: `TABLE`. Алиасы: `MATERIALIZE INDEX` From 3151de1b4e7f2864362c5fe397b6fb17010ffce9 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 26 Oct 2020 13:26:52 +0300 Subject: [PATCH 286/432] Update ColumnConst include. --- src/Columns/ColumnConst.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Columns/ColumnConst.cpp b/src/Columns/ColumnConst.cpp index b7fb22aeb0e..550a44a23a2 100644 --- a/src/Columns/ColumnConst.cpp +++ b/src/Columns/ColumnConst.cpp @@ -6,6 +6,8 @@ #include #include +#include + #if defined(MEMORY_SANITIZER) #include #endif From 9ec78855cded9eb5a84e7fed300ff7f8a4b497c2 Mon Sep 17 00:00:00 2001 From: Roman Bug Date: Mon, 26 Oct 2020 13:29:30 +0300 Subject: [PATCH 287/432] DOCSUP-2806: Add meta header in RU (#15801) * DOCSUP-2806: Add meta intro. * DOCSUP-2806: Update meta intro. * DOCSUP-2806: Fix meta. * DOCSUP-2806: Add quotes for meta headers. * DOCSUP-2806: Remove quotes from meta headers. * DOCSUP-2806: Add meta headers. * DOCSUP-2806: Fix quotes in meta headers. * DOCSUP-2806: Update meta headers. * DOCSUP-2806: Fix link to nowhere in EN. * DOCSUP-2806: Fix link (settings to tune) * DOCSUP-2806: Fix links. * DOCSUP-2806:Fix links EN * DOCSUP-2806: Fix build errors. * DOCSUP-2806: Fix meta intro. * DOCSUP-2806: Fix toc_priority in examples datasets TOC. * DOCSUP-2806: Fix items order in toc. * DOCSUP-2806: Fix order in toc. * DOCSUP-2806: Fix toc order. * DOCSUP-2806: Fix order in toc. * DOCSUP-2806: Fix toc index in create * DOCSUP-2806: Fix toc order in create. 
Co-authored-by: romanzhukov Co-authored-by: alexey-milovidov --- .../example-datasets/amplab-benchmark.md | 2 +- docs/en/getting-started/example-datasets/criteo.md | 2 +- docs/en/getting-started/example-datasets/index.md | 4 ++-- docs/en/getting-started/example-datasets/metrica.md | 2 +- docs/en/getting-started/example-datasets/nyc-taxi.md | 2 +- docs/en/getting-started/example-datasets/ontime.md | 2 +- .../getting-started/example-datasets/star-schema.md | 2 +- docs/en/getting-started/example-datasets/wikistat.md | 2 +- docs/en/sql-reference/functions/encoding-functions.md | 2 +- docs/en/sql-reference/operators/index.md | 2 +- docs/en/sql-reference/statements/alter/index.md | 2 +- docs/en/sql-reference/statements/attach.md | 2 +- docs/en/sql-reference/statements/check-table.md | 2 +- docs/en/sql-reference/statements/create/database.md | 2 +- docs/en/sql-reference/statements/create/dictionary.md | 2 +- docs/en/sql-reference/statements/create/index.md | 2 +- docs/en/sql-reference/statements/create/quota.md | 2 +- docs/en/sql-reference/statements/create/role.md | 2 +- docs/en/sql-reference/statements/create/row-policy.md | 2 +- .../statements/create/settings-profile.md | 2 +- docs/en/sql-reference/statements/create/table.md | 2 +- docs/en/sql-reference/statements/create/user.md | 2 +- docs/en/sql-reference/statements/create/view.md | 2 +- docs/en/sql-reference/statements/describe-table.md | 2 +- docs/en/sql-reference/statements/detach.md | 2 +- docs/en/sql-reference/statements/drop.md | 2 +- docs/en/sql-reference/statements/exists.md | 2 +- docs/en/sql-reference/statements/grant.md | 2 +- docs/en/sql-reference/statements/insert-into.md | 2 +- docs/en/sql-reference/statements/kill.md | 2 +- docs/en/sql-reference/statements/optimize.md | 2 +- docs/en/sql-reference/statements/rename.md | 2 +- docs/en/sql-reference/statements/revoke.md | 2 +- docs/en/sql-reference/statements/select/index.md | 2 +- docs/en/sql-reference/statements/set-role.md | 2 +- docs/en/sql-reference/statements/set.md | 2 +- docs/en/sql-reference/statements/show.md | 2 +- docs/en/sql-reference/statements/system.md | 2 +- docs/en/sql-reference/statements/truncate.md | 2 +- docs/en/sql-reference/statements/use.md | 2 +- docs/ru/commercial/cloud.md | 5 +++++ docs/ru/development/architecture.md | 5 +++++ docs/ru/development/browse-code.md | 6 ++++++ docs/ru/development/contrib.md | 6 ++++++ docs/ru/development/developer-instruction.md | 5 +++++ docs/ru/development/style.md | 6 ++++++ docs/ru/engines/database-engines/index.md | 7 +++++++ docs/ru/engines/database-engines/lazy.md | 5 +++++ docs/ru/engines/database-engines/mysql.md | 5 +++++ docs/ru/engines/index.md | 6 +++--- docs/ru/engines/table-engines/index.md | 7 +++++++ docs/ru/engines/table-engines/integrations/hdfs.md | 5 +++++ docs/ru/engines/table-engines/integrations/index.md | 2 +- docs/ru/engines/table-engines/integrations/jdbc.md | 5 +++++ docs/ru/engines/table-engines/integrations/kafka.md | 5 +++++ docs/ru/engines/table-engines/integrations/mysql.md | 5 +++++ docs/ru/engines/table-engines/integrations/odbc.md | 5 +++++ docs/ru/engines/table-engines/log-family/index.md | 4 ++-- docs/ru/engines/table-engines/log-family/log.md | 5 +++++ docs/ru/engines/table-engines/log-family/stripelog.md | 5 +++++ docs/ru/engines/table-engines/log-family/tinylog.md | 5 +++++ .../mergetree-family/aggregatingmergetree.md | 5 +++++ .../mergetree-family/collapsingmergetree.md | 5 +++++ .../mergetree-family/custom-partitioning-key.md | 6 ++++++ .../mergetree-family/graphitemergetree.md | 5 
+++++ .../engines/table-engines/mergetree-family/index.md | 3 +-- .../mergetree-family/replacingmergetree.md | 5 +++++ .../table-engines/mergetree-family/replication.md | 5 +++++ .../mergetree-family/summingmergetree.md | 5 +++++ .../mergetree-family/versionedcollapsingmergetree.md | 5 +++++ docs/ru/engines/table-engines/special/buffer.md | 5 +++++ docs/ru/engines/table-engines/special/dictionary.md | 5 +++++ docs/ru/engines/table-engines/special/distributed.md | 5 +++++ .../ru/engines/table-engines/special/external-data.md | 5 +++++ docs/ru/engines/table-engines/special/file.md | 5 +++++ docs/ru/engines/table-engines/special/index.md | 2 +- docs/ru/engines/table-engines/special/join.md | 5 +++++ .../engines/table-engines/special/materializedview.md | 5 +++++ docs/ru/engines/table-engines/special/memory.md | 5 +++++ docs/ru/engines/table-engines/special/merge.md | 5 +++++ docs/ru/engines/table-engines/special/null.md | 5 ++++- docs/ru/engines/table-engines/special/set.md | 5 +++++ docs/ru/engines/table-engines/special/url.md | 5 +++++ docs/ru/engines/table-engines/special/view.md | 5 +++++ docs/ru/faq/index.md | 2 +- .../example-datasets/amplab-benchmark.md | 5 +++++ docs/ru/getting-started/example-datasets/criteo.md | 5 +++++ docs/ru/getting-started/example-datasets/index.md | 7 +++---- docs/ru/getting-started/example-datasets/metrica.md | 5 +++++ docs/ru/getting-started/example-datasets/nyc-taxi.md | 5 +++++ docs/ru/getting-started/example-datasets/ontime.md | 5 +++++ .../getting-started/example-datasets/star-schema.md | 5 +++++ docs/ru/getting-started/example-datasets/wikistat.md | 5 +++++ docs/ru/getting-started/index.md | 3 +-- docs/ru/getting-started/install.md | 5 +++++ docs/ru/getting-started/playground.md | 5 +++++ docs/ru/guides/apply-catboost-model.md | 5 +++++ docs/ru/interfaces/cli.md | 5 +++++ docs/ru/interfaces/cpp.md | 5 +++++ docs/ru/interfaces/formats.md | 5 +++++ docs/ru/interfaces/http.md | 5 +++++ docs/ru/interfaces/jdbc.md | 5 +++++ docs/ru/interfaces/mysql.md | 5 +++++ docs/ru/interfaces/odbc.md | 6 ++++++ docs/ru/interfaces/tcp.md | 5 +++++ docs/ru/interfaces/third-party/client-libraries.md | 5 +++++ docs/ru/interfaces/third-party/gui.md | 6 ++++++ docs/ru/interfaces/third-party/index.md | 3 +-- docs/ru/interfaces/third-party/integrations.md | 5 +++++ docs/ru/interfaces/third-party/proxy.md | 5 +++++ docs/ru/introduction/distinctive-features.md | 5 +++++ docs/ru/introduction/history.md | 6 ++++++ docs/ru/introduction/performance.md | 5 +++++ docs/ru/operations/access-rights.md | 5 +++++ docs/ru/operations/backup.md | 5 +++++ docs/ru/operations/configuration-files.md | 6 ++++++ docs/ru/operations/index.md | 2 +- docs/ru/operations/monitoring.md | 5 +++++ docs/ru/operations/quotas.md | 5 +++++ docs/ru/operations/requirements.md | 5 +++++ .../server-configuration-parameters/settings.md | 5 +++++ .../ru/operations/settings/constraints-on-settings.md | 5 +++++ docs/ru/operations/settings/index.md | 4 ++-- .../ru/operations/settings/permissions-for-queries.md | 5 +++++ docs/ru/operations/settings/query-complexity.md | 5 +++++ docs/ru/operations/settings/settings-profiles.md | 5 +++++ docs/ru/operations/settings/settings-users.md | 5 +++++ docs/ru/operations/settings/settings.md | 2 +- docs/ru/operations/system-tables/index.md | 2 +- docs/ru/operations/tips.md | 5 +++++ docs/ru/operations/troubleshooting.md | 5 +++++ docs/ru/operations/update.md | 5 +++++ docs/ru/operations/utilities/clickhouse-copier.md | 5 +++++ docs/ru/operations/utilities/clickhouse-local.md | 5 +++++ 
docs/ru/operations/utilities/index.md | 6 ++++++ .../sql-reference/aggregate-functions/combinators.md | 6 ++++++ .../aggregate-functions/parametric-functions.md | 5 +++++ docs/ru/sql-reference/data-types/boolean.md | 5 +++++ docs/ru/sql-reference/data-types/date.md | 5 +++++ docs/ru/sql-reference/data-types/datetime.md | 5 +++++ docs/ru/sql-reference/data-types/datetime64.md | 5 +++++ docs/ru/sql-reference/data-types/decimal.md | 5 +++++ docs/ru/sql-reference/data-types/domains/index.md | 4 ++-- docs/ru/sql-reference/data-types/domains/ipv4.md | 5 +++++ docs/ru/sql-reference/data-types/domains/ipv6.md | 5 +++++ docs/ru/sql-reference/data-types/enum.md | 5 +++++ docs/ru/sql-reference/data-types/fixedstring.md | 5 +++++ docs/ru/sql-reference/data-types/float.md | 5 +++++ docs/ru/sql-reference/data-types/int-uint.md | 5 +++++ .../data-types/nested-data-structures/index.md | 7 +++++++ .../data-types/special-data-types/expression.md | 5 +++++ .../data-types/special-data-types/index.md | 7 +++++++ .../data-types/special-data-types/interval.md | 5 +++++ .../data-types/special-data-types/nothing.md | 5 +++++ .../data-types/special-data-types/set.md | 5 +++++ docs/ru/sql-reference/data-types/string.md | 5 +++++ docs/ru/sql-reference/data-types/uuid.md | 6 ++++++ .../external-dicts-dict-hierarchical.md | 5 +++++ .../external-dicts-dict-layout.md | 5 +++++ .../external-dicts-dict-lifetime.md | 5 +++++ .../external-dicts-dict-sources.md | 5 +++++ .../external-dicts-dict-structure.md | 5 +++++ .../external-dictionaries/external-dicts-dict.md | 5 +++++ .../external-dictionaries/external-dicts.md | 6 ++++++ .../dictionaries/external-dictionaries/index.md | 2 +- docs/ru/sql-reference/dictionaries/index.md | 6 ++++++ docs/ru/sql-reference/dictionaries/internal-dicts.md | 5 +++++ docs/ru/sql-reference/distributed-ddl.md | 2 +- .../sql-reference/functions/arithmetic-functions.md | 5 +++++ docs/ru/sql-reference/functions/array-functions.md | 5 +++++ docs/ru/sql-reference/functions/array-join.md | 5 +++++ docs/ru/sql-reference/functions/bit-functions.md | 5 +++++ docs/ru/sql-reference/functions/bitmap-functions.md | 7 ++++++- .../sql-reference/functions/comparison-functions.md | 5 +++++ .../sql-reference/functions/conditional-functions.md | 5 +++++ .../ru/sql-reference/functions/date-time-functions.md | 5 +++++ docs/ru/sql-reference/functions/encoding-functions.md | 5 +++++ docs/ru/sql-reference/functions/ext-dict-functions.md | 5 +++++ .../ru/sql-reference/functions/functions-for-nulls.md | 5 +++++ docs/ru/sql-reference/functions/geo/coordinates.md | 3 ++- docs/ru/sql-reference/functions/geo/geohash.md | 2 +- docs/ru/sql-reference/functions/geo/h3.md | 2 +- docs/ru/sql-reference/functions/geo/index.md | 2 +- docs/ru/sql-reference/functions/hash-functions.md | 5 +++++ docs/ru/sql-reference/functions/in-functions.md | 5 +++++ docs/ru/sql-reference/functions/index.md | 6 ++++++ docs/ru/sql-reference/functions/introspection.md | 5 +++++ .../sql-reference/functions/ip-address-functions.md | 5 +++++ docs/ru/sql-reference/functions/json-functions.md | 5 +++++ docs/ru/sql-reference/functions/logical-functions.md | 5 +++++ .../functions/machine-learning-functions.md | 5 +++++ docs/ru/sql-reference/functions/math-functions.md | 5 +++++ docs/ru/sql-reference/functions/other-functions.md | 11 ++++++++--- docs/ru/sql-reference/functions/random-functions.md | 5 +++++ docs/ru/sql-reference/functions/rounding-functions.md | 5 +++++ .../functions/splitting-merging-functions.md | 5 +++++ 
docs/ru/sql-reference/functions/string-functions.md | 5 +++++ .../functions/string-replace-functions.md | 5 +++++ .../functions/string-search-functions.md | 5 +++++ .../functions/type-conversion-functions.md | 5 +++++ docs/ru/sql-reference/functions/url-functions.md | 5 +++++ docs/ru/sql-reference/functions/uuid-functions.md | 5 +++++ docs/ru/sql-reference/functions/ym-dict-functions.md | 5 +++++ docs/ru/sql-reference/operators/index.md | 5 +++++ docs/ru/sql-reference/statements/alter/column.md | 2 +- docs/ru/sql-reference/statements/alter/constraint.md | 2 +- docs/ru/sql-reference/statements/alter/index.md | 2 +- docs/ru/sql-reference/statements/alter/index/index.md | 2 +- docs/ru/sql-reference/statements/attach.md | 2 +- docs/ru/sql-reference/statements/check-table.md | 2 +- docs/ru/sql-reference/statements/create/database.md | 4 ++-- docs/ru/sql-reference/statements/create/dictionary.md | 4 ++-- docs/ru/sql-reference/statements/create/index.md | 4 ++-- docs/ru/sql-reference/statements/create/quota.md | 4 ++-- docs/ru/sql-reference/statements/create/role.md | 4 ++-- docs/ru/sql-reference/statements/create/row-policy.md | 4 ++-- .../statements/create/settings-profile.md | 4 ++-- docs/ru/sql-reference/statements/create/table.md | 4 ++-- docs/ru/sql-reference/statements/create/user.md | 4 ++-- docs/ru/sql-reference/statements/create/view.md | 4 ++-- docs/ru/sql-reference/statements/describe-table.md | 2 +- docs/ru/sql-reference/statements/detach.md | 2 +- docs/ru/sql-reference/statements/drop.md | 2 +- docs/ru/sql-reference/statements/exists.md | 2 +- docs/ru/sql-reference/statements/grant.md | 5 +++++ docs/ru/sql-reference/statements/insert-into.md | 2 +- docs/ru/sql-reference/statements/kill.md | 2 +- docs/ru/sql-reference/statements/optimize.md | 2 +- docs/ru/sql-reference/statements/rename.md | 2 +- docs/ru/sql-reference/statements/revoke.md | 5 +++++ docs/ru/sql-reference/statements/select/array-join.md | 4 ++++ docs/ru/sql-reference/statements/select/distinct.md | 4 ++++ docs/ru/sql-reference/statements/select/format.md | 4 ++++ docs/ru/sql-reference/statements/select/from.md | 4 ++++ docs/ru/sql-reference/statements/select/group-by.md | 4 ++++ docs/ru/sql-reference/statements/select/having.md | 4 ++++ docs/ru/sql-reference/statements/select/index.md | 6 ++++-- .../sql-reference/statements/select/into-outfile.md | 4 ++++ docs/ru/sql-reference/statements/select/join.md | 4 ++++ docs/ru/sql-reference/statements/select/limit-by.md | 4 ++++ docs/ru/sql-reference/statements/select/prewhere.md | 4 ++++ docs/ru/sql-reference/statements/select/sample.md | 4 ++++ docs/ru/sql-reference/statements/select/union-all.md | 4 ++++ docs/ru/sql-reference/statements/select/where.md | 4 ++++ docs/ru/sql-reference/statements/select/with.md | 4 ++++ docs/ru/sql-reference/statements/set-role.md | 2 +- docs/ru/sql-reference/statements/set.md | 2 +- docs/ru/sql-reference/statements/show.md | 5 +++++ docs/ru/sql-reference/statements/system.md | 5 +++++ docs/ru/sql-reference/statements/truncate.md | 2 +- docs/ru/sql-reference/statements/use.md | 2 +- docs/ru/sql-reference/syntax.md | 5 +++++ docs/ru/sql-reference/table-functions/file.md | 5 +++++ docs/ru/sql-reference/table-functions/generate.md | 5 +++++ docs/ru/sql-reference/table-functions/hdfs.md | 5 +++++ docs/ru/sql-reference/table-functions/input.md | 5 +++++ docs/ru/sql-reference/table-functions/jdbc.md | 5 +++++ docs/ru/sql-reference/table-functions/merge.md | 5 +++++ docs/ru/sql-reference/table-functions/mysql.md | 5 +++++ 
docs/ru/sql-reference/table-functions/numbers.md | 5 +++++ docs/ru/sql-reference/table-functions/odbc.md | 5 +++++ docs/ru/sql-reference/table-functions/remote.md | 5 +++++ docs/ru/sql-reference/table-functions/url.md | 5 +++++ docs/ru/whats-new/index.md | 2 +- docs/ru/whats-new/security-changelog.md | 5 +++++ 265 files changed, 998 insertions(+), 118 deletions(-) diff --git a/docs/en/getting-started/example-datasets/amplab-benchmark.md b/docs/en/getting-started/example-datasets/amplab-benchmark.md index 1b740f6194c..27ddfd27f78 100644 --- a/docs/en/getting-started/example-datasets/amplab-benchmark.md +++ b/docs/en/getting-started/example-datasets/amplab-benchmark.md @@ -1,5 +1,5 @@ --- -toc_priority: 17 +toc_priority: 19 toc_title: AMPLab Big Data Benchmark --- diff --git a/docs/en/getting-started/example-datasets/criteo.md b/docs/en/getting-started/example-datasets/criteo.md index a38a1b14210..261d1606fa4 100644 --- a/docs/en/getting-started/example-datasets/criteo.md +++ b/docs/en/getting-started/example-datasets/criteo.md @@ -1,5 +1,5 @@ --- -toc_priority: 19 +toc_priority: 18 toc_title: Terabyte Click Logs from Criteo --- diff --git a/docs/en/getting-started/example-datasets/index.md b/docs/en/getting-started/example-datasets/index.md index eedf4196b23..35ac90f9beb 100644 --- a/docs/en/getting-started/example-datasets/index.md +++ b/docs/en/getting-started/example-datasets/index.md @@ -1,6 +1,6 @@ --- toc_folder_title: Example Datasets -toc_priority: 15 +toc_priority: 14 toc_title: Introduction --- @@ -18,4 +18,4 @@ The list of documented datasets: - [New York Taxi Data](../../getting-started/example-datasets/nyc-taxi.md) - [OnTime](../../getting-started/example-datasets/ontime.md) -[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets) +[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets) \ No newline at end of file diff --git a/docs/en/getting-started/example-datasets/metrica.md b/docs/en/getting-started/example-datasets/metrica.md index 4131dca78fe..b036973b255 100644 --- a/docs/en/getting-started/example-datasets/metrica.md +++ b/docs/en/getting-started/example-datasets/metrica.md @@ -1,5 +1,5 @@ --- -toc_priority: 14 +toc_priority: 15 toc_title: Yandex.Metrica Data --- diff --git a/docs/en/getting-started/example-datasets/nyc-taxi.md b/docs/en/getting-started/example-datasets/nyc-taxi.md index a180f19be8a..9b9a12ba724 100644 --- a/docs/en/getting-started/example-datasets/nyc-taxi.md +++ b/docs/en/getting-started/example-datasets/nyc-taxi.md @@ -1,5 +1,5 @@ --- -toc_priority: 16 +toc_priority: 20 toc_title: New York Taxi Data --- diff --git a/docs/en/getting-started/example-datasets/ontime.md b/docs/en/getting-started/example-datasets/ontime.md index 91b6913e223..c2c8d5d930e 100644 --- a/docs/en/getting-started/example-datasets/ontime.md +++ b/docs/en/getting-started/example-datasets/ontime.md @@ -1,5 +1,5 @@ --- -toc_priority: 15 +toc_priority: 21 toc_title: OnTime --- diff --git a/docs/en/getting-started/example-datasets/star-schema.md b/docs/en/getting-started/example-datasets/star-schema.md index 7940807ee49..8a019e7ded3 100644 --- a/docs/en/getting-started/example-datasets/star-schema.md +++ b/docs/en/getting-started/example-datasets/star-schema.md @@ -1,5 +1,5 @@ --- -toc_priority: 20 +toc_priority: 16 toc_title: Star Schema Benchmark --- diff --git a/docs/en/getting-started/example-datasets/wikistat.md b/docs/en/getting-started/example-datasets/wikistat.md index 874af8c9423..619711582f4 100644 --- 
a/docs/en/getting-started/example-datasets/wikistat.md +++ b/docs/en/getting-started/example-datasets/wikistat.md @@ -1,5 +1,5 @@ --- -toc_priority: 18 +toc_priority: 17 toc_title: WikiStat --- diff --git a/docs/en/sql-reference/functions/encoding-functions.md b/docs/en/sql-reference/functions/encoding-functions.md index 44008fbbcbe..bc3f5ca4345 100644 --- a/docs/en/sql-reference/functions/encoding-functions.md +++ b/docs/en/sql-reference/functions/encoding-functions.md @@ -6,7 +6,7 @@ toc_title: Encoding # Encoding Functions {#encoding-functions} ## char {#char} - + Returns the string with the length as the number of passed arguments and each byte has the value of corresponding argument. Accepts multiple arguments of numeric types. If the value of argument is out of range of UInt8 data type, it is converted to UInt8 with possible rounding and overflow. **Syntax** diff --git a/docs/en/sql-reference/operators/index.md b/docs/en/sql-reference/operators/index.md index 3fe3384fffc..e5554b58e4a 100644 --- a/docs/en/sql-reference/operators/index.md +++ b/docs/en/sql-reference/operators/index.md @@ -1,5 +1,5 @@ --- -toc_priority: 37 +toc_priority: 38 toc_title: Operators --- diff --git a/docs/en/sql-reference/statements/alter/index.md b/docs/en/sql-reference/statements/alter/index.md index 60b7375f76d..30603122096 100644 --- a/docs/en/sql-reference/statements/alter/index.md +++ b/docs/en/sql-reference/statements/alter/index.md @@ -1,5 +1,5 @@ --- -toc_priority: 36 +toc_priority: 35 toc_title: ALTER --- diff --git a/docs/en/sql-reference/statements/attach.md b/docs/en/sql-reference/statements/attach.md index 6260e15cfa9..035441ef5f1 100644 --- a/docs/en/sql-reference/statements/attach.md +++ b/docs/en/sql-reference/statements/attach.md @@ -1,5 +1,5 @@ --- -toc_priority: 42 +toc_priority: 40 toc_title: ATTACH --- diff --git a/docs/en/sql-reference/statements/check-table.md b/docs/en/sql-reference/statements/check-table.md index b55955eeb40..450447acaf8 100644 --- a/docs/en/sql-reference/statements/check-table.md +++ b/docs/en/sql-reference/statements/check-table.md @@ -1,5 +1,5 @@ --- -toc_priority: 43 +toc_priority: 41 toc_title: CHECK --- diff --git a/docs/en/sql-reference/statements/create/database.md b/docs/en/sql-reference/statements/create/database.md index e874672471d..bdb31d44b0b 100644 --- a/docs/en/sql-reference/statements/create/database.md +++ b/docs/en/sql-reference/statements/create/database.md @@ -1,5 +1,5 @@ --- -toc_priority: 1 +toc_priority: 35 toc_title: DATABASE --- diff --git a/docs/en/sql-reference/statements/create/dictionary.md b/docs/en/sql-reference/statements/create/dictionary.md index 5313c59396e..b1098c54703 100644 --- a/docs/en/sql-reference/statements/create/dictionary.md +++ b/docs/en/sql-reference/statements/create/dictionary.md @@ -1,5 +1,5 @@ --- -toc_priority: 4 +toc_priority: 38 toc_title: DICTIONARY --- diff --git a/docs/en/sql-reference/statements/create/index.md b/docs/en/sql-reference/statements/create/index.md index 71ace2b664c..902a4348bac 100644 --- a/docs/en/sql-reference/statements/create/index.md +++ b/docs/en/sql-reference/statements/create/index.md @@ -1,6 +1,6 @@ --- toc_folder_title: CREATE -toc_priority: 35 +toc_priority: 34 toc_title: Overview --- diff --git a/docs/en/sql-reference/statements/create/quota.md b/docs/en/sql-reference/statements/create/quota.md index 7919dc0f6db..29752050b69 100644 --- a/docs/en/sql-reference/statements/create/quota.md +++ b/docs/en/sql-reference/statements/create/quota.md @@ -1,5 +1,5 @@ --- -toc_priority: 8 
+toc_priority: 42 toc_title: QUOTA --- diff --git a/docs/en/sql-reference/statements/create/role.md b/docs/en/sql-reference/statements/create/role.md index 8a682875d63..19db2b79b61 100644 --- a/docs/en/sql-reference/statements/create/role.md +++ b/docs/en/sql-reference/statements/create/role.md @@ -1,5 +1,5 @@ --- -toc_priority: 6 +toc_priority: 40 toc_title: ROLE --- diff --git a/docs/en/sql-reference/statements/create/row-policy.md b/docs/en/sql-reference/statements/create/row-policy.md index b1e7c8e9006..9f8b6b87d3b 100644 --- a/docs/en/sql-reference/statements/create/row-policy.md +++ b/docs/en/sql-reference/statements/create/row-policy.md @@ -1,5 +1,5 @@ --- -toc_priority: 7 +toc_priority: 41 toc_title: ROW POLICY --- diff --git a/docs/en/sql-reference/statements/create/settings-profile.md b/docs/en/sql-reference/statements/create/settings-profile.md index 6fcd1d4e840..196c336bb4d 100644 --- a/docs/en/sql-reference/statements/create/settings-profile.md +++ b/docs/en/sql-reference/statements/create/settings-profile.md @@ -1,5 +1,5 @@ --- -toc_priority: 9 +toc_priority: 43 toc_title: SETTINGS PROFILE --- diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md index 9aecc6c07f7..35780856ef2 100644 --- a/docs/en/sql-reference/statements/create/table.md +++ b/docs/en/sql-reference/statements/create/table.md @@ -1,5 +1,5 @@ --- -toc_priority: 2 +toc_priority: 36 toc_title: TABLE --- diff --git a/docs/en/sql-reference/statements/create/user.md b/docs/en/sql-reference/statements/create/user.md index 98c29363102..273882fd639 100644 --- a/docs/en/sql-reference/statements/create/user.md +++ b/docs/en/sql-reference/statements/create/user.md @@ -1,5 +1,5 @@ --- -toc_priority: 5 +toc_priority: 39 toc_title: USER --- diff --git a/docs/en/sql-reference/statements/create/view.md b/docs/en/sql-reference/statements/create/view.md index dc1200e5979..17a6c26c084 100644 --- a/docs/en/sql-reference/statements/create/view.md +++ b/docs/en/sql-reference/statements/create/view.md @@ -1,5 +1,5 @@ --- -toc_priority: 3 +toc_priority: 37 toc_title: VIEW --- diff --git a/docs/en/sql-reference/statements/describe-table.md b/docs/en/sql-reference/statements/describe-table.md index b7ffec1db07..bc197bf0f72 100644 --- a/docs/en/sql-reference/statements/describe-table.md +++ b/docs/en/sql-reference/statements/describe-table.md @@ -1,5 +1,5 @@ --- -toc_priority: 44 +toc_priority: 42 toc_title: DESCRIBE --- diff --git a/docs/en/sql-reference/statements/detach.md b/docs/en/sql-reference/statements/detach.md index 0d236c132df..62a7c0cc1e0 100644 --- a/docs/en/sql-reference/statements/detach.md +++ b/docs/en/sql-reference/statements/detach.md @@ -1,5 +1,5 @@ --- -toc_priority: 45 +toc_priority: 43 toc_title: DETACH --- diff --git a/docs/en/sql-reference/statements/drop.md b/docs/en/sql-reference/statements/drop.md index 07761b6b821..4317a20419e 100644 --- a/docs/en/sql-reference/statements/drop.md +++ b/docs/en/sql-reference/statements/drop.md @@ -1,5 +1,5 @@ --- -toc_priority: 46 +toc_priority: 44 toc_title: DROP --- diff --git a/docs/en/sql-reference/statements/exists.md b/docs/en/sql-reference/statements/exists.md index 2e8072125c7..3b0f4b66343 100644 --- a/docs/en/sql-reference/statements/exists.md +++ b/docs/en/sql-reference/statements/exists.md @@ -1,5 +1,5 @@ --- -toc_priority: 47 +toc_priority: 45 toc_title: EXISTS --- diff --git a/docs/en/sql-reference/statements/grant.md b/docs/en/sql-reference/statements/grant.md index c3e18ca3c75..f3829de2fbb 100644 --- 
a/docs/en/sql-reference/statements/grant.md +++ b/docs/en/sql-reference/statements/grant.md @@ -1,5 +1,5 @@ --- -toc_priority: 39 +toc_priority: 38 toc_title: GRANT --- diff --git a/docs/en/sql-reference/statements/insert-into.md b/docs/en/sql-reference/statements/insert-into.md index a2afbb2e10a..b49314a1785 100644 --- a/docs/en/sql-reference/statements/insert-into.md +++ b/docs/en/sql-reference/statements/insert-into.md @@ -1,5 +1,5 @@ --- -toc_priority: 34 +toc_priority: 33 toc_title: INSERT INTO --- diff --git a/docs/en/sql-reference/statements/kill.md b/docs/en/sql-reference/statements/kill.md index 1e2d2e925f7..d3f2d9bb5c6 100644 --- a/docs/en/sql-reference/statements/kill.md +++ b/docs/en/sql-reference/statements/kill.md @@ -1,5 +1,5 @@ --- -toc_priority: 48 +toc_priority: 46 toc_title: KILL --- diff --git a/docs/en/sql-reference/statements/optimize.md b/docs/en/sql-reference/statements/optimize.md index 374ef926d6c..a67f282e793 100644 --- a/docs/en/sql-reference/statements/optimize.md +++ b/docs/en/sql-reference/statements/optimize.md @@ -1,5 +1,5 @@ --- -toc_priority: 49 +toc_priority: 47 toc_title: OPTIMIZE --- diff --git a/docs/en/sql-reference/statements/rename.md b/docs/en/sql-reference/statements/rename.md index 315aaac40b5..4f14ad016a3 100644 --- a/docs/en/sql-reference/statements/rename.md +++ b/docs/en/sql-reference/statements/rename.md @@ -1,5 +1,5 @@ --- -toc_priority: 50 +toc_priority: 48 toc_title: RENAME --- diff --git a/docs/en/sql-reference/statements/revoke.md b/docs/en/sql-reference/statements/revoke.md index 8269c1f34a9..71a76546096 100644 --- a/docs/en/sql-reference/statements/revoke.md +++ b/docs/en/sql-reference/statements/revoke.md @@ -1,5 +1,5 @@ --- -toc_priority: 40 +toc_priority: 39 toc_title: REVOKE --- diff --git a/docs/en/sql-reference/statements/select/index.md b/docs/en/sql-reference/statements/select/index.md index 07be8c2bf45..3107f791eb9 100644 --- a/docs/en/sql-reference/statements/select/index.md +++ b/docs/en/sql-reference/statements/select/index.md @@ -1,7 +1,7 @@ --- title: SELECT Query toc_folder_title: SELECT -toc_priority: 33 +toc_priority: 32 toc_title: Overview --- diff --git a/docs/en/sql-reference/statements/set-role.md b/docs/en/sql-reference/statements/set-role.md index 564c8ec859f..cf14a9c6d75 100644 --- a/docs/en/sql-reference/statements/set-role.md +++ b/docs/en/sql-reference/statements/set-role.md @@ -1,5 +1,5 @@ --- -toc_priority: 52 +toc_priority: 51 toc_title: SET ROLE --- diff --git a/docs/en/sql-reference/statements/set.md b/docs/en/sql-reference/statements/set.md index 4a5bbf0baf6..c6c8d28257d 100644 --- a/docs/en/sql-reference/statements/set.md +++ b/docs/en/sql-reference/statements/set.md @@ -1,5 +1,5 @@ --- -toc_priority: 51 +toc_priority: 49 toc_title: SET --- diff --git a/docs/en/sql-reference/statements/show.md b/docs/en/sql-reference/statements/show.md index 81aca1261de..e1f2ef3488a 100644 --- a/docs/en/sql-reference/statements/show.md +++ b/docs/en/sql-reference/statements/show.md @@ -1,5 +1,5 @@ --- -toc_priority: 38 +toc_priority: 37 toc_title: SHOW --- diff --git a/docs/en/sql-reference/statements/system.md b/docs/en/sql-reference/statements/system.md index 8230177f249..75303fde19e 100644 --- a/docs/en/sql-reference/statements/system.md +++ b/docs/en/sql-reference/statements/system.md @@ -1,5 +1,5 @@ --- -toc_priority: 37 +toc_priority: 36 toc_title: SYSTEM --- diff --git a/docs/en/sql-reference/statements/truncate.md b/docs/en/sql-reference/statements/truncate.md index a8cf802616f..a13936c88ab 100644 --- 
a/docs/en/sql-reference/statements/truncate.md +++ b/docs/en/sql-reference/statements/truncate.md @@ -1,5 +1,5 @@ --- -toc_priority: 53 +toc_priority: 52 toc_title: TRUNCATE --- diff --git a/docs/en/sql-reference/statements/use.md b/docs/en/sql-reference/statements/use.md index 2932542f052..841c23d333d 100644 --- a/docs/en/sql-reference/statements/use.md +++ b/docs/en/sql-reference/statements/use.md @@ -1,5 +1,5 @@ --- -toc_priority: 54 +toc_priority: 53 toc_title: USE --- diff --git a/docs/ru/commercial/cloud.md b/docs/ru/commercial/cloud.md index 6cbc0e6743b..e6b0309c456 100644 --- a/docs/ru/commercial/cloud.md +++ b/docs/ru/commercial/cloud.md @@ -1,3 +1,8 @@ +--- +toc_priority: 1 +toc_title: "\u041f\u043e\u0441\u0442\u0430\u0432\u0449\u0438\u043a\u0438\u0020\u043e\u0431\u043b\u0430\u0447\u043d\u044b\u0445\u0020\u0443\u0441\u043b\u0443\u0433\u0020\u0043\u006c\u0069\u0063\u006b\u0048\u006f\u0075\u0073\u0065" +--- + # Поставщики облачных услуг ClickHouse {#clickhouse-cloud-service-providers} !!! info "Инфо" diff --git a/docs/ru/development/architecture.md b/docs/ru/development/architecture.md index e66b20f764a..0d8ddb6f795 100644 --- a/docs/ru/development/architecture.md +++ b/docs/ru/development/architecture.md @@ -1,3 +1,8 @@ +--- +toc_priority: 62 +toc_title: "\u041e\u0431\u0437\u043e\u0440\u0020\u0430\u0440\u0445\u0438\u0442\u0435\u043a\u0442\u0443\u0440\u044b\u0020\u0043\u006c\u0069\u0063\u006b\u0048\u006f\u0075\u0073\u0065" +--- + # Обзор архитектуры ClickHouse {#overview-of-clickhouse-architecture} ClickHouse - полноценная колоночная СУБД. Данные хранятся в колонках, а в процессе обработки - в массивах (векторах или фрагментах (chunk’ах) колонок). По возможности операции выполняются на массивах, а не на индивидуальных значениях. Это называется “векторизованное выполнение запросов” (vectorized query execution), и помогает снизить стоимость фактической обработки данных. diff --git a/docs/ru/development/browse-code.md b/docs/ru/development/browse-code.md index 814b213a6a7..ac17cf0e6f5 100644 --- a/docs/ru/development/browse-code.md +++ b/docs/ru/development/browse-code.md @@ -1,3 +1,9 @@ +--- +toc_priority: 71 +toc_title: "\u041d\u0430\u0432\u0438\u0433\u0430\u0446\u0438\u044f\u0020\u043f\u043e\u0020\u043a\u043e\u0434\u0443\u0020\u0043\u006c\u0069\u0063\u006b\u0048\u006f\u0075\u0073\u0065" +--- + + # Навигация по коду ClickHouse {#navigatsiia-po-kodu-clickhouse} Для навигации по коду онлайн доступен **Woboq**, он расположен [здесь](https://clickhouse.tech/codebrowser/html_report///ClickHouse/src/index.html). В нём реализовано удобное перемещение между исходными файлами, семантическая подсветка, подсказки, индексация и поиск. Слепок кода обновляется ежедневно.
diff --git a/docs/ru/development/contrib.md b/docs/ru/development/contrib.md index c640ecee79d..e65ab4819e8 100644 --- a/docs/ru/development/contrib.md +++ b/docs/ru/development/contrib.md @@ -1,3 +1,9 @@ +--- +toc_priority: 70 +toc_title: "\u0418\u0441\u043f\u043e\u043b\u044c\u0437\u0443\u0435\u043c\u044b\u0435\u0020\u0441\u0442\u043e\u0440\u043e\u043d\u043d\u0438\u0435\u0020\u0431\u0438\u0431\u043b\u0438\u043e\u0442\u0435\u043a\u0438" +--- + + # Используемые сторонние библиотеки {#ispolzuemye-storonnie-biblioteki} | Библиотека | Лицензия | diff --git a/docs/ru/development/developer-instruction.md b/docs/ru/development/developer-instruction.md index 9edcd4cf9f9..b0c84d9de7c 100644 --- a/docs/ru/development/developer-instruction.md +++ b/docs/ru/development/developer-instruction.md @@ -1,3 +1,8 @@ +--- +toc_priority: 61 +toc_title: "\u0418\u043d\u0441\u0442\u0440\u0443\u043a\u0446\u0438\u044f\u0020\u0434\u043b\u044f\u0020\u0440\u0430\u0437\u0440\u0430\u0431\u043e\u0442\u0447\u0438\u043a\u043e\u0432" +--- + # Инструкция для разработчиков Сборка ClickHouse поддерживается на Linux, FreeBSD, Mac OS X. diff --git a/docs/ru/development/style.md b/docs/ru/development/style.md index 951537e8113..671293a7bbd 100644 --- a/docs/ru/development/style.md +++ b/docs/ru/development/style.md @@ -1,3 +1,9 @@ +--- +toc_priority: 68 +toc_title: "\u041a\u0430\u043a\u0020\u043f\u0438\u0441\u0430\u0442\u044c\u0020\u043a\u043e\u0434\u0020\u043d\u0430\u0020\u0043\u002b\u002b" +--- + + # Как писать код на C++ {#kak-pisat-kod-na-c} ## Общее {#obshchee} diff --git a/docs/ru/engines/database-engines/index.md b/docs/ru/engines/database-engines/index.md index 79be85518e2..d3dd729e302 100644 --- a/docs/ru/engines/database-engines/index.md +++ b/docs/ru/engines/database-engines/index.md @@ -1,3 +1,10 @@ +--- +toc_folder_title: "\u0414\u0432\u0438\u0436\u043a\u0438\u0020\u0431\u0430\u0437\u0020\u0434\u0430\u043d\u043d\u044b\u0445" +toc_priority: 27 +toc_title: "\u0412\u0432\u0435\u0434\u0435\u043d\u0438\u0435" +--- + + # Движки баз данных {#dvizhki-baz-dannykh} Движки баз данных обеспечивают работу с таблицами. diff --git a/docs/ru/engines/database-engines/lazy.md b/docs/ru/engines/database-engines/lazy.md index c1538ec91bc..c01aae0284e 100644 --- a/docs/ru/engines/database-engines/lazy.md +++ b/docs/ru/engines/database-engines/lazy.md @@ -1,3 +1,8 @@ +--- +toc_priority: 31 +toc_title: Lazy +--- + # Lazy {#lazy} Сохраняет таблицы в оперативной памяти только в течение `expiration_time_in_seconds` секунд после последнего доступа. Может использоваться только с таблицами \*Log. diff --git a/docs/ru/engines/database-engines/mysql.md b/docs/ru/engines/database-engines/mysql.md index e293cb6ec77..2f8335de73a 100644 --- a/docs/ru/engines/database-engines/mysql.md +++ b/docs/ru/engines/database-engines/mysql.md @@ -1,3 +1,8 @@ +--- +toc_priority: 30 +toc_title: MySQL +--- + # MySQL {#mysql} Позволяет подключаться к базам данных на удалённом MySQL сервере и выполнять запросы `INSERT` и `SELECT` для обмена данными между ClickHouse и MySQL.
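+
+Примерный запрос создания такой базы данных (адрес сервера, имя базы и учётные данные вымышленные, только для иллюстрации):
+
+``` sql
+CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'my_password')
+```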
diff --git a/docs/ru/engines/index.md b/docs/ru/engines/index.md index 333e65dcb7b..28ccc8bcfe6 100644 --- a/docs/ru/engines/index.md +++ b/docs/ru/engines/index.md @@ -1,6 +1,6 @@ --- -toc_folder_title: "\u0414\u0432\u0438\u0436\u043A\u0438" +toc_folder_title: "\u0045\u006e\u0067\u0069\u006e\u0065\u0073" +toc_hidden: true toc_priority: 25 +toc_title: hidden --- - - diff --git a/docs/ru/engines/table-engines/index.md b/docs/ru/engines/table-engines/index.md index 6a954313c60..740588c50a4 100644 --- a/docs/ru/engines/table-engines/index.md +++ b/docs/ru/engines/table-engines/index.md @@ -1,3 +1,10 @@ +--- +toc_folder_title: "\u0414\u0432\u0438\u0436\u043a\u0438\u0020\u0442\u0430\u0431\u043b\u0438\u0446" +toc_priority: 26 +toc_title: "\u0412\u0432\u0435\u0434\u0435\u043d\u0438\u0435" +--- + + # Движки таблиц {#table_engines} Движок таблицы (тип таблицы) определяет: diff --git a/docs/ru/engines/table-engines/integrations/hdfs.md b/docs/ru/engines/table-engines/integrations/hdfs.md index b5234b38d7d..bd8e760fce4 100644 --- a/docs/ru/engines/table-engines/integrations/hdfs.md +++ b/docs/ru/engines/table-engines/integrations/hdfs.md @@ -1,3 +1,8 @@ +--- +toc_priority: 4 +toc_title: HDFS +--- + # HDFS {#table_engines-hdfs} Управляет данными в HDFS. Данный движок похож на движки [File](../special/file.md#table_engines-file) и [URL](../special/url.md#table_engines-url). diff --git a/docs/ru/engines/table-engines/integrations/index.md b/docs/ru/engines/table-engines/integrations/index.md index 0186f6cf19e..02189cf9e55 100644 --- a/docs/ru/engines/table-engines/integrations/index.md +++ b/docs/ru/engines/table-engines/integrations/index.md @@ -1,5 +1,5 @@ --- -toc_folder_title: Integrations +toc_folder_title: "\u0414\u0432\u0438\u0436\u043a\u0438\u0020\u0442\u0430\u0431\u043b\u0438\u0446\u0020\u0434\u043b\u044f\u0020\u0438\u043d\u0442\u0435\u0433\u0440\u0430\u0446\u0438\u0438" toc_priority: 30 --- diff --git a/docs/ru/engines/table-engines/integrations/jdbc.md b/docs/ru/engines/table-engines/integrations/jdbc.md index cded0c51fa0..d7d438e0633 100644 --- a/docs/ru/engines/table-engines/integrations/jdbc.md +++ b/docs/ru/engines/table-engines/integrations/jdbc.md @@ -1,3 +1,8 @@ +--- +toc_priority: 2 +toc_title: JDBC +--- + # JDBC {#table-engine-jdbc} Позволяет ClickHouse подключаться к внешним базам данных с помощью [JDBC](https://en.wikipedia.org/wiki/Java_Database_Connectivity). diff --git a/docs/ru/engines/table-engines/integrations/kafka.md b/docs/ru/engines/table-engines/integrations/kafka.md index 8c47e97f497..bcca349f743 100644 --- a/docs/ru/engines/table-engines/integrations/kafka.md +++ b/docs/ru/engines/table-engines/integrations/kafka.md @@ -1,3 +1,8 @@ +--- +toc_priority: 5 +toc_title: Kafka +--- + # Kafka {#kafka} Движок работает с [Apache Kafka](http://kafka.apache.org/). diff --git a/docs/ru/engines/table-engines/integrations/mysql.md b/docs/ru/engines/table-engines/integrations/mysql.md index 9891fe79959..3370e9b06d0 100644 --- a/docs/ru/engines/table-engines/integrations/mysql.md +++ b/docs/ru/engines/table-engines/integrations/mysql.md @@ -1,3 +1,8 @@ +--- +toc_priority: 3 +toc_title: MySQL +--- + # MySQL {#mysql} Движок MySQL позволяет выполнять запросы `SELECT` над данными, хранящимися на удалённом MySQL сервере. 
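+
+Примерный запрос создания таблицы с этим движком (параметры подключения и имена вымышленные, только для иллюстрации):
+
+``` sql
+CREATE TABLE mysql_table
+(
+    `id` UInt64,
+    `name` String
+)
+ENGINE = MySQL('localhost:3306', 'test', 'my_table', 'my_user', 'my_password')
+```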
diff --git a/docs/ru/engines/table-engines/integrations/odbc.md b/docs/ru/engines/table-engines/integrations/odbc.md index aecdbbf038d..97317d647c8 100644 --- a/docs/ru/engines/table-engines/integrations/odbc.md +++ b/docs/ru/engines/table-engines/integrations/odbc.md @@ -1,3 +1,8 @@ +--- +toc_priority: 1 +toc_title: ODBC +--- + # ODBC {#table-engine-odbc} Позволяет ClickHouse подключаться к внешним базам данных с помощью [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity). diff --git a/docs/ru/engines/table-engines/log-family/index.md b/docs/ru/engines/table-engines/log-family/index.md index d2cb0df5a8a..7c6d2f81d7c 100644 --- a/docs/ru/engines/table-engines/log-family/index.md +++ b/docs/ru/engines/table-engines/log-family/index.md @@ -1,6 +1,6 @@ --- -toc_folder_title: Семейство Log -toc_title: Введение +toc_folder_title: "\u0421\u0435\u043c\u0435\u0439\u0441\u0442\u0432\u043e\u0020\u004c\u006f\u0067" +toc_title: "\u0412\u0432\u0435\u0434\u0435\u043d\u0438\u0435" toc_priority: 29 --- diff --git a/docs/ru/engines/table-engines/log-family/log.md b/docs/ru/engines/table-engines/log-family/log.md index 826738d6a3d..fad331454c7 100644 --- a/docs/ru/engines/table-engines/log-family/log.md +++ b/docs/ru/engines/table-engines/log-family/log.md @@ -1,3 +1,8 @@ +--- +toc_priority: 33 +toc_title: Log +--- + # Log {#log} Движок относится к семейству движков Log. Смотрите общие свойства и различия движков в статье [Семейство Log](index.md). diff --git a/docs/ru/engines/table-engines/log-family/stripelog.md b/docs/ru/engines/table-engines/log-family/stripelog.md index 9523a354ee7..e505aae4c52 100644 --- a/docs/ru/engines/table-engines/log-family/stripelog.md +++ b/docs/ru/engines/table-engines/log-family/stripelog.md @@ -1,3 +1,8 @@ +--- +toc_priority: 32 +toc_title: StripeLog +--- + # StripeLog {#stripelog} Движок относится к семейству движков Log. Смотрите общие свойства и различия движков в статье [Семейство Log](index.md). diff --git a/docs/ru/engines/table-engines/log-family/tinylog.md b/docs/ru/engines/table-engines/log-family/tinylog.md index 69bc88b133e..d5c24d41ca4 100644 --- a/docs/ru/engines/table-engines/log-family/tinylog.md +++ b/docs/ru/engines/table-engines/log-family/tinylog.md @@ -1,3 +1,8 @@ +--- +toc_priority: 34 +toc_title: TinyLog +--- + # TinyLog {#tinylog} Движок относится к семейству движков Log. Смотрите общие свойства и различия движков в статье [Семейство Log](index.md). diff --git a/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md index 3e18f890af7..99b4ec06765 100644 --- a/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/aggregatingmergetree.md @@ -1,3 +1,8 @@ +--- +toc_priority: 35 +toc_title: AggregatingMergeTree +--- + # AggregatingMergeTree {#aggregatingmergetree} Движок наследует функциональность [MergeTree](mergetree.md#table_engines-mergetree), изменяя логику слияния кусков данных. Все строки с одинаковым первичным ключом (точнее, с одинаковым [ключом сортировки](mergetree.md)) ClickHouse заменяет на одну (в пределах одного куска данных), которая хранит объединение состояний агрегатных функций. 
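+
+Примерная таблица (имена и структура вымышленные, только для иллюстрации хранения состояний агрегатных функций):
+
+``` sql
+CREATE TABLE agg_visits
+(
+    StartDate Date,
+    CounterID UInt32,
+    Visits AggregateFunction(sum, UInt32),
+    Users AggregateFunction(uniq, UInt32)
+)
+ENGINE = AggregatingMergeTree()
+ORDER BY (StartDate, CounterID)
+```
+
+Записывать в столбцы типа `AggregateFunction` нужно состояния функций (комбинатор `-State`), а читать их следует функциями с комбинатором `-Merge`, например `sumMerge(Visits)`.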
diff --git a/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md index f7bc14cc647..8ea3a5a7c92 100644 --- a/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/collapsingmergetree.md @@ -1,3 +1,8 @@ +--- +toc_priority: 36 +toc_title: CollapsingMergeTree +--- + # CollapsingMergeTree {#table_engine-collapsingmergetree} Движок наследует функциональность от [MergeTree](mergetree.md) и добавляет в алгоритм слияния кусков данных логику сворачивания (удаления) строк. diff --git a/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md b/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md index 32b5eee7fab..2d26528d964 100644 --- a/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md +++ b/docs/ru/engines/table-engines/mergetree-family/custom-partitioning-key.md @@ -1,3 +1,9 @@ +--- +toc_priority: 32 +toc_title: "\u041f\u0440\u043e\u0438\u0437\u0432\u043e\u043b\u044c\u043d\u044b\u0439\u0020\u043a\u043b\u044e\u0447\u0020\u043f\u0430\u0440\u0442\u0438\u0446\u0438\u043e\u043d\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u044f" +--- + + # Произвольный ключ партиционирования {#proizvolnyi-kliuch-partitsionirovaniia} Партиционирование данных доступно для таблиц семейства [MergeTree](mergetree.md) (включая [реплицированные таблицы](replication.md)). Таблицы [MaterializedView](../special/materializedview.md#materializedview), созданные на основе таблиц MergeTree, также поддерживают партиционирование. diff --git a/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md b/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md index 861a8b6e633..e47c9127711 100644 --- a/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/graphitemergetree.md @@ -1,3 +1,8 @@ +--- +toc_priority: 38 +toc_title: GraphiteMergeTree +--- + # GraphiteMergeTree {#graphitemergetree} Движок предназначен для прореживания и агрегирования/усреднения (rollup) данных [Graphite](http://graphite.readthedocs.io/en/latest/index.html). Он может быть интересен разработчикам, которые хотят использовать ClickHouse как хранилище данных для Graphite. diff --git a/docs/ru/engines/table-engines/mergetree-family/index.md b/docs/ru/engines/table-engines/mergetree-family/index.md index 9e989d807da..abdfdd77d7f 100644 --- a/docs/ru/engines/table-engines/mergetree-family/index.md +++ b/docs/ru/engines/table-engines/mergetree-family/index.md @@ -1,6 +1,5 @@ --- toc_folder_title: MergeTree Family toc_priority: 28 +toc_title: "\u0412\u0432\u0435\u0434\u0435\u043d\u0438\u0435" --- - - diff --git a/docs/ru/engines/table-engines/mergetree-family/replacingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/replacingmergetree.md index 4aa1eb556f3..1228371e8ea 100644 --- a/docs/ru/engines/table-engines/mergetree-family/replacingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/replacingmergetree.md @@ -1,3 +1,8 @@ +--- +toc_priority: 33 +toc_title: ReplacingMergeTree +--- + # ReplacingMergeTree {#replacingmergetree} Движок отличается от [MergeTree](mergetree.md#table_engines-mergetree) тем, что выполняет удаление дублирующихся записей с одинаковым значением [ключа сортировки](mergetree.md).
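+
+Примерная таблица (вымышленная, только для иллюстрации; `updated_at` используется как столбец версии):
+
+``` sql
+CREATE TABLE events
+(
+    id UInt64,
+    updated_at DateTime,
+    payload String
+)
+ENGINE = ReplacingMergeTree(updated_at)
+ORDER BY id
+```
+
+При слиянии из строк с одинаковым `id` останется строка с максимальным значением `updated_at`.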
diff --git a/docs/ru/engines/table-engines/mergetree-family/replication.md b/docs/ru/engines/table-engines/mergetree-family/replication.md index 410cf06c0c5..426c456ba3a 100644 --- a/docs/ru/engines/table-engines/mergetree-family/replication.md +++ b/docs/ru/engines/table-engines/mergetree-family/replication.md @@ -1,3 +1,8 @@ +--- +toc_priority: 31 +toc_title: "\u0420\u0435\u043f\u043b\u0438\u043a\u0430\u0446\u0438\u044f\u0020\u0434\u0430\u043d\u043d\u044b\u0445" +--- + # Репликация данных {#table_engines-replication} Репликация поддерживается только для таблиц семейства MergeTree: diff --git a/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md index 070cd109c30..7b9c11adc2e 100644 --- a/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/summingmergetree.md @@ -1,3 +1,8 @@ +--- +toc_priority: 34 +toc_title: SummingMergeTree +--- + # SummingMergeTree {#summingmergetree} Движок наследует функциональность [MergeTree](mergetree.md#table_engines-mergetree). Отличие заключается в том, что для таблиц `SummingMergeTree` при слиянии кусков данных ClickHouse все строки с одинаковым первичным ключом (точнее, с одинаковым [ключом сортировки](mergetree.md)) заменяет на одну, которая хранит только суммы значений из столбцов с цифровым типом данных. Если ключ сортировки подобран таким образом, что одному значению ключа соответствует много строк, это значительно уменьшает объём хранения и ускоряет последующую выборку данных. diff --git a/docs/ru/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md b/docs/ru/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md index bf280eb52bc..2adb8cc0d77 100644 --- a/docs/ru/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/versionedcollapsingmergetree.md @@ -1,3 +1,8 @@ +--- +toc_priority: 37 +toc_title: VersionedCollapsingMergeTree +--- + # VersionedCollapsingMergeTree {#versionedcollapsingmergetree} Движок: diff --git a/docs/ru/engines/table-engines/special/buffer.md b/docs/ru/engines/table-engines/special/buffer.md index 72d2822af98..986fe9adbb9 100644 --- a/docs/ru/engines/table-engines/special/buffer.md +++ b/docs/ru/engines/table-engines/special/buffer.md @@ -1,3 +1,8 @@ +--- +toc_priority: 45 +toc_title: Buffer +--- + # Buffer {#buffer} Буферизует записываемые данные в оперативке, периодически сбрасывая их в другую таблицу. При чтении, производится чтение данных одновременно из буфера и из другой таблицы. diff --git a/docs/ru/engines/table-engines/special/dictionary.md b/docs/ru/engines/table-engines/special/dictionary.md index b35c8cecc8f..048da157b2d 100644 --- a/docs/ru/engines/table-engines/special/dictionary.md +++ b/docs/ru/engines/table-engines/special/dictionary.md @@ -1,3 +1,8 @@ +--- +toc_priority: 35 +toc_title: Dictionary +--- + # Dictionary {#dictionary} Движок `Dictionary` отображает данные [словаря](../../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) как таблицу ClickHouse. 
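+
+Примерный запрос (словарь `my_dict` вымышленный; структура столбцов таблицы должна совпадать со структурой словаря):
+
+``` sql
+CREATE TABLE dict_table
+(
+    `id` UInt64,
+    `value` String
+)
+ENGINE = Dictionary(my_dict)
+```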
diff --git a/docs/ru/engines/table-engines/special/distributed.md b/docs/ru/engines/table-engines/special/distributed.md index c8f7fe6eba7..7ab0b916337 100644 --- a/docs/ru/engines/table-engines/special/distributed.md +++ b/docs/ru/engines/table-engines/special/distributed.md @@ -1,3 +1,8 @@ +--- +toc_priority: 33 +toc_title: Distributed +--- + # Distributed {#distributed} **Движок Distributed не хранит данные самостоятельно**, а позволяет обрабатывать запросы распределённо, на нескольких серверах. Чтение автоматически распараллеливается. При чтении будут использованы индексы таблиц на удалённых серверах, если есть. diff --git a/docs/ru/engines/table-engines/special/external-data.md b/docs/ru/engines/table-engines/special/external-data.md index 41708978367..7e383c0c12d 100644 --- a/docs/ru/engines/table-engines/special/external-data.md +++ b/docs/ru/engines/table-engines/special/external-data.md @@ -1,3 +1,8 @@ +--- +toc_priority: 45 +toc_title: "\u0412\u043d\u0435\u0448\u043d\u0438\u0435\u0020\u0434\u0430\u043d\u043d\u044b\u0435\u0020\u0434\u043b\u044f\u0020\u043e\u0431\u0440\u0430\u0431\u043e\u0442\u043a\u0438\u0020\u0437\u0430\u043f\u0440\u043e\u0441\u0430" +--- + # Внешние данные для обработки запроса {#vneshnie-dannye-dlia-obrabotki-zaprosa} ClickHouse позволяет отправить на сервер данные, необходимые для обработки одного запроса, вместе с запросом SELECT. Такие данные будут положены во временную таблицу (см. раздел «Временные таблицы») и смогут использоваться в запросе (например, в операторах IN). diff --git a/docs/ru/engines/table-engines/special/file.md b/docs/ru/engines/table-engines/special/file.md index 907988a47aa..6a55ef31732 100644 --- a/docs/ru/engines/table-engines/special/file.md +++ b/docs/ru/engines/table-engines/special/file.md @@ -1,3 +1,8 @@ +--- +toc_priority: 37 +toc_title: File +--- + # File(Format) {#table_engines-file} Управляет данными в одном файле на диске в указанном формате. diff --git a/docs/ru/engines/table-engines/special/index.md b/docs/ru/engines/table-engines/special/index.md index dcf46c503b2..0d86461dd2d 100644 --- a/docs/ru/engines/table-engines/special/index.md +++ b/docs/ru/engines/table-engines/special/index.md @@ -1,5 +1,5 @@ --- -toc_folder_title: Special +toc_folder_title: "\u0421\u043f\u0435\u0446\u0438\u0430\u043b\u044c\u043d\u044b\u0435\u0020\u0434\u0432\u0438\u0436\u043a\u0438\u0020\u0442\u0430\u0431\u043b\u0438\u0446" toc_priority: 31 --- diff --git a/docs/ru/engines/table-engines/special/join.md b/docs/ru/engines/table-engines/special/join.md index aa734f5ca55..65bece4724c 100644 --- a/docs/ru/engines/table-engines/special/join.md +++ b/docs/ru/engines/table-engines/special/join.md @@ -1,3 +1,8 @@ +--- +toc_priority: 40 +toc_title: Join +--- + # Join {#join} Подготовленная структура данных для использования в операциях [JOIN](../../../engines/table-engines/special/join.md#select-join). diff --git a/docs/ru/engines/table-engines/special/materializedview.md b/docs/ru/engines/table-engines/special/materializedview.md index ae7a9965a24..1281d1db9ab 100644 --- a/docs/ru/engines/table-engines/special/materializedview.md +++ b/docs/ru/engines/table-engines/special/materializedview.md @@ -1,3 +1,8 @@ +--- +toc_priority: 43 +toc_title: MaterializedView +--- + # MaterializedView {#materializedview} Используется для реализации материализованных представлений (подробнее см. запрос [CREATE TABLE](../../../sql-reference/statements/create/table.md#create-table-query)). 
Для хранения данных, использует другой движок, который был указан при создании представления. При чтении из таблицы, просто использует этот движок. diff --git a/docs/ru/engines/table-engines/special/memory.md b/docs/ru/engines/table-engines/special/memory.md index 6de7978d134..9ca189ef3b2 100644 --- a/docs/ru/engines/table-engines/special/memory.md +++ b/docs/ru/engines/table-engines/special/memory.md @@ -1,3 +1,8 @@ +--- +toc_priority: 44 +toc_title: Memory +--- + # Memory {#memory} Хранит данные в оперативке, в несжатом виде. Данные хранятся именно в таком виде, в каком они получаются при чтении. То есть, само чтение из этой таблицы полностью бесплатно. diff --git a/docs/ru/engines/table-engines/special/merge.md b/docs/ru/engines/table-engines/special/merge.md index 4f6e1751591..656aa7cfd6b 100644 --- a/docs/ru/engines/table-engines/special/merge.md +++ b/docs/ru/engines/table-engines/special/merge.md @@ -1,3 +1,8 @@ +--- +toc_priority: 36 +toc_title: Merge +--- + # Merge {#merge} Движок `Merge` (не путайте с движком `MergeTree`) не хранит данные самостоятельно, а позволяет читать одновременно из произвольного количества других таблиц. diff --git a/docs/ru/engines/table-engines/special/null.md b/docs/ru/engines/table-engines/special/null.md index ac8c7942f79..2c3af1ce11e 100644 --- a/docs/ru/engines/table-engines/special/null.md +++ b/docs/ru/engines/table-engines/special/null.md @@ -1,4 +1,9 @@ +--- +toc_priority: 38 +toc_title: 'Null' +--- + # Null {#null} При записи в таблицу типа Null, данные игнорируются. При чтении из таблицы типа Null, возвращается пустота. diff --git a/docs/ru/engines/table-engines/special/set.md b/docs/ru/engines/table-engines/special/set.md index 497d52821d1..73fe3c3cf50 100644 --- a/docs/ru/engines/table-engines/special/set.md +++ b/docs/ru/engines/table-engines/special/set.md @@ -1,3 +1,8 @@ +--- +toc_priority: 39 +toc_title: Set +--- + # Set {#set} Представляет собой множество, постоянно находящееся в оперативке. Предназначено для использования в правой части оператора IN (смотрите раздел «Операторы IN»). diff --git a/docs/ru/engines/table-engines/special/url.md b/docs/ru/engines/table-engines/special/url.md index 69c1567a1c2..cdb5afddf75 100644 --- a/docs/ru/engines/table-engines/special/url.md +++ b/docs/ru/engines/table-engines/special/url.md @@ -1,3 +1,8 @@ +--- +toc_priority: 41 +toc_title: URL +--- + # URL(URL, Format) {#table_engines-url} Управляет данными на удаленном HTTP/HTTPS сервере. Данный движок похож diff --git a/docs/ru/engines/table-engines/special/view.md b/docs/ru/engines/table-engines/special/view.md index a732a675d4c..18813a55da2 100644 --- a/docs/ru/engines/table-engines/special/view.md +++ b/docs/ru/engines/table-engines/special/view.md @@ -1,3 +1,8 @@ +--- +toc_priority: 42 +toc_title: View +--- + # View {#table_engines-view} Используется для реализации представлений (подробнее см. запрос `CREATE VIEW`). Не хранит данные, а хранит только указанный запрос `SELECT`. При чтении из таблицы, выполняет его (с удалением из запроса всех ненужных столбцов). diff --git a/docs/ru/faq/index.md b/docs/ru/faq/index.md index 2ee9d51e83b..d9eccf59c5c 100644 --- a/docs/ru/faq/index.md +++ b/docs/ru/faq/index.md @@ -1,6 +1,6 @@ --- toc_folder_title: F.A.Q. 
+toc_hidden: true toc_priority: 76 --- - diff --git a/docs/ru/getting-started/example-datasets/amplab-benchmark.md b/docs/ru/getting-started/example-datasets/amplab-benchmark.md index b503fc909f3..bc59672ab26 100644 --- a/docs/ru/getting-started/example-datasets/amplab-benchmark.md +++ b/docs/ru/getting-started/example-datasets/amplab-benchmark.md @@ -1,3 +1,8 @@ +--- +toc_priority: 19 +toc_title: AMPLab Big Data Benchmark +--- + # AMPLab Big Data Benchmark {#amplab-big-data-benchmark} См. https://amplab.cs.berkeley.edu/benchmark/ diff --git a/docs/ru/getting-started/example-datasets/criteo.md b/docs/ru/getting-started/example-datasets/criteo.md index 4abb8c43f35..7a58da5b695 100644 --- a/docs/ru/getting-started/example-datasets/criteo.md +++ b/docs/ru/getting-started/example-datasets/criteo.md @@ -1,3 +1,8 @@ +--- +toc_priority: 18 +toc_title: "\u0422\u0435\u0440\u0430\u0431\u0430\u0439\u0442\u0020\u043b\u043e\u0433\u043e\u0432\u0020\u043a\u043b\u0438\u043a\u043e\u0432\u0020\u043e\u0442\u0020\u0043\u0072\u0069\u0074\u0065\u006f" +--- + # Терабайт логов кликов от Criteo {#terabait-logov-klikov-ot-criteo} Скачайте данные с http://labs.criteo.com/downloads/download-terabyte-click-logs/ diff --git a/docs/ru/getting-started/example-datasets/index.md b/docs/ru/getting-started/example-datasets/index.md index c349e369b1d..eff944a7980 100644 --- a/docs/ru/getting-started/example-datasets/index.md +++ b/docs/ru/getting-started/example-datasets/index.md @@ -1,8 +1,7 @@ --- -toc_folder_title: "\u0422\u0435\u0441\u0442\u043E\u0432\u044B\u0435 \u043C\u0430\u0441\ - \u0441\u0438\u0432\u044B \u0434\u0430\u043D\u043D\u044B\u0445" -toc_priority: 12 -toc_title: "\u041E\u0431\u0437\u043E\u0440" +toc_folder_title: "\u0422\u0435\u0441\u0442\u043e\u0432\u044b\u0435\u0020\u043c\u0430\u0441\u0441\u0438\u0432\u044b\u0020\u0434\u0430\u043d\u043d\u044b\u0445" +toc_priority: 14 +toc_title: "\u0412\u0432\u0435\u0434\u0435\u043d\u0438\u0435" --- # Тестовые массивы данных {#testovye-massivy-dannykh} diff --git a/docs/ru/getting-started/example-datasets/metrica.md b/docs/ru/getting-started/example-datasets/metrica.md index ab2fe3d6c64..e8a3163376c 100644 --- a/docs/ru/getting-started/example-datasets/metrica.md +++ b/docs/ru/getting-started/example-datasets/metrica.md @@ -1,3 +1,8 @@ +--- +toc_priority: 15 +toc_title: "\u0410\u043d\u043e\u043d\u0438\u043c\u0438\u0437\u0438\u0440\u043e\u0432\u0430\u043d\u043d\u044b\u0435\u0020\u0434\u0430\u043d\u043d\u044b\u0435\u0020\u042f\u043d\u0434\u0435\u043a\u0441\u002e\u041c\u0435\u0442\u0440\u0438\u043a\u0438" +--- + # Анонимизированные данные Яндекс.Метрики {#anonimizirovannye-dannye-iandeks-metriki} Датасет состоит из двух таблиц, содержащих анонимизированные данные о хитах (`hits_v1`) и визитах (`visits_v1`) Яндекс.Метрики. Каждую из таблиц можно скачать в виде сжатого `.tsv.xz`-файла или в виде уже готовых партиций. Также можно скачать расширенную версию таблицы `hits`, содержащую 100 миллионов строк в виде [архива c файлами TSV](https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_100m_obfuscated_v1.tsv.xz) и в виде [готовых партиций](https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz). 
diff --git a/docs/ru/getting-started/example-datasets/nyc-taxi.md b/docs/ru/getting-started/example-datasets/nyc-taxi.md index 64202b2e765..1f981324261 100644 --- a/docs/ru/getting-started/example-datasets/nyc-taxi.md +++ b/docs/ru/getting-started/example-datasets/nyc-taxi.md @@ -1,3 +1,8 @@ +--- +toc_priority: 20 +toc_title: "\u0414\u0430\u043d\u043d\u044b\u0435\u0020\u043e\u0020\u0442\u0430\u043a\u0441\u0438\u0020\u0432\u0020\u041d\u044c\u044e\u002d\u0419\u043e\u0440\u043a\u0435" +--- + # Данные о такси в Нью-Йорке {#dannye-o-taksi-v-niu-iorke} Этот датасет может быть получен двумя способами: diff --git a/docs/ru/getting-started/example-datasets/ontime.md b/docs/ru/getting-started/example-datasets/ontime.md index 7e7a6be2196..4d3eea14da6 100644 --- a/docs/ru/getting-started/example-datasets/ontime.md +++ b/docs/ru/getting-started/example-datasets/ontime.md @@ -1,3 +1,8 @@ +--- +toc_priority: 21 +toc_title: OnTime +--- + # OnTime {#ontime} Этот датасет может быть получен двумя способами: diff --git a/docs/ru/getting-started/example-datasets/star-schema.md b/docs/ru/getting-started/example-datasets/star-schema.md index d938b1e8620..f7502e8409d 100644 --- a/docs/ru/getting-started/example-datasets/star-schema.md +++ b/docs/ru/getting-started/example-datasets/star-schema.md @@ -1,3 +1,8 @@ +--- +toc_priority: 16 +toc_title: Star Schema Benchmark +--- + # Star Schema Benchmark {#star-schema-benchmark} Компиляция dbgen: diff --git a/docs/ru/getting-started/example-datasets/wikistat.md b/docs/ru/getting-started/example-datasets/wikistat.md index 0e3e269fe9f..c5a877ff8fd 100644 --- a/docs/ru/getting-started/example-datasets/wikistat.md +++ b/docs/ru/getting-started/example-datasets/wikistat.md @@ -1,3 +1,8 @@ +--- +toc_priority: 17 +toc_title: WikiStat +--- + # WikiStat {#wikistat} См: http://dumps.wikimedia.org/other/pagecounts-raw/ diff --git a/docs/ru/getting-started/index.md b/docs/ru/getting-started/index.md index b2b292c4725..ab72ce4a1d2 100644 --- a/docs/ru/getting-started/index.md +++ b/docs/ru/getting-started/index.md @@ -1,6 +1,5 @@ --- -toc_folder_title: "\u041D\u0430\u0447\u0430\u043B\u043E \u0440\u0430\u0431\u043E\u0442\ - \u044B" +toc_folder_title: "\u041d\u0430\u0447\u0430\u043b\u043e\u0020\u0440\u0430\u0431\u043e\u0442\u044b" toc_hidden: true toc_priority: 8 toc_title: hidden diff --git a/docs/ru/getting-started/install.md b/docs/ru/getting-started/install.md index 3e9270ef3bc..fb14ecfe599 100644 --- a/docs/ru/getting-started/install.md +++ b/docs/ru/getting-started/install.md @@ -1,3 +1,8 @@ +--- +toc_priority: 11 +toc_title: "\u0423\u0441\u0442\u0430\u043d\u043e\u0432\u043a\u0430" +--- + # Установка {#ustanovka} ## Системные требования {#sistemnye-trebovaniia} diff --git a/docs/ru/getting-started/playground.md b/docs/ru/getting-started/playground.md index e3bae5c9756..86a5cd5272c 100644 --- a/docs/ru/getting-started/playground.md +++ b/docs/ru/getting-started/playground.md @@ -1,3 +1,8 @@ +--- +toc_priority: 14 +toc_title: Playground +--- + # ClickHouse Playground {#clickhouse-playground} [ClickHouse Playground](https://play.clickhouse.tech) позволяет пользователям экспериментировать с ClickHouse, мгновенно выполняя запросы без настройки своего сервера или кластера. 
diff --git a/docs/ru/guides/apply-catboost-model.md b/docs/ru/guides/apply-catboost-model.md index 3515d2731c2..026b4d9d75e 100644 --- a/docs/ru/guides/apply-catboost-model.md +++ b/docs/ru/guides/apply-catboost-model.md @@ -1,3 +1,8 @@ +--- +toc_priority: 41 +toc_title: "\u041f\u0440\u0438\u043c\u0435\u043d\u0435\u043d\u0438\u0435\u0020\u043c\u043e\u0434\u0435\u043b\u0438\u0020\u0043\u0061\u0074\u0042\u006f\u006f\u0073\u0074\u0020\u0432\u0020\u0043\u006c\u0069\u0063\u006b\u0048\u006f\u0075\u0073\u0065" +--- + # Применение модели CatBoost в ClickHouse {#applying-catboost-model-in-clickhouse} [CatBoost](https://catboost.ai) — открытая программная библиотека разработанная компанией [Яндекс](https://yandex.ru/company/) для машинного обучения, которая использует схему градиентного бустинга. diff --git a/docs/ru/interfaces/cli.md b/docs/ru/interfaces/cli.md index 816b5962280..222af33f952 100644 --- a/docs/ru/interfaces/cli.md +++ b/docs/ru/interfaces/cli.md @@ -1,3 +1,8 @@ +--- +toc_priority: 17 +toc_title: "\u041a\u043b\u0438\u0435\u043d\u0442\u0020\u043a\u043e\u043c\u0430\u043d\u0434\u043d\u043e\u0439\u0020\u0441\u0442\u0440\u043e\u043a\u0438" +--- + # Клиент командной строки {#klient-komandnoi-stroki} ClickHouse предоставляет собственный клиент командной строки: `clickhouse-client`. Клиент поддерживает запуск с аргументами командной строки и с конфигурационными файлами. Подробнее читайте в разделе [Конфигурирование](#interfaces_cli_configuration). diff --git a/docs/ru/interfaces/cpp.md b/docs/ru/interfaces/cpp.md index 7136af3197b..264b4f82500 100644 --- a/docs/ru/interfaces/cpp.md +++ b/docs/ru/interfaces/cpp.md @@ -1,3 +1,8 @@ +--- +toc_priority: 24 +toc_title: "\u0043\u002b\u002b\u0020\u043a\u043b\u0438\u0435\u043d\u0442\u0441\u043a\u0430\u044f\u0020\u0431\u0438\u0431\u043b\u0438\u043e\u0442\u0435\u043a\u0430" +--- + # C++ клиентская библиотека {#c-klientskaia-biblioteka} См. README в репозитории [clickhouse-cpp](https://github.com/ClickHouse/clickhouse-cpp). diff --git a/docs/ru/interfaces/formats.md b/docs/ru/interfaces/formats.md index e94810be8d3..042c62e310c 100644 --- a/docs/ru/interfaces/formats.md +++ b/docs/ru/interfaces/formats.md @@ -1,3 +1,8 @@ +--- +toc_priority: 21 +toc_title: "\u0424\u043e\u0440\u043c\u0430\u0442\u044b\u0020\u0432\u0445\u043e\u0434\u043d\u044b\u0445\u0020\u0438\u0020\u0432\u044b\u0445\u043e\u0434\u043d\u044b\u0445\u0020\u0434\u0430\u043d\u043d\u044b\u0445" +--- + # Форматы входных и выходных данных {#formats} ClickHouse может принимать (`INSERT`) и отдавать (`SELECT`) данные в различных форматах. diff --git a/docs/ru/interfaces/http.md b/docs/ru/interfaces/http.md index ffe4b2e5276..279f2916c78 100644 --- a/docs/ru/interfaces/http.md +++ b/docs/ru/interfaces/http.md @@ -1,3 +1,8 @@ +--- +toc_priority: 19 +toc_title: "\u0048\u0054\u0054\u0050\u002d\u0438\u043d\u0442\u0435\u0440\u0444\u0435\u0439\u0441" +--- + # HTTP-интерфейс {#http-interface} HTTP интерфейс позволяет использовать ClickHouse на любой платформе, из любого языка программирования. У нас он используется для работы из Java и Perl, а также из shell-скриптов. В других отделах, HTTP интерфейс используется из Perl, Python и Go. HTTP интерфейс более ограничен по сравнению с родным интерфейсом, но является более совместимым. 
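The http.md hunk above presents the HTTP interface as the most portable way to talk to ClickHouse from any platform. A stdlib-only sketch of such a call; it assumes a local server with default settings (HTTP on port 8123), and the host, port, and query are illustrative:

```python
# Exercise the ClickHouse HTTP interface using only the standard library.
from urllib.request import urlopen
from urllib.parse import urlencode

query = "SELECT version()"
with urlopen("http://localhost:8123/?" + urlencode({"query": query})) as resp:
    print(resp.read().decode("utf-8").strip())
```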
diff --git a/docs/ru/interfaces/jdbc.md b/docs/ru/interfaces/jdbc.md index e83cd2cfacf..196dba64933 100644 --- a/docs/ru/interfaces/jdbc.md +++ b/docs/ru/interfaces/jdbc.md @@ -1,3 +1,8 @@ +--- +toc_priority: 22 +toc_title: "\u004a\u0044\u0042\u0043\u002d\u0434\u0440\u0430\u0439\u0432\u0435\u0440" +--- + # JDBC-драйвер {#jdbc-draiver} - **[Официальный драйвер](https://github.com/ClickHouse/clickhouse-jdbc)** diff --git a/docs/ru/interfaces/mysql.md b/docs/ru/interfaces/mysql.md index 79368aa1ba2..fa0003e0bea 100644 --- a/docs/ru/interfaces/mysql.md +++ b/docs/ru/interfaces/mysql.md @@ -1,3 +1,8 @@ +--- +toc_priority: 20 +toc_title: "\u004d\u0079\u0053\u0051\u004c\u002d\u0438\u043d\u0442\u0435\u0440\u0444\u0435\u0439\u0441" +--- + # MySQL-интерфейс {#mysql-interface} ClickHouse поддерживает взаимодействие по протоколу MySQL. Данная функция включается настройкой [mysql_port](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-mysql_port) в конфигурационном файле: diff --git a/docs/ru/interfaces/odbc.md b/docs/ru/interfaces/odbc.md index 0f4a6279539..728c4bd6979 100644 --- a/docs/ru/interfaces/odbc.md +++ b/docs/ru/interfaces/odbc.md @@ -1,3 +1,9 @@ +--- +toc_priority: 23 +toc_title: "\u004f\u0044\u0042\u0043\u002d\u0434\u0440\u0430\u0439\u0432\u0435\u0440" +--- + + # ODBC-драйвер {#odbc-draiver} - [Официальный драйвер](https://github.com/ClickHouse/clickhouse-odbc). diff --git a/docs/ru/interfaces/tcp.md b/docs/ru/interfaces/tcp.md index 6bdfb286ac2..d89646f15b7 100644 --- a/docs/ru/interfaces/tcp.md +++ b/docs/ru/interfaces/tcp.md @@ -1,3 +1,8 @@ +--- +toc_priority: 18 +toc_title: "\u0420\u043e\u0434\u043d\u043e\u0439\u0020\u0438\u043d\u0442\u0435\u0440\u0444\u0435\u0439\u0441\u0020\u0028\u0054\u0043\u0050\u0029" +--- + # Родной интерфейс (TCP) {#rodnoi-interfeis-tcp} Нативный протокол используется в [клиенте командной строки](cli.md), для взаимодействия между серверами во время обработки распределенных запросов, а также в других программах на C++. К сожалению, у родного протокола ClickHouse пока нет формальной спецификации, но в нем можно разобраться с использованием исходного кода ClickHouse (начиная с [примерно этого места](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client)) и/или путем перехвата и анализа TCP трафика. diff --git a/docs/ru/interfaces/third-party/client-libraries.md b/docs/ru/interfaces/third-party/client-libraries.md index 66a2252b23a..f35acb9e968 100644 --- a/docs/ru/interfaces/third-party/client-libraries.md +++ b/docs/ru/interfaces/third-party/client-libraries.md @@ -1,3 +1,8 @@ +--- +toc_priority: 26 +toc_title: "\u041a\u043b\u0438\u0435\u043d\u0442\u0441\u043a\u0438\u0435\u0020\u0431\u0438\u0431\u043b\u0438\u043e\u0442\u0435\u043a\u0438\u0020\u043e\u0442\u0020\u0441\u0442\u043e\u0440\u043e\u043d\u043d\u0438\u0445\u0020\u0440\u0430\u0437\u0440\u0430\u0431\u043e\u0442\u0447\u0438\u043a\u043e\u0432" +--- + # Клиентские библиотеки от сторонних разработчиков {#klientskie-biblioteki-ot-storonnikh-razrabotchikov} !!! 
warning "Disclaimer" diff --git a/docs/ru/interfaces/third-party/gui.md b/docs/ru/interfaces/third-party/gui.md index f7eaa5cc77f..1fabdb8a31c 100644 --- a/docs/ru/interfaces/third-party/gui.md +++ b/docs/ru/interfaces/third-party/gui.md @@ -1,3 +1,9 @@ +--- +toc_priority: 28 +toc_title: "\u0412\u0438\u0437\u0443\u0430\u043b\u044c\u043d\u044b\u0435\u0020\u0438\u043d\u0442\u0435\u0440\u0444\u0435\u0439\u0441\u044b\u0020\u043e\u0442\u0020\u0441\u0442\u043e\u0440\u043e\u043d\u043d\u0438\u0445\u0020\u0440\u0430\u0437\u0440\u0430\u0431\u043e\u0442\u0447\u0438\u043a\u043e\u0432" +--- + + # Визуальные интерфейсы от сторонних разработчиков {#vizualnye-interfeisy-ot-storonnikh-razrabotchikov} ## С открытым исходным кодом {#s-otkrytym-iskhodnym-kodom} diff --git a/docs/ru/interfaces/third-party/index.md b/docs/ru/interfaces/third-party/index.md index bfdd554aebd..a57169df73b 100644 --- a/docs/ru/interfaces/third-party/index.md +++ b/docs/ru/interfaces/third-party/index.md @@ -1,6 +1,5 @@ --- -toc_folder_title: "\u041E\u0442 \u0441\u0442\u043E\u0440\u043E\u043D\u043D\u0438\u0445\ - \ \u0440\u0430\u0437\u0440\u0430\u0431\u043E\u0442\u0447\u0438\u043A\u043E\u0432" +toc_folder_title: "\u0421\u0442\u043e\u0440\u043e\u043d\u043d\u0438\u0435\u0020\u0438\u043d\u0442\u0435\u0440\u0444\u0435\u0439\u0441\u044b" toc_priority: 24 --- diff --git a/docs/ru/interfaces/third-party/integrations.md b/docs/ru/interfaces/third-party/integrations.md index 527f65343db..60d6181ab3f 100644 --- a/docs/ru/interfaces/third-party/integrations.md +++ b/docs/ru/interfaces/third-party/integrations.md @@ -1,3 +1,8 @@ +--- +toc_priority: 27 +toc_title: "\u0411\u0438\u0431\u043b\u0438\u043e\u0442\u0435\u043a\u0438\u0020\u0434\u043b\u044f\u0020\u0438\u043d\u0442\u0435\u0433\u0440\u0430\u0446\u0438\u0438\u0020\u043e\u0442\u0020\u0441\u0442\u043e\u0440\u043e\u043d\u043d\u0438\u0445\u0020\u0440\u0430\u0437\u0440\u0430\u0431\u043e\u0442\u0447\u0438\u043a\u043e\u0432" +--- + # Библиотеки для интеграции от сторонних разработчиков {#biblioteki-dlia-integratsii-ot-storonnikh-razrabotchikov} !!! 
warning "Disclaimer" diff --git a/docs/ru/interfaces/third-party/proxy.md b/docs/ru/interfaces/third-party/proxy.md index 225c3fee150..fc66ecde293 100644 --- a/docs/ru/interfaces/third-party/proxy.md +++ b/docs/ru/interfaces/third-party/proxy.md @@ -1,3 +1,8 @@ +--- +toc_priority: 29 +toc_title: "\u041f\u0440\u043e\u043a\u0441\u0438\u002d\u0441\u0435\u0440\u0432\u0435\u0440\u044b\u0020\u043e\u0442\u0020\u0441\u0442\u043e\u0440\u043e\u043d\u043d\u0438\u0445\u0020\u0440\u0430\u0437\u0440\u0430\u0431\u043e\u0442\u0447\u0438\u043a\u043e\u0432" +--- + # Прокси-серверы от сторонних разработчиков {#proksi-servery-ot-storonnikh-razrabotchikov} ## chproxy {#chproxy} diff --git a/docs/ru/introduction/distinctive-features.md b/docs/ru/introduction/distinctive-features.md index 98dbe6df134..4eeeef4a443 100644 --- a/docs/ru/introduction/distinctive-features.md +++ b/docs/ru/introduction/distinctive-features.md @@ -1,3 +1,8 @@ +--- +toc_priority: 4 +toc_title: "\u041e\u0442\u043b\u0438\u0447\u0438\u0442\u0435\u043b\u044c\u043d\u044b\u0435\u0020\u0432\u043e\u0437\u043c\u043e\u0436\u043d\u043e\u0441\u0442\u0438\u0020\u0043\u006c\u0069\u0063\u006b\u0048\u006f\u0075\u0073\u0065" +--- + # Отличительные возможности ClickHouse {#otlichitelnye-vozmozhnosti-clickhouse} ## По-настоящему столбцовая СУБД {#po-nastoiashchemu-stolbtsovaia-subd} diff --git a/docs/ru/introduction/history.md b/docs/ru/introduction/history.md index 65254b0f4f0..ab740954bbe 100644 --- a/docs/ru/introduction/history.md +++ b/docs/ru/introduction/history.md @@ -1,3 +1,9 @@ +--- +toc_priority: 7 +toc_title: "\u0418\u0441\u0442\u043e\u0440\u0438\u044f\u0020\u0043\u006c\u0069\u0063\u006b\u0048\u006f\u0075\u0073\u0065" +--- + + # История ClickHouse {#istoriia-clickhouse} ClickHouse изначально разрабатывался для обеспечения работы [Яндекс.Метрики](https://metrika.yandex.ru/), [второй крупнейшей в мире](http://w3techs.com/technologies/overview/traffic_analysis/all) платформы для веб аналитики, и продолжает быть её ключевым компонентом. При более 13 триллионах записей в базе данных и более 20 миллиардах событий в сутки, ClickHouse позволяет генерировать индивидуально настроенные отчёты на лету напрямую из неагрегированных данных. Данная статья вкратце демонстрирует какие цели исторически стояли перед ClickHouse на ранних этапах его развития. diff --git a/docs/ru/introduction/performance.md b/docs/ru/introduction/performance.md index cbefa10cf7c..c449e76a6ea 100644 --- a/docs/ru/introduction/performance.md +++ b/docs/ru/introduction/performance.md @@ -1,3 +1,8 @@ +--- +toc_priority: 6 +toc_title: "\u041f\u0440\u043e\u0438\u0437\u0432\u043e\u0434\u0438\u0442\u0435\u043b\u044c\u043d\u043e\u0441\u0442\u044c" +--- + # Производительность {#proizvoditelnost} По результатам внутреннего тестирования в Яндексе, ClickHouse обладает наиболее высокой производительностью (как наиболее высокой пропускной способностью на длинных запросах, так и наиболее низкой задержкой на коротких запросах), при соответствующем сценарии работы, среди доступных для тестирования систем подобного класса. Результаты тестирования можно посмотреть на [отдельной странице](https://clickhouse.tech/benchmark/dbms/). 
diff --git a/docs/ru/operations/access-rights.md b/docs/ru/operations/access-rights.md index 27dbc2fbf62..9973de91161 100644 --- a/docs/ru/operations/access-rights.md +++ b/docs/ru/operations/access-rights.md @@ -1,3 +1,8 @@ +--- +toc_priority: 48 +toc_title: "\u0423\u043f\u0440\u0430\u0432\u043b\u0435\u043d\u0438\u0435\u0020\u0434\u043e\u0441\u0442\u0443\u043f\u043e\u043c" +--- + # Управление доступом {#access-control} ClickHouse поддерживает управление доступом на основе подхода [RBAC](https://ru.wikipedia.org/wiki/Управление_доступом_на_основе_ролей). diff --git a/docs/ru/operations/backup.md b/docs/ru/operations/backup.md index 89fb0403543..0dcb6fd307d 100644 --- a/docs/ru/operations/backup.md +++ b/docs/ru/operations/backup.md @@ -1,3 +1,8 @@ +--- +toc_priority: 49 +toc_title: "\u0420\u0435\u0437\u0435\u0440\u0432\u043d\u043e\u0435\u0020\u043a\u043e\u043f\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u0435\u0020\u0434\u0430\u043d\u043d\u044b\u0445" +--- + # Резервное копирование данных {#rezervnoe-kopirovanie-dannykh} [Репликация](../engines/table-engines/mergetree-family/replication.md) обеспечивает защиту от аппаратных сбоев, но не защищает от человеческих ошибок: случайного удаления данных, удаления не той таблицы, которую надо было, или таблицы на не том кластере, а также программных ошибок, которые приводят к неправильной обработке данных или их повреждению. Во многих случаях подобные ошибки влияют на все реплики. ClickHouse имеет встроенные средства защиты для предотвращения некоторых типов ошибок — например, по умолчанию [не получится удалить таблицы \*MergeTree, содержащие более 50 Гб данных, одной командой](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330). Однако эти средства защиты не охватывают все возможные случаи и могут быть обойдены. diff --git a/docs/ru/operations/configuration-files.md b/docs/ru/operations/configuration-files.md index df56ab9ac7d..a4cc9182427 100644 --- a/docs/ru/operations/configuration-files.md +++ b/docs/ru/operations/configuration-files.md @@ -1,3 +1,9 @@ +--- +toc_priority: 50 +toc_title: "\u041a\u043e\u043d\u0444\u0438\u0433\u0443\u0440\u0430\u0446\u0438\u043e\u043d\u043d\u044b\u0435\u0020\u0444\u0430\u0439\u043b\u044b" +--- + + # Конфигурационные файлы {#configuration_files} Основной конфигурационный файл сервера - `config.xml`. Он расположен в директории `/etc/clickhouse-server/`. 
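Since configuration-files.md above names `/etc/clickhouse-server/config.xml` as the main server configuration file, here is a small sketch of reading one setting from it. The path assumes a default package installation, and `http_port` is a standard top-level element of that file:

```python
# Read a single setting from the main server config described above.
# Assumes a default installation; adjust the path for your setup.
import xml.etree.ElementTree as ET

tree = ET.parse("/etc/clickhouse-server/config.xml")
print(tree.getroot().findtext("http_port"))  # typically 8123
```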
diff --git a/docs/ru/operations/index.md b/docs/ru/operations/index.md index 0a15959d652..74a1d135967 100644 --- a/docs/ru/operations/index.md +++ b/docs/ru/operations/index.md @@ -1,7 +1,7 @@ --- toc_folder_title: "\u042d\u043a\u0441\u043f\u043b\u0443\u0430\u0442\u0430\u0446\u0438\u044f" toc_priority: 41 -toc_title: intro +toc_title: "\u042d\u043a\u0441\u043f\u043b\u0443\u0430\u0442\u0430\u0446\u0438\u044f" --- # Эксплуатация {#operations} diff --git a/docs/ru/operations/monitoring.md b/docs/ru/operations/monitoring.md index a1013f5eae5..52d0b5ecc8a 100644 --- a/docs/ru/operations/monitoring.md +++ b/docs/ru/operations/monitoring.md @@ -1,3 +1,8 @@ +--- +toc_priority: 45 +toc_title: "\u041c\u043e\u043d\u0438\u0442\u043e\u0440\u0438\u043d\u0433" +--- + # Мониторинг {#monitoring} Вы можете отслеживать: diff --git a/docs/ru/operations/quotas.md b/docs/ru/operations/quotas.md index 399e80d2011..92533eef0c1 100644 --- a/docs/ru/operations/quotas.md +++ b/docs/ru/operations/quotas.md @@ -1,3 +1,8 @@ +--- +toc_priority: 51 +toc_title: "\u041a\u0432\u043e\u0442\u044b" +--- + # Квоты {#quotas} Квоты позволяют ограничить использование ресурсов за некоторый интервал времени, или просто подсчитывать использование ресурсов. diff --git a/docs/ru/operations/requirements.md b/docs/ru/operations/requirements.md index e1ecafd9f3b..36a7dd30b34 100644 --- a/docs/ru/operations/requirements.md +++ b/docs/ru/operations/requirements.md @@ -1,3 +1,8 @@ +--- +toc_priority: 44 +toc_title: "\u0422\u0440\u0435\u0431\u043e\u0432\u0430\u043d\u0438\u044f" +--- + # Требования {#trebovaniia} ## Процессор {#protsessor} diff --git a/docs/ru/operations/server-configuration-parameters/settings.md b/docs/ru/operations/server-configuration-parameters/settings.md index 2ca881d19f0..17966ef0547 100644 --- a/docs/ru/operations/server-configuration-parameters/settings.md +++ b/docs/ru/operations/server-configuration-parameters/settings.md @@ -1,3 +1,8 @@ +--- +toc_priority: 57 +toc_title: "\u041a\u043e\u043d\u0444\u0438\u0433\u0443\u0440\u0430\u0446\u0438\u043e\u043d\u043d\u044b\u0435\u0020\u043f\u0430\u0440\u0430\u043c\u0435\u0442\u0440\u044b\u0020\u0441\u0435\u0440\u0432\u0435\u0440\u0430" +--- + # Конфигурационные параметры сервера {#server-configuration-parameters-reference} ## builtin_dictionaries_reload_interval {#builtin-dictionaries-reload-interval} diff --git a/docs/ru/operations/settings/constraints-on-settings.md b/docs/ru/operations/settings/constraints-on-settings.md index dd324caecc6..b23be22958c 100644 --- a/docs/ru/operations/settings/constraints-on-settings.md +++ b/docs/ru/operations/settings/constraints-on-settings.md @@ -1,3 +1,8 @@ +--- +toc_priority: 62 +toc_title: "\u041e\u0433\u0440\u0430\u043d\u0438\u0447\u0435\u043d\u0438\u044f\u0020\u043d\u0430\u0020\u0438\u0437\u043c\u0435\u043d\u0435\u043d\u0438\u0435\u0020\u043d\u0430\u0441\u0442\u0440\u043e\u0435\u043a" +--- + # Ограничения на изменение настроек {#constraints-on-settings} Ограничения на изменение настроек могут находиться внутри секции `profiles` файла `user.xml` и запрещают пользователю менять некоторые настройки с помощью запроса `SET`. 
diff --git a/docs/ru/operations/settings/index.md b/docs/ru/operations/settings/index.md index edc8d2d3014..c24b7053c46 100644 --- a/docs/ru/operations/settings/index.md +++ b/docs/ru/operations/settings/index.md @@ -1,7 +1,7 @@ --- -toc_folder_title: "\u0412\u0432\u0435\u0434\u0435\u043d\u0438\u0435" +toc_folder_title: Settings toc_priority: 55 -toc_title: "\u041d\u0430\u0441\u0442\u0440\u043e\u0439\u043a\u0438" +toc_title: Introduction --- # Настройки {#session-settings-intro} diff --git a/docs/ru/operations/settings/permissions-for-queries.md b/docs/ru/operations/settings/permissions-for-queries.md index cb65e652331..ae896dac77c 100644 --- a/docs/ru/operations/settings/permissions-for-queries.md +++ b/docs/ru/operations/settings/permissions-for-queries.md @@ -1,3 +1,8 @@ +--- +toc_priority: 58 +toc_title: "\u0420\u0430\u0437\u0440\u0435\u0448\u0435\u043d\u0438\u044f\u0020\u0434\u043b\u044f\u0020\u0437\u0430\u043f\u0440\u043e\u0441\u043e\u0432" +--- + # Разрешения для запросов {#permissions_for_queries} Запросы в ClickHouse можно разделить на несколько типов: diff --git a/docs/ru/operations/settings/query-complexity.md b/docs/ru/operations/settings/query-complexity.md index aeb5445a054..a62e7523207 100644 --- a/docs/ru/operations/settings/query-complexity.md +++ b/docs/ru/operations/settings/query-complexity.md @@ -1,3 +1,8 @@ +--- +toc_priority: 59 +toc_title: "\u041e\u0433\u0440\u0430\u043d\u0438\u0447\u0435\u043d\u0438\u044f\u0020\u043d\u0430\u0020\u0441\u043b\u043e\u0436\u043d\u043e\u0441\u0442\u044c\u0020\u0437\u0430\u043f\u0440\u043e\u0441\u0430" +--- + # Ограничения на сложность запроса {#restrictions-on-query-complexity} Ограничения на сложность запроса - часть настроек. diff --git a/docs/ru/operations/settings/settings-profiles.md b/docs/ru/operations/settings/settings-profiles.md index d1e24490120..10feda01850 100644 --- a/docs/ru/operations/settings/settings-profiles.md +++ b/docs/ru/operations/settings/settings-profiles.md @@ -1,3 +1,8 @@ +--- +toc_priority: 61 +toc_title: "\u041f\u0440\u043e\u0444\u0438\u043b\u0438\u0020\u043d\u0430\u0441\u0442\u0440\u043e\u0435\u043a" +--- + # Профили настроек {#settings-profiles} Профиль настроек — это набор настроек, сгруппированных под одним именем. diff --git a/docs/ru/operations/settings/settings-users.md b/docs/ru/operations/settings/settings-users.md index 7c12780823a..2069922d0ea 100644 --- a/docs/ru/operations/settings/settings-users.md +++ b/docs/ru/operations/settings/settings-users.md @@ -1,3 +1,8 @@ +--- +toc_priority: 63 +toc_title: "\u041d\u0430\u0441\u0442\u0440\u043e\u0439\u043a\u0438\u0020\u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u0435\u0439" +--- + # Настройки пользователей {#nastroiki-polzovatelei} Раздел `users` конфигурационного файла `user.xml` содержит настройки для пользователей. 
diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 8a65f3781c1..3192b523d48 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -1,6 +1,6 @@ --- toc_priority: 60 -toc_title: Settings +toc_title: "\u041d\u0430\u0441\u0442\u0440\u043e\u0439\u043a\u0438" --- # Настройки {#settings} diff --git a/docs/ru/operations/system-tables/index.md b/docs/ru/operations/system-tables/index.md index 6fa989d3d0d..93ea1c92068 100644 --- a/docs/ru/operations/system-tables/index.md +++ b/docs/ru/operations/system-tables/index.md @@ -1,6 +1,6 @@ --- toc_priority: 52 -toc_title: Системные таблицы +toc_title: "\u0421\u0438\u0441\u0442\u0435\u043c\u043d\u044b\u0435\u0020\u0442\u0430\u0431\u043b\u0438\u0446\u044b" --- # Системные таблицы {#system-tables} diff --git a/docs/ru/operations/tips.md b/docs/ru/operations/tips.md index f076fab1311..40035309c03 100644 --- a/docs/ru/operations/tips.md +++ b/docs/ru/operations/tips.md @@ -1,3 +1,8 @@ +--- +toc_priority: 58 +toc_title: "\u0421\u043e\u0432\u0435\u0442\u044b\u0020\u043f\u043e\u0020\u044d\u043a\u0441\u043f\u043b\u0443\u0430\u0442\u0430\u0446\u0438\u0438" +--- + # Советы по эксплуатации {#sovety-po-ekspluatatsii} ## CPU Scaling Governor {#cpu-scaling-governor} diff --git a/docs/ru/operations/troubleshooting.md b/docs/ru/operations/troubleshooting.md index cb86dfef5e8..3df2a1dd46c 100644 --- a/docs/ru/operations/troubleshooting.md +++ b/docs/ru/operations/troubleshooting.md @@ -1,3 +1,8 @@ +--- +toc_priority: 46 +toc_title: "\u0423\u0441\u0442\u0440\u0430\u043d\u0435\u043d\u0438\u0435\u0020\u043d\u0435\u0438\u0441\u043f\u0440\u0430\u0432\u043d\u043e\u0441\u0442\u0435\u0439" +--- + # Устранение неисправностей {#ustranenie-neispravnostei} - [Установка дистрибутива](#troubleshooting-installation-errors) diff --git a/docs/ru/operations/update.md b/docs/ru/operations/update.md index f1998864f40..c74b28b3fd7 100644 --- a/docs/ru/operations/update.md +++ b/docs/ru/operations/update.md @@ -1,3 +1,8 @@ +--- +toc_priority: 47 +toc_title: "\u041e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u0435\u0020\u0043\u006c\u0069\u0063\u006b\u0048\u006f\u0075\u0073\u0065" +--- + # Обновление ClickHouse {#obnovlenie-clickhouse} Если ClickHouse установлен с помощью deb-пакетов, выполните следующие команды на сервере: diff --git a/docs/ru/operations/utilities/clickhouse-copier.md b/docs/ru/operations/utilities/clickhouse-copier.md index b43f5ccaf7a..64e3c1eee12 100644 --- a/docs/ru/operations/utilities/clickhouse-copier.md +++ b/docs/ru/operations/utilities/clickhouse-copier.md @@ -1,3 +1,8 @@ +--- +toc_priority: 59 +toc_title: clickhouse-copier +--- + # clickhouse-copier {#clickhouse-copier} Копирует данные из таблиц одного кластера в таблицы другого (или этого же) кластера. diff --git a/docs/ru/operations/utilities/clickhouse-local.md b/docs/ru/operations/utilities/clickhouse-local.md index 7dfa9587686..962b63e2b70 100644 --- a/docs/ru/operations/utilities/clickhouse-local.md +++ b/docs/ru/operations/utilities/clickhouse-local.md @@ -1,3 +1,8 @@ +--- +toc_priority: 60 +toc_title: clickhouse-local +--- + # clickhouse-local {#clickhouse-local} Принимает на вход данные, которые можно представить в табличном виде и выполняет над ними операции, заданные на [языке запросов](../../operations/utilities/clickhouse-local.md) ClickHouse. 
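clickhouse-local, covered in the hunk above, runs queries over tabular data without a server. A hedged sketch driving it from Python; it assumes the `clickhouse-local` binary is on `PATH` and relies on its documented `--structure`/`--input-format`/`--query` options, with stdin data exposed as a table named `table`:

```python
# Feed TSV data to clickhouse-local and run a query over it.
import subprocess

data = "1\n2\n3\n"
out = subprocess.run(
    ["clickhouse-local", "--structure", "x UInt32",
     "--input-format", "TSV", "--query", "SELECT sum(x) FROM table"],
    input=data, capture_output=True, text=True, check=True,
)
print(out.stdout.strip())  # 6
```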
diff --git a/docs/ru/operations/utilities/index.md b/docs/ru/operations/utilities/index.md index 91ee649ee3a..5b55ebd798d 100644 --- a/docs/ru/operations/utilities/index.md +++ b/docs/ru/operations/utilities/index.md @@ -1,3 +1,9 @@ +--- +toc_folder_title: "\u0423\u0442\u0438\u043b\u0438\u0442\u044b" +toc_priority: 56 +toc_title: "\u041e\u0431\u0437\u043e\u0440" +--- + # Утилиты ClickHouse {#utility-clickhouse} - [clickhouse-local](clickhouse-local.md) diff --git a/docs/ru/sql-reference/aggregate-functions/combinators.md b/docs/ru/sql-reference/aggregate-functions/combinators.md index ca65f6ac615..ef014906423 100644 --- a/docs/ru/sql-reference/aggregate-functions/combinators.md +++ b/docs/ru/sql-reference/aggregate-functions/combinators.md @@ -1,3 +1,9 @@ +--- +toc_priority: 37 +toc_title: "\u041a\u043e\u043c\u0431\u0438\u043d\u0430\u0442\u043e\u0440\u044b\u0020\u0430\u0433\u0440\u0435\u0433\u0430\u0442\u043d\u044b\u0445\u0020\u0444\u0443\u043d\u043a\u0446\u0438\u0439" +--- + + # Комбинаторы агрегатных функций {#aggregate_functions_combinators} К имени агрегатной функции может быть приписан некоторый суффикс. При этом, работа агрегатной функции некоторым образом модифицируется. diff --git a/docs/ru/sql-reference/aggregate-functions/parametric-functions.md b/docs/ru/sql-reference/aggregate-functions/parametric-functions.md index 7cc0bdef581..f20acaa45c3 100644 --- a/docs/ru/sql-reference/aggregate-functions/parametric-functions.md +++ b/docs/ru/sql-reference/aggregate-functions/parametric-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 38 +toc_title: "\u041f\u0430\u0440\u0430\u043c\u0435\u0442\u0440\u0438\u0447\u0435\u0441\u043a\u0438\u0435\u0020\u0430\u0433\u0440\u0435\u0433\u0430\u0442\u043d\u044b\u0435\u0020\u0444\u0443\u043d\u043a\u0446\u0438\u0438" +--- + # Параметрические агрегатные функции {#aggregate_functions_parametric} Некоторые агрегатные функции могут принимать не только столбцы-аргументы (по которым производится свёртка), но и набор параметров - констант для инициализации. Синтаксис - две пары круглых скобок вместо одной. Первая - для параметров, вторая - для аргументов. diff --git a/docs/ru/sql-reference/data-types/boolean.md b/docs/ru/sql-reference/data-types/boolean.md index f868ebf6d14..bb0cd50c739 100644 --- a/docs/ru/sql-reference/data-types/boolean.md +++ b/docs/ru/sql-reference/data-types/boolean.md @@ -1,3 +1,8 @@ +--- +toc_priority: 43 +toc_title: "\u0411\u0443\u043b\u0435\u0432\u044b\u0020\u0437\u043d\u0430\u0447\u0435\u043d\u0438\u044f" +--- + # Булевы значения {#bulevy-znacheniia} Отдельного типа для булевых значений нет. Для них используется тип UInt8, в котором используются только значения 0 и 1. diff --git a/docs/ru/sql-reference/data-types/date.md b/docs/ru/sql-reference/data-types/date.md index 97f58ec30f6..9bcae2c1d72 100644 --- a/docs/ru/sql-reference/data-types/date.md +++ b/docs/ru/sql-reference/data-types/date.md @@ -1,3 +1,8 @@ +--- +toc_priority: 47 +toc_title: Date +--- + # Date {#data-type-date} Дата. Хранится в двух байтах в виде (беззнакового) числа дней, прошедших от 1970-01-01. Позволяет хранить значения от чуть больше, чем начала unix-эпохи до верхнего порога, определяющегося константой на этапе компиляции (сейчас - до 2106 года, последний полностью поддерживаемый год - 2105). 
diff --git a/docs/ru/sql-reference/data-types/datetime.md b/docs/ru/sql-reference/data-types/datetime.md index d60e81c74d5..87c5da68f35 100644 --- a/docs/ru/sql-reference/data-types/datetime.md +++ b/docs/ru/sql-reference/data-types/datetime.md @@ -1,3 +1,8 @@ +--- +toc_priority: 48 +toc_title: DateTime +--- + # DateTime {#data_type-datetime} Позволяет хранить момент времени, который может быть представлен как календарная дата и время. diff --git a/docs/ru/sql-reference/data-types/datetime64.md b/docs/ru/sql-reference/data-types/datetime64.md index bc9394276ee..0a602e44636 100644 --- a/docs/ru/sql-reference/data-types/datetime64.md +++ b/docs/ru/sql-reference/data-types/datetime64.md @@ -1,3 +1,8 @@ +--- +toc_priority: 49 +toc_title: DateTime64 +--- + # DateTime64 {#data_type-datetime64} Позволяет хранить момент времени, который может быть представлен как календарная дата и время, с заданной суб-секундной точностью. diff --git a/docs/ru/sql-reference/data-types/decimal.md b/docs/ru/sql-reference/data-types/decimal.md index 29192cb8819..bdcd3c767b9 100644 --- a/docs/ru/sql-reference/data-types/decimal.md +++ b/docs/ru/sql-reference/data-types/decimal.md @@ -1,3 +1,8 @@ +--- +toc_priority: 42 +toc_title: Decimal +--- + # Decimal(P, S), Decimal32(S), Decimal64(S), Decimal128(S), Decimal256(S) {#decimalp-s-decimal32s-decimal64s-decimal128s} Знаковые дробные числа с сохранением точности операций сложения, умножения и вычитания. Для деления осуществляется отбрасывание (не округление) знаков, не попадающих в младший десятичный разряд. diff --git a/docs/ru/sql-reference/data-types/domains/index.md b/docs/ru/sql-reference/data-types/domains/index.md index fe5c7ab7349..4449469b1bc 100644 --- a/docs/ru/sql-reference/data-types/domains/index.md +++ b/docs/ru/sql-reference/data-types/domains/index.md @@ -1,6 +1,6 @@ --- -toc_folder_title: Домены -toc_title_title: Обзор +toc_folder_title: "\u0414\u043e\u043c\u0435\u043d\u044b" +toc_title: "\u041e\u0431\u0437\u043e\u0440" toc_priority: 56 --- diff --git a/docs/ru/sql-reference/data-types/domains/ipv4.md b/docs/ru/sql-reference/data-types/domains/ipv4.md index 68b67bcca60..57d6f12ab17 100644 --- a/docs/ru/sql-reference/data-types/domains/ipv4.md +++ b/docs/ru/sql-reference/data-types/domains/ipv4.md @@ -1,3 +1,8 @@ +--- +toc_priority: 59 +toc_title: IPv4 +--- + ## IPv4 {#ipv4} `IPv4` — это домен, базирующийся на типе данных `UInt32` предназначенный для хранения адресов IPv4. Он обеспечивает компактное хранение данных с удобным для человека форматом ввода-вывода, и явно отображаемым типом данных в структуре таблицы. diff --git a/docs/ru/sql-reference/data-types/domains/ipv6.md b/docs/ru/sql-reference/data-types/domains/ipv6.md index c88ee74adea..04c5fd0d491 100644 --- a/docs/ru/sql-reference/data-types/domains/ipv6.md +++ b/docs/ru/sql-reference/data-types/domains/ipv6.md @@ -1,3 +1,8 @@ +--- +toc_priority: 60 +toc_title: IPv6 +--- + ## IPv6 {#ipv6} `IPv6` — это домен, базирующийся на типе данных `FixedString(16)`, предназначенный для хранения адресов IPv6. Он обеспечивает компактное хранение данных с удобным для человека форматом ввода-вывода, и явно отображаемым типом данных в структуре таблицы. 
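The ipv4.md and ipv6.md hunks above describe domains layered over `UInt32` and `FixedString(16)`. The stdlib `ipaddress` module exposes the same underlying representations, which makes the storage model easy to see; the addresses are illustrative, and the conversion itself happens server-side in ClickHouse:

```python
# IPv4 as a 32-bit integer, IPv6 as 16 raw bytes.
import ipaddress

v4 = ipaddress.IPv4Address("116.253.40.133")
print(int(v4))          # 1962748037 — the UInt32 the IPv4 domain stores

v6 = ipaddress.IPv6Address("2001:db8::1")
print(len(v6.packed))   # 16 — matching FixedString(16)
print(v6.packed.hex())  # the 16 bytes in hex
```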
diff --git a/docs/ru/sql-reference/data-types/enum.md b/docs/ru/sql-reference/data-types/enum.md index 792c82f7410..b86d15c19a8 100644 --- a/docs/ru/sql-reference/data-types/enum.md +++ b/docs/ru/sql-reference/data-types/enum.md @@ -1,3 +1,8 @@ +--- +toc_priority: 50 +toc_title: Enum +--- + # Enum {#enum} Перечисляемый тип данных, содержащий именованные значения. diff --git a/docs/ru/sql-reference/data-types/fixedstring.md b/docs/ru/sql-reference/data-types/fixedstring.md index 4a26b2f76af..21115418e30 100644 --- a/docs/ru/sql-reference/data-types/fixedstring.md +++ b/docs/ru/sql-reference/data-types/fixedstring.md @@ -1,3 +1,8 @@ +--- +toc_priority: 45 +toc_title: FixedString(N) +--- + # FixedString {#fixedstring} Строка фиксированной длины `N` байт (не символов, не кодовых точек). diff --git a/docs/ru/sql-reference/data-types/float.md b/docs/ru/sql-reference/data-types/float.md index 91d4b655e2a..f2e85f35041 100644 --- a/docs/ru/sql-reference/data-types/float.md +++ b/docs/ru/sql-reference/data-types/float.md @@ -1,3 +1,8 @@ +--- +toc_priority: 41 +toc_title: Float32, Float64 +--- + # Float32, Float64 {#float32-float64} [Числа с плавающей запятой](https://en.wikipedia.org/wiki/IEEE_754). diff --git a/docs/ru/sql-reference/data-types/int-uint.md b/docs/ru/sql-reference/data-types/int-uint.md index 3a33c95e4c3..c45c639aace 100644 --- a/docs/ru/sql-reference/data-types/int-uint.md +++ b/docs/ru/sql-reference/data-types/int-uint.md @@ -1,3 +1,8 @@ +--- +toc_priority: 40 +toc_title: UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64 +--- + # UInt8, UInt16, UInt32, UInt64, UInt256, Int8, Int16, Int32, Int64, Int128, Int256 {#uint8-uint16-uint32-uint64-int8-int16-int32-int64} Целые числа фиксированной длины, без знака или со знаком. diff --git a/docs/ru/sql-reference/data-types/nested-data-structures/index.md b/docs/ru/sql-reference/data-types/nested-data-structures/index.md index 73d9019c96a..d53cabc6652 100644 --- a/docs/ru/sql-reference/data-types/nested-data-structures/index.md +++ b/docs/ru/sql-reference/data-types/nested-data-structures/index.md @@ -1,3 +1,10 @@ +--- +toc_folder_title: "\u0412\u043b\u043e\u0436\u0435\u043d\u043d\u044b\u0435\u0020\u0441\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u044b\u0020\u0434\u0430\u043d\u043d\u044b\u0445" +toc_hidden: true +toc_priority: 54 +toc_title: hidden +--- + # Вложенные структуры данных {#vlozhennye-struktury-dannykh} [Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/nested_data_structures/) diff --git a/docs/ru/sql-reference/data-types/special-data-types/expression.md b/docs/ru/sql-reference/data-types/special-data-types/expression.md index 1f4b960fb10..718fcc886a6 100644 --- a/docs/ru/sql-reference/data-types/special-data-types/expression.md +++ b/docs/ru/sql-reference/data-types/special-data-types/expression.md @@ -1,3 +1,8 @@ +--- +toc_priority: 58 +toc_title: Expression +--- + # Expression {#expression} Используется для представления лямбда-выражений в функциях высшего порядка. 
diff --git a/docs/ru/sql-reference/data-types/special-data-types/index.md b/docs/ru/sql-reference/data-types/special-data-types/index.md index d5aff1501db..29c057472ea 100644 --- a/docs/ru/sql-reference/data-types/special-data-types/index.md +++ b/docs/ru/sql-reference/data-types/special-data-types/index.md @@ -1,3 +1,10 @@ +--- +toc_folder_title: "\u0421\u043b\u0443\u0436\u0435\u0431\u043d\u044b\u0435\u0020\u0442\u0438\u043f\u044b\u0020\u0434\u0430\u043d\u043d\u044b\u0445" +toc_hidden: true +toc_priority: 55 +toc_title: hidden +--- + # Служебные типы данных {#sluzhebnye-tipy-dannykh} Значения служебных типов данных не могут сохраняться в таблицу и выводиться в качестве результата, а возникают как промежуточный результат выполнения запроса. diff --git a/docs/ru/sql-reference/data-types/special-data-types/interval.md b/docs/ru/sql-reference/data-types/special-data-types/interval.md index a77d05ab8be..31240b49c97 100644 --- a/docs/ru/sql-reference/data-types/special-data-types/interval.md +++ b/docs/ru/sql-reference/data-types/special-data-types/interval.md @@ -1,3 +1,8 @@ +--- +toc_priority: 61 +toc_title: Interval +--- + # Interval {#data-type-interval} Семейство типов данных, представляющих интервалы дат и времени. Оператор [INTERVAL](../../../sql-reference/data-types/special-data-types/interval.md#operator-interval) возвращает значения этих типов. diff --git a/docs/ru/sql-reference/data-types/special-data-types/nothing.md b/docs/ru/sql-reference/data-types/special-data-types/nothing.md index 9644f102522..c6a9cb868d8 100644 --- a/docs/ru/sql-reference/data-types/special-data-types/nothing.md +++ b/docs/ru/sql-reference/data-types/special-data-types/nothing.md @@ -1,3 +1,8 @@ +--- +toc_priority: 60 +toc_title: Nothing +--- + # Nothing {#nothing} Этот тип данных предназначен только для того, чтобы представлять [NULL](../../../sql-reference/data-types/special-data-types/nothing.md), т.е. отсутствие значения. diff --git a/docs/ru/sql-reference/data-types/special-data-types/set.md b/docs/ru/sql-reference/data-types/special-data-types/set.md index 9a99ed0ca9b..4c2f4ed2c66 100644 --- a/docs/ru/sql-reference/data-types/special-data-types/set.md +++ b/docs/ru/sql-reference/data-types/special-data-types/set.md @@ -1,3 +1,8 @@ +--- +toc_priority: 59 +toc_title: Set +--- + # Set {#set} Используется для представления правой части выражения IN. diff --git a/docs/ru/sql-reference/data-types/string.md b/docs/ru/sql-reference/data-types/string.md index 4669d154df2..798caec4d62 100644 --- a/docs/ru/sql-reference/data-types/string.md +++ b/docs/ru/sql-reference/data-types/string.md @@ -1,3 +1,8 @@ +--- +toc_priority: 44 +toc_title: String +--- + # String {#string} Строки произвольной длины. Длина не ограничена. Значение может содержать произвольный набор байт, включая нулевые байты. diff --git a/docs/ru/sql-reference/data-types/uuid.md b/docs/ru/sql-reference/data-types/uuid.md index 9d667a50526..b780190f6f4 100644 --- a/docs/ru/sql-reference/data-types/uuid.md +++ b/docs/ru/sql-reference/data-types/uuid.md @@ -1,3 +1,9 @@ +--- +toc_priority: 46 +toc_title: UUID +--- + + # UUID {#uuid-data-type} Универсальный уникальный идентификатор (UUID) - это 16-байтовое число, используемое для идентификации записей. Подробнее про UUID читайте на [Википедии](https://en.wikipedia.org/wiki/Universally_unique_identifier). 
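uuid.md above defines a UUID as a 16-byte number used to identify records; the stdlib equivalent shows the same size:

```python
# UUIDs as 16-byte values, matching the definition in uuid.md above.
import uuid

u = uuid.uuid4()
print(u)             # e.g. 61f0c404-5cb3-11e7-907b-a6006ad3dba0 (random each run)
print(len(u.bytes))  # 16
```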
diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md index 0869b409b0b..350e391dbed 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md @@ -1,3 +1,8 @@ +--- +toc_priority: 45 +toc_title: "\u0418\u0435\u0440\u0430\u0440\u0445\u0438\u0447\u0435\u0441\u043a\u0438\u0435\u0020\u0441\u043b\u043e\u0432\u0430\u0440\u0438" +--- + # Иерархические словари {#ierarkhicheskie-slovari} ClickHouse поддерживает иерархические словари с [числовыми ключом](external-dicts-dict-structure.md#ext_dict-numeric-key). diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md index 61275cc6652..979f1f2e5b9 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md @@ -1,3 +1,8 @@ +--- +toc_priority: 41 +toc_title: "\u0425\u0440\u0430\u043d\u0435\u043d\u0438\u0435\u0020\u0441\u043b\u043e\u0432\u0430\u0440\u0435\u0439\u0020\u0432\u0020\u043f\u0430\u043c\u044f\u0442\u0438" +--- + # Хранение словарей в памяти {#dicts-external-dicts-dict-layout} Словари можно размещать в памяти множеством способов. diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md index 2f287795296..4dbf4be9f96 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md @@ -1,3 +1,8 @@ +--- +toc_priority: 42 +toc_title: "\u041e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u0435\u0020\u0441\u043b\u043e\u0432\u0430\u0440\u0435\u0439" +--- + # Обновление словарей {#obnovlenie-slovarei} ClickHouse периодически обновляет словари. Интервал обновления для полностью загружаемых словарей и интервал инвалидации для кэшируемых словарей определяется в теге `` в секундах. diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md index 61d98f37cfd..0015edfdf72 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md @@ -1,3 +1,8 @@ +--- +toc_priority: 43 +toc_title: "\u0418\u0441\u0442\u043e\u0447\u043d\u0438\u043a\u0438\u0020\u0432\u043d\u0435\u0448\u043d\u0438\u0445\u0020\u0441\u043b\u043e\u0432\u0430\u0440\u0435\u0439" +--- + # Источники внешних словарей {#dicts-external-dicts-dict-sources} Внешний словарь можно подключить из множества источников. 
diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md index 4c3b4eb22e4..bf87ce61b9e 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md @@ -1,3 +1,8 @@ +--- +toc_priority: 44 +toc_title: "\u041a\u043b\u044e\u0447\u0020\u0438\u0020\u043f\u043e\u043b\u044f\u0020\u0441\u043b\u043e\u0432\u0430\u0440\u044f" +--- + # Ключ и поля словаря {#kliuch-i-polia-slovaria} Секция `` описывает ключ словаря и поля, доступные для запросов. diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md index 4ecc9b6c093..ff18f906926 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md @@ -1,3 +1,8 @@ +--- +toc_priority: 40 +toc_title: "\u041d\u0430\u0441\u0442\u0440\u043e\u0439\u043a\u0430\u0020\u0432\u043d\u0435\u0448\u043d\u0435\u0433\u043e\u0020\u0441\u043b\u043e\u0432\u0430\u0440\u044f" +--- + # Настройка внешнего словаря {#dicts-external-dicts-dict} XML-конфигурация словаря имеет следующую структуру: diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts.md index 734de8cffdf..c18af68c15e 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts.md @@ -1,3 +1,9 @@ +--- +toc_priority: 39 +toc_title: "\u0412\u043d\u0435\u0448\u043d\u0438\u0435\u0020\u0441\u043b\u043e\u0432\u0430\u0440\u0438" +--- + + # Внешние словари {#dicts-external-dicts} Существует возможность подключать собственные словари из различных источников данных. Источником данных для словаря может быть локальный текстовый/исполняемый файл, HTTP(s) ресурс или другая СУБД. Подробнее смотрите в разделе «[Источники внешних словарей](external-dicts-dict-sources.md)». diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/index.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/index.md index 4098ac38060..b448858b1fa 100644 --- a/docs/ru/sql-reference/dictionaries/external-dictionaries/index.md +++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/index.md @@ -1,5 +1,5 @@ --- -toc_folder_title: External Dictionaries +toc_folder_title: "\u0412\u043d\u0435\u0448\u043d\u0438\u0435\u0020\u0441\u043b\u043e\u0432\u0430\u0440\u0438" toc_priority: 37 --- diff --git a/docs/ru/sql-reference/dictionaries/index.md b/docs/ru/sql-reference/dictionaries/index.md index 8625a29b1e1..5a4119b4dd5 100644 --- a/docs/ru/sql-reference/dictionaries/index.md +++ b/docs/ru/sql-reference/dictionaries/index.md @@ -1,3 +1,9 @@ +--- +toc_folder_title: "\u0421\u043b\u043e\u0432\u0430\u0440\u0438" +toc_priority: 35 +toc_title: "\u0412\u0432\u0435\u0434\u0435\u043d\u0438\u0435" +--- + # Словари {#slovari} Словарь — это отображение (`ключ -> атрибуты`), которое удобно использовать для различного вида справочников. 
diff --git a/docs/ru/sql-reference/dictionaries/internal-dicts.md b/docs/ru/sql-reference/dictionaries/internal-dicts.md index 0e8e7c82a6e..d8103efa6ae 100644 --- a/docs/ru/sql-reference/dictionaries/internal-dicts.md +++ b/docs/ru/sql-reference/dictionaries/internal-dicts.md @@ -1,3 +1,8 @@ +--- +toc_priority: 39 +toc_title: "\u0412\u0441\u0442\u0440\u043e\u0435\u043d\u043d\u044b\u0435\u0020\u0441\u043b\u043e\u0432\u0430\u0440\u0438" +--- + # Встроенные словари {#internal_dicts} ClickHouse содержит встроенную возможность работы с геобазой. diff --git a/docs/ru/sql-reference/distributed-ddl.md b/docs/ru/sql-reference/distributed-ddl.md index a06a931680a..275709320f6 100644 --- a/docs/ru/sql-reference/distributed-ddl.md +++ b/docs/ru/sql-reference/distributed-ddl.md @@ -1,6 +1,6 @@ --- toc_priority: 32 -toc_title: Распределенные DDL запросы +toc_title: "\u0420\u0430\u0441\u043f\u0440\u0435\u0434\u0435\u043b\u0435\u043d\u043d\u044b\u0435\u0020\u0044\u0044\u004c\u0020\u0437\u0430\u043f\u0440\u043e\u0441\u044b" --- # Распределенные DDL запросы (секция ON CLUSTER) {#raspredelennye-ddl-zaprosy-sektsiia-on-cluster} diff --git a/docs/ru/sql-reference/functions/arithmetic-functions.md b/docs/ru/sql-reference/functions/arithmetic-functions.md index 8513737f025..16c3e8fd8f0 100644 --- a/docs/ru/sql-reference/functions/arithmetic-functions.md +++ b/docs/ru/sql-reference/functions/arithmetic-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 34 +toc_title: "\u0410\u0440\u0438\u0444\u043c\u0435\u0442\u0438\u0447\u0435\u0441\u043a\u0438\u0435\u0020\u0444\u0443\u043d\u043a\u0446\u0438\u0438" +--- + # Арифметические функции {#arifmeticheskie-funktsii} Для всех арифметических функций, тип результата вычисляется, как минимальный числовой тип, который может вместить результат, если такой тип есть. Минимум берётся одновременно по числу бит, знаковости и «плавучести». Если бит не хватает, то берётся тип максимальной битности. diff --git a/docs/ru/sql-reference/functions/array-functions.md b/docs/ru/sql-reference/functions/array-functions.md index 91c0443c85d..62181eebb4b 100644 --- a/docs/ru/sql-reference/functions/array-functions.md +++ b/docs/ru/sql-reference/functions/array-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 35 +toc_title: "\u041c\u0430\u0441\u0441\u0438\u0432\u044b" +--- + # Массивы {#functions-for-working-with-arrays} ## empty {#function-empty} diff --git a/docs/ru/sql-reference/functions/array-join.md b/docs/ru/sql-reference/functions/array-join.md index afbc7bfe3e8..2ed3d25fa92 100644 --- a/docs/ru/sql-reference/functions/array-join.md +++ b/docs/ru/sql-reference/functions/array-join.md @@ -1,3 +1,8 @@ +--- +toc_priority: 61 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u044f\u0020\u0041\u0072\u0072\u0061\u0079\u004a\u006f\u0069\u006e" +--- + # Функция ArrayJoin {#functions_arrayjoin} Это совсем необычная функция. diff --git a/docs/ru/sql-reference/functions/bit-functions.md b/docs/ru/sql-reference/functions/bit-functions.md index 9d78dd861e9..8c7808437a5 100644 --- a/docs/ru/sql-reference/functions/bit-functions.md +++ b/docs/ru/sql-reference/functions/bit-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 48 +toc_title: "\u0411\u0438\u0442\u043e\u0432\u044b\u0435\u0020\u0444\u0443\u043d\u043a\u0446\u0438\u0438" +--- + # Битовые функции {#bitovye-funktsii} Битовые функции работают для любой пары типов из UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64. 
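Because hand-written escape strings like the toc_title values above are easy to get subtly wrong (a stray control character survives decoding invisibly), a small lint pass helps. This is a sketch of our own, not part of the ClickHouse build: the paths, helper name, and regex are assumptions, and the regex targets the simple `key: "value"` front matter used in these docs:

```python
# Lint sketch: decode each toc_title in the docs front matter and flag
# control characters or stray surrounding whitespace after decoding.
import re
import unicodedata
from pathlib import Path

TITLE_RE = re.compile(r'^toc_title:\s*"?(?P<val>[^"\n]*)"?\s*$', re.MULTILINE)

def lint(path: Path) -> None:
    for m in TITLE_RE.finditer(path.read_text(encoding="utf-8")):
        raw = m.group("val")
        # Turn any literal non-ASCII into \uXXXX first so both escaped and
        # plain titles decode uniformly.
        title = raw.encode("ascii", "backslashreplace").decode("unicode_escape")
        if title != title.strip() or any(unicodedata.category(c) == "Cc" for c in title):
            print(f"{path}: suspicious toc_title {title!r}")

for md in Path("docs/ru").rglob("*.md"):  # assumes running from the repo root
    lint(md)
```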
diff --git a/docs/ru/sql-reference/functions/bitmap-functions.md b/docs/ru/sql-reference/functions/bitmap-functions.md index aa154831b48..b21ddea94e4 100644 --- a/docs/ru/sql-reference/functions/bitmap-functions.md +++ b/docs/ru/sql-reference/functions/bitmap-functions.md @@ -1,4 +1,9 @@ -# Функции для битмапов {#bitmap-functions} +--- +toc_priority: 49 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0434\u043b\u044f\u0020\u0431\u0438\u0442\u043c\u0430\u043f\u043e\u0432" +--- + +# Функции для битовых масок {#bitmap-functions} ## bitmapBuild {#bitmap_functions-bitmapbuild} diff --git a/docs/ru/sql-reference/functions/comparison-functions.md b/docs/ru/sql-reference/functions/comparison-functions.md index 9b921d6fe55..a98c97ec96c 100644 --- a/docs/ru/sql-reference/functions/comparison-functions.md +++ b/docs/ru/sql-reference/functions/comparison-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 36 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0441\u0440\u0430\u0432\u043d\u0435\u043d\u0438\u044f" +--- + # Функции сравнения {#funktsii-sravneniia} Функции сравнения возвращают всегда 0 или 1 (UInt8). diff --git a/docs/ru/sql-reference/functions/conditional-functions.md b/docs/ru/sql-reference/functions/conditional-functions.md index 7efb6f7bfc5..83268b68959 100644 --- a/docs/ru/sql-reference/functions/conditional-functions.md +++ b/docs/ru/sql-reference/functions/conditional-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 43 +toc_title: "\u0423\u0441\u043b\u043e\u0432\u043d\u044b\u0435\u0020\u0444\u0443\u043d\u043a\u0446\u0438\u0438" +--- + # Условные функции {#uslovnye-funktsii} ## if {#if} diff --git a/docs/ru/sql-reference/functions/date-time-functions.md b/docs/ru/sql-reference/functions/date-time-functions.md index aa9fdee478d..d24de2faae1 100644 --- a/docs/ru/sql-reference/functions/date-time-functions.md +++ b/docs/ru/sql-reference/functions/date-time-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 39 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0434\u043b\u044f\u0020\u0440\u0430\u0431\u043e\u0442\u044b\u0020\u0441\u0020\u0434\u0430\u0442\u0430\u043c\u0438\u0020\u0438\u0020\u0432\u0440\u0435\u043c\u0435\u043d\u0435\u043c" +--- + # Функции для работы с датами и временем {#funktsii-dlia-raboty-s-datami-i-vremenem} Поддержка часовых поясов diff --git a/docs/ru/sql-reference/functions/encoding-functions.md b/docs/ru/sql-reference/functions/encoding-functions.md index b8ea481364d..6f1c2aad6cb 100644 --- a/docs/ru/sql-reference/functions/encoding-functions.md +++ b/docs/ru/sql-reference/functions/encoding-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 52 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u043a\u043e\u0434\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u044f" +--- + # Функции кодирования {#funktsii-kodirovaniia} ## char {#char} diff --git a/docs/ru/sql-reference/functions/ext-dict-functions.md b/docs/ru/sql-reference/functions/ext-dict-functions.md index 792afd1775d..6054ed141d4 100644 --- a/docs/ru/sql-reference/functions/ext-dict-functions.md +++ b/docs/ru/sql-reference/functions/ext-dict-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 58 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0434\u043b\u044f\u0020\u0440\u0430\u0431\u043e\u0442\u044b\u0020\u0441\u0020\u0432\u043d\u0435\u0448\u043d\u0438\u043c\u0438\u0020\u0441\u043b\u043e\u0432\u0430\u0440\u044f\u043c\u0438" +--- + # Функции для работы с внешними словарями {#ext_dict_functions} Информацию о подключении и настройке внешних словарей смотрите в 
разделе [Внешние словари](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md). diff --git a/docs/ru/sql-reference/functions/functions-for-nulls.md b/docs/ru/sql-reference/functions/functions-for-nulls.md index 5fbfb4bd27b..17da1ea9194 100644 --- a/docs/ru/sql-reference/functions/functions-for-nulls.md +++ b/docs/ru/sql-reference/functions/functions-for-nulls.md @@ -1,3 +1,8 @@ +--- +toc_priority: 63 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0434\u043b\u044f\u0020\u0440\u0430\u0431\u043e\u0442\u044b\u0020\u0441\u0020\u004e\u0075\u006c\u006c\u0061\u0062\u006c\u0065\u002d\u0430\u0440\u0433\u0443\u043c\u0435\u043d\u0442\u0430\u043c\u0438" +--- + # Функции для работы с Nullable-аргументами {#funktsii-dlia-raboty-s-nullable-argumentami} ## isNull {#isnull} diff --git a/docs/ru/sql-reference/functions/geo/coordinates.md b/docs/ru/sql-reference/functions/geo/coordinates.md index 9d65139e69e..1931a9b932f 100644 --- a/docs/ru/sql-reference/functions/geo/coordinates.md +++ b/docs/ru/sql-reference/functions/geo/coordinates.md @@ -1,5 +1,6 @@ --- -toc_title: Функции для работы с географическими координатами +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0434\u043b\u044f\u0020\u0440\u0430\u0431\u043e\u0442\u044b\u0020\u0441\u0020\u0433\u0435\u043e\u0433\u0440\u0430\u0444\u0438\u0447\u0435\u0441\u043a\u0438\u043c\u0438\u0020\u043a\u043e\u043e\u0440\u0434\u0438\u043d\u0430\u0442\u0430\u043c\u0438" +toc_priority: 62 --- # Функции для работы с географическими координатами {#geographical-coordinates} diff --git a/docs/ru/sql-reference/functions/geo/geohash.md b/docs/ru/sql-reference/functions/geo/geohash.md index 7273d58b1d9..38c64f11b10 100644 --- a/docs/ru/sql-reference/functions/geo/geohash.md +++ b/docs/ru/sql-reference/functions/geo/geohash.md @@ -1,5 +1,5 @@ --- -toc_title: Geohash +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0434\u043b\u044f\u0020\u0440\u0430\u0431\u043e\u0442\u044b\u0020\u0441\u0020\u0441\u0438\u0441\u0442\u0435\u043c\u043e\u0439\u0020\u0047\u0065\u006f\u0068\u0061\u0073\u0068" --- # Функции для работы с системой Geohash {#geohash} diff --git a/docs/ru/sql-reference/functions/geo/h3.md b/docs/ru/sql-reference/functions/geo/h3.md index d9a5e72721b..69d06b5dfa6 100644 --- a/docs/ru/sql-reference/functions/geo/h3.md +++ b/docs/ru/sql-reference/functions/geo/h3.md @@ -1,5 +1,5 @@ --- -toc_title: Индексы H3 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0434\u043b\u044f\u0020\u0440\u0430\u0431\u043e\u0442\u044b\u0020\u0441\u0020\u0438\u043d\u0434\u0435\u043a\u0441\u0430\u043c\u0438\u0020\u0048\u0033" --- # Функции для работы с индексами H3 {#h3index} diff --git a/docs/ru/sql-reference/functions/geo/index.md b/docs/ru/sql-reference/functions/geo/index.md index bfea32a245b..cedaafaa31d 100644 --- a/docs/ru/sql-reference/functions/geo/index.md +++ b/docs/ru/sql-reference/functions/geo/index.md @@ -1,6 +1,6 @@ --- toc_priority: 62 -toc_folder_title: Гео-данные +toc_folder_title: "\u0413\u0435\u043e\u002d\u0434\u0430\u043d\u043d\u044b\u0435" toc_title: hidden --- diff --git a/docs/ru/sql-reference/functions/hash-functions.md b/docs/ru/sql-reference/functions/hash-functions.md index 201fc934cea..92fc69227f4 100644 --- a/docs/ru/sql-reference/functions/hash-functions.md +++ b/docs/ru/sql-reference/functions/hash-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 50 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0445\u044d\u0448\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u044f" +--- + # 
Функции хэширования {#funktsii-kheshirovaniia} Функции хэширования могут использоваться для детерминированного псевдослучайного разбрасывания элементов. diff --git a/docs/ru/sql-reference/functions/in-functions.md b/docs/ru/sql-reference/functions/in-functions.md index 679fcbccc21..e137187a36b 100644 --- a/docs/ru/sql-reference/functions/in-functions.md +++ b/docs/ru/sql-reference/functions/in-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 60 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0434\u043b\u044f\u0020\u0440\u0435\u0430\u043b\u0438\u0437\u0430\u0446\u0438\u0438\u0020\u043e\u043f\u0435\u0440\u0430\u0442\u043e\u0440\u0430\u0020\u0049\u004e" +--- + # Функции для реализации оператора IN {#funktsii-dlia-realizatsii-operatora-in} ## in, notIn, globalIn, globalNotIn {#in-functions} diff --git a/docs/ru/sql-reference/functions/index.md b/docs/ru/sql-reference/functions/index.md index 9c1c0c5ca9d..25d3b6de067 100644 --- a/docs/ru/sql-reference/functions/index.md +++ b/docs/ru/sql-reference/functions/index.md @@ -1,3 +1,9 @@ +--- +toc_folder_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438" +toc_priority: 32 +toc_title: "\u0412\u0432\u0435\u0434\u0435\u043d\u0438\u0435" +--- + # Функции {#funktsii} Функции бывают как минимум\* двух видов - обычные функции (называются просто, функциями) и агрегатные функции. Это совершенно разные вещи. Обычные функции работают так, как будто применяются к каждой строке по отдельности (для каждой строки, результат вычисления функции не зависит от других строк). Агрегатные функции аккумулируют множество значений из разных строк (то есть, зависят от целого множества строк). diff --git a/docs/ru/sql-reference/functions/introspection.md b/docs/ru/sql-reference/functions/introspection.md index c09efd35093..9f4f2ebd1e9 100644 --- a/docs/ru/sql-reference/functions/introspection.md +++ b/docs/ru/sql-reference/functions/introspection.md @@ -1,3 +1,8 @@ +--- +toc_priority: 65 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0438\u043d\u0442\u0440\u043e\u0441\u043f\u0435\u043a\u0446\u0438\u0438" +--- + # Функции интроспекции {#introspection-functions} Функции из этого раздела могут использоваться для интроспекции [ELF](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format) и [DWARF](https://en.wikipedia.org/wiki/DWARF) в целях профилирования запросов. diff --git a/docs/ru/sql-reference/functions/ip-address-functions.md b/docs/ru/sql-reference/functions/ip-address-functions.md index a9a0a7f919a..724fb97c0d5 100644 --- a/docs/ru/sql-reference/functions/ip-address-functions.md +++ b/docs/ru/sql-reference/functions/ip-address-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 55 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0434\u043b\u044f\u0020\u0440\u0430\u0431\u043e\u0442\u044b\u0020\u0441\u0020\u0049\u0050\u002d\u0430\u0434\u0440\u0435\u0441\u0430\u043c\u0438" +--- + # Функции для работы с IP-адресами {#funktsii-dlia-raboty-s-ip-adresami} ## IPv4NumToString(num) {#ipv4numtostringnum} diff --git a/docs/ru/sql-reference/functions/json-functions.md b/docs/ru/sql-reference/functions/json-functions.md index 752b70b7c5f..69b8f8f98f5 100644 --- a/docs/ru/sql-reference/functions/json-functions.md +++ b/docs/ru/sql-reference/functions/json-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 56 +toc_title: JSON +--- + # Функции для работы с JSON {#funktsii-dlia-raboty-s-json} В Яндекс.Метрике пользователями передаётся JSON в качестве параметров визитов. Для работы с таким JSON-ом, реализованы некоторые функции. 
(Хотя в большинстве случаев, JSON-ы дополнительно обрабатываются заранее, и полученные значения кладутся в отдельные столбцы в уже обработанном виде.) Все эти функции исходят из сильных допущений о том, каким может быть JSON, и при этом стараются почти ничего не делать. diff --git a/docs/ru/sql-reference/functions/logical-functions.md b/docs/ru/sql-reference/functions/logical-functions.md index de2a3d51729..9b1ee6a66a7 100644 --- a/docs/ru/sql-reference/functions/logical-functions.md +++ b/docs/ru/sql-reference/functions/logical-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 37 +toc_title: "\u041b\u043e\u0433\u0438\u0447\u0435\u0441\u043a\u0438\u0435\u0020\u0444\u0443\u043d\u043a\u0446\u0438\u0438" +--- + # Логические функции {#logicheskie-funktsii} Логические функции принимают любые числовые типы, а возвращают число типа UInt8, равное 0 или 1. diff --git a/docs/ru/sql-reference/functions/machine-learning-functions.md b/docs/ru/sql-reference/functions/machine-learning-functions.md index e9b0e8c9bc9..2ffdfd05613 100644 --- a/docs/ru/sql-reference/functions/machine-learning-functions.md +++ b/docs/ru/sql-reference/functions/machine-learning-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 64 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u043c\u0430\u0448\u0438\u043d\u043d\u043e\u0433\u043e\u0020\u043e\u0431\u0443\u0447\u0435\u043d\u0438\u044f" +--- + # Функции машинного обучения {#funktsii-mashinnogo-obucheniia} ## evalMLMethod (prediction) {#machine_learning_methods-evalmlmethod} diff --git a/docs/ru/sql-reference/functions/math-functions.md b/docs/ru/sql-reference/functions/math-functions.md index e52b14d26c6..6df366d129f 100644 --- a/docs/ru/sql-reference/functions/math-functions.md +++ b/docs/ru/sql-reference/functions/math-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 44 +toc_title: "\u041c\u0430\u0442\u0435\u043c\u0430\u0442\u0438\u0447\u0435\u0441\u043a\u0438\u0435\u0020\u0444\u0443\u043d\u043a\u0446\u0438\u0438" +--- + # Математические функции {#matematicheskie-funktsii} Все функции возвращают число типа Float64. Точность результата близка к максимально возможной, но результат может не совпадать с наиболее близким к соответствующему вещественному числу машинно представимым числом. diff --git a/docs/ru/sql-reference/functions/other-functions.md b/docs/ru/sql-reference/functions/other-functions.md index bb5371ff449..9367f3be00c 100644 --- a/docs/ru/sql-reference/functions/other-functions.md +++ b/docs/ru/sql-reference/functions/other-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 66 +toc_title: "\u041f\u0440\u043e\u0447\u0438\u0435\u0020\u0444\u0443\u043d\u043a\u0446\u0438\u0438" +--- + # Прочие функции {#other-functions} ## hostName() {#hostname} @@ -1468,7 +1473,7 @@ SELECT getSetting('custom_a'); ## isDecimalOverflow {#is-decimal-overflow} -Проверяет, находится ли число [Decimal](../../sql-reference/data-types/decimal.md#decimalp-s-decimal32s-decimal64s-decimal128s) вне собственной (или заданной) области значений. +Проверяет, находится ли число [Decimal](../../sql-reference/data-types/decimal.md) вне собственной (или заданной) области значений. **Синтаксис** @@ -1478,7 +1483,7 @@ isDecimalOverflow(d, [p]) **Параметры** -- `d` — число. [Decimal](../../sql-reference/data-types/decimal.md#decimalp-s-decimal32s-decimal64s-decimal128s). +- `d` — число. [Decimal](../../sql-reference/data-types/decimal.md). - `p` — точность. Необязательный параметр. Если опущен, используется исходная точность первого аргумента. 
Использование этого параметра может быть полезно для извлечения данных в другую СУБД или файл. [UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges). **Возвращаемое значение** @@ -1515,7 +1520,7 @@ countDigits(x) **Параметры** -- `x` — [целое](../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-int8-int16-int32-int64) или [дробное](../../sql-reference/data-types/decimal.md#decimalp-s-decimal32s-decimal64s-decimal128s) число. +- `x` — [целое](../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-int8-int16-int32-int64) или [дробное](../../sql-reference/data-types/decimal.md) число. **Возвращаемое значение** diff --git a/docs/ru/sql-reference/functions/random-functions.md b/docs/ru/sql-reference/functions/random-functions.md index 21dcfeeb3c0..f3889504fa6 100644 --- a/docs/ru/sql-reference/functions/random-functions.md +++ b/docs/ru/sql-reference/functions/random-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 51 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0433\u0435\u043d\u0435\u0440\u0430\u0446\u0438\u0438\u0020\u043f\u0441\u0435\u0432\u0434\u043e\u0441\u043b\u0443\u0447\u0430\u0439\u043d\u044b\u0445\u0020\u0447\u0438\u0441\u0435\u043b" +--- + # Функции генерации псевдослучайных чисел {#functions-for-generating-pseudo-random-numbers} Используются не криптографические генераторы псевдослучайных чисел. diff --git a/docs/ru/sql-reference/functions/rounding-functions.md b/docs/ru/sql-reference/functions/rounding-functions.md index c643bccf3e2..78033160396 100644 --- a/docs/ru/sql-reference/functions/rounding-functions.md +++ b/docs/ru/sql-reference/functions/rounding-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 45 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u043e\u043a\u0440\u0443\u0433\u043b\u0435\u043d\u0438\u044f" +--- + # Функции округления {#funktsii-okrugleniia} ## floor(x\[, N\]) {#floorx-n} diff --git a/docs/ru/sql-reference/functions/splitting-merging-functions.md b/docs/ru/sql-reference/functions/splitting-merging-functions.md index bf4e76c3bb1..d451eabc407 100644 --- a/docs/ru/sql-reference/functions/splitting-merging-functions.md +++ b/docs/ru/sql-reference/functions/splitting-merging-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 47 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0440\u0430\u0437\u0431\u0438\u0435\u043d\u0438\u044f\u0020\u0438\u0020\u0441\u043b\u0438\u044f\u043d\u0438\u044f\u0020\u0441\u0442\u0440\u043e\u043a\u0020\u0438\u0020\u043c\u0430\u0441\u0441\u0438\u0432\u043e\u0432" +--- + # Функции разбиения и слияния строк и массивов {#funktsii-razbieniia-i-sliianiia-strok-i-massivov} ## splitByChar(separator, s) {#splitbycharseparator-s} diff --git a/docs/ru/sql-reference/functions/string-functions.md b/docs/ru/sql-reference/functions/string-functions.md index 58ed582b399..cc488fb2d9c 100644 --- a/docs/ru/sql-reference/functions/string-functions.md +++ b/docs/ru/sql-reference/functions/string-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 40 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0434\u043b\u044f\u0020\u0440\u0430\u0431\u043e\u0442\u044b\u0020\u0441\u043e\u0020\u0441\u0442\u0440\u043e\u043a\u0430\u043c\u0438" +--- + # Функции для работы со строками {#funktsii-dlia-raboty-so-strokami} ## empty {#empty} diff --git a/docs/ru/sql-reference/functions/string-replace-functions.md b/docs/ru/sql-reference/functions/string-replace-functions.md index 8a2db9bf472..f334d6804f9 100644 --- a/docs/ru/sql-reference/functions/string-replace-functions.md +++ 
b/docs/ru/sql-reference/functions/string-replace-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 42 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u043f\u043e\u0438\u0441\u043a\u0430\u0020\u0438\u0020\u0437\u0430\u043c\u0435\u043d\u044b\u0020\u0432\u0020\u0441\u0442\u0440\u043e\u043a\u0430\u0445" +--- + # Функции поиска и замены в строках {#funktsii-poiska-i-zameny-v-strokakh} ## replaceOne(haystack, pattern, replacement) {#replaceonehaystack-pattern-replacement} diff --git a/docs/ru/sql-reference/functions/string-search-functions.md b/docs/ru/sql-reference/functions/string-search-functions.md index 078a09a8aa4..d2f1119783b 100644 --- a/docs/ru/sql-reference/functions/string-search-functions.md +++ b/docs/ru/sql-reference/functions/string-search-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 41 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u043f\u043e\u0438\u0441\u043a\u0430\u0020\u0432\u0020\u0441\u0442\u0440\u043e\u043a\u0430\u0445" +--- + # Функции поиска в строках {#funktsii-poiska-v-strokakh} Во всех функциях, поиск регистрозависимый по умолчанию. Существуют варианты функций для регистронезависимого поиска. diff --git a/docs/ru/sql-reference/functions/type-conversion-functions.md b/docs/ru/sql-reference/functions/type-conversion-functions.md index 3bee1a3656f..773850b65ce 100644 --- a/docs/ru/sql-reference/functions/type-conversion-functions.md +++ b/docs/ru/sql-reference/functions/type-conversion-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 38 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u043f\u0440\u0435\u043e\u0431\u0440\u0430\u0437\u043e\u0432\u0430\u043d\u0438\u044f\u0020\u0442\u0438\u043f\u043e\u0432" +--- + # Функции преобразования типов {#funktsii-preobrazovaniia-tipov} ## Общие проблемы преобразования чисел {#numeric-conversion-issues} diff --git a/docs/ru/sql-reference/functions/url-functions.md b/docs/ru/sql-reference/functions/url-functions.md index 54b752d7c68..8f10a1ebd2b 100644 --- a/docs/ru/sql-reference/functions/url-functions.md +++ b/docs/ru/sql-reference/functions/url-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 54 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0434\u043b\u044f\u0020\u0440\u0430\u0431\u043e\u0442\u044b\u0020\u0441\u0020\u0055\u0052\u004c" +--- + # Функции для работы с URL {#funktsii-dlia-raboty-s-url} Все функции работают не по RFC - то есть, максимально упрощены ради производительности. 
diff --git a/docs/ru/sql-reference/functions/uuid-functions.md b/docs/ru/sql-reference/functions/uuid-functions.md index b2b567c6a06..389ce751ce0 100644 --- a/docs/ru/sql-reference/functions/uuid-functions.md +++ b/docs/ru/sql-reference/functions/uuid-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 53 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0434\u043b\u044f\u0020\u0440\u0430\u0431\u043e\u0442\u044b\u0020\u0441\u0020\u0055\u0055\u0049\u0044" +--- + # Функции для работы с UUID {#funktsii-dlia-raboty-s-uuid} ## generateUUIDv4 {#uuid-function-generate} diff --git a/docs/ru/sql-reference/functions/ym-dict-functions.md b/docs/ru/sql-reference/functions/ym-dict-functions.md index 7ac27c0d285..c3b04e4ab66 100644 --- a/docs/ru/sql-reference/functions/ym-dict-functions.md +++ b/docs/ru/sql-reference/functions/ym-dict-functions.md @@ -1,3 +1,8 @@ +--- +toc_priority: 59 +toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438\u0020\u0434\u043b\u044f\u0020\u0440\u0430\u0431\u043e\u0442\u044b\u0020\u0441\u043e\u0020\u0441\u043b\u043e\u0432\u0430\u0440\u044f\u043c\u0438\u0020\u042f\u043d\u0434\u0435\u043a\u0441\u002e\u041c\u0435\u0442\u0440\u0438\u043a\u0438" +--- + # Функции для работы со словарями Яндекс.Метрики {#ym-dict-functions} Чтобы указанные ниже функции работали, в конфиге сервера должны быть указаны пути и адреса для получения всех словарей Яндекс.Метрики. Словари загружаются при первом вызове любой из этих функций. Если справочники не удаётся загрузить - будет выкинуто исключение. diff --git a/docs/ru/sql-reference/operators/index.md b/docs/ru/sql-reference/operators/index.md index 08594193d4c..3befb18687d 100644 --- a/docs/ru/sql-reference/operators/index.md +++ b/docs/ru/sql-reference/operators/index.md @@ -1,3 +1,8 @@ +--- +toc_priority: 38 +toc_title: "\u041e\u043f\u0435\u0440\u0430\u0442\u043e\u0440\u044b" +--- + # Операторы {#operatory} Все операторы преобразуются в соответствующие функции на этапе парсинга запроса, с учётом их приоритетов и ассоциативности. 
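The operators hunk above ends on the key point that every operator is rewritten into its corresponding function at query-parsing time, with precedence and associativity taken into account. As a toy, self-contained illustration of that rewrite — not ClickHouse's actual parser code; the lookup table below is invented for this sketch, though `plus`, `multiply`, `equals`, etc. are the documented ClickHouse function names:

```cpp
#include <iostream>
#include <map>
#include <string>

// Toy illustration: the parser turns `a + b` into `plus(a, b)`,
// `a * b` into `multiply(a, b)`, and so on. The mapping below is
// illustrative and deliberately not exhaustive.
int main()
{
    const std::map<std::string, std::string> op_to_function = {
        {"+", "plus"}, {"-", "minus"}, {"*", "multiply"}, {"/", "divide"},
        {"=", "equals"}, {"!=", "notEquals"}, {"<", "less"},
    };

    // `a + b * c` parses (by precedence) as plus(a, multiply(b, c)):
    std::cout << op_to_function.at("+") << "(a, "
              << op_to_function.at("*") << "(b, c))\n";
    return 0;
}
```

So `a + b * c` becomes `plus(a, multiply(b, c))` before any execution machinery ever sees an operator.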
diff --git a/docs/ru/sql-reference/statements/alter/column.md b/docs/ru/sql-reference/statements/alter/column.md index 811539d60d3..a8fe0ccf642 100644 --- a/docs/ru/sql-reference/statements/alter/column.md +++ b/docs/ru/sql-reference/statements/alter/column.md @@ -1,6 +1,6 @@ --- toc_priority: 37 -toc_title: COLUMN +toc_title: "\u041c\u0430\u043d\u0438\u043f\u0443\u043b\u044f\u0446\u0438\u0438\u0020\u0441\u043e\u0020\u0441\u0442\u043e\u043b\u0431\u0446\u0430\u043c\u0438" --- # Манипуляции со столбцами {#manipuliatsii-so-stolbtsami} diff --git a/docs/ru/sql-reference/statements/alter/constraint.md b/docs/ru/sql-reference/statements/alter/constraint.md index bacdff9ff57..e26db208493 100644 --- a/docs/ru/sql-reference/statements/alter/constraint.md +++ b/docs/ru/sql-reference/statements/alter/constraint.md @@ -1,6 +1,6 @@ --- toc_priority: 43 -toc_title: CONSTRAINT +toc_title: "\u041c\u0430\u043d\u0438\u043f\u0443\u043b\u044f\u0446\u0438\u0438\u0020\u0441\u0020\u043e\u0433\u0440\u0430\u043d\u0438\u0447\u0435\u043d\u0438\u044f\u043c\u0438" --- # Манипуляции с ограничениями (constraints) {#manipuliatsii-s-ogranicheniiami-constraints} diff --git a/docs/ru/sql-reference/statements/alter/index.md b/docs/ru/sql-reference/statements/alter/index.md index 035be934eb4..830c4a5745b 100644 --- a/docs/ru/sql-reference/statements/alter/index.md +++ b/docs/ru/sql-reference/statements/alter/index.md @@ -1,5 +1,5 @@ --- -toc_priority: 36 +toc_priority: 35 toc_title: ALTER --- diff --git a/docs/ru/sql-reference/statements/alter/index/index.md b/docs/ru/sql-reference/statements/alter/index/index.md index 863f8b875dd..2cadbbe065e 100644 --- a/docs/ru/sql-reference/statements/alter/index/index.md +++ b/docs/ru/sql-reference/statements/alter/index/index.md @@ -1,7 +1,7 @@ --- toc_hidden_folder: true toc_priority: 42 -toc_title: INDEX +toc_title: "\u041c\u0430\u043d\u0438\u043f\u0443\u043b\u044f\u0446\u0438\u0438\u0020\u0441\u0020\u0438\u043d\u0434\u0435\u043a\u0441\u0430\u043c\u0438" --- # Манипуляции с индексами {#manipuliatsii-s-indeksami} diff --git a/docs/ru/sql-reference/statements/attach.md b/docs/ru/sql-reference/statements/attach.md index 2aaf133130b..259ab893e63 100644 --- a/docs/ru/sql-reference/statements/attach.md +++ b/docs/ru/sql-reference/statements/attach.md @@ -1,5 +1,5 @@ --- -toc_priority: 42 +toc_priority: 40 toc_title: ATTACH --- diff --git a/docs/ru/sql-reference/statements/check-table.md b/docs/ru/sql-reference/statements/check-table.md index fa37b24851f..3dc135d87c6 100644 --- a/docs/ru/sql-reference/statements/check-table.md +++ b/docs/ru/sql-reference/statements/check-table.md @@ -1,5 +1,5 @@ --- -toc_priority: 43 +toc_priority: 41 toc_title: CHECK --- diff --git a/docs/ru/sql-reference/statements/create/database.md b/docs/ru/sql-reference/statements/create/database.md index a6c8b81c8b8..e6c561f8e0b 100644 --- a/docs/ru/sql-reference/statements/create/database.md +++ b/docs/ru/sql-reference/statements/create/database.md @@ -1,6 +1,6 @@ --- -toc_priority: 1 -toc_title: База данных +toc_priority: 35 +toc_title: "\u0411\u0430\u0437\u0430\u0020\u0434\u0430\u043d\u043d\u044b\u0445" --- # CREATE DATABASE {#query-language-create-database} diff --git a/docs/ru/sql-reference/statements/create/dictionary.md b/docs/ru/sql-reference/statements/create/dictionary.md index da9443e1b3a..a20dc812e02 100644 --- a/docs/ru/sql-reference/statements/create/dictionary.md +++ b/docs/ru/sql-reference/statements/create/dictionary.md @@ -1,6 +1,6 @@ --- -toc_priority: 4 -toc_title: Словарь +toc_priority: 38 
+toc_title: "\u0421\u043b\u043e\u0432\u0430\u0440\u044c" --- # CREATE DICTIONARY {#create-dictionary-query} diff --git a/docs/ru/sql-reference/statements/create/index.md b/docs/ru/sql-reference/statements/create/index.md index f6399cfba11..28ddce2afe3 100644 --- a/docs/ru/sql-reference/statements/create/index.md +++ b/docs/ru/sql-reference/statements/create/index.md @@ -1,7 +1,7 @@ --- toc_folder_title: CREATE -toc_priority: 35 -toc_title: Обзор +toc_priority: 34 +toc_title: "\u041e\u0431\u0437\u043e\u0440" --- # Запросы CREATE {#create-queries} diff --git a/docs/ru/sql-reference/statements/create/quota.md b/docs/ru/sql-reference/statements/create/quota.md index 6351de2d38a..fe18869bf2e 100644 --- a/docs/ru/sql-reference/statements/create/quota.md +++ b/docs/ru/sql-reference/statements/create/quota.md @@ -1,6 +1,6 @@ --- -toc_priority: 8 -toc_title: Квота +toc_priority: 42 +toc_title: "\u041a\u0432\u043e\u0442\u0430" --- # CREATE QUOTA {#create-quota-statement} diff --git a/docs/ru/sql-reference/statements/create/role.md b/docs/ru/sql-reference/statements/create/role.md index b8c0fc2b453..b9e529fb213 100644 --- a/docs/ru/sql-reference/statements/create/role.md +++ b/docs/ru/sql-reference/statements/create/role.md @@ -1,6 +1,6 @@ --- -toc_priority: 6 -toc_title: Роль +toc_priority: 40 +toc_title: "\u0420\u043e\u043b\u044c" --- # CREATE ROLE {#create-role-statement} diff --git a/docs/ru/sql-reference/statements/create/row-policy.md b/docs/ru/sql-reference/statements/create/row-policy.md index 7ec28761452..a62e275a046 100644 --- a/docs/ru/sql-reference/statements/create/row-policy.md +++ b/docs/ru/sql-reference/statements/create/row-policy.md @@ -1,6 +1,6 @@ --- -toc_priority: 7 -toc_title: Политика доступа +toc_priority: 41 +toc_title: "\u041f\u043e\u043b\u0438\u0442\u0438\u043a\u0430\u0020\u0434\u043e\u0441\u0442\u0443\u043f\u0430" --- # CREATE ROW POLICY {#create-row-policy-statement} diff --git a/docs/ru/sql-reference/statements/create/settings-profile.md b/docs/ru/sql-reference/statements/create/settings-profile.md index 70c90b53565..9d525023af2 100644 --- a/docs/ru/sql-reference/statements/create/settings-profile.md +++ b/docs/ru/sql-reference/statements/create/settings-profile.md @@ -1,6 +1,6 @@ --- -toc_priority: 9 -toc_title: Профиль настроек +toc_priority: 43 +toc_title: "\u041f\u0440\u043e\u0444\u0438\u043b\u044c\u0020\u043d\u0430\u0441\u0442\u0440\u043e\u0435\u043a" --- # CREATE SETTINGS PROFILE {#create-settings-profile-statement} diff --git a/docs/ru/sql-reference/statements/create/table.md b/docs/ru/sql-reference/statements/create/table.md index c6093bbc6de..d54ec189a1a 100644 --- a/docs/ru/sql-reference/statements/create/table.md +++ b/docs/ru/sql-reference/statements/create/table.md @@ -1,6 +1,6 @@ --- -toc_priority: 2 -toc_title: Таблица +toc_priority: 36 +toc_title: "\u0422\u0430\u0431\u043b\u0438\u0446\u0430" --- # CREATE TABLE {#create-table-query} diff --git a/docs/ru/sql-reference/statements/create/user.md b/docs/ru/sql-reference/statements/create/user.md index 3c04b4df86c..e7af1659a1b 100644 --- a/docs/ru/sql-reference/statements/create/user.md +++ b/docs/ru/sql-reference/statements/create/user.md @@ -1,6 +1,6 @@ --- -toc_priority: 5 -toc_title: Пользователь +toc_priority: 39 +toc_title: "\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c" --- # CREATE USER {#create-user-statement} diff --git a/docs/ru/sql-reference/statements/create/view.md b/docs/ru/sql-reference/statements/create/view.md index caa3d04659e..891e33bc9b3 100644 --- 
a/docs/ru/sql-reference/statements/create/view.md +++ b/docs/ru/sql-reference/statements/create/view.md @@ -1,6 +1,6 @@ --- -toc_priority: 3 -toc_title: Представление +toc_priority: 37 +toc_title: "\u041f\u0440\u0435\u0434\u0441\u0442\u0430\u0432\u043b\u0435\u043d\u0438\u0435" --- # CREATE VIEW {#create-view} diff --git a/docs/ru/sql-reference/statements/describe-table.md b/docs/ru/sql-reference/statements/describe-table.md index 5b38dca5833..64ed61de232 100644 --- a/docs/ru/sql-reference/statements/describe-table.md +++ b/docs/ru/sql-reference/statements/describe-table.md @@ -1,5 +1,5 @@ --- -toc_priority: 44 +toc_priority: 42 toc_title: DESCRIBE --- diff --git a/docs/ru/sql-reference/statements/detach.md b/docs/ru/sql-reference/statements/detach.md index 172e86179c8..00d0a4b20c6 100644 --- a/docs/ru/sql-reference/statements/detach.md +++ b/docs/ru/sql-reference/statements/detach.md @@ -1,5 +1,5 @@ --- -toc_priority: 45 +toc_priority: 43 toc_title: DETACH --- diff --git a/docs/ru/sql-reference/statements/drop.md b/docs/ru/sql-reference/statements/drop.md index 3a6ac22b071..514a92db91f 100644 --- a/docs/ru/sql-reference/statements/drop.md +++ b/docs/ru/sql-reference/statements/drop.md @@ -1,5 +1,5 @@ --- -toc_priority: 46 +toc_priority: 44 toc_title: DROP --- diff --git a/docs/ru/sql-reference/statements/exists.md b/docs/ru/sql-reference/statements/exists.md index 865d23ad622..0b2fd69273c 100644 --- a/docs/ru/sql-reference/statements/exists.md +++ b/docs/ru/sql-reference/statements/exists.md @@ -1,5 +1,5 @@ --- -toc_priority: 47 +toc_priority: 45 toc_title: EXISTS --- diff --git a/docs/ru/sql-reference/statements/grant.md b/docs/ru/sql-reference/statements/grant.md index 69137095c4d..d38e2ea38a0 100644 --- a/docs/ru/sql-reference/statements/grant.md +++ b/docs/ru/sql-reference/statements/grant.md @@ -1,3 +1,8 @@ +--- +toc_priority: 38 +toc_title: GRANT +--- + # GRANT - Присваивает [привилегии](#grant-privileges) пользователям или ролям ClickHouse. 
diff --git a/docs/ru/sql-reference/statements/insert-into.md b/docs/ru/sql-reference/statements/insert-into.md index 281b708f3f9..8ea7c83bec8 100644 --- a/docs/ru/sql-reference/statements/insert-into.md +++ b/docs/ru/sql-reference/statements/insert-into.md @@ -1,5 +1,5 @@ --- -toc_priority: 34 +toc_priority: 33 toc_title: INSERT INTO --- diff --git a/docs/ru/sql-reference/statements/kill.md b/docs/ru/sql-reference/statements/kill.md index dd2f24ffa27..e2556a7f782 100644 --- a/docs/ru/sql-reference/statements/kill.md +++ b/docs/ru/sql-reference/statements/kill.md @@ -1,5 +1,5 @@ --- -toc_priority: 48 +toc_priority: 46 toc_title: KILL --- diff --git a/docs/ru/sql-reference/statements/optimize.md b/docs/ru/sql-reference/statements/optimize.md index dc70d86a1a0..9b94c31a8f7 100644 --- a/docs/ru/sql-reference/statements/optimize.md +++ b/docs/ru/sql-reference/statements/optimize.md @@ -1,5 +1,5 @@ --- -toc_priority: 49 +toc_priority: 47 toc_title: OPTIMIZE --- diff --git a/docs/ru/sql-reference/statements/rename.md b/docs/ru/sql-reference/statements/rename.md index 9f6069d8a94..94bf3c682a1 100644 --- a/docs/ru/sql-reference/statements/rename.md +++ b/docs/ru/sql-reference/statements/rename.md @@ -1,5 +1,5 @@ --- -toc_priority: 50 +toc_priority: 48 toc_title: RENAME --- diff --git a/docs/ru/sql-reference/statements/revoke.md b/docs/ru/sql-reference/statements/revoke.md index 1d2928bb76e..339746b8591 100644 --- a/docs/ru/sql-reference/statements/revoke.md +++ b/docs/ru/sql-reference/statements/revoke.md @@ -1,3 +1,8 @@ +--- +toc_priority: 39 +toc_title: REVOKE +--- + # REVOKE Отзывает привилегии у пользователей или ролей. diff --git a/docs/ru/sql-reference/statements/select/array-join.md b/docs/ru/sql-reference/statements/select/array-join.md index f8f11ba1b17..a3abf9e5e2e 100644 --- a/docs/ru/sql-reference/statements/select/array-join.md +++ b/docs/ru/sql-reference/statements/select/array-join.md @@ -1,3 +1,7 @@ +--- +toc_title: ARRAY JOIN +--- + # Секция ARRAY JOIN {#select-array-join-clause} Типовая операция для таблиц, содержащих столбец-массив — произвести новую таблицу, которая будет иметь столбец с каждым отдельным элементом массивов из изначального столбца, в то время как значения других столбцов дублируются. Это основной сценарий использования секции `ARRAY JOIN`. diff --git a/docs/ru/sql-reference/statements/select/distinct.md b/docs/ru/sql-reference/statements/select/distinct.md index 62e2e25b7e5..9d620079f6b 100644 --- a/docs/ru/sql-reference/statements/select/distinct.md +++ b/docs/ru/sql-reference/statements/select/distinct.md @@ -1,3 +1,7 @@ +--- +toc_title: DISTINCT +--- + # Секция DISTINCT {#select-distinct} Если указан `SELECT DISTINCT`, то в результате запроса останутся только уникальные строки. Таким образом, из всех наборов полностью совпадающих строк в результате останется только одна строка. diff --git a/docs/ru/sql-reference/statements/select/format.md b/docs/ru/sql-reference/statements/select/format.md index dad0ef0d62c..18972ddd82b 100644 --- a/docs/ru/sql-reference/statements/select/format.md +++ b/docs/ru/sql-reference/statements/select/format.md @@ -1,3 +1,7 @@ +--- +toc_title: FORMAT +--- + # Секция FORMAT {#format-clause} ClickHouse поддерживает широкий спектр [форматов сериализации](../../../interfaces/formats.md) это может быть использовано, в частности, для результатов запросов. 
Существует несколько способов выбора формата для `SELECT`, один из них заключается в том, чтобы указать `FORMAT format` в конце запроса, чтобы получить результирующие данные в любом конкретном формате. diff --git a/docs/ru/sql-reference/statements/select/from.md b/docs/ru/sql-reference/statements/select/from.md index ac0ab1dcd3f..491bbfe892b 100644 --- a/docs/ru/sql-reference/statements/select/from.md +++ b/docs/ru/sql-reference/statements/select/from.md @@ -1,3 +1,7 @@ +--- +toc_title: FROM +--- + # Секция FROM {#select-from} В секции `FROM` указывается источник, из которого будут читаться данные: diff --git a/docs/ru/sql-reference/statements/select/group-by.md b/docs/ru/sql-reference/statements/select/group-by.md index 9581f477af5..a0454ef1d91 100644 --- a/docs/ru/sql-reference/statements/select/group-by.md +++ b/docs/ru/sql-reference/statements/select/group-by.md @@ -1,3 +1,7 @@ +--- +toc_title: GROUP BY +--- + # Секция GROUP BY {#select-group-by-clause} Секция `GROUP BY` переключает `SELECT` запрос в режим агрегации, который работает следующим образом: diff --git a/docs/ru/sql-reference/statements/select/having.md b/docs/ru/sql-reference/statements/select/having.md index 83f58c5566f..dc701df906f 100644 --- a/docs/ru/sql-reference/statements/select/having.md +++ b/docs/ru/sql-reference/statements/select/having.md @@ -1,3 +1,7 @@ +--- +toc_title: HAVING +--- + # Секция HAVING {#having-clause} Позволяет фильтровать результаты агрегации, полученные с помощью [GROUP BY](group-by.md). Разница с [WHERE](where.md) в том, что `WHERE` выполняется перед агрегацией, в то время как `HAVING` выполняется после него. diff --git a/docs/ru/sql-reference/statements/select/index.md b/docs/ru/sql-reference/statements/select/index.md index b735d37118c..f5fe2788370 100644 --- a/docs/ru/sql-reference/statements/select/index.md +++ b/docs/ru/sql-reference/statements/select/index.md @@ -1,6 +1,8 @@ --- -toc_priority: 33 -toc_title: SELECT +title: "\u0421\u0438\u043d\u0442\u0430\u043a\u0441\u0438\u0441\u0020\u0437\u0430\u043f\u0440\u043e\u0441\u043e\u0432\u0020\u0053\u0045\u004c\u0045\u0043\u0054" +toc_folder_title: SELECT +toc_priority: 32 +toc_title: "\u041e\u0431\u0437\u043e\u0440" --- # Синтаксис запросов SELECT {#select-queries-syntax} diff --git a/docs/ru/sql-reference/statements/select/into-outfile.md b/docs/ru/sql-reference/statements/select/into-outfile.md index 0f5cf01e9d1..f956903f8b4 100644 --- a/docs/ru/sql-reference/statements/select/into-outfile.md +++ b/docs/ru/sql-reference/statements/select/into-outfile.md @@ -1,3 +1,7 @@ +--- +toc_title: INTO OUTFILE +--- + # Секция INTO OUTFILE {#into-outfile-clause} Чтобы перенаправить вывод `SELECT` запроса в указанный файл на стороне клиента, добавьте к нему секцию `INTO OUTFILE filename` (где filename — строковый литерал). diff --git a/docs/ru/sql-reference/statements/select/join.md b/docs/ru/sql-reference/statements/select/join.md index 6f1b5e2cde6..c5548d74156 100644 --- a/docs/ru/sql-reference/statements/select/join.md +++ b/docs/ru/sql-reference/statements/select/join.md @@ -1,3 +1,7 @@ +--- +toc_title: JOIN +--- + # Секция JOIN {#select-join} Join создаёт новую таблицу путем объединения столбцов из одной или нескольких таблиц с использованием общих для каждой из них значений. Это обычная операция в базах данных с поддержкой SQL, которая соответствует join из [реляционной алгебры](https://en.wikipedia.org/wiki/Relational_algebra#Joins_and_join-like_operators). Частный случай соединения одной таблицы часто называют «self-join». 
diff --git a/docs/ru/sql-reference/statements/select/limit-by.md b/docs/ru/sql-reference/statements/select/limit-by.md index ea5d467ae4f..fba81c023b5 100644 --- a/docs/ru/sql-reference/statements/select/limit-by.md +++ b/docs/ru/sql-reference/statements/select/limit-by.md @@ -1,3 +1,7 @@ +--- +toc_title: LIMIT BY +--- + # Секция LIMIT BY {#limit-by-clause} Запрос с секцией `LIMIT n BY expressions` выбирает первые `n` строк для каждого отличного значения `expressions`. Ключ `LIMIT BY` может содержать любое количество [выражений](../../syntax.md#syntax-expressions). diff --git a/docs/ru/sql-reference/statements/select/prewhere.md b/docs/ru/sql-reference/statements/select/prewhere.md index 1c8595d8e0c..c2a02b1a436 100644 --- a/docs/ru/sql-reference/statements/select/prewhere.md +++ b/docs/ru/sql-reference/statements/select/prewhere.md @@ -1,3 +1,7 @@ +--- +toc_title: PREWHERE +--- + # Секция PREWHERE {#prewhere-clause} Prewhere — это оптимизация для более эффективного применения фильтрации. Она включена по умолчанию, даже если секция `PREWHERE` явно не указана. В этом случае работает автоматическое перемещение части выражения из [WHERE](where.md) до стадии prewhere. Роль секции `PREWHERE` только для управления этой оптимизацией, если вы думаете, что знаете, как сделать перемещение условия лучше, чем это происходит по умолчанию. diff --git a/docs/ru/sql-reference/statements/select/sample.md b/docs/ru/sql-reference/statements/select/sample.md index ca6b49c9ad6..a71e8256f0f 100644 --- a/docs/ru/sql-reference/statements/select/sample.md +++ b/docs/ru/sql-reference/statements/select/sample.md @@ -1,3 +1,7 @@ +--- +toc_title: SAMPLE +--- + # Секция SAMPLE {#select-sample-clause} Секция `SAMPLE` позволяет выполнять запросы приближённо. Например, чтобы посчитать статистику по всем визитам, можно обработать 1/10 всех визитов и результат домножить на 10. diff --git a/docs/ru/sql-reference/statements/select/union-all.md b/docs/ru/sql-reference/statements/select/union-all.md index bffd667fa1d..b9d1f485a7b 100644 --- a/docs/ru/sql-reference/statements/select/union-all.md +++ b/docs/ru/sql-reference/statements/select/union-all.md @@ -1,3 +1,7 @@ +--- +toc_title: UNION ALL +--- + # Секция UNION ALL {#union-all-clause} Вы можете использовать `UNION ALL` чтобы объединить любое количество `SELECT` запросы путем расширения их результатов. Пример: diff --git a/docs/ru/sql-reference/statements/select/where.md b/docs/ru/sql-reference/statements/select/where.md index 63d081db43d..8cb8e634303 100644 --- a/docs/ru/sql-reference/statements/select/where.md +++ b/docs/ru/sql-reference/statements/select/where.md @@ -1,3 +1,7 @@ +--- +toc_title: WHERE +--- + # Секция WHERE {#select-where} Позволяет задать выражение, которое ClickHouse использует для фильтрации данных перед всеми другими действиями в запросе кроме выражений, содержащихся в секции [PREWHERE](prewhere.md#prewhere-clause). Обычно, это выражение с логическими операторами. diff --git a/docs/ru/sql-reference/statements/select/with.md b/docs/ru/sql-reference/statements/select/with.md index a5be733866f..4feae232bd7 100644 --- a/docs/ru/sql-reference/statements/select/with.md +++ b/docs/ru/sql-reference/statements/select/with.md @@ -1,3 +1,7 @@ +--- +toc_title: WITH +--- + # Секция WITH {#sektsiia-with} Данная секция представляет собой [Common Table Expressions](https://ru.wikipedia.org/wiki/Иерархические_и_рекурсивные_запросы_в_SQL), то есть позволяет использовать результаты выражений из секции `WITH` в остальной части `SELECT` запроса. 
diff --git a/docs/ru/sql-reference/statements/set-role.md b/docs/ru/sql-reference/statements/set-role.md index 5d840fcddb7..ccbef41aa9b 100644 --- a/docs/ru/sql-reference/statements/set-role.md +++ b/docs/ru/sql-reference/statements/set-role.md @@ -1,5 +1,5 @@ --- -toc_priority: 52 +toc_priority: 50 toc_title: SET ROLE --- diff --git a/docs/ru/sql-reference/statements/set.md b/docs/ru/sql-reference/statements/set.md index 0e12e2ee7bc..b60dfcf8324 100644 --- a/docs/ru/sql-reference/statements/set.md +++ b/docs/ru/sql-reference/statements/set.md @@ -1,5 +1,5 @@ --- -toc_priority: 51 +toc_priority: 49 toc_title: SET --- diff --git a/docs/ru/sql-reference/statements/show.md b/docs/ru/sql-reference/statements/show.md index 4b226765632..4516a401de9 100644 --- a/docs/ru/sql-reference/statements/show.md +++ b/docs/ru/sql-reference/statements/show.md @@ -1,3 +1,8 @@ +--- +toc_priority: 37 +toc_title: SHOW +--- + # SHOW Queries {#show-queries} ## SHOW CREATE TABLE {#show-create-table} diff --git a/docs/ru/sql-reference/statements/system.md b/docs/ru/sql-reference/statements/system.md index 1ba3a7555b9..4780e9b613f 100644 --- a/docs/ru/sql-reference/statements/system.md +++ b/docs/ru/sql-reference/statements/system.md @@ -1,3 +1,8 @@ +--- +toc_priority: 36 +toc_title: SYSTEM +--- + # Запросы SYSTEM {#query-language-system} - [RELOAD EMBEDDED DICTIONARIES](#query_language-system-reload-emdedded-dictionaries) diff --git a/docs/ru/sql-reference/statements/truncate.md b/docs/ru/sql-reference/statements/truncate.md index f8806496e48..4909d349658 100644 --- a/docs/ru/sql-reference/statements/truncate.md +++ b/docs/ru/sql-reference/statements/truncate.md @@ -1,5 +1,5 @@ --- -toc_priority: 53 +toc_priority: 51 toc_title: TRUNCATE --- diff --git a/docs/ru/sql-reference/statements/use.md b/docs/ru/sql-reference/statements/use.md index 84b0f5ed13f..c84329ea5ff 100644 --- a/docs/ru/sql-reference/statements/use.md +++ b/docs/ru/sql-reference/statements/use.md @@ -1,5 +1,5 @@ --- -toc_priority: 54 +toc_priority: 52 toc_title: USE --- diff --git a/docs/ru/sql-reference/syntax.md b/docs/ru/sql-reference/syntax.md index 5c819002cbe..ca73d3a137e 100644 --- a/docs/ru/sql-reference/syntax.md +++ b/docs/ru/sql-reference/syntax.md @@ -1,3 +1,8 @@ +--- +toc_priority: 31 +toc_title: "\u0421\u0438\u043d\u0442\u0430\u043a\u0441\u0438\u0441" +--- + # Синтаксис {#sintaksis} В системе есть два вида парсеров: полноценный парсер SQL (recursive descent parser) и парсер форматов данных (быстрый потоковый парсер). diff --git a/docs/ru/sql-reference/table-functions/file.md b/docs/ru/sql-reference/table-functions/file.md index 4581fa081d7..d3e6e106125 100644 --- a/docs/ru/sql-reference/table-functions/file.md +++ b/docs/ru/sql-reference/table-functions/file.md @@ -1,3 +1,8 @@ +--- +toc_priority: 37 +toc_title: file +--- + # file {#file} Создаёт таблицу из файла. Данная табличная функция похожа на табличные функции [file](file.md) и [hdfs](hdfs.md). diff --git a/docs/ru/sql-reference/table-functions/generate.md b/docs/ru/sql-reference/table-functions/generate.md index 9e6d36b2a4b..47b7e43bc86 100644 --- a/docs/ru/sql-reference/table-functions/generate.md +++ b/docs/ru/sql-reference/table-functions/generate.md @@ -1,3 +1,8 @@ +--- +toc_priority: 47 +toc_title: generateRandom +--- + # generateRandom {#generaterandom} Генерирует случайные данные с заданной схемой. 
diff --git a/docs/ru/sql-reference/table-functions/hdfs.md b/docs/ru/sql-reference/table-functions/hdfs.md index e90f27a2eca..6edd70b7b1b 100644 --- a/docs/ru/sql-reference/table-functions/hdfs.md +++ b/docs/ru/sql-reference/table-functions/hdfs.md @@ -1,3 +1,8 @@ +--- +toc_priority: 45 +toc_title: hdfs +--- + # hdfs {#hdfs} Создаёт таблицу из файла в HDFS. Данная табличная функция похожа на табличные функции [url](url.md) и [file](file.md). diff --git a/docs/ru/sql-reference/table-functions/input.md b/docs/ru/sql-reference/table-functions/input.md index 399268f9af6..96cf7515d52 100644 --- a/docs/ru/sql-reference/table-functions/input.md +++ b/docs/ru/sql-reference/table-functions/input.md @@ -1,3 +1,8 @@ +--- +toc_priority: 46 +toc_title: input +--- + # input {#input} `input(structure)` - табличная функция, позволяющая эффективно преобразовывать и вставлять отправленные на сервер данные, diff --git a/docs/ru/sql-reference/table-functions/jdbc.md b/docs/ru/sql-reference/table-functions/jdbc.md index 20ef0d1f107..d388262606f 100644 --- a/docs/ru/sql-reference/table-functions/jdbc.md +++ b/docs/ru/sql-reference/table-functions/jdbc.md @@ -1,3 +1,8 @@ +--- +toc_priority: 43 +toc_title: jdbc +--- + # jdbc {#jdbc} `jdbc(jdbc_connection_uri, schema, table)` - возвращает таблицу, соединение с которой происходит через JDBC-драйвер. diff --git a/docs/ru/sql-reference/table-functions/merge.md b/docs/ru/sql-reference/table-functions/merge.md index d4e66391382..0822fdfe535 100644 --- a/docs/ru/sql-reference/table-functions/merge.md +++ b/docs/ru/sql-reference/table-functions/merge.md @@ -1,3 +1,8 @@ +--- +toc_priority: 38 +toc_title: merge +--- + # merge {#merge} `merge(db_name, 'tables_regexp')` - создаёт временную таблицу типа Merge. Подробнее смотрите раздел «Движки таблиц, Merge». diff --git a/docs/ru/sql-reference/table-functions/mysql.md b/docs/ru/sql-reference/table-functions/mysql.md index 99d82022df4..21841eee67a 100644 --- a/docs/ru/sql-reference/table-functions/mysql.md +++ b/docs/ru/sql-reference/table-functions/mysql.md @@ -1,3 +1,8 @@ +--- +toc_priority: 42 +toc_title: mysql +--- + # mysql {#mysql} Позволяет выполнять запросы `SELECT` над данными, хранящимися на удалённом MySQL сервере. diff --git a/docs/ru/sql-reference/table-functions/numbers.md b/docs/ru/sql-reference/table-functions/numbers.md index 79d01dd0b92..005f400e082 100644 --- a/docs/ru/sql-reference/table-functions/numbers.md +++ b/docs/ru/sql-reference/table-functions/numbers.md @@ -1,3 +1,8 @@ +--- +toc_priority: 39 +toc_title: numbers +--- + # numbers {#numbers} `numbers(N)` - возвращает таблицу с единственным столбцом `number` (UInt64), содержащим натуральные числа от `0` до `N-1`. diff --git a/docs/ru/sql-reference/table-functions/odbc.md b/docs/ru/sql-reference/table-functions/odbc.md index 38da5066cbd..19203123840 100644 --- a/docs/ru/sql-reference/table-functions/odbc.md +++ b/docs/ru/sql-reference/table-functions/odbc.md @@ -1,3 +1,8 @@ +--- +toc_priority: 44 +toc_title: odbc +--- + # odbc {#table-functions-odbc} Возвращает таблицу, подключенную через [ODBC](https://en.wikipedia.org/wiki/Open_Database_Connectivity). 
diff --git a/docs/ru/sql-reference/table-functions/remote.md b/docs/ru/sql-reference/table-functions/remote.md index 944500b57a0..901317a805d 100644 --- a/docs/ru/sql-reference/table-functions/remote.md +++ b/docs/ru/sql-reference/table-functions/remote.md @@ -1,3 +1,8 @@ +--- +toc_priority: 40 +toc_title: remote +--- + # remote, remoteSecure {#remote-remotesecure} Позволяет обратиться к удалённым серверам без создания таблицы типа `Distributed`. diff --git a/docs/ru/sql-reference/table-functions/url.md b/docs/ru/sql-reference/table-functions/url.md index 4c49bc76751..0cd7c24c663 100644 --- a/docs/ru/sql-reference/table-functions/url.md +++ b/docs/ru/sql-reference/table-functions/url.md @@ -1,3 +1,8 @@ +--- +toc_priority: 41 +toc_title: url +--- + # url {#url} `url(URL, format, structure)` - возвращает таблицу со столбцами, указанными в diff --git a/docs/ru/whats-new/index.md b/docs/ru/whats-new/index.md index a8c908088d0..b8ba6133454 100644 --- a/docs/ru/whats-new/index.md +++ b/docs/ru/whats-new/index.md @@ -1,6 +1,6 @@ --- toc_folder_title: "\u0427\u0442\u043E \u043D\u043E\u0432\u043E\u0433\u043E?" -toc_priority: 72 +toc_priority: 82 --- diff --git a/docs/ru/whats-new/security-changelog.md b/docs/ru/whats-new/security-changelog.md index ad55e1eeb51..1f46535833d 100644 --- a/docs/ru/whats-new/security-changelog.md +++ b/docs/ru/whats-new/security-changelog.md @@ -1,3 +1,8 @@ +--- +toc_priority: 76 +toc_title: Security Changelog +--- + # Security Changelog {#security-changelog} ## Исправлено в релизе 19.14.3.3, 2019-09-10 {#ispravleno-v-relize-19-14-3-3-2019-09-10} From 1be7a182de47b40617d20a697cc432e6da40bf05 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 26 Oct 2020 14:08:37 +0300 Subject: [PATCH 288/432] Revert "Revert "Optionally upload clickhouse binary in fast test"" --- docker/test/fasttest/Dockerfile | 1 + docker/test/fasttest/run.sh | 3 +++ 2 files changed, 4 insertions(+) diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile index bed438a6579..f6c665ff3fd 100644 --- a/docker/test/fasttest/Dockerfile +++ b/docker/test/fasttest/Dockerfile @@ -82,6 +82,7 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone ENV COMMIT_SHA='' ENV PULL_REQUEST_NUMBER='' +ENV COPY_CLICKHOUSE_BINARY_TO_OUTPUT=0 COPY run.sh / CMD ["/bin/bash", "/run.sh"] diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 06eb0668fe6..85a5dd866f8 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -172,6 +172,9 @@ function build ( cd "$FASTTEST_BUILD" time ninja clickhouse-bundle | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/build_log.txt" +if [ "$COPY_CLICKHOUSE_BINARY_TO_OUTPUT" -eq "1" ]; then + cp programs/clickhouse "$FASTTEST_OUTPUT/clickhouse" +fi ccache --show-stats ||: ) } From 4e8bea397b8b4ef5cfd6d6757781a0d59d5ab6a5 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 26 Oct 2020 15:10:16 +0300 Subject: [PATCH 289/432] Update ExpressionActions.cpp --- src/Interpreters/ExpressionActions.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp index 6ce9fe2c793..36d128b4234 100644 --- a/src/Interpreters/ExpressionActions.cpp +++ b/src/Interpreters/ExpressionActions.cpp @@ -642,7 +642,7 @@ void ExpressionActions::execute(Block & block, bool dry_run) const #if defined(ADDRESS_SANITIZER) const auto & msg = e.message(); - if (__asan_region_is_poisoned(msg.data(), msg.size())) + if 
(__asan_region_is_poisoned(const_cast<char *>(msg.data()), msg.size())) { LOG_FATAL(&Poco::Logger::get("ExpressionActions"), "Poisoned exception message (asan): {}", e.getStackTraceString()); } From 9ed4668dbbd8b2687586e1eac181170d8c8ba5f8 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 26 Oct 2020 15:40:55 +0300 Subject: [PATCH 290/432] Refactor common part of background list --- src/Interpreters/Context.cpp | 1 + src/Storages/MergeTree/MergeList.h | 85 +++++-------------- src/Storages/MergeTree/MergeTreeData.h | 1 - .../MergeTree/MergeTreeDataMergerMutator.h | 1 - 4 files changed, 21 insertions(+), 67 deletions(-) diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 9d2ea6ded86..197c183a06b 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/MergeList.h b/src/Storages/MergeTree/MergeList.h index c1166c55703..cf25721f2df 100644 --- a/src/Storages/MergeTree/MergeList.h +++ b/src/Storages/MergeTree/MergeList.h @@ -1,21 +1,20 @@ #pragma once +#include +#include #include #include #include -#include -#include #include +#include +#include +#include #include #include #include #include -/** Maintains a list of currently running merges. - * For implementation of system.merges table. - */ namespace CurrentMetrics { extern const Metric Merge; @@ -101,68 +100,36 @@ struct MergeListElement : boost::noncopyable ~MergeListElement(); }; +using MergeListEntry = BackgroundProcessListEntry<MergeListElement, MergeInfo>; -class MergeList; -class MergeListEntry { - MergeList & list; - using container_t = std::list<MergeListElement>; - container_t::iterator it; - CurrentMetrics::Increment num_merges {CurrentMetrics::Merge}; -public: - MergeListEntry(const MergeListEntry &) = delete; - MergeListEntry & operator=(const MergeListEntry &) = delete; - MergeListEntry(MergeList & list_, const container_t::iterator it_) : list(list_), it{it_} {} - ~MergeListEntry(); - MergeListElement * operator->() { return &*it; } - const MergeListElement * operator->() const { return &*it; } -}; -class MergeList { - friend class MergeListEntry; - using container_t = std::list<MergeListElement>; - using info_container_t = std::list<MergeInfo>; - mutable std::mutex mutex; - container_t merges; + +/** Maintains a list of currently running merges. + * For implementation of system.merges table. + */ +class MergeList final : public BackgroundProcessList<MergeListElement, MergeInfo> { + using Parent = BackgroundProcessList<MergeListElement, MergeInfo>; std::atomic<size_t> merges_with_ttl_counter = 0; public: - using Entry = MergeListEntry; - using EntryPtr = std::unique_ptr<Entry>; + MergeList() + : Parent(CurrentMetrics::Merge) + {} - template <typename... Args> - EntryPtr insert(Args &&... args) + void onEntryCreate(const Parent::Entry & entry) override { - std::lock_guard lock{mutex}; - auto entry = std::make_unique<Entry>(*this, merges.emplace(merges.end(), std::forward<Args>(args)...)); - if (isTTLMergeType((*entry)->merge_type)) + if (isTTLMergeType(entry->merge_type)) ++merges_with_ttl_counter; - return entry; } - info_container_t get() const + void onEntryDestroy(const Parent::Entry & entry) override { - std::lock_guard lock{mutex}; - info_container_t res; - for (const auto & merge_element : merges) - res.emplace_back(merge_element.getInfo()); - return res; + if (isTTLMergeType(entry->merge_type)) + --merges_with_ttl_counter; } void cancelPartMutations(const String & partition_id, Int64 mutation_version) { std::lock_guard lock{mutex}; - for (auto & merge_element : merges) + for (auto & merge_element : entries) { if ((partition_id.empty() || merge_element.partition_id == partition_id) && merge_element.source_data_version < mutation_version @@ -177,16 +144,4 @@ public: } }; -inline MergeListEntry::~MergeListEntry() -{ - std::lock_guard lock{list.mutex}; - if (isTTLMergeType(it->merge_type)) - --list.merges_with_ttl_counter; - list.merges.erase(it); -} - } diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 5c18661dad1..889e4fb16b4 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -35,7 +35,6 @@ namespace DB { -class MergeListEntry; class AlterCommands; class MergeTreePartsMover; class MutationCommands; diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h index 0ad525d1901..570bbecb165 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h @@ -13,7 +13,6 @@ namespace DB { -class MergeListEntry; class MergeProgressCallback; /// Auxiliary struct holding metainformation for the future merged or mutated part. From bd17843bdb2a1ee2a209d11b5f1d6dd8e197c99a Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 26 Oct 2020 16:00:49 +0300 Subject: [PATCH 291/432] Update uuid-functions.md --- docs/en/sql-reference/functions/uuid-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/functions/uuid-functions.md b/docs/en/sql-reference/functions/uuid-functions.md index b747ac07bb8..01a61c65b67 100644 --- a/docs/en/sql-reference/functions/uuid-functions.md +++ b/docs/en/sql-reference/functions/uuid-functions.md @@ -71,7 +71,7 @@ toUUIDOrNull(String) **Returned value** -The Nullable UUID type value. +The Nullable(UUID) type value.
**Usage example** From ab4c43cb812a3f06628db5b04813bc62882926b3 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Mon, 26 Oct 2020 16:44:46 +0300 Subject: [PATCH 292/432] fix group by with totals/rollup/cube modifiers and min/max functions over group by keys --- src/Interpreters/TreeOptimizer.cpp | 7 ++++++- .../01532_min_max_with_modifiers.reference | 20 +++++++++++++++++++ .../01532_min_max_with_modifiers.sql | 18 +++++++++++++++++ 3 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/01532_min_max_with_modifiers.reference create mode 100644 tests/queries/0_stateless/01532_min_max_with_modifiers.sql diff --git a/src/Interpreters/TreeOptimizer.cpp b/src/Interpreters/TreeOptimizer.cpp index 9d52e30cea4..61ca933dd53 100644 --- a/src/Interpreters/TreeOptimizer.cpp +++ b/src/Interpreters/TreeOptimizer.cpp @@ -644,8 +644,13 @@ void TreeOptimizer::apply(ASTPtr & query, Aliases & aliases, const NameSet & sou optimizeInjectiveFunctionsInsideUniq(query, context); /// Eliminate min/max/any aggregators of functions of GROUP BY keys - if (settings.optimize_aggregators_of_group_by_keys) + if (settings.optimize_aggregators_of_group_by_keys + && !select_query->group_by_with_totals + && !select_query->group_by_with_rollup + && !select_query->group_by_with_cube) + { optimizeAggregateFunctionsOfGroupByKeys(select_query, query); + } /// Remove duplicate items from ORDER BY. optimizeDuplicatesInOrderBy(select_query); diff --git a/tests/queries/0_stateless/01532_min_max_with_modifiers.reference b/tests/queries/0_stateless/01532_min_max_with_modifiers.reference new file mode 100644 index 00000000000..9e1683ca6f9 --- /dev/null +++ b/tests/queries/0_stateless/01532_min_max_with_modifiers.reference @@ -0,0 +1,20 @@ +totals +1 1 1 +2 2 2 +3 3 3 + +0 1 3 +rollup +1 1 1 +2 2 2 +3 3 3 +0 1 3 +cube +1 1 1 +2 2 2 +3 3 3 +0 1 3 +======= +1 1 2 1 +2 2 3 1 +0 1 3 2 diff --git a/tests/queries/0_stateless/01532_min_max_with_modifiers.sql b/tests/queries/0_stateless/01532_min_max_with_modifiers.sql new file mode 100644 index 00000000000..0c8651c0f01 --- /dev/null +++ b/tests/queries/0_stateless/01532_min_max_with_modifiers.sql @@ -0,0 +1,18 @@ +SELECT 'totals'; +SELECT number % 3 + 1 AS n, min(n), max(n) FROM numbers(100) GROUP BY n WITH TOTALS; +SELECT 'rollup'; +SELECT number % 3 + 1 AS n, min(n), max(n) FROM numbers(100) GROUP BY n WITH ROLLUP; +SELECT 'cube'; +SELECT number % 3 + 1 AS n, min(n), max(n) FROM numbers(100) GROUP BY n WITH CUBE; +SELECT '======='; +SELECT + x, + min(x) AS lower, + max(x) + 1 AS upper, + upper - lower AS range +FROM +( + SELECT arrayJoin([1, 2]) AS x +) +GROUP BY x WITH ROLLUP; From 44162c28c00654b75c98a990d065c04c270807aa Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Mon, 26 Oct 2020 19:09:43 +0300 Subject: [PATCH 293/432] Update LocalServer.cpp --- programs/local/LocalServer.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index f85b5bde965..63667307876 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -451,9 +451,7 @@ void LocalServer::setupUsers() void LocalServer::cleanup() { - // Delete the temporary directory if needed. Just in case, check that it is - // in the system temporary directory, not to delete user data if there is a - // bug. + // Delete the temporary directory if needed.
if (temporary_directory_to_delete) { const auto dir = *temporary_directory_to_delete; From 79a6be08d132e6fe2a55c638e5abeb3677aafd2a Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Mon, 26 Oct 2020 19:09:53 +0300 Subject: [PATCH 294/432] Update src/Interpreters/Context.cpp --- src/Interpreters/Context.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 6e5274cd5b3..328e122fdb5 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -2029,6 +2029,8 @@ void Context::reloadConfig() const void Context::shutdown() { + // Disk selector might not be initialized if there was some error during + // its initialization. Don't try to initialize it again on shutdown. if (shared->merge_tree_disk_selector) { for (auto & [disk_name, disk] : getDisksMap()) From 60db7c2aa5882b74b9b30798f750cfd68b34fbc3 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Mon, 26 Oct 2020 19:17:54 +0300 Subject: [PATCH 295/432] fix a typo in test name --- docker/test/fasttest/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 85a5dd866f8..8300c31681e 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -271,7 +271,7 @@ TESTS_TO_SKIP=( 00974_query_profiler # Look at DistributedFilesToInsert, so cannot run in parallel. - 01457_DistributedFilesToInsert + 01460_DistributedFilesToInsert 01541_max_memory_usage_for_user From e27dd538fd4e51dc7823c7530509241e97b28e51 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Mon, 26 Oct 2020 19:35:37 +0300 Subject: [PATCH 296/432] performance comparison --- docker/test/performance-comparison/compare.sh | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 258bc0a95f7..30443f21fba 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -77,12 +77,9 @@ function restart while killall clickhouse-server; do echo . ; sleep 1 ; done echo all killed - # Disable percpu arenas because they segfault when the process is bound to - # a particular NUMA node: https://github.com/jemalloc/jemalloc/pull/1939 - # - # About the jemalloc settings: + # Change the jemalloc settings here. 
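    # MALLOC_CONF takes comma-separated key:value pairs, for example
    # (narenas is shown purely to illustrate the syntax):
    #   export MALLOC_CONF="confirm_conf:true,narenas:4"
    # confirm_conf:true makes jemalloc print the options it parsed, so a
    # mistyped setting is noticed as soon as the server starts.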
# https://github.com/jemalloc/jemalloc/wiki/Getting-Started - export MALLOC_CONF="percpu_arena:disabled,confirm_conf:true" + export MALLOC_CONF="confirm_conf:true" set -m # Spawn servers in their own process groups From 880f4bbd05d0a9ec3dc6b3d0aec357b9890e9ec0 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 26 Oct 2020 19:38:35 +0300 Subject: [PATCH 297/432] System fetches --- src/IO/ReadWriteBufferFromHTTP.h | 8 ++ src/Interpreters/Context.cpp | 4 + src/Interpreters/Context.h | 4 + .../MergeTree/BackgroundProcessList.h | 89 +++++++++++++++++ src/Storages/MergeTree/DataPartsExchange.cpp | 29 +++++- src/Storages/MergeTree/MergeList.h | 1 + .../MergeTree/ReplicatedFetchesList.cpp | 62 ++++++++++++ .../MergeTree/ReplicatedFetchesList.h | 95 +++++++++++++++++++ src/Storages/System/StorageSystemFetches.cpp | 59 ++++++++++++ src/Storages/System/StorageSystemFetches.h | 28 ++++++ 10 files changed, 378 insertions(+), 1 deletion(-) create mode 100644 src/Storages/MergeTree/BackgroundProcessList.h create mode 100644 src/Storages/MergeTree/ReplicatedFetchesList.cpp create mode 100644 src/Storages/MergeTree/ReplicatedFetchesList.h create mode 100644 src/Storages/System/StorageSystemFetches.cpp create mode 100644 src/Storages/System/StorageSystemFetches.h diff --git a/src/IO/ReadWriteBufferFromHTTP.h b/src/IO/ReadWriteBufferFromHTTP.h index 48407f76938..c79a3bd953d 100644 --- a/src/IO/ReadWriteBufferFromHTTP.h +++ b/src/IO/ReadWriteBufferFromHTTP.h @@ -106,6 +106,7 @@ namespace detail std::vector cookies; HTTPHeaderEntries http_header_entries; RemoteHostFilter remote_host_filter; + std::function read_callback; std::istream * call(const Poco::URI uri_, Poco::Net::HTTPResponse & response) { @@ -208,6 +209,8 @@ namespace detail return false; internal_buffer = impl->buffer(); working_buffer = internal_buffer; + if (read_callback) + read_callback(count()); return true; } @@ -218,6 +221,11 @@ namespace detail return cookie.getValue(); return def; } + + void setNextReadCallback(std::function read_callback_) + { + read_callback = read_callback_; + } }; } diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 197c183a06b..38e8ffdf76a 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -329,6 +330,7 @@ struct ContextShared mutable MarkCachePtr mark_cache; /// Cache of marks in compressed files. ProcessList process_list; /// Executing queries at the moment. MergeList merge_list; /// The list of executable merge (for (Replicated)?MergeTree) + ReplicatedFetchList replicated_fetch_list; ConfigurationPtr users_config; /// Config with the users, profiles and quotas sections. InterserverIOHandler interserver_io_handler; /// Handler for interserver communication. std::optional buffer_flush_schedule_pool; /// A thread pool that can do background flush for Buffer tables. 
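The read callback added to ReadWriteBufferFromHTTP above is the hook that the fetch progress tracking later in this patch plugs into: a plain std::function stored in the buffer and fired from nextImpl() with the cumulative byte count. A self-contained sketch of the same pattern follows; CallbackReader and onRefill are hypothetical names, and the void(size_t) signature is inferred from the call sites.

#include <cstddef>
#include <functional>
#include <iostream>
#include <utility>

// Minimal reader that reports cumulative progress to an optional callback,
// mirroring setNextReadCallback()/read_callback(count()) above.
class CallbackReader
{
public:
    void setNextReadCallback(std::function<void(std::size_t)> callback_)
    {
        read_callback = std::move(callback_);
    }

    // In the real buffer this is the tail of nextImpl(): after a successful
    // refill, the callback is invoked with the total bytes read so far.
    void onRefill(std::size_t bytes_read_so_far)
    {
        if (read_callback)
            read_callback(bytes_read_so_far);
    }

private:
    std::function<void(std::size_t)> read_callback;
};

int main()
{
    CallbackReader reader;
    reader.setNextReadCallback([](std::size_t n) { std::cout << "read " << n << " bytes so far\n"; });
    reader.onRefill(4096);
    reader.onRefill(8192);
}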
@@ -506,6 +508,8 @@ ProcessList & Context::getProcessList() { return shared->process_list; } const ProcessList & Context::getProcessList() const { return shared->process_list; } MergeList & Context::getMergeList() { return shared->merge_list; } const MergeList & Context::getMergeList() const { return shared->merge_list; } +ReplicatedFetchList & Context::getReplicatedFetchList() { return shared->replicated_fetch_list; } +const ReplicatedFetchList & Context::getReplicatedFetchList() const { return shared->replicated_fetch_list; } void Context::enableNamedSessions() diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index 075fc3837ef..a072906a0ba 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -65,6 +65,7 @@ class InterserverIOHandler; class BackgroundProcessingPool; class BackgroundSchedulePool; class MergeList; +class ReplicatedFetchList; class Cluster; class Compiler; class MarkCache; @@ -477,6 +478,9 @@ public: MergeList & getMergeList(); const MergeList & getMergeList() const; + ReplicatedFetchList & getReplicatedFetchList(); + const ReplicatedFetchList & getReplicatedFetchList() const; + /// If the current session is expired at the time of the call, synchronously creates and returns a new session with the startNewSession() call. /// If no ZooKeeper configured, throws an exception. std::shared_ptr getZooKeeper() const; diff --git a/src/Storages/MergeTree/BackgroundProcessList.h b/src/Storages/MergeTree/BackgroundProcessList.h new file mode 100644 index 00000000000..f90aa1c88a8 --- /dev/null +++ b/src/Storages/MergeTree/BackgroundProcessList.h @@ -0,0 +1,89 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace DB +{ + + +template +class BackgroundProcessList; + +template +class BackgroundProcessListEntry +{ + BackgroundProcessList & list; + using container_t = std::list; + typename container_t::iterator it; + CurrentMetrics::Increment metric_increment; +public: + BackgroundProcessListEntry(const BackgroundProcessListEntry &) = delete; + BackgroundProcessListEntry & operator=(const BackgroundProcessListEntry &) = delete; + + BackgroundProcessListEntry(BackgroundProcessList & list_, const typename container_t::iterator it_, const CurrentMetrics::Metric & metric) + : list(list_), it{it_}, metric_increment{metric} + { + list.onEntryCreate(*this); + } + + ~BackgroundProcessListEntry() + { + std::lock_guard lock{list.mutex}; + list.onEntryDestroy(*this); + list.entries.erase(it); + } + + ListElement * operator->() { return &*it; } + const ListElement * operator->() const { return &*it; } +}; + + +template +class BackgroundProcessList +{ +protected: + friend class BackgroundProcessListEntry; + + using container_t = std::list; + using info_container_t = std::list; + + mutable std::mutex mutex; + container_t entries; + + CurrentMetrics::Metric metric; + + BackgroundProcessList(const CurrentMetrics::Metric & metric_) + : metric(metric_) + {} +public: + + using Entry = BackgroundProcessListEntry; + using EntryPtr = std::unique_ptr; + + template + EntryPtr insert(Args &&... 
args) + { + std::lock_guard lock{mutex}; + auto entry = std::make_unique(*this, entries.emplace(entries.end(), std::forward(args)...), metric); + return entry; + } + + info_container_t get() const + { + std::lock_guard lock{mutex}; + info_container_t res; + for (const auto & list_element : entries) + res.emplace_back(list_element.getInfo()); + return res; + } + + virtual void onEntryCreate(const Entry & /* entry */) {} + virtual void onEntryDestroy(const Entry & /* entry */) {} + virtual inline ~BackgroundProcessList() {} +}; + +} diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index 0e79404e59d..b1f1dc3f5f5 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -12,6 +12,7 @@ #include #include #include +#include namespace CurrentMetrics @@ -52,6 +53,25 @@ std::string getEndpointId(const std::string & node_id) return "DataPartsExchange:" + node_id; } + +struct ReplicatedFetchReadCallback +{ + ReplicatedFetchList::Entry & replicated_fetch_entry; + + ReplicatedFetchReadCallback(ReplicatedFetchList::Entry & replicated_fetch_entry_) + : replicated_fetch_entry(replicated_fetch_entry_) + {} + + + void operator() (size_t bytes_count) + { + replicated_fetch_entry->bytes_read_compressed = bytes_count; + replicated_fetch_entry->progress.store( + replicated_fetch_entry->bytes_read_compressed.load(std::memory_order_relaxed) / replicated_fetch_entry->total_size_bytes_compressed, + std::memory_order_relaxed); + } +}; + } std::string Service::getId(const std::string & node_id) const @@ -228,7 +248,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( throw Exception("Fetching of part was cancelled", ErrorCodes::ABORTED); /// Validation of the input that may come from malicious replica. 
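    /// (MergeTreePartInfo::fromPartName() throws on a malformed name, so the
    /// call below doubles as that validation; the change keeps its result so
    /// that the partition id can be attached to the fetch-list entry created
    /// further down.)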
- MergeTreePartInfo::fromPartName(part_name, data.format_version); + auto part_info = MergeTreePartInfo::fromPartName(part_name, data.format_version); const auto data_settings = data.getSettings(); Poco::URI uri; @@ -286,6 +306,13 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( /// We don't know real size of part because sender server version is too old reservation = data.makeEmptyReservationOnLargestDisk(); } + auto storage_id = data.getStorageID(); + auto entry = data.global_context.getReplicatedFetchList().insert( + storage_id.getDatabaseName(), storage_id.getTableName(), + part_info.partition_id, part_name, part_name, + replica_path, uri.toString(), interserver_scheme, to_detached, sum_files_size); + + in.setNextReadCallback(ReplicatedFetchReadCallback(*entry)); bool sync = (data_settings->min_compressed_bytes_to_fsync_after_fetch && sum_files_size >= data_settings->min_compressed_bytes_to_fsync_after_fetch); diff --git a/src/Storages/MergeTree/MergeList.h b/src/Storages/MergeTree/MergeList.h index cf25721f2df..8d97b899aac 100644 --- a/src/Storages/MergeTree/MergeList.h +++ b/src/Storages/MergeTree/MergeList.h @@ -107,6 +107,7 @@ using MergeListEntry = BackgroundProcessListEntry; */ class MergeList final : public BackgroundProcessList { +private: using Parent = BackgroundProcessList; std::atomic merges_with_ttl_counter = 0; public: diff --git a/src/Storages/MergeTree/ReplicatedFetchesList.cpp b/src/Storages/MergeTree/ReplicatedFetchesList.cpp new file mode 100644 index 00000000000..d66d0965351 --- /dev/null +++ b/src/Storages/MergeTree/ReplicatedFetchesList.cpp @@ -0,0 +1,62 @@ +#include +#include +#include +#include + +namespace DB +{ +ReplicatedFetchListElement::ReplicatedFetchListElement( + const std::string & database_, const std::string & table_, + const std::string & partition_id_, const std::string & result_part_name_, + const std::string & result_part_path_, const std::string & source_replica_path_, + const std::string & source_replica_address_, const std::string & interserver_scheme_, + UInt8 to_detached_, UInt64 total_size_bytes_compressed_) + : database(database_) + , table(table_) + , partition_id(partition_id_) + , result_part_name(result_part_name_) + , result_part_path(result_part_path_) + , source_replica_path(source_replica_path_) + , source_replica_address(source_replica_address_) + , interserver_scheme(interserver_scheme_) + , to_detached(to_detached_) + , total_size_bytes_compressed(total_size_bytes_compressed_) +{ + background_thread_memory_tracker = CurrentThread::getMemoryTracker(); + if (background_thread_memory_tracker) + { + background_thread_memory_tracker_prev_parent = background_thread_memory_tracker->getParent(); + background_thread_memory_tracker->setParent(&memory_tracker); + } +} + + +ReplicatedFetchInfo ReplicatedFetchListElement::getInfo() const +{ + ReplicatedFetchInfo res; + res.database = database; + res.table = table; + res.partition_id = partition_id; + res.result_part_name = result_part_name; + res.result_part_path = result_part_path; + res.source_replica_path = source_replica_path; + res.source_replica_address = source_replica_address; + res.interserver_scheme = interserver_scheme; + res.to_detached = to_detached; + res.elapsed = watch.elapsedSeconds(); + res.progress = progress.load(std::memory_order_relaxed); + res.bytes_read_compressed = bytes_read_compressed.load(std::memory_order_relaxed); + res.total_size_bytes_compressed = total_size_bytes_compressed; + res.memory_usage = memory_tracker.get(); + res.thread_id = thread_id; + 
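    /// (The fields above are copied one by one; the atomics are read with
    /// memory_order_relaxed, so the caller gets a cheap snapshot that may be
    /// slightly stale but is never torn within any single field.)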
return res; +} + +ReplicatedFetchListElement::~ReplicatedFetchListElement() +{ + /// Unplug memory_tracker from current background processing pool thread + if (background_thread_memory_tracker) + background_thread_memory_tracker->setParent(background_thread_memory_tracker_prev_parent); +} + +} diff --git a/src/Storages/MergeTree/ReplicatedFetchesList.h b/src/Storages/MergeTree/ReplicatedFetchesList.h new file mode 100644 index 00000000000..ad8edb6ad59 --- /dev/null +++ b/src/Storages/MergeTree/ReplicatedFetchesList.h @@ -0,0 +1,95 @@ +#pragma once +#include +#include +#include +#include +#include + +namespace CurrentMetrics +{ + extern const Metric ReplicatedFetch; +} + +namespace DB +{ + +struct ReplicatedFetchInfo +{ + std::string database; + std::string table; + std::string partition_id; + + std::string result_part_name; + std::string result_part_path; + + std::string source_replica_path; + std::string source_replica_address; + std::string interserver_scheme; + + UInt8 to_detached; + + Float64 elapsed; + Float64 progress; + + UInt64 total_size_bytes_compressed; + UInt64 bytes_read_compressed; + + UInt64 memory_usage; + UInt64 thread_id; +}; + + +struct ReplicatedFetchListElement : private boost::noncopyable +{ + const std::string database; + const std::string table; + const std::string partition_id; + + const std::string result_part_name; + const std::string result_part_path; + + const std::string source_replica_path; + const std::string source_replica_address; + const std::string interserver_scheme; + + const UInt8 to_detached; + + Stopwatch watch; + std::atomic progress{}; + std::atomic is_cancelled{}; + std::atomic bytes_read_compressed{}; + UInt64 total_size_bytes_compressed{}; + + MemoryTracker memory_tracker{VariableContext::Process}; + MemoryTracker * background_thread_memory_tracker; + MemoryTracker * background_thread_memory_tracker_prev_parent = nullptr; + + UInt64 thread_id; + + ReplicatedFetchListElement( + const std::string & database_, const std::string & table_, + const std::string & partition_id_, const std::string & result_part_name_, + const std::string & result_part_path_, const std::string & source_replica_path_, + const std::string & source_replica_address_, const std::string & interserver_scheme_, + UInt8 to_detached_, UInt64 total_size_bytes_compressed_); + + ReplicatedFetchInfo getInfo() const; + + ~ReplicatedFetchListElement(); +}; + + +using ReplicatedFetchListEntry = BackgroundProcessListEntry; + +class ReplicatedFetchList final : public BackgroundProcessList +{ +private: + using Parent = BackgroundProcessList; + +public: + ReplicatedFetchList () + : Parent(CurrentMetrics::ReplicatedFetch) + {} +}; + +} diff --git a/src/Storages/System/StorageSystemFetches.cpp b/src/Storages/System/StorageSystemFetches.cpp new file mode 100644 index 00000000000..27d4eeddbfc --- /dev/null +++ b/src/Storages/System/StorageSystemFetches.cpp @@ -0,0 +1,59 @@ +#include +#include +#include +#include + +namespace DB +{ + +NamesAndTypesList StorageSystemFetches::getNamesAndTypes() +{ + return { + {"database", std::make_shared()}, + {"table", std::make_shared()}, + {"elapsed", std::make_shared()}, + {"progress", std::make_shared()}, + {"result_part_name", std::make_shared()}, + {"result_part_path", std::make_shared()}, + {"partition_id", std::make_shared()}, + {"total_size_bytes_compressed", std::make_shared()}, + {"bytes_read_compressed", std::make_shared()}, + {"source_replica_path", std::make_shared()}, + {"source_replica_address", std::make_shared()}, + {"interserver_scheme", 
std::make_shared()}, + {"to_detached", std::make_shared()}, + {"memory_usage", std::make_shared()}, + {"thread_id", std::make_shared()}, + }; +} + +void StorageSystemFetches::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const +{ + const auto access = context.getAccess(); + const bool check_access_for_tables = !access->isGranted(AccessType::SHOW_TABLES); + + for (const auto & fetch : context.getReplicatedFetchList().get()) + { + if (check_access_for_tables && !access->isGranted(AccessType::SHOW_TABLES, fetch.database, fetch.table)) + continue; + + size_t i = 0; + res_columns[i++]->insert(fetch.database); + res_columns[i++]->insert(fetch.table); + res_columns[i++]->insert(fetch.elapsed); + res_columns[i++]->insert(fetch.progress); + res_columns[i++]->insert(fetch.result_part_name); + res_columns[i++]->insert(fetch.result_part_path); + res_columns[i++]->insert(fetch.partition_id); + res_columns[i++]->insert(fetch.total_size_bytes_compressed); + res_columns[i++]->insert(fetch.bytes_read_compressed); + res_columns[i++]->insert(fetch.source_replica_path); + res_columns[i++]->insert(fetch.source_replica_address); + res_columns[i++]->insert(fetch.interserver_scheme); + res_columns[i++]->insert(fetch.to_detached); + res_columns[i++]->insert(fetch.memory_usage); + res_columns[i++]->insert(fetch.thread_id); + } +} + +} diff --git a/src/Storages/System/StorageSystemFetches.h b/src/Storages/System/StorageSystemFetches.h new file mode 100644 index 00000000000..be1b66193bf --- /dev/null +++ b/src/Storages/System/StorageSystemFetches.h @@ -0,0 +1,28 @@ +#pragma once + +#include +#include +#include +#include + + +namespace DB +{ + +class Context; + +class StorageSystemFetches final : public ext::shared_ptr_helper, public IStorageSystemOneBlock +{ + friend struct ext::shared_ptr_helper; +public: + std::string getName() const override { return "SystemFetches"; } + + static NamesAndTypesList getNamesAndTypes(); + +protected: + using IStorageSystemOneBlock::IStorageSystemOneBlock; + + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; +}; + +} From d412e0ca2c9b5a856a9476ea15e98d92c58afa8f Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 26 Oct 2020 19:41:38 +0300 Subject: [PATCH 298/432] Move trash to "Unbundled" Dockerfile --- docker/packager/deb/Dockerfile | 4 ---- docker/packager/unbundled/Dockerfile | 4 ++++ 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/packager/deb/Dockerfile b/docker/packager/deb/Dockerfile index 4e9259b214a..8fd89d60f85 100644 --- a/docker/packager/deb/Dockerfile +++ b/docker/packager/deb/Dockerfile @@ -31,10 +31,6 @@ RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb \ && chmod +x dpkg-deb \ && cp dpkg-deb /usr/bin -RUN export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \ - && wget -nv -O /tmp/arrow-keyring.deb "https://apache.bintray.com/arrow/ubuntu/apache-arrow-archive-keyring-latest-${CODENAME}.deb" \ - && dpkg -i /tmp/arrow-keyring.deb - # Libraries from OS are only needed to test the "unbundled" build (this is not used in production). RUN apt-get update \ && apt-get install \ diff --git a/docker/packager/unbundled/Dockerfile b/docker/packager/unbundled/Dockerfile index 604f187cc58..50671011a23 100644 --- a/docker/packager/unbundled/Dockerfile +++ b/docker/packager/unbundled/Dockerfile @@ -1,6 +1,10 @@ # docker build -t yandex/clickhouse-unbundled-builder . 
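# This image layers on top of the deb-builder image; the Apache Arrow
# keyring step below was moved here from docker/packager/deb/Dockerfile so
# that the base builder image no longer carries it.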
FROM yandex/clickhouse-deb-builder +RUN export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \ + && wget -nv -O /tmp/arrow-keyring.deb "https://apache.bintray.com/arrow/ubuntu/apache-arrow-archive-keyring-latest-${CODENAME}.deb" \ + && dpkg -i /tmp/arrow-keyring.deb + # Libraries from OS are only needed to test the "unbundled" build (that is not used in production). RUN apt-get update \ && apt-get install \ From e449266a16344d60071c141fa64556b03fdefc33 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 26 Oct 2020 22:06:30 +0300 Subject: [PATCH 299/432] Fix TSan report in lgamma --- src/AggregateFunctions/AggregateFunctionStudentTTest.h | 5 +++-- src/AggregateFunctions/AggregateFunctionWelchTTest.h | 6 ++++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionStudentTTest.h b/src/AggregateFunctions/AggregateFunctionStudentTTest.h index d260a6be980..a88f8151b31 100644 --- a/src/AggregateFunctions/AggregateFunctionStudentTTest.h +++ b/src/AggregateFunctions/AggregateFunctionStudentTTest.h @@ -98,7 +98,7 @@ struct AggregateFunctionStudentTTestData final Float64 getSSquared() const { - /// The original formulae looks like + /// The original formulae looks like /// \frac{\sum_{i = 1}^{n_x}{(x_i - \bar{x}) ^ 2} + \sum_{i = 1}^{n_y}{(y_i - \bar{y}) ^ 2}}{n_x + n_y - 2} /// But we made some mathematical transformations not to store original sequences. /// Also we dropped sqrt, because later it will be squared later. @@ -150,7 +150,8 @@ struct AggregateFunctionStudentTTestData final const Float64 t = getTStatisticSquared(); auto f = [&v] (double x) { return std::pow(x, v/2 - 1) / std::sqrt(1 - x); }; Float64 numenator = integrateSimpson(0, v / (t + v), f); - Float64 denominator = std::exp(std::lgammal(v/2) + std::lgammal(0.5) - std::lgammal(v/2 + 0.5)); + int unused; + Float64 denominator = std::exp(lgammal_r(v / 2, &unused) + lgammal_r(0.5, &unused) - lgammal_r(v / 2 + 0.5, &unused)); return numenator / denominator; } diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index 175e0171606..5f0cc409ba9 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -18,6 +18,7 @@ #include + namespace ErrorCodes { extern const int BAD_ARGUMENTS; @@ -159,9 +160,10 @@ struct AggregateFunctionWelchTTestData final { const Float64 v = getDegreesOfFreedom(); const Float64 t = getTStatisticSquared(); - auto f = [&v] (double x) { return std::pow(x, v/2 - 1) / std::sqrt(1 - x); }; + auto f = [&v] (double x) { return std::pow(x, v / 2 - 1) / std::sqrt(1 - x); }; Float64 numenator = integrateSimpson(0, v / (t + v), f); - Float64 denominator = std::exp(std::lgammal(v/2) + std::lgammal(0.5) - std::lgammal(v/2 + 0.5)); + int unused; + Float64 denominator = std::exp(lgammal_r(v / 2, &unused) + lgammal_r(0.5, &unused) - lgammal_r(v / 2 + 0.5, &unused)); return numenator / denominator; } From 32ed8c968158780eb0e3fba4966b73019e06ace4 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 26 Oct 2020 22:12:40 +0300 Subject: [PATCH 300/432] Fix trailing whitespace --- src/Access/ContextAccess.cpp | 2 +- src/Columns/ColumnNullable.cpp | 2 +- src/Common/ZooKeeper/ZooKeeperImpl.cpp | 2 +- src/DataStreams/SquashingTransform.cpp | 2 +- src/Interpreters/InterpreterShowTablesQuery.cpp | 2 +- src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp | 2 +- src/Interpreters/PreparedSets.h | 3 ++- 
src/Processors/Transforms/ConvertingTransform.h | 4 +++- utils/check-style/check-style | 3 +++ 9 files changed, 14 insertions(+), 8 deletions(-) diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index 61ab4c8002d..0459022cb1a 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -100,7 +100,7 @@ namespace if (res & alter_table) res |= alter_view; - /// CREATE TABLE (on any database/table) => CREATE_TEMPORARY_TABLE (global) + /// CREATE TABLE (on any database/table) => CREATE_TEMPORARY_TABLE (global) static const AccessFlags create_temporary_table = AccessType::CREATE_TEMPORARY_TABLE; if ((level == 0) && (max_flags_with_children & create_table)) res |= create_temporary_table; diff --git a/src/Columns/ColumnNullable.cpp b/src/Columns/ColumnNullable.cpp index bdbc941c1e7..51248a598af 100644 --- a/src/Columns/ColumnNullable.cpp +++ b/src/Columns/ColumnNullable.cpp @@ -344,7 +344,7 @@ void ColumnNullable::updatePermutation(bool reverse, size_t limit, int null_dire /// Shift all NULL values to the end. for (const auto & [first, last] : equal_ranges) { - /// Current interval is righter than limit. + /// Current interval is righter than limit. if (limit && first > limit) break; diff --git a/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/src/Common/ZooKeeper/ZooKeeperImpl.cpp index abb8158781b..2db5fabbf74 100644 --- a/src/Common/ZooKeeper/ZooKeeperImpl.cpp +++ b/src/Common/ZooKeeper/ZooKeeperImpl.cpp @@ -1294,7 +1294,7 @@ void ZooKeeper::receiveEvent() if (request_info.watch) { bool add_watch = false; - /// 3 indicates the ZooKeeperExistsRequest. + /// 3 indicates the ZooKeeperExistsRequest. // For exists, we set the watch on both node exist and nonexist case. // For other case like getData, we only set the watch when node exists. if (request_info.request->getOpNum() == 3) diff --git a/src/DataStreams/SquashingTransform.cpp b/src/DataStreams/SquashingTransform.cpp index c57e2351230..1f6ca8a7306 100644 --- a/src/DataStreams/SquashingTransform.cpp +++ b/src/DataStreams/SquashingTransform.cpp @@ -27,7 +27,7 @@ Block SquashingTransform::add(const Block & input_block) /* * To minimize copying, accept two types of argument: const reference for output - * stream, and rvalue reference for input stream, and decide whether to copy + * stream, and rvalue reference for input stream, and decide whether to copy * inside this function. This allows us not to copy Block unless we absolutely * have to. 
*/ diff --git a/src/Interpreters/InterpreterShowTablesQuery.cpp b/src/Interpreters/InterpreterShowTablesQuery.cpp index ef7fd840ac5..cb5db386f5a 100644 --- a/src/Interpreters/InterpreterShowTablesQuery.cpp +++ b/src/Interpreters/InterpreterShowTablesQuery.cpp @@ -50,7 +50,7 @@ String InterpreterShowTablesQuery::getRewrittenQuery() return rewritten_query.str(); } - /// SHOW CLUSTER/CLUSTERS + /// SHOW CLUSTER/CLUSTERS if (query.clusters) { std::stringstream rewritten_query; diff --git a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp index 70916fe386d..245feae166d 100644 --- a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp +++ b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp @@ -56,7 +56,7 @@ static inline String resolveDatabase( } } - /// When USE other_database_name; CREATE TABLE table_name; + /// When USE other_database_name; CREATE TABLE table_name; /// context.getCurrentDatabase() is always return `default database` /// When USE replica_mysql_database; CREATE TABLE table_name; /// context.getCurrentDatabase() is always return replica_clickhouse_database diff --git a/src/Interpreters/PreparedSets.h b/src/Interpreters/PreparedSets.h index 0faa748303d..f486752e192 100644 --- a/src/Interpreters/PreparedSets.h +++ b/src/Interpreters/PreparedSets.h @@ -6,6 +6,7 @@ #include #include + namespace DB { @@ -16,7 +17,7 @@ struct PreparedSetKey /// if left hand sides of the IN operators have different types). static PreparedSetKey forLiteral(const IAST & ast, DataTypes types_) { - /// Remove LowCardinality types from type list because Set doesn't support LowCardinality keys now, + /// Remove LowCardinality types from type list because Set doesn't support LowCardinality keys now, /// just converts LowCardinality to ordinary types. for (auto & type : types_) type = recursiveRemoveLowCardinality(type); diff --git a/src/Processors/Transforms/ConvertingTransform.h b/src/Processors/Transforms/ConvertingTransform.h index b426a2ab525..4ae74457998 100644 --- a/src/Processors/Transforms/ConvertingTransform.h +++ b/src/Processors/Transforms/ConvertingTransform.h @@ -1,7 +1,9 @@ #pragma once + #include #include + namespace DB { @@ -46,7 +48,7 @@ private: /// How to construct result block. Position in source block, where to get each column. ColumnNumbers conversion; /// Do not check that constants are same. Use value from result_header. - /// This is needed in case run functions which are constant in query scope, + /// This is needed in case run functions which are constant in query scope, /// but may return different result being executed remotely, like `now64()` or `randConstant()`. /// In this case we replace constants from remote source to constatns from initiator. bool ignore_constant_values; diff --git a/utils/check-style/check-style b/utils/check-style/check-style index ef569c9f73e..120d41aad34 100755 --- a/utils/check-style/check-style +++ b/utils/check-style/check-style @@ -102,3 +102,6 @@ find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} -name '*.md' # Too many exclamation marks find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -F '!!!' && echo "Too many exclamation marks (looks dirty, unconfident)." + +# Trailing whitespaces +find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -P ' $' && echo "^ Trailing whitespaces." 
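A quick way to see what the new trailing-whitespace check matches; the scratch file name here is made up for the demonstration:

printf 'clean line\ndirty line \n' > /tmp/ws_demo.cpp
grep -Pn ' $' /tmp/ws_demo.cpp
# prints "2:dirty line " and exits 0, which is what lets the `&& echo`
# diagnostic in the script fire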
From baf55437fa744d4bfefe6d7cf5437e3d9a4c9406 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 26 Oct 2020 22:17:01 +0300 Subject: [PATCH 301/432] Better diagnostics in check-style script --- utils/check-style/check-style | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/utils/check-style/check-style b/utils/check-style/check-style index 120d41aad34..f9818a1e2bb 100755 --- a/utils/check-style/check-style +++ b/utils/check-style/check-style @@ -93,15 +93,15 @@ git status -uno | grep ya.make && echo "ya.make files should be generated with u find $ROOT_PATH/{src,programs,utils} -name '*.h' | while read file; do [[ $(head -n1 $file) != '#pragma once' ]] && echo "File $file must have '#pragma once' in first line"; done # Check for executable bit on non-executable files -find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} '(' -name '*.cpp' -or -name '*.h' -or -name '*.sql' -or -name '*.xml' -or -name '*.reference' -or -name '*.txt' -or -name '*.md' ')' -and -executable | grep -q '.' && echo "These files should not be executable." +find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} '(' -name '*.cpp' -or -name '*.h' -or -name '*.sql' -or -name '*.xml' -or -name '*.reference' -or -name '*.txt' -or -name '*.md' ')' -and -executable | grep -P '.' && echo "These files should not be executable." # Check for BOM -find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' | xargs grep -l -F $'\xEF\xBB\xBF' && echo "Files should not have UTF-8 BOM" -find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' | xargs grep -l -F $'\xFF\xFE' && echo "Files should not have UTF-16LE BOM" -find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' | xargs grep -l -F $'\xFE\xFF' && echo "Files should not have UTF-16BE BOM" +find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' | xargs grep -l -F $'\xEF\xBB\xBF' | grep -P '.' && echo "Files should not have UTF-8 BOM" +find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' | xargs grep -l -F $'\xFF\xFE' | grep -P '.' && echo "Files should not have UTF-16LE BOM" +find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' | xargs grep -l -F $'\xFE\xFF' | grep -P '.' && echo "Files should not have UTF-16BE BOM" # Too many exclamation marks -find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -F '!!!' && echo "Too many exclamation marks (looks dirty, unconfident)." +find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -F '!!!' | grep -P '.' && echo "Too many exclamation marks (looks dirty, unconfident)." # Trailing whitespaces -find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -P ' $' && echo "^ Trailing whitespaces." +find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | xargs grep -P ' $' | grep -P '.' && echo "^ Trailing whitespaces." 
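The `| grep -P '.'` appended to each check above acts as a non-empty-output gate: grep exits 0 only when it reads at least one character, so the `&& echo` diagnostic fires only if the pipeline in front of it actually produced matches. A two-line demonstration:

printf '' | grep -P '.' && echo 'diagnostic fires'       # no output: grep exits 1, echo is skipped
printf 'hit\n' | grep -P '.' && echo 'diagnostic fires'  # prints "hit", then the message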
From 740cf8ce820022f1ed4e0dbfbe281bf7ab53689a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 26 Oct 2020 22:20:40 +0300 Subject: [PATCH 302/432] Some aggregate functions were erroneously marked as case-insensitive --- src/AggregateFunctions/AggregateFunctionBoundingRatio.cpp | 2 +- src/AggregateFunctions/AggregateFunctionRankCorrelation.cpp | 2 +- src/AggregateFunctions/AggregateFunctionRetention.cpp | 2 +- src/AggregateFunctions/AggregateFunctionStudentTTest.cpp | 2 +- .../AggregateFunctionTimeSeriesGroupSum.cpp | 4 ++-- src/AggregateFunctions/AggregateFunctionWelchTTest.cpp | 2 +- src/AggregateFunctions/AggregateFunctionWindowFunnel.cpp | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionBoundingRatio.cpp b/src/AggregateFunctions/AggregateFunctionBoundingRatio.cpp index e338b060b12..6f4f254ae8f 100644 --- a/src/AggregateFunctions/AggregateFunctionBoundingRatio.cpp +++ b/src/AggregateFunctions/AggregateFunctionBoundingRatio.cpp @@ -31,7 +31,7 @@ AggregateFunctionPtr createAggregateFunctionRate(const std::string & name, const void registerAggregateFunctionRate(AggregateFunctionFactory & factory) { - factory.registerFunction("boundingRatio", createAggregateFunctionRate, AggregateFunctionFactory::CaseInsensitive); + factory.registerFunction("boundingRatio", createAggregateFunctionRate); } } diff --git a/src/AggregateFunctions/AggregateFunctionRankCorrelation.cpp b/src/AggregateFunctions/AggregateFunctionRankCorrelation.cpp index 20472279dba..796ff028424 100644 --- a/src/AggregateFunctions/AggregateFunctionRankCorrelation.cpp +++ b/src/AggregateFunctions/AggregateFunctionRankCorrelation.cpp @@ -45,7 +45,7 @@ AggregateFunctionPtr createAggregateFunctionRankCorrelation(const std::string & void registerAggregateFunctionRankCorrelation(AggregateFunctionFactory & factory) { - factory.registerFunction("rankCorr", createAggregateFunctionRankCorrelation, AggregateFunctionFactory::CaseInsensitive); + factory.registerFunction("rankCorr", createAggregateFunctionRankCorrelation); } } diff --git a/src/AggregateFunctions/AggregateFunctionRetention.cpp b/src/AggregateFunctions/AggregateFunctionRetention.cpp index 4497703c550..c9d475c78f3 100644 --- a/src/AggregateFunctions/AggregateFunctionRetention.cpp +++ b/src/AggregateFunctions/AggregateFunctionRetention.cpp @@ -32,7 +32,7 @@ AggregateFunctionPtr createAggregateFunctionRetention(const std::string & name, void registerAggregateFunctionRetention(AggregateFunctionFactory & factory) { - factory.registerFunction("retention", createAggregateFunctionRetention, AggregateFunctionFactory::CaseInsensitive); + factory.registerFunction("retention", createAggregateFunctionRetention); } } diff --git a/src/AggregateFunctions/AggregateFunctionStudentTTest.cpp b/src/AggregateFunctions/AggregateFunctionStudentTTest.cpp index a2c36e43488..58fc9e5b5b9 100644 --- a/src/AggregateFunctions/AggregateFunctionStudentTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionStudentTTest.cpp @@ -47,6 +47,6 @@ AggregateFunctionPtr createAggregateFunctionStudentTTest(const std::string & nam void registerAggregateFunctionStudentTTest(AggregateFunctionFactory & factory) { - factory.registerFunction("studentTTest", createAggregateFunctionStudentTTest, AggregateFunctionFactory::CaseInsensitive); + factory.registerFunction("studentTTest", createAggregateFunctionStudentTTest); } } diff --git a/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.cpp b/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.cpp 
index cd0599729b9..c8711c257f8 100644 --- a/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.cpp +++ b/src/AggregateFunctions/AggregateFunctionTimeSeriesGroupSum.cpp @@ -28,8 +28,8 @@ namespace void registerAggregateFunctionTimeSeriesGroupSum(AggregateFunctionFactory & factory) { - factory.registerFunction("timeSeriesGroupSum", createAggregateFunctionTimeSeriesGroupSum, AggregateFunctionFactory::CaseInsensitive); - factory.registerFunction("timeSeriesGroupRateSum", createAggregateFunctionTimeSeriesGroupSum, AggregateFunctionFactory::CaseInsensitive); + factory.registerFunction("timeSeriesGroupSum", createAggregateFunctionTimeSeriesGroupSum); + factory.registerFunction("timeSeriesGroupRateSum", createAggregateFunctionTimeSeriesGroupSum); } } diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp index 483c99dde9b..0dcb125305d 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.cpp @@ -44,6 +44,6 @@ AggregateFunctionPtr createAggregateFunctionWelchTTest(const std::string & name, void registerAggregateFunctionWelchTTest(AggregateFunctionFactory & factory) { - factory.registerFunction("welchTTest", createAggregateFunctionWelchTTest, AggregateFunctionFactory::CaseInsensitive); + factory.registerFunction("welchTTest", createAggregateFunctionWelchTTest); } } diff --git a/src/AggregateFunctions/AggregateFunctionWindowFunnel.cpp b/src/AggregateFunctions/AggregateFunctionWindowFunnel.cpp index 872c70c2b98..1e9f2782d95 100644 --- a/src/AggregateFunctions/AggregateFunctionWindowFunnel.cpp +++ b/src/AggregateFunctions/AggregateFunctionWindowFunnel.cpp @@ -58,7 +58,7 @@ AggregateFunctionPtr createAggregateFunctionWindowFunnel(const std::string & nam void registerAggregateFunctionWindowFunnel(AggregateFunctionFactory & factory) { - factory.registerFunction("windowFunnel", createAggregateFunctionWindowFunnel, AggregateFunctionFactory::CaseInsensitive); + factory.registerFunction("windowFunnel", createAggregateFunctionWindowFunnel); } } From 25da82328cd8bae16eae79cc844c73dfa20a50ef Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 26 Oct 2020 22:26:01 +0300 Subject: [PATCH 303/432] Adjust tests --- tests/queries/0_stateless/01322_student_ttest.sql | 4 ++-- tests/queries/0_stateless/01322_welch_ttest.sql | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/queries/0_stateless/01322_student_ttest.sql b/tests/queries/0_stateless/01322_student_ttest.sql index babc8457bc0..b8b86384bc4 100644 --- a/tests/queries/0_stateless/01322_student_ttest.sql +++ b/tests/queries/0_stateless/01322_student_ttest.sql @@ -6,7 +6,7 @@ DROP TABLE IF EXISTS student_ttest; CREATE TABLE student_ttest (left Float64, right Float64) ENGINE = Memory; INSERT INTO student_ttest VALUES (0.88854,-2.90702), (-5.76966,3.61651), (6.76618,4.27458), (3.55546,4.82133), (-9.76948,9.59483), (4.92323,1.00424), (-0.36352,2.04147), (0.97018,-3.58214), (4.61656,6.59543), (-6.78292,-1.00532), (4.02008,-3.59794), (12.41838,-2.82434), (5.14417,-3.13194), (3.86836,9.90977), (-1.26199,0.523), (12.44106,4.62779), (3.28349,-2.56872), (1.77261,2.25807), (-8.94748,1.04044), (-1.01449,-2.35744), (-1.26377,10.81531), (6.79682,-9.68469), (6.32333,3.80885), (-8.21214,12.70435), (-1.68565,-6.01112), (9.7557,1.89065), (3.66694,5.08892), (1.39967,3.45254), (-5.52035,11.58151), (-10.95601,0.85035), (0.93877,8.38397), (1.45933,1.17169), (-5.40551,4.74621), (-0.83857,-1.66614), 
(8.50794,4.2414), (-6.68686,1.68765), (5.03099,1.85223), (1.56251,9.10111), (4.17381,-2.38085), (-2.92644,-14.79595), (5.11068,-3.8938), (2.09617,-3.41864), (11.7787,-3.15282), (6.50336,-0.56684), (0.62098,12.87997), (-7.97121,6.89115), (3.81902,12.921), (0.33151,-7.94908), (10.68584,2.45687), (0.56007,2.14957), (-7.38621,7.55081), (5.05882,-3.71534), (2.34616,-2.41064), (11.3806,-0.80734), (5.95276,-4.75651), (-3.01429,2.05241), (5.98169,-5.44523), (0.96985,-2.75054), (-1.15932,-13.00131), (2.11547,-2.74451), (2.49668,-1.39004), (-12.49569,-3.02854), (-4.94667,7.65112), (-3.64215,1.1245), (-8.35595,6.74117), (3.211,-0.75777), (2.33805,8.93451), (2.38608,-8.85559), (-3.2862,-0.36405), (-0.80454,4.02742), (-0.53483,6.88718), (10.66445,-1.05124), (-0.37619,3.04085), (0.48246,3.32368), (7.41919,1.147), (0.42414,3.41554), (-2.32335,-3.47851), (-0.70223,-0.47684), (-5.9332,-0.55605), (-1.20561,-0.17006), (3.39865,2.26218), (9.61739,12.45494), (-0.78651,-1.84097), (-4.00256,1.64934), (-7.99646,-7.07496), (8.72923,-9.99462), (0.71859,6.09954), (-1.62726,-1.05319), (5.11234,3.04757), (-0.95625,0.93899), (-3.75573,-4.63243), (1.03141,-7.43322), (-3.33588,-7.298), (1.51804,-6.59016), (-3.30935,-6.11649), (-1.97507,0.56682), (4.06456,2.00661), (3.27195,-2.79814), (-7.81761,2.84482), (-3.81785,3.65348), (-4.18311,-4.22807), (-11.33313,-4.54336), (-0.25221,-3.63343), (7.2514,2.96878), (5.30301,6.11661), (2.46762,-1.70919), (4.22716,-4.71133), (0.33916,6.09652), (9.7638,-6.83454), (-7.58684,0.18006), (-4.09888,1.51676), (4.26617,-5.31646), (-0.56744,-3.21215), (4.65125,-5.07599), (-1.30301,-2.36591), (4.53771,3.55724), (9.96929,4.8904), (3.72939,-3.22586), (-2.29818,-1.74928), (3.09417,5.73458), (0.82251,1.41188), (5.29975,2.86255), (2.8685,2.90179), (-5.73321,-2.19949), (-1.85651,1.72727), (-1.07984,1.76939), (9.78342,-0.12848), (-13.49652,-0.52), (3.68791,3.48333), (1.9998,7.8262), (1.11674,0.09099), (9.43869,7.77017), (4.07029,9.49484), (5.32715,1.42825), (7.16504,1.99624), (6.66096,4.00419), (-5.7111,1.07925), (-0.38575,-0.09987), (4.49165,-5.48733), (-3.36489,-1.83517), (7.71814,2.38059), (-1.58966,1.42075), (-1.61063,-1.11968), (-0.91602,-6.46035), (0.73459,7.66576), (-3.24463,4.6307), (6.3947,5.55989), (-2.77845,3.16684), (4.45899,5.07671), (-8.84186,-10.20566), (2.62276,-4.73386), (1.774,1.28353), (4.3692,6.75679), (0.05942,12.09895), (-1.44042,7.0049), (-2.53594,7.16156), (-2.24752,-0.64311), (4.98874,-0.66747), (4.05434,3.99996), (-2.56483,9.07298), (-6.79286,-4.60971), (-2.06165,0.70744), (-0.26056,2.56774), (1.89567,9.32424), (-3.15145,3.95087), (-7.31321,7.11372), (0.28936,-0.89284), (-0.63111,8.6155), (0.22611,-0.14141), (-9.3377,-4.86319), (-5.76638,-6.95801), (3.87306,4.44883), (6.7011,4.6156), (9.03915,-2.3579), (-1.21835,-5.1186), (0.82892,8.12819), (2.80656,2.78392), (-1.34746,-4.30221), (-1.99912,-1.47506), (0.6036,6.8598), (-3.46117,0.47636), (5.23732,0.95383), (-1.86702,7.79779), (-5.86115,-2.61767), (6.48523,-10.5087), (-7.40158,-2.74299), (-1.38913,3.87369), (4.94613,-1.07093), (-2.07818,4.98864), (2.39808,-7.50772), (4.89238,6.41316), (4.39481,1.39061), (5.20425,-3.1747), (13.62598,-2.13621), (-2.86293,-0.02203), (-3.62396,0.89025), (-4.28695,-5.87746), (4.66425,3.60026), (2.20871,-0.23178), (1.60382,-2.1897), (-9.87024,-5.85101), (-7.37302,-1.6053), (-4.17814,3.6184), (2.5148,-8.53795), (3.21708,-0.35987), (-11.48089,2.15301), (1.19821,-6.60692), (-0.07436,9.54341), (-1.10652,1.11511), (4.03395,2.94025), (-4.35883,12.05657), (2.04013,3.75156), (0.52264,7.95597), 
(8.14004,-0.99449), (-8.86949,0.90597), (-0.35807,-7.90627), (-10.71113,3.50863), (-2.13755,-1.47493), (0.50715,4.11671), (6.30826,10.06325), (2.37527,-1.06059), (0.20872,-1.37737), (-5.85729,-0.42542), (-4.97217,-3.90267), (-9.78434,9.35037), (-1.53277,-7.91219), (0.14827,-4.69945), (-1.053,3.63776), (1.74558,3.46492), (11.17194,2.84518), (9.35487,-3.04301), (-9.17209,8.82764), (10.41814,7.80134), (7.41206,7.87755), (3.71775,7.01035), (-2.04674,2.43271), (6.18037,11.36418), (5.6383,-6.92659), (-0.90058,5.95541), (-1.27073,3.59436), (-2.3473,5.18429), (-8.44271,4.20225), (2.75551,0.5029), (-1.15521,4.03074), (4.08722,5.23152), (-1.70399,10.65409), (7.24114,-0.69845), (-8.43976,11.70096), (-1.53052,5.80692), (-0.00526,-8.1819), (-4.04813,4.31485), (-2.84299,5.7227), (-5.201,5.67398), (7.75774,-1.75826), (-2.85791,7.54164), (-3.86071,-1.79026), (-1.80029,-1.7395), (-5.26015,5.65042), (-3.158,0.38765), (7.71014,-4.64719), (-4.84866,-10.22048), (-8.38785,-2.05447), (7.67021,-2.43441), (4.96521,-5.38551), (-0.40919,5.47764), (-3.25711,8.26637), (3.07685,-3.6421), (2.89376,-11.66269), (-10.47331,3.972), (-3.48942,5.46642), (1.13906,-3.72304), (-8.57454,5.75251), (-3.38963,5.12841), (-2.3195,0.59067), (-1.60694,5.21138), (-5.57406,-4.58702), (-0.93075,-8.737), (-11.76579,-2.12737), (10.68283,0.22888), (8.74324,-1.46448), (7.66409,2.40311), (4.76715,-5.21814), (0.44539,13.94749), (-1.35941,-2.77448), (4.18849,-3.7867), (-6.17097,3.4954), (0.27977,3.12586), (-1.45006,-7.01485), (-4.81694,-3.20727), (-3.0297,6.31415), (0.02145,2.37521), (2.46883,8.13787), (9.60317,2.15956), (-9.93898,-0.40842), (1.05549,-7.27283), (5.55366,4.27575), (-3.80722,-2.89126), (-4.18851,6.84344), (1.00351,7.0869), (3.11385,-5.18837), (-5.17623,2.67648), (-3.18396,-6.57021), (-6.65302,0.60429), (-0.50832,-1.04921), (-4.04375,7.12873), (4.52707,1.68973), (6.63124,-2.58404), (-3.72082,-3.83114), (5.79825,-7.26546), (-2.0158,-5.07153), (-2.78369,-0.80395), (-1.91821,2.09455), (6.31714,4.33374), (-1.80869,8.54335), (8.55586,0.80566), (2.40826,-8.38085), (-8.46361,7.54812), (5.04452,8.78007), (-0.84665,1.5857), (2.30903,8.43855), (-3.71837,-1.90846), (-0.69419,-1.2434), (3.6733,7.16172), (-1.96098,-3.44129), (2.36747,-6.37542), (-12.03622,-4.99486), (4.38481,4.99033), (2.93955,-1.83734), (2.16804,-2.83289), (-0.08218,-4.13997), (-3.97934,1.40163), (-7.43985,8.57867), (0.91666,-1.87639), (7.23432,3.41667), (-6.13303,6.31762), (-10.23217,1.58473), (-6.21681,1.63625), (-0.80934,-6.93618), (0.17914,3.58046), (2.13338,-6.8097), (6.97656,4.69978), (6.90455,-1.72912), (6.25943,5.29491), (-6.04019,-1.63062), (-7.30909,5.83818), (1.4589,17.0769), (12.00208,4.54301), (2.22457,-1.33801), (-2.45912,5.64339), (-6.92213,1.26913), (4.05547,-1.01553), (0.04709,4.8316), (-7.70952,3.08635), (-1.47883,-2.27738), (1.3701,-1.13761), (-4.92928,10.08698), (-2.75872,5.33827), (-0.09178,2.84345), (2.62642,-1.51132), (-1.14623,13.46078), (2.76609,8.58965), (4.94404,-2.36683), (-7.01764,-1.8217), (-10.91568,1.96981), (-2.49738,2.31718), (0.73576,3.66493), (2.25436,1.93104), (-1.72956,5.20332), (2.41054,3.20519), (5.72149,3.34631), (-6.41371,7.0087), (3.38217,-7.96126), (1.24133,-0.62182), (10.03634,-4.65227), (-2.37303,10.6572), (-1.35543,4.50891), (-1.4387,9.74298), (-4.0976,3.85707), (-0.82501,6.41144), (-1.93498,1.48649), (5.59955,2.28076), (5.46656,2.75342), (2.43568,-5.40401), (-0.23926,7.11389), (-4.9945,5.74368), (-4.96655,6.78345), (-0.59258,3.83773), (2.02497,0.70959), (0.67583,0.57434), (3.16522,1.5888), (-1.9673,3.94889), (-6.75319,5.8234), 
(-6.69723,7.78366), (0.81148,9.08354), (4.44531,-7.99182), (-4.43522,-2.77033), (-5.28602,-10.29342), (-3.58829,1.76251), (-7.97395,2.09266), (-2.84891,4.20614), (-3.95112,-3.63064), (3.54945,-2.17794), (12.12376,-2.66225), (-3.12347,-2.74707), (3.65209,-1.93431), (9.34031,1.38629), (-0.26348,4.12816), (-5.23968,-1.58902), (2.22336,-5.08864), (-10.70405,-2.30491), (-4.41319,2.64605), (-5.94912,1.16158), (1.8147,2.63534), (7.69287,1.4956), (9.46125,-4.60768), (4.72497,0.60771), (-0.57565,3.29549), (-1.12303,-1.42592), (2.90272,0.8883), (-4.4584,-1.10612), (4.28819,-2.57296), (11.64512,5.88085), (-1.80395,7.40745), (2.51605,13.48116), (-3.18439,5.53539), (-0.70213,-1.46014), (-7.68383,3.73304), (-8.32268,3.5435), (-8.71115,-3.89151), (9.96933,4.16265), (0.95675,2.32663), (3.35114,5.31735), (-2.66008,6.33485), (7.75456,2.1339), (0.73568,0.82708), (0.3483,-2.95155), (-1.09203,-6.76019), (-7.76963,-4.20179), (5.81902,8.78354), (-3.41424,1.41863), (-0.39209,7.65689), (4.67608,-6.52601), (0.68753,-4.4426), (5.17179,-4.49483), (4.98983,-3.91479), (-0.12659,-2.84562), (3.25267,2.58974), (1.50184,2.24424), (2.94507,-4.65846), (-0.42333,8.4062), (-3.66227,8.20262), (8.90812,-8.63752), (4.74411,4.97966), (2.22018,-0.35563), (-2.07976,-4.72116), (4.8711,-2.95997), (0.5023,2.73959), (6.31569,-0.23956), (-4.36903,10.13915), (3.82146,11.83775), (-6.99477,-2.50332), (3.61225,-0.58181), (14.69335,-7.62836), (0.58368,2.26478), (4.65341,-3.50179), (-3.14272,-2.08023), (2.67048,4.07256), (4.64963,-1.40826), (-2.70828,-2.33644), (1.42923,3.00197), (5.84498,4.23668), (-4.76568,-2.24647), (0.19907,1.0445), (1.67486,-0.31901), (5.32145,8.62657), (-8.03477,3.92817), (3.46776,0.08462), (4.66374,10.15884), (-5.37394,0.4113), (5.39045,4.45847), (-1.44756,5.82941), (-1.64419,6.59202), (3.39699,-3.73441), (-2.94659,-5.86969), (-2.38437,-4.56543), (-0.23958,-1.32636), (6.88389,-0.17884), (-2.7172,-3.56181), (-1.53419,-0.66932), (7.38841,6.87538), (-5.44178,0.73527), (-0.89287,-0.24177), (2.93546,-0.8657), (-0.26901,-0.22977), (-4.70044,1.02095), (2.25846,6.16311), (-9.28813,-5.68027), (6.04268,-3.7619), (4.41693,4.22959), (1.75714,-1.5249); SELECT '-2.610898982580138', '0.00916587538237954'; -SELECT roundBankers(StudentTTest(left, right).1, 16) as t_stat, roundBankers(StudentTTest(left, right).2, 16) as p_value from student_ttest; +SELECT roundBankers(studentTTest(left, right).1, 16) as t_stat, roundBankers(studentTTest(left, right).2, 16) as p_value from student_ttest; DROP TABLE IF EXISTS student_ttest; /*Check t-stat and p-value and compare it with scipy.stat implementation @@ -15,5 +15,5 @@ DROP TABLE IF EXISTS student_ttest; CREATE TABLE student_ttest (left Float64, right Float64) ENGINE = Memory; INSERT INTO student_ttest VALUES (4.52546,8.69444), (3.73628,3.81414), (-0.39478,12.38442), (5.15633,8.9738), (0.50539,9.19594), (-5.34036,7.21009), (0.19336,4.97743), (8.35729,4.94756), (6.95818,19.80911), (-2.93812,13.75358), (8.30807,16.56373), (-3.3517,9.72882), (4.16279,4.64509), (-3.17231,17.76854), (1.93545,4.80693), (11.06606,8.79505), (-4.22678,10.88868), (-1.99975,6.21932), (-4.51178,15.11614), (-4.50711,13.24703), (1.89786,14.76476), (-6.19638,-0.6117), (-3.70188,17.48993), (5.01334,12.11847), (1.79036,4.87439), (2.14435,18.56479), (3.0282,1.23712), (2.35528,5.41596), (-12.18535,4.54994), (5.59709,11.37668), (-12.92336,9.5982), (-0.04281,6.59822), (-0.16923,1.16703), (0.88924,8.88418), (-4.68414,10.95047), (8.01099,5.52787), (2.61686,-1.11647), (-2.76895,14.49946), (3.32165,3.27585), (-0.85135,-0.42025), 
(1.21368,6.37906), (4.38673,2.5242), (6.20964,8.1405), (-1.23172,6.46732), (4.65516,9.89332), (-1.87143,10.4374), (0.86429,-1.06465), (2.51184,6.84902), (-1.88822,10.96576), (-1.61802,7.83319), (1.93653,14.39823), (-3.66631,7.02594), (-1.05294,13.46629), (-10.74718,10.39531), (16.49295,11.27348), (-7.65494,9.32187), (-3.39303,12.32667), (-4.89418,8.98905), (3.2521,9.54757), (0.05831,5.98325), (-3.00409,3.47248), (5.76702,9.26966), (2.67674,5.77816), (10.52623,6.32966), (-0.54501,9.49313), (-4.89835,6.21337), (3.52457,10.00242), (-0.0451,6.25167), (-6.61226,15.64671), (9.02391,2.78968), (5.52571,6.55442), (4.54352,3.68819), (-3.8394,9.55934), (-7.75295,4.166), (5.91167,12.32471), (1.38897,7.10969), (6.24166,16.31723), (5.58536,12.99482), (4.7591,10.11585), (-2.58336,10.29455), (-1.91263,18.27524), (3.31575,12.84435), (5.3507,13.11954), (-15.22081,12.84147), (-0.84775,15.55658), (-4.538,11.45329), (6.71177,7.50912), (0.52882,8.56226), (2.0242,8.63104), (5.69146,15.68026), (4.63328,21.6361), (0.22984,6.23925), (-2.84052,8.65714), (7.91867,9.9423), (1.11001,12.28213), (-0.11251,3.11279), (-0.20905,13.58128), (0.03287,16.51407), (-1.59397,16.60476), (-5.39405,12.02022), (-7.1233,12.11035), (4.51517,9.47832), (-0.70967,6.40742), (5.67299,8.87252), (-0.33835,15.14265), (-1.83047,2.23572), (-0.62877,11.57144), (-7.23148,18.87737), (0.1802,12.1833), (11.73325,11.17519), (2.17603,16.80422), (-0.11683,6.81423), (-1.29102,12.12546), (-0.23201,8.06153), (-6.8643,10.97228), (-6.85153,7.30596), (-4.77163,15.44026), (6.11721,8.00993), (5.96406,12.60196), (3.59135,13.96832), (-0.60095,14.03207), (3.11163,4.53758), (-0.18831,8.08297), (0.67657,4.90451), (-3.16117,8.14253), (0.26957,19.88605), (2.18653,13.85254), (-5.94611,23.01839), (-4.39352,6.02084), (-3.71525,9.60319), (5.11103,1.90511), (1.33998,10.35237), (1.01629,16.27082), (-3.36917,12.52379), (-3.99661,11.37435), (8.19336,13.61823), (2.89168,15.77622), (-11.10373,15.17254), (11.68005,6.711), (3.08282,4.74205), (-6.81506,10.09812), (-2.34587,6.61722), (-2.68725,10.34164), (0.3577,8.96602), (-3.05682,12.32157), (9.08062,11.75711), (-0.77913,13.49499), (10.35215,8.57713), (6.82565,11.50313), (-1.24674,1.13097), (5.18822,7.83205), (-3.70743,5.77957), (1.40319,15.5519), (5.89432,10.82676), (1.43152,11.51218), (6.70638,9.29779), (9.76613,9.77021), (4.27604,9.94114), (-2.63141,15.54513), (-7.8133,19.10736), (-0.06668,15.04205), (1.05391,9.03114), (4.41797,24.0104), (0.09337,9.94205), (6.16075,2.5925), (7.49413,8.82726), (-3.52872,10.0209), (-2.17126,8.1635), (-3.87605,4.24074), (3.26607,7.67291), (-3.28045,5.21642), (2.1429,11.2808), (1.53386,6.88172), (0.21169,5.98743), (-0.63674,17.97249), (5.84893,6.46323), (-0.63498,15.37416), (8.29526,2.89957), (-1.08358,17.13044), (-2.306,11.06355), (2.86991,3.09625), (-0.76074,-2.33019), (5.49191,7.42675), (1.82883,15.06792), (-3.70497,8.81116), (-0.53232,19.17446), (-11.49722,18.77181), (3.44877,14.06443), (-1.8596,12.81241), (-10.34851,2.72299), (1.13093,18.67739), (-10.93389,11.63275), (-3.39703,2.23891), (0.19749,13.01195), (-3.68389,7.43402), (-4.67863,8.14599), (10.78916,16.65328), (0.37675,1.362), (3.98094,3.87957), (-3.64775,11.16134), (-4.8443,6.25357), (1.102,4.21945), (8.72112,12.50047), (-1.47361,6.45486), (6.24183,18.99924), (6.83569,18.09508), (-3.11684,13.59528), (4.91306,3.39681), (-0.03628,13.33157), (5.1282,5.8945), (-2.38558,5.61212), (2.33351,8.41149), (-0.97191,13.78608), (-0.05588,6.08609), (-4.70019,12.76962), (-5.12371,3.26206), (0.65606,0.25528), (-0.11574,11.9083), (4.4238,4.35071), 
(6.93399,11.19855), (3.68712,13.87404), (-0.01187,6.87986), (1.8332,8.32566), (5.81322,22.51334), (-4.04709,2.5226), (-8.26397,16.84498), (-2.11273,6.26108), (5.28396,13.84824), (0.73054,6.03262), (6.43559,14.12668), (4.35565,16.01939), (-1.05545,8.19237), (5.00087,18.01595), (-2.72239,9.45609), (7.32313,6.90459), (2.11548,12.83115), (-3.40953,10.603), (6.97051,13.70439), (-0.45567,6.1633), (1.31699,4.1151), (-1.49871,8.20499), (7.14772,11.67903), (0.79277,7.30851), (6.9698,6.50941), (2.08733,7.3949), (-3.55962,12.80075), (0.75601,5.62043), (1.21,18.2542), (-2.17877,17.9393), (1.83206,16.4569), (5.72463,8.78811), (7.42257,4.85949), (0.97829,-3.36394), (7.54238,5.38683), (9.91081,12.26083), (-4.61743,10.27907), (-4.40799,11.5144), (9.99854,11.57335), (8.53725,1.94203), (3.2905,7.78228), (0.38634,11.79385), (-2.53374,10.18415), (4.94758,14.67613), (4.79624,4.70301), (5.57664,12.72151), (-6.44871,-3.35508), (3.34431,17.63775), (0.14209,2.53883), (10.88431,14.01483), (0.31846,12.4387), (-0.54703,11.15408), (-4.67791,7.74882), (-5.68011,13.60956), (-4.93362,7.81991), (1.2271,10.90969), (5.27512,8.19828), (-3.84611,-1.18523), (6.81706,0.5916), (10.33033,0.35805), (5.13979,12.98364), (3.66534,11.38628), (-2.07219,13.94644), (10.65442,2.03781), (-3.31751,10.74447), (-1.82011,12.35656), (-0.39886,7.08701), (1.77052,2.69871), (1.29049,19.66653), (7.92344,7.88636), (-2.92595,10.36916), (-2.67107,1.632), (5.64708,11.86081), (0.34639,13.47602), (-3.04356,6.60204), (3.98828,7.01303), (-1.36695,20.19992), (-8.48462,18.88249), (-4.04669,11.34367), (9.84561,12.97305), (-6.1537,9.5776), (0.82433,17.91364), (1.92449,18.3247), (2.51288,9.9211), (0.40965,7.14257), (2.89183,6.59133), (3.84347,12.35274), (0.66829,10.57523), (-3.45094,12.12859), (1.3544,9.47177), (-9.85456,0.60659), (5.25689,4.72996), (-5.26018,4.51121), (-6.16912,13.28893), (-1.77163,8.09014), (3.96687,8.02511), (0.70893,13.85406), (-5.45342,1.75412), (-3.89706,6.00641), (3.11868,6.35554), (4.41714,7.11293), (7.64841,8.30442), (0.00489,12.63024), (3.2263,12.38966), (-5.33042,7.6801), (2.52189,11.33744), (-7.40308,4.67713), (0.67891,7.62276), (2.49343,2.14478), (5.43133,15.32988), (-0.67541,1.52299), (-0.60299,17.00017), (-6.32903,8.29701), (-3.44336,10.92961), (-0.23963,6.78449), (6.94686,7.02698), (6.59442,11.51719), (-4.18532,9.97926), (-1.8228,7.44251), (-0.29443,7.58541), (2.99821,4.76058), (2.51942,12.88959), (-3.49176,9.974), (-0.57979,17.03689), (8.69471,11.14554), (-1.19427,11.7392), (-3.17119,11.50029), (-2.99566,19.41759), (-3.34493,9.65127), (-2.33826,9.87673), (-5.04164,14.13485), (-0.48214,9.78034), (7.45097,1.57826), (3.04787,3.72091), (2.92632,9.4054), (1.39694,23.22816), (4.38686,-0.12571), (3.25753,6.97343), (7.14218,10.09049), (-4.04341,11.78393), (-9.19352,3.01909), (2.78473,16.09448), (0.33331,6.25485), (9.89238,7.13164), (6.00566,7.75879), (-1.7511,9.56834), (4.77815,6.14824), (5.07457,13.53454), (2.56132,8.26364), (2.38317,8.7095), (-1.63486,10.61607), (-1.46871,10.64418), (-5.8681,23.9106), (-2.96227,11.38978), (-1.90638,11.4383), (-13.3052,18.41498), (-2.14705,3.70959), (-9.62069,19.95918), (2.29313,9.53847), (0.22162,14.04957), (-1.83956,13.70151), (4.1853,5.45046), (6.05965,10.95061), (-0.23737,9.55156), (6.07452,17.92345), (4.34629,6.23976), (4.02922,8.71029), (3.62622,13.58736), (-3.95825,8.78527), (-1.63412,11.14213), (-1.25727,12.23717), (5.06323,16.44557), (-0.66176,0.47144), (2.36606,9.7198), (-5.77792,13.50981), (4.535,14.27806), (1.02031,13.50793), (4.49345,7.47381), (-4.99791,11.07844), (2.46716,9.89844), 
(3.65471,21.48548), (11.2283,6.92085), (6.69743,4.44074), (-5.60375,19.98074), (0.28683,7.92826), (-0.85737,16.6313), (4.26726,17.17618), (-3.4322,13.80807), (-2.07039,5.37083), (-2.26798,9.73962), (-0.99818,10.66273), (0.41335,8.90639), (5.18124,12.24596), (-5.01858,16.89203), (2.05561,12.69184), (-0.12117,15.59077), (0.99471,6.94287), (6.89979,-0.1801), (-4.18527,3.25318), (-6.35104,8.08804), (3.89734,13.78384), (-1.979,0.46434), (3.15404,7.78224), (3.52672,9.10987), (2.48372,-0.89391), (-6.13089,14.3696), (2.2968,3.01763), (-2.74324,8.03559), (-0.12876,7.24609), (-1.51135,11.86271), (-3.92434,6.28196), (-1.71254,8.9725), (-1.25878,14.46114), (2.03021,9.50216), (4.31726,16.30413), (-3.02908,1.02795), (9.7093,1.88717), (-3.36284,9.80106), (6.70938,4.53487), (0.42762,16.34543), (5.04726,7.71098), (2.78386,2.74639), (6.83022,6.51875), (-3.02109,10.42308), (-0.65382,13.57901), (-15.58675,0.52784), (5.89746,4.4708), (-4.11598,6.39619), (-1.37208,14.57666), (10.08082,2.71602), (5.35686,12.53905), (1.93331,11.4292), (10.47444,12.44641), (-2.36872,14.50894), (6.50752,17.64374), (2.54603,11.03218), (-0.4332,9.82789), (5.26572,10.11104), (2.09016,2.16137), (1.15513,10.24054), (14.95941,12.86909), (-3.85505,15.22845), (-2.36239,5.05411), (1.64338,10.84836), (-4.25074,11.15717), (7.29744,0.91782), (-1.18964,13.29961), (5.60612,15.11314), (-3.77011,11.54004), (6.67642,-0.94238), (-0.06862,19.32581), (5.60514,10.20744), (3.7341,6.54857), (9.59001,8.69108), (3.30093,8.2296), (-2.75658,8.4474), (4.71994,6.81178), (0.74699,5.99415), (2.91095,13.99336), (-7.36829,8.7469), (-5.29487,8.62349), (3.31079,1.84212), (1.06974,4.4762), (-1.18424,9.25421), (-7.415,10.44229), (3.40595,12.21649), (-7.63085,10.45968), (1.13336,15.34722), (-0.0096,5.50868), (0.8928,10.93609), (-0.5943,2.78631), (7.48306,11.86145), (10.11943,18.67385), (5.60459,10.64051), (4.00189,12.75565), (2.35823,6.63666), (0.33475,12.19343), (3.47072,9.08636), (-6.68867,11.67256), (3.31031,20.31392), (2.17159,11.66443); SELECT '-28.740781574102936', '7.667329672103986e-133'; -SELECT roundBankers(StudentTTest(left, right).1, 16) as t_stat, roundBankers(StudentTTest(left, right).2, 16) as p_value from student_ttest; +SELECT roundBankers(studentTTest(left, right).1, 16) as t_stat, roundBankers(studentTTest(left, right).2, 16) as p_value from student_ttest; DROP TABLE IF EXISTS student_ttest; diff --git a/tests/queries/0_stateless/01322_welch_ttest.sql b/tests/queries/0_stateless/01322_welch_ttest.sql index 5a5b52ab612..cce65c28bd8 100644 --- a/tests/queries/0_stateless/01322_welch_ttest.sql +++ b/tests/queries/0_stateless/01322_welch_ttest.sql @@ -3,19 +3,19 @@ DROP TABLE IF EXISTS welch_ttest; CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; INSERT INTO welch_ttest VALUES (27.5,27.1), (21.0,22.0), (19.0,20.8), (23.6,23.4), (17.0,23.4), (17.9,23.5), (16.9,25.8), (20.1,22.0), (21.9,24.8), (22.6,20.2), (23.1,21.9), (19.6,22.1), (19.0,22.9), (21.7,20.5), (21.4,24.4); SELECT '0.021378001462867'; -SELECT roundBankers(WelchTTest(left, right).2, 16) from welch_ttest; +SELECT roundBankers(welchTTest(left, right).2, 16) from welch_ttest; DROP TABLE IF EXISTS welch_ttest; CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; INSERT INTO welch_ttest VALUES (30.02,29.89), (29.99,29.93), (30.11,29.72), (29.97,29.98), (30.01,30.02), (29.99,29.98); SELECT '0.090773324285671'; -SELECT roundBankers(WelchTTest(left, right).2, 16) from welch_ttest; +SELECT roundBankers(welchTTest(left, right).2, 16) from welch_ttest; DROP TABLE 
IF EXISTS welch_ttest; CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; INSERT INTO welch_ttest VALUES (0.010268,0.159258), (0.000167,0.136278), (0.000167,0.122389); SELECT '0.00339907162713746'; -SELECT roundBankers(WelchTTest(left, right).2, 16) from welch_ttest; +SELECT roundBankers(welchTTest(left, right).2, 16) from welch_ttest; DROP TABLE IF EXISTS welch_ttest; /*Check t-stat and p-value and compare it with scipy.stat implementation @@ -24,7 +24,7 @@ DROP TABLE IF EXISTS welch_ttest; CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; INSERT INTO welch_ttest VALUES (14.72789,-8.65656), (9.61661,22.98234), (13.57615,23.80821), (3.98392,13.33939), (11.98889,-4.05537), (10.99422,23.5155), (5.44792,-6.45272), (20.29346,17.7903), (7.05926,11.463), (9.22732,5.28021), (12.06847,8.39157), (13.52612,6.02464), (8.24597,14.43732), (9.35245,15.76584), (10.12297,1.54391), (15.80624,1.24897), (13.68613,27.1507), (10.72729,7.71091), (5.62078,15.71846), (6.12229,32.97808), (6.03801,-1.79334), (8.95585,-9.23439), (24.04613,11.27838), (9.04757,0.72703), (2.68263,18.51557), (15.43935,9.16619), (2.89423,17.29624), (4.01423,-1.30208), (4.30568,-3.48018), (11.99948,10.12082), (8.40574,-8.01318), (10.86642,-14.22264), (9.4266,16.58174), (-8.12752,-0.55975), (7.91634,5.61449), (7.3967,1.44626), (2.26431,7.89158), (14.20118,1.13369), (6.68233,-0.82609), (15.46221,12.23365), (7.88467,12.45443), (11.20011,14.46915), (8.92027,13.72627), (10.27926,18.41459), (5.14395,29.66702), (5.62178,1.51619), (12.84383,10.40078), (9.98009,3.33266), (-0.69789,6.12036), (11.41386,11.86553), (7.76863,6.59422), (7.21743,22.0948), (1.81176,1.79623), (9.43762,14.29513), (19.22117,19.69162), (2.97128,-7.98033), (14.32851,5.48433), (7.54959,-2.28474), (3.81545,9.91876), (10.1281,10.64097), (2.48596,0.22523), (10.0461,17.01773), (3.59714,22.37388), (9.73522,14.04215), (18.8077,23.1244), (3.15148,18.96958), (12.26062,8.42663), (5.66707,3.7165), (6.58623,14.29366), (17.30902,23.50886), (9.91391,26.33722), (5.36946,26.72396), (15.73637,13.26287), (16.96281,12.97607), (11.54063,17.41838), (18.37358,8.63875), (11.38255,17.08943), (10.53256,23.15356), (8.08833,-4.4965), (16.27556,7.58895), (2.42969,26.04074), (9.56127,6.84245), (7.32998,20.56287), (9.19511,3.84735), (9.66903,-2.76304), (4.15029,13.1615), (8.83511,8.21954), (14.60617,-3.49943), (14.06143,22.12419), (5.39556,7.08323), (10.11871,16.12937), (10.56619,-0.32672), (14.4462,16.5942), (10.42106,7.68977), (7.75551,11.39484), (11.00418,-5.11987), (4.47226,20.87404), (16.35461,8.01007), (18.55174,3.26497), (11.82044,5.61253), (7.39454,20.69182), (11.27767,0.0296), (6.83827,21.904), (7.76858,22.46572), (15.97614,3.63685), (14.53781,-5.10846), (12.99546,14.86389), (16.91151,5.47188), (9.65012,18.44095), (14.25487,16.71368), (14.03618,6.36704), (2.57382,8.82663), (2.50779,14.6727), (14.24787,7.98383), (13.34666,2.65568), (7.31102,21.45827), (10.22981,11.77948), (17.4435,4.71979), (21.2074,3.17951), (6.64191,13.90226), (18.7086,15.50578), (14.78686,10.8026), (9.85287,16.91369), (4.48263,9.90552), (14.17469,13.87322), (14.4342,4.12366), (19.2481,-3.78985), (3.47165,1.7599), (8.28712,3.43715), (8.81657,-3.45246), (0.92319,23.64571), (20.41106,-4.96877), (6.76127,3.93514), (22.00242,1.49914), (8.66129,12.71519), (10.9929,5.11521), (17.95494,4.79872), (17.20996,20.89391), (12.18888,5.363), (12.14257,8.02765), (15.81243,14.30804), (4.43362,11.49002), (1.17567,14.25281), (15.60881,7.6573), (9.34833,15.49686), (6.33513,3.29327), (-0.83095,2.27236), 
(12.43268,12.58104), (6.63207,19.19128), (11.96877,15.25901), (14.81029,6.5221), (21.84876,10.10965), (3.75896,12.75249), (6.91307,16.50977), (13.73015,-8.6697), (8.63753,8.28553), (15.71679,1.44315), (1.74565,4.65869), (9.16895,0.98149), (5.70685,0.16623), (5.00117,17.66332), (13.06888,4.35346), (7.51204,6.52742), (15.34885,-1.06631), (5.20264,-5.28454), (8.59043,14.25583), (6.45619,8.74058), (14.61979,1.89553), (11.7075,-0.92959), (14.04901,10.30289), (4.20525,-6.3744), (15.1733,-8.1706), (3.12934,10.95369), (8.08049,4.94384), (15.41273,28.40568), (16.90751,3.7004), (5.86893,2.52363), (7.1086,4.07997), (4.418,7.8849), (12.0614,17.95409), (7.07887,16.67021), (3.61585,11.34377), (11.73001,-0.07446), (10.80449,22.00223), (8.40311,3.31778), (9.91276,18.50719), (16.4164,-3.58655), (5.25034,6.5394), (15.20283,12.40459), (10.42909,16.59866), (9.53888,7.54176), (14.68939,-1.51044), (6.60007,12.69758), (18.31058,2.9842), (7.01885,2.49187), (18.71631,2.04113), (10.50002,-2.46544), (10.7517,15.18368), (4.23224,-0.04058), (2.28924,-0.4127), (8.56059,10.5526), (8.25095,12.03982), (9.15673,12.10923), (13.28409,11.54954), (8.4513,-1.18613), (2.83911,11.30984), (2.79676,23.54105), (9.11055,10.67321), (7.18529,24.09196), (-4.1258,7.5008), (5.28306,12.52233), (6.82757,4.30673), (10.89035,9.35793), (5.24822,4.44472), (11.935,-7.00679), (6.45675,8.56241), (10.18088,23.73891), (4.9932,15.62708), (18.09939,16.09205), (8.11738,12.52074), (5.37883,14.58927), (10.50339,-4.80187), (16.64093,8.47964), (14.77263,7.75477), (13.71385,12.6893), (6.98746,7.14147), (10.74635,12.12654), (5.49432,12.32334), (13.46078,7.98909), (10.67565,3.26652), (9.0291,20.53684), (11.51417,32.3369), (13.07118,19.74911), (9.5049,-4.62897), (8.50611,8.26483), (6.47606,20.88451), (13.06526,-2.12982), (19.08658,25.61459), (9.49741,5.32091), (10.60865,-4.1196), (2.28996,7.57937), (8.12846,21.15847), (5.62241,6.46355), (4.07712,7.74846), (17.98526,19.62636), (9.466,28.34629), (11.38904,26.73919), (5.91826,20.40427), (1.52059,3.03378), (18.79161,10.2537), (18.20669,7.47745), (-1.67829,10.79184), (18.01586,3.91962), (16.31577,19.97973), (7.88281,18.87711), (8.46179,12.56157), (10.31113,11.46033), (14.88377,3.78661), (1.31835,-9.45748), (2.53176,12.06033), (9.48625,-0.74615), (3.97936,13.2815), (11.52319,24.78052), (13.24178,5.83337), (7.58739,17.4111), (10.00959,19.70331), (9.73361,11.78446), (8.35716,-1.366), (1.65491,1.37458), (11.11521,16.31483), (6.08355,32.63464), (10.04582,-3.79736), (11.58237,19.17984), (16.40249,-0.27705), (1.9691,-3.69456), (13.22776,28.38058), (2.67059,-1.36876), (9.83651,-25.63301), (2.12539,3.58644), (9.27114,-6.85667), (9.0699,13.42225), (2.78179,12.04671), (12.49311,28.99468), (12.97662,7.87662), (15.06359,2.61119), (16.91565,-3.56022), (5.92011,1.50022), (5.81304,14.55836), (8.46425,9.35831), (9.48705,16.9366), (4.68191,29.23126), (5.70028,15.31386), (-0.78798,13.46112), (10.03442,7.39667), (15.45433,11.15599), (9.43845,9.80499), (3.05825,22.64923), (6.92126,8.67693), (14.05905,18.67335), (19.71579,-3.19127), (15.0131,22.94716), (4.50386,17.86834), (1.31061,16.98267), (10.81197,15.91653), (14.32942,11.79718), (9.26469,18.50208), (7.27679,8.90755), (22.69295,10.44843), (12.03763,4.67433), (7.34876,6.82287), (16.60689,10.82228), (7.48786,-4.18631), (15.78602,20.3872), (17.21048,11.84735), (13.93482,21.25376), (9.69911,10.55032), (12.24315,12.19023), (10.58131,0.63369), (19.57006,7.92381), (9.8856,17.90933), (11.70302,15.30781), (7.89864,10.01877), (12.24831,0.88744), (16.93707,22.20967), (9.65467,-4.23117), 
(4.221,21.50819), (15.45229,11.27421), (12.83088,-16.23179), (7.58313,33.43085), (12.895,5.15093), (10.02471,1.34505), (13.36059,6.027), (5.07864,-10.43035), (9.72017,27.45998), (11.05809,19.24886), (15.28528,-4.44761), (13.99834,5.453), (19.26989,12.73758), (9.41846,11.2897), (11.65425,31.032), (8.49638,7.39168), (6.38592,11.95245), (-4.69837,26.279), (12.22061,-1.0255), (9.41331,10.36675), (13.2075,11.58439), (12.97005,27.8405), (11.44352,13.1707), (9.79805,31.39133), (6.93116,27.08301), (10.07691,-2.14368), (22.05892,4.08476), (7.80353,21.5573), (-2.17276,16.69822), (0.61509,7.69955), (8.35842,8.32793), (17.77108,6.49235), (14.70841,-7.3284), (1.27992,10.58264), (15.62699,-6.17006), (9.32914,34.55782), (15.41866,10.93221), (10.82009,44.24299), (3.29902,14.6224), (9.21998,-7.42798), (7.93845,15.52351), (10.33344,11.33982), (12.06399,10.46716), (5.5308,13.0986), (8.38727,-4.25988), (18.11104,9.55316), (8.86565,0.75489), (19.41825,25.99212), (9.52376,-0.81401), (3.94552,3.49551), (9.37587,22.99402), (15.44954,10.99628), (15.90527,23.70223), (13.18927,2.71482), (7.01646,22.82309), (9.06005,31.25686), (9.06431,4.86318), (5.76006,-1.06476), (9.18705,15.10298), (-3.48446,-0.61015), (15.89817,17.81246), (12.94719,-1.55788), (23.69426,18.09709), (17.47755,9.11271), (15.61528,9.94682), (0.54832,-7.33194), (14.32916,-4.67293), (9.55305,21.81717), (13.79891,7.16318), (0.82544,13.25649), (13.34875,13.88776), (9.07614,4.95793), (5.19621,17.65303), (2.1451,14.47382), (9.87726,13.19373), (8.45439,31.86093), (-1.41842,5.73161), (7.93598,10.96492), (11.23151,6.97951), (17.84458,1.75136), (7.02237,10.96144), (10.7842,15.08137), (4.42832,9.95311), (4.45044,7.07729), (1.50938,3.08148), (21.21651,22.37954), (6.2097,8.51951), (6.84354,2.88746), (18.53804,26.73509), (12.01072,-2.88939), (4.8345,-2.82367), (20.41587,-0.35783), (14.48353,14.22076), (8.71116,11.50295), (12.42818,7.10171), (14.89244,8.28488), (8.03033,0.54178), (5.25917,13.8022), (2.30092,15.62157), (10.22504,10.79173), (15.37573,28.18946), (7.13666,30.43524), (4.45018,2.54914), (10.18405,9.89421), (3.91025,13.08631), (14.52304,4.68761), (13.14771,5.61516), (11.99219,22.88072), (9.21345,7.4735), (8.85106,11.27382), (12.91887,2.39559), (15.62308,-3.31889), (11.88034,9.61957), (15.12097,23.01381), (11.58168,-1.23467), (16.83051,9.07691), (5.25405,15.78056), (2.19976,12.28421), (4.56716,9.44888), (16.46053,13.16928), (5.61995,4.33357), (8.67704,2.21737), (5.62789,33.17833), (9.84815,13.25407), (13.05834,-2.47961), (11.74205,6.41401), (3.88393,18.8439), (16.15321,-4.63375), (4.83925,-8.2909), (13.00334,12.18221), (4.4028,-2.95356), (4.35794,19.61659), (4.47478,12.45056), (2.38713,-4.17198), (4.25235,21.9641), (10.87509,11.96416), (9.82411,12.74573), (13.61518,10.47873), (10.25507,12.73295), (4.0335,11.31373), (10.69881,9.9827), (5.70321,5.87138), (6.96244,4.24372), (9.35874,-23.72256), (6.28076,28.41337), (8.29015,4.88103), (6.88653,3.61902), (7.70687,8.93586), (8.2001,16.40759), (6.73415,27.84494), (3.82052,5.6001), (3.94469,14.51379), (15.82384,13.5576), (2.54004,12.92213), (10.74876,3.90686), (12.60517,17.07104), (17.7024,15.84268), (4.6722,17.38777), (13.67341,16.54766), (6.4565,5.94487), (12.95699,17.02804), (4.56912,7.66386), (5.58464,10.43088), (4.0638,6.16059), (13.05559,20.46178), (5.38269,20.02888), (0.16354,20.95949), (7.23962,6.50808), (7.38577,7.22366), (8.50951,8.06659), (13.72574,16.08241), (17.80421,13.83514), (3.01135,-0.33454), (8.02608,12.98848), (14.23847,12.99024); SELECT '-0.5028215369186904', '0.6152361677168877'; -SELECT 
roundBankers(WelchTTest(left, right).1, 16) as t_stat, roundBankers(WelchTTest(left, right).2, 16) as p_value from welch_ttest; +SELECT roundBankers(welchTTest(left, right).1, 16) as t_stat, roundBankers(welchTTest(left, right).2, 16) as p_value from welch_ttest; DROP TABLE IF EXISTS welch_ttest; /*Check t-stat and p-value and compare it with scipy.stat implementation @@ -33,5 +33,5 @@ DROP TABLE IF EXISTS welch_ttest; CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; INSERT INTO welch_ttest VALUES (4.82025,-2.69857), (6.13896,15.80943), (15.20277,7.31555), (14.15351,3.96517), (7.21338,4.77809), (8.55506,9.6472), (13.80816,-26.41717), (11.28411,-10.85635), (7.4612,-1.4376), (7.43759,-0.96308), (12.9832,2.84315), (-5.74783,5.79467), (12.47114,-3.06091), (15.14223,-14.62902), (3.40603,22.08022), (9.27323,-2.11982), (7.88547,-4.84824), (8.56456,-10.50447), (4.59731,2.4891), (7.91213,9.90324), (7.33894,-22.66866), (21.74811,-0.97103), (11.92111,-16.57608), (0.18828,-3.78749), (10.47314,25.84511), (20.37396,5.30797), (11.04991,-18.19466), (13.30083,11.72708), (14.28065,0.2891), (2.86942,-9.83474), (24.96072,6.69942), (14.20164,18.09604), (18.28769,18.52651), (10.50949,1.38201), (9.22273,7.64615), (11.77608,17.66598), (8.56872,-2.44141), (13.74535,-9.01598), (11.65209,27.69142), (12.51894,4.06946), (17.76256,-15.0077), (13.52122,-10.49648), (8.70796,-4.88322), (6.04749,-25.09805), (16.33064,-4.64024), (8.35636,20.94434), (14.03496,24.12126), (11.05834,-14.10962), (14.49261,10.6512), (2.59383,14.50687), (8.01022,-19.88081), (4.05458,-11.55271), (13.26384,13.16921), (14.62058,16.63864), (10.52489,-24.08114), (8.46357,-9.09949), (6.4147,-10.54702), (9.70071,0.20813), (12.47581,8.19066), (4.38333,-2.70523), (17.54172,-0.23954), (10.12109,7.19398), (7.73186,-7.1618), (14.0279,-7.44322), (11.6621,-17.92031), (17.47045,-1.58146), (15.50223,9.18338), (15.46034,3.25838), (13.39964,-14.30234), (14.98025,1.84695), (15.87912,31.13794), (17.67374,-0.85067), (9.64073,19.02787), (12.84904,-3.09594), (7.70278,13.45584), (13.03156,-5.48104), (9.04512,-22.74928), (15.97014,-8.03697), (8.96389,17.31143), (11.48009,-16.65231), (9.71153,-18.58713), (13.00084,-16.52641), (12.39803,14.95261), (13.08188,12.56762), (5.82244,15.00188), (10.81871,1.85858), (8.2539,2.1926), (7.52114,-2.4095), (9.11488,21.56873), (8.37482,3.35509), (14.48652,-4.98672), (11.42152,35.08603), (16.03111,-10.01602), (13.14057,-3.85153), (-2.26351,-6.81974), (15.50394,19.56525), (14.88603,-9.35488), (13.37257,0.24268), (11.84026,-3.51488), (7.66558,-0.37066), (6.24584,24.20888), (3.6312,-11.73537), (2.7018,0.01282), (5.63656,0.03963), (5.82643,-9.65589), (10.06745,-0.37429), (-0.5831,5.61255), (14.84202,0.49984), (9.5524,-10.15066), (19.71713,-14.54314), (14.23109,16.56889), (8.69105,-7.73873), (5.33742,-3.76422), (7.30372,1.40722), (7.93342,2.28818), (15.20884,-13.12643), (7.53839,5.17082), (13.45311,4.79089), (11.04473,-17.42643), (10.76673,8.72548), (15.44145,-3.70285), (14.06596,16.77893), (9.14873,13.382), (12.88372,19.98418), (8.74994,0.00483), (10.53263,-4.75951), (16.16694,2.35391), (8.37197,21.65809), (3.43739,-9.2714), (4.72799,-18.38253), (9.08802,7.23097), (11.2531,14.97927), (5.16115,-4.02197), (10.20895,-29.8189), (18.70884,-12.8554), (15.88924,-7.60124), (3.38758,-14.90158), (6.46449,-3.31486), (10.21088,31.38144), (14.08458,-8.61288), (15.74508,15.31895), (19.31896,-10.19488), (13.19641,13.796), (11.95409,-0.32912), (10.70718,-0.0684), (1.05245,-30.06834), (10.04772,24.93912), (17.01369,-3.26506), 
(10.2286,-8.29751), (19.58323,-5.39189), (7.02892,-25.08603), (4.16866,-1.45318), (8.94326,16.72724), (4.99854,-3.38467), (8.88352,-26.00478), (18.65422,7.28369), (17.32328,16.96226), (9.33492,16.5858), (14.94788,10.46583), (8.05863,3.84345), (14.6737,-2.99382), (10.93801,1.42078), (0.54036,-11.0123), (-0.34242,2.09909), (5.89076,1.21064), (3.15189,15.36079), (1.94421,-21.61349), (6.38698,22.7726), (10.50654,10.50512), (8.95362,-6.95825), (6.23711,9.20036), (11.75359,15.66902), (12.42155,3.28098), (-1.55472,-9.05692), (4.6688,0.32882), (10.48087,-1.64934), (11.74615,-4.81406), (9.26822,-5.06006), (7.55517,19.97493), (12.76005,2.88646), (16.47102,-0.34552), (11.31297,7.55186), (14.37437,-22.96115), (2.38799,31.29166), (6.44577,6.18798), (5.07471,-2.52715), (11.55123,-11.58799), (7.76795,14.13596), (10.60116,13.45069), (14.40885,12.15179), (11.58158,3.44491), (8.81648,-8.78006), (12.92299,18.32087), (11.26939,11.91757), (17.95014,-2.00179), (2.95002,10.88411), (17.41959,9.09327), (11.12455,6.62484), (8.78541,8.87178), (14.36413,11.52254), (12.98554,-14.15988), (12.58505,-17.19515), (15.49789,14.03089), (11.70999,-2.4095), (0.65596,-16.83575), (11.08202,2.71469), (14.75752,4.84351), (6.84385,-1.17651), (9.27245,-3.37529), (13.78243,-19.92137), (17.4863,4.48952), (4.01777,-12.4906), (11.82861,-5.65277), (13.86551,8.50819), (6.16591,-19.61261), (8.71589,12.54156), (16.77195,11.06784), (17.23243,-12.59285), (-2.12941,3.43683), (5.66629,-3.00325), (12.45153,12.49082), (1.63971,7.20955), (13.84031,17.6547), (4.6144,15.8619), (5.26169,24.3048), (9.27769,-8.05434), (9.14288,-6.06901), (9.71953,-15.69515), (9.38446,-11.13917), (1.64788,-3.90757), (11.72922,-2.57038), (13.68926,5.14065), (9.42952,17.8497), (12.05574,-8.64665), (9.09148,-18.68331), (5.32273,5.8567), (20.25258,-20.93884), (10.14599,4.40583), (10.82156,14.35985), (5.75736,4.18134), (7.13567,4.3635), (9.29746,9.35428), (5.1618,2.8908), (10.076,16.01017), (21.65669,-1.48499), (13.35486,-9.97949), (6.79957,1.03055), (8.76243,-2.79697), (14.59294,6.85977), (16.90609,4.73213), (10.50337,2.7815), (-0.07923,-2.46866), (13.51648,18.39425), (12.0676,-0.80378), (0.86482,-0.22982), (9.03563,-16.11608), (5.38751,3.0862), (17.16866,3.20779), (2.78702,10.50146), (11.15548,-0.21305), (12.30843,11.21012), (8.04897,-0.99825), (9.95814,18.39633), (11.29308,-3.39003), (14.13032,-0.64411), (21.05877,-1.39932), (3.57386,15.45319), (7.96631,-0.66044), (3.30484,-15.2223), (18.61856,-34.39907), (16.35184,-3.57836), (7.65236,16.82828), (18.02895,1.66624), (9.79458,15.43475), (16.7274,8.17776), (8.84453,5.50486), (13.05709,10.43082), (10.91447,-6.63332), (8.40171,2.28008), (16.95211,16.37203), (11.82194,5.16313), (19.87978,-8.85281), (12.88455,13.26692), (-0.00947,-7.46842), (12.28109,8.43091), (6.96462,-13.18172), (13.75282,-0.72401), (14.39141,22.3881), (11.07193,10.65448), (12.88039,2.81289), (11.38253,10.92405), (21.02707,-8.95358), (7.51955,19.80653), (6.31984,-12.86527), (15.6543,5.38826), (14.80315,-6.83501), (8.38024,-15.7647), (21.7516,-27.67412), (14.31336,8.6499), (15.04703,-4.89542), (5.73787,16.76167), (13.16911,12.84284), (12.40695,-17.27324), (9.88968,-4.18726), (8.46703,-14.62366), (8.70637,-5.49863), (8.03551,-16.22846), (5.9757,10.60329), (12.22951,6.46781), (3.14736,1.70458), (10.51266,10.77448), (18.593,0.8463), (10.82213,13.0482), (7.14216,-4.36264), (6.81154,3.22647), (-0.6486,2.38828), (20.56136,6.7946), (11.35367,-0.25254), (11.38205,1.2497), (17.14,1.6544), (14.91215,4.1019), (15.50207,11.27839), (5.93162,-5.04127), (3.74869,18.11674), 
(14.11532,0.51231), (7.38954,-0.51029), (5.45764,13.52556), (18.33733,16.10171), (9.91923,5.68197), (2.38991,-2.85904), (14.16756,-8.89167), (2.39791,6.24489), (6.92586,10.85319), (5.32474,-0.39816), (2.28812,3.87079), (5.71718,-3.1867), (5.84197,1.55322), (2.76206,16.86779), (19.05928,-14.60321), (11.51788,-1.81952), (6.56648,-3.11624), (3.35735,1.24193), (7.55948,10.18179), (19.99908,4.69796), (13.00634,0.69032), (18.36886,11.7723), (11.14675,7.62896), (16.72931,9.89741), (12.50106,9.11484), (6.00605,-3.84676), (23.06653,-0.4777), (5.39694,0.95958), (9.53167,-7.95056), (12.76944,-10.97474), (7.20604,-6.54861), (13.25391,34.74933), (13.7341,27.39463), (10.85292,4.18299), (-7.75835,6.02476), (10.29728,-1.99397), (13.70099,1.26478), (10.17959,23.37106), (9.98399,10.49682), (12.69389,-11.04354), (-0.28848,-12.22284), (-2.18319,-9.87635), (13.36378,28.90511), (10.09232,6.77613), (5.49489,0.55352), (5.46156,0.37031), (0.94225,7.1418), (12.79205,3.24897), (10.09593,-1.60918), (6.06218,3.1675), (0.89463,-17.97072), (11.88986,-5.61743), (10.79733,14.1422), (1.51371,14.87695), (2.20967,-4.65961), (15.45732,-0.99174), (16.5262,-2.96623), (5.99724,-9.02263), (8.3613,-17.2088), (15.68183,2.78608), (15.32117,6.74239), (14.15674,4.8524), (6.64553,7.46731), (4.20777,1.04894), (-0.10521,-12.8023), (-0.88169,-17.18188), (1.85913,-5.08801), (9.73673,22.13942), (0.30926,-0.36384), (6.17559,17.80564), (11.76602,7.67504), (5.68385,1.59779), (14.57088,4.10942), (12.81509,0.61074), (9.85682,-14.40767), (12.06376,10.59906), (6.08874,16.57017), (11.63921,-15.17526), (14.86722,-6.98549), (10.41035,-0.64548), (2.93794,3.23756), (12.21841,14.65504), (0.23804,4.583), (3.14845,12.72378), (7.29748,5.26547), (3.06134,0.81781), (13.77684,9.38273), (16.21992,10.37636), (5.33511,10.70325), (9.68959,-0.83043), (9.44169,-7.53149), (18.08012,-9.09147), (4.04224,-19.51381), (8.77918,-28.44508), (10.18324,6.44392), (9.38914,11.10201), (11.76995,-2.86184), (14.19963,8.30673), (6.88817,8.8797), (16.56123,10.68053), (15.39885,15.62919), (5.21241,8.00579), (4.44408,6.4651), (17.87587,-4.50029), (12.53337,18.04514), (13.60916,11.12996), (6.60104,-5.14007), (7.35453,9.43857), (18.61572,3.13476), (6.10437,4.9772), (13.08682,-17.45782), (12.15404,0.05552), (4.90789,-1.90283), (2.13353,2.67908), (12.49593,-2.62243), (11.93056,-3.22767), (13.29408,-8.70222), (5.70038,-23.11605), (8.40271,21.6757), (5.19456,12.70076), (-5.51028,4.4322), (14.0329,11.69344), (10.38365,9.18052), (6.56812,-2.2549), (4.21129,-2.15615), (9.7157,20.29765), (9.88553,-0.29536), (13.45346,15.50109), (4.97752,8.79187), (12.77595,5.11533), (8.56465,-20.44436), (4.27703,-3.00909), (18.12502,-4.48291), (12.45735,21.84462), (12.42912,1.94225), (12.08125,-2.81908), (10.85779,17.19418), (4.36013,-9.33528), (11.85062,-0.17346), (8.47776,0.03958), (9.60822,-35.17786), (11.3069,8.36887), (14.25525,-9.02292), (1.55168,-10.98804), (14.57782,0.29335), (7.84786,4.29634), (9.87774,3.87718), (14.75575,-9.08532), (3.68774,7.13922), (9.37667,-7.62463), (20.28676,-10.5666), (12.10027,4.68165), (8.01819,-3.30172), (18.78158,13.04852), (20.85402,13.45616), (18.98069,2.41043), (16.1429,-0.36501), (9.24047,-15.67383), (14.12487,17.92217), (10.18841,8.42106), (-3.04478,3.22063), (5.7552,-7.31753), (9.30376,21.99596), (11.42837,-36.8273), (6.02364,-20.46391), (8.86984,5.74179), (10.91177,-15.83178), (10.04418,14.90454), (18.10774,-8.84645), (7.49384,3.72036), (9.11556,4.6877), (9.7051,16.35418), (5.23268,3.15441), (9.04647,2.39907), (8.81547,-17.58664), (2.65098,-13.18269); SELECT 
'14.971190998235835', '5.898143508382202e-44'; -SELECT roundBankers(WelchTTest(left, right).1, 16) as t_stat, roundBankers(WelchTTest(left, right).2, 16) as p_value from welch_ttest; +SELECT roundBankers(welchTTest(left, right).1, 16) as t_stat, roundBankers(welchTTest(left, right).2, 16) as p_value from welch_ttest; DROP TABLE IF EXISTS welch_ttest; From 73f3aac66380c8d34da999efb139b0a79811f455 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 27 Oct 2020 00:07:24 +0300 Subject: [PATCH 304/432] Fix assert error in quantileDeterministic #15683 --- .../ReservoirSamplerDeterministic.h | 70 ++++++++----------- 1 file changed, 30 insertions(+), 40 deletions(-) diff --git a/src/AggregateFunctions/ReservoirSamplerDeterministic.h b/src/AggregateFunctions/ReservoirSamplerDeterministic.h index 3097070c651..f0f926ce31e 100644 --- a/src/AggregateFunctions/ReservoirSamplerDeterministic.h +++ b/src/AggregateFunctions/ReservoirSamplerDeterministic.h @@ -39,8 +39,8 @@ namespace ErrorCodes namespace detail { -const size_t DEFAULT_SAMPLE_COUNT = 8192; -const auto MAX_SKIP_DEGREE = sizeof(UInt32) * 8; + const size_t DEFAULT_MAX_SAMPLE_SIZE = 8192; + const auto MAX_SKIP_DEGREE = sizeof(UInt32) * 8; } /// What if there is not a single value - throw an exception, or return 0 or NaN in the case of double? @@ -50,6 +50,7 @@ enum class ReservoirSamplerDeterministicOnEmpty RETURN_NAN_OR_ZERO, }; + template class ReservoirSamplerDeterministic @@ -60,8 +61,8 @@ class ReservoirSamplerDeterministic } public: - ReservoirSamplerDeterministic(const size_t sample_count_ = DEFAULT_SAMPLE_COUNT) - : sample_count{sample_count_} + ReservoirSamplerDeterministic(const size_t max_sample_size_ = detail::DEFAULT_MAX_SAMPLE_SIZE) + : max_sample_size{max_sample_size_} { } @@ -131,8 +132,8 @@ public: void merge(const ReservoirSamplerDeterministic & b) { - if (sample_count != b.sample_count) - throw Poco::Exception("Cannot merge ReservoirSamplerDeterministic's with different sample_count"); + if (max_sample_size != b.max_sample_size) + throw Poco::Exception("Cannot merge ReservoirSamplerDeterministic's with different max sample count"); sorted = false; if (b.skip_degree > skip_degree) @@ -150,11 +151,16 @@ public: void read(DB::ReadBuffer & buf) { - DB::readIntBinary(sample_count, buf); + size_t size = 0; + DB::readIntBinary(size, buf); DB::readIntBinary(total_values, buf); - samples.resize(std::min(total_values, sample_count)); - for (size_t i = 0; i < samples.size(); ++i) + /// Compatibility with old versions. + if (size > total_values) + size = total_values; + + samples.resize(size); + for (size_t i = 0; i < size; ++i) DB::readPODBinary(samples[i], buf); sorted = false; @@ -162,10 +168,11 @@ public: void write(DB::WriteBuffer & buf) const { - DB::writeIntBinary(sample_count, buf); + size_t size = samples.size(); + DB::writeIntBinary(size, buf); DB::writeIntBinary(total_values, buf); - for (size_t i = 0; i < std::min(sample_count, total_values); ++i) + for (size_t i = 0; i < size; ++i) DB::writePODBinary(samples[i], buf); } @@ -174,18 +181,19 @@ private: using Element = std::pair; using Array = DB::PODArray; - size_t sample_count; - size_t total_values{}; - bool sorted{}; + const size_t max_sample_size; /// Maximum amount of stored values. + size_t total_values = 0; /// How many values were inserted (regardless if they remain in sample or not). + bool sorted = false; Array samples; - UInt8 skip_degree{}; + UInt8 skip_degree = 0; /// The number N determining that we save only one per 2^N elements in average. 
void insertImpl(const T & v, const UInt32 hash) { - /// @todo why + 1? I don't quite recall - while (samples.size() + 1 >= sample_count) + /// Make room for one more element. + while (samples.size() >= max_sample_size) { - if (++skip_degree > detail::MAX_SKIP_DEGREE) + ++skip_degree; + if (skip_degree > detail::MAX_SKIP_DEGREE) throw DB::Exception{"skip_degree exceeds maximum value", DB::ErrorCodes::MEMORY_LIMIT_EXCEEDED}; thinOut(); } @@ -195,35 +203,17 @@ private: void thinOut() { - auto size = samples.size(); - for (size_t i = 0; i < size;) - { - if (!good(samples[i].second)) - { - /// swap current element with the last one - std::swap(samples[size - 1], samples[i]); - --size; - } - else - ++i; - } - - if (size != samples.size()) - { - samples.resize(size); - sorted = false; - } + samples.resize(std::distance(samples.begin(), + std::remove_if(samples.begin(), samples.end(), [this](const auto & elem){ return !good(elem.second); }))); + sorted = false; } void sortIfNeeded() { if (sorted) return; + std::sort(samples.begin(), samples.end(), [](const auto & lhs, const auto & rhs) { return lhs.first < rhs.first; }); sorted = true; - std::sort(samples.begin(), samples.end(), [] (const std::pair & lhs, const std::pair & rhs) - { - return lhs.first < rhs.first; - }); } template From f78e0d48b7833fa6f6ee2f2c22876b69755860c1 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 27 Oct 2020 00:14:49 +0300 Subject: [PATCH 305/432] Added a test --- .../0_stateless/01533_quantile_deterministic_assert.reference | 1 + .../queries/0_stateless/01533_quantile_deterministic_assert.sql | 1 + 2 files changed, 2 insertions(+) create mode 100644 tests/queries/0_stateless/01533_quantile_deterministic_assert.reference create mode 100644 tests/queries/0_stateless/01533_quantile_deterministic_assert.sql diff --git a/tests/queries/0_stateless/01533_quantile_deterministic_assert.reference b/tests/queries/0_stateless/01533_quantile_deterministic_assert.reference new file mode 100644 index 00000000000..231c72269ca --- /dev/null +++ b/tests/queries/0_stateless/01533_quantile_deterministic_assert.reference @@ -0,0 +1 @@ +3998 diff --git a/tests/queries/0_stateless/01533_quantile_deterministic_assert.sql b/tests/queries/0_stateless/01533_quantile_deterministic_assert.sql new file mode 100644 index 00000000000..c75e5dd501f --- /dev/null +++ b/tests/queries/0_stateless/01533_quantile_deterministic_assert.sql @@ -0,0 +1 @@ +SELECT quantileDeterministic(number, sipHash64(number)) FROM remote('127.0.0.{1,2}', numbers(8193)); From a34b0880c31440a59e6aca5284211a10b1699c4f Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Tue, 27 Oct 2020 00:21:56 +0300 Subject: [PATCH 306/432] Update ReservoirSamplerDeterministic.h --- src/AggregateFunctions/ReservoirSamplerDeterministic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/AggregateFunctions/ReservoirSamplerDeterministic.h b/src/AggregateFunctions/ReservoirSamplerDeterministic.h index f0f926ce31e..eae24c1f3e9 100644 --- a/src/AggregateFunctions/ReservoirSamplerDeterministic.h +++ b/src/AggregateFunctions/ReservoirSamplerDeterministic.h @@ -133,7 +133,7 @@ public: void merge(const ReservoirSamplerDeterministic & b) { if (max_sample_size != b.max_sample_size) - throw Poco::Exception("Cannot merge ReservoirSamplerDeterministic's with different max sample count"); + throw Poco::Exception("Cannot merge ReservoirSamplerDeterministic's with different max sample size"); sorted = false; if (b.skip_degree > skip_degree) From
f95aaa336e06cacb02b1b488f8c1a865d4a21576 Mon Sep 17 00:00:00 2001 From: Vladimir Chebotarev Date: Tue, 27 Oct 2020 00:23:50 +0300 Subject: [PATCH 307/432] Empty commit to re-run checks. From 59910991b6745e779c8492bbf41928d957bcdaa8 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 27 Oct 2020 00:33:46 +0300 Subject: [PATCH 308/432] Fix "Darwin" --- src/AggregateFunctions/AggregateFunctionStudentTTest.h | 10 +++++++++- src/AggregateFunctions/AggregateFunctionWelchTTest.h | 10 +++++++++- src/Functions/lgamma.cpp | 1 - 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionStudentTTest.h b/src/AggregateFunctions/AggregateFunctionStudentTTest.h index a88f8151b31..5084e34e56f 100644 --- a/src/AggregateFunctions/AggregateFunctionStudentTTest.h +++ b/src/AggregateFunctions/AggregateFunctionStudentTTest.h @@ -20,9 +20,17 @@ namespace ErrorCodes { -extern const int BAD_ARGUMENTS; + extern const int BAD_ARGUMENTS; } +#if defined(OS_DARWIN) +extern "C" +{ + double lgamma_r(double x, int * signgamp); +} +#endif + + namespace DB { diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index 5f0cc409ba9..8fd7ebeee6b 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -21,9 +21,17 @@ namespace ErrorCodes { -extern const int BAD_ARGUMENTS; + extern const int BAD_ARGUMENTS; } +#if defined(OS_DARWIN) +extern "C" +{ + double lgamma_r(double x, int * signgamp); +} +#endif + + namespace DB { diff --git a/src/Functions/lgamma.cpp b/src/Functions/lgamma.cpp index 51b3dfd97df..e4da0d8dfbd 100644 --- a/src/Functions/lgamma.cpp +++ b/src/Functions/lgamma.cpp @@ -4,7 +4,6 @@ #if defined(OS_DARWIN) extern "C" { - /// Is defined in libglibc-compatibility.a double lgamma_r(double x, int * signgamp); } #endif From f694eca7dc7c634e0287dfde3531ebff92379d6b Mon Sep 17 00:00:00 2001 From: Vladimir Chebotarev Date: Tue, 27 Oct 2020 01:43:46 +0300 Subject: [PATCH 309/432] Empty commit to re-run checks. From 81a5f540d7dcb949a1221d966490a00acee6de9d Mon Sep 17 00:00:00 2001 From: Vladimir Chebotarev Date: Tue, 27 Oct 2020 01:44:34 +0300 Subject: [PATCH 310/432] Added redundant empty line. 
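For context on the quantileDeterministic commits above: ReservoirSamplerDeterministic keeps at most max_sample_size (value, hash) pairs and, once full, raises skip_degree and thins the sample. The sketch below is a rough Python reconstruction of that idea, for illustration only; the class shape, method names and 32-bit hashing are assumptions, and the real code hashes a user-supplied determinator (sipHash64(number) in the test above) rather than computing its own.

MAX_SKIP_DEGREE = 32  # mirrors sizeof(UInt32) * 8 in the header above

class ReservoirSketch:
    # Illustrative reconstruction, not the actual ClickHouse implementation.
    def __init__(self, max_sample_size=8192):
        self.max_sample_size = max_sample_size
        self.total_values = 0   # how many values were inserted overall
        self.skip_degree = 0    # keep roughly one value per 2**skip_degree
        self.samples = []       # (value, hash) pairs

    def good(self, h):
        # A value survives iff the low skip_degree bits of its hash are zero.
        # Survival depends only on the hash, so replicas that hash the same
        # determinators keep identical samples, which is what makes merging
        # two samplers (e.g. from two shards) well defined.
        return (h & ((1 << self.skip_degree) - 1)) == 0

    def insert(self, value, hash32):
        self.total_values += 1
        # Make room for one more element, thinning the sample as needed.
        while len(self.samples) >= self.max_sample_size:
            self.skip_degree += 1
            if self.skip_degree > MAX_SKIP_DEGREE:
                raise RuntimeError("skip_degree exceeds maximum value")
            # thinOut(): drop every stored pair that no longer passes good()
            self.samples = [s for s in self.samples if self.good(s[1])]
        if self.good(hash32):
            self.samples.append((value, hash32))

The assert fixed above came from the serialized state: the code now writes the actual number of stored samples instead of sample_count, and clamps it to total_values on read for compatibility with old states.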
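The reference values in the studentTTest and welchTTest queries further above come, per the comments in those tests, from scipy.stats, and the lgamma_r declared for Darwin in the commit above is what those p-value computations rely on. Assuming SciPy is installed, the first welch_ttest case can be cross-checked as follows; the data and the expected p-value are copied verbatim from that test:

# Cross-check of the first welch_ttest case against SciPy.
from scipy import stats

left = [27.5, 21.0, 19.0, 23.6, 17.0, 17.9, 16.9, 20.1,
        21.9, 22.6, 23.1, 19.6, 19.0, 21.7, 21.4]
right = [27.1, 22.0, 20.8, 23.4, 23.4, 23.5, 25.8, 22.0,
         24.8, 20.2, 21.9, 22.1, 22.9, 20.5, 24.4]

# equal_var=False selects Welch's t-test (welchTTest); equal_var=True
# would be the Student's t-test counterpart (studentTTest).
t_stat, p_value = stats.ttest_ind(left, right, equal_var=False)
print(p_value)  # should print ~0.021378001462867, the value the test expects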
--- tests/integration/test_multiple_disks/test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/test_multiple_disks/test.py b/tests/integration/test_multiple_disks/test.py index 07478d99657..bc60553d005 100644 --- a/tests/integration/test_multiple_disks/test.py +++ b/tests/integration/test_multiple_disks/test.py @@ -1551,3 +1551,4 @@ def test_yes_merges_in_configuration_disallow_from_query_with_reload(start_clust finally: node1.query("SYSTEM START MERGES ON VOLUME {}.external".format(policy)) + From d6130f13ef47100dc733fd144b2e2225365656f0 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 27 Oct 2020 10:21:49 +0300 Subject: [PATCH 311/432] Add table to system tables --- src/Storages/System/attachSystemTables.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Storages/System/attachSystemTables.cpp b/src/Storages/System/attachSystemTables.cpp index 2b7ee363f05..012b54e3c45 100644 --- a/src/Storages/System/attachSystemTables.cpp +++ b/src/Storages/System/attachSystemTables.cpp @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -137,6 +138,7 @@ void attachSystemTablesServer(IDatabase & system_database, bool has_zookeeper) attach(system_database, "clusters"); attach(system_database, "graphite_retentions"); attach(system_database, "macros"); + attach(system_database, "fetches"); if (has_zookeeper) attach(system_database, "zookeeper"); From 381af53d4a17f301c826e4d4384db3f037e364fa Mon Sep 17 00:00:00 2001 From: tavplubix Date: Tue, 27 Oct 2020 11:44:58 +0300 Subject: [PATCH 312/432] Update clickhouse-test --- tests/clickhouse-test | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index d5736001d60..626d7ede814 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -180,7 +180,7 @@ def need_retry(stderr): def get_processlist(client_cmd): try: - return subprocess.check_output("{} --query 'SHOW PROCESSLIST FORMAT Vertical'".format(client_cmd), shell=True) + return subprocess.check_output("{} --query 'SHOW PROCESSLIST FORMAT Vertical'".format(client_cmd), shell=True).decode('utf-8') except: return "" # server seems dead @@ -189,7 +189,7 @@ def get_processlist(client_cmd): def get_stacktraces_from_gdb(server_pid): cmd = "gdb -batch -ex 'thread apply all backtrace' -p {}".format(server_pid) try: - return subprocess.check_output(cmd, shell=True) + return subprocess.check_output(cmd, shell=True).decode('utf-8') except Exception as ex: return "Error occured while receiving stack traces from gdb: {}".format(str(ex)) @@ -198,7 +198,10 @@ def get_stacktraces_from_gdb(server_pid): # it does not work in Sandbox def get_stacktraces_from_clickhouse(client): try: - return subprocess.check_output("{} --allow_introspection_functions=1 --query \"SELECT arrayStringConcat(arrayMap(x, y -> concat(x, ': ', y), arrayMap(x -> addressToLine(x), trace), arrayMap(x -> demangle(addressToSymbol(x)), trace)), '\n') as trace FROM system.stack_trace format Vertical\"".format(client), shell=True) + return subprocess.check_output("{} --allow_introspection_functions=1 --query " + "\"SELECT arrayStringConcat(arrayMap(x, y -> concat(x, ': ', y), arrayMap(x -> addressToLine(x), trace), " + "arrayMap(x -> demangle(addressToSymbol(x)), trace)), '\n') as trace " + "FROM system.stack_trace format Vertical\"".format(client), shell=True).decode('utf-8') except Exception as ex: return "Error occured while receiving stack traces from client: {}".format(str(ex)) From cd0dbcb9d8259bc061a39cd835c768ce5bb8bbe3 
Mon Sep 17 00:00:00 2001 From: kssenii Date: Sun, 25 Oct 2020 07:44:04 +0000 Subject: [PATCH 313/432] Minimize event loop lifetime --- .../RabbitMQ/RabbitMQBlockInputStream.h | 1 + src/Storages/RabbitMQ/StorageRabbitMQ.cpp | 97 ++++++++++--------- src/Storages/RabbitMQ/StorageRabbitMQ.h | 1 + 3 files changed, 52 insertions(+), 47 deletions(-) diff --git a/src/Storages/RabbitMQ/RabbitMQBlockInputStream.h b/src/Storages/RabbitMQ/RabbitMQBlockInputStream.h index f68b79275f6..5f2c2a62018 100644 --- a/src/Storages/RabbitMQ/RabbitMQBlockInputStream.h +++ b/src/Storages/RabbitMQ/RabbitMQBlockInputStream.h @@ -30,6 +30,7 @@ public: Block readImpl() override; void readSuffixImpl() override; + bool queueEmpty() const { return !buffer || buffer->queueEmpty(); } bool needChannelUpdate(); void updateChannel(); bool sendAck(); diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp index 6565afeb32d..6fa8d52764d 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp @@ -38,8 +38,9 @@ namespace DB static const auto CONNECT_SLEEP = 200; static const auto RETRIES_MAX = 20; -static const auto HEARTBEAT_RESCHEDULE_MS = 3000; static const uint32_t QUEUE_SIZE = 100000; +static const auto MAX_FAILED_READ_ATTEMPTS = 10; +static const auto MAX_THREAD_WORK_DURATION_MS = 60000; namespace ErrorCodes { @@ -122,9 +123,6 @@ StorageRabbitMQ::StorageRabbitMQ( streaming_task = global_context.getSchedulePool().createTask("RabbitMQStreamingTask", [this]{ streamingToViewsFunc(); }); streaming_task->deactivate(); - heartbeat_task = global_context.getSchedulePool().createTask("RabbitMQHeartbeatTask", [this]{ heartbeatFunc(); }); - heartbeat_task->deactivate(); - if (queue_base.empty()) { /* Make sure that local exchange name is unique for each table and is not the same as client's exchange name. 
It also needs to @@ -210,16 +208,6 @@ Context StorageRabbitMQ::addSettings(Context context) const } -void StorageRabbitMQ::heartbeatFunc() -{ - if (!stream_cancelled && event_handler->connectionRunning()) - { - connection->heartbeat(); - heartbeat_task->scheduleAfter(HEARTBEAT_RESCHEDULE_MS); - } -} - - void StorageRabbitMQ::loopingFunc() { if (event_handler->connectionRunning()) @@ -402,7 +390,6 @@ bool StorageRabbitMQ::restoreConnection(bool reconnecting) if (reconnecting) { - deactivateTask(heartbeat_task, false, false); connection->close(); /// Connection might be unusable, but not closed /* Connection is not closed immediately (firstly, all pending operations are completed, and then @@ -452,7 +439,6 @@ void StorageRabbitMQ::unbindExchange() */ std::call_once(flag, [&]() { - heartbeat_task->deactivate(); streaming_task->deactivate(); event_handler->updateLoopState(Loop::STOP); looping_task->deactivate(); @@ -499,8 +485,6 @@ Pipe StorageRabbitMQ::read( deactivateTask(looping_task, false, true); update_channels = restoreConnection(true); - if (update_channels) - heartbeat_task->scheduleAfter(HEARTBEAT_RESCHEDULE_MS); } Pipes pipes; @@ -521,7 +505,6 @@ Pipe StorageRabbitMQ::read( if (event_handler->loopRunning()) { deactivateTask(looping_task, false, true); - deactivateTask(heartbeat_task, false, false); } rabbit_stream->updateChannel(); @@ -568,7 +551,6 @@ void StorageRabbitMQ::startup() event_handler->updateLoopState(Loop::RUN); streaming_task->activateAndSchedule(); - heartbeat_task->activateAndSchedule(); } @@ -579,7 +561,6 @@ void StorageRabbitMQ::shutdown() deactivateTask(streaming_task, true, false); deactivateTask(looping_task, true, true); - deactivateTask(heartbeat_task, true, false); connection->close(); @@ -688,6 +669,8 @@ void StorageRabbitMQ::streamingToViewsFunc() if (dependencies_count) { + auto start_time = std::chrono::steady_clock::now(); + // Keep streaming as long as there are attached views and streaming is not cancelled while (!stream_cancelled && num_created_consumers > 0) { @@ -696,8 +679,17 @@ void StorageRabbitMQ::streamingToViewsFunc() LOG_DEBUG(log, "Started streaming to {} attached views", dependencies_count); - if (!streamToViews()) + if (streamToViews()) break; + + auto end_time = std::chrono::steady_clock::now(); + auto duration = std::chrono::duration_cast(end_time - start_time); + if (duration.count() > MAX_THREAD_WORK_DURATION_MS) + { + event_handler->updateLoopState(Loop::STOP); + LOG_TRACE(log, "Reschedule streaming. Thread work duration limit exceeded."); + break; + } } } } @@ -731,13 +723,6 @@ bool StorageRabbitMQ::streamToViews() auto column_names = block_io.out->getHeader().getNames(); auto sample_block = metadata_snapshot->getSampleBlockForColumns(column_names, getVirtuals(), getStorageID()); - /* event_handler->connectionRunning() does not guarantee that connnection is not closed in case loop was not running before, but - * need to anyway start the loop to activate error callbacks and update connection state, because even checking with - * connection->usable() will not give correct answer before callbacks are activated. 
- */ - if (!event_handler->loopRunning() && event_handler->connectionRunning()) - looping_task->activateAndSchedule(); - auto block_size = getMaxBlockSize(); // Create a stream for each consumer and join them in a union stream @@ -770,34 +755,46 @@ bool StorageRabbitMQ::streamToViews() in = streams[0]; std::atomic stub = {false}; + + /// Loop could run untill this point only if select query was made + if (!event_handler->loopRunning()) + { + event_handler->updateLoopState(Loop::RUN); + looping_task->activateAndSchedule(); + } + copyData(*in, *block_io.out, &stub); - /* Need to stop loop even if connection is ok, because sending ack() with loop running in another thread will lead to a lot of data - * races inside the library, but only in case any error occurs or connection is lost while ack is being sent + /* Note: sending ack() with loop running in another thread will lead to a lot of data races inside the library, but only in case + * error occurs or connection is lost while ack is being sent */ - if (event_handler->loopRunning()) - deactivateTask(looping_task, false, true); + deactivateTask(looping_task, false, true); + size_t queue_empty = 0; if (!event_handler->connectionRunning()) { - if (!stream_cancelled && restoreConnection(true)) + if (stream_cancelled) + return true; + + if (restoreConnection(true)) { for (auto & stream : streams) stream->as()->updateChannel(); } else { - /// Reschedule if unable to connect to rabbitmq or quit if cancelled - return false; + LOG_TRACE(log, "Reschedule streaming. Unable to restore connection."); + return true; } } else { - deactivateTask(heartbeat_task, false, false); - /// Commit for (auto & stream : streams) { + if (stream->as()->queueEmpty()) + ++queue_empty; + /* false is returned by the sendAck function in only two cases: * 1) if connection failed. In this case all channels will be closed and will be unable to send ack. Also ack is made based on * delivery tags, which are unique to channels, so if channels fail, those delivery tags will become invalid and there is @@ -828,19 +825,25 @@ bool StorageRabbitMQ::streamToViews() break; } } + + event_handler->iterateLoop(); } } - event_handler->updateLoopState(Loop::RUN); - looping_task->activateAndSchedule(); - heartbeat_task->scheduleAfter(HEARTBEAT_RESCHEDULE_MS); /// It is also deactivated in restoreConnection(), so reschedule anyway + if ((queue_empty == num_queues) && (++read_attempts == MAX_FAILED_READ_ATTEMPTS)) + { + connection->heartbeat(); + read_attempts = 0; + LOG_TRACE(log, "Reschedule streaming. 
Queues are empty."); + return true; + } + else + { + event_handler->updateLoopState(Loop::RUN); + looping_task->activateAndSchedule(); + } - // Check whether the limits were applied during query execution - bool limits_applied = false; - const BlockStreamProfileInfo & info = in->getProfileInfo(); - limits_applied = info.hasAppliedLimit(); - - return limits_applied; + return false; } diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.h b/src/Storages/RabbitMQ/StorageRabbitMQ.h index 8d9a20f9e34..9cb4a5c59a1 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.h +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.h @@ -122,6 +122,7 @@ private: BackgroundSchedulePool::TaskHolder looping_task; std::atomic stream_cancelled{false}; + size_t read_attempts = 0; ConsumerBufferPtr createReadBuffer(); From 7beddceadddbf7cdab18d274cfdbe6fce1466e06 Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 27 Oct 2020 07:20:19 +0000 Subject: [PATCH 314/432] Fix bug that caused awful CPU usage --- src/Storages/RabbitMQ/StorageRabbitMQ.cpp | 3 ++- src/Storages/RabbitMQ/StorageRabbitMQ.h | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp index 6fa8d52764d..7998bedfb8c 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp @@ -40,6 +40,7 @@ static const auto CONNECT_SLEEP = 200; static const auto RETRIES_MAX = 20; static const uint32_t QUEUE_SIZE = 100000; static const auto MAX_FAILED_READ_ATTEMPTS = 10; +static const auto RESCHEDULE_MS = 500; static const auto MAX_THREAD_WORK_DURATION_MS = 60000; namespace ErrorCodes @@ -700,7 +701,7 @@ void StorageRabbitMQ::streamingToViewsFunc() /// Wait for attached views if (!stream_cancelled) - streaming_task->schedule(); + streaming_task->scheduleAfter(RESCHEDULE_MS); } diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.h b/src/Storages/RabbitMQ/StorageRabbitMQ.h index 9cb4a5c59a1..cb52b6bb282 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.h +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.h @@ -118,7 +118,6 @@ private: std::once_flag flag; /// remove exchange only once std::mutex task_mutex; BackgroundSchedulePool::TaskHolder streaming_task; - BackgroundSchedulePool::TaskHolder heartbeat_task; BackgroundSchedulePool::TaskHolder looping_task; std::atomic stream_cancelled{false}; From db03cd7dd7b984b5a267fa7299f6e6e44bbf1f6c Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 27 Oct 2020 07:14:38 +0000 Subject: [PATCH 315/432] Much more optimal queues setup --- .../table-engines/integrations/rabbitmq.md | 2 +- .../table-engines/integrations/rabbitmq.md | 2 +- .../ReadBufferFromRabbitMQConsumer.cpp | 71 +--------------- .../RabbitMQ/ReadBufferFromRabbitMQConsumer.h | 8 +- src/Storages/RabbitMQ/StorageRabbitMQ.cpp | 80 ++++++++++++++++++- src/Storages/RabbitMQ/StorageRabbitMQ.h | 2 + .../integration/test_storage_rabbitmq/test.py | 10 +-- 7 files changed, 89 insertions(+), 86 deletions(-) diff --git a/docs/en/engines/table-engines/integrations/rabbitmq.md b/docs/en/engines/table-engines/integrations/rabbitmq.md index dd14ee3b4b1..0340603eaae 100644 --- a/docs/en/engines/table-engines/integrations/rabbitmq.md +++ b/docs/en/engines/table-engines/integrations/rabbitmq.md @@ -51,7 +51,7 @@ Optional parameters: - `rabbitmq_row_delimiter` – Delimiter character, which ends the message. - `rabbitmq_schema` – Parameter that must be used if the format requires a schema definition. 
For example, [Cap’n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object. - `rabbitmq_num_consumers` – The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient. -- `rabbitmq_num_queues` – The number of queues per consumer. Default: `1`. Specify more queues if the capacity of one queue per consumer is insufficient. +- `rabbitmq_num_queues` – Total number of queues. Default: `1`. Increasing this number can significantly improve performance. - `rabbitmq_queue_base` - Specify a hint for queue names. Use cases of this setting are described below. - `rabbitmq_deadletter_exchange` - Specify name for a [dead letter exchange](https://www.rabbitmq.com/dlx.html). You can create another table with this exchange name and collect messages in cases when they are republished to dead letter exchange. By default dead letter exchange is not specified. - `rabbitmq_persistent` - If set to 1 (true), in insert query delivery mode will be set to 2 (marks messages as 'persistent'). Default: `0`. diff --git a/docs/ru/engines/table-engines/integrations/rabbitmq.md b/docs/ru/engines/table-engines/integrations/rabbitmq.md index ef7b811e295..75f20827df2 100644 --- a/docs/ru/engines/table-engines/integrations/rabbitmq.md +++ b/docs/ru/engines/table-engines/integrations/rabbitmq.md @@ -45,7 +45,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] - `rabbitmq_row_delimiter` – символ-разделитель, который завершает сообщение. - `rabbitmq_schema` – опциональный параметр, необходимый, если используется формат, требующий определения схемы. Например, [Cap’n Proto](https://capnproto.org/) требует путь к файлу со схемой и название корневого объекта `schema.capnp:Message`. - `rabbitmq_num_consumers` – количество потребителей на таблицу. По умолчанию: `1`. Укажите больше потребителей, если пропускная способность одного потребителя недостаточна. -- `rabbitmq_num_queues` – количество очередей на потребителя. По умолчанию: `1`. Укажите больше потребителей, если пропускная способность одной очереди на потребителя недостаточна. +- `rabbitmq_num_queues` – количество очередей. По умолчанию: `1`. Большее число очередей может сильно увеличить пропускную способность. - `rabbitmq_queue_base` - настройка для имен очередей. Сценарии использования описаны ниже. - `rabbitmq_persistent` - флаг, от которого зависит настройка 'durable' для сообщений при запросах `INSERT`. По умолчанию: `0`. - `rabbitmq_skip_broken_messages` – максимальное количество некорректных сообщений в блоке. Если `rabbitmq_skip_broken_messages = N`, то движок отбрасывает `N` сообщений, которые не получилось обработать. Одно сообщение в точности соответствует одной записи (строке). Значение по умолчанию – 0. 
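The documentation change above redefines rabbitmq_num_queues as a total queue count, and the code below moves queue declaration and binding out of the consumers into StorageRabbitMQ::bindQueue. As a rough standalone sketch of the same AMQP declare-and-bind pattern, assuming the Python pika client and the rabbitmq_consistent_hash_exchange plugin, with the connection details and all names invented for illustration:

import pika

NUM_QUEUES = 4        # plays the role of rabbitmq_num_queues
QUEUE_SIZE = 100000   # plays the role of the engine's queue length limit

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()

# Sharding exchange, as used when hash_exchange is enabled
channel.exchange_declare(exchange='sharding_exchange',
                         exchange_type='x-consistent-hash', durable=True)

for queue_id in range(1, NUM_QUEUES + 1):
    queue_name = '{}_queue_base'.format(queue_id)  # <queue_id>_<queue_base>
    channel.queue_declare(queue=queue_name, durable=True,
                          arguments={'x-max-length': QUEUE_SIZE,
                                     'x-overflow': 'reject-publish'})
    # For a consistent-hash exchange the binding key is a stringified
    # integer that acts as a weight for distributing messages.
    channel.queue_bind(queue=queue_name, exchange='sharding_exchange',
                       routing_key=str(queue_id))

connection.close()

With a consistent-hash exchange, the integer binding key spreads messages across the bound queues, which is why raising rabbitmq_num_queues can improve throughput, as the updated docs above note.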
diff --git a/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp b/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp index 43a9d75d084..5644b8d7c6a 100644 --- a/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp +++ b/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp @@ -17,33 +17,30 @@ namespace DB namespace ErrorCodes { extern const int BAD_ARGUMENTS; - extern const int CANNOT_CREATE_RABBITMQ_QUEUE_BINDING; } ReadBufferFromRabbitMQConsumer::ReadBufferFromRabbitMQConsumer( ChannelPtr consumer_channel_, - ChannelPtr setup_channel_, HandlerPtr event_handler_, const String & exchange_name_, + std::vector & queues_, size_t channel_id_base_, const String & channel_base_, const String & queue_base_, Poco::Logger * log_, char row_delimiter_, - bool hash_exchange_, size_t num_queues_, const String & deadletter_exchange_, uint32_t queue_size_, const std::atomic & stopped_) : ReadBuffer(nullptr, 0) , consumer_channel(std::move(consumer_channel_)) - , setup_channel(setup_channel_) , event_handler(event_handler_) , exchange_name(exchange_name_) + , queues(queues_) , channel_base(channel_base_) , channel_id_base(channel_id_base_) , queue_base(queue_base_) - , hash_exchange(hash_exchange_) , num_queues(num_queues_) , deadletter_exchange(deadletter_exchange_) , log(log_) @@ -52,9 +49,6 @@ ReadBufferFromRabbitMQConsumer::ReadBufferFromRabbitMQConsumer( , stopped(stopped_) , received(queue_size * num_queues) { - for (size_t queue_id = 0; queue_id < num_queues; ++queue_id) - bindQueue(queue_id); - setupChannel(); } @@ -65,67 +59,6 @@ ReadBufferFromRabbitMQConsumer::~ReadBufferFromRabbitMQConsumer() } -void ReadBufferFromRabbitMQConsumer::bindQueue(size_t queue_id) -{ - std::atomic binding_created = false; - - auto success_callback = [&](const std::string & queue_name, int msgcount, int /* consumercount */) - { - queues.emplace_back(queue_name); - LOG_DEBUG(log, "Queue {} is declared", queue_name); - - if (msgcount) - LOG_INFO(log, "Queue {} is non-empty. Non-consumed messaged will also be delivered", queue_name); - - /* Here we bind either to sharding exchange (consistent-hash) or to bridge exchange (fanout). All bindings to routing keys are - * done between client's exchange and local bridge exchange. Binding key must be a string integer in case of hash exchange, for - * fanout exchange it can be arbitrary - */ - setup_channel->bindQueue(exchange_name, queue_name, std::to_string(channel_id_base)) - .onSuccess([&] { binding_created = true; }) - .onError([&](const char * message) - { - throw Exception( - ErrorCodes::CANNOT_CREATE_RABBITMQ_QUEUE_BINDING, - "Failed to create queue binding with queue {} for exchange {}. Reason: {}", std::string(message), - queue_name, exchange_name); - }); - }; - - auto error_callback([&](const char * message) - { - /* This error is most likely a result of an attempt to declare queue with different settings if it was declared before. So for a - * given queue name either deadletter_exchange parameter changed or queue_size changed, i.e. table was declared with different - * max_block_size parameter. Solution: client should specify a different queue_base parameter or manually delete previously - * declared queues via any of the various cli tools. - */ - throw Exception("Failed to declare queue. Probably queue settings are conflicting: max_block_size, deadletter_exchange. Attempt \ - specifying differently those settings or use a different queue_base or manually delete previously declared queues, \ - which were declared with the same names. 
ERROR reason: " + std::string(message), ErrorCodes::BAD_ARGUMENTS); - }); - - AMQP::Table queue_settings; - - queue_settings["x-max-length"] = queue_size; - queue_settings["x-overflow"] = "reject-publish"; - - if (!deadletter_exchange.empty()) - queue_settings["x-dead-letter-exchange"] = deadletter_exchange; - - /* The first option not just simplifies queue_name, but also implements the possibility to be able to resume reading from one - * specific queue when its name is specified in queue_base setting - */ - const String queue_name = !hash_exchange ? queue_base : std::to_string(channel_id_base) + "_" + std::to_string(queue_id) + "_" + queue_base; - setup_channel->declareQueue(queue_name, AMQP::durable, queue_settings).onSuccess(success_callback).onError(error_callback); - - while (!binding_created) - { - iterateEventLoop(); - } -} - - void ReadBufferFromRabbitMQConsumer::subscribe() { for (const auto & queue_name : queues) diff --git a/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.h b/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.h index 109770c77e9..556e069083c 100644 --- a/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.h +++ b/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.h @@ -24,15 +24,14 @@ class ReadBufferFromRabbitMQConsumer : public ReadBuffer public: ReadBufferFromRabbitMQConsumer( ChannelPtr consumer_channel_, - ChannelPtr setup_channel_, HandlerPtr event_handler_, const String & exchange_name_, + std::vector & queues_, size_t channel_id_base_, const String & channel_base_, const String & queue_base_, Poco::Logger * log_, char row_delimiter_, - bool hash_exchange_, size_t num_queues_, const String & deadletter_exchange_, uint32_t queue_size_, @@ -79,19 +78,17 @@ public: private: bool nextImpl() override; - void bindQueue(size_t queue_id); void subscribe(); void iterateEventLoop(); ChannelPtr consumer_channel; - ChannelPtr setup_channel; HandlerPtr event_handler; const String exchange_name; + std::vector queues; const String channel_base; const size_t channel_id_base; const String queue_base; - const bool hash_exchange; const size_t num_queues; const String deadletter_exchange; Poco::Logger * log; @@ -102,7 +99,6 @@ private: String channel_id; std::atomic channel_error = true, wait_subscription = false; - std::vector queues; ConcurrentBoundedQueue received; MessageData current; size_t subscribed = 0; diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp index 7998bedfb8c..e49b18ea391 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp @@ -52,6 +52,7 @@ namespace ErrorCodes extern const int CANNOT_BIND_RABBITMQ_EXCHANGE; extern const int CANNOT_DECLARE_RABBITMQ_EXCHANGE; extern const int CANNOT_REMOVE_RABBITMQ_EXCHANGE; + extern const int CANNOT_CREATE_RABBITMQ_QUEUE_BINDING; } namespace ExchangeType @@ -385,6 +386,67 @@ void StorageRabbitMQ::bindExchange() } +void StorageRabbitMQ::bindQueue(size_t queue_id) +{ + std::atomic binding_created = false; + + auto success_callback = [&](const std::string & queue_name, int msgcount, int /* consumercount */) + { + queues.emplace_back(queue_name); + LOG_DEBUG(log, "Queue {} is declared", queue_name); + + if (msgcount) + LOG_INFO(log, "Queue {} is non-empty. Non-consumed messages will also be delivered", queue_name); + + /* Here we bind either to sharding exchange (consistent-hash) or to bridge exchange (fanout). All bindings to routing keys are + *
Binding key must be a string integer in case of hash exchange; for
+     * a fanout exchange it can be arbitrary.
+     */
+    setup_channel->bindQueue(consumer_exchange, queue_name, std::to_string(queue_id))
+    .onSuccess([&] { binding_created = true; })
+    .onError([&](const char * message)
+    {
+        throw Exception(
+            ErrorCodes::CANNOT_CREATE_RABBITMQ_QUEUE_BINDING,
+            "Failed to create queue binding for exchange {}. Reason: {}", exchange_name, std::string(message));
+    });
+    };
+
+    auto error_callback([&](const char * message)
+    {
+        /* This error is most likely the result of attempting to declare the queue with settings that differ from an earlier
+         * declaration: for the given queue name either the deadletter_exchange parameter or the queue size changed, i.e. the table
+         * was declared with a different max_block_size parameter. Solution: the client should specify a different queue_base
+         * parameter or manually delete the previously declared queues via any of the various cli tools.
+         */
+        throw Exception("Failed to declare queue. Probably queue settings are conflicting: max_block_size, deadletter_exchange. Try \
+            specifying these settings differently, use a different queue_base, or manually delete the previously declared queues \
+            with the same names. ERROR reason: "
+            + std::string(message), ErrorCodes::BAD_ARGUMENTS);
+    });
+
+    AMQP::Table queue_settings;
+
+    queue_settings["x-max-length"] = queue_size;
+
+    if (!deadletter_exchange.empty())
+        queue_settings["x-dead-letter-exchange"] = deadletter_exchange;
+    else
+        queue_settings["x-overflow"] = "reject-publish";
+
+    /* The first option not only simplifies queue_name, but also makes it possible to resume reading from one
+     * specific queue when its name is specified in the queue_base setting.
+     */
+    const String queue_name = !hash_exchange ?
queue_base : std::to_string(queue_id) + "_" + queue_base; + setup_channel->declareQueue(queue_name, AMQP::durable, queue_settings).onSuccess(success_callback).onError(error_callback); + + while (!binding_created) + { + event_handler->iterateLoop(); + } +} + + bool StorageRabbitMQ::restoreConnection(bool reconnecting) { size_t cnt_retries = 0; @@ -444,6 +506,7 @@ void StorageRabbitMQ::unbindExchange() event_handler->updateLoopState(Loop::STOP); looping_task->deactivate(); + setup_channel = std::make_shared(connection.get()); setup_channel->removeExchange(bridge_exchange) .onSuccess([&]() { @@ -458,6 +521,8 @@ void StorageRabbitMQ::unbindExchange() { event_handler->iterateLoop(); } + + setup_channel->close(); }); } @@ -536,6 +601,13 @@ void StorageRabbitMQ::startup() initExchange(); bindExchange(); + for (size_t i = 1; i <= num_queues; ++i) + { + bindQueue(i); + } + + setup_channel->close(); + for (size_t i = 0; i < num_consumers; ++i) { try @@ -617,8 +689,8 @@ ConsumerBufferPtr StorageRabbitMQ::createReadBuffer() ChannelPtr consumer_channel = std::make_shared(connection.get()); return std::make_shared( - consumer_channel, setup_channel, event_handler, consumer_exchange, ++consumer_id, - unique_strbase, queue_base, log, row_delimiter, hash_exchange, num_queues, + consumer_channel, event_handler, consumer_exchange, queues, ++consumer_id, + unique_strbase, queue_base, log, row_delimiter, num_queues, deadletter_exchange, queue_size, stream_cancelled); } @@ -665,6 +737,7 @@ void StorageRabbitMQ::streamingToViewsFunc() try { auto table_id = getStorageID(); + // Check if at least one direct dependency is attached size_t dependencies_count = DatabaseCatalog::instance().getDependencies(table_id).size(); @@ -757,7 +830,6 @@ bool StorageRabbitMQ::streamToViews() std::atomic stub = {false}; - /// Loop could run untill this point only if select query was made if (!event_handler->loopRunning()) { event_handler->updateLoopState(Loop::RUN); @@ -831,7 +903,7 @@ bool StorageRabbitMQ::streamToViews() } } - if ((queue_empty == num_queues) && (++read_attempts == MAX_FAILED_READ_ATTEMPTS)) + if ((queue_empty == num_created_consumers) && (++read_attempts == MAX_FAILED_READ_ATTEMPTS)) { connection->heartbeat(); read_attempts = 0; diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.h b/src/Storages/RabbitMQ/StorageRabbitMQ.h index cb52b6bb282..d7891aed0a7 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.h +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.h @@ -114,6 +114,7 @@ private: std::atomic wait_confirm = true; /// needed to break waiting for confirmations for producer std::atomic exchange_removed = false; ChannelPtr setup_channel; + std::vector queues; std::once_flag flag; /// remove exchange only once std::mutex task_mutex; @@ -140,6 +141,7 @@ private: void initExchange(); void bindExchange(); + void bindQueue(size_t queue_id); bool restoreConnection(bool reconnecting); bool streamToViews(); diff --git a/tests/integration/test_storage_rabbitmq/test.py b/tests/integration/test_storage_rabbitmq/test.py index ab44d0ebea0..d7f98d5cb77 100644 --- a/tests/integration/test_storage_rabbitmq/test.py +++ b/tests/integration/test_storage_rabbitmq/test.py @@ -537,14 +537,14 @@ def test_rabbitmq_big_message(rabbitmq_cluster): @pytest.mark.timeout(420) def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster): NUM_CONSUMERS = 10 - NUM_QUEUES = 2 + NUM_QUEUES = 10 instance.query(''' CREATE TABLE test.rabbitmq (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', 
rabbitmq_exchange_name = 'test_sharding', - rabbitmq_num_queues = 2, + rabbitmq_num_queues = 10, rabbitmq_num_consumers = 10, rabbitmq_format = 'JSONEachRow', rabbitmq_row_delimiter = '\\n'; @@ -617,7 +617,7 @@ def test_rabbitmq_mv_combo(rabbitmq_cluster): rabbitmq_exchange_name = 'combo', rabbitmq_queue_base = 'combo', rabbitmq_num_consumers = 2, - rabbitmq_num_queues = 2, + rabbitmq_num_queues = 5, rabbitmq_format = 'JSONEachRow', rabbitmq_row_delimiter = '\\n'; ''') @@ -879,7 +879,7 @@ def test_rabbitmq_overloaded_insert(rabbitmq_cluster): rabbitmq_queue_base = 'over', rabbitmq_exchange_type = 'direct', rabbitmq_num_consumers = 5, - rabbitmq_num_queues = 2, + rabbitmq_num_queues = 10, rabbitmq_max_block_size = 10000, rabbitmq_routing_key_list = 'over', rabbitmq_format = 'TSV', @@ -1722,7 +1722,7 @@ def test_rabbitmq_restore_failed_connection_without_losses_2(rabbitmq_cluster): SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'consumer_reconnect', rabbitmq_num_consumers = 10, - rabbitmq_num_queues = 2, + rabbitmq_num_queues = 10, rabbitmq_format = 'JSONEachRow', rabbitmq_row_delimiter = '\\n'; ''') From 41971e073a6d6eb6c4f62f641d4899cc4f8d6f03 Mon Sep 17 00:00:00 2001 From: Mikhail Filimonov Date: Tue, 27 Oct 2020 12:04:03 +0100 Subject: [PATCH 316/432] Fix typos reported by codespell --- CHANGELOG.md | 4 ++-- base/common/StringRef.h | 2 +- cmake/yandex/ya.make.versions.inc | 6 +++--- contrib/libhdfs3-cmake/CMakeLists.txt | 2 +- docker/test/performance-comparison/compare.sh | 12 ++++++------ .../data-types/special-data-types/interval.md | 2 +- docs/en/sql-reference/functions/other-functions.md | 4 ++-- docs/en/sql-reference/operators/index.md | 2 +- docs/en/sql-reference/statements/create/table.md | 6 +++--- programs/local/LocalServer.cpp | 2 +- programs/server/config.xml | 2 +- src/Access/IAccessStorage.cpp | 2 +- src/AggregateFunctions/AggregateFunctionGroupArray.h | 2 +- src/Columns/IColumnUnique.h | 2 +- src/Common/FileSyncGuard.h | 6 +++--- src/Common/ThreadPool.cpp | 4 ++-- src/Common/TraceCollector.cpp | 2 +- src/Common/ZooKeeper/ZooKeeperImpl.cpp | 4 ++-- src/Compression/CompressionFactory.cpp | 2 +- src/Compression/ICompressionCodec.cpp | 2 +- src/Core/Defines.h | 2 +- src/Core/MySQL/MySQLReplication.cpp | 2 +- src/Core/Settings.h | 2 +- src/DataTypes/NumberTraits.h | 2 +- src/Databases/DatabaseAtomic.cpp | 2 +- src/Databases/MySQL/ConnectionMySQLSettings.h | 2 +- src/Functions/FunctionBinaryArithmetic.h | 2 +- src/Functions/FunctionsAES.h | 2 +- src/Functions/formatReadableTimeDelta.cpp | 2 +- src/Functions/isDecimalOverflow.cpp | 2 +- src/Interpreters/ThreadStatusExt.cpp | 2 +- src/Interpreters/executeQuery.cpp | 6 +++--- src/Parsers/obfuscateQueries.cpp | 2 +- src/Processors/IAccumulatingTransform.h | 2 +- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 2 +- src/Storages/MergeTree/IMergeTreeDataPart.h | 2 +- src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp | 2 +- .../MergeTree/MergeTreeDataMergerMutator.cpp | 2 +- src/Storages/MergeTree/MergeTreeDataPartTTLInfo.h | 2 +- .../MergeTree/MergeTreeDataSelectExecutor.cpp | 2 +- .../MergeTree/ReplicatedMergeTreeRestartingThread.h | 2 +- src/Storages/MergeTree/TTLMergeSelector.h | 2 +- .../RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp | 8 ++++---- src/Storages/RabbitMQ/StorageRabbitMQ.cpp | 4 ++-- .../RabbitMQ/WriteBufferToRabbitMQProducer.h | 8 ++++---- tests/integration/test_disk_types/test.py | 4 ++-- .../test_distributed_load_balancing/test.py | 2 +- tests/queries/0_stateless/00932_geohash_support.sql | 2 
+- .../01281_group_by_limit_memory_tracking.sh | 2 +- utils/db-generator/README.md | 6 +++--- 50 files changed, 77 insertions(+), 77 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8e4acdc293f..09ce72d20ed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -409,7 +409,7 @@ ## ClickHouse release 20.6 -### ClickHouse release v20.6.3.28-stable +### ClickHouse release v20.6.3.28-stable #### New Feature @@ -2362,7 +2362,7 @@ No changes compared to v20.4.3.16-stable. * `Live View` table engine refactoring. [#8519](https://github.com/ClickHouse/ClickHouse/pull/8519) ([vzakaznikov](https://github.com/vzakaznikov)) * Add additional checks for external dictionaries created from DDL-queries. [#8127](https://github.com/ClickHouse/ClickHouse/pull/8127) ([alesapin](https://github.com/alesapin)) * Fix error `Column ... already exists` while using `FINAL` and `SAMPLE` together, e.g. `select count() from table final sample 1/2`. Fixes [#5186](https://github.com/ClickHouse/ClickHouse/issues/5186). [#7907](https://github.com/ClickHouse/ClickHouse/pull/7907) ([Nikolai Kochetov](https://github.com/KochetovNicolai)) -* Now table the first argument of `joinGet` function can be table indentifier. [#7707](https://github.com/ClickHouse/ClickHouse/pull/7707) ([Amos Bird](https://github.com/amosbird)) +* Now table the first argument of `joinGet` function can be table identifier. [#7707](https://github.com/ClickHouse/ClickHouse/pull/7707) ([Amos Bird](https://github.com/amosbird)) * Allow using `MaterializedView` with subqueries above `Kafka` tables. [#8197](https://github.com/ClickHouse/ClickHouse/pull/8197) ([filimonov](https://github.com/filimonov)) * Now background moves between disks run it the seprate thread pool. [#7670](https://github.com/ClickHouse/ClickHouse/pull/7670) ([Vladimir Chebotarev](https://github.com/excitoon)) * `SYSTEM RELOAD DICTIONARY` now executes synchronously. [#8240](https://github.com/ClickHouse/ClickHouse/pull/8240) ([Vitaly Baranov](https://github.com/vitlibar)) diff --git a/base/common/StringRef.h b/base/common/StringRef.h index 4376876c077..b51b95456cb 100644 --- a/base/common/StringRef.h +++ b/base/common/StringRef.h @@ -51,7 +51,7 @@ struct StringRef }; /// Here constexpr doesn't implicate inline, see https://www.viva64.com/en/w/v1043/ -/// nullptr can't be used because the StringRef values are used in SipHash's pointer arithmetics +/// nullptr can't be used because the StringRef values are used in SipHash's pointer arithmetic /// and the UBSan thinks that something like nullptr + 8 is UB. constexpr const inline char empty_string_ref_addr{}; constexpr const inline StringRef EMPTY_STRING_REF{&empty_string_ref_addr, 0}; diff --git a/cmake/yandex/ya.make.versions.inc b/cmake/yandex/ya.make.versions.inc index 3ac401cb108..6910164d0aa 100644 --- a/cmake/yandex/ya.make.versions.inc +++ b/cmake/yandex/ya.make.versions.inc @@ -11,11 +11,11 @@ CFLAGS (GLOBAL -DDBMS_VERSION_MAJOR=${VERSION_MAJOR}) CFLAGS (GLOBAL -DDBMS_VERSION_MINOR=${VERSION_MINOR}) CFLAGS (GLOBAL -DDBMS_VERSION_PATCH=${VERSION_PATCH}) CFLAGS (GLOBAL -DVERSION_FULL=\"\\\"${VERSION_FULL}\\\"\") -CFLAGS (GLOBAL -DVERSION_MAJOR=${VERSION_MAJOR}) -CFLAGS (GLOBAL -DVERSION_MINOR=${VERSION_MINOR}) +CFLAGS (GLOBAL -DVERSION_MAJOR=${VERSION_MAJOR}) +CFLAGS (GLOBAL -DVERSION_MINOR=${VERSION_MINOR}) CFLAGS (GLOBAL -DVERSION_PATCH=${VERSION_PATCH}) -# TODO: not supported yet, not sure if ya.make supports arithmetics. +# TODO: not supported yet, not sure if ya.make supports arithmetic. 
CFLAGS (GLOBAL -DVERSION_INTEGER=0) CFLAGS (GLOBAL -DVERSION_NAME=\"\\\"${VERSION_NAME}\\\"\") diff --git a/contrib/libhdfs3-cmake/CMakeLists.txt b/contrib/libhdfs3-cmake/CMakeLists.txt index ab4857f0387..3aa5cb970db 100644 --- a/contrib/libhdfs3-cmake/CMakeLists.txt +++ b/contrib/libhdfs3-cmake/CMakeLists.txt @@ -192,7 +192,7 @@ set(SRCS ${HDFS3_SOURCE_DIR}/common/FileWrapper.h ) -# old kernels (< 3.17) doens't have SYS_getrandom. Always use POSIX implementation to have better compatibility +# old kernels (< 3.17) doesn't have SYS_getrandom. Always use POSIX implementation to have better compatibility set_source_files_properties(${HDFS3_SOURCE_DIR}/rpc/RpcClient.cpp PROPERTIES COMPILE_FLAGS "-DBOOST_UUID_RANDOM_PROVIDER_FORCE_POSIX=1") # target diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 30443f21fba..d0cf12baa9e 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -63,7 +63,7 @@ function configure # Make copies of the original db for both servers. Use hardlinks instead # of copying to save space. Before that, remove preprocessed configs and # system tables, because sharing them between servers with hardlinks may - # lead to weird effects. + # lead to weird effects. rm -r left/db ||: rm -r right/db ||: rm -r db0/preprocessed_configs ||: @@ -82,7 +82,7 @@ function restart export MALLOC_CONF="confirm_conf:true" set -m # Spawn servers in their own process groups - + left/clickhouse-server --config-file=left/config/config.xml \ -- --path left/db --user_files_path left/db/user_files \ &>> left-server-log.log & @@ -208,7 +208,7 @@ function run_tests echo test "$test_name" # Don't profile if we're past the time limit. - # Use awk because bash doesn't support floating point arithmetics. + # Use awk because bash doesn't support floating point arithmetic. profile_seconds=$(awk "BEGIN { print ($profile_seconds_left > 0 ? 10 : 0) }") TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n") @@ -541,10 +541,10 @@ create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv') as select abs(diff) > report_threshold and abs(diff) > stat_threshold as changed_fail, abs(diff) > report_threshold - 0.05 and abs(diff) > stat_threshold as changed_show, - + not changed_fail and stat_threshold > report_threshold + 0.10 as unstable_fail, not changed_show and stat_threshold > report_threshold - 0.05 as unstable_show, - + left, right, diff, stat_threshold, if(report_threshold > 0, report_threshold, 0.10) as report_threshold, query_metric_stats.test test, query_metric_stats.query_index query_index, @@ -767,7 +767,7 @@ create table all_tests_report engine File(TSV, 'report/all-queries.tsv') as -- The threshold for 2) is significantly larger than the threshold for 1), to -- avoid jitter. create view shortness - as select + as select (test, query_index) in (select * from file('analyze/marked-short-queries.tsv', TSV, 'test text, query_index int')) diff --git a/docs/en/sql-reference/data-types/special-data-types/interval.md b/docs/en/sql-reference/data-types/special-data-types/interval.md index 8a4b9ae7886..7c0c5b00c0d 100644 --- a/docs/en/sql-reference/data-types/special-data-types/interval.md +++ b/docs/en/sql-reference/data-types/special-data-types/interval.md @@ -80,4 +80,4 @@ Code: 43. DB::Exception: Received from localhost:9000. 
DB::Exception: Wrong argu ## See Also {#see-also} - [INTERVAL](../../../sql-reference/operators/index.md#operator-interval) operator -- [toInterval](../../../sql-reference/functions/type-conversion-functions.md#function-tointerval) type convertion functions +- [toInterval](../../../sql-reference/functions/type-conversion-functions.md#function-tointerval) type conversion functions diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 1145efbbc5f..518479fb728 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -551,7 +551,7 @@ formatReadableTimeDelta(column[, maximum_unit]) **Parameters** - `column` — A column with numeric time delta. -- `maximum_unit` — Optional. Maximum unit to show. Acceptable values seconds, minutes, hours, days, months, years. +- `maximum_unit` — Optional. Maximum unit to show. Acceptable values seconds, minutes, hours, days, months, years. Example: @@ -1584,7 +1584,7 @@ isDecimalOverflow(d, [p]) **Parameters** - `d` — value. [Decimal](../../sql-reference/data-types/decimal.md). -- `p` — precision. Optional. If omitted, the initial presicion of the first argument is used. Using of this paratemer could be helpful for data extraction to another DBMS or file. [UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges). +- `p` — precision. Optional. If omitted, the initial precision of the first argument is used. Using of this paratemer could be helpful for data extraction to another DBMS or file. [UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges). **Returned values** diff --git a/docs/en/sql-reference/operators/index.md b/docs/en/sql-reference/operators/index.md index e5554b58e4a..262ae00dc95 100644 --- a/docs/en/sql-reference/operators/index.md +++ b/docs/en/sql-reference/operators/index.md @@ -169,7 +169,7 @@ SELECT now() AS current_date_time, current_date_time + INTERVAL 4 DAY + INTERVAL **See Also** - [Interval](../../sql-reference/data-types/special-data-types/interval.md) data type -- [toInterval](../../sql-reference/functions/type-conversion-functions.md#function-tointerval) type convertion functions +- [toInterval](../../sql-reference/functions/type-conversion-functions.md#function-tointerval) type conversion functions ## Logical Negation Operator {#logical-negation-operator} diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md index 35780856ef2..82326bf51cf 100644 --- a/docs/en/sql-reference/statements/create/table.md +++ b/docs/en/sql-reference/statements/create/table.md @@ -121,7 +121,7 @@ Defines storage time for values. Can be specified only for MergeTree-family tabl ## Column Compression Codecs {#codecs} -By default, ClickHouse applies the `lz4` compression method. For `MergeTree`-engine family you can change the default compression method in the [compression](../../../operations/server-configuration-parameters/settings.md#server-settings-compression) section of a server configuration. +By default, ClickHouse applies the `lz4` compression method. For `MergeTree`-engine family you can change the default compression method in the [compression](../../../operations/server-configuration-parameters/settings.md#server-settings-compression) section of a server configuration. You can also define the compression method for each individual column in the `CREATE TABLE` query. @@ -138,7 +138,7 @@ ENGINE = ... 
``` -The `Default` codec can be specified to reference default compression which may dependend on different settings (and properties of data) in runtime. +The `Default` codec can be specified to reference default compression which may depend on different settings (and properties of data) in runtime. Example: `value UInt64 CODEC(Default)` — the same as lack of codec specification. Also you can remove current CODEC from the column and use default compression from config.xml: @@ -149,7 +149,7 @@ ALTER TABLE codec_example MODIFY COLUMN float_value CODEC(Default); Codecs can be combined in a pipeline, for example, `CODEC(Delta, Default)`. -To select the best codec combination for you project, pass benchmarks similar to described in the Altinity [New Encodings to Improve ClickHouse Efficiency](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) article. One thing to note is that codec can't be applied for ALIAS column type. +To select the best codec combination for you project, pass benchmarks similar to described in the Altinity [New Encodings to Improve ClickHouse Efficiency](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) article. One thing to note is that codec can't be applied for ALIAS column type. !!! warning "Warning" You can’t decompress ClickHouse database files with external utilities like `lz4`. Instead, use the special [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/programs/compressor) utility. diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 63667307876..b9dde555788 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -152,7 +152,7 @@ void LocalServer::tryInitPath() default_path = parent_folder / fmt::format("clickhouse-local-{}-{}-{}", getpid(), time(nullptr), randomSeed()); if (exists(default_path)) - throw Exception(ErrorCodes::FILE_ALREADY_EXISTS, "Unsuccessfull attempt to create working directory: {} exist!", default_path.string()); + throw Exception(ErrorCodes::FILE_ALREADY_EXISTS, "Unsuccessful attempt to create working directory: {} exist!", default_path.string()); create_directory(default_path); temporary_directory_to_delete = default_path; diff --git a/programs/server/config.xml b/programs/server/config.xml index 5bdec5377fd..679e3e9e6f4 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -270,7 +270,7 @@ This parameter is mandatory and cannot be empty. roles - section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server. If no roles are specified, user will not be able to perform any actions after authentication. - If any of the listed roles is not defined locally at the time of authentication, the authenthication attept + If any of the listed roles is not defined locally at the time of authentication, the authenthication attempt will fail as if the provided password was incorrect. 
Example: diff --git a/src/Access/IAccessStorage.cpp b/src/Access/IAccessStorage.cpp index 8dd219e07d7..58821e7de4b 100644 --- a/src/Access/IAccessStorage.cpp +++ b/src/Access/IAccessStorage.cpp @@ -585,7 +585,7 @@ void IAccessStorage::throwInvalidPassword() void IAccessStorage::throwCannotAuthenticate(const String & user_name) { - /// We use the same message for all authentification failures because we don't want to give away any unnecessary information for security reasons, + /// We use the same message for all authentication failures because we don't want to give away any unnecessary information for security reasons, /// only the log will show the exact reason. throw Exception(user_name + ": Authentication failed: password is incorrect or there is no user with such name", ErrorCodes::AUTHENTICATION_FAILED); } diff --git a/src/AggregateFunctions/AggregateFunctionGroupArray.h b/src/AggregateFunctions/AggregateFunctionGroupArray.h index 83e096c797b..02b9003eb96 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArray.h +++ b/src/AggregateFunctions/AggregateFunctionGroupArray.h @@ -296,7 +296,7 @@ public: { typename ColumnVector::Container & data_to = assert_cast &>(arr_to.getData()).getData(); if constexpr (is_big_int_v) - // is data_to empty? we should probaly use std::vector::insert then + // is data_to empty? we should probably use std::vector::insert then for (auto it = this->data(place).value.begin(); it != this->data(place).value.end(); it++) data_to.push_back(*it); else diff --git a/src/Columns/IColumnUnique.h b/src/Columns/IColumnUnique.h index 99facea6055..99e134675f6 100644 --- a/src/Columns/IColumnUnique.h +++ b/src/Columns/IColumnUnique.h @@ -82,7 +82,7 @@ public: * @see DB::ColumnUnique * * The most common example uses https://clickhouse.tech/docs/en/sql-reference/data-types/lowcardinality/ columns. - * Consider data type @e LC(String). The inner type here is @e String which is more or less a contigous memory + * Consider data type @e LC(String). The inner type here is @e String which is more or less a contiguous memory * region, so it can be easily represented as a @e StringRef. So we pass that ref to this function and get its * index in the dictionary, which can be used to operate with the indices column. */ diff --git a/src/Common/FileSyncGuard.h b/src/Common/FileSyncGuard.h index 6451f6ebf36..486b02d0f24 100644 --- a/src/Common/FileSyncGuard.h +++ b/src/Common/FileSyncGuard.h @@ -5,15 +5,15 @@ namespace DB { -/// Helper class, that recieves file descriptor and does fsync for it in destructor. +/// Helper class, that receives file descriptor and does fsync for it in destructor. /// It's used to keep descriptor open, while doing some operations with it, and do fsync at the end. /// Guaranties of sequence 'close-reopen-fsync' may depend on kernel version. /// Source: linux-fsdevel mailing-list https://marc.info/?l=linux-fsdevel&m=152535409207496 class FileSyncGuard { public: - /// NOTE: If you have already opened descriptor, it's preffered to use - /// this constructor instead of construnctor with path. + /// NOTE: If you have already opened descriptor, it's preferred to use + /// this constructor instead of constructor with path. 
FileSyncGuard(const DiskPtr & disk_, int fd_) : disk(disk_), fd(fd_) {} FileSyncGuard(const DiskPtr & disk_, const String & path) diff --git a/src/Common/ThreadPool.cpp b/src/Common/ThreadPool.cpp index 21116e9d432..e527e97d608 100644 --- a/src/Common/ThreadPool.cpp +++ b/src/Common/ThreadPool.cpp @@ -234,13 +234,13 @@ void ThreadPoolImpl::worker(typename std::list::iterator thread_ std::is_same_v ? CurrentMetrics::GlobalThreadActive : CurrentMetrics::LocalThreadActive); job(); - /// job should be reseted before decrementing scheduled_jobs to + /// job should be reset before decrementing scheduled_jobs to /// ensure that the Job destroyed before wait() returns. job = {}; } catch (...) { - /// job should be reseted before decrementing scheduled_jobs to + /// job should be reset before decrementing scheduled_jobs to /// ensure that the Job destroyed before wait() returns. job = {}; diff --git a/src/Common/TraceCollector.cpp b/src/Common/TraceCollector.cpp index fc5318b11fb..d10d5981d57 100644 --- a/src/Common/TraceCollector.cpp +++ b/src/Common/TraceCollector.cpp @@ -152,7 +152,7 @@ void TraceCollector::run() if (trace_log) { // time and time_in_microseconds are both being constructed from the same timespec so that the - // times will be equal upto the precision of a second. + // times will be equal up to the precision of a second. struct timespec ts; clock_gettime(CLOCK_REALTIME, &ts); diff --git a/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/src/Common/ZooKeeper/ZooKeeperImpl.cpp index abb8158781b..f5c57781eef 100644 --- a/src/Common/ZooKeeper/ZooKeeperImpl.cpp +++ b/src/Common/ZooKeeper/ZooKeeperImpl.cpp @@ -1288,13 +1288,13 @@ void ZooKeeper::receiveEvent() response->removeRootPath(root_path); } - /// Instead of setting the watch in sendEvent, set it in receiveEvent becuase need to check the response. + /// Instead of setting the watch in sendEvent, set it in receiveEvent because need to check the response. /// The watch shouldn't be set if the node does not exist and it will never exist like sequential ephemeral nodes. /// By using getData() instead of exists(), a watch won't be set if the node doesn't exist. if (request_info.watch) { bool add_watch = false; - /// 3 indicates the ZooKeeperExistsRequest. + /// 3 indicates the ZooKeeperExistsRequest. // For exists, we set the watch on both node exist and nonexist case. // For other case like getData, we only set the watch when node exists. if (request_info.request->getOpNum() == 3) diff --git a/src/Compression/CompressionFactory.cpp b/src/Compression/CompressionFactory.cpp index fe6a5b2dacd..46d7d7dfcc4 100644 --- a/src/Compression/CompressionFactory.cpp +++ b/src/Compression/CompressionFactory.cpp @@ -87,7 +87,7 @@ ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST(const ASTPtr else throw Exception("Unexpected AST element for compression codec", ErrorCodes::UNEXPECTED_AST_STRUCTURE); - /// Default codec replaced with current default codec which may dependend on different + /// Default codec replaced with current default codec which may depend on different /// settings (and properties of data) in runtime. 
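For illustration, the runtime substitution described in that comment can be sketched in a standalone form: a `Default` placeholder is not a codec of its own, it is resolved into a concrete codec chosen from current settings. The `Codec` struct and `resolveCodec` function below are assumed names for this sketch, not the real CompressionCodecFactory interface.

```cpp
#include <iostream>
#include <string>

// Illustrative stand-in for a codec object; the real factory returns CompressionCodecPtr.
struct Codec
{
    std::string family; // e.g. "LZ4", "ZSTD"
};

// "Default" is replaced at this point with whatever the current settings
// (and, in ClickHouse, properties of the data) dictate.
Codec resolveCodec(const std::string & requested_family, const std::string & default_family_from_settings)
{
    if (requested_family == "Default")
        return Codec{default_family_from_settings};
    return Codec{requested_family};
}

int main()
{
    std::cout << resolveCodec("Default", "LZ4").family << '\n'; // prints LZ4
    std::cout << resolveCodec("ZSTD", "LZ4").family << '\n';    // prints ZSTD
}
```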
CompressionCodecPtr result_codec; if (codec_family_name == DEFAULT_CODEC_NAME) diff --git a/src/Compression/ICompressionCodec.cpp b/src/Compression/ICompressionCodec.cpp index baf6e9b2b86..3746753df8b 100644 --- a/src/Compression/ICompressionCodec.cpp +++ b/src/Compression/ICompressionCodec.cpp @@ -26,7 +26,7 @@ void ICompressionCodec::setCodecDescription(const String & codec_name, const AST std::shared_ptr result = std::make_shared(); result->name = "CODEC"; - /// Special case for codec Multiple, which doens't have name. It's just list + /// Special case for codec Multiple, which doesn't have name. It's just list /// of other codecs. if (codec_name.empty()) { diff --git a/src/Core/Defines.h b/src/Core/Defines.h index ba3d37242fa..aee8d6ca93e 100644 --- a/src/Core/Defines.h +++ b/src/Core/Defines.h @@ -67,7 +67,7 @@ /// Minimum revision supporting SettingsBinaryFormat::STRINGS. #define DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS 54429 -/// Mininum revision supporting interserver secret. +/// Minimum revision supporting interserver secret. #define DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET 54441 /// Version of ClickHouse TCP protocol. Increment it manually when you change the protocol. diff --git a/src/Core/MySQL/MySQLReplication.cpp b/src/Core/MySQL/MySQLReplication.cpp index 1179c0eb46b..c09c4b3b034 100644 --- a/src/Core/MySQL/MySQLReplication.cpp +++ b/src/Core/MySQL/MySQLReplication.cpp @@ -705,7 +705,7 @@ namespace MySQLReplication break; } default: - throw ReplicationError("Position update with unsupport event", ErrorCodes::LOGICAL_ERROR); + throw ReplicationError("Position update with unsupported event", ErrorCodes::LOGICAL_ERROR); } } diff --git a/src/Core/Settings.h b/src/Core/Settings.h index b94883ca871..f6a24641bc5 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -389,7 +389,7 @@ class IColumn; M(Bool, alter_partition_verbose_result, false, "Output information about affected parts. Currently works only for FREEZE and ATTACH commands.", 0) \ M(Bool, allow_experimental_database_materialize_mysql, false, "Allow to create database with Engine=MaterializeMySQL(...).", 0) \ M(Bool, system_events_show_zero_values, false, "Include all metrics, even with zero values", 0) \ - M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precison are seen as String on ClickHouse's side.", 0) \ + M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precision are seen as String on ClickHouse's side.", 0) \ \ /** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. */ \ \ diff --git a/src/DataTypes/NumberTraits.h b/src/DataTypes/NumberTraits.h index 603449150db..77bd2101f05 100644 --- a/src/DataTypes/NumberTraits.h +++ b/src/DataTypes/NumberTraits.h @@ -29,7 +29,7 @@ constexpr size_t min(size_t x, size_t y) } /// @note There's no auto scale to larger big integer, only for integral ones. -/// It's cause of (U)Int64 backward compatibilty and very big performance penalties. 
+/// It's cause of (U)Int64 backward compatibility and very big performance penalties. constexpr size_t nextSize(size_t size) { if (size < 8) diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index a9dbae8ec92..3784ae0961b 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -116,7 +116,7 @@ void DatabaseAtomic::dropTable(const Context &, const String & table_name, bool } tryRemoveSymlink(table_name); /// Remove the inner table (if any) to avoid deadlock - /// (due to attemp to execute DROP from the worker thread) + /// (due to attempt to execute DROP from the worker thread) if (auto * mv = dynamic_cast(table.get())) mv->dropInnerTable(no_delay); /// Notify DatabaseCatalog that table was dropped. It will remove table data in background. diff --git a/src/Databases/MySQL/ConnectionMySQLSettings.h b/src/Databases/MySQL/ConnectionMySQLSettings.h index 90279f846a4..ce2773307c5 100644 --- a/src/Databases/MySQL/ConnectionMySQLSettings.h +++ b/src/Databases/MySQL/ConnectionMySQLSettings.h @@ -11,7 +11,7 @@ class Context; class ASTStorage; #define LIST_OF_CONNECTION_MYSQL_SETTINGS(M) \ - M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precison are seen as String on ClickHouse's side.", 0) \ + M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precision are seen as String on ClickHouse's side.", 0) \ /// Settings that should not change after the creation of a database. #define APPLY_FOR_IMMUTABLE_CONNECTION_MYSQL_SETTINGS(M) \ diff --git a/src/Functions/FunctionBinaryArithmetic.h b/src/Functions/FunctionBinaryArithmetic.h index 63df025d2b1..43ff42956cd 100644 --- a/src/Functions/FunctionBinaryArithmetic.h +++ b/src/Functions/FunctionBinaryArithmetic.h @@ -326,7 +326,7 @@ struct DecimalBinaryOperation } private: - /// there's implicit type convertion here + /// there's implicit type conversion here static NativeResultType apply(NativeResultType a, NativeResultType b) { if constexpr (can_overflow && check_overflow) diff --git a/src/Functions/FunctionsAES.h b/src/Functions/FunctionsAES.h index 10c4a27e509..68d8b41407d 100644 --- a/src/Functions/FunctionsAES.h +++ b/src/Functions/FunctionsAES.h @@ -577,7 +577,7 @@ private: auto input_value = input_column->getDataAt(r); if constexpr (mode == CipherMode::RFC5116_AEAD_AES_GCM) { - // empty plaintext results in empty ciphertext + tag, means there should be atleast tag_size bytes. + // empty plaintext results in empty ciphertext + tag, means there should be at least tag_size bytes. 
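The length requirement stated in that comment can be captured by a small validation helper. The sketch below assumes the usual 16-byte GCM tag; it illustrates the invariant, not the exact check in FunctionsAES.h.

```cpp
#include <cstddef>
#include <stdexcept>
#include <string_view>

// In an AEAD mode such as AES-GCM (RFC 5116), encrypting even an empty plaintext
// produces tag_size bytes of output (the authentication tag alone), so any valid
// decryption input must be at least tag_size bytes long.
constexpr std::size_t tag_size = 16; // standard GCM tag length; an assumption of this sketch

std::size_t ciphertextBodySize(std::string_view encrypted)
{
    if (encrypted.size() < tag_size)
        throw std::invalid_argument("Encrypted data is too short to contain the authentication tag");
    return encrypted.size() - tag_size; // actual ciphertext bytes; zero for an empty plaintext
}
```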
if (input_value.size < tag_size) throw Exception(fmt::format("Encrypted data is too short: only {} bytes, " "should contain at least {} bytes of a tag.", diff --git a/src/Functions/formatReadableTimeDelta.cpp b/src/Functions/formatReadableTimeDelta.cpp index e55829b1bad..2b574f672d3 100644 --- a/src/Functions/formatReadableTimeDelta.cpp +++ b/src/Functions/formatReadableTimeDelta.cpp @@ -131,7 +131,7 @@ public: for (size_t i = 0; i < input_rows_count; ++i) { - /// Virtual call is Ok (neglible comparing to the rest of calculations). + /// Virtual call is Ok (negligible comparing to the rest of calculations). Float64 value = arguments[0].column->getFloat64(i); bool is_negative = value < 0; diff --git a/src/Functions/isDecimalOverflow.cpp b/src/Functions/isDecimalOverflow.cpp index 11c413757c6..323c9951a96 100644 --- a/src/Functions/isDecimalOverflow.cpp +++ b/src/Functions/isDecimalOverflow.cpp @@ -22,7 +22,7 @@ namespace { /// Returns 1 if and Decimal value has more digits then it's Precision allow, 0 otherwise. -/// Precision could be set as second argument or omitted. If ommited function uses Decimal presicion of the first argument. +/// Precision could be set as second argument or omitted. If omitted function uses Decimal precision of the first argument. class FunctionIsDecimalOverflow : public IFunction { public: diff --git a/src/Interpreters/ThreadStatusExt.cpp b/src/Interpreters/ThreadStatusExt.cpp index 2166c78ef7c..7f29cfc7e5c 100644 --- a/src/Interpreters/ThreadStatusExt.cpp +++ b/src/Interpreters/ThreadStatusExt.cpp @@ -166,7 +166,7 @@ void ThreadStatus::initPerformanceCounters() memory_tracker.setDescription("(for thread)"); // query_start_time_{microseconds, nanoseconds} are all constructed from the same time point - // to ensure that they are all equal upto the precision of a second. + // to ensure that they are all equal up to the precision of a second. const auto now = std::chrono::system_clock::now(); query_start_time_nanoseconds = time_in_nanoseconds(now); diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 57c557c5658..622acf2db9f 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -209,7 +209,7 @@ static void onExceptionBeforeStart(const String & query_for_logging, Context & c // all callers to onExceptionBeforeStart method construct the timespec for event_time and // event_time_microseconds from the same time point. So, it can be assumed that both of these - // times are equal upto the precision of a second. + // times are equal up to the precision of a second. elem.event_time = current_time; elem.event_time_microseconds = current_time_microseconds; elem.query_start_time = current_time; @@ -267,7 +267,7 @@ static std::tuple executeQueryImpl( ReadBuffer * istr) { // current_time and current_time_microseconds are both constructed from the same time point - // to ensure that both the times are equal upto the precision of a second. + // to ensure that both the times are equal up to the precision of a second. const auto now = std::chrono::system_clock::now(); auto current_time = time_in_seconds(now); @@ -631,7 +631,7 @@ static std::tuple executeQueryImpl( elem.type = QueryLogElementType::EXCEPTION_WHILE_PROCESSING; // event_time and event_time_microseconds are being constructed from the same time point - // to ensure that both the times will be equal upto the precision of a second. + // to ensure that both the times will be equal up to the precision of a second. 
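The pattern these comments describe, one `time_point` feeding both timestamp fields so they always agree up to a second, looks roughly like this; the variable names mirror the log fields, while the conversion helpers differ from ClickHouse's own `time_in_seconds`/`time_in_microseconds`.

```cpp
#include <cassert>
#include <chrono>

int main()
{
    // Take the clock reading once; both fields are derived from it, so the
    // microsecond value truncated to seconds always equals the second value.
    const auto now = std::chrono::system_clock::now();
    const auto since_epoch = now.time_since_epoch();

    const auto event_time =
        std::chrono::duration_cast<std::chrono::seconds>(since_epoch).count();
    const auto event_time_microseconds =
        std::chrono::duration_cast<std::chrono::microseconds>(since_epoch).count();

    assert(event_time_microseconds / 1'000'000 == event_time);
}
```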
const auto time_now = std::chrono::system_clock::now(); elem.event_time = time_in_seconds(time_now); diff --git a/src/Parsers/obfuscateQueries.cpp b/src/Parsers/obfuscateQueries.cpp index 32382b70bd7..11f4a77ee0e 100644 --- a/src/Parsers/obfuscateQueries.cpp +++ b/src/Parsers/obfuscateQueries.cpp @@ -927,7 +927,7 @@ void obfuscateQueries( } else { - /// Everyting else is kept as is. + /// Everything else is kept as is. result.write(token.begin, token.size()); } } diff --git a/src/Processors/IAccumulatingTransform.h b/src/Processors/IAccumulatingTransform.h index 3e77c798ad7..b51753199c3 100644 --- a/src/Processors/IAccumulatingTransform.h +++ b/src/Processors/IAccumulatingTransform.h @@ -36,7 +36,7 @@ public: Status prepare() override; void work() override; - /// Adds additional port fo totals. + /// Adds additional port for totals. /// If added, totals will have been ready by the first generate() call (in totals chunk). InputPort * addTotalsPort(); diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 319b486c2c6..ffc2dd62ce0 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -353,7 +353,7 @@ size_t IMergeTreeDataPart::getFileSizeOrZero(const String & file_name) const return checksum->second.file_size; } -String IMergeTreeDataPart::getColumnNameWithMinumumCompressedSize(const StorageMetadataPtr & metadata_snapshot) const +String IMergeTreeDataPart::getColumnNameWithMinimumCompressedSize(const StorageMetadataPtr & metadata_snapshot) const { const auto & storage_columns = metadata_snapshot->getColumns().getAllPhysical(); auto alter_conversions = storage.getAlterConversionsForPart(shared_from_this()); diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index 202d9494247..21932ba445c 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -145,7 +145,7 @@ public: /// Returns the name of a column with minimum compressed size (as returned by getColumnSize()). /// If no checksums are present returns the name of the first physically existing column. 
- String getColumnNameWithMinumumCompressedSize(const StorageMetadataPtr & metadata_snapshot) const; + String getColumnNameWithMinimumCompressedSize(const StorageMetadataPtr & metadata_snapshot) const; bool contains(const IMergeTreeDataPart & other) const { return info.contains(other.info); } diff --git a/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp b/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp index 739dfedfde4..ad10a437b1e 100644 --- a/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp +++ b/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp @@ -95,7 +95,7 @@ NameSet injectRequiredColumns(const MergeTreeData & storage, const StorageMetada */ if (!have_at_least_one_physical_column) { - const auto minimum_size_column_name = part->getColumnNameWithMinumumCompressedSize(metadata_snapshot); + const auto minimum_size_column_name = part->getColumnNameWithMinimumCompressedSize(metadata_snapshot); columns.push_back(minimum_size_column_name); /// correctly report added column injected_columns.insert(columns.back()); diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index b29966751f9..141dd8002b9 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -298,7 +298,7 @@ bool MergeTreeDataMergerMutator::selectPartsToMerge( if (metadata_snapshot->hasAnyTTL() && merge_with_ttl_allowed && !ttl_merges_blocker.isCancelled()) { - /// TTL delete is prefered to recompression + /// TTL delete is preferred to recompression TTLDeleteMergeSelector delete_ttl_selector( next_delete_ttl_merge_times_by_partition, current_time, diff --git a/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.h b/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.h index f0837f98486..17239e2618a 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.h +++ b/src/Storages/MergeTree/MergeTreeDataPartTTLInfo.h @@ -49,7 +49,7 @@ struct MergeTreeDataPartTTLInfos TTLInfoMap recompression_ttl; - /// Return smalles max recompression TTL value + /// Return the smallest max recompression TTL value time_t getMinimalMaxRecompressionTTL() const; diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 8c1dc845d26..44d0788901f 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -1517,7 +1517,7 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( { /// In case when SELECT's predicate defines a single continuous interval of keys, /// we can use binary search algorithm to find the left and right endpoint key marks of such interval. 
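As a standalone illustration of that binary search, consider per-mark minimum key values and a predicate selecting one continuous interval [lo, hi]. The data and simplifications (a single scalar key, an in-memory vector of marks) are assumptions of the sketch, not the real index layout.

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    // Minimum primary-key value at the start of each mark; mark i covers keys
    // in [mark_min_key[i], mark_min_key[i + 1]).
    std::vector<int> mark_min_key = {0, 10, 20, 30, 40, 50};
    const int lo = 23, hi = 41; // predicate: lo <= key <= hi

    // Leftmost mark that can still contain a key >= lo: step back one from the
    // first mark whose minimum already exceeds lo.
    std::size_t left = std::upper_bound(mark_min_key.begin(), mark_min_key.end(), lo)
        - mark_min_key.begin();
    left = left > 0 ? left - 1 : 0;

    // One past the last mark whose minimum key is still <= hi.
    const std::size_t right = std::upper_bound(mark_min_key.begin(), mark_min_key.end(), hi)
        - mark_min_key.begin();

    std::cout << "marks [" << left << ", " << right << ")\n"; // prints: marks [2, 5)
}
```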
- /// The returned value is the minumum range of marks, containing all keys for which KeyCondition holds + /// The returned value is the minimum range of marks, containing all keys for which KeyCondition holds LOG_TRACE(log, "Running binary search on index range for part {} ({} marks)", part->name, marks_count); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h index 986253a2206..824ed73c171 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h @@ -36,7 +36,7 @@ private: Poco::Logger * log; std::atomic need_stop {false}; - // We need it besides `storage.is_readonly`, bacause `shutdown()` may be called many times, that way `storage.is_readonly` will not change. + // We need it besides `storage.is_readonly`, because `shutdown()` may be called many times, that way `storage.is_readonly` will not change. bool incr_readonly = false; /// The random data we wrote into `/replicas/me/is_active`. diff --git a/src/Storages/MergeTree/TTLMergeSelector.h b/src/Storages/MergeTree/TTLMergeSelector.h index c294687cdc5..710b85b3474 100644 --- a/src/Storages/MergeTree/TTLMergeSelector.h +++ b/src/Storages/MergeTree/TTLMergeSelector.h @@ -84,7 +84,7 @@ public: time_t getTTLForPart(const IMergeSelector::Part & part) const override; /// Checks that part's codec is not already equal to required codec - /// according to recompression TTL. It doesn't make sence to assign such + /// according to recompression TTL. It doesn't make sense to assign such /// merge. bool isTTLAlreadySatisfied(const IMergeSelector::Part & part) const override; private: diff --git a/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp b/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp index 43a9d75d084..685a55027ce 100644 --- a/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp +++ b/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp @@ -155,7 +155,7 @@ void ReadBufferFromRabbitMQConsumer::subscribe() .onError([&](const char * message) { /* End up here either if channel ends up in an error state (then there will be resubscription) or consume call error, which - * arises from queue settings mismatch or queue level error, which should not happen as noone else is supposed to touch them + * arises from queue settings mismatch or queue level error, which should not happen as no one else is supposed to touch them */ LOG_ERROR(log, "Consumer failed on channel {}. 
Reason: {}", channel_id, message); wait_subscription.store(false); @@ -173,16 +173,16 @@ bool ReadBufferFromRabbitMQConsumer::ackMessages() */ if (record_info.channel_id == channel_id && record_info.delivery_tag && record_info.delivery_tag > prev_tag) { - /// Commit all received messages with delivery tags from last commited to last inserted + /// Commit all received messages with delivery tags from last committed to last inserted if (!consumer_channel->ack(record_info.delivery_tag, AMQP::multiple)) { - LOG_ERROR(log, "Failed to commit messages with delivery tags from last commited to {} on channel {}", + LOG_ERROR(log, "Failed to commit messages with delivery tags from last committed to {} on channel {}", record_info.delivery_tag, channel_id); return false; } prev_tag = record_info.delivery_tag; - LOG_TRACE(log, "Consumer commited messages with deliveryTags up to {} on channel {}", record_info.delivery_tag, channel_id); + LOG_TRACE(log, "Consumer committed messages with deliveryTags up to {} on channel {}", record_info.delivery_tag, channel_id); } return true; diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp index 6565afeb32d..d213251e366 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp @@ -406,7 +406,7 @@ bool StorageRabbitMQ::restoreConnection(bool reconnecting) connection->close(); /// Connection might be unusable, but not closed /* Connection is not closed immediately (firstly, all pending operations are completed, and then - * an AMQP closing-handshake is performed). But cannot open a new connection untill previous one is properly closed + * an AMQP closing-handshake is performed). But cannot open a new connection until previous one is properly closed */ while (!connection->closed() && ++cnt_retries != RETRIES_MAX) event_handler->iterateLoop(); @@ -731,7 +731,7 @@ bool StorageRabbitMQ::streamToViews() auto column_names = block_io.out->getHeader().getNames(); auto sample_block = metadata_snapshot->getSampleBlockForColumns(column_names, getVirtuals(), getStorageID()); - /* event_handler->connectionRunning() does not guarantee that connnection is not closed in case loop was not running before, but + /* event_handler->connectionRunning() does not guarantee that connection is not closed in case loop was not running before, but * need to anyway start the loop to activate error callbacks and update connection state, because even checking with * connection->usable() will not give correct answer before callbacks are activated. */ diff --git a/src/Storages/RabbitMQ/WriteBufferToRabbitMQProducer.h b/src/Storages/RabbitMQ/WriteBufferToRabbitMQProducer.h index a8e94070dbd..28fa5df8111 100644 --- a/src/Storages/RabbitMQ/WriteBufferToRabbitMQProducer.h +++ b/src/Storages/RabbitMQ/WriteBufferToRabbitMQProducer.h @@ -76,20 +76,20 @@ private: String channel_id; /* payloads.queue: - * - payloads are pushed to queue in countRow and poped by another thread in writingFunc, each payload gets into queue only once + * - payloads are pushed to queue in countRow and popped by another thread in writingFunc, each payload gets into queue only once * returned.queue: * - payloads are pushed to queue: * 1) inside channel->onError() callback if channel becomes unusable and the record of pending acknowledgements from server * is non-empty. * 2) inside removeRecord() if received nack() - negative acknowledgement from the server that message failed to be written * to disk or it was unable to reach the queue. 
- * - payloads are poped from the queue once republished + * - payloads are popped from the queue once republished */ ConcurrentBoundedQueue> payloads, returned; /* Counter of current delivery on a current channel. Delivery tags are scoped per channel. The server attaches a delivery tag for each * published message - a serial number of delivery on current channel. Delivery tag is a way of server to notify publisher if it was - * able or unable to process delivery, i.e. it sends back a responce with a corresponding delivery tag. + * able or unable to process delivery, i.e. it sends back a response with a corresponding delivery tag. */ UInt64 delivery_tag = 0; @@ -100,7 +100,7 @@ private: */ bool wait_all = true; - /* false: untill writeSuffix is called + /* false: until writeSuffix is called * true: means payloads.queue will not grow anymore */ std::atomic wait_num = 0; diff --git a/tests/integration/test_disk_types/test.py b/tests/integration/test_disk_types/test.py index c748653bc82..ad09519a484 100644 --- a/tests/integration/test_disk_types/test.py +++ b/tests/integration/test_disk_types/test.py @@ -21,8 +21,8 @@ def cluster(): def test_different_types(cluster): node = cluster.instances["node"] - responce = node.query("SELECT * FROM system.disks") - disks = responce.split("\n") + response = node.query("SELECT * FROM system.disks") + disks = response.split("\n") for disk in disks: if disk == '': # skip empty line (after split at last position) continue diff --git a/tests/integration/test_distributed_load_balancing/test.py b/tests/integration/test_distributed_load_balancing/test.py index e7b86a210bd..0758dc38ba7 100644 --- a/tests/integration/test_distributed_load_balancing/test.py +++ b/tests/integration/test_distributed_load_balancing/test.py @@ -26,7 +26,7 @@ def bootstrap(): # just after server starts (+ 2 seconds, reload timeout). # # And on configuration reload the clusters will be re-created, so some - # internal stuff will be reseted: + # internal stuff will be reset: # - error_count # - last_used (round_robing) # diff --git a/tests/queries/0_stateless/00932_geohash_support.sql b/tests/queries/0_stateless/00932_geohash_support.sql index a477332b532..aeed72176b9 100644 --- a/tests/queries/0_stateless/00932_geohash_support.sql +++ b/tests/queries/0_stateless/00932_geohash_support.sql @@ -45,7 +45,7 @@ select 12 as p, geohashEncode(longitude, latitude, p) as actual, if(actual = enc -- Here results are floats, and hence may not be compared for equality directly. -- We select all values that are off by some reasonable value: --- each byte of encoded string provides 5 bits of precison, (roughly 2.5 for lon and lat) +-- each byte of encoded string provides 5 bits of precision, (roughly 2.5 for lon and lat) -- each bit of precision divides value range by 2. -- hence max error is roughly value range 2.5 times divided by 2 for each precision bit. -- initial value range is [-90..90] for latitude and [-180..180] for longitude. 
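The error bound reasoned out in the geohash comments above can be computed directly. The 2.5-bits-per-axis split per character is the comment's own approximation, so the printed figures are rough bounds rather than exact errors.

```cpp
#include <cmath>
#include <cstdio>

int main()
{
    // Each geohash character carries 5 bits, split roughly 2.5/2.5 between
    // longitude and latitude, and every bit halves the remaining value range.
    for (int precision = 1; precision <= 12; ++precision)
    {
        const double bits_per_axis = 2.5 * precision;
        const double lat_err = 180.0 / std::pow(2.0, bits_per_axis); // latitude range is [-90, 90]
        const double lon_err = 360.0 / std::pow(2.0, bits_per_axis); // longitude range is [-180, 180]
        std::printf("precision %2d: lat error ~%.3g deg, lon error ~%.3g deg\n",
                    precision, lat_err, lon_err);
    }
}
```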
diff --git a/tests/queries/0_stateless/01281_group_by_limit_memory_tracking.sh b/tests/queries/0_stateless/01281_group_by_limit_memory_tracking.sh index b14defd672a..5333d0b4b0b 100755 --- a/tests/queries/0_stateless/01281_group_by_limit_memory_tracking.sh +++ b/tests/queries/0_stateless/01281_group_by_limit_memory_tracking.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # Regression for MemoryTracker that had been incorrectly accounted -# (it was reseted before deallocation) +# (it was reset before deallocation) # # For this will be used: # - two-level group by diff --git a/utils/db-generator/README.md b/utils/db-generator/README.md index 1d276063bf9..f49c7911cb6 100644 --- a/utils/db-generator/README.md +++ b/utils/db-generator/README.md @@ -6,11 +6,11 @@ **Анализ схемы** По запросу необходимо определить возможные таблицы. Имея строку запроса можно понять, какие его части обозначают названия таблиц, таким образом можно определить их количество в нашей базе данных. -В парсере Clickhouse поддеревом запроса, отвечающее за таблицы из которых мы берем данные, является TABLES (Рисунок 1), в нем лежит основная таблица, из которой берутся колонки, а также операции JOIN, которые совершаются в запросе. Обходя все вершины в поддереве мы берем названия таблиц и баз данных в которых они лежат, а также их алиас, то есть укороченные названия, выбранные автором запроса. Эти названия могут понадобиться нам для определения принадлежности колонки в дальнейшем. +В парсере Clickhouse поддеревом запроса, отвечающее за таблицы из которых мы берем данные, является TABLES (Рисунок 1), в нем лежит основная таблица, из которой берутся колонки, а также операции JOIN, которые совершаются в запросе. Обходя все вершины в поддереве мы берем названия таблиц и баз данных в которых они лежат, а также их алиас, то есть укороченные названия, выбранные автором запроса. Эти названия могут понадобиться нам для определения принадлежности колонки в дальнейшем. Таким образом для запроса мы получаем набор баз данных, а также таблиц и их условных обозначений (алиасов), по которым делается запрос. Затем нам необходимо определить множество столбцов, которые присутствуют в запросе и таблицы, к которым они могут относиться. Во время исполнения запроса уже известно множество столбцов в каждой таблице, поэтому при исполнении программа автоматически связывает столбец и таблицу, однако в нашем случае нельзя однозначно трактовать принадлежность столбца к определенной таблице, например в следующем запросе: “SELECT column1, column2, column3 FROM table1 JOIN table2 on table1.column2 = table2.column3 ”. Здесь мы однозначно можем сказать, к какой таблице относятся колонки column2 и column3, однако column1 может принадлежать как первой, так и второй таблице. Для однозначности трактовки таких случаев, мы будем относить данную неопределенные колонки к основной таблице, по которой делается запрос, например в данном случае это будет таблица table1. -Все столбцы в дереве лежат в вершинах типа INDENTIFIER, которые находятся в поддеревьях SELECT, TABLES, WHERE, GROUP_BY, HAVING, ORDER_BY. Рекурсивно обходя поддеревья мы формируем множество всех таблиц, затем мы разделяем колонку на составляющие: таблица (если она явно указана через точку) и само название, затем, так как таблица может являться алиасом, мы заменяем алиас на оригинальное название таблицы. Теперь у нас есть список всех столбцов и таблиц, к которым они относятся, для столбцов без таблиц определяем основную таблицу запроса. 
+Все столбцы в дереве лежат в вершинах типа IDENTIFIER, которые находятся в поддеревьях SELECT, TABLES, WHERE, GROUP_BY, HAVING, ORDER_BY. Рекурсивно обходя поддеревья мы формируем множество всех таблиц, затем мы разделяем колонку на составляющие: таблица (если она явно указана через точку) и само название, затем, так как таблица может являться алиасом, мы заменяем алиас на оригинальное название таблицы. Теперь у нас есть список всех столбцов и таблиц, к которым они относятся, для столбцов без таблиц определяем основную таблицу запроса. **Анализ столбцов** @@ -20,7 +20,7 @@ Определить значения столбцов мы можем используя логический, арифметические и другие функции над значениями столбцов, которые указаны в запросе. Такие функции лежат в поддеревьях SELECT и WHERE. Параметром функции может быть константа, колонка либо другая функция (Рисунок 2). Таким образом для понимания типа колонки могут помочь следующие параметры: 1) Типы аргументов, которые может принимать функция, например функция TOSTARTOFMINUTE(округляет время до кратного 5 минутам вниз) может принимать только DATETIME, таким образом если аргументом данной функции является колонка, то данная колонка имеет тип DATETIME. 2) типы остальных аргументов в данной функции, например функция EQUALS(равенство), она подразумевает собой равенство типов ее аргументов, таким образом если в данной функции присутствует константа и столбец, то мы можем определить тип столбца как тип константы. Таким образом, для каждой функции мы определяем возможные типы аргументов, тип возвращаемого значения, а также параметр, являются ли аргументы функции одинакового типа. Рекурсивный обработчик функций будет определять возможные типы столбцов использующихся в данных функциях по значениям аргументов и возвращать возможные типы результата выполнения функции. -Теперь для каждого столбца мы имеем множество возможных типов его значений. Для однозначной трактовки запроса мы выберем один конкретный тип из этого множества. +Теперь для каждого столбца мы имеем множество возможных типов его значений. Для однозначной трактовки запроса мы выберем один конкретный тип из этого множества. **Определение значений столбцов** From 331545fe0df758be09b433d595fc3973141412b3 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Tue, 27 Oct 2020 14:29:10 +0300 Subject: [PATCH 317/432] Fix typo --- src/Storages/System/StorageSystemReplicas.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/System/StorageSystemReplicas.cpp b/src/Storages/System/StorageSystemReplicas.cpp index 973ccfbf464..ab54d760873 100644 --- a/src/Storages/System/StorageSystemReplicas.cpp +++ b/src/Storages/System/StorageSystemReplicas.cpp @@ -75,7 +75,7 @@ Pipe StorageSystemReplicas::read( for (const auto & db : DatabaseCatalog::instance().getDatabases()) { /// Check if database can contain replicated tables - if (db.second->canContainMergeTreeTables()) + if (!db.second->canContainMergeTreeTables()) continue; const bool check_access_for_tables = check_access_for_databases && !access->isGranted(AccessType::SHOW_TABLES, db.first); for (auto iterator = db.second->getTablesIterator(context); iterator->isValid(); iterator->next()) From 791c1b6593bc6ca1236aadec0a117c77058669c7 Mon Sep 17 00:00:00 2001 From: Vladimir Chebotarev Date: Tue, 27 Oct 2020 14:55:57 +0300 Subject: [PATCH 318/432] One more attempt to fix. 
--- .../configs/config.d/storage_configuration.xml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml b/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml index c04106221f7..b091adf9ec5 100644 --- a/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml +++ b/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml @@ -23,6 +23,7 @@
jbod1 + 0.09
external From a48bc837637ef88a2063bc1f1324f277a4b8b684 Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 27 Oct 2020 12:04:07 +0000 Subject: [PATCH 319/432] Fix style --- src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp b/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp index 5644b8d7c6a..8c7b435cd2a 100644 --- a/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp +++ b/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp @@ -14,11 +14,6 @@ namespace DB { -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; -} - ReadBufferFromRabbitMQConsumer::ReadBufferFromRabbitMQConsumer( ChannelPtr consumer_channel_, HandlerPtr event_handler_, From dca35b191377efa34830b3e6f72d2b95b92996d4 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 27 Oct 2020 15:24:10 +0300 Subject: [PATCH 320/432] Add awesome test for fetch --- docker/test/integration/base/Dockerfile | 3 +- src/IO/ReadWriteBufferFromHTTP.h | 5 +- src/Storages/MergeTree/DataPartsExchange.cpp | 10 +-- .../MergeTree/ReplicatedFetchesList.cpp | 29 +++--- .../MergeTree/ReplicatedFetchesList.h | 23 +++-- src/Storages/System/StorageSystemFetches.cpp | 12 ++- src/Storages/System/StorageSystemFetches.h | 4 +- tests/integration/helpers/cluster.py | 1 + tests/integration/helpers/network.py | 12 +++ .../test_system_fetches/__init__.py | 1 + tests/integration/test_system_fetches/test.py | 90 +++++++++++++++++++ 11 files changed, 144 insertions(+), 46 deletions(-) create mode 100644 tests/integration/test_system_fetches/__init__.py create mode 100644 tests/integration/test_system_fetches/test.py diff --git a/docker/test/integration/base/Dockerfile b/docker/test/integration/base/Dockerfile index b6a46f6d934..99095de60fb 100644 --- a/docker/test/integration/base/Dockerfile +++ b/docker/test/integration/base/Dockerfile @@ -17,7 +17,8 @@ RUN apt-get update \ sqlite3 \ curl \ tar \ - krb5-user + krb5-user \ + iproute2 RUN rm -rf \ /var/lib/apt/lists/* \ /var/cache/debconf \ diff --git a/src/IO/ReadWriteBufferFromHTTP.h b/src/IO/ReadWriteBufferFromHTTP.h index c79a3bd953d..10cacf10dd9 100644 --- a/src/IO/ReadWriteBufferFromHTTP.h +++ b/src/IO/ReadWriteBufferFromHTTP.h @@ -205,12 +205,12 @@ namespace detail bool nextImpl() override { + if (read_callback) + read_callback(count()); if (!impl->next()) return false; internal_buffer = impl->buffer(); working_buffer = internal_buffer; - if (read_callback) - read_callback(count()); return true; } @@ -225,6 +225,7 @@ namespace detail void setNextReadCallback(std::function read_callback_) { read_callback = read_callback_; + read_callback(count()); } }; } diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index b1f1dc3f5f5..c4a7816581a 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -53,7 +53,6 @@ std::string getEndpointId(const std::string & node_id) return "DataPartsExchange:" + node_id; } - struct ReplicatedFetchReadCallback { ReplicatedFetchList::Entry & replicated_fetch_entry; @@ -65,9 +64,9 @@ struct ReplicatedFetchReadCallback void operator() (size_t bytes_count) { - replicated_fetch_entry->bytes_read_compressed = bytes_count; + replicated_fetch_entry->bytes_read_compressed.store(bytes_count, std::memory_order_relaxed); replicated_fetch_entry->progress.store( - replicated_fetch_entry->bytes_read_compressed.load(std::memory_order_relaxed) / 
replicated_fetch_entry->total_size_bytes_compressed, + static_cast(bytes_count) / replicated_fetch_entry->total_size_bytes_compressed, std::memory_order_relaxed); } }; @@ -307,10 +306,11 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( reservation = data.makeEmptyReservationOnLargestDisk(); } auto storage_id = data.getStorageID(); + String new_part_path = data.getFullPathOnDisk(reservation->getDisk()) + part_name + "/"; auto entry = data.global_context.getReplicatedFetchList().insert( storage_id.getDatabaseName(), storage_id.getTableName(), - part_info.partition_id, part_name, part_name, - replica_path, uri.toString(), interserver_scheme, to_detached, sum_files_size); + part_info.partition_id, part_name, new_part_path, + replica_path, uri, to_detached, sum_files_size); in.setNextReadCallback(ReplicatedFetchReadCallback(*entry)); diff --git a/src/Storages/MergeTree/ReplicatedFetchesList.cpp b/src/Storages/MergeTree/ReplicatedFetchesList.cpp index d66d0965351..92e60d9dde1 100644 --- a/src/Storages/MergeTree/ReplicatedFetchesList.cpp +++ b/src/Storages/MergeTree/ReplicatedFetchesList.cpp @@ -9,25 +9,21 @@ ReplicatedFetchListElement::ReplicatedFetchListElement( const std::string & database_, const std::string & table_, const std::string & partition_id_, const std::string & result_part_name_, const std::string & result_part_path_, const std::string & source_replica_path_, - const std::string & source_replica_address_, const std::string & interserver_scheme_, - UInt8 to_detached_, UInt64 total_size_bytes_compressed_) + const Poco::URI & uri_, UInt8 to_detached_, UInt64 total_size_bytes_compressed_) : database(database_) , table(table_) , partition_id(partition_id_) , result_part_name(result_part_name_) , result_part_path(result_part_path_) , source_replica_path(source_replica_path_) - , source_replica_address(source_replica_address_) - , interserver_scheme(interserver_scheme_) + , source_replica_hostname(uri_.getHost()) + , source_replica_port(uri_.getPort()) + , interserver_scheme(uri_.getScheme()) + , uri(uri_.toString()) , to_detached(to_detached_) , total_size_bytes_compressed(total_size_bytes_compressed_) + , thread_id(getThreadId()) { - background_thread_memory_tracker = CurrentThread::getMemoryTracker(); - if (background_thread_memory_tracker) - { - background_thread_memory_tracker_prev_parent = background_thread_memory_tracker->getParent(); - background_thread_memory_tracker->setParent(&memory_tracker); - } } @@ -40,23 +36,18 @@ ReplicatedFetchInfo ReplicatedFetchListElement::getInfo() const res.result_part_name = result_part_name; res.result_part_path = result_part_path; res.source_replica_path = source_replica_path; - res.source_replica_address = source_replica_address; + res.source_replica_hostname = source_replica_hostname; + res.source_replica_port = source_replica_port; + res.interserver_scheme = interserver_scheme; + res.uri = uri; res.interserver_scheme = interserver_scheme; res.to_detached = to_detached; res.elapsed = watch.elapsedSeconds(); res.progress = progress.load(std::memory_order_relaxed); res.bytes_read_compressed = bytes_read_compressed.load(std::memory_order_relaxed); res.total_size_bytes_compressed = total_size_bytes_compressed; - res.memory_usage = memory_tracker.get(); res.thread_id = thread_id; return res; } -ReplicatedFetchListElement::~ReplicatedFetchListElement() -{ - /// Unplug memory_tracker from current background processing pool thread - if (background_thread_memory_tracker) - 
background_thread_memory_tracker->setParent(background_thread_memory_tracker_prev_parent); -} - } diff --git a/src/Storages/MergeTree/ReplicatedFetchesList.h b/src/Storages/MergeTree/ReplicatedFetchesList.h index ad8edb6ad59..670e1fb984a 100644 --- a/src/Storages/MergeTree/ReplicatedFetchesList.h +++ b/src/Storages/MergeTree/ReplicatedFetchesList.h @@ -4,6 +4,7 @@ #include #include #include +#include namespace CurrentMetrics { @@ -23,8 +24,10 @@ struct ReplicatedFetchInfo std::string result_part_path; std::string source_replica_path; - std::string source_replica_address; + std::string source_replica_hostname; + UInt16 source_replica_port; std::string interserver_scheme; + std::string uri; UInt8 to_detached; @@ -34,7 +37,6 @@ struct ReplicatedFetchInfo UInt64 total_size_bytes_compressed; UInt64 bytes_read_compressed; - UInt64 memory_usage; UInt64 thread_id; }; @@ -48,9 +50,11 @@ struct ReplicatedFetchListElement : private boost::noncopyable const std::string result_part_name; const std::string result_part_path; - const std::string source_replica_path; - const std::string source_replica_address; - const std::string interserver_scheme; + std::string source_replica_path; + std::string source_replica_hostname; + UInt16 source_replica_port; + std::string interserver_scheme; + std::string uri; const UInt8 to_detached; @@ -60,22 +64,15 @@ struct ReplicatedFetchListElement : private boost::noncopyable std::atomic bytes_read_compressed{}; UInt64 total_size_bytes_compressed{}; - MemoryTracker memory_tracker{VariableContext::Process}; - MemoryTracker * background_thread_memory_tracker; - MemoryTracker * background_thread_memory_tracker_prev_parent = nullptr; - UInt64 thread_id; ReplicatedFetchListElement( const std::string & database_, const std::string & table_, const std::string & partition_id_, const std::string & result_part_name_, const std::string & result_part_path_, const std::string & source_replica_path_, - const std::string & source_replica_address_, const std::string & interserver_scheme_, - UInt8 to_detached_, UInt64 total_size_bytes_compressed_); + const Poco::URI & uri, UInt8 to_detached_, UInt64 total_size_bytes_compressed_); ReplicatedFetchInfo getInfo() const; - - ~ReplicatedFetchListElement(); }; diff --git a/src/Storages/System/StorageSystemFetches.cpp b/src/Storages/System/StorageSystemFetches.cpp index 27d4eeddbfc..0c992457c6c 100644 --- a/src/Storages/System/StorageSystemFetches.cpp +++ b/src/Storages/System/StorageSystemFetches.cpp @@ -1,5 +1,7 @@ #include #include +#include +#include #include #include @@ -19,10 +21,11 @@ NamesAndTypesList StorageSystemFetches::getNamesAndTypes() {"total_size_bytes_compressed", std::make_shared()}, {"bytes_read_compressed", std::make_shared()}, {"source_replica_path", std::make_shared()}, - {"source_replica_address", std::make_shared()}, + {"source_replica_hostname", std::make_shared()}, + {"source_replica_port", std::make_shared()}, {"interserver_scheme", std::make_shared()}, + {"URI", std::make_shared()}, {"to_detached", std::make_shared()}, - {"memory_usage", std::make_shared()}, {"thread_id", std::make_shared()}, }; } @@ -48,10 +51,11 @@ void StorageSystemFetches::fillData(MutableColumns & res_columns, const Context res_columns[i++]->insert(fetch.total_size_bytes_compressed); res_columns[i++]->insert(fetch.bytes_read_compressed); res_columns[i++]->insert(fetch.source_replica_path); - res_columns[i++]->insert(fetch.source_replica_address); + res_columns[i++]->insert(fetch.source_replica_hostname); + 
res_columns[i++]->insert(fetch.source_replica_port); res_columns[i++]->insert(fetch.interserver_scheme); + res_columns[i++]->insert(fetch.uri); res_columns[i++]->insert(fetch.to_detached); - res_columns[i++]->insert(fetch.memory_usage); res_columns[i++]->insert(fetch.thread_id); } } diff --git a/src/Storages/System/StorageSystemFetches.h b/src/Storages/System/StorageSystemFetches.h index be1b66193bf..7c93bce7ee7 100644 --- a/src/Storages/System/StorageSystemFetches.h +++ b/src/Storages/System/StorageSystemFetches.h @@ -1,7 +1,6 @@ #pragma once -#include -#include + #include #include @@ -11,6 +10,7 @@ namespace DB class Context; +/// system.fetches table. Takes data from context.getReplicatedFetchesList() class StorageSystemFetches final : public ext::shared_ptr_helper, public IStorageSystemOneBlock { friend struct ext::shared_ptr_helper; diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 6b24bc30460..7c44065320b 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -814,6 +814,7 @@ services: tmpfs: {tmpfs} cap_add: - SYS_PTRACE + - NET_ADMIN depends_on: {depends_on} user: '{user}' env_file: diff --git a/tests/integration/helpers/network.py b/tests/integration/helpers/network.py index 194903efff4..add812bea58 100644 --- a/tests/integration/helpers/network.py +++ b/tests/integration/helpers/network.py @@ -19,6 +19,7 @@ class PartitionManager: def __init__(self): self._iptables_rules = [] + self._netem_delayed_instances = [] _NetworkManager.get() def drop_instance_zk_connections(self, instance, action='DROP'): @@ -46,11 +47,18 @@ class PartitionManager: self._add_rule(create_rule(left, right)) self._add_rule(create_rule(right, left)) + def add_network_delay(self, instance, delay_ms): + self._add_tc_netem_delay(instance, delay_ms) + def heal_all(self): while self._iptables_rules: rule = self._iptables_rules.pop() _NetworkManager.get().delete_iptables_rule(**rule) + while self._netem_delayed_instances: + instance = self._netem_delayed_instances.pop() + instance.exec_in_container(["bash", "-c", "tc qdisc del dev eth0 root netem"], user="root") + def pop_rules(self): res = self._iptables_rules[:] self.heal_all() @@ -73,6 +81,10 @@ class PartitionManager: _NetworkManager.get().delete_iptables_rule(**rule) self._iptables_rules.remove(rule) + def _add_tc_netem_delay(self, instance, delay_ms): + instance.exec_in_container(["bash", "-c", "tc qdisc add dev eth0 root netem delay {}ms".format(delay_ms)], user="root") + self._netem_delayed_instances.append(instance) + def __enter__(self): return self diff --git a/tests/integration/test_system_fetches/__init__.py b/tests/integration/test_system_fetches/__init__.py new file mode 100644 index 00000000000..e5a0d9b4834 --- /dev/null +++ b/tests/integration/test_system_fetches/__init__.py @@ -0,0 +1 @@ +#!/usr/bin/env python3 diff --git a/tests/integration/test_system_fetches/test.py b/tests/integration/test_system_fetches/test.py new file mode 100644 index 00000000000..2746ced300c --- /dev/null +++ b/tests/integration/test_system_fetches/test.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 + + +import pytest +import time +from helpers.cluster import ClickHouseCluster +from helpers.network import PartitionManager +from helpers.test_tools import assert_eq_with_retry +import random +import string +import json + +cluster = ClickHouseCluster(__file__) +node1 = cluster.add_instance('node1', with_zookeeper=True) +node2 = cluster.add_instance('node2', with_zookeeper=True) + 
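+# Note on the helper used below: PartitionManager.add_network_delay(instance, delay_ms),
+# added in this patch, runs `tc qdisc add dev eth0 root netem delay <delay_ms>ms` inside
+# the instance's container (hence the new NET_ADMIN capability and the iproute2 package
+# in the integration image), and heal_all() deletes the qdisc afterwards. The delay keeps
+# the fetch slow enough to be observed in system.fetches.
+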
+@pytest.fixture(scope="module")
+def started_cluster():
+    try:
+        cluster.start()
+
+        yield cluster
+
+    finally:
+        cluster.shutdown()
+
+def get_random_string(length):
+    return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length))
+
+def test_system_fetches(started_cluster):
+    node1.query("CREATE TABLE t (key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/test/t', '1') ORDER BY tuple()")
+    node2.query("CREATE TABLE t (key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/test/t', '2') ORDER BY tuple()")
+
+    with PartitionManager() as pm:
+        node2.query("SYSTEM STOP FETCHES t")
+        node1.query("INSERT INTO t SELECT number, '{}' FROM numbers(10000)".format(get_random_string(104857)))
+        pm.add_network_delay(node1, 80)
+        node2.query("SYSTEM START FETCHES t")
+        fetches_result = []
+        for _ in range(1000):
+            result = json.loads(node2.query("SELECT * FROM system.fetches FORMAT JSON"))
+            if not result["data"]:
+                if fetches_result:
+                    break
+                time.sleep(0.1)
+            else:
+                fetches_result.append(result["data"][0])
+                print(fetches_result[-1])
+                time.sleep(0.1)
+
+    node2.query("SYSTEM SYNC REPLICA t", timeout=10)
+    assert node2.query("SELECT COUNT() FROM t") == "10000\n"
+
+    for elem in fetches_result:
+        elem['bytes_read_compressed'] = float(elem['bytes_read_compressed'])
+        elem['total_size_bytes_compressed'] = float(elem['total_size_bytes_compressed'])
+        elem['progress'] = float(elem['progress'])
+        elem['elapsed'] = float(elem['elapsed'])
+
+    assert len(fetches_result) > 0
+    first_non_empty = fetches_result[0]
+
+    assert first_non_empty['database'] == "default"
+    assert first_non_empty['table'] == "t"
+    assert first_non_empty['source_replica_hostname'] == 'node1'
+    assert first_non_empty['source_replica_port'] == 9009
+    assert first_non_empty['source_replica_path'] == '/clickhouse/test/t/replicas/1'
+    assert first_non_empty['interserver_scheme'] == 'http'
+    assert first_non_empty['partition_id'] == 'all'
+    assert first_non_empty['URI'].startswith('http://node1:9009/?endpoint=DataPartsExchange')
+
+    for elem in fetches_result:
+        assert elem['bytes_read_compressed'] <= elem['total_size_bytes_compressed'], "Bytes read ({}) more than total bytes ({}). It's a bug".format(elem['bytes_read_compressed'], elem['total_size_bytes_compressed'])
+        assert 0.0 <= elem['progress'] <= 1.0, "Progress should not be less than 0 or bigger than 1, got {}".format(elem['progress'])
+        assert 0.0 <= elem['elapsed'], "Elapsed time should be non-negative, got {}".format(elem['elapsed'])
+
+    prev_progress = first_non_empty['progress']
+    for elem in fetches_result:
+        assert elem['progress'] >= prev_progress, "Progress decreasing prev {}, next {}? It's a bug".format(prev_progress, elem['progress'])
+        prev_progress = elem['progress']
+
+    prev_bytes = first_non_empty['bytes_read_compressed']
+    for elem in fetches_result:
+        assert elem['bytes_read_compressed'] >= prev_bytes, "Bytes read decreasing prev {}, next {}? It's a bug".format(prev_bytes, elem['bytes_read_compressed'])
+        prev_bytes = elem['bytes_read_compressed']
+
+    prev_elapsed = first_non_empty['elapsed']
+    for elem in fetches_result:
+        assert elem['elapsed'] >= prev_elapsed, "Elapsed time decreasing prev {}, next {}?
It's a bug".format(prev_elapsed, elem['elapsed']) + prev_elapsed = elem['elapsed'] From 31d63efdac7d888de000b6a3b428181f2fc81d37 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 27 Oct 2020 15:26:10 +0300 Subject: [PATCH 321/432] Fix style --- src/Storages/MergeTree/BackgroundProcessList.h | 2 +- src/Storages/ya.make | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/BackgroundProcessList.h b/src/Storages/MergeTree/BackgroundProcessList.h index f90aa1c88a8..11516ad2ce7 100644 --- a/src/Storages/MergeTree/BackgroundProcessList.h +++ b/src/Storages/MergeTree/BackgroundProcessList.h @@ -85,5 +85,5 @@ public: virtual void onEntryDestroy(const Entry & /* entry */) {} virtual inline ~BackgroundProcessList() {} }; - + } diff --git a/src/Storages/ya.make b/src/Storages/ya.make index 107433b5e73..289a226ce7d 100644 --- a/src/Storages/ya.make +++ b/src/Storages/ya.make @@ -91,6 +91,7 @@ SRCS( MergeTree/MergeTreeWriteAheadLog.cpp MergeTree/MergeType.cpp MergeTree/registerStorageMergeTree.cpp + MergeTree/ReplicatedFetchesList.cpp MergeTree/ReplicatedMergeTreeAddress.cpp MergeTree/ReplicatedMergeTreeAltersSequence.cpp MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp @@ -155,6 +156,7 @@ SRCS( System/StorageSystemDistributionQueue.cpp System/StorageSystemEnabledRoles.cpp System/StorageSystemEvents.cpp + System/StorageSystemFetches.cpp System/StorageSystemFormats.cpp System/StorageSystemFunctions.cpp System/StorageSystemGrants.cpp From 3824fcb0762878cff9c927e63700912b63a850af Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 27 Oct 2020 15:47:42 +0300 Subject: [PATCH 322/432] Rename file and some tweaks --- src/Interpreters/Context.cpp | 2 +- .../MergeTree/BackgroundProcessList.h | 2 ++ src/Storages/MergeTree/DataPartsExchange.cpp | 19 ++++++++++--------- ...etchesList.cpp => ReplicatedFetchList.cpp} | 2 +- ...tedFetchesList.h => ReplicatedFetchList.h} | 0 src/Storages/System/StorageSystemFetches.cpp | 2 +- src/Storages/System/StorageSystemFetches.h | 2 +- src/Storages/ya.make | 2 +- tests/integration/test_system_fetches/test.py | 3 +++ 9 files changed, 20 insertions(+), 14 deletions(-) rename src/Storages/MergeTree/{ReplicatedFetchesList.cpp => ReplicatedFetchList.cpp} (97%) rename src/Storages/MergeTree/{ReplicatedFetchesList.h => ReplicatedFetchList.h} (100%) diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 38e8ffdf76a..293ac4ab9e7 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/Storages/MergeTree/BackgroundProcessList.h b/src/Storages/MergeTree/BackgroundProcessList.h index 11516ad2ce7..2f4cc020796 100644 --- a/src/Storages/MergeTree/BackgroundProcessList.h +++ b/src/Storages/MergeTree/BackgroundProcessList.h @@ -9,6 +9,8 @@ namespace DB { +/// Common code for background processes lists, like system.merges and system.fetches +/// Look at examples in MergeList and ReplicatedFetchList template class BackgroundProcessList; diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index c4a7816581a..bb92202b004 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -12,7 +12,7 @@ #include #include #include -#include +#include namespace CurrentMetrics @@ -305,14 +305,6 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( /// We don't know real size of part because sender server version is 
too old reservation = data.makeEmptyReservationOnLargestDisk(); } - auto storage_id = data.getStorageID(); - String new_part_path = data.getFullPathOnDisk(reservation->getDisk()) + part_name + "/"; - auto entry = data.global_context.getReplicatedFetchList().insert( - storage_id.getDatabaseName(), storage_id.getTableName(), - part_info.partition_id, part_name, new_part_path, - replica_path, uri, to_detached, sum_files_size); - - in.setNextReadCallback(ReplicatedFetchReadCallback(*entry)); bool sync = (data_settings->min_compressed_bytes_to_fsync_after_fetch && sum_files_size >= data_settings->min_compressed_bytes_to_fsync_after_fetch); @@ -321,6 +313,15 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( if (server_protocol_version >= REPLICATION_PROTOCOL_VERSION_WITH_PARTS_TYPE) readStringBinary(part_type, in); + auto storage_id = data.getStorageID(); + String new_part_path = part_type == "InMemory" ? "memory" : data.getFullPathOnDisk(reservation->getDisk()) + part_name + "/"; + auto entry = data.global_context.getReplicatedFetchList().insert( + storage_id.getDatabaseName(), storage_id.getTableName(), + part_info.partition_id, part_name, new_part_path, + replica_path, uri, to_detached, sum_files_size); + + in.setNextReadCallback(ReplicatedFetchReadCallback(*entry)); + return part_type == "InMemory" ? downloadPartToMemory(part_name, metadata_snapshot, std::move(reservation), in) : downloadPartToDisk(part_name, replica_path, to_detached, tmp_prefix_, sync, std::move(reservation), in); } diff --git a/src/Storages/MergeTree/ReplicatedFetchesList.cpp b/src/Storages/MergeTree/ReplicatedFetchList.cpp similarity index 97% rename from src/Storages/MergeTree/ReplicatedFetchesList.cpp rename to src/Storages/MergeTree/ReplicatedFetchList.cpp index 92e60d9dde1..63971a7b2e1 100644 --- a/src/Storages/MergeTree/ReplicatedFetchesList.cpp +++ b/src/Storages/MergeTree/ReplicatedFetchList.cpp @@ -1,4 +1,4 @@ -#include +#include #include #include #include diff --git a/src/Storages/MergeTree/ReplicatedFetchesList.h b/src/Storages/MergeTree/ReplicatedFetchList.h similarity index 100% rename from src/Storages/MergeTree/ReplicatedFetchesList.h rename to src/Storages/MergeTree/ReplicatedFetchList.h diff --git a/src/Storages/System/StorageSystemFetches.cpp b/src/Storages/System/StorageSystemFetches.cpp index 0c992457c6c..080915db514 100644 --- a/src/Storages/System/StorageSystemFetches.cpp +++ b/src/Storages/System/StorageSystemFetches.cpp @@ -1,5 +1,5 @@ #include -#include +#include #include #include #include diff --git a/src/Storages/System/StorageSystemFetches.h b/src/Storages/System/StorageSystemFetches.h index 7c93bce7ee7..3e422c5bc01 100644 --- a/src/Storages/System/StorageSystemFetches.h +++ b/src/Storages/System/StorageSystemFetches.h @@ -10,7 +10,7 @@ namespace DB class Context; -/// system.fetches table. Takes data from context.getReplicatedFetchesList() +/// system.fetches table. 
Takes data from context.getReplicatedFetchList() class StorageSystemFetches final : public ext::shared_ptr_helper, public IStorageSystemOneBlock { friend struct ext::shared_ptr_helper; diff --git a/src/Storages/ya.make b/src/Storages/ya.make index 289a226ce7d..063b0d3296b 100644 --- a/src/Storages/ya.make +++ b/src/Storages/ya.make @@ -91,7 +91,7 @@ SRCS( MergeTree/MergeTreeWriteAheadLog.cpp MergeTree/MergeType.cpp MergeTree/registerStorageMergeTree.cpp - MergeTree/ReplicatedFetchesList.cpp + MergeTree/ReplicatedFetchList.cpp MergeTree/ReplicatedMergeTreeAddress.cpp MergeTree/ReplicatedMergeTreeAltersSequence.cpp MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp diff --git a/tests/integration/test_system_fetches/test.py b/tests/integration/test_system_fetches/test.py index 2746ced300c..bd2345984ea 100644 --- a/tests/integration/test_system_fetches/test.py +++ b/tests/integration/test_system_fetches/test.py @@ -66,6 +66,9 @@ def test_system_fetches(started_cluster): assert first_non_empty['source_replica_port'] == 9009 assert first_non_empty['source_replica_path'] == '/clickhouse/test/t/replicas/1' assert first_non_empty['interserver_scheme'] == 'http' + assert first_non_empty['result_part_name'] == 'all_0_0_0' + assert first_non_empty['result_part_path'].startswith('/var/lib/clickhouse/') + assert first_non_empty['result_part_path'].endswith('all_0_0_0/') assert first_non_empty['partition_id'] == 'all' assert first_non_empty['URI'].startswith('http://node1:9009/?endpoint=DataPartsExchange') From db16942716a625df8884dfbf10c39586c1d8e6b2 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 27 Oct 2020 15:50:43 +0300 Subject: [PATCH 323/432] Better comment --- src/IO/ReadWriteBufferFromHTTP.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/IO/ReadWriteBufferFromHTTP.h b/src/IO/ReadWriteBufferFromHTTP.h index 10cacf10dd9..0a3b4d374b5 100644 --- a/src/IO/ReadWriteBufferFromHTTP.h +++ b/src/IO/ReadWriteBufferFromHTTP.h @@ -155,6 +155,7 @@ namespace detail } public: + using NextReadCallback = std::function; using OutStreamCallback = std::function; explicit ReadWriteBufferFromHTTPBase( @@ -222,9 +223,14 @@ namespace detail return def; } - void setNextReadCallback(std::function read_callback_) + /// Set function to call on each nextImpl, useful when you need to track + /// progress. 
+ /// NOTE: parameter on each call is not incremental -- it's all bytes count + /// passed through the buffer + void setNextReadCallback(NextReadCallback read_callback_) { read_callback = read_callback_; + /// Some data maybe already read read_callback(count()); } }; From ee4e6caf70857c5e4b57a66fb50899eee416930b Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 27 Oct 2020 15:52:49 +0300 Subject: [PATCH 324/432] Remove redundant metric increment --- src/Storages/MergeTree/DataPartsExchange.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index bb92202b004..e87ae599521 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -18,7 +18,6 @@ namespace CurrentMetrics { extern const Metric ReplicatedSend; - extern const Metric ReplicatedFetch; } namespace DB @@ -380,8 +379,6 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDisk( if (disk->exists(part_download_path)) throw Exception("Directory " + fullPath(disk, part_download_path) + " already exists.", ErrorCodes::DIRECTORY_ALREADY_EXISTS); - CurrentMetrics::Increment metric_increment{CurrentMetrics::ReplicatedFetch}; - disk->createDirectories(part_download_path); std::optional sync_guard; From f35edfb2a59ea7d413d13322cb60149f33e7951d Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 27 Oct 2020 15:57:55 +0300 Subject: [PATCH 325/432] Remove some unused fields --- src/Storages/MergeTree/ReplicatedFetchList.cpp | 3 +-- src/Storages/MergeTree/ReplicatedFetchList.h | 18 ++++++++++-------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/src/Storages/MergeTree/ReplicatedFetchList.cpp b/src/Storages/MergeTree/ReplicatedFetchList.cpp index 63971a7b2e1..82bc8ae21e0 100644 --- a/src/Storages/MergeTree/ReplicatedFetchList.cpp +++ b/src/Storages/MergeTree/ReplicatedFetchList.cpp @@ -1,10 +1,10 @@ #include #include #include -#include namespace DB { + ReplicatedFetchListElement::ReplicatedFetchListElement( const std::string & database_, const std::string & table_, const std::string & partition_id_, const std::string & result_part_name_, @@ -26,7 +26,6 @@ ReplicatedFetchListElement::ReplicatedFetchListElement( { } - ReplicatedFetchInfo ReplicatedFetchListElement::getInfo() const { ReplicatedFetchInfo res; diff --git a/src/Storages/MergeTree/ReplicatedFetchList.h b/src/Storages/MergeTree/ReplicatedFetchList.h index 670e1fb984a..ed134de3f2c 100644 --- a/src/Storages/MergeTree/ReplicatedFetchList.h +++ b/src/Storages/MergeTree/ReplicatedFetchList.h @@ -50,21 +50,22 @@ struct ReplicatedFetchListElement : private boost::noncopyable const std::string result_part_name; const std::string result_part_path; - std::string source_replica_path; - std::string source_replica_hostname; - UInt16 source_replica_port; - std::string interserver_scheme; - std::string uri; + const std::string source_replica_path; + const std::string source_replica_hostname; + const UInt16 source_replica_port; + const std::string interserver_scheme; + const std::string uri; const UInt8 to_detached; Stopwatch watch; std::atomic progress{}; - std::atomic is_cancelled{}; + /// How many bytes already read std::atomic bytes_read_compressed{}; - UInt64 total_size_bytes_compressed{}; + /// Total bytes to read + const UInt64 total_size_bytes_compressed{}; - UInt64 thread_id; + const UInt64 thread_id; ReplicatedFetchListElement( const std::string & database_, const std::string & table_, @@ -78,6 +79,7 @@ struct 
ReplicatedFetchListElement : private boost::noncopyable using ReplicatedFetchListEntry = BackgroundProcessListEntry; +/// List of currently processing replicated fetches class ReplicatedFetchList final : public BackgroundProcessList { private: From e40c509392ce7a8634f67837dd1c608dce78f5d3 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 27 Oct 2020 16:00:40 +0300 Subject: [PATCH 326/432] More clear callback name --- src/IO/ReadWriteBufferFromHTTP.h | 14 +++++++------- src/Storages/MergeTree/DataPartsExchange.cpp | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/IO/ReadWriteBufferFromHTTP.h b/src/IO/ReadWriteBufferFromHTTP.h index 0a3b4d374b5..6ff99690bb4 100644 --- a/src/IO/ReadWriteBufferFromHTTP.h +++ b/src/IO/ReadWriteBufferFromHTTP.h @@ -106,7 +106,7 @@ namespace detail std::vector cookies; HTTPHeaderEntries http_header_entries; RemoteHostFilter remote_host_filter; - std::function read_callback; + std::function next_callback; std::istream * call(const Poco::URI uri_, Poco::Net::HTTPResponse & response) { @@ -155,7 +155,7 @@ namespace detail } public: - using NextReadCallback = std::function; + using NextCallback = std::function; using OutStreamCallback = std::function; explicit ReadWriteBufferFromHTTPBase( @@ -206,8 +206,8 @@ namespace detail bool nextImpl() override { - if (read_callback) - read_callback(count()); + if (next_callback) + next_callback(count()); if (!impl->next()) return false; internal_buffer = impl->buffer(); @@ -227,11 +227,11 @@ namespace detail /// progress. /// NOTE: parameter on each call is not incremental -- it's all bytes count /// passed through the buffer - void setNextReadCallback(NextReadCallback read_callback_) + void setNextCallback(NextCallback next_callback_) { - read_callback = read_callback_; + next_callback = next_callback_; /// Some data maybe already read - read_callback(count()); + next_callback(count()); } }; } diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index e87ae599521..a6c3c562d91 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -319,7 +319,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( part_info.partition_id, part_name, new_part_path, replica_path, uri, to_detached, sum_files_size); - in.setNextReadCallback(ReplicatedFetchReadCallback(*entry)); + in.setNextCallback(ReplicatedFetchReadCallback(*entry)); return part_type == "InMemory" ? 
downloadPartToMemory(part_name, metadata_snapshot, std::move(reservation), in) : downloadPartToDisk(part_name, replica_path, to_detached, tmp_prefix_, sync, std::move(reservation), in); From 3d3d246bdcd1140c272df9676868bac33283e2e0 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 27 Oct 2020 16:07:50 +0300 Subject: [PATCH 327/432] Rename table --- src/Storages/MergeTree/DataPartsExchange.cpp | 1 + ...SystemFetches.cpp => StorageSystemReplicatedFetches.cpp} | 6 +++--- ...rageSystemFetches.h => StorageSystemReplicatedFetches.h} | 6 +++--- src/Storages/System/attachSystemTables.cpp | 4 ++-- src/Storages/ya.make | 2 +- .../__init__.py | 0 .../test.py | 4 ++-- 7 files changed, 12 insertions(+), 11 deletions(-) rename src/Storages/System/{StorageSystemFetches.cpp => StorageSystemReplicatedFetches.cpp} (90%) rename src/Storages/System/{StorageSystemFetches.h => StorageSystemReplicatedFetches.h} (59%) rename tests/integration/{test_system_fetches => test_system_replicated_fetches}/__init__.py (100%) rename tests/integration/{test_system_fetches => test_system_replicated_fetches}/test.py (97%) diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index a6c3c562d91..b9c4c6b625e 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -52,6 +52,7 @@ std::string getEndpointId(const std::string & node_id) return "DataPartsExchange:" + node_id; } +/// Simple functor for tracking fetch progress in system.fetches table. struct ReplicatedFetchReadCallback { ReplicatedFetchList::Entry & replicated_fetch_entry; diff --git a/src/Storages/System/StorageSystemFetches.cpp b/src/Storages/System/StorageSystemReplicatedFetches.cpp similarity index 90% rename from src/Storages/System/StorageSystemFetches.cpp rename to src/Storages/System/StorageSystemReplicatedFetches.cpp index 080915db514..53bec5aa42f 100644 --- a/src/Storages/System/StorageSystemFetches.cpp +++ b/src/Storages/System/StorageSystemReplicatedFetches.cpp @@ -1,4 +1,4 @@ -#include +#include #include #include #include @@ -8,7 +8,7 @@ namespace DB { -NamesAndTypesList StorageSystemFetches::getNamesAndTypes() +NamesAndTypesList StorageSystemReplicatedFetches::getNamesAndTypes() { return { {"database", std::make_shared()}, @@ -30,7 +30,7 @@ NamesAndTypesList StorageSystemFetches::getNamesAndTypes() }; } -void StorageSystemFetches::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const +void StorageSystemReplicatedFetches::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const { const auto access = context.getAccess(); const bool check_access_for_tables = !access->isGranted(AccessType::SHOW_TABLES); diff --git a/src/Storages/System/StorageSystemFetches.h b/src/Storages/System/StorageSystemReplicatedFetches.h similarity index 59% rename from src/Storages/System/StorageSystemFetches.h rename to src/Storages/System/StorageSystemReplicatedFetches.h index 3e422c5bc01..4ec1f9b9f6c 100644 --- a/src/Storages/System/StorageSystemFetches.h +++ b/src/Storages/System/StorageSystemReplicatedFetches.h @@ -11,11 +11,11 @@ namespace DB class Context; /// system.fetches table. 
Takes data from context.getReplicatedFetchList() -class StorageSystemFetches final : public ext::shared_ptr_helper, public IStorageSystemOneBlock +class StorageSystemReplicatedFetches final : public ext::shared_ptr_helper, public IStorageSystemOneBlock { - friend struct ext::shared_ptr_helper; + friend struct ext::shared_ptr_helper; public: - std::string getName() const override { return "SystemFetches"; } + std::string getName() const override { return "SystemReplicatedFetches"; } static NamesAndTypesList getNamesAndTypes(); diff --git a/src/Storages/System/attachSystemTables.cpp b/src/Storages/System/attachSystemTables.cpp index 012b54e3c45..a4158f256c1 100644 --- a/src/Storages/System/attachSystemTables.cpp +++ b/src/Storages/System/attachSystemTables.cpp @@ -19,7 +19,7 @@ #include #include -#include +#include #include #include #include @@ -138,7 +138,7 @@ void attachSystemTablesServer(IDatabase & system_database, bool has_zookeeper) attach(system_database, "clusters"); attach(system_database, "graphite_retentions"); attach(system_database, "macros"); - attach(system_database, "fetches"); + attach(system_database, "replicated_fetches"); if (has_zookeeper) attach(system_database, "zookeeper"); diff --git a/src/Storages/ya.make b/src/Storages/ya.make index 063b0d3296b..bf1a6982976 100644 --- a/src/Storages/ya.make +++ b/src/Storages/ya.make @@ -156,7 +156,6 @@ SRCS( System/StorageSystemDistributionQueue.cpp System/StorageSystemEnabledRoles.cpp System/StorageSystemEvents.cpp - System/StorageSystemFetches.cpp System/StorageSystemFormats.cpp System/StorageSystemFunctions.cpp System/StorageSystemGrants.cpp @@ -179,6 +178,7 @@ SRCS( System/StorageSystemQuotasUsage.cpp System/StorageSystemQuotaUsage.cpp System/StorageSystemReplicas.cpp + System/StorageSystemReplicatedFetches.cpp System/StorageSystemReplicationQueue.cpp System/StorageSystemRoleGrants.cpp System/StorageSystemRoles.cpp diff --git a/tests/integration/test_system_fetches/__init__.py b/tests/integration/test_system_replicated_fetches/__init__.py similarity index 100% rename from tests/integration/test_system_fetches/__init__.py rename to tests/integration/test_system_replicated_fetches/__init__.py diff --git a/tests/integration/test_system_fetches/test.py b/tests/integration/test_system_replicated_fetches/test.py similarity index 97% rename from tests/integration/test_system_fetches/test.py rename to tests/integration/test_system_replicated_fetches/test.py index bd2345984ea..cefb3256893 100644 --- a/tests/integration/test_system_fetches/test.py +++ b/tests/integration/test_system_replicated_fetches/test.py @@ -27,7 +27,7 @@ def started_cluster(): def get_random_string(length): return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length)) -def test_system_fetches(started_cluster): +def test_system_replicated_fetches(started_cluster): node1.query("CREATE TABLE t (key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/test/t', '1') ORDER BY tuple()") node2.query("CREATE TABLE t (key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/test/t', '2') ORDER BY tuple()") @@ -38,7 +38,7 @@ def test_system_fetches(started_cluster): node2.query("SYSTEM START FETCHES t") fetches_result = [] for _ in range(1000): - result = json.loads(node2.query("SELECT * FROM system.fetches FORMAT JSON")) + result = json.loads(node2.query("SELECT * FROM system.replicated_fetches FORMAT JSON")) if not result["data"]: if fetches_result: break From afc28c84a03d22af4c378af0e48dbd40f7307bc9 Mon Sep 17 00:00:00 
2001 From: Nikolai Kochetov Date: Tue, 27 Oct 2020 16:07:58 +0300 Subject: [PATCH 328/432] Fix double free for shared exception message in case of dictGet from not loaded dictionary. --- src/Functions/FunctionsExternalDictionaries.h | 28 +++++++++++++++---- 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/src/Functions/FunctionsExternalDictionaries.h b/src/Functions/FunctionsExternalDictionaries.h index 0fae3de1fb2..a0ef6c893ed 100644 --- a/src/Functions/FunctionsExternalDictionaries.h +++ b/src/Functions/FunctionsExternalDictionaries.h @@ -57,6 +57,7 @@ namespace ErrorCodes extern const int TYPE_MISMATCH; extern const int ILLEGAL_COLUMN; extern const int BAD_ARGUMENTS; + extern const int DICTIONARIES_WAS_NOT_LOADED; } @@ -82,14 +83,29 @@ public: std::shared_ptr getDictionary(const String & dictionary_name) { - String resolved_name = DatabaseCatalog::instance().resolveDictionaryName(dictionary_name); - auto dict = external_loader.getDictionary(resolved_name); - if (!access_checked) + // Exception from external_loader may be shared for dictGet call from multiple threads. + // Don't just rethrow it, because sharing the same exception object + // between multiple threads can lead to weird effects if they decide to + // modify it, for example, by adding some error context. + try { - context.checkAccess(AccessType::dictGet, dict->getDatabaseOrNoDatabaseTag(), dict->getDictionaryID().getTableName()); - access_checked = true; + String resolved_name = DatabaseCatalog::instance().resolveDictionaryName(dictionary_name); + auto dict = external_loader.getDictionary(resolved_name); + if (!access_checked) + { + context.checkAccess(AccessType::dictGet, dict->getDatabaseOrNoDatabaseTag(), dict->getDictionaryID().getTableName()); + access_checked = true; + } + return dict; + } + catch (...) + { + throw DB::Exception(ErrorCodes::DICTIONARIES_WAS_NOT_LOADED, + "Failed to load dictionary '{}': {}", + dictionary_name, + getCurrentExceptionMessage(true /*with stack trace*/, + true /*check embedded stack trace*/)); } - return dict; } std::shared_ptr getDictionary(const ColumnWithTypeAndName & column) From 6253d1466abd894ccf3b678cb64368ce922e9862 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 27 Oct 2020 16:09:14 +0300 Subject: [PATCH 329/432] Fix comments --- src/Storages/MergeTree/BackgroundProcessList.h | 2 +- src/Storages/MergeTree/DataPartsExchange.cpp | 2 +- src/Storages/System/StorageSystemReplicatedFetches.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Storages/MergeTree/BackgroundProcessList.h b/src/Storages/MergeTree/BackgroundProcessList.h index 2f4cc020796..b447753ca74 100644 --- a/src/Storages/MergeTree/BackgroundProcessList.h +++ b/src/Storages/MergeTree/BackgroundProcessList.h @@ -9,7 +9,7 @@ namespace DB { -/// Common code for background processes lists, like system.merges and system.fetches +/// Common code for background processes lists, like system.merges and system.replicated_fetches /// Look at examples in MergeList and ReplicatedFetchList template diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index b9c4c6b625e..2e8705e761d 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -52,7 +52,7 @@ std::string getEndpointId(const std::string & node_id) return "DataPartsExchange:" + node_id; } -/// Simple functor for tracking fetch progress in system.fetches table. 
+/// Simple functor for tracking fetch progress in system.replicated_fetches table. struct ReplicatedFetchReadCallback { ReplicatedFetchList::Entry & replicated_fetch_entry; diff --git a/src/Storages/System/StorageSystemReplicatedFetches.h b/src/Storages/System/StorageSystemReplicatedFetches.h index 4ec1f9b9f6c..34081923e4f 100644 --- a/src/Storages/System/StorageSystemReplicatedFetches.h +++ b/src/Storages/System/StorageSystemReplicatedFetches.h @@ -10,7 +10,7 @@ namespace DB class Context; -/// system.fetches table. Takes data from context.getReplicatedFetchList() +/// system.replicated_fetches table. Takes data from context.getReplicatedFetchList() class StorageSystemReplicatedFetches final : public ext::shared_ptr_helper, public IStorageSystemOneBlock { friend struct ext::shared_ptr_helper; From b39b2932bc269cc92fbceb713f5417c523af17fc Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 27 Oct 2020 16:49:08 +0300 Subject: [PATCH 330/432] Added test. --- ...2_dictionary_load_exception_race.reference | 0 .../01542_dictionary_load_exception_race.sh | 44 +++++++++++++++++++ 2 files changed, 44 insertions(+) create mode 100644 tests/queries/0_stateless/01542_dictionary_load_exception_race.reference create mode 100755 tests/queries/0_stateless/01542_dictionary_load_exception_race.sh diff --git a/tests/queries/0_stateless/01542_dictionary_load_exception_race.reference b/tests/queries/0_stateless/01542_dictionary_load_exception_race.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01542_dictionary_load_exception_race.sh b/tests/queries/0_stateless/01542_dictionary_load_exception_race.sh new file mode 100755 index 00000000000..8782faac003 --- /dev/null +++ b/tests/queries/0_stateless/01542_dictionary_load_exception_race.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. 
"$CURDIR"/../shell_config.sh + + +$CLICKHOUSE_CLIENT --query "DROP DATABASE IF EXISTS database_for_dict" +$CLICKHOUSE_CLIENT --query "CREATE DATABASE database_for_dict" +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS database_for_dict.table_for_dict" +$CLICKHOUSE_CLIENT --query "CREATE TABLE database_for_dict.table_for_dict (key_column UInt64, second_column UInt64, third_column String) ENGINE = MergeTree() ORDER BY key_column" +$CLICKHOUSE_CLIENT --query "INSERT INTO database_for_dict.table_for_dict VALUES (100500, 10000000, 'Hello world')" + +$CLICKHOUSE_CLIENT --query "DROP DATABASE IF EXISTS ordinary_db" +$CLICKHOUSE_CLIENT --query "CREATE DATABASE ordinary_db" +$CLICKHOUSE_CLIENT --query "CREATE DICTIONARY ordinary_db.dict1 ( key_column UInt64 DEFAULT 0, second_column UInt64 DEFAULT 1, third_column String DEFAULT 'qqq' ) PRIMARY KEY key_column SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_for_dict' PASSWORD '' DB 'database_for_dict')) LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT()) SETTINGS(max_result_bytes=1)" + +function dict_get_thread() +{ + while true; do + $CLICKHOUSE_CLIENT --query "SELECT dictGetString('ordinary_db.dict1', 'third_column', toUInt64(rand() % 1000)) from numbers(2)" &>/dev/null + done +} + +export -f dict_get_thread; + +TIMEOUT=10 + +timeout $TIMEOUT bash -c dict_get_thread 2> /dev/null & +timeout $TIMEOUT bash -c dict_get_thread 2> /dev/null & +timeout $TIMEOUT bash -c dict_get_thread 2> /dev/null & +timeout $TIMEOUT bash -c dict_get_thread 2> /dev/null & +timeout $TIMEOUT bash -c dict_get_thread 2> /dev/null & +timeout $TIMEOUT bash -c dict_get_thread 2> /dev/null & +timeout $TIMEOUT bash -c dict_get_thread 2> /dev/null & +timeout $TIMEOUT bash -c dict_get_thread 2> /dev/null & +timeout $TIMEOUT bash -c dict_get_thread 2> /dev/null & +timeout $TIMEOUT bash -c dict_get_thread 2> /dev/null & +timeout $TIMEOUT bash -c dict_get_thread 2> /dev/null & +timeout $TIMEOUT bash -c dict_get_thread 2> /dev/null & + +wait + +$CLICKHOUSE_CLIENT --query "DROP DATABASE IF EXISTS ordinary_db" +$CLICKHOUSE_CLIENT --query "DROP DATABASE IF EXISTS database_for_dict" From 71298ba496344e3b6875c9925b01cacf1fbb535e Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 27 Oct 2020 17:21:51 +0300 Subject: [PATCH 331/432] Move fix to ExternalLoader::checkLoaded. --- src/Functions/FunctionsExternalDictionaries.h | 28 ++++--------------- src/Interpreters/ExternalLoader.cpp | 20 ++++++++++++- 2 files changed, 25 insertions(+), 23 deletions(-) diff --git a/src/Functions/FunctionsExternalDictionaries.h b/src/Functions/FunctionsExternalDictionaries.h index a0ef6c893ed..0fae3de1fb2 100644 --- a/src/Functions/FunctionsExternalDictionaries.h +++ b/src/Functions/FunctionsExternalDictionaries.h @@ -57,7 +57,6 @@ namespace ErrorCodes extern const int TYPE_MISMATCH; extern const int ILLEGAL_COLUMN; extern const int BAD_ARGUMENTS; - extern const int DICTIONARIES_WAS_NOT_LOADED; } @@ -83,29 +82,14 @@ public: std::shared_ptr getDictionary(const String & dictionary_name) { - // Exception from external_loader may be shared for dictGet call from multiple threads. - // Don't just rethrow it, because sharing the same exception object - // between multiple threads can lead to weird effects if they decide to - // modify it, for example, by adding some error context. 
- try + String resolved_name = DatabaseCatalog::instance().resolveDictionaryName(dictionary_name); + auto dict = external_loader.getDictionary(resolved_name); + if (!access_checked) { - String resolved_name = DatabaseCatalog::instance().resolveDictionaryName(dictionary_name); - auto dict = external_loader.getDictionary(resolved_name); - if (!access_checked) - { - context.checkAccess(AccessType::dictGet, dict->getDatabaseOrNoDatabaseTag(), dict->getDictionaryID().getTableName()); - access_checked = true; - } - return dict; - } - catch (...) - { - throw DB::Exception(ErrorCodes::DICTIONARIES_WAS_NOT_LOADED, - "Failed to load dictionary '{}': {}", - dictionary_name, - getCurrentExceptionMessage(true /*with stack trace*/, - true /*check embedded stack trace*/)); + context.checkAccess(AccessType::dictGet, dict->getDatabaseOrNoDatabaseTag(), dict->getDictionaryID().getTableName()); + access_checked = true; } + return dict; } std::shared_ptr getDictionary(const ColumnWithTypeAndName & column) diff --git a/src/Interpreters/ExternalLoader.cpp b/src/Interpreters/ExternalLoader.cpp index dcef36de175..b5f802aa4bc 100644 --- a/src/Interpreters/ExternalLoader.cpp +++ b/src/Interpreters/ExternalLoader.cpp @@ -28,6 +28,7 @@ namespace ErrorCodes { extern const int LOGICAL_ERROR; extern const int BAD_ARGUMENTS; + extern const int DICTIONARIES_WAS_NOT_LOADED; } @@ -1404,7 +1405,24 @@ void ExternalLoader::checkLoaded(const ExternalLoader::LoadResult & result, if (result.status == ExternalLoader::Status::LOADING) throw Exception(type_name + " '" + result.name + "' is still loading", ErrorCodes::BAD_ARGUMENTS); if (result.exception) - std::rethrow_exception(result.exception); + { + // Exception is shared for multiple threads. + // Don't just rethrow it, because sharing the same exception object + // between multiple threads can lead to weird effects if they decide to + // modify it, for example, by adding some error context. + try + { + std::rethrow_exception(result.exception); + } + catch (...) 
+ { + throw DB::Exception(ErrorCodes::DICTIONARIES_WAS_NOT_LOADED, + "Failed to load dictionary '{}': {}", + result.name, + getCurrentExceptionMessage(true /*with stack trace*/, + true /*check embedded stack trace*/)); + } + } if (result.status == ExternalLoader::Status::NOT_EXIST) throw Exception(type_name + " '" + result.name + "' not found", ErrorCodes::BAD_ARGUMENTS); if (result.status == ExternalLoader::Status::NOT_LOADED) From 382a6e62ecbe38d6c4a0b2f1d7ef3d6f9abca226 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Tue, 27 Oct 2020 17:55:03 +0300 Subject: [PATCH 332/432] Update AggregateFunctionStudentTTest.h --- src/AggregateFunctions/AggregateFunctionStudentTTest.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/AggregateFunctions/AggregateFunctionStudentTTest.h b/src/AggregateFunctions/AggregateFunctionStudentTTest.h index 5084e34e56f..0aef8f3ee2a 100644 --- a/src/AggregateFunctions/AggregateFunctionStudentTTest.h +++ b/src/AggregateFunctions/AggregateFunctionStudentTTest.h @@ -26,7 +26,7 @@ namespace ErrorCodes #if defined(OS_DARWIN) extern "C" { - double lgamma_r(double x, int * signgamp); + double lgammal_r(double x, int * signgamp); } #endif From 42eb784a26937c7c21ac668a80b8a95cfd5332fa Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Tue, 27 Oct 2020 17:55:19 +0300 Subject: [PATCH 333/432] Update AggregateFunctionWelchTTest.h --- src/AggregateFunctions/AggregateFunctionWelchTTest.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/AggregateFunctions/AggregateFunctionWelchTTest.h b/src/AggregateFunctions/AggregateFunctionWelchTTest.h index 8fd7ebeee6b..b598f25162e 100644 --- a/src/AggregateFunctions/AggregateFunctionWelchTTest.h +++ b/src/AggregateFunctions/AggregateFunctionWelchTTest.h @@ -27,7 +27,7 @@ namespace ErrorCodes #if defined(OS_DARWIN) extern "C" { - double lgamma_r(double x, int * signgamp); + double lgammal_r(double x, int * signgamp); } #endif From 4bbaf3e99e4870f02756e26546c92c9134d1acc9 Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Tue, 27 Oct 2020 18:03:35 +0300 Subject: [PATCH 334/432] Update README.md --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 0c07021630b..e2a128eee25 100644 --- a/README.md +++ b/README.md @@ -17,4 +17,6 @@ ClickHouse is an open-source column-oriented database management system that all ## Upcoming Events -* [ClickHouse virtual office hours](https://www.eventbrite.com/e/clickhouse-october-virtual-meetup-office-hours-tickets-123129500651) on October 22, 2020. +* [The Second ClickHouse Meetup East](https://www.eventbrite.com/e/the-second-clickhouse-meetup-east-tickets-126787955187) on October 31, 2020. +* [ClickHouse for Enterprise Meetup (in Russian)](https://arenadata-events.timepad.ru/event/1465249/) on November 10, 2020. 
+ From 2a26136c9ef96d5f01a22b8ab12ec5d05fb586d3 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Tue, 27 Oct 2020 18:18:05 +0300 Subject: [PATCH 335/432] add __init__.py --- tests/integration/test_disabled_mysql_server/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 tests/integration/test_disabled_mysql_server/__init__.py diff --git a/tests/integration/test_disabled_mysql_server/__init__.py b/tests/integration/test_disabled_mysql_server/__init__.py new file mode 100644 index 00000000000..e69de29bb2d From 60f2d822d73e19649453e4db87855993610ea526 Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 27 Oct 2020 18:27:12 +0300 Subject: [PATCH 336/432] Fix fake race condition on system.merges merge_algorithm --- src/Storages/MergeTree/MergeList.cpp | 2 +- src/Storages/MergeTree/MergeList.h | 3 ++- .../MergeTree/MergeTreeDataMergerMutator.cpp | 16 ++++++++-------- .../test_concurrent_ttl_merges/test.py | 4 ++-- 4 files changed, 13 insertions(+), 12 deletions(-) diff --git a/src/Storages/MergeTree/MergeList.cpp b/src/Storages/MergeTree/MergeList.cpp index 5b044622b36..ba6c2a3d462 100644 --- a/src/Storages/MergeTree/MergeList.cpp +++ b/src/Storages/MergeTree/MergeList.cpp @@ -68,7 +68,7 @@ MergeInfo MergeListElement::getInfo() const res.memory_usage = memory_tracker.get(); res.thread_id = thread_id; res.merge_type = toString(merge_type); - res.merge_algorithm = toString(merge_algorithm); + res.merge_algorithm = toString(merge_algorithm.load(std::memory_order_relaxed)); for (const auto & source_part_name : source_part_names) res.source_part_names.emplace_back(source_part_name); diff --git a/src/Storages/MergeTree/MergeList.h b/src/Storages/MergeTree/MergeList.h index c1166c55703..09c61250afd 100644 --- a/src/Storages/MergeTree/MergeList.h +++ b/src/Storages/MergeTree/MergeList.h @@ -92,7 +92,8 @@ struct MergeListElement : boost::noncopyable UInt64 thread_id; MergeType merge_type; - MergeAlgorithm merge_algorithm; + /// Detected after merge already started + std::atomic merge_algorithm; MergeListElement(const std::string & database, const std::string & table, const FutureMergedMutatedPart & future_part); diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index b29966751f9..b7581907fd3 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -710,10 +710,10 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor size_t sum_input_rows_upper_bound = merge_entry->total_rows_count; size_t sum_compressed_bytes_upper_bound = merge_entry->total_size_bytes_compressed; - MergeAlgorithm merge_alg = chooseMergeAlgorithm(parts, sum_input_rows_upper_bound, gathering_columns, deduplicate, need_remove_expired_values); - merge_entry->merge_algorithm = merge_alg; + MergeAlgorithm chosen_merge_algorithm = chooseMergeAlgorithm(parts, sum_input_rows_upper_bound, gathering_columns, deduplicate, need_remove_expired_values); + merge_entry->merge_algorithm.store(chosen_merge_algorithm, std::memory_order_relaxed); - LOG_DEBUG(log, "Selected MergeAlgorithm: {}", toString(merge_alg)); + LOG_DEBUG(log, "Selected MergeAlgorithm: {}", toString(chosen_merge_algorithm)); /// Note: this is done before creating input streams, because otherwise data.data_parts_mutex /// (which is locked in data.getTotalActiveSizeInBytes()) @@ -728,7 +728,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor 
std::unique_ptr rows_sources_write_buf; std::optional column_sizes; - if (merge_alg == MergeAlgorithm::Vertical) + if (chosen_merge_algorithm == MergeAlgorithm::Vertical) { tmp_disk->createDirectories(new_part_tmp_path); rows_sources_file_path = new_part_tmp_path + "rows_sources"; @@ -818,7 +818,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor ProcessorPtr merged_transform; /// If merge is vertical we cannot calculate it - bool blocks_are_granules_size = (merge_alg == MergeAlgorithm::Vertical); + bool blocks_are_granules_size = (chosen_merge_algorithm == MergeAlgorithm::Vertical); UInt64 merge_block_size = data_settings->merge_max_block_size; switch (data.merging_params.mode) @@ -917,7 +917,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor { /// The same progress from merge_entry could be used for both algorithms (it should be more accurate) /// But now we are using inaccurate row-based estimation in Horizontal case for backward compatibility - Float64 progress = (merge_alg == MergeAlgorithm::Horizontal) + Float64 progress = (chosen_merge_algorithm == MergeAlgorithm::Horizontal) ? std::min(1., 1. * rows_written / sum_input_rows_upper_bound) : std::min(1., merge_entry->progress.load(std::memory_order_relaxed)); @@ -938,7 +938,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor MergeTreeData::DataPart::Checksums checksums_gathered_columns; /// Gather ordinary columns - if (merge_alg == MergeAlgorithm::Vertical) + if (chosen_merge_algorithm == MergeAlgorithm::Vertical) { size_t sum_input_rows_exact = merge_entry->rows_read; merge_entry->columns_written = merging_column_names.size(); @@ -1054,7 +1054,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor ReadableSize(merge_entry->bytes_read_uncompressed / elapsed_seconds)); } - if (merge_alg != MergeAlgorithm::Vertical) + if (chosen_merge_algorithm != MergeAlgorithm::Vertical) to.writeSuffixAndFinalizePart(new_data_part, need_sync); else to.writeSuffixAndFinalizePart(new_data_part, need_sync, &storage_columns, &checksums_gathered_columns); diff --git a/tests/integration/test_concurrent_ttl_merges/test.py b/tests/integration/test_concurrent_ttl_merges/test.py index f067e65f58a..65bc3828b38 100644 --- a/tests/integration/test_concurrent_ttl_merges/test.py +++ b/tests/integration/test_concurrent_ttl_merges/test.py @@ -5,8 +5,8 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/fast_background_pool.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/fast_background_pool.xml'], with_zookeeper=True) +node1 = cluster.add_instance('node1', main_configs=['configs/fast_background_pool.xml', 'configs/log_conf.xml'], with_zookeeper=True) +node2 = cluster.add_instance('node2', main_configs=['configs/fast_background_pool.xml', 'configs/log_conf.xml'], with_zookeeper=True) @pytest.fixture(scope="module") From 3ec5fc7a4c59411bb0b479d20fba02a66697469b Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 27 Oct 2020 18:29:06 +0300 Subject: [PATCH 337/432] Add explicit --- src/Storages/MergeTree/DataPartsExchange.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index 2e8705e761d..eaf32385908 100644 --- 
a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -57,11 +57,10 @@ struct ReplicatedFetchReadCallback { ReplicatedFetchList::Entry & replicated_fetch_entry; - ReplicatedFetchReadCallback(ReplicatedFetchList::Entry & replicated_fetch_entry_) + explicit ReplicatedFetchReadCallback(ReplicatedFetchList::Entry & replicated_fetch_entry_) : replicated_fetch_entry(replicated_fetch_entry_) {} - void operator() (size_t bytes_count) { replicated_fetch_entry->bytes_read_compressed.store(bytes_count, std::memory_order_relaxed); From c7236a93596937e0875a5748b5f757df58406398 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 27 Oct 2020 19:12:53 +0300 Subject: [PATCH 338/432] Try fix tests. --- src/Interpreters/ExternalLoader.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/Interpreters/ExternalLoader.cpp b/src/Interpreters/ExternalLoader.cpp index b5f802aa4bc..e1713c7cbbb 100644 --- a/src/Interpreters/ExternalLoader.cpp +++ b/src/Interpreters/ExternalLoader.cpp @@ -1414,6 +1414,11 @@ void ExternalLoader::checkLoaded(const ExternalLoader::LoadResult & result, { std::rethrow_exception(result.exception); } + catch (const Poco::Exception & e) + { + /// This will create a copy for Poco::Exception and DB::Exception + e.rethrow(); + } catch (...) { throw DB::Exception(ErrorCodes::DICTIONARIES_WAS_NOT_LOADED, From 2f752b0db5f2dbb67b1ab7937dee3c457e41e6cd Mon Sep 17 00:00:00 2001 From: Ivan Blinkov Date: Tue, 27 Oct 2020 20:40:54 +0300 Subject: [PATCH 339/432] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index e2a128eee25..03b5c988586 100644 --- a/README.md +++ b/README.md @@ -17,6 +17,6 @@ ClickHouse is an open-source column-oriented database management system that all ## Upcoming Events -* [The Second ClickHouse Meetup East](https://www.eventbrite.com/e/the-second-clickhouse-meetup-east-tickets-126787955187) on October 31, 2020. -* [ClickHouse for Enterprise Meetup (in Russian)](https://arenadata-events.timepad.ru/event/1465249/) on November 10, 2020. +* [The Second ClickHouse Meetup East (online)](https://www.eventbrite.com/e/the-second-clickhouse-meetup-east-tickets-126787955187) on October 31, 2020. +* [ClickHouse for Enterprise Meetup (online in Russian)](https://arenadata-events.timepad.ru/event/1465249/) on November 10, 2020. From fc14fde24acd7e06547c266959fd14614b597999 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 26 Oct 2020 23:01:06 +0300 Subject: [PATCH 340/432] Fix DROP TABLE for Distributed (racy with INSERT)
```
drop() on T1275:
 0  DB::StorageDistributed::drop (this=0x7f9ed34f0000) at ../contrib/libcxx/include/__hash_table:966
 1  0x000000000d557242 in DB::DatabaseOnDisk::dropTable (this=0x7f9fc22706d8, context=..., table_name=...) at ../contrib/libcxx/include/new:340
 2  0x000000000d6fcf7c in DB::InterpreterDropQuery::executeToTable (this=this@entry=0x7f9e42560dc0, query=...) at ../contrib/libcxx/include/memory:3826
 3  0x000000000d6ff5ee in DB::InterpreterDropQuery::execute (this=0x7f9e42560dc0) at ../src/Interpreters/InterpreterDropQuery.cpp:50
 4  0x000000000daa40c0 in DB::executeQueryImpl (begin=<optimized out>, end=<optimized out>, context=..., internal=<optimized out>, stage=DB::QueryProcessingStage::Complete, has_query_tail=false, istr=0x0) at ../src/Interpreters/executeQuery.cpp:420
 5  0x000000000daa59df in DB::executeQuery (query=..., context=..., internal=internal@entry=false, stage=<optimized out>, may_have_embedded_data=<optimized out>) at ../contrib/libcxx/include/string:1487
 6  0x000000000e1369e6 in DB::TCPHandler::runImpl (this=this@entry=0x7f9ddf3a9000) at ../src/Server/TCPHandler.cpp:254
 7  0x000000000e1379c9 in DB::TCPHandler::run (this=0x7f9ddf3a9000) at ../src/Server/TCPHandler.cpp:1326
 8  0x000000001086fac7 in Poco::Net::TCPServerConnection::start (this=this@entry=0x7f9ddf3a9000) at ../contrib/poco/Net/src/TCPServerConnection.cpp:43
 9  0x000000001086ff2b in Poco::Net::TCPServerDispatcher::run (this=0x7f9e4eba5c00) at ../contrib/poco/Net/src/TCPServerDispatcher.cpp:114
10  0x00000000109dbe8e in Poco::PooledThread::run (this=0x7f9e4a2d2f80) at ../contrib/poco/Foundation/src/ThreadPool.cpp:199
11  0x00000000109d78f9 in Poco::ThreadImpl::runnableEntry (pThread=<optimized out>) at ../contrib/poco/Foundation/include/Poco/SharedPtr.h:401
12  0x00007f9fc3cccea7 in start_thread (arg=<optimized out>) at pthread_create.c:477
13  0x00007f9fc3bebeaf in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:95

StorageDistributedDirectoryMonitor on T166:
 0  DB::StorageDistributedDirectoryMonitor::StorageDistributedDirectoryMonitor (this=0x7f9ea7ab1400, storage_=..., path_=..., pool_=..., monitor_blocker_=..., bg_pool_=...) at ../src/Storages/Distributed/DirectoryMonitor.cpp:81
 1  0x000000000dbf684e in std::__1::make_unique<> () at ../contrib/libcxx/include/memory:3474
 2  DB::StorageDistributed::requireDirectoryMonitor (this=0x7f9ed34f0000, disk=..., name=...) at ../src/Storages/StorageDistributed.cpp:682
 3  0x000000000de3d5fa in DB::DistributedBlockOutputStream::writeToShard (this=this@entry=0x7f9ed39c7418, block=..., dir_names=...) at ../src/Storages/Distributed/DistributedBlockOutputStream.cpp:634
 4  0x000000000de3e214 in DB::DistributedBlockOutputStream::writeAsyncImpl (this=this@entry=0x7f9ed39c7418, block=..., shard_id=shard_id@entry=79) at ../src/Storages/Distributed/DistributedBlockOutputStream.cpp:539
 5  0x000000000de3e47b in DB::DistributedBlockOutputStream::writeSplitAsync (this=this@entry=0x7f9ed39c7418, block=...) at ../contrib/libcxx/include/vector:1546
 6  0x000000000de3eab0 in DB::DistributedBlockOutputStream::writeAsync (block=..., this=0x7f9ed39c7418) at ../src/Storages/Distributed/DistributedBlockOutputStream.cpp:141
 7  DB::DistributedBlockOutputStream::write (this=0x7f9ed39c7418, block=...) at ../src/Storages/Distributed/DistributedBlockOutputStream.cpp:135
 8  0x000000000d73b376 in DB::PushingToViewsBlockOutputStream::write (this=this@entry=0x7f9ea7a8cf58, block=...) at ../src/DataStreams/PushingToViewsBlockOutputStream.cpp:157
 9  0x000000000d7853eb in DB::AddingDefaultBlockOutputStream::write (this=0x7f9ed383d118, block=...) at ../contrib/libcxx/include/memory:3826
10  0x000000000d740790 in DB::SquashingBlockOutputStream::write (this=0x7f9ed383de18, block=...) at ../contrib/libcxx/include/memory:3826
11  0x000000000d68c308 in DB::CountingBlockOutputStream::write (this=0x7f9ea7ac6d60, block=...) at ../contrib/libcxx/include/memory:3826
12  0x000000000ddab449 in DB::StorageBuffer::writeBlockToDestination (this=this@entry=0x7f9fbd56a000, block=..., table=...) at ../src/Storages/StorageBuffer.cpp:747
13  0x000000000ddabfa6 in DB::StorageBuffer::flushBuffer (this=this@entry=0x7f9fbd56a000, buffer=..., check_thresholds=check_thresholds@entry=true, locked=locked@entry=false, reset_block_structure=reset_block_structure@entry=false) at ../src/Storages/StorageBuffer.cpp:661
14  0x000000000ddac415 in DB::StorageBuffer::flushAllBuffers (reset_blocks_structure=false, check_thresholds=true, this=0x7f9fbd56a000) at ../src/Storages/StorageBuffer.cpp:605

shutdown() on T1275:
 0  DB::StorageDistributed::shutdown (this=0x7f9ed34f0000) at ../contrib/libcxx/include/atomic:1612
 1  0x000000000d6fd938 in DB::InterpreterDropQuery::executeToTable (this=this@entry=0x7f98530c79a0, query=...) at ../src/Storages/TableLockHolder.h:12
 2  0x000000000d6ff5ee in DB::InterpreterDropQuery::execute (this=0x7f98530c79a0) at ../src/Interpreters/InterpreterDropQuery.cpp:50
 3  0x000000000daa40c0 in DB::executeQueryImpl (begin=<optimized out>, end=<optimized out>, context=..., internal=<optimized out>, stage=DB::QueryProcessingStage::Complete, has_query_tail=false, istr=0x0) at ../src/Interpreters/executeQuery.cpp:420
 4  0x000000000daa59df in DB::executeQuery (query=..., context=..., internal=internal@entry=false, stage=<optimized out>, may_have_embedded_data=<optimized out>) at ../contrib/libcxx/include/string:1487
 5  0x000000000e1369e6 in DB::TCPHandler::runImpl (this=this@entry=0x7f9ddf3a9000) at ../src/Server/TCPHandler.cpp:254
 6  0x000000000e1379c9 in DB::TCPHandler::run (this=0x7f9ddf3a9000) at ../src/Server/TCPHandler.cpp:1326
 7  0x000000001086fac7 in Poco::Net::TCPServerConnection::start (this=this@entry=0x7f9ddf3a9000) at ../contrib/poco/Net/src/TCPServerConnection.cpp:43
 8  0x000000001086ff2b in Poco::Net::TCPServerDispatcher::run (this=0x7f9e4eba5c00) at ../contrib/poco/Net/src/TCPServerDispatcher.cpp:114
 9  0x00000000109dbe8e in Poco::PooledThread::run (this=0x7f9e4a2d2f80) at ../contrib/poco/Foundation/src/ThreadPool.cpp:199
10  0x00000000109d78f9 in Poco::ThreadImpl::runnableEntry (pThread=<optimized out>) at ../contrib/poco/Foundation/include/Poco/SharedPtr.h:401
11  0x00007f9fc3cccea7 in start_thread (arg=<optimized out>) at pthread_create.c:477
12  0x00007f9fc3bebeaf in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:95
```
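The shape of the fix that follows: drop() now runs an idempotent shutdown() itself, so any DirectoryMonitor spawned by an INSERT racing in between shutdown() and drop() is joined before the table's data is removed. Below is a minimal, self-contained sketch of that shape; DirectoryMonitor and StorageDistributedLike here are hypothetical stand-ins, not the real ClickHouse classes.

```
// Sketch only: models the shutdown()/drop() interaction, not the real code.
#include <atomic>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <thread>

class DirectoryMonitor
{
public:
    explicit DirectoryMonitor(std::atomic<bool> & cancelled_)
        : cancelled(cancelled_), worker([this] { run(); }) {}
    ~DirectoryMonitor() { worker.join(); }

private:
    void run()
    {
        /// Stand-in for flushing batches of async INSERTs in the background.
        while (!cancelled.load())
            std::this_thread::yield();
    }

    std::atomic<bool> & cancelled;
    std::thread worker;
};

class StorageDistributedLike
{
public:
    /// Called by INSERT; refuses to start new monitors once cancelled.
    void requireDirectoryMonitor(const std::string & name)
    {
        std::lock_guard lock(mutex);
        if (cancelled.load() || monitors.count(name))
            return;
        monitors.emplace(name, std::make_unique<DirectoryMonitor>(cancelled));
    }

    /// Idempotent: a second call finds the map already empty and returns quickly.
    void shutdown()
    {
        cancelled.store(true);   // plays the role of monitors_blocker.cancelForever()
        std::lock_guard lock(mutex);
        monitors.clear();        // joins the background threads
    }

    void drop()
    {
        shutdown();              // joins monitors created by INSERTs that raced in
        /// ... only now is it safe to remove the data on disk ...
    }

private:
    std::mutex mutex;
    std::atomic<bool> cancelled{false};
    std::map<std::string, std::unique_ptr<DirectoryMonitor>> monitors;
};

int main()
{
    StorageDistributedLike table;
    table.requireDirectoryMonitor("shard_1");
    table.shutdown();   // first call joins the monitor
    table.drop();       // second shutdown() inside is a cheap no-op
}
```

The essential property is that shutdown() can safely run twice: the first call cancels and joins the racy monitors, and the second one (inside drop()) finds nothing left to do.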
---
 src/Storages/StorageDistributed.cpp | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp
index 9046940b3f7..0c1561fca9b 100644
--- a/src/Storages/StorageDistributed.cpp
+++ b/src/Storages/StorageDistributed.cpp
@@ -80,7 +80,6 @@ namespace ErrorCodes
     extern const int TYPE_MISMATCH;
     extern const int TOO_MANY_ROWS;
     extern const int UNABLE_TO_SKIP_UNUSED_SHARDS;
-    extern const int LOGICAL_ERROR;
 }

 namespace ActionLocks
@@ -600,15 +599,22 @@ void StorageDistributed::shutdown()
     monitors_blocker.cancelForever();

     std::lock_guard lock(cluster_nodes_mutex);
+
+    LOG_DEBUG(log, "Joining background threads for async INSERT");
     cluster_nodes_data.clear();
+    LOG_DEBUG(log, "Background threads for async INSERT joined");
 }

 void StorageDistributed::drop()
 {
-    // shutdown() should be already called
-    // and by the same reason we cannot use truncate() here, since
-    // cluster_nodes_data already cleaned
-    if (!cluster_nodes_data.empty())
-        throw Exception("drop called before shutdown", ErrorCodes::LOGICAL_ERROR);
+    // Some INSERT in between shutdown() and drop() can call
+    // requireDirectoryMonitor() again, so call shutdown() to clear them; but
+    // when drop() (this function) is executed, no INSERT can run in
+    // parallel.
+    //
+    // And the second shutdown() should be fast, since no
+    // DirectoryMonitor should do anything, because the ActionBlocker is canceled
+    // (in shutdown()).
+    shutdown();

     // Distributed table w/o sharding_key does not allow INSERTs
     if (relative_data_path.empty())

From 2352c144e84791405de28b8f903114445140bbd7 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Tue, 27 Oct 2020 22:28:40 +0300
Subject: [PATCH 341/432] Update reference

---
 .../queries/0_stateless/00273_quantiles.reference | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/tests/queries/0_stateless/00273_quantiles.reference b/tests/queries/0_stateless/00273_quantiles.reference
index d75d11191f4..616e06841e4 100644
--- a/tests/queries/0_stateless/00273_quantiles.reference
+++ b/tests/queries/0_stateless/00273_quantiles.reference
@@ -6,17 +6,17 @@
 [0,1,10,50,100,200,300,400,500,600,700,800,900,950,990,999,1000]
 [0,0.50100005,9.51,49.55,99.6,199.7,299.8,399.9,500,600.1,700.2,800.3,900.4,950.45,990.49,999.499,1000]
 [0,1,10,50,100,200,300,400,500,600,700,800,900,950,990,999,1000]
-1 333334 [699140.3,835642,967430.8] [699999,833333,966666]
+1 333334 [699144.2,835663,967429.2] [699999,833333,966666]
 2 266667 [426549.5,536255.5,638957.6] [426665,533332,639999]
-3 114285 [296938.5,342335,388777.5] [297142,342856,388570]
+3 114285 [296938,342324,388778] [297142,342856,388570]
 4 63492 [228370.2,254019.5,279351.4] [228571,253968,279364]
 5 40404 [185603.4,202009,218107] [185858,202020,218181]
-6 27972 [156598.6,167864,179118.40000000002] [156643,167832,179020]
+6 27972 [156598.7,167866,179118.3] [156643,167832,179020]
-7 20513 [135401,143553.5,151792.5] [135384,143589,151794]
+7 20513 [135400.8,143550,151792.6] [135384,143589,151794]
 8 15686 [119239.20000000001,125463,131772.40000000002] [119215,125490,131764]
-9 12384 [106509.79999999999,111538,116415.8] [106501,111455,116408]
+9 12384 [106510.20000000001,111539,116415.7] [106501,111455,116408]
-10 10025 [96223.6,100347,104288.6] [96240,100250,104260]
+10 10025 [96223.2,100346,104288.7] [96240,100250,104260]
-11 8282 [87732.70000000001,91035,94408.6] [87784,91097,94409]
+11 8282 [87732.8,91036,94410.20000000001] [87784,91097,94409]
12 6957 [80694.6,83477,86259.4] [80694,83477,86260] 13 5925 [74666.40000000001,77036,79405.6] [74666,77036,79406] 14 5109 [69475.8,71519,73562.2] [69475,71519,73563] From e1b8d0da7ba72a1f5bb2ef8ad528c8701c4824fd Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 27 Oct 2020 22:41:36 +0300 Subject: [PATCH 342/432] Trailing whitespace --- utils/simple-backport/changelog.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/simple-backport/changelog.sh b/utils/simple-backport/changelog.sh index 92957987cab..d3d9714cb04 100755 --- a/utils/simple-backport/changelog.sh +++ b/utils/simple-backport/changelog.sh @@ -66,7 +66,7 @@ do # Filter out PRs by bots. user_login=$(jq -r .user.login "$file") - + filter_bot=$(echo "$user_login" | grep -q "\[bot\]$" && echo "Skip." || echo "Ok." ||:) filter_robot=$(echo "$user_login" | grep -q "robot-clickhouse" && echo "Skip." || echo "Ok." ||:) From dba8c85ae4f8ebc0371eef8965e6e87c05cd284d Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Tue, 27 Oct 2020 22:56:54 +0300 Subject: [PATCH 343/432] do not wait while holding lock --- src/Databases/DatabaseLazy.cpp | 6 -- src/Databases/DatabaseLazy.h | 1 - src/Databases/IDatabase.h | 4 - .../MySQL/DatabaseMaterializeTablesIterator.h | 5 - src/Interpreters/InterpreterDropQuery.cpp | 100 +++++++++++++----- src/Interpreters/InterpreterDropQuery.h | 6 +- 6 files changed, 77 insertions(+), 45 deletions(-) diff --git a/src/Databases/DatabaseLazy.cpp b/src/Databases/DatabaseLazy.cpp index 81414902a33..0119f17f843 100644 --- a/src/Databases/DatabaseLazy.cpp +++ b/src/Databases/DatabaseLazy.cpp @@ -329,10 +329,4 @@ const StoragePtr & DatabaseLazyIterator::table() const return current_storage; } -void DatabaseLazyIterator::reset() -{ - if (current_storage) - current_storage.reset(); -} - } diff --git a/src/Databases/DatabaseLazy.h b/src/Databases/DatabaseLazy.h index 58e5e465eef..13c14863efb 100644 --- a/src/Databases/DatabaseLazy.h +++ b/src/Databases/DatabaseLazy.h @@ -122,7 +122,6 @@ public: bool isValid() const override; const String & name() const override; const StoragePtr & table() const override; - void reset() override; private: const DatabaseLazy & database; diff --git a/src/Databases/IDatabase.h b/src/Databases/IDatabase.h index 9b744259406..b28bd5fd599 100644 --- a/src/Databases/IDatabase.h +++ b/src/Databases/IDatabase.h @@ -44,8 +44,6 @@ public: /// (a database with support for lazy tables loading /// - it maintains a list of tables but tables are loaded lazily). virtual const StoragePtr & table() const = 0; - /// Reset reference counter to the StoragePtr. - virtual void reset() = 0; virtual ~IDatabaseTablesIterator() = default; @@ -95,8 +93,6 @@ public: const String & name() const override { return it->first; } const StoragePtr & table() const override { return it->second; } - - void reset() override { it->second.reset(); } }; /// Copies list of dictionaries and iterates through such snapshot. 
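The InterpreterDropQuery changes below follow a collect-then-wait shape: table UUIDs are gathered while the DDL guard is held, and the potentially long wait for the data to actually disappear happens only after the guard is released. A minimal sketch of that pattern under assumed stand-in names (ddl_mutex, scheduleDropsLocked and waitTableFinallyDropped here are illustrative stubs, not the real API):

```
// Sketch only: collect under the lock, wait after releasing it.
#include <chrono>
#include <cstdint>
#include <mutex>
#include <thread>
#include <vector>

using UUID = std::uint64_t;              // stand-in for the real 128-bit UUID

std::mutex ddl_mutex;                    // plays the role of the DDLGuard

std::vector<UUID> scheduleDropsLocked()  // fast: only marks tables as dropped
{
    return {1, 2, 3};
}

void waitTableFinallyDropped(UUID)       // slow: waits until data is removed
{
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
}

void dropDatabaseTables(bool no_delay)
{
    std::vector<UUID> uuids_to_wait;
    {
        std::lock_guard lock(ddl_mutex);
        uuids_to_wait = scheduleDropsLocked();
    }   // the lock is released *before* any waiting

    if (no_delay)
        for (UUID uuid : uuids_to_wait)
            waitTableFinallyDropped(uuid);   // other DDL can proceed meanwhile
}

int main() { dropDatabaseTables(true); }
```

Keeping the wait outside the guard is what lets other DDL on the same database make progress while a synchronous DROP is still waiting for data to be removed.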
diff --git a/src/Databases/MySQL/DatabaseMaterializeTablesIterator.h b/src/Databases/MySQL/DatabaseMaterializeTablesIterator.h
index 5a0ec242c2f..86a5cbf8206 100644
--- a/src/Databases/MySQL/DatabaseMaterializeTablesIterator.h
+++ b/src/Databases/MySQL/DatabaseMaterializeTablesIterator.h
@@ -28,11 +28,6 @@ public:
         return tables.emplace_back(storage);
     }
-    void reset() override
-    {
-        tables.clear();
-    }
-
     UUID uuid() const override { return nested_iterator->uuid(); }

     DatabaseMaterializeTablesIterator(DatabaseTablesIteratorPtr nested_iterator_, DatabaseMaterializeMySQL * database_)

diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp
index c4ebe596649..8bf8675b15d 100644
--- a/src/Interpreters/InterpreterDropQuery.cpp
+++ b/src/Interpreters/InterpreterDropQuery.cpp
@@ -52,13 +52,37 @@ BlockIO InterpreterDropQuery::execute()
         return executeToDictionary(drop.database, drop.table, drop.kind, drop.if_exists, drop.temporary, drop.no_ddl_lock);
     }
     else if (!drop.database.empty())
-        return executeToDatabase(drop.database, drop.kind, drop.if_exists, drop.no_delay);
+        return executeToDatabase(drop);
     else
         throw Exception("Nothing to drop, both names are empty", ErrorCodes::LOGICAL_ERROR);
 }

+void InterpreterDropQuery::waitForTableToBeActuallyDroppedOrDetached(const ASTDropQuery & query, const DatabasePtr & db, const UUID & uuid_to_wait)
+{
+    if (uuid_to_wait == UUIDHelpers::Nil)
+        return;
+
+    if (query.kind == ASTDropQuery::Kind::Drop)
+        DatabaseCatalog::instance().waitTableFinallyDropped(uuid_to_wait);
+    else if (query.kind == ASTDropQuery::Kind::Detach)
+    {
+        if (auto * atomic = typeid_cast<DatabaseAtomic *>(db.get()))
+            atomic->waitDetachedTableNotInUse(uuid_to_wait);
+    }
+}
+
 BlockIO InterpreterDropQuery::executeToTable(const ASTDropQuery & query)
+{
+    DatabasePtr database;
+    UUID table_to_wait_on = UUIDHelpers::Nil;
+    auto res = executeToTableImpl(query, database, table_to_wait_on);
+    if (query.no_delay)
+        waitForTableToBeActuallyDroppedOrDetached(query, database, table_to_wait_on);
+    return res;
+}
+
+BlockIO InterpreterDropQuery::executeToTableImpl(const ASTDropQuery & query, DatabasePtr & db, UUID & uuid_to_wait)
 {
     /// NOTE: it does not contain UUID, we will resolve it with locked DDLGuard
     auto table_id = StorageID(query);
@@ -125,19 +149,9 @@ BlockIO InterpreterDropQuery::executeToTable(const ASTDropQuery & query)
             database->dropTable(context, table_id.table_name, query.no_delay);
         }
-    }

-    table.reset();
-    ddl_guard = {};
-    if (query.no_delay)
-    {
-        if (query.kind == ASTDropQuery::Kind::Drop)
-            DatabaseCatalog::instance().waitTableFinallyDropped(table_id.uuid);
-        else if (query.kind == ASTDropQuery::Kind::Detach)
-        {
-            if (auto * atomic = typeid_cast<DatabaseAtomic *>(database.get()))
-                atomic->waitDetachedTableNotInUse(table_id.uuid);
-        }
+        db = database;
+        uuid_to_wait = table_id.uuid;
     }

     return {};
@@ -223,19 +237,48 @@ BlockIO InterpreterDropQuery::executeToTemporaryTable(const String & table_name,
 }

-BlockIO InterpreterDropQuery::executeToDatabase(const String & database_name, ASTDropQuery::Kind kind, bool if_exists, bool no_delay)
+BlockIO InterpreterDropQuery::executeToDatabase(const ASTDropQuery & query)
 {
+    DatabasePtr database;
+    std::vector<UUID> tables_to_wait;
+    BlockIO res;
+    try
+    {
+        res = executeToDatabaseImpl(query, database, tables_to_wait);
+    }
+    catch (...)
+    {
+        if (query.no_delay)
+        {
+            for (const auto table_uuid : tables_to_wait)
+                waitForTableToBeActuallyDroppedOrDetached(query, database, table_uuid);
+        }
+        throw;
+    }
+
+    if (query.no_delay)
+    {
+        for (const auto table_uuid : tables_to_wait)
+            waitForTableToBeActuallyDroppedOrDetached(query, database, table_uuid);
+    }
+    return res;
+}
+
+BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query, DatabasePtr & database, std::vector<UUID> & uuids_to_wait)
+{
+    const auto & database_name = query.database;
     auto ddl_guard = DatabaseCatalog::instance().getDDLGuard(database_name, "");

-    if (auto database = tryGetDatabase(database_name, if_exists))
+    database = tryGetDatabase(database_name, query.if_exists);
+    if (database)
     {
-        if (kind == ASTDropQuery::Kind::Truncate)
+        if (query.kind == ASTDropQuery::Kind::Truncate)
         {
             throw Exception("Unable to truncate database", ErrorCodes::SYNTAX_ERROR);
         }
-        else if (kind == ASTDropQuery::Kind::Detach || kind == ASTDropQuery::Kind::Drop)
+        else if (query.kind == ASTDropQuery::Kind::Detach || query.kind == ASTDropQuery::Kind::Drop)
         {
-            bool drop = kind == ASTDropQuery::Kind::Drop;
+            bool drop = query.kind == ASTDropQuery::Kind::Drop;
             context.checkAccess(AccessType::DROP_DATABASE, database_name);

             if (database->shouldBeEmptyOnDetach())
@@ -246,21 +289,22 @@ BlockIO InterpreterDropQuery::executeToDatabase(const String & database_name, AS
                 for (auto iterator = database->getDictionariesIterator(); iterator->isValid(); iterator->next())
                 {
                     String current_dictionary = iterator->name();
-                    executeToDictionary(database_name, current_dictionary, kind, false, false, false);
+                    executeToDictionary(database_name, current_dictionary, query.kind, false, false, false);
                 }

-                ASTDropQuery query;
-                query.kind = kind;
-                query.if_exists = true;
-                query.database = database_name;
-                query.no_delay = no_delay;
+                ASTDropQuery query_for_table;
+                query_for_table.kind = query.kind;
+                query_for_table.if_exists = true;
+                query_for_table.database = database_name;
+                query_for_table.no_delay = query.no_delay;

                 for (auto iterator = database->getTablesIterator(context); iterator->isValid(); iterator->next())
                 {
-                    /// Reset reference counter of the StoragePtr to allow synchronous drop.
-                    iterator->reset();
-                    query.table = iterator->name();
-                    executeToTable(query);
+                    DatabasePtr db;
+                    UUID table_to_wait = UUIDHelpers::Nil;
+                    query_for_table.table = iterator->name();
+                    executeToTableImpl(query_for_table, db, table_to_wait);
+                    uuids_to_wait.push_back(table_to_wait);
                 }
             }

diff --git a/src/Interpreters/InterpreterDropQuery.h b/src/Interpreters/InterpreterDropQuery.h
index 0e1fd47b079..1fc4acffa04 100644
--- a/src/Interpreters/InterpreterDropQuery.h
+++ b/src/Interpreters/InterpreterDropQuery.h
@@ -29,9 +29,13 @@ private:
     ASTPtr query_ptr;
     Context & context;

-    BlockIO executeToDatabase(const String & database_name, ASTDropQuery::Kind kind, bool if_exists, bool no_delay);
+    BlockIO executeToDatabase(const ASTDropQuery & query);
+    BlockIO executeToDatabaseImpl(const ASTDropQuery & query, DatabasePtr & db, std::vector<UUID> & uuids_to_wait);

     BlockIO executeToTable(const ASTDropQuery & query);
+    BlockIO executeToTableImpl(const ASTDropQuery & query, DatabasePtr & db, UUID & uuid_to_wait);
+
+    static void waitForTableToBeActuallyDroppedOrDetached(const ASTDropQuery & query, const DatabasePtr & db, const UUID & uuid_to_wait);

     BlockIO executeToDictionary(const String & database_name, const String & dictionary_name, ASTDropQuery::Kind kind, bool if_exists, bool is_temporary, bool no_ddl_lock);

From 4b59882c30674cb1e7a39523db83970fa908840e Mon Sep 17 00:00:00 2001
From: kssenii
Date: Tue, 27 Oct 2020 20:28:52 +0000
Subject: [PATCH 344/432] Remove redundant, add virt col

---
 .../table-engines/integrations/rabbitmq.md    |  3 ++-
 .../table-engines/integrations/rabbitmq.md    |  3 ++-
 .../RabbitMQ/RabbitMQBlockInputStream.cpp     |  4 +++-
 .../ReadBufferFromRabbitMQConsumer.cpp        | 18 ++++--------------
 .../RabbitMQ/ReadBufferFromRabbitMQConsumer.h | 12 ++----------
 src/Storages/RabbitMQ/StorageRabbitMQ.cpp     |  8 ++++----
 6 files changed, 17 insertions(+), 31 deletions(-)

diff --git a/docs/en/engines/table-engines/integrations/rabbitmq.md b/docs/en/engines/table-engines/integrations/rabbitmq.md
index 0340603eaae..b0901ee6f6e 100644
--- a/docs/en/engines/table-engines/integrations/rabbitmq.md
+++ b/docs/en/engines/table-engines/integrations/rabbitmq.md
@@ -148,4 +148,5 @@ Example:
 - `_channel_id` - ChannelID, on which consumer, who received the message, was declared.
 - `_delivery_tag` - DeliveryTag of the received message. Scoped per channel.
 - `_redelivered` - `redelivered` flag of the message.
-- `_message_id` - MessageID of the received message; non-empty if was set, when message was published.
+- `_message_id` - messageID of the received message; non-empty if it was set when the message was published.
+- `_timestamp` - timestamp of the received message; non-empty if it was set when the message was published.

diff --git a/docs/ru/engines/table-engines/integrations/rabbitmq.md b/docs/ru/engines/table-engines/integrations/rabbitmq.md
index 75f20827df2..dedb5842d68 100644
--- a/docs/ru/engines/table-engines/integrations/rabbitmq.md
+++ b/docs/ru/engines/table-engines/integrations/rabbitmq.md
@@ -140,4 +140,5 @@ Example:
 - `_channel_id` - идентификатор канала `ChannelID`, на котором было получено сообщение.
 - `_delivery_tag` - значение `DeliveryTag` полученного сообщения. Уникально в рамках одного канала.
 - `_redelivered` - флаг `redelivered`. (Не равно нулю, если есть возможность, что сообщение было получено более, чем одним каналом.)
-- `_message_id` - значение `MessageID` полученного сообщения. Данное поле непусто, если указано в параметрах при отправке сообщения.
+- `_message_id` - значение поля `messageID` полученного сообщения. Данное поле непусто, если указано в параметрах при отправке сообщения.
+- `_timestamp` - значение поля `timestamp` полученного сообщения. Данное поле непусто, если указано в параметрах при отправке сообщения.

diff --git a/src/Storages/RabbitMQ/RabbitMQBlockInputStream.cpp b/src/Storages/RabbitMQ/RabbitMQBlockInputStream.cpp
index c74081d8802..830c6224b9e 100644
--- a/src/Storages/RabbitMQ/RabbitMQBlockInputStream.cpp
+++ b/src/Storages/RabbitMQ/RabbitMQBlockInputStream.cpp
@@ -27,7 +27,7 @@ RabbitMQBlockInputStream::RabbitMQBlockInputStream(
     , non_virtual_header(metadata_snapshot->getSampleBlockNonMaterialized())
     , sample_block(non_virtual_header)
     , virtual_header(metadata_snapshot->getSampleBlockForColumns(
-        {"_exchange_name", "_channel_id", "_delivery_tag", "_redelivered", "_message_id"},
+        {"_exchange_name", "_channel_id", "_delivery_tag", "_redelivered", "_message_id", "_timestamp"},
         storage.getVirtuals(), storage.getStorageID()))
 {
     for (const auto & column : virtual_header)
@@ -158,6 +158,7 @@ Block RabbitMQBlockInputStream::readImpl()
         auto delivery_tag = buffer->getDeliveryTag();
         auto redelivered = buffer->getRedelivered();
         auto message_id = buffer->getMessageID();
+        auto timestamp = buffer->getTimestamp();

         buffer->updateAckTracker({delivery_tag, channel_id});

@@ -168,6 +169,7 @@ Block RabbitMQBlockInputStream::readImpl()
             virtual_columns[2]->insert(delivery_tag);
             virtual_columns[3]->insert(redelivered);
             virtual_columns[4]->insert(message_id);
+            virtual_columns[5]->insert(timestamp);
         }

         total_rows = total_rows + new_rows;

diff --git a/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp b/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp
index 8c7b435cd2a..661e645cc0c 100644
--- a/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp
+++ b/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.cpp
@@ -17,32 +17,23 @@ namespace DB
 ReadBufferFromRabbitMQConsumer::ReadBufferFromRabbitMQConsumer(
         ChannelPtr consumer_channel_,
         HandlerPtr event_handler_,
-        const String & exchange_name_,
         std::vector<String> & queues_,
         size_t channel_id_base_,
         const String & channel_base_,
-        const String & queue_base_,
         Poco::Logger * log_,
         char row_delimiter_,
-        size_t num_queues_,
-        const String & deadletter_exchange_,
         uint32_t queue_size_,
        const std::atomic<bool> & stopped_)
        : ReadBuffer(nullptr, 0)
        , consumer_channel(std::move(consumer_channel_))
        , event_handler(event_handler_)
-       , exchange_name(exchange_name_)
        , queues(queues_)
        , channel_base(channel_base_)
        , channel_id_base(channel_id_base_)
-       , queue_base(queue_base_)
-       , num_queues(num_queues_)
-       , deadletter_exchange(deadletter_exchange_)
        , log(log_)
        , row_delimiter(row_delimiter_)
-       , queue_size(queue_size_)
        , stopped(stopped_)
-       , received(queue_size * num_queues)
+       , received(queue_size_)
 {
     setupChannel();
 }
@@ -74,10 +65,9 @@ void ReadBufferFromRabbitMQConsumer::subscribe()
             if (row_delimiter != '\0')
                 message_received += row_delimiter;

-            if (message.hasMessageID())
-                received.push({message_received, message.messageID(), redelivered, AckTracker(delivery_tag, channel_id)});
-            else
-                received.push({message_received, "", redelivered, AckTracker(delivery_tag, channel_id)});
+            received.push({message_received, message.hasMessageID() ? message.messageID() : "",
+                    message.hasTimestamp() ? message.timestamp() : 0,
+                    redelivered, AckTracker(delivery_tag, channel_id)});
         }
     })
     .onError([&](const char * message)

diff --git a/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.h b/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.h
index 556e069083c..476db3f5e94 100644
--- a/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.h
+++ b/src/Storages/RabbitMQ/ReadBufferFromRabbitMQConsumer.h
@@ -25,15 +25,11 @@ public:
     ReadBufferFromRabbitMQConsumer(
             ChannelPtr consumer_channel_,
             HandlerPtr event_handler_,
-            const String & exchange_name_,
             std::vector<String> & queues_,
             size_t channel_id_base_,
             const String & channel_base_,
-            const String & queue_base_,
             Poco::Logger * log_,
             char row_delimiter_,
-            size_t num_queues_,
-            const String & deadletter_exchange_,
             uint32_t queue_size_,
             const std::atomic<bool> & stopped_);
@@ -52,6 +48,7 @@ public:
     {
         String message;
         String message_id;
+        uint64_t timestamp;
         bool redelivered;
         AckTracker track;
     };
@@ -74,6 +71,7 @@ public:
     auto getDeliveryTag() const { return current.track.delivery_tag; }
     auto getRedelivered() const { return current.redelivered; }
     auto getMessageID() const { return current.message_id; }
+    auto getTimestamp() const { return current.timestamp; }

 private:
     bool nextImpl() override;
@@ -83,18 +81,12 @@ private:
     ChannelPtr consumer_channel;
     HandlerPtr event_handler;
-
-    const String exchange_name;
     std::vector<String> queues;
     const String channel_base;
     const size_t channel_id_base;
-    const String queue_base;
-    const size_t num_queues;
-    const String deadletter_exchange;
     Poco::Logger * log;
     char row_delimiter;
     bool allowed = true;
-    uint32_t queue_size;
     const std::atomic<bool> & stopped;

     String channel_id;

diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp
index e49b18ea391..c32590ca0ba 100644
--- a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp
+++ b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp
@@ -689,9 +689,8 @@ ConsumerBufferPtr StorageRabbitMQ::createReadBuffer()
     ChannelPtr consumer_channel = std::make_shared<AMQP::TcpChannel>(connection.get());

     return std::make_shared<ReadBufferFromRabbitMQConsumer>(
-        consumer_channel, event_handler, consumer_exchange, queues, ++consumer_id,
-        unique_strbase, queue_base, log, row_delimiter, num_queues,
-        deadletter_exchange, queue_size, stream_cancelled);
+        consumer_channel, event_handler, queues, ++consumer_id,
+        unique_strbase, log, row_delimiter, queue_size, stream_cancelled);
 }

@@ -983,7 +982,8 @@ NamesAndTypesList StorageRabbitMQ::getVirtuals() const
         {"_channel_id", std::make_shared<DataTypeString>()},
         {"_delivery_tag", std::make_shared<DataTypeUInt64>()},
         {"_redelivered", std::make_shared<DataTypeUInt8>()},
-        {"_message_id", std::make_shared<DataTypeString>()}
+        {"_message_id", std::make_shared<DataTypeString>()},
+        {"_timestamp", std::make_shared<DataTypeUInt64>()}
     };
 }

From b5ccb5ed5b47b43c87b54bfbeb6ac723c6d751e1 Mon Sep 17 00:00:00 2001
From: Alexander Tokmakov
Date: Tue, 27 Oct 2020 23:52:49 +0300
Subject: [PATCH 345/432] review suggestions

---
 src/Databases/DatabaseAtomic.cpp     | 11 +++++++++--
 src/Interpreters/DatabaseCatalog.cpp | 16 +++++++++++++---
 src/Interpreters/DatabaseCatalog.h   |  8 +++++++-
 3 files changed, 29 insertions(+), 6 deletions(-)

diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp
index 6c1ca1e8fce..b50bdb8d894 100644
--- a/src/Databases/DatabaseAtomic.cpp
+++ b/src/Databases/DatabaseAtomic.cpp
@@ -261,22 +261,29 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora
 {
     DetachedTables not_in_use;
     auto table_data_path = getTableDataPath(query);
+    bool locked_uuid = false;
     try
     {
         std::unique_lock lock{mutex};
         if (query.database != database_name)
            throw Exception(ErrorCodes::UNKNOWN_DATABASE, "Database was renamed to `{}`, cannot create table in `{}`", database_name, query.database);
+        /// Do some checks before renaming file from .tmp to .sql
         not_in_use = cleanupDetachedTables();
         assertDetachedTableNotInUse(query.uuid);
-        DatabaseCatalog::instance().addUUIDMapping(query.uuid, {}, {});
-        renameNoReplace(table_metadata_tmp_path, table_metadata_path);
+        /// We will get an exception if some table with the same UUID exists (even if it's a detached table or a table from another database)
+        DatabaseCatalog::instance().addUUIDMapping(query.uuid);
+        locked_uuid = true;
+        /// It throws if `table_metadata_path` already exists (it's possible if the table was detached)
+        renameNoReplace(table_metadata_tmp_path, table_metadata_path);  /// Commit point (a sort of)
         attachTableUnlocked(query.table, table, lock);   /// Should never throw
         table_name_to_path.emplace(query.table, table_data_path);
     }
     catch (...)
     {
         Poco::File(table_metadata_tmp_path).remove();
+        if (locked_uuid)
+            DatabaseCatalog::instance().removeUUIDMappingFinally(query.uuid);
         throw;
     }
     tryCreateSymlink(query.table, table_data_path);

diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp
index 90e1a32827f..906863f3f44 100644
--- a/src/Interpreters/DatabaseCatalog.cpp
+++ b/src/Interpreters/DatabaseCatalog.cpp
@@ -164,7 +164,12 @@ void DatabaseCatalog::shutdownImpl()
     std::lock_guard lock(databases_mutex);
     assert(std::find_if(uuid_map.begin(), uuid_map.end(), [](const auto & elem)
     {
-        const auto & not_empty_mapping = [] (const auto & mapping) { return mapping.second.second; };
+        /// Ensure that all UUID mappings are empty (i.e. all mappings contain nullptr instead of a pointer to storage)
+        const auto & not_empty_mapping = [] (const auto & mapping)
+        {
+            auto & table = mapping.second.second;
+            return table;
+        };
         auto it = std::find_if(elem.map.begin(), elem.map.end(), not_empty_mapping);
         return it != elem.map.end();
     }) == uuid_map.end());
@@ -423,6 +428,11 @@ DatabasePtr DatabaseCatalog::getSystemDatabase() const
     return getDatabase(SYSTEM_DATABASE);
 }

+void DatabaseCatalog::addUUIDMapping(const UUID & uuid)
+{
+    addUUIDMapping(uuid, nullptr, nullptr);
+}
+
 void DatabaseCatalog::addUUIDMapping(const UUID & uuid, const DatabasePtr & database, const StoragePtr & table)
 {
     assert(uuid != UUIDHelpers::Nil && getFirstLevelIdx(uuid) < uuid_map.size());
@@ -744,7 +754,7 @@ void DatabaseCatalog::enqueueDroppedTableCleanup(StorageID table_id, StoragePtr
             LOG_WARNING(log, "Cannot parse metadata of partially dropped table {} from {}. Will remove metadata file and data directory. Garbage may be left in /store directory and ZooKeeper.", table_id.getNameForLogs(), dropped_metadata_path);
         }
-        addUUIDMapping(table_id.uuid, {}, {});
+        addUUIDMapping(table_id.uuid);
         drop_time = Poco::File(dropped_metadata_path).getLastModified().epochTime();
     }

@@ -786,7 +796,7 @@ void DatabaseCatalog::dropTableDataTask()
         }
         else
         {
-            LOG_TRACE(log, "No tables to drop. Queue size: {}", tables_marked_dropped.size());
+            LOG_TRACE(log, "Not found any suitable tables to drop, still have {} tables in drop queue", tables_marked_dropped.size());
         }
         need_reschedule = !tables_marked_dropped.empty();
     }

diff --git a/src/Interpreters/DatabaseCatalog.h b/src/Interpreters/DatabaseCatalog.h
index 46646f2ff05..d26307a3bc3 100644
--- a/src/Interpreters/DatabaseCatalog.h
+++ b/src/Interpreters/DatabaseCatalog.h
@@ -167,13 +167,19 @@ public:
     /// If table has UUID, addUUIDMapping(...) must be called when a table is attached to some database,
     /// removeUUIDMapping(...) must be called when it is detached,
     /// and removeUUIDMappingFinally(...) must be called when the table is dropped and its data removed from disk.
-    /// To "lock" some UUID and prevent collision, addUUIDMapping(...) may be called with nullptr arguments.
     /// Such tables can be accessed by persistent UUID instead of database and table name.
     void addUUIDMapping(const UUID & uuid, const DatabasePtr & database, const StoragePtr & table);
     void removeUUIDMapping(const UUID & uuid);
     void removeUUIDMappingFinally(const UUID & uuid);

     /// For moving table between databases
     void updateUUIDMapping(const UUID & uuid, DatabasePtr database, StoragePtr table);

+    /// This method adds an empty mapping (with database and storage equal to nullptr).
+    /// It's required to "lock" some UUIDs and protect us from collisions.
+    /// Collisions of random 122-bit integers are very unlikely to happen,
+    /// but we allow explicitly specifying the UUID in a CREATE query (in particular for testing).
+    /// If some UUID was already added and we are trying to add it again,
+    /// this method will throw an exception.
+    void addUUIDMapping(const UUID & uuid);

     static String getPathForUUID(const UUID & uuid);

From 722e5fe73f947c6fca7f3e09f96060b677f2b670 Mon Sep 17 00:00:00 2001
From: Anton Popov
Date: Wed, 28 Oct 2020 00:53:21 +0300
Subject: [PATCH 346/432] Rename check

---
 tests/ci/ci_config.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/ci/ci_config.json b/tests/ci/ci_config.json
index 418b8ce0356..4e5f36a5f5d 100644
--- a/tests/ci/ci_config.json
+++ b/tests/ci/ci_config.json
@@ -333,7 +333,7 @@
             "with_coverage": false
         }
     },
-    "Functional stateless tests (release, polymorphic parts enabled)": {
+    "Functional stateless tests (release, wide parts enabled)": {
         "required_build_properties": {
             "compiler": "clang-11",
             "package_type": "deb",

From ff6499cdc48ecac7882612f9fb0341a562c73b77 Mon Sep 17 00:00:00 2001
From: Denis Zhuravlev
Date: Tue, 27 Oct 2020 19:02:56 -0300
Subject: [PATCH 347/432] test for bug_13492

---
 ...533_distinct_depends_on_max_threads.reference |  2 ++
 .../01533_distinct_depends_on_max_threads.sql    | 16 ++++++++++++++++
 2 files changed, 18 insertions(+)
 create mode 100644 tests/queries/0_stateless/01533_distinct_depends_on_max_threads.reference
 create mode 100644 tests/queries/0_stateless/01533_distinct_depends_on_max_threads.sql

diff --git a/tests/queries/0_stateless/01533_distinct_depends_on_max_threads.reference b/tests/queries/0_stateless/01533_distinct_depends_on_max_threads.reference
new file mode 100644
index 00000000000..6ed281c757a
--- /dev/null
+++ b/tests/queries/0_stateless/01533_distinct_depends_on_max_threads.reference
@@ -0,0 +1,2 @@
+1
+1

diff --git a/tests/queries/0_stateless/01533_distinct_depends_on_max_threads.sql b/tests/queries/0_stateless/01533_distinct_depends_on_max_threads.sql
new file mode 100644
index 00000000000..4f32576e86b
--- /dev/null
+++ b/tests/queries/0_stateless/01533_distinct_depends_on_max_threads.sql
@@ -0,0 +1,16 @@
+DROP TABLE IF EXISTS bug_13492;
+
+CREATE TABLE bug_13492 (`d` DateTime) ENGINE = MergeTree
+PARTITION BY toYYYYMMDD(d) ORDER BY tuple();
+
+INSERT INTO bug_13492 SELECT addDays(now(), number) FROM numbers(100);
+
+SET max_threads = 5;
+
+SELECT DISTINCT 1 FROM bug_13492, numbers(1) n;
+
+SET max_threads = 2;
+
+SELECT DISTINCT 1 FROM bug_13492, numbers(1) n;
+
+DROP TABLE bug_13492;

From 2953795330f40c5e0ef0b52078461526526a9183 Mon Sep 17 00:00:00 2001
From: Vitaliy
Zakaznikov Date: Tue, 27 Oct 2020 18:45:04 -0400 Subject: [PATCH 348/432] Fixing bug in LDAP add_config method where log size was grabbed before container was stopped. Updating ldap/external_user_directory/tests/authentications.py to reduce test time. --- tests/testflows/helpers/cluster.py | 28 ++ .../ldap/authentication/regression.py | 7 +- .../ldap/authentication/tests/common.py | 18 +- .../external_user_directory/regression.py | 7 +- .../tests/authentications.py | 372 ++++++++---------- 5 files changed, 218 insertions(+), 214 deletions(-) diff --git a/tests/testflows/helpers/cluster.py b/tests/testflows/helpers/cluster.py index 27e06a7c17f..d173547a916 100755 --- a/tests/testflows/helpers/cluster.py +++ b/tests/testflows/helpers/cluster.py @@ -54,6 +54,34 @@ class ClickHouseNode(Node): continue assert False, "container is not healthy" + def stop(self, timeout=300, safe=True): + """Stop node. + """ + if safe: + self.query("SYSTEM STOP MOVES") + self.query("SYSTEM STOP MERGES") + self.query("SYSTEM FLUSH LOGS") + with By("waiting for 5 sec for moves and merges to stop"): + time.sleep(5) + with And("forcing to sync everything to disk"): + self.command("sync", timeout=30) + + with self.cluster.lock: + for key in list(self.cluster._bash.keys()): + if key.endswith(f"-{self.name}"): + shell = self.cluster._bash.pop(key) + shell.__exit__(None, None, None) + + self.cluster.command(None, f'{self.cluster.docker_compose} stop {self.name}', timeout=timeout) + + def start(self, timeout=300, wait_healthy=True): + """Start node. + """ + self.cluster.command(None, f'{self.cluster.docker_compose} start {self.name}', timeout=timeout) + + if wait_healthy: + self.wait_healthy(timeout) + def restart(self, timeout=300, safe=True, wait_healthy=True): """Restart node. """ diff --git a/tests/testflows/ldap/authentication/regression.py b/tests/testflows/ldap/authentication/regression.py index 9d0a5ca743f..ed75ce4fe75 100755 --- a/tests/testflows/ldap/authentication/regression.py +++ b/tests/testflows/ldap/authentication/regression.py @@ -33,7 +33,7 @@ xfails = { RQ_SRS_007_LDAP_Authentication("1.0") ) @XFails(xfails) -def regression(self, local, clickhouse_binary_path): +def regression(self, local, clickhouse_binary_path, stress=None, parallel=None): """ClickHouse integration with LDAP regression module. """ nodes = { @@ -43,6 +43,11 @@ def regression(self, local, clickhouse_binary_path): with Cluster(local, clickhouse_binary_path, nodes=nodes) as cluster: self.context.cluster = cluster + if stress is not None or not hasattr(self.context, "stress"): + self.context.stress = stress + if parallel is not None or not hasattr(self.context, "parallel"): + self.context.parallel = parallel + Scenario(run=load("ldap.authentication.tests.sanity", "scenario")) Scenario(run=load("ldap.authentication.tests.multiple_servers", "scenario")) Feature(run=load("ldap.authentication.tests.connections", "feature")) diff --git a/tests/testflows/ldap/authentication/tests/common.py b/tests/testflows/ldap/authentication/tests/common.py index 0fd9670fae0..ed8d46df92b 100644 --- a/tests/testflows/ldap/authentication/tests/common.py +++ b/tests/testflows/ldap/authentication/tests/common.py @@ -85,6 +85,8 @@ def add_config(config, timeout=60, restart=False): :param config: configuration file description :param timeout: timeout, default: 20 sec """ + node = current().context.node + def check_preprocessed_config_is_updated(after_removal=False): """Check that preprocessed config is updated. 
""" @@ -116,13 +118,16 @@ def add_config(config, timeout=60, restart=False): with When("I close terminal to the node to be restarted"): bash.close() - with And("I get the current log size"): - logsize = \ - node.command("stat --format=%s /var/log/clickhouse-server/clickhouse-server.log").output.split(" ")[ - 0].strip() + with And("I stop ClickHouse to apply the config changes"): + node.stop(safe=False) - with And("I restart ClickHouse to apply the config changes"): - node.restart(safe=False) + with And("I get the current log size"): + cmd = node.cluster.command(None, + f"stat --format=%s {os.environ['CLICKHOUSE_TESTS_DIR']}/_instances/{node.name}/logs/clickhouse-server.log") + logsize = cmd.output.split(" ")[0].strip() + + with And("I start ClickHouse back up"): + node.start() with Then("I tail the log file from using previous log size as the offset"): bash.prompt = bash.__class__.prompt @@ -139,7 +144,6 @@ def add_config(config, timeout=60, restart=False): f"ConfigReloader: Loaded config '/etc/clickhouse-server/{config.preprocessed_name}', performed update on configuration", timeout=timeout) - node = current().context.node try: with Given(f"{config.name}"): if settings.debug: diff --git a/tests/testflows/ldap/external_user_directory/regression.py b/tests/testflows/ldap/external_user_directory/regression.py index 6ce860a6fd2..bd404d54438 100755 --- a/tests/testflows/ldap/external_user_directory/regression.py +++ b/tests/testflows/ldap/external_user_directory/regression.py @@ -33,7 +33,7 @@ xfails = { RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication("1.0") ) @XFails(xfails) -def regression(self, local, clickhouse_binary_path): +def regression(self, local, clickhouse_binary_path, stress=None, parallel=None): """ClickHouse LDAP external user directory regression module. 
""" nodes = { @@ -42,6 +42,11 @@ def regression(self, local, clickhouse_binary_path): with Cluster(local, clickhouse_binary_path, nodes=nodes) as cluster: self.context.cluster = cluster + + if stress is not None or not hasattr(self.context, "stress"): + self.context.stress = stress + if parallel is not None or not hasattr(self.context, "parallel"): + self.context.parallel = parallel Scenario(run=load("ldap.authentication.tests.sanity", "scenario")) Scenario(run=load("ldap.external_user_directory.tests.simple", "scenario")) diff --git a/tests/testflows/ldap/external_user_directory/tests/authentications.py b/tests/testflows/ldap/external_user_directory/tests/authentications.py index 9b216e7dd30..47c10121b68 100644 --- a/tests/testflows/ldap/external_user_directory/tests/authentications.py +++ b/tests/testflows/ldap/external_user_directory/tests/authentications.py @@ -92,25 +92,23 @@ def parallel_login(self, server, user_count=10, timeout=200): with Given("a group of LDAP users"): users = [{"cn": f"parallel_user{i}", "userpassword": randomword(20)} for i in range(user_count)] - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - with ldap_users(*users): - tasks = [] - try: - with When("users try to login in parallel", description=""" - * with valid username and password - * with invalid username and valid password - * with valid username and invalid password - """): - p = Pool(15) - for i in range(25): - tasks.append(p.apply_async(login_with_valid_username_and_password, (users, i, 50,))) - tasks.append(p.apply_async(login_with_valid_username_and_invalid_password, (users, i, 50,))) - tasks.append(p.apply_async(login_with_invalid_username_and_valid_password, (users, i, 50,))) + with ldap_users(*users): + tasks = [] + try: + with When("users try to login in parallel", description=""" + * with valid username and password + * with invalid username and valid password + * with valid username and invalid password + """): + p = Pool(15) + for i in range(25): + tasks.append(p.apply_async(login_with_valid_username_and_password, (users, i, 50,))) + tasks.append(p.apply_async(login_with_valid_username_and_invalid_password, (users, i, 50,))) + tasks.append(p.apply_async(login_with_invalid_username_and_valid_password, (users, i, 50,))) - finally: - with Then("it should work"): - join(tasks, timeout) + finally: + with Then("it should work"): + join(tasks, timeout) @TestScenario @Requirements( @@ -127,25 +125,23 @@ def parallel_login_with_the_same_user(self, server, timeout=200): with Given("only one LDAP user"): users = [{"cn": f"parallel_user1", "userpassword": randomword(20)}] - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - with ldap_users(*users): - tasks = [] - try: - with When("the same user tries to login in parallel", description=""" - * with valid username and password - * with invalid username and valid password - * with valid username and invalid password - """): - p = Pool(15) - for i in range(25): - tasks.append(p.apply_async(login_with_valid_username_and_password, (users, i, 50,))) - tasks.append(p.apply_async(login_with_valid_username_and_invalid_password, (users, i, 50,))) - tasks.append(p.apply_async(login_with_invalid_username_and_valid_password, (users, i, 50,))) + with ldap_users(*users): + tasks = [] + try: + with When("the same user tries to login in parallel", description=""" + * with valid username and password + * with invalid username and valid 
password + * with valid username and invalid password + """): + p = Pool(15) + for i in range(25): + tasks.append(p.apply_async(login_with_valid_username_and_password, (users, i, 50,))) + tasks.append(p.apply_async(login_with_valid_username_and_invalid_password, (users, i, 50,))) + tasks.append(p.apply_async(login_with_invalid_username_and_valid_password, (users, i, 50,))) - finally: - with Then("it should work"): - join(tasks, timeout) + finally: + with Then("it should work"): + join(tasks, timeout) @TestScenario def login_after_ldap_external_user_directory_is_removed(self, server): @@ -162,6 +158,7 @@ def login_after_ldap_external_user_directory_is_removed(self, server): login_and_execute_query(username="user2", password="user2", exitcode=exitcode, message=message) @TestScenario +@Tags("custom config") @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_SameUser("1.0"), RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_ValidAndInvalid("1.0") @@ -204,6 +201,7 @@ def parallel_login_with_the_same_user_multiple_servers(self, server, timeout=200 join(tasks, timeout) @TestScenario +@Tags("custom config") @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_MultipleServers("1.0"), RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_ValidAndInvalid("1.0") @@ -256,6 +254,7 @@ def parallel_login_with_multiple_servers(self, server, user_count=10, timeout=20 join(tasks, timeout) @TestScenario +@Tags("custom config") @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_LocalAndMultipleLDAP("1.0"), RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_ValidAndInvalid("1.0") @@ -323,20 +322,18 @@ def parallel_login_with_rbac_users(self, server, user_count=10, timeout=200): users = [{"cn": f"parallel_user{i}", "userpassword": randomword(20)} for i in range(user_count)] - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - with rbac_users(*users): - tasks = [] - try: - with When("I login in parallel"): - p = Pool(15) - for i in range(25): - tasks.append(p.apply_async(login_with_valid_username_and_password, (users, i, 50,))) - tasks.append(p.apply_async(login_with_valid_username_and_invalid_password, (users, i, 50,))) - tasks.append(p.apply_async(login_with_invalid_username_and_valid_password, (users, i, 50,))) - finally: - with Then("it should work"): - join(tasks, timeout) + with rbac_users(*users): + tasks = [] + try: + with When("I login in parallel"): + p = Pool(15) + for i in range(25): + tasks.append(p.apply_async(login_with_valid_username_and_password, (users, i, 50,))) + tasks.append(p.apply_async(login_with_valid_username_and_invalid_password, (users, i, 50,))) + tasks.append(p.apply_async(login_with_invalid_username_and_valid_password, (users, i, 50,))) + finally: + with Then("it should work"): + join(tasks, timeout) @TestScenario @Requirements( @@ -347,10 +344,8 @@ def login_after_user_is_added_to_ldap(self, server): """ user = {"cn": "myuser", "userpassword": "myuser"} - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - with When(f"I add user to LDAP and try to login"): - add_user_to_ldap_and_login(user=user, server=server) + with When(f"I add user to LDAP and try to login"): + add_user_to_ldap_and_login(user=user, server=server) @TestScenario @Requirements( @@ -363,27 +358,25 @@ def login_after_user_is_deleted_from_ldap(self, server): 
self.context.ldap_node = self.context.cluster.node(server) user = None - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - try: - with Given(f"I add user to LDAP"): - user = {"cn": "myuser", "userpassword": "myuser"} - user = add_user_to_ldap(**user) + try: + with Given(f"I add user to LDAP"): + user = {"cn": "myuser", "userpassword": "myuser"} + user = add_user_to_ldap(**user) - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + login_and_execute_query(username=user["cn"], password=user["userpassword"]) - with When("I delete this user from LDAP"): - delete_user_from_ldap(user) + with When("I delete this user from LDAP"): + delete_user_from_ldap(user) - with Then("when I try to login again it should fail"): - login_and_execute_query(username=user["cn"], password=user["userpassword"], - exitcode=4, - message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name" - ) - finally: - with Finally("I make sure LDAP user is deleted"): - if user is not None: - delete_user_from_ldap(user, exitcode=None) + with Then("when I try to login again it should fail"): + login_and_execute_query(username=user["cn"], password=user["userpassword"], + exitcode=4, + message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name" + ) + finally: + with Finally("I make sure LDAP user is deleted"): + if user is not None: + delete_user_from_ldap(user, exitcode=None) @TestScenario @Requirements( @@ -396,31 +389,29 @@ def login_after_user_password_changed_in_ldap(self, server): self.context.ldap_node = self.context.cluster.node(server) user = None - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - try: - with Given(f"I add user to LDAP"): - user = {"cn": "myuser", "userpassword": "myuser"} - user = add_user_to_ldap(**user) + try: + with Given(f"I add user to LDAP"): + user = {"cn": "myuser", "userpassword": "myuser"} + user = add_user_to_ldap(**user) - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + login_and_execute_query(username=user["cn"], password=user["userpassword"]) - with When("I change user password in LDAP"): - change_user_password_in_ldap(user, "newpassword") + with When("I change user password in LDAP"): + change_user_password_in_ldap(user, "newpassword") - with Then("when I try to login again it should fail"): - login_and_execute_query(username=user["cn"], password=user["userpassword"], - exitcode=4, - message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name" - ) + with Then("when I try to login again it should fail"): + login_and_execute_query(username=user["cn"], password=user["userpassword"], + exitcode=4, + message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name" + ) - with And("when I try to login with the new password it should work"): - login_and_execute_query(username=user["cn"], password="newpassword") + with And("when I try to login with the new password it should work"): + login_and_execute_query(username=user["cn"], password="newpassword") - finally: - with Finally("I make sure LDAP user is deleted"): - if user is not None: - delete_user_from_ldap(user, exitcode=None) + finally: + with Finally("I make sure LDAP user is deleted"): + if user is not None: + 
delete_user_from_ldap(user, exitcode=None) @TestScenario @Requirements( @@ -434,27 +425,25 @@ def login_after_user_cn_changed_in_ldap(self, server): user = None new_user = None - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - try: - with Given(f"I add user to LDAP"): - user = {"cn": "myuser", "userpassword": "myuser"} - user = add_user_to_ldap(**user) + try: + with Given(f"I add user to LDAP"): + user = {"cn": "myuser", "userpassword": "myuser"} + user = add_user_to_ldap(**user) - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + login_and_execute_query(username=user["cn"], password=user["userpassword"]) - with When("I change user password in LDAP"): - new_user = change_user_cn_in_ldap(user, "myuser2") + with When("I change user password in LDAP"): + new_user = change_user_cn_in_ldap(user, "myuser2") - with Then("when I try to login again it should fail"): - login_and_execute_query(username=user["cn"], password=user["userpassword"], - exitcode=4, - message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name" - ) - finally: - with Finally("I make sure LDAP user is deleted"): - if new_user is not None: - delete_user_from_ldap(new_user, exitcode=None) + with Then("when I try to login again it should fail"): + login_and_execute_query(username=user["cn"], password=user["userpassword"], + exitcode=4, + message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name" + ) + finally: + with Finally("I make sure LDAP user is deleted"): + if new_user is not None: + delete_user_from_ldap(new_user, exitcode=None) @TestScenario @Requirements( @@ -467,31 +456,29 @@ def login_after_ldap_server_is_restarted(self, server, timeout=60): self.context.ldap_node = self.context.cluster.node(server) user = None - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - try: - with Given(f"I add user to LDAP"): - user = {"cn": "myuser", "userpassword": getuid()} - user = add_user_to_ldap(**user) + try: + with Given(f"I add user to LDAP"): + user = {"cn": "myuser", "userpassword": getuid()} + user = add_user_to_ldap(**user) - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + login_and_execute_query(username=user["cn"], password=user["userpassword"]) - with When("I restart LDAP server"): - self.context.ldap_node.restart() + with When("I restart LDAP server"): + self.context.ldap_node.restart() - with Then("I try to login until it works", description=f"timeout {timeout} sec"): - started = time.time() - while True: - r = self.context.node.query("SELECT 1", - settings=[("user", user["cn"]), ("password", user["userpassword"])], - no_checks=True) - if r.exitcode == 0: - break - assert time.time() - started < timeout, error(r.output) - finally: - with Finally("I make sure LDAP user is deleted"): - if user is not None: - delete_user_from_ldap(user, exitcode=None) + with Then("I try to login until it works", description=f"timeout {timeout} sec"): + started = time.time() + while True: + r = self.context.node.query("SELECT 1", + settings=[("user", user["cn"]), ("password", user["userpassword"])], + no_checks=True) + if r.exitcode == 0: + break + assert time.time() - started < timeout, error(r.output) + finally: + with Finally("I make sure LDAP user is deleted"): + if user is not None: + delete_user_from_ldap(user, 
exitcode=None) @TestScenario @Requirements( @@ -504,31 +491,29 @@ def login_after_clickhouse_server_is_restarted(self, server, timeout=60): self.context.ldap_node = self.context.cluster.node(server) user = None - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - try: - with Given(f"I add user to LDAP"): - user = {"cn": "myuser", "userpassword": getuid()} - user = add_user_to_ldap(**user) + try: + with Given(f"I add user to LDAP"): + user = {"cn": "myuser", "userpassword": getuid()} + user = add_user_to_ldap(**user) - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + login_and_execute_query(username=user["cn"], password=user["userpassword"]) - with When("I restart ClickHouse server"): - self.context.node.restart() + with When("I restart ClickHouse server"): + self.context.node.restart() - with Then("I try to login until it works", description=f"timeout {timeout} sec"): - started = time.time() - while True: - r = self.context.node.query("SELECT 1", - settings=[("user", user["cn"]), ("password", user["userpassword"])], - no_checks=True) - if r.exitcode == 0: - break - assert time.time() - started < timeout, error(r.output) - finally: - with Finally("I make sure LDAP user is deleted"): - if user is not None: - delete_user_from_ldap(user, exitcode=None) + with Then("I try to login until it works", description=f"timeout {timeout} sec"): + started = time.time() + while True: + r = self.context.node.query("SELECT 1", + settings=[("user", user["cn"]), ("password", user["userpassword"])], + no_checks=True) + if r.exitcode == 0: + break + assert time.time() - started < timeout, error(r.output) + finally: + with Finally("I make sure LDAP user is deleted"): + if user is not None: + delete_user_from_ldap(user, exitcode=None) @TestScenario @Requirements( @@ -542,9 +527,7 @@ def valid_username_with_valid_empty_password(self, server): exitcode = 4 message = f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name" - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - add_user_to_ldap_and_login(user=user, exitcode=exitcode, message=message, server=server) + add_user_to_ldap_and_login(user=user, exitcode=exitcode, message=message, server=server) @TestScenario @Requirements( @@ -561,9 +544,7 @@ def valid_username_and_invalid_empty_password(self, server): exitcode = 4 message = f"DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name" - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) + add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) @TestScenario @Requirements( @@ -575,10 +556,8 @@ def valid_username_and_password(self, server): username = "valid_username_and_password" user = {"cn": username, "userpassword": username} - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - with When(f"I add user {username} to LDAP and try to login"): - add_user_to_ldap_and_login(user=user, server=server) + with When(f"I add user {username} to LDAP and try to login"): + add_user_to_ldap_and_login(user=user, server=server) @TestScenario @Requirements( @@ -593,9 +572,7 @@ def 
valid_username_and_password_invalid_server(self, server=None): exitcode = 4 message = f"DB::Exception: user2: Authentication failed: password is incorrect or there is no user with such name" - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - login_and_execute_query(username="user2", password="user2", exitcode=exitcode, message=message) + login_and_execute_query(username="user2", password="user2", exitcode=exitcode, message=message) @TestScenario @Requirements( @@ -608,9 +585,7 @@ def valid_long_username_and_short_password(self, server): username = "long_username_12345678901234567890123456789012345678901234567890123456789012345678901234567890" user = {"cn": username, "userpassword": "long_username"} - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - add_user_to_ldap_and_login(user=user, server=server) + add_user_to_ldap_and_login(user=user, server=server) @TestScenario @Requirements( @@ -626,9 +601,7 @@ def invalid_long_username_and_valid_short_password(self, server): exitcode = 4 message=f"DB::Exception: {login['username']}: Authentication failed: password is incorrect or there is no user with such name" - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) + add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) @TestScenario @Requirements( @@ -641,9 +614,7 @@ def valid_short_username_and_long_password(self, server): username = "long_password" user = {"cn": username, "userpassword": "long_password_12345678901234567890123456789012345678901234567890123456789012345678901234567890"} - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - add_user_to_ldap_and_login(user=user, server=server) + add_user_to_ldap_and_login(user=user, server=server) @TestScenario @Requirements( @@ -659,9 +630,7 @@ def valid_short_username_and_invalid_long_password(self, server): exitcode = 4 message=f"DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name" - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) + add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) @TestScenario @Requirements( @@ -677,9 +646,7 @@ def valid_username_and_invalid_password(self, server): exitcode = 4 message=f"DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name" - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) + add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) @TestScenario @Requirements( @@ -695,9 +662,7 @@ def invalid_username_and_valid_password(self, server): exitcode = 4 message=f"DB::Exception: {login['username']}: Authentication failed: password is incorrect or there is no user with such name" - with rbac_roles("ldap_role") as roles: - with 
ldap_external_user_directory(server=server, roles=roles, restart=True): - add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) + add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) @TestScenario @Requirements( @@ -710,9 +675,7 @@ def valid_utf8_username_and_ascii_password(self, server): username = "utf8_username_Gãńdåłf_Thê_Gręât" user = {"cn": username, "userpassword": "utf8_username"} - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - add_user_to_ldap_and_login(user=user, server=server) + add_user_to_ldap_and_login(user=user, server=server) @TestScenario @Requirements( @@ -725,18 +688,14 @@ def valid_ascii_username_and_utf8_password(self, server): username = "utf8_password" user = {"cn": username, "userpassword": "utf8_password_Gãńdåłf_Thê_Gręât"} - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - add_user_to_ldap_and_login(user=user, server=server) + add_user_to_ldap_and_login(user=user, server=server) @TestScenario def empty_username_and_empty_password(self, server=None): """Check that we can login using empty username and empty password as it will use the default user and that has an empty password. """ - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - login_and_execute_query(username="", password="") + login_and_execute_query(username="", password="") @TestScenario @Requirements( @@ -763,18 +722,16 @@ def user_lookup_priority(self, server): "ldap": {"username": "ldap", "password": "userldap"} } - with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - with ldap_users(*[{"cn": user["username"], "userpassword": user["password"]} for user in users.values()]): - with rbac_users({"cn": "local", "userpassword": "local"}): - with When("I try to login as 'default' user which is also defined in users.xml it should fail"): - login_and_execute_query(**users["default"], exitcode=exitcode, message=message.format(username="default")) + with ldap_users(*[{"cn": user["username"], "userpassword": user["password"]} for user in users.values()]): + with rbac_users({"cn": "local", "userpassword": "local"}): + with When("I try to login as 'default' user which is also defined in users.xml it should fail"): + login_and_execute_query(**users["default"], exitcode=exitcode, message=message.format(username="default")) - with When("I try to login as 'local' user which is also defined in local storage it should fail"): - login_and_execute_query(**users["local"], exitcode=exitcode, message=message.format(username="local")) + with When("I try to login as 'local' user which is also defined in local storage it should fail"): + login_and_execute_query(**users["local"], exitcode=exitcode, message=message.format(username="local")) - with When("I try to login as 'ldap' user defined only in LDAP it should work"): - login_and_execute_query(**users["ldap"]) + with When("I try to login as 'ldap' user defined only in LDAP it should work"): + login_and_execute_query(**users["ldap"]) @TestOutline(Feature) @@ -795,5 +752,10 @@ def feature(self, servers=None, server=None, node="clickhouse1"): server = "openldap1" with ldap_servers(servers): - for scenario in loads(current_module(), Scenario): + with rbac_roles("ldap_role") as roles: + with 
ldap_external_user_directory(server=server, roles=roles, restart=True): + for scenario in loads(current_module(), Scenario, filter=~has.tag("custom config")): + Scenario(test=scenario, flags=TE)(server=server) + + for scenario in loads(current_module(), Scenario, filter=has.tag("custom config")): Scenario(test=scenario, flags=TE)(server=server) From d4e640daaeda7824dbda3182367b4bc56655ab80 Mon Sep 17 00:00:00 2001 From: Vitaliy Zakaznikov Date: Tue, 27 Oct 2020 18:50:59 -0400 Subject: [PATCH 349/432] Updating TestFlows to 1.6.59 Updating AES encryption requirements.py to include Specification Updating AES regression.py to link Specification Fixing typos in LDAP authentication SRS --- docker/test/testflows/runner/Dockerfile | 2 +- tests/testflows/aes_encryption/regression.py | 1 + .../requirements/requirements.py | 2980 ++++++++++++++--- .../requirements/requirements.md | 4 +- .../requirements/requirements.py | 827 ++++- 5 files changed, 3161 insertions(+), 653 deletions(-) diff --git a/docker/test/testflows/runner/Dockerfile b/docker/test/testflows/runner/Dockerfile index 9565e39598c..06241d6d497 100644 --- a/docker/test/testflows/runner/Dockerfile +++ b/docker/test/testflows/runner/Dockerfile @@ -35,7 +35,7 @@ RUN apt-get update \ ENV TZ=Europe/Moscow RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone -RUN pip3 install urllib3 testflows==1.6.57 docker-compose docker dicttoxml kazoo tzlocal +RUN pip3 install urllib3 testflows==1.6.59 docker-compose docker dicttoxml kazoo tzlocal ENV DOCKER_CHANNEL stable ENV DOCKER_VERSION 17.09.1-ce diff --git a/tests/testflows/aes_encryption/regression.py b/tests/testflows/aes_encryption/regression.py index 6c4598dae28..06610f6e42b 100755 --- a/tests/testflows/aes_encryption/regression.py +++ b/tests/testflows/aes_encryption/regression.py @@ -48,6 +48,7 @@ xfails = { @TestFeature @Name("aes encryption") @ArgumentParser(argparser) +@Specifications(SRS_008_ClickHouse_AES_Encryption_Functions) @Requirements( RQ_SRS008_AES_Functions("1.0"), RQ_SRS008_AES_Functions_DifferentModes("1.0") diff --git a/tests/testflows/aes_encryption/requirements/requirements.py b/tests/testflows/aes_encryption/requirements/requirements.py index bae8b5cc3c1..d92e159547d 100644 --- a/tests/testflows/aes_encryption/requirements/requirements.py +++ b/tests/testflows/aes_encryption/requirements/requirements.py @@ -1,10 +1,1956 @@ # These requirements were auto generated # from software requirements specification (SRS) -# document by TestFlows v1.6.200731.1222107. +# document by TestFlows v1.6.201026.1232822. # Do not edit by hand but re-generate instead # using 'tfs requirements generate' command. 
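+# A sketch of that regeneration step (illustrative; it assumes the TestFlows
+# `tfs` CLI is installed and that requirements.md sits next to this file):
+#
+#   cat requirements.md | tfs requirements generate > requirements.py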
+from testflows.core import Specification from testflows.core import Requirement +SRS_008_ClickHouse_AES_Encryption_Functions = Specification( + name='SRS-008 ClickHouse AES Encryption Functions', + description=None, + author=None, + date=None, + status=None, + approved_by=None, + approved_date=None, + approved_version=None, + version=None, + group=None, + type=None, + link=None, + uid=None, + parent=None, + children=None, + content=''' +# SRS-008 ClickHouse AES Encryption Functions +# Software Requirements Specification + +## Table of Contents +* 1 [Revision History](#revision-history) +* 2 [Introduction](#introduction) +* 3 [Terminology](#terminology) +* 4 [Requirements](#requirements) + * 4.1 [Generic](#generic) + * 4.1.1 [RQ.SRS008.AES.Functions](#rqsrs008aesfunctions) + * 4.1.2 [RQ.SRS008.AES.Functions.Compatability.MySQL](#rqsrs008aesfunctionscompatabilitymysql) + * 4.1.3 [RQ.SRS008.AES.Functions.Compatability.Dictionaries](#rqsrs008aesfunctionscompatabilitydictionaries) + * 4.1.4 [RQ.SRS008.AES.Functions.Compatability.Engine.Database.MySQL](#rqsrs008aesfunctionscompatabilityenginedatabasemysql) + * 4.1.5 [RQ.SRS008.AES.Functions.Compatability.Engine.Table.MySQL](#rqsrs008aesfunctionscompatabilityenginetablemysql) + * 4.1.6 [RQ.SRS008.AES.Functions.Compatability.TableFunction.MySQL](#rqsrs008aesfunctionscompatabilitytablefunctionmysql) + * 4.1.7 [RQ.SRS008.AES.Functions.DifferentModes](#rqsrs008aesfunctionsdifferentmodes) + * 4.1.8 [RQ.SRS008.AES.Functions.DataFromMultipleSources](#rqsrs008aesfunctionsdatafrommultiplesources) + * 4.1.9 [RQ.SRS008.AES.Functions.SuppressOutputOfSensitiveValues](#rqsrs008aesfunctionssuppressoutputofsensitivevalues) + * 4.1.10 [RQ.SRS008.AES.Functions.InvalidParameters](#rqsrs008aesfunctionsinvalidparameters) + * 4.1.11 [RQ.SRS008.AES.Functions.MismatchedKey](#rqsrs008aesfunctionsmismatchedkey) + * 4.1.12 [RQ.SRS008.AES.Functions.Check.Performance](#rqsrs008aesfunctionscheckperformance) + * 4.1.13 [RQ.SRS008.AES.Function.Check.Performance.BestCase](#rqsrs008aesfunctioncheckperformancebestcase) + * 4.1.14 [RQ.SRS008.AES.Function.Check.Performance.WorstCase](#rqsrs008aesfunctioncheckperformanceworstcase) + * 4.1.15 [RQ.SRS008.AES.Functions.Check.Compression](#rqsrs008aesfunctionscheckcompression) + * 4.1.16 [RQ.SRS008.AES.Functions.Check.Compression.LowCardinality](#rqsrs008aesfunctionscheckcompressionlowcardinality) + * 4.2 [Specific](#specific) + * 4.2.1 [RQ.SRS008.AES.Encrypt.Function](#rqsrs008aesencryptfunction) + * 4.2.2 [RQ.SRS008.AES.Encrypt.Function.Syntax](#rqsrs008aesencryptfunctionsyntax) + * 4.2.3 [RQ.SRS008.AES.Encrypt.Function.NIST.TestVectors](#rqsrs008aesencryptfunctionnisttestvectors) + * 4.2.4 [RQ.SRS008.AES.Encrypt.Function.Parameters.PlainText](#rqsrs008aesencryptfunctionparametersplaintext) + * 4.2.5 [RQ.SRS008.AES.Encrypt.Function.Parameters.Key](#rqsrs008aesencryptfunctionparameterskey) + * 4.2.6 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode](#rqsrs008aesencryptfunctionparametersmode) + * 4.2.7 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.ValuesFormat](#rqsrs008aesencryptfunctionparametersmodevaluesformat) + * 4.2.8 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.Invalid](#rqsrs008aesencryptfunctionparametersmodevalueinvalid) + * 4.2.9 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-ECB](#rqsrs008aesencryptfunctionparametersmodevalueaes-128-ecb) + * 4.2.10 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-ECB](#rqsrs008aesencryptfunctionparametersmodevalueaes-192-ecb) + * 4.2.11 
[RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-ECB](#rqsrs008aesencryptfunctionparametersmodevalueaes-256-ecb) + * 4.2.12 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CBC](#rqsrs008aesencryptfunctionparametersmodevalueaes-128-cbc) + * 4.2.13 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CBC](#rqsrs008aesencryptfunctionparametersmodevalueaes-192-cbc) + * 4.2.14 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CBC](#rqsrs008aesencryptfunctionparametersmodevalueaes-256-cbc) + * 4.2.15 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB1](#rqsrs008aesencryptfunctionparametersmodevalueaes-128-cfb1) + * 4.2.16 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB1](#rqsrs008aesencryptfunctionparametersmodevalueaes-192-cfb1) + * 4.2.17 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB1](#rqsrs008aesencryptfunctionparametersmodevalueaes-256-cfb1) + * 4.2.18 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB8](#rqsrs008aesencryptfunctionparametersmodevalueaes-128-cfb8) + * 4.2.19 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB8](#rqsrs008aesencryptfunctionparametersmodevalueaes-192-cfb8) + * 4.2.20 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB8](#rqsrs008aesencryptfunctionparametersmodevalueaes-256-cfb8) + * 4.2.21 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB128](#rqsrs008aesencryptfunctionparametersmodevalueaes-128-cfb128) + * 4.2.22 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB128](#rqsrs008aesencryptfunctionparametersmodevalueaes-192-cfb128) + * 4.2.23 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB128](#rqsrs008aesencryptfunctionparametersmodevalueaes-256-cfb128) + * 4.2.24 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-OFB](#rqsrs008aesencryptfunctionparametersmodevalueaes-128-ofb) + * 4.2.25 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-OFB](#rqsrs008aesencryptfunctionparametersmodevalueaes-192-ofb) + * 4.2.26 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-OFB](#rqsrs008aesencryptfunctionparametersmodevalueaes-256-ofb) + * 4.2.27 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-GCM](#rqsrs008aesencryptfunctionparametersmodevalueaes-128-gcm) + * 4.2.28 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-GCM](#rqsrs008aesencryptfunctionparametersmodevalueaes-192-gcm) + * 4.2.29 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-GCM](#rqsrs008aesencryptfunctionparametersmodevalueaes-256-gcm) + * 4.2.30 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CTR](#rqsrs008aesencryptfunctionparametersmodevalueaes-128-ctr) + * 4.2.31 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CTR](#rqsrs008aesencryptfunctionparametersmodevalueaes-192-ctr) + * 4.2.32 [RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CTR](#rqsrs008aesencryptfunctionparametersmodevalueaes-256-ctr) + * 4.2.33 [RQ.SRS008.AES.Encrypt.Function.Parameters.InitializationVector](#rqsrs008aesencryptfunctionparametersinitializationvector) + * 4.2.34 [RQ.SRS008.AES.Encrypt.Function.Parameters.AdditionalAuthenticatedData](#rqsrs008aesencryptfunctionparametersadditionalauthenticateddata) + * 4.2.35 [RQ.SRS008.AES.Encrypt.Function.Parameters.ReturnValue](#rqsrs008aesencryptfunctionparametersreturnvalue) + * 4.2.36 
[RQ.SRS008.AES.Encrypt.Function.Key.Length.InvalidLengthError](#rqsrs008aesencryptfunctionkeylengthinvalidlengtherror) + * 4.2.37 [RQ.SRS008.AES.Encrypt.Function.InitializationVector.Length.InvalidLengthError](#rqsrs008aesencryptfunctioninitializationvectorlengthinvalidlengtherror) + * 4.2.38 [RQ.SRS008.AES.Encrypt.Function.InitializationVector.NotValidForMode](#rqsrs008aesencryptfunctioninitializationvectornotvalidformode) + * 4.2.39 [RQ.SRS008.AES.Encrypt.Function.AdditionalAuthenticationData.NotValidForMode](#rqsrs008aesencryptfunctionadditionalauthenticationdatanotvalidformode) + * 4.2.40 [RQ.SRS008.AES.Encrypt.Function.AdditionalAuthenticationData.Length](#rqsrs008aesencryptfunctionadditionalauthenticationdatalength) + * 4.2.41 [RQ.SRS008.AES.Encrypt.Function.AES-128-ECB.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-128-ecbkeyandinitializationvectorlength) + * 4.2.42 [RQ.SRS008.AES.Encrypt.Function.AES-192-ECB.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-192-ecbkeyandinitializationvectorlength) + * 4.2.43 [RQ.SRS008.AES.Encrypt.Function.AES-256-ECB.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-256-ecbkeyandinitializationvectorlength) + * 4.2.44 [RQ.SRS008.AES.Encrypt.Function.AES-128-CBC.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-128-cbckeyandinitializationvectorlength) + * 4.2.45 [RQ.SRS008.AES.Encrypt.Function.AES-192-CBC.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-192-cbckeyandinitializationvectorlength) + * 4.2.46 [RQ.SRS008.AES.Encrypt.Function.AES-256-CBC.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-256-cbckeyandinitializationvectorlength) + * 4.2.47 [RQ.SRS008.AES.Encrypt.Function.AES-128-CFB1.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-128-cfb1keyandinitializationvectorlength) + * 4.2.48 [RQ.SRS008.AES.Encrypt.Function.AES-192-CFB1.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-192-cfb1keyandinitializationvectorlength) + * 4.2.49 [RQ.SRS008.AES.Encrypt.Function.AES-256-CFB1.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-256-cfb1keyandinitializationvectorlength) + * 4.2.50 [RQ.SRS008.AES.Encrypt.Function.AES-128-CFB8.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-128-cfb8keyandinitializationvectorlength) + * 4.2.51 [RQ.SRS008.AES.Encrypt.Function.AES-192-CFB8.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-192-cfb8keyandinitializationvectorlength) + * 4.2.52 [RQ.SRS008.AES.Encrypt.Function.AES-256-CFB8.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-256-cfb8keyandinitializationvectorlength) + * 4.2.53 [RQ.SRS008.AES.Encrypt.Function.AES-128-CFB128.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-128-cfb128keyandinitializationvectorlength) + * 4.2.54 [RQ.SRS008.AES.Encrypt.Function.AES-192-CFB128.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-192-cfb128keyandinitializationvectorlength) + * 4.2.55 [RQ.SRS008.AES.Encrypt.Function.AES-256-CFB128.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-256-cfb128keyandinitializationvectorlength) + * 4.2.56 [RQ.SRS008.AES.Encrypt.Function.AES-128-OFB.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-128-ofbkeyandinitializationvectorlength) + * 4.2.57 [RQ.SRS008.AES.Encrypt.Function.AES-192-OFB.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-192-ofbkeyandinitializationvectorlength) + * 4.2.58 
[RQ.SRS008.AES.Encrypt.Function.AES-256-OFB.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-256-ofbkeyandinitializationvectorlength) + * 4.2.59 [RQ.SRS008.AES.Encrypt.Function.AES-128-GCM.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-128-gcmkeyandinitializationvectorlength) + * 4.2.60 [RQ.SRS008.AES.Encrypt.Function.AES-192-GCM.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-192-gcmkeyandinitializationvectorlength) + * 4.2.61 [RQ.SRS008.AES.Encrypt.Function.AES-256-GCM.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-256-gcmkeyandinitializationvectorlength) + * 4.2.62 [RQ.SRS008.AES.Encrypt.Function.AES-128-CTR.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-128-ctrkeyandinitializationvectorlength) + * 4.2.63 [RQ.SRS008.AES.Encrypt.Function.AES-192-CTR.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-192-ctrkeyandinitializationvectorlength) + * 4.2.64 [RQ.SRS008.AES.Encrypt.Function.AES-256-CTR.KeyAndInitializationVector.Length](#rqsrs008aesencryptfunctionaes-256-ctrkeyandinitializationvectorlength) + * 4.2.65 [RQ.SRS008.AES.Decrypt.Function](#rqsrs008aesdecryptfunction) + * 4.2.66 [RQ.SRS008.AES.Decrypt.Function.Syntax](#rqsrs008aesdecryptfunctionsyntax) + * 4.2.67 [RQ.SRS008.AES.Decrypt.Function.Parameters.CipherText](#rqsrs008aesdecryptfunctionparametersciphertext) + * 4.2.68 [RQ.SRS008.AES.Decrypt.Function.Parameters.Key](#rqsrs008aesdecryptfunctionparameterskey) + * 4.2.69 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode](#rqsrs008aesdecryptfunctionparametersmode) + * 4.2.70 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.ValuesFormat](#rqsrs008aesdecryptfunctionparametersmodevaluesformat) + * 4.2.71 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.Invalid](#rqsrs008aesdecryptfunctionparametersmodevalueinvalid) + * 4.2.72 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-ECB](#rqsrs008aesdecryptfunctionparametersmodevalueaes-128-ecb) + * 4.2.73 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-ECB](#rqsrs008aesdecryptfunctionparametersmodevalueaes-192-ecb) + * 4.2.74 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-ECB](#rqsrs008aesdecryptfunctionparametersmodevalueaes-256-ecb) + * 4.2.75 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CBC](#rqsrs008aesdecryptfunctionparametersmodevalueaes-128-cbc) + * 4.2.76 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CBC](#rqsrs008aesdecryptfunctionparametersmodevalueaes-192-cbc) + * 4.2.77 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CBC](#rqsrs008aesdecryptfunctionparametersmodevalueaes-256-cbc) + * 4.2.78 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB1](#rqsrs008aesdecryptfunctionparametersmodevalueaes-128-cfb1) + * 4.2.79 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB1](#rqsrs008aesdecryptfunctionparametersmodevalueaes-192-cfb1) + * 4.2.80 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB1](#rqsrs008aesdecryptfunctionparametersmodevalueaes-256-cfb1) + * 4.2.81 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB8](#rqsrs008aesdecryptfunctionparametersmodevalueaes-128-cfb8) + * 4.2.82 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB8](#rqsrs008aesdecryptfunctionparametersmodevalueaes-192-cfb8) + * 4.2.83 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB8](#rqsrs008aesdecryptfunctionparametersmodevalueaes-256-cfb8) + * 4.2.84 
[RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB128](#rqsrs008aesdecryptfunctionparametersmodevalueaes-128-cfb128) + * 4.2.85 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB128](#rqsrs008aesdecryptfunctionparametersmodevalueaes-192-cfb128) + * 4.2.86 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB128](#rqsrs008aesdecryptfunctionparametersmodevalueaes-256-cfb128) + * 4.2.87 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-OFB](#rqsrs008aesdecryptfunctionparametersmodevalueaes-128-ofb) + * 4.2.88 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-OFB](#rqsrs008aesdecryptfunctionparametersmodevalueaes-192-ofb) + * 4.2.89 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-OFB](#rqsrs008aesdecryptfunctionparametersmodevalueaes-256-ofb) + * 4.2.90 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-GCM](#rqsrs008aesdecryptfunctionparametersmodevalueaes-128-gcm) + * 4.2.91 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-GCM](#rqsrs008aesdecryptfunctionparametersmodevalueaes-192-gcm) + * 4.2.92 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-GCM](#rqsrs008aesdecryptfunctionparametersmodevalueaes-256-gcm) + * 4.2.93 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CTR](#rqsrs008aesdecryptfunctionparametersmodevalueaes-128-ctr) + * 4.2.94 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CTR](#rqsrs008aesdecryptfunctionparametersmodevalueaes-192-ctr) + * 4.2.95 [RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CTR](#rqsrs008aesdecryptfunctionparametersmodevalueaes-256-ctr) + * 4.2.96 [RQ.SRS008.AES.Decrypt.Function.Parameters.InitializationVector](#rqsrs008aesdecryptfunctionparametersinitializationvector) + * 4.2.97 [RQ.SRS008.AES.Decrypt.Function.Parameters.AdditionalAuthenticatedData](#rqsrs008aesdecryptfunctionparametersadditionalauthenticateddata) + * 4.2.98 [RQ.SRS008.AES.Decrypt.Function.Parameters.ReturnValue](#rqsrs008aesdecryptfunctionparametersreturnvalue) + * 4.2.99 [RQ.SRS008.AES.Decrypt.Function.Key.Length.InvalidLengthError](#rqsrs008aesdecryptfunctionkeylengthinvalidlengtherror) + * 4.2.100 [RQ.SRS008.AES.Decrypt.Function.InitializationVector.Length.InvalidLengthError](#rqsrs008aesdecryptfunctioninitializationvectorlengthinvalidlengtherror) + * 4.2.101 [RQ.SRS008.AES.Decrypt.Function.InitializationVector.NotValidForMode](#rqsrs008aesdecryptfunctioninitializationvectornotvalidformode) + * 4.2.102 [RQ.SRS008.AES.Decrypt.Function.AdditionalAuthenticationData.NotValidForMode](#rqsrs008aesdecryptfunctionadditionalauthenticationdatanotvalidformode) + * 4.2.103 [RQ.SRS008.AES.Decrypt.Function.AdditionalAuthenticationData.Length](#rqsrs008aesdecryptfunctionadditionalauthenticationdatalength) + * 4.2.104 [RQ.SRS008.AES.Decrypt.Function.AES-128-ECB.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-128-ecbkeyandinitializationvectorlength) + * 4.2.105 [RQ.SRS008.AES.Decrypt.Function.AES-192-ECB.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-192-ecbkeyandinitializationvectorlength) + * 4.2.106 [RQ.SRS008.AES.Decrypt.Function.AES-256-ECB.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-256-ecbkeyandinitializationvectorlength) + * 4.2.107 [RQ.SRS008.AES.Decrypt.Function.AES-128-CBC.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-128-cbckeyandinitializationvectorlength) + * 4.2.108 
[RQ.SRS008.AES.Decrypt.Function.AES-192-CBC.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-192-cbckeyandinitializationvectorlength) + * 4.2.109 [RQ.SRS008.AES.Decrypt.Function.AES-256-CBC.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-256-cbckeyandinitializationvectorlength) + * 4.2.110 [RQ.SRS008.AES.Decrypt.Function.AES-128-CFB1.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-128-cfb1keyandinitializationvectorlength) + * 4.2.111 [RQ.SRS008.AES.Decrypt.Function.AES-192-CFB1.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-192-cfb1keyandinitializationvectorlength) + * 4.2.112 [RQ.SRS008.AES.Decrypt.Function.AES-256-CFB1.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-256-cfb1keyandinitializationvectorlength) + * 4.2.113 [RQ.SRS008.AES.Decrypt.Function.AES-128-CFB8.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-128-cfb8keyandinitializationvectorlength) + * 4.2.114 [RQ.SRS008.AES.Decrypt.Function.AES-192-CFB8.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-192-cfb8keyandinitializationvectorlength) + * 4.2.115 [RQ.SRS008.AES.Decrypt.Function.AES-256-CFB8.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-256-cfb8keyandinitializationvectorlength) + * 4.2.116 [RQ.SRS008.AES.Decrypt.Function.AES-128-CFB128.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-128-cfb128keyandinitializationvectorlength) + * 4.2.117 [RQ.SRS008.AES.Decrypt.Function.AES-192-CFB128.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-192-cfb128keyandinitializationvectorlength) + * 4.2.118 [RQ.SRS008.AES.Decrypt.Function.AES-256-CFB128.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-256-cfb128keyandinitializationvectorlength) + * 4.2.119 [RQ.SRS008.AES.Decrypt.Function.AES-128-OFB.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-128-ofbkeyandinitializationvectorlength) + * 4.2.120 [RQ.SRS008.AES.Decrypt.Function.AES-192-OFB.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-192-ofbkeyandinitializationvectorlength) + * 4.2.121 [RQ.SRS008.AES.Decrypt.Function.AES-256-OFB.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-256-ofbkeyandinitializationvectorlength) + * 4.2.122 [RQ.SRS008.AES.Decrypt.Function.AES-128-GCM.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-128-gcmkeyandinitializationvectorlength) + * 4.2.123 [RQ.SRS008.AES.Decrypt.Function.AES-192-GCM.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-192-gcmkeyandinitializationvectorlength) + * 4.2.124 [RQ.SRS008.AES.Decrypt.Function.AES-256-GCM.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-256-gcmkeyandinitializationvectorlength) + * 4.2.125 [RQ.SRS008.AES.Decrypt.Function.AES-128-CTR.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-128-ctrkeyandinitializationvectorlength) + * 4.2.126 [RQ.SRS008.AES.Decrypt.Function.AES-192-CTR.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-192-ctrkeyandinitializationvectorlength) + * 4.2.127 [RQ.SRS008.AES.Decrypt.Function.AES-256-CTR.KeyAndInitializationVector.Length](#rqsrs008aesdecryptfunctionaes-256-ctrkeyandinitializationvectorlength) + * 4.3 [MySQL Specific Functions](#mysql-specific-functions) + * 4.3.1 [RQ.SRS008.AES.MySQL.Encrypt.Function](#rqsrs008aesmysqlencryptfunction) + * 4.3.2 [RQ.SRS008.AES.MySQL.Encrypt.Function.Syntax](#rqsrs008aesmysqlencryptfunctionsyntax) + * 4.3.3 
[RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.PlainText](#rqsrs008aesmysqlencryptfunctionparametersplaintext) + * 4.3.4 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Key](#rqsrs008aesmysqlencryptfunctionparameterskey) + * 4.3.5 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode](#rqsrs008aesmysqlencryptfunctionparametersmode) + * 4.3.6 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.ValuesFormat](#rqsrs008aesmysqlencryptfunctionparametersmodevaluesformat) + * 4.3.7 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.Invalid](#rqsrs008aesmysqlencryptfunctionparametersmodevalueinvalid) + * 4.3.8 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-ECB](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-128-ecb) + * 4.3.9 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-ECB](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-192-ecb) + * 4.3.10 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-ECB](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-256-ecb) + * 4.3.11 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-CBC](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-128-cbc) + * 4.3.12 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-CBC](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-192-cbc) + * 4.3.13 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-CBC](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-256-cbc) + * 4.3.14 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB1](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-128-cfb1) + * 4.3.15 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB1](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-192-cfb1) + * 4.3.16 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB1](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-256-cfb1) + * 4.3.17 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB8](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-128-cfb8) + * 4.3.18 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB8](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-192-cfb8) + * 4.3.19 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB8](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-256-cfb8) + * 4.3.20 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB128](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-128-cfb128) + * 4.3.21 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB128](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-192-cfb128) + * 4.3.22 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB128](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-256-cfb128) + * 4.3.23 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-OFB](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-128-ofb) + * 4.3.24 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-OFB](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-192-ofb) + * 4.3.25 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-OFB](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-256-ofb) + * 4.3.26 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-GCM.Error](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-128-gcmerror) + * 4.3.27 
[RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-GCM.Error](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-192-gcmerror) + * 4.3.28 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-GCM.Error](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-256-gcmerror) + * 4.3.29 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-CTR.Error](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-128-ctrerror) + * 4.3.30 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-CTR.Error](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-192-ctrerror) + * 4.3.31 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-CTR.Error](#rqsrs008aesmysqlencryptfunctionparametersmodevalueaes-256-ctrerror) + * 4.3.32 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.InitializationVector](#rqsrs008aesmysqlencryptfunctionparametersinitializationvector) + * 4.3.33 [RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.ReturnValue](#rqsrs008aesmysqlencryptfunctionparametersreturnvalue) + * 4.3.34 [RQ.SRS008.AES.MySQL.Encrypt.Function.Key.Length.TooShortError](#rqsrs008aesmysqlencryptfunctionkeylengthtooshorterror) + * 4.3.35 [RQ.SRS008.AES.MySQL.Encrypt.Function.Key.Length.TooLong](#rqsrs008aesmysqlencryptfunctionkeylengthtoolong) + * 4.3.36 [RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.Length.TooShortError](#rqsrs008aesmysqlencryptfunctioninitializationvectorlengthtooshorterror) + * 4.3.37 [RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.Length.TooLong](#rqsrs008aesmysqlencryptfunctioninitializationvectorlengthtoolong) + * 4.3.38 [RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.NotValidForMode](#rqsrs008aesmysqlencryptfunctioninitializationvectornotvalidformode) + * 4.3.39 [RQ.SRS008.AES.MySQL.Encrypt.Function.AES-128-ECB.KeyAndInitializationVector.Length](#rqsrs008aesmysqlencryptfunctionaes-128-ecbkeyandinitializationvectorlength) + * 4.3.40 [RQ.SRS008.AES.MySQL.Encrypt.Function.AES-192-ECB.KeyAndInitializationVector.Length](#rqsrs008aesmysqlencryptfunctionaes-192-ecbkeyandinitializationvectorlength) + * 4.3.41 [RQ.SRS008.AES.MySQL.Encrypt.Function.AES-256-ECB.KeyAndInitializationVector.Length](#rqsrs008aesmysqlencryptfunctionaes-256-ecbkeyandinitializationvectorlength) + * 4.3.42 [RQ.SRS008.AES.MySQL.Encrypt.Function.AES-128-CBC.KeyAndInitializationVector.Length](#rqsrs008aesmysqlencryptfunctionaes-128-cbckeyandinitializationvectorlength) + * 4.3.43 [RQ.SRS008.AES.MySQL.Encrypt.Function.AES-192-CBC.KeyAndInitializationVector.Length](#rqsrs008aesmysqlencryptfunctionaes-192-cbckeyandinitializationvectorlength) + * 4.3.44 [RQ.SRS008.AES.MySQL.Encrypt.Function.AES-256-CBC.KeyAndInitializationVector.Length](#rqsrs008aesmysqlencryptfunctionaes-256-cbckeyandinitializationvectorlength) + * 4.3.45 [RQ.SRS008.AES.MySQL.Encrypt.Function.AES-128-CFB1.KeyAndInitializationVector.Length](#rqsrs008aesmysqlencryptfunctionaes-128-cfb1keyandinitializationvectorlength) + * 4.3.46 [RQ.SRS008.AES.MySQL.Encrypt.Function.AES-192-CFB1.KeyAndInitializationVector.Length](#rqsrs008aesmysqlencryptfunctionaes-192-cfb1keyandinitializationvectorlength) + * 4.3.47 [RQ.SRS008.AES.MySQL.Encrypt.Function.AES-256-CFB1.KeyAndInitializationVector.Length](#rqsrs008aesmysqlencryptfunctionaes-256-cfb1keyandinitializationvectorlength) + * 4.3.48 [RQ.SRS008.AES.MySQL.Encrypt.Function.AES-128-CFB8.KeyAndInitializationVector.Length](#rqsrs008aesmysqlencryptfunctionaes-128-cfb8keyandinitializationvectorlength) + * 4.3.49 
[RQ.SRS008.AES.MySQL.Encrypt.Function.AES-192-CFB8.KeyAndInitializationVector.Length](#rqsrs008aesmysqlencryptfunctionaes-192-cfb8keyandinitializationvectorlength) + * 4.3.50 [RQ.SRS008.AES.MySQL.Encrypt.Function.AES-256-CFB8.KeyAndInitializationVector.Length](#rqsrs008aesmysqlencryptfunctionaes-256-cfb8keyandinitializationvectorlength) + * 4.3.51 [RQ.SRS008.AES.MySQL.Encrypt.Function.AES-128-CFB128.KeyAndInitializationVector.Length](#rqsrs008aesmysqlencryptfunctionaes-128-cfb128keyandinitializationvectorlength) + * 4.3.52 [RQ.SRS008.AES.MySQL.Encrypt.Function.AES-192-CFB128.KeyAndInitializationVector.Length](#rqsrs008aesmysqlencryptfunctionaes-192-cfb128keyandinitializationvectorlength) + * 4.3.53 [RQ.SRS008.AES.MySQL.Encrypt.Function.AES-256-CFB128.KeyAndInitializationVector.Length](#rqsrs008aesmysqlencryptfunctionaes-256-cfb128keyandinitializationvectorlength) + * 4.3.54 [RQ.SRS008.AES.MySQL.Encrypt.Function.AES-128-OFB.KeyAndInitializationVector.Length](#rqsrs008aesmysqlencryptfunctionaes-128-ofbkeyandinitializationvectorlength) + * 4.3.55 [RQ.SRS008.AES.MySQL.Encrypt.Function.AES-192-OFB.KeyAndInitializationVector.Length](#rqsrs008aesmysqlencryptfunctionaes-192-ofbkeyandinitializationvectorlength) + * 4.3.56 [RQ.SRS008.AES.MySQL.Encrypt.Function.AES-256-OFB.KeyAndInitializationVector.Length](#rqsrs008aesmysqlencryptfunctionaes-256-ofbkeyandinitializationvectorlength) + * 4.3.57 [RQ.SRS008.AES.MySQL.Decrypt.Function](#rqsrs008aesmysqldecryptfunction) + * 4.3.58 [RQ.SRS008.AES.MySQL.Decrypt.Function.Syntax](#rqsrs008aesmysqldecryptfunctionsyntax) + * 4.3.59 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.CipherText](#rqsrs008aesmysqldecryptfunctionparametersciphertext) + * 4.3.60 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Key](#rqsrs008aesmysqldecryptfunctionparameterskey) + * 4.3.61 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode](#rqsrs008aesmysqldecryptfunctionparametersmode) + * 4.3.62 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.ValuesFormat](#rqsrs008aesmysqldecryptfunctionparametersmodevaluesformat) + * 4.3.63 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.Invalid](#rqsrs008aesmysqldecryptfunctionparametersmodevalueinvalid) + * 4.3.64 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-ECB](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-128-ecb) + * 4.3.65 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-ECB](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-192-ecb) + * 4.3.66 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-ECB](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-256-ecb) + * 4.3.67 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CBC](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-128-cbc) + * 4.3.68 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CBC](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-192-cbc) + * 4.3.69 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CBC](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-256-cbc) + * 4.3.70 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB1](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-128-cfb1) + * 4.3.71 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB1](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-192-cfb1) + * 4.3.72 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB1](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-256-cfb1) + * 
4.3.73 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB8](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-128-cfb8) + * 4.3.74 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB8](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-192-cfb8) + * 4.3.75 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB8](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-256-cfb8) + * 4.3.76 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB128](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-128-cfb128) + * 4.3.77 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB128](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-192-cfb128) + * 4.3.78 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB128](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-256-cfb128) + * 4.3.79 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-OFB](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-128-ofb) + * 4.3.80 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-OFB](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-192-ofb) + * 4.3.81 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-OFB](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-256-ofb) + * 4.3.82 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-GCM.Error](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-128-gcmerror) + * 4.3.83 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-GCM.Error](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-192-gcmerror) + * 4.3.84 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-GCM.Error](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-256-gcmerror) + * 4.3.85 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CTR.Error](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-128-ctrerror) + * 4.3.86 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CTR.Error](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-192-ctrerror) + * 4.3.87 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CTR.Error](#rqsrs008aesmysqldecryptfunctionparametersmodevalueaes-256-ctrerror) + * 4.3.88 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.InitializationVector](#rqsrs008aesmysqldecryptfunctionparametersinitializationvector) + * 4.3.89 [RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.ReturnValue](#rqsrs008aesmysqldecryptfunctionparametersreturnvalue) + * 4.3.90 [RQ.SRS008.AES.MySQL.Decrypt.Function.Key.Length.TooShortError](#rqsrs008aesmysqldecryptfunctionkeylengthtooshorterror) + * 4.3.91 [RQ.SRS008.AES.MySQL.Decrypt.Function.Key.Length.TooLong](#rqsrs008aesmysqldecryptfunctionkeylengthtoolong) + * 4.3.92 [RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.Length.TooShortError](#rqsrs008aesmysqldecryptfunctioninitializationvectorlengthtooshorterror) + * 4.3.93 [RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.Length.TooLong](#rqsrs008aesmysqldecryptfunctioninitializationvectorlengthtoolong) + * 4.3.94 [RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.NotValidForMode](#rqsrs008aesmysqldecryptfunctioninitializationvectornotvalidformode) + * 4.3.95 [RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-ECB.KeyAndInitializationVector.Length](#rqsrs008aesmysqldecryptfunctionaes-128-ecbkeyandinitializationvectorlength) + * 4.3.96 
[RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-ECB.KeyAndInitializationVector.Length](#rqsrs008aesmysqldecryptfunctionaes-192-ecbkeyandinitializationvectorlength) + * 4.3.97 [RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-ECB.KeyAndInitializationVector.Length](#rqsrs008aesmysqldecryptfunctionaes-256-ecbkeyandinitializationvectorlength) + * 4.3.98 [RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-CBC.KeyAndInitializationVector.Length](#rqsrs008aesmysqldecryptfunctionaes-128-cbckeyandinitializationvectorlength) + * 4.3.99 [RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-CBC.KeyAndInitializationVector.Length](#rqsrs008aesmysqldecryptfunctionaes-192-cbckeyandinitializationvectorlength) + * 4.3.100 [RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-CBC.KeyAndInitializationVector.Length](#rqsrs008aesmysqldecryptfunctionaes-256-cbckeyandinitializationvectorlength) + * 4.3.101 [RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-CFB1.KeyAndInitializationVector.Length](#rqsrs008aesmysqldecryptfunctionaes-128-cfb1keyandinitializationvectorlength) + * 4.3.102 [RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-CFB1.KeyAndInitializationVector.Length](#rqsrs008aesmysqldecryptfunctionaes-192-cfb1keyandinitializationvectorlength) + * 4.3.103 [RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-CFB1.KeyAndInitializationVector.Length](#rqsrs008aesmysqldecryptfunctionaes-256-cfb1keyandinitializationvectorlength) + * 4.3.104 [RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-CFB8.KeyAndInitializationVector.Length](#rqsrs008aesmysqldecryptfunctionaes-128-cfb8keyandinitializationvectorlength) + * 4.3.105 [RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-CFB8.KeyAndInitializationVector.Length](#rqsrs008aesmysqldecryptfunctionaes-192-cfb8keyandinitializationvectorlength) + * 4.3.106 [RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-CFB8.KeyAndInitializationVector.Length](#rqsrs008aesmysqldecryptfunctionaes-256-cfb8keyandinitializationvectorlength) + * 4.3.107 [RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-CFB128.KeyAndInitializationVector.Length](#rqsrs008aesmysqldecryptfunctionaes-128-cfb128keyandinitializationvectorlength) + * 4.3.108 [RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-CFB128.KeyAndInitializationVector.Length](#rqsrs008aesmysqldecryptfunctionaes-192-cfb128keyandinitializationvectorlength) + * 4.3.109 [RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-CFB128.KeyAndInitializationVector.Length](#rqsrs008aesmysqldecryptfunctionaes-256-cfb128keyandinitializationvectorlength) + * 4.3.110 [RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-OFB.KeyAndInitializationVector.Length](#rqsrs008aesmysqldecryptfunctionaes-128-ofbkeyandinitializationvectorlength) + * 4.3.111 [RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-OFB.KeyAndInitializationVector.Length](#rqsrs008aesmysqldecryptfunctionaes-192-ofbkeyandinitializationvectorlength) + * 4.3.112 [RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-OFB.KeyAndInitializationVector.Length](#rqsrs008aesmysqldecryptfunctionaes-256-ofbkeyandinitializationvectorlength) +* 5 [References](#references) + +## Revision History + +This document is stored in an electronic form using [Git] source control management software +hosted in a [GitHub Repository]. +All the updates are tracked using the [Revision History]. + +## Introduction + +Users need an ability to encrypt and decrypt column data with tenant specific keys. +Use cases include protection of sensitive column values and [GDPR] right to forget policies. 
+The implementation will support capabilities of the [MySQL aes_encrypt] and [MySQL aes_decrypt] +functions which encrypt and decrypt values using the [AES] (Advanced Encryption Standard) +algorithm. This functionality will enable encryption and decryption of data +accessed on remote [MySQL] servers via [MySQL Dictionary] or [MySQL Database Engine], +[MySQL Table Engine], or [MySQL Table Function]. + +## Terminology + +* **AES** - + Advanced Encryption Standard ([AES]) + +## Requirements + +### Generic + +#### RQ.SRS008.AES.Functions +version: 1.0 + +[ClickHouse] SHALL support [AES] encryption functions to encrypt and decrypt data. + +#### RQ.SRS008.AES.Functions.Compatability.MySQL +version: 1.0 + +[ClickHouse] SHALL support [AES] encryption functions compatible with [MySQL 5.7]. + +#### RQ.SRS008.AES.Functions.Compatability.Dictionaries +version: 1.0 + +[ClickHouse] SHALL support encryption and decryption of data accessed on remote +[MySQL] servers using [MySQL Dictionary]. + +#### RQ.SRS008.AES.Functions.Compatability.Engine.Database.MySQL +version: 1.0 + +[ClickHouse] SHALL support encryption and decryption of data accessed using [MySQL Database Engine], + +#### RQ.SRS008.AES.Functions.Compatability.Engine.Table.MySQL +version: 1.0 + +[ClickHouse] SHALL support encryption and decryption of data accessed using [MySQL Table Engine]. + +#### RQ.SRS008.AES.Functions.Compatability.TableFunction.MySQL +version: 1.0 + +[ClickHouse] SHALL support encryption and decryption of data accessed using [MySQL Table Function]. + +#### RQ.SRS008.AES.Functions.DifferentModes +version: 1.0 + +[ClickHouse] SHALL allow different modes to be supported in a single SQL statement +using explicit function parameters. + +#### RQ.SRS008.AES.Functions.DataFromMultipleSources +version: 1.0 + +[ClickHouse] SHALL support handling encryption and decryption of data from multiple sources +in the `SELECT` statement, including [ClickHouse] [MergeTree] table as well as [MySQL Dictionary], +[MySQL Database Engine], [MySQL Table Engine], and [MySQL Table Function] +with possibly different encryption schemes. + +#### RQ.SRS008.AES.Functions.SuppressOutputOfSensitiveValues +version: 1.0 + +[ClickHouse] SHALL suppress output of [AES] `string` and `key` parameters to the system log, +error log, and `query_log` table to prevent leakage of sensitive values. + +#### RQ.SRS008.AES.Functions.InvalidParameters +version: 1.0 + +[ClickHouse] SHALL return an error when parameters are invalid. + +#### RQ.SRS008.AES.Functions.Mismatched.Key +version: 1.0 + +[ClickHouse] SHALL return garbage for mismatched keys. + +#### RQ.SRS008.AES.Functions.Mismatched.IV +version: 1.0 + +[ClickHouse] SHALL return garbage for mismatched initialization vector for the modes that use it. + +#### RQ.SRS008.AES.Functions.Mismatched.AAD +version: 1.0 + +[ClickHouse] SHALL return garbage for mismatched additional authentication data for the modes that use it. + +#### RQ.SRS008.AES.Functions.Mismatched.Mode +version: 1.0 + +[ClickHouse] SHALL return an error or garbage for mismatched mode. + +#### RQ.SRS008.AES.Functions.Check.Performance +version: 1.0 + +Performance of [AES] encryption functions SHALL be measured. + +#### RQ.SRS008.AES.Function.Check.Performance.BestCase +version: 1.0 + +Performance of [AES] encryption functions SHALL be checked for the best case +scenario where there is one key, one initialization vector, and one large stream of data. 
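+
+For example, a minimal sketch of such a best case check, assuming the
+`aes-256-ofb` mode and illustrative key and initialization vector literals
+(they are not part of this specification), is to time one query over a single
+large stream of rows:
+
+```sql
+-- best case: one key, one iv, one large stream of data
+SELECT count()
+FROM
+(
+    SELECT aes_encrypt(toString(number), '01234567890123456789012345678901', 'aes-256-ofb', 'abcdefghijklmnop') AS ciphertext
+    FROM numbers(1000000)
+)
+```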
+ +#### RQ.SRS008.AES.Function.Check.Performance.WorstCase +version: 1.0 + +Performance of [AES] encryption functions SHALL be checked for the worst case +where there are `N` keys, `N` initialization vectors and `N` very small streams of data. + +#### RQ.SRS008.AES.Functions.Check.Compression +version: 1.0 + +Effect of [AES] encryption on column compression SHALL be measured. + +#### RQ.SRS008.AES.Functions.Check.Compression.LowCardinality +version: 1.0 + +Effect of [AES] encryption on the compression of a column with [LowCardinality] data type +SHALL be measured. + +### Specific + +#### RQ.SRS008.AES.Encrypt.Function +version: 1.0 + +[ClickHouse] SHALL support `aes_encrypt` function to encrypt data using [AES]. + +#### RQ.SRS008.AES.Encrypt.Function.Syntax +version: 1.0 + +[ClickHouse] SHALL support the following syntax for the `aes_encrypt` function + +```sql +aes_encrypt(plaintext, key, mode, [iv, aad]) +``` + +#### RQ.SRS008.AES.Encrypt.Function.NIST.TestVectors +version: 1.0 + +[ClickHouse] `aes_encrypt` function output SHALL produce output that matches [NIST test vectors]. + +#### RQ.SRS008.AES.Encrypt.Function.Parameters.PlainText +version: 1.0 + +[ClickHouse] SHALL support `plaintext` accepting any data type as +the first parameter to the `aes_encrypt` function that SHALL specify the data to be encrypted. + +#### RQ.SRS008.AES.Encrypt.Function.Parameters.Key +version: 1.0 + +[ClickHouse] SHALL support `key` with `String` or `FixedString` data types +as the second parameter to the `aes_encrypt` function that SHALL specify the encryption key. + +#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode +version: 1.0 + +[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the third parameter +to the `aes_encrypt` function that SHALL specify encryption key length and block encryption mode. + +#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.ValuesFormat +version: 1.0 + +[ClickHouse] SHALL support values of the form `aes-[key length]-[mode]` for the `mode` parameter +of the `aes_encrypt` function where +the `key_length` SHALL specifies the length of the key and SHALL accept +`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption +mode and SHALL accept [ECB], [CBC], [CFB1], [CFB8], [CFB128], or [OFB] as well as +[CTR] and [GCM] as the values. For example, `aes-256-ofb`. + +#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.Invalid +version: 1.0 + +[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `aes_encrypt` +function is not valid with the exception where such a mode is supported by the underlying +[OpenSSL] implementation. + +#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-ECB +version: 1.0 + +[ClickHouse] SHALL support `aes-128-ecb` as the value for the `mode` parameter of the `aes_encrypt` function +and [AES] algorithm SHALL use the [ECB] block mode encryption with a 128 bit key. + +#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-ECB +version: 1.0 + +[ClickHouse] SHALL support `aes-192-ecb` as the value for the `mode` parameter of the `aes_encrypt` function +and [AES] algorithm SHALL use the [ECB] block mode encryption with a 192 bit key. + +#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-ECB +version: 1.0 + +[ClickHouse] SHALL support `aes-256-ecb` as the value for the `mode` parameter of the `aes_encrypt` function +and [AES] algorithm SHALL use the [ECB] block mode encryption with a 256 bit key. 
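+
+As an illustration of the `aes_encrypt` syntax defined above (all literals here
+are examples only), the [ECB] family does not use an initialization vector, so
+only the first three parameters are passed:
+
+```sql
+-- aes-256-ecb: 32 byte key, no iv
+SELECT hex(aes_encrypt('plaintext to protect', '01234567890123456789012345678901', 'aes-256-ecb'))
+```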
+ +#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CBC +version: 1.0 + +[ClickHouse] SHALL support `aes-128-cbc` as the value for the `mode` parameter of the `aes_encrypt` function +and [AES] algorithm SHALL use the [CBC] block mode encryption with a 128 bit key. + +#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CBC +version: 1.0 + +[ClickHouse] SHALL support `aes-192-cbc` as the value for the `mode` parameter of the `aes_encrypt` function +and [AES] algorithm SHALL use the [CBC] block mode encryption with a 192 bit key. + +#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CBC +version: 1.0 + +[ClickHouse] SHALL support `aes-256-cbc` as the value for the `mode` parameter of the `aes_encrypt` function +and [AES] algorithm SHALL use the [CBC] block mode encryption with a 256 bit key. + +#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB1 +version: 1.0 + +[ClickHouse] SHALL support `aes-128-cfb1` as the value for the `mode` parameter of the `aes_encrypt` function +and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 128 bit key. + +#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB1 +version: 1.0 + +[ClickHouse] SHALL support `aes-192-cfb1` as the value for the `mode` parameter of the `aes_encrypt` function +and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 192 bit key. + +#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB1 +version: 1.0 + +[ClickHouse] SHALL support `aes-256-cfb1` as the value for the `mode` parameter of the `aes_encrypt` function +and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 256 bit key. + +#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB8 +version: 1.0 + +[ClickHouse] SHALL support `aes-128-cfb8` as the value for the `mode` parameter of the `aes_encrypt` function +and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 128 bit key. + +#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB8 +version: 1.0 + +[ClickHouse] SHALL support `aes-192-cfb8` as the value for the `mode` parameter of the `aes_encrypt` function +and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 192 bit key. + +#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB8 +version: 1.0 + +[ClickHouse] SHALL support `aes-256-cfb8` as the value for the `mode` parameter of the `aes_encrypt` function +and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 256 bit key. + +#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB128 +version: 1.0 + +[ClickHouse] SHALL support `aes-128-cfb128` as the value for the `mode` parameter of the `aes_encrypt` function +and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 128 bit key. + +#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB128 +version: 1.0 + +[ClickHouse] SHALL support `aes-192-cfb128` as the value for the `mode` parameter of the `aes_encrypt` function +and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 192 bit key. + +#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB128 +version: 1.0 + +[ClickHouse] SHALL support `aes-256-cfb128` as the value for the `mode` parameter of the `aes_encrypt` function +and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 256 bit key. 
+
+#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-OFB
+version: 1.0
+
+[ClickHouse] SHALL support `aes-128-ofb` as the value for the `mode` parameter of the `aes_encrypt` function
+and [AES] algorithm SHALL use the [OFB] block mode encryption with a 128 bit key.
+
+#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-OFB
+version: 1.0
+
+[ClickHouse] SHALL support `aes-192-ofb` as the value for the `mode` parameter of the `aes_encrypt` function
+and [AES] algorithm SHALL use the [OFB] block mode encryption with a 192 bit key.
+
+#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-OFB
+version: 1.0
+
+[ClickHouse] SHALL support `aes-256-ofb` as the value for the `mode` parameter of the `aes_encrypt` function
+and [AES] algorithm SHALL use the [OFB] block mode encryption with a 256 bit key.
+
+#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-GCM
+version: 1.0
+
+[ClickHouse] SHALL support `aes-128-gcm` as the value for the `mode` parameter of the `aes_encrypt` function
+and [AES] algorithm SHALL use the [GCM] block mode encryption with a 128 bit key.
+An [AEAD] 16-byte tag is appended to the resulting ciphertext according to
+[RFC5116].
+
+#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-GCM
+version: 1.0
+
+[ClickHouse] SHALL support `aes-192-gcm` as the value for the `mode` parameter of the `aes_encrypt` function
+and [AES] algorithm SHALL use the [GCM] block mode encryption with a 192 bit key.
+An [AEAD] 16-byte tag is appended to the resulting ciphertext according to
+[RFC5116].
+
+#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-GCM
+version: 1.0
+
+[ClickHouse] SHALL support `aes-256-gcm` as the value for the `mode` parameter of the `aes_encrypt` function
+and [AES] algorithm SHALL use the [GCM] block mode encryption with a 256 bit key.
+An [AEAD] 16-byte tag is appended to the resulting ciphertext according to
+[RFC5116].
+
+#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CTR
+version: 1.0
+
+[ClickHouse] SHALL support `aes-128-ctr` as the value for the `mode` parameter of the `aes_encrypt` function
+and [AES] algorithm SHALL use the [CTR] block mode encryption with a 128 bit key.
+
+#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CTR
+version: 1.0
+
+[ClickHouse] SHALL support `aes-192-ctr` as the value for the `mode` parameter of the `aes_encrypt` function
+and [AES] algorithm SHALL use the [CTR] block mode encryption with a 192 bit key.
+
+#### RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CTR
+version: 1.0
+
+[ClickHouse] SHALL support `aes-256-ctr` as the value for the `mode` parameter of the `aes_encrypt` function
+and [AES] algorithm SHALL use the [CTR] block mode encryption with a 256 bit key.
+
+#### RQ.SRS008.AES.Encrypt.Function.Parameters.InitializationVector
+version: 1.0
+
+[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth
+parameter to the `aes_encrypt` function that SHALL specify the initialization vector for block modes
+that require it.
+
+#### RQ.SRS008.AES.Encrypt.Function.Parameters.AdditionalAuthenticatedData
+version: 1.0
+
+[ClickHouse] SHALL support `aad` with `String` or `FixedString` data types as the optional fifth
+parameter to the `aes_encrypt` function that SHALL specify the additional authenticated data
+for block modes that require it.
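+
+The example below is an illustrative sketch of the syntax and `mode` values described
+above; the 32-byte key, the initialization vectors, and the `aad` value are placeholder
+literals, not prescribed values.
+
+```sql
+-- aes-256-ofb: 32-byte key and 16-byte iv (placeholder values)
+SELECT hex(aes_encrypt('Hello, World!', '12345678901234567890123456789012',
+    'aes-256-ofb', '1234567890123456'));
+
+-- aes-256-gcm: 32-byte key, 12-byte iv (GCM requires at least 8 bytes), and
+-- optional aad; a 16-byte AEAD tag is appended to the resulting ciphertext
+SELECT hex(aes_encrypt('Hello, World!', '12345678901234567890123456789012',
+    'aes-256-gcm', '123456789012', 'additional data'));
+```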
+
+#### RQ.SRS008.AES.Encrypt.Function.Parameters.ReturnValue
+version: 1.0
+
+[ClickHouse] SHALL return the encrypted value of the data
+using `String` data type as the result of the `aes_encrypt` function.
+
+#### RQ.SRS008.AES.Encrypt.Function.Key.Length.InvalidLengthError
+version: 1.0
+
+[ClickHouse] SHALL return an error if the `key` length does not exactly match the length required by the `aes_encrypt` function for a given block mode.
+
+#### RQ.SRS008.AES.Encrypt.Function.InitializationVector.Length.InvalidLengthError
+version: 1.0
+
+[ClickHouse] SHALL return an error if the `iv` is specified and its length does not exactly match the size required by the `aes_encrypt` function for a given block mode.
+
+#### RQ.SRS008.AES.Encrypt.Function.InitializationVector.NotValidForMode
+version: 1.0
+
+[ClickHouse] SHALL return an error if the `iv` is specified for the `aes_encrypt` function for a mode that does not need it.
+
+#### RQ.SRS008.AES.Encrypt.Function.AdditionalAuthenticationData.NotValidForMode
+version: 1.0
+
+[ClickHouse] SHALL return an error if the `aad` is specified for the `aes_encrypt` function for a mode that does not need it.
+
+#### RQ.SRS008.AES.Encrypt.Function.AdditionalAuthenticationData.Length
+version: 1.0
+
+[ClickHouse] SHALL NOT limit the size of the `aad` parameter passed to the `aes_encrypt` function.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-128-ECB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-ecb` and `key` is not 16 bytes
+or `iv` or `aad` is specified.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-192-ECB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-ecb` and `key` is not 24 bytes
+or `iv` or `aad` is specified.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-256-ECB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-ecb` and `key` is not 32 bytes
+or `iv` or `aad` is specified.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-128-CBC.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-cbc` and `key` is not 16 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-192-CBC.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-cbc` and `key` is not 24 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-256-CBC.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-cbc` and `key` is not 32 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-128-CFB1.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-cfb1` and `key` is not 16 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-192-CFB1.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-cfb1` and `key` is not 24 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-256-CFB1.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-cfb1` and `key` is not 32 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-128-CFB8.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-cfb8` and `key` is not 16 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-192-CFB8.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-cfb8` and `key` is not 24 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-256-CFB8.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-cfb8` and `key` is not 32 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-128-CFB128.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-cfb128` and `key` is not 16 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-192-CFB128.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-cfb128` and `key` is not 24 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-256-CFB128.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-cfb128` and `key` is not 32 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-128-OFB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-ofb` and `key` is not 16 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-192-OFB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-ofb` and `key` is not 24 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-256-OFB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-ofb` and `key` is not 32 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-128-GCM.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-gcm` and `key` is not 16 bytes
+or `iv` is not specified or is less than 8 bytes.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-192-GCM.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-gcm` and `key` is not 24 bytes
+or `iv` is not specified or is less than 8 bytes.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-256-GCM.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-gcm` and `key` is not 32 bytes
+or `iv` is not specified or is less than 8 bytes.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-128-CTR.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-ctr` and `key` is not 16 bytes
+or if specified `iv` is not 16 bytes.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-192-CTR.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-ctr` and `key` is not 24 bytes
+or if specified `iv` is not 16 bytes.
+
+#### RQ.SRS008.AES.Encrypt.Function.AES-256-CTR.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-ctr` and `key` is not 32 bytes
+or if specified `iv` is not 16 bytes.
+
+#### RQ.SRS008.AES.Decrypt.Function
+version: 1.0
+
+[ClickHouse] SHALL support `aes_decrypt` function to decrypt data using [AES].
+
+#### RQ.SRS008.AES.Decrypt.Function.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support the following syntax for the `aes_decrypt` function
+
+```sql
+aes_decrypt(ciphertext, key, mode, [iv, aad])
+```
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.CipherText
+version: 1.0
+
+[ClickHouse] SHALL support `ciphertext` accepting `FixedString` or `String` data types as
+the first parameter to the `aes_decrypt` function that SHALL specify the data to be decrypted.
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.Key
+version: 1.0
+
+[ClickHouse] SHALL support `key` with `String` or `FixedString` data types
+as the second parameter to the `aes_decrypt` function that SHALL specify the encryption key.
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode
+version: 1.0
+
+[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the third parameter
+to the `aes_decrypt` function that SHALL specify encryption key length and block encryption mode.
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.ValuesFormat
+version: 1.0
+
+[ClickHouse] SHALL support values of the form `aes-[key_length]-[mode]` for the `mode` parameter
+of the `aes_decrypt` function where
+the `key_length` SHALL specify the length of the key and SHALL accept
+`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption
+mode and SHALL accept [ECB], [CBC], [CFB1], [CFB8], [CFB128], or [OFB] as well as
+[CTR] and [GCM] as the values. For example, `aes-256-ofb`.
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.Invalid
+version: 1.0
+
+[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `aes_decrypt`
+function is not valid, unless such a mode is supported by the underlying
+[OpenSSL] implementation.
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-ECB
+version: 1.0
+
+[ClickHouse] SHALL support `aes-128-ecb` as the value for the `mode` parameter of the `aes_decrypt` function
+and [AES] algorithm SHALL use the [ECB] block mode encryption with a 128 bit key.
+ +#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-ECB +version: 1.0 + +[ClickHouse] SHALL support `aes-192-ecb` as the value for the `mode` parameter of the `aes_decrypt` function +and [AES] algorithm SHALL use the [ECB] block mode encryption with a 192 bit key. + +#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-ECB +version: 1.0 + +[ClickHouse] SHALL support `aes-256-ecb` as the value for the `mode` parameter of the `aes_decrypt` function +and [AES] algorithm SHALL use the [ECB] block mode encryption with a 256 bit key. + +#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CBC +version: 1.0 + +[ClickHouse] SHALL support `aes-128-cbc` as the value for the `mode` parameter of the `aes_decrypt` function +and [AES] algorithm SHALL use the [CBC] block mode encryption with a 128 bit key. + +#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CBC +version: 1.0 + +[ClickHouse] SHALL support `aes-192-cbc` as the value for the `mode` parameter of the `aes_decrypt` function +and [AES] algorithm SHALL use the [CBC] block mode encryption with a 192 bit key. + +#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CBC +version: 1.0 + +[ClickHouse] SHALL support `aes-256-cbc` as the value for the `mode` parameter of the `aes_decrypt` function +and [AES] algorithm SHALL use the [CBC] block mode encryption with a 256 bit key. + +#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB1 +version: 1.0 + +[ClickHouse] SHALL support `aes-128-cfb1` as the value for the `mode` parameter of the `aes_decrypt` function +and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 128 bit key. + +#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB1 +version: 1.0 + +[ClickHouse] SHALL support `aes-192-cfb1` as the value for the `mode` parameter of the `aes_decrypt` function +and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 192 bit key. + +#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB1 +version: 1.0 + +[ClickHouse] SHALL support `aes-256-cfb1` as the value for the `mode` parameter of the `aes_decrypt` function +and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 256 bit key. + +#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB8 +version: 1.0 + +[ClickHouse] SHALL support `aes-128-cfb8` as the value for the `mode` parameter of the `aes_decrypt` function +and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 128 bit key. + +#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB8 +version: 1.0 + +[ClickHouse] SHALL support `aes-192-cfb8` as the value for the `mode` parameter of the `aes_decrypt` function +and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 192 bit key. + +#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB8 +version: 1.0 + +[ClickHouse] SHALL support `aes-256-cfb8` as the value for the `mode` parameter of the `aes_decrypt` function +and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 256 bit key. + +#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB128 +version: 1.0 + +[ClickHouse] SHALL support `aes-128-cfb128` as the value for the `mode` parameter of the `aes_decrypt` function +and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 128 bit key. 
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB128
+version: 1.0
+
+[ClickHouse] SHALL support `aes-192-cfb128` as the value for the `mode` parameter of the `aes_decrypt` function
+and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 192 bit key.
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB128
+version: 1.0
+
+[ClickHouse] SHALL support `aes-256-cfb128` as the value for the `mode` parameter of the `aes_decrypt` function
+and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 256 bit key.
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-OFB
+version: 1.0
+
+[ClickHouse] SHALL support `aes-128-ofb` as the value for the `mode` parameter of the `aes_decrypt` function
+and [AES] algorithm SHALL use the [OFB] block mode encryption with a 128 bit key.
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-OFB
+version: 1.0
+
+[ClickHouse] SHALL support `aes-192-ofb` as the value for the `mode` parameter of the `aes_decrypt` function
+and [AES] algorithm SHALL use the [OFB] block mode encryption with a 192 bit key.
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-OFB
+version: 1.0
+
+[ClickHouse] SHALL support `aes-256-ofb` as the value for the `mode` parameter of the `aes_decrypt` function
+and [AES] algorithm SHALL use the [OFB] block mode encryption with a 256 bit key.
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-GCM
+version: 1.0
+
+[ClickHouse] SHALL support `aes-128-gcm` as the value for the `mode` parameter of the `aes_decrypt` function
+and [AES] algorithm SHALL use the [GCM] block mode encryption with a 128 bit key.
+An [AEAD] 16-byte tag is expected to be present at the end of the ciphertext according to
+[RFC5116].
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-GCM
+version: 1.0
+
+[ClickHouse] SHALL support `aes-192-gcm` as the value for the `mode` parameter of the `aes_decrypt` function
+and [AES] algorithm SHALL use the [GCM] block mode encryption with a 192 bit key.
+An [AEAD] 16-byte tag is expected to be present at the end of the ciphertext according to
+[RFC5116].
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-GCM
+version: 1.0
+
+[ClickHouse] SHALL support `aes-256-gcm` as the value for the `mode` parameter of the `aes_decrypt` function
+and [AES] algorithm SHALL use the [GCM] block mode encryption with a 256 bit key.
+An [AEAD] 16-byte tag is expected to be present at the end of the ciphertext according to
+[RFC5116].
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CTR
+version: 1.0
+
+[ClickHouse] SHALL support `aes-128-ctr` as the value for the `mode` parameter of the `aes_decrypt` function
+and [AES] algorithm SHALL use the [CTR] block mode encryption with a 128 bit key.
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CTR
+version: 1.0
+
+[ClickHouse] SHALL support `aes-192-ctr` as the value for the `mode` parameter of the `aes_decrypt` function
+and [AES] algorithm SHALL use the [CTR] block mode encryption with a 192 bit key.
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CTR
+version: 1.0
+
+[ClickHouse] SHALL support `aes-256-ctr` as the value for the `mode` parameter of the `aes_decrypt` function
+and [AES] algorithm SHALL use the [CTR] block mode encryption with a 256 bit key.
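+
+The sketch below shows a decryption round trip using the syntax above; the key and
+initialization vector literals are placeholders only. For the GCM modes, the 16-byte
+[AEAD] tag appended by `aes_encrypt` is consumed and verified by `aes_decrypt`.
+
+```sql
+WITH
+    '12345678901234567890123456789012' AS k, -- 32-byte key for aes-256-*
+    '123456789012' AS n                      -- 12-byte iv; GCM requires at least 8 bytes
+SELECT aes_decrypt(aes_encrypt('secret', k, 'aes-256-gcm', n, 'aad'),
+    k, 'aes-256-gcm', n, 'aad') AS plaintext;
+```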
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.InitializationVector
+version: 1.0
+
+[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth
+parameter to the `aes_decrypt` function that SHALL specify the initialization vector for block modes
+that require it.
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.AdditionalAuthenticatedData
+version: 1.0
+
+[ClickHouse] SHALL support `aad` with `String` or `FixedString` data types as the optional fifth
+parameter to the `aes_decrypt` function that SHALL specify the additional authenticated data
+for block modes that require it.
+
+#### RQ.SRS008.AES.Decrypt.Function.Parameters.ReturnValue
+version: 1.0
+
+[ClickHouse] SHALL return the decrypted value of the data
+using `String` data type as the result of the `aes_decrypt` function.
+
+#### RQ.SRS008.AES.Decrypt.Function.Key.Length.InvalidLengthError
+version: 1.0
+
+[ClickHouse] SHALL return an error if the `key` length does not exactly match the length required by the `aes_decrypt` function for a given block mode.
+
+#### RQ.SRS008.AES.Decrypt.Function.InitializationVector.Length.InvalidLengthError
+version: 1.0
+
+[ClickHouse] SHALL return an error if the `iv` is specified and its length does not exactly match the size required by the `aes_decrypt` function for a given block mode.
+
+#### RQ.SRS008.AES.Decrypt.Function.InitializationVector.NotValidForMode
+version: 1.0
+
+[ClickHouse] SHALL return an error if the `iv` is specified for the `aes_decrypt` function
+for a mode that does not need it.
+
+#### RQ.SRS008.AES.Decrypt.Function.AdditionalAuthenticationData.NotValidForMode
+version: 1.0
+
+[ClickHouse] SHALL return an error if the `aad` is specified for the `aes_decrypt` function
+for a mode that does not need it.
+
+#### RQ.SRS008.AES.Decrypt.Function.AdditionalAuthenticationData.Length
+version: 1.0
+
+[ClickHouse] SHALL NOT limit the size of the `aad` parameter passed to the `aes_decrypt` function.
+
+#### RQ.SRS008.AES.Decrypt.Function.AES-128-ECB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-ecb` and `key` is not 16 bytes
+or `iv` or `aad` is specified.
+
+#### RQ.SRS008.AES.Decrypt.Function.AES-192-ECB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-ecb` and `key` is not 24 bytes
+or `iv` or `aad` is specified.
+
+#### RQ.SRS008.AES.Decrypt.Function.AES-256-ECB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-ecb` and `key` is not 32 bytes
+or `iv` or `aad` is specified.
+
+#### RQ.SRS008.AES.Decrypt.Function.AES-128-CBC.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-cbc` and `key` is not 16 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Decrypt.Function.AES-192-CBC.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-cbc` and `key` is not 24 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Decrypt.Function.AES-256-CBC.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-cbc` and `key` is not 32 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Decrypt.Function.AES-128-CFB1.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-cfb1` and `key` is not 16 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Decrypt.Function.AES-192-CFB1.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-cfb1` and `key` is not 24 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Decrypt.Function.AES-256-CFB1.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-cfb1` and `key` is not 32 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Decrypt.Function.AES-128-CFB8.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-cfb8` and `key` is not 16 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Decrypt.Function.AES-192-CFB8.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-cfb8` and `key` is not 24 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Decrypt.Function.AES-256-CFB8.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-cfb8` and `key` is not 32 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Decrypt.Function.AES-128-CFB128.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-cfb128` and `key` is not 16 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Decrypt.Function.AES-192-CFB128.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-cfb128` and `key` is not 24 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Decrypt.Function.AES-256-CFB128.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-cfb128` and `key` is not 32 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Decrypt.Function.AES-128-OFB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-ofb` and `key` is not 16 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+
+#### RQ.SRS008.AES.Decrypt.Function.AES-192-OFB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-ofb` and `key` is not 24 bytes
+or if specified `iv` is not 16 bytes or `aad` is specified.
+ +#### RQ.SRS008.AES.Decrypt.Function.AES-256-OFB.KeyAndInitializationVector.Length +version: 1.0 + +[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-ofb` and `key` is not 32 bytes +or if specified `iv` is not 16 bytes or `aad` is specified. + +#### RQ.SRS008.AES.Decrypt.Function.AES-128-GCM.KeyAndInitializationVector.Length +version: 1.0 + +[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-gcm` and `key` is not 16 bytes +or `iv` is not specified or is less than 8 bytes. + +#### RQ.SRS008.AES.Decrypt.Function.AES-192-GCM.KeyAndInitializationVector.Length +version: 1.0 + +[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-gcm` and `key` is not 24 bytes +or `iv` is not specified or is less than 8 bytes. + +#### RQ.SRS008.AES.Decrypt.Function.AES-256-GCM.KeyAndInitializationVector.Length +version: 1.0 + +[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-gcm` and `key` is not 32 bytes +or `iv` is not specified or is less than 8 bytes. + +#### RQ.SRS008.AES.Decrypt.Function.AES-128-CTR.KeyAndInitializationVector.Length +version: 1.0 + +[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-ctr` and `key` is not 16 bytes +or if specified `iv` is not 16 bytes. + +#### RQ.SRS008.AES.Decrypt.Function.AES-192-CTR.KeyAndInitializationVector.Length +version: 1.0 + +[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-ctr` and `key` is not 24 bytes +or if specified `iv` is not 16 bytes. + +#### RQ.SRS008.AES.Decrypt.Function.AES-256-CTR.KeyAndInitializationVector.Length +version: 1.0 + +[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-ctr` and `key` is not 32 bytes +or if specified `iv` is not 16 bytes. + +### MySQL Specific Functions + +#### RQ.SRS008.AES.MySQL.Encrypt.Function +version: 1.0 + +[ClickHouse] SHALL support `aes_encrypt_mysql` function to encrypt data using [AES]. + +#### RQ.SRS008.AES.MySQL.Encrypt.Function.Syntax +version: 1.0 + +[ClickHouse] SHALL support the following syntax for the `aes_encrypt_mysql` function + +```sql +aes_encrypt_mysql(plaintext, key, mode, [iv]) +``` + +#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.PlainText +version: 1.0 + +[ClickHouse] SHALL support `plaintext` accepting any data type as +the first parameter to the `aes_encrypt_mysql` function that SHALL specify the data to be encrypted. + +#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Key +version: 1.0 + +[ClickHouse] SHALL support `key` with `String` or `FixedString` data types +as the second parameter to the `aes_encrypt_mysql` function that SHALL specify the encryption key. + +#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode +version: 1.0 + +[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the third parameter +to the `aes_encrypt_mysql` function that SHALL specify encryption key length and block encryption mode. 
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.ValuesFormat
+version: 1.0
+
+[ClickHouse] SHALL support values of the form `aes-[key_length]-[mode]` for the `mode` parameter
+of the `aes_encrypt_mysql` function where
+the `key_length` SHALL specify the length of the key and SHALL accept
+`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption
+mode and SHALL accept [ECB], [CBC], [CFB1], [CFB8], [CFB128], or [OFB]. For example, `aes-256-ofb`.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.Invalid
+version: 1.0
+
+[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `aes_encrypt_mysql`
+function is not valid, unless such a mode is supported by the underlying
+[OpenSSL] implementation.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-ECB
+version: 1.0
+
+[ClickHouse] SHALL support `aes-128-ecb` as the value for the `mode` parameter of the `aes_encrypt_mysql` function
+and [AES] algorithm SHALL use the [ECB] block mode encryption with a 128 bit key.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-ECB
+version: 1.0
+
+[ClickHouse] SHALL support `aes-192-ecb` as the value for the `mode` parameter of the `aes_encrypt_mysql` function
+and [AES] algorithm SHALL use the [ECB] block mode encryption with a 192 bit key.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-ECB
+version: 1.0
+
+[ClickHouse] SHALL support `aes-256-ecb` as the value for the `mode` parameter of the `aes_encrypt_mysql` function
+and [AES] algorithm SHALL use the [ECB] block mode encryption with a 256 bit key.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-CBC
+version: 1.0
+
+[ClickHouse] SHALL support `aes-128-cbc` as the value for the `mode` parameter of the `aes_encrypt_mysql` function
+and [AES] algorithm SHALL use the [CBC] block mode encryption with a 128 bit key.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-CBC
+version: 1.0
+
+[ClickHouse] SHALL support `aes-192-cbc` as the value for the `mode` parameter of the `aes_encrypt_mysql` function
+and [AES] algorithm SHALL use the [CBC] block mode encryption with a 192 bit key.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-CBC
+version: 1.0
+
+[ClickHouse] SHALL support `aes-256-cbc` as the value for the `mode` parameter of the `aes_encrypt_mysql` function
+and [AES] algorithm SHALL use the [CBC] block mode encryption with a 256 bit key.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB1
+version: 1.0
+
+[ClickHouse] SHALL support `aes-128-cfb1` as the value for the `mode` parameter of the `aes_encrypt_mysql` function
+and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 128 bit key.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB1
+version: 1.0
+
+[ClickHouse] SHALL support `aes-192-cfb1` as the value for the `mode` parameter of the `aes_encrypt_mysql` function
+and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 192 bit key.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB1
+version: 1.0
+
+[ClickHouse] SHALL support `aes-256-cfb1` as the value for the `mode` parameter of the `aes_encrypt_mysql` function
+and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 256 bit key.
+ +#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB8 +version: 1.0 + +[ClickHouse] SHALL support `aes-128-cfb8` as the value for the `mode` parameter of the `aes_encrypt_mysql` function +and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 128 bit key. + +#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB8 +version: 1.0 + +[ClickHouse] SHALL support `aes-192-cfb8` as the value for the `mode` parameter of the `aes_encrypt_mysql` function +and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 192 bit key. + +#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB8 +version: 1.0 + +[ClickHouse] SHALL support `aes-256-cfb8` as the value for the `mode` parameter of the `aes_encrypt_mysql` function +and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 256 bit key. + +#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB128 +version: 1.0 + +[ClickHouse] SHALL support `aes-128-cfb128` as the value for the `mode` parameter of the `aes_encrypt_mysql` function +and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 128 bit key. + +#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB128 +version: 1.0 + +[ClickHouse] SHALL support `aes-192-cfb128` as the value for the `mode` parameter of the `aes_encrypt_mysql` function +and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 192 bit key. + +#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB128 +version: 1.0 + +[ClickHouse] SHALL support `aes-256-cfb128` as the value for the `mode` parameter of the `aes_encrypt_mysql` function +and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 256 bit key. + +#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-OFB +version: 1.0 + +[ClickHouse] SHALL support `aes-128-ofb` as the value for the `mode` parameter of the `aes_encrypt_mysql` function +and [AES] algorithm SHALL use the [OFB] block mode encryption with a 128 bit key. + +#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-OFB +version: 1.0 + +[ClickHouse] SHALL support `aes-192-ofb` as the value for the `mode` parameter of the `aes_encrypt_mysql` function +and [AES] algorithm SHALL use the [OFB] block mode encryption with a 192 bit key. + +#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-OFB +version: 1.0 + +[ClickHouse] SHALL support `aes-256-ofb` as the value for the `mode` parameter of the `aes_encrypt_mysql` function +and [AES] algorithm SHALL use the [OFB] block mode encryption with a 256 bit key. + +#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-GCM.Error +version: 1.0 + +[ClickHouse] SHALL return an error if `aes-128-gcm` is specified as the value for the `mode` parameter of the +`aes_encrypt_mysql` function. + +#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-GCM.Error +version: 1.0 + +[ClickHouse] SHALL return an error if `aes-192-gcm` is specified as the value for the `mode` parameter of the +`aes_encrypt_mysql` function. + +#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-GCM.Error +version: 1.0 + +[ClickHouse] SHALL return an error if `aes-256-gcm` is specified as the value for the `mode` parameter of the +`aes_encrypt_mysql` function. 
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-CTR.Error
+version: 1.0
+
+[ClickHouse] SHALL return an error if `aes-128-ctr` is specified as the value for the `mode` parameter of the
+`aes_encrypt_mysql` function.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-CTR.Error
+version: 1.0
+
+[ClickHouse] SHALL return an error if `aes-192-ctr` is specified as the value for the `mode` parameter of the
+`aes_encrypt_mysql` function.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-CTR.Error
+version: 1.0
+
+[ClickHouse] SHALL return an error if `aes-256-ctr` is specified as the value for the `mode` parameter of the
+`aes_encrypt_mysql` function.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.InitializationVector
+version: 1.0
+
+[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth
+parameter to the `aes_encrypt_mysql` function that SHALL specify the initialization vector for block modes
+that require it.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.ReturnValue
+version: 1.0
+
+[ClickHouse] SHALL return the encrypted value of the data
+using `String` data type as the result of the `aes_encrypt_mysql` function.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.Key.Length.TooShortError
+version: 1.0
+
+[ClickHouse] SHALL return an error if the `key` length is less than the minimum for the `aes_encrypt_mysql`
+function for a given block mode.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.Key.Length.TooLong
+version: 1.0
+
+[ClickHouse] SHALL use the folding algorithm specified below if the `key` length is longer than required
+for the `aes_encrypt_mysql` function for a given block mode.
+
+```python
+def fold_key(key, cipher_key_size):
+    """Fold a key that is longer than cipher_key_size by XOR-ing each
+    extra byte back onto the first cipher_key_size bytes, wrapping around."""
+    key = list(key)  # accept bytes, bytearray, list, or tuple of byte values
+    folded_key = key[:cipher_key_size]
+    for i in range(cipher_key_size, len(key)):
+        folded_key[i % cipher_key_size] ^= key[i]
+    return folded_key
+```
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.Length.TooShortError
+version: 1.0
+
+[ClickHouse] SHALL return an error if the `iv` length is specified and is less than the minimum
+that is required for the `aes_encrypt_mysql` function for a given block mode.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.Length.TooLong
+version: 1.0
+
+[ClickHouse] SHALL use only the first `N` bytes that are required if the `iv` is specified and
+its length is longer than required for the `aes_encrypt_mysql` function for a given block mode.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.NotValidForMode
+version: 1.0
+
+[ClickHouse] SHALL return an error if the `iv` is specified for the `aes_encrypt_mysql`
+function for a mode that does not need it.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.AES-128-ECB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-128-ecb` and `key` is less than 16 bytes
+or `iv` is specified.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.AES-192-ECB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-192-ecb` and `key` is less than 24 bytes
+or `iv` is specified.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.AES-256-ECB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-256-ecb` and `key` is less than 32 bytes
+or `iv` is specified.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.AES-128-CBC.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-128-cbc` and `key` is less than 16 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.AES-192-CBC.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-192-cbc` and `key` is less than 24 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.AES-256-CBC.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-256-cbc` and `key` is less than 32 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.AES-128-CFB1.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-128-cfb1` and `key` is less than 16 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.AES-192-CFB1.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-192-cfb1` and `key` is less than 24 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.AES-256-CFB1.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-256-cfb1` and `key` is less than 32 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.AES-128-CFB8.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-128-cfb8` and `key` is less than 16 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.AES-192-CFB8.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-192-cfb8` and `key` is less than 24 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.AES-256-CFB8.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-256-cfb8` and `key` is less than 32 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.AES-128-CFB128.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-128-cfb128` and `key` is less than 16 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.AES-192-CFB128.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-192-cfb128` and `key` is less than 24 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.AES-256-CFB128.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-256-cfb128` and `key` is less than 32 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.AES-128-OFB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-128-ofb` and `key` is less than 16 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.AES-192-OFB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-192-ofb` and `key` is less than 24 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Encrypt.Function.AES-256-OFB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-256-ofb` and `key` is less than 32 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function
+version: 1.0
+
+[ClickHouse] SHALL support `aes_decrypt_mysql` function to decrypt data using [AES].
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Syntax
+version: 1.0
+
+[ClickHouse] SHALL support the following syntax for the `aes_decrypt_mysql` function
+
+```sql
+aes_decrypt_mysql(ciphertext, key, mode, [iv])
+```
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.CipherText
+version: 1.0
+
+[ClickHouse] SHALL support `ciphertext` accepting any data type as
+the first parameter to the `aes_decrypt_mysql` function that SHALL specify the data to be decrypted.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Key
+version: 1.0
+
+[ClickHouse] SHALL support `key` with `String` or `FixedString` data types
+as the second parameter to the `aes_decrypt_mysql` function that SHALL specify the encryption key.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode
+version: 1.0
+
+[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the third parameter
+to the `aes_decrypt_mysql` function that SHALL specify encryption key length and block encryption mode.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.ValuesFormat
+version: 1.0
+
+[ClickHouse] SHALL support values of the form `aes-[key_length]-[mode]` for the `mode` parameter
+of the `aes_decrypt_mysql` function where
+the `key_length` SHALL specify the length of the key and SHALL accept
+`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption
+mode and SHALL accept [ECB], [CBC], [CFB1], [CFB8], [CFB128], or [OFB]. For example, `aes-256-ofb`.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.Invalid
+version: 1.0
+
+[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `aes_decrypt_mysql`
+function is not valid, unless such a mode is supported by the underlying
+[OpenSSL] implementation.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-ECB
+version: 1.0
+
+[ClickHouse] SHALL support `aes-128-ecb` as the value for the `mode` parameter of the `aes_decrypt_mysql` function
+and [AES] algorithm SHALL use the [ECB] block mode encryption with a 128 bit key.
+ +#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-ECB +version: 1.0 + +[ClickHouse] SHALL support `aes-192-ecb` as the value for the `mode` parameter of the `aes_decrypt_mysql` function +and [AES] algorithm SHALL use the [ECB] block mode encryption with a 192 bit key. + +#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-ECB +version: 1.0 + +[ClickHouse] SHALL support `aes-256-ecb` as the value for the `mode` parameter of the `aes_decrypt_mysql` function +and [AES] algorithm SHALL use the [ECB] block mode encryption with a 256 bit key. + +#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CBC +version: 1.0 + +[ClickHouse] SHALL support `aes-128-cbc` as the value for the `mode` parameter of the `aes_decrypt_mysql` function +and [AES] algorithm SHALL use the [CBC] block mode encryption with a 128 bit key. + +#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CBC +version: 1.0 + +[ClickHouse] SHALL support `aes-192-cbc` as the value for the `mode` parameter of the `aes_decrypt_mysql` function +and [AES] algorithm SHALL use the [CBC] block mode encryption with a 192 bit key. + +#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CBC +version: 1.0 + +[ClickHouse] SHALL support `aes-256-cbc` as the value for the `mode` parameter of the `aes_decrypt_mysql` function +and [AES] algorithm SHALL use the [CBC] block mode encryption with a 256 bit key. + +#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB1 +version: 1.0 + +[ClickHouse] SHALL support `aes-128-cfb1` as the value for the `mode` parameter of the `aes_decrypt_mysql` function +and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 128 bit key. + +#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB1 +version: 1.0 + +[ClickHouse] SHALL support `aes-192-cfb1` as the value for the `mode` parameter of the `aes_decrypt_mysql` function +and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 192 bit key. + +#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB1 +version: 1.0 + +[ClickHouse] SHALL support `aes-256-cfb1` as the value for the `mode` parameter of the `aes_decrypt_mysql` function +and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 256 bit key. + +#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB8 +version: 1.0 + +[ClickHouse] SHALL support `aes-128-cfb8` as the value for the `mode` parameter of the `aes_decrypt_mysql` function +and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 128 bit key. + +#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB8 +version: 1.0 + +[ClickHouse] SHALL support `aes-192-cfb8` as the value for the `mode` parameter of the `aes_decrypt_mysql` function +and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 192 bit key. + +#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB8 +version: 1.0 + +[ClickHouse] SHALL support `aes-256-cfb8` as the value for the `mode` parameter of the `aes_decrypt_mysql` function +and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 256 bit key. + +#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB128 +version: 1.0 + +[ClickHouse] SHALL support `aes-128-cfb128` as the value for the `mode` parameter of the `aes_decrypt_mysql` function +and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 128 bit key. 
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB128
+version: 1.0
+
+[ClickHouse] SHALL support `aes-192-cfb128` as the value for the `mode` parameter of the `aes_decrypt_mysql` function
+and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 192 bit key.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB128
+version: 1.0
+
+[ClickHouse] SHALL support `aes-256-cfb128` as the value for the `mode` parameter of the `aes_decrypt_mysql` function
+and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 256 bit key.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-OFB
+version: 1.0
+
+[ClickHouse] SHALL support `aes-128-ofb` as the value for the `mode` parameter of the `aes_decrypt_mysql` function
+and [AES] algorithm SHALL use the [OFB] block mode encryption with a 128 bit key.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-OFB
+version: 1.0
+
+[ClickHouse] SHALL support `aes-192-ofb` as the value for the `mode` parameter of the `aes_decrypt_mysql` function
+and [AES] algorithm SHALL use the [OFB] block mode encryption with a 192 bit key.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-OFB
+version: 1.0
+
+[ClickHouse] SHALL support `aes-256-ofb` as the value for the `mode` parameter of the `aes_decrypt_mysql` function
+and [AES] algorithm SHALL use the [OFB] block mode encryption with a 256 bit key.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-GCM.Error
+version: 1.0
+
+[ClickHouse] SHALL return an error if `aes-128-gcm` is specified as the value for the `mode` parameter of the
+`aes_decrypt_mysql` function.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-GCM.Error
+version: 1.0
+
+[ClickHouse] SHALL return an error if `aes-192-gcm` is specified as the value for the `mode` parameter of the
+`aes_decrypt_mysql` function.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-GCM.Error
+version: 1.0
+
+[ClickHouse] SHALL return an error if `aes-256-gcm` is specified as the value for the `mode` parameter of the
+`aes_decrypt_mysql` function.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CTR.Error
+version: 1.0
+
+[ClickHouse] SHALL return an error if `aes-128-ctr` is specified as the value for the `mode` parameter of the
+`aes_decrypt_mysql` function.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CTR.Error
+version: 1.0
+
+[ClickHouse] SHALL return an error if `aes-192-ctr` is specified as the value for the `mode` parameter of the
+`aes_decrypt_mysql` function.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CTR.Error
+version: 1.0
+
+[ClickHouse] SHALL return an error if `aes-256-ctr` is specified as the value for the `mode` parameter of the
+`aes_decrypt_mysql` function.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.InitializationVector
+version: 1.0
+
+[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth
+parameter to the `aes_decrypt_mysql` function that SHALL specify the initialization vector for block modes
+that require it.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.ReturnValue
+version: 1.0
+
+[ClickHouse] SHALL return the decrypted value of the data
+using `String` data type as the result of the `aes_decrypt_mysql` function.
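+
+The round trip below is an illustrative sketch; the over-long key and `iv` literals are
+placeholders. Per the requirements that follow, `aes_decrypt_mysql` folds a key that is
+longer than required and uses only the first required bytes of a longer `iv` instead of
+returning an error, matching the [MySQL] behavior.
+
+```sql
+SELECT aes_decrypt_mysql(
+    aes_encrypt_mysql('secret',
+        'this key is longer than the 32 bytes required by aes-256-ofb',
+        'aes-256-ofb', '1234567890123456-extra-bytes'),
+    'this key is longer than the 32 bytes required by aes-256-ofb',
+    'aes-256-ofb', '1234567890123456-extra-bytes') AS plaintext;
+```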
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Key.Length.TooShortError
+version: 1.0
+
+[ClickHouse] SHALL return an error if the `key` length is less than the minimum for the `aes_decrypt_mysql`
+function for a given block mode.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.Key.Length.TooLong
+version: 1.0
+
+[ClickHouse] SHALL use the folding algorithm specified below if the `key` length is longer than required
+for the `aes_decrypt_mysql` function for a given block mode.
+
+```python
+def fold_key(key, cipher_key_size):
+    # Treat the key as a mutable sequence of byte values.
+    key = bytearray(key)
+    # The first cipher_key_size bytes seed the folded key.
+    folded_key = key[:cipher_key_size]
+    # XOR each remaining byte back into the folded key, wrapping around.
+    for i in range(cipher_key_size, len(key)):
+        folded_key[i % cipher_key_size] ^= key[i]
+    return folded_key
+```
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.Length.TooShortError
+version: 1.0
+
+[ClickHouse] SHALL return an error if the `iv` length is specified and is less than the minimum
+that is required for the `aes_decrypt_mysql` function for a given block mode.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.Length.TooLong
+version: 1.0
+
+[ClickHouse] SHALL use only the first `N` bytes that are required if the `iv` is specified and
+its length is longer than required for the `aes_decrypt_mysql` function for a given block mode.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.NotValidForMode
+version: 1.0
+
+[ClickHouse] SHALL return an error if the `iv` is specified for the `aes_decrypt_mysql`
+function for a mode that does not need it.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-ECB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-128-ecb` and `key` is less than 16 bytes
+or `iv` is specified.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-ECB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-192-ecb` and `key` is less than 24 bytes
+or `iv` is specified.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-ECB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-256-ecb` and `key` is less than 32 bytes
+or `iv` is specified.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-CBC.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-128-cbc` and `key` is less than 16 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-CBC.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-192-cbc` and `key` is less than 24 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-CBC.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-256-cbc` and `key` is less than 32 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-CFB1.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-128-cfb1` and `key` is less than 16 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-CFB1.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-192-cfb1` and `key` is less than 24 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-CFB1.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-256-cfb1` and `key` is less than 32 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-CFB8.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-128-cfb8` and `key` is less than 16 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-CFB8.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-192-cfb8` and `key` is less than 24 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-CFB8.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-256-cfb8` and `key` is less than 32 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-CFB128.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-128-cfb128` and `key` is less than 16 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-CFB128.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-192-cfb128` and `key` is less than 24 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-CFB128.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-256-cfb128` and `key` is less than 32 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-OFB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-128-ofb` and `key` is less than 16 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-OFB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-192-ofb` and `key` is less than 24 bytes
+or if specified `iv` is less than 16 bytes.
+
+#### RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-OFB.KeyAndInitializationVector.Length
+version: 1.0
+
+[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-256-ofb` and `key` is less than 32 bytes
+or if specified `iv` is less than 16 bytes.
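+
+The combined effect of the key folding and `iv` truncation rules above can be sketched as two
+equivalence checks. This is an illustrative sketch under the assumptions of this specification, not a
+normative requirement; the `aes_encrypt_mysql` function and its parameter order (`data, key, mode, [iv]`)
+are assumed from earlier in this document.
+
+```sql
+-- Key folding: a 48 byte key whose last 16 bytes repeat its first 16 bytes folds
+-- to 16 zero bytes followed by bytes 17..32 of the original key (see fold_key above).
+WITH
+    '0123456789abcdefFEDCBA9876543210' AS key32,                  -- 32 byte key
+    concat(key32, substring(key32, 1, 16)) AS long_key,           -- 48 byte key
+    concat(unhex(repeat('00', 16)), substring(key32, 17, 16)) AS folded_key,
+    'fedcba9876543210' AS iv16                                    -- 16 byte iv
+SELECT
+    aes_encrypt_mysql('text', long_key, 'aes-256-ofb', iv16)
+        = aes_encrypt_mysql('text', folded_key, 'aes-256-ofb', iv16) AS key_folded,
+    -- iv truncation: only the first 16 bytes of an over-long iv are used.
+    aes_encrypt_mysql('text', key32, 'aes-256-ofb', concat(iv16, 'extra'))
+        = aes_encrypt_mysql('text', key32, 'aes-256-ofb', iv16) AS iv_truncated;
+```
+
+Both comparisons are expected to return `1` when the folding and truncation behavior matches this
+specification.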
+ +## References + +* **GDPR:** https://en.wikipedia.org/wiki/General_Data_Protection_Regulation +* **MySQL:** https://www.mysql.com/ +* **AES:** https://en.wikipedia.org/wiki/Advanced_Encryption_Standard +* **ClickHouse:** https://clickhouse.tech +* **Git:** https://git-scm.com/ + +[OpenSSL]: https://www.openssl.org/ +[LowCardinality]: https://clickhouse.tech/docs/en/sql-reference/data-types/lowcardinality/ +[MergeTree]: https://clickhouse.tech/docs/en/engines/table-engines/mergetree-family/mergetree/ +[MySQL Database Engine]: https://clickhouse.tech/docs/en/engines/database-engines/mysql/ +[MySQL Table Engine]: https://clickhouse.tech/docs/en/engines/table-engines/integrations/mysql/ +[MySQL Table Function]: https://clickhouse.tech/docs/en/sql-reference/table-functions/mysql/ +[MySQL Dictionary]: https://clickhouse.tech/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources/#dicts-external_dicts_dict_sources-mysql +[GCM]: https://en.wikipedia.org/wiki/Galois/Counter_Mode +[CTR]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Counter_(CTR) +[CBC]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_block_chaining_(CBC) +[ECB]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Electronic_codebook_(ECB) +[CFB]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_feedback_(CFB) +[CFB1]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_feedback_(CFB) +[CFB8]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_feedback_(CFB) +[CFB128]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_feedback_(CFB) +[OFB]: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Output_feedback_(OFB) +[GDPR]: https://en.wikipedia.org/wiki/General_Data_Protection_Regulation +[RFC5116]: https://tools.ietf.org/html/rfc5116#section-5.1 +[MySQL]: https://www.mysql.com/ +[MySQL 5.7]: https://dev.mysql.com/doc/refman/5.7/en/ +[MySQL aes_encrypt]: https://dev.mysql.com/doc/refman/5.7/en/encryption-functions.html#function_aes-encrypt +[MySQL aes_decrypt]: https://dev.mysql.com/doc/refman/5.7/en/encryption-functions.html#function_aes-decrypt +[AES]: https://en.wikipedia.org/wiki/Advanced_Encryption_Standard +[ClickHouse]: https://clickhouse.tech +[GitHub repository]: https://github.com/ClickHouse/ClickHouse/blob/master/tests/testflows/aes_encryption/requirements/requirements.md +[Revision history]: https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/aes_encryption/requirements/requirements.md +[Git]: https://git-scm.com/ +[NIST test vectors]: https://csrc.nist.gov/Projects/Cryptographic-Algorithm-Validation-Program +''') + RQ_SRS008_AES_Functions = Requirement( name='RQ.SRS008.AES.Functions', version='1.0', @@ -14,9 +1960,9 @@ RQ_SRS008_AES_Functions = Requirement( uid=None, description=( '[ClickHouse] SHALL support [AES] encryption functions to encrypt and decrypt data.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Functions_Compatability_MySQL = Requirement( name='RQ.SRS008.AES.Functions.Compatability.MySQL', @@ -27,9 +1973,9 @@ RQ_SRS008_AES_Functions_Compatability_MySQL = Requirement( uid=None, description=( '[ClickHouse] SHALL support [AES] encryption functions compatible with [MySQL 5.7].\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Functions_Compatability_Dictionaries = Requirement( name='RQ.SRS008.AES.Functions.Compatability.Dictionaries', @@ -41,9 +1987,9 @@ RQ_SRS008_AES_Functions_Compatability_Dictionaries = Requirement( 
description=( '[ClickHouse] SHALL support encryption and decryption of data accessed on remote\n' '[MySQL] servers using [MySQL Dictionary].\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Functions_Compatability_Engine_Database_MySQL = Requirement( name='RQ.SRS008.AES.Functions.Compatability.Engine.Database.MySQL', @@ -54,9 +2000,9 @@ RQ_SRS008_AES_Functions_Compatability_Engine_Database_MySQL = Requirement( uid=None, description=( '[ClickHouse] SHALL support encryption and decryption of data accessed using [MySQL Database Engine],\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Functions_Compatability_Engine_Table_MySQL = Requirement( name='RQ.SRS008.AES.Functions.Compatability.Engine.Table.MySQL', @@ -67,9 +2013,9 @@ RQ_SRS008_AES_Functions_Compatability_Engine_Table_MySQL = Requirement( uid=None, description=( '[ClickHouse] SHALL support encryption and decryption of data accessed using [MySQL Table Engine].\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Functions_Compatability_TableFunction_MySQL = Requirement( name='RQ.SRS008.AES.Functions.Compatability.TableFunction.MySQL', @@ -80,9 +2026,9 @@ RQ_SRS008_AES_Functions_Compatability_TableFunction_MySQL = Requirement( uid=None, description=( '[ClickHouse] SHALL support encryption and decryption of data accessed using [MySQL Table Function].\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Functions_DifferentModes = Requirement( name='RQ.SRS008.AES.Functions.DifferentModes', @@ -94,9 +2040,9 @@ RQ_SRS008_AES_Functions_DifferentModes = Requirement( description=( '[ClickHouse] SHALL allow different modes to be supported in a single SQL statement\n' 'using explicit function parameters.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Functions_DataFromMultipleSources = Requirement( name='RQ.SRS008.AES.Functions.DataFromMultipleSources', @@ -110,9 +2056,9 @@ RQ_SRS008_AES_Functions_DataFromMultipleSources = Requirement( 'in the `SELECT` statement, including [ClickHouse] [MergeTree] table as well as [MySQL Dictionary],\n' '[MySQL Database Engine], [MySQL Table Engine], and [MySQL Table Function]\n' 'with possibly different encryption schemes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Functions_SuppressOutputOfSensitiveValues = Requirement( name='RQ.SRS008.AES.Functions.SuppressOutputOfSensitiveValues', @@ -124,9 +2070,9 @@ RQ_SRS008_AES_Functions_SuppressOutputOfSensitiveValues = Requirement( description=( '[ClickHouse] SHALL suppress output of [AES] `string` and `key` parameters to the system log,\n' 'error log, and `query_log` table to prevent leakage of sensitive values.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Functions_InvalidParameters = Requirement( name='RQ.SRS008.AES.Functions.InvalidParameters', @@ -137,9 +2083,9 @@ RQ_SRS008_AES_Functions_InvalidParameters = Requirement( uid=None, description=( '[ClickHouse] SHALL return an error when parameters are invalid.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Functions_Mismatched_Key = Requirement( name='RQ.SRS008.AES.Functions.Mismatched.Key', @@ -150,9 +2096,9 @@ RQ_SRS008_AES_Functions_Mismatched_Key = Requirement( uid=None, description=( '[ClickHouse] SHALL return garbage for mismatched keys.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Functions_Mismatched_IV = Requirement( name='RQ.SRS008.AES.Functions.Mismatched.IV', @@ -163,9 +2109,9 @@ RQ_SRS008_AES_Functions_Mismatched_IV = Requirement( uid=None, description=( '[ClickHouse] SHALL return garbage for mismatched initialization vector for the 
modes that use it.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Functions_Mismatched_AAD = Requirement( name='RQ.SRS008.AES.Functions.Mismatched.AAD', @@ -176,9 +2122,9 @@ RQ_SRS008_AES_Functions_Mismatched_AAD = Requirement( uid=None, description=( '[ClickHouse] SHALL return garbage for mismatched additional authentication data for the modes that use it.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Functions_Mismatched_Mode = Requirement( name='RQ.SRS008.AES.Functions.Mismatched.Mode', @@ -189,9 +2135,9 @@ RQ_SRS008_AES_Functions_Mismatched_Mode = Requirement( uid=None, description=( '[ClickHouse] SHALL return an error or garbage for mismatched mode.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Functions_Check_Performance = Requirement( name='RQ.SRS008.AES.Functions.Check.Performance', @@ -202,9 +2148,9 @@ RQ_SRS008_AES_Functions_Check_Performance = Requirement( uid=None, description=( 'Performance of [AES] encryption functions SHALL be measured.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Function_Check_Performance_BestCase = Requirement( name='RQ.SRS008.AES.Function.Check.Performance.BestCase', @@ -216,9 +2162,9 @@ RQ_SRS008_AES_Function_Check_Performance_BestCase = Requirement( description=( 'Performance of [AES] encryption functions SHALL be checked for the best case\n' 'scenario where there is one key, one initialization vector, and one large stream of data.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Function_Check_Performance_WorstCase = Requirement( name='RQ.SRS008.AES.Function.Check.Performance.WorstCase', @@ -230,9 +2176,9 @@ RQ_SRS008_AES_Function_Check_Performance_WorstCase = Requirement( description=( 'Performance of [AES] encryption functions SHALL be checked for the worst case\n' 'where there are `N` keys, `N` initialization vectors and `N` very small streams of data.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Functions_Check_Compression = Requirement( name='RQ.SRS008.AES.Functions.Check.Compression', @@ -243,9 +2189,9 @@ RQ_SRS008_AES_Functions_Check_Compression = Requirement( uid=None, description=( 'Effect of [AES] encryption on column compression SHALL be measured.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Functions_Check_Compression_LowCardinality = Requirement( name='RQ.SRS008.AES.Functions.Check.Compression.LowCardinality', @@ -257,9 +2203,9 @@ RQ_SRS008_AES_Functions_Check_Compression_LowCardinality = Requirement( description=( 'Effect of [AES] encryption on the compression of a column with [LowCardinality] data type\n' 'SHALL be measured.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function = Requirement( name='RQ.SRS008.AES.Encrypt.Function', @@ -270,9 +2216,9 @@ RQ_SRS008_AES_Encrypt_Function = Requirement( uid=None, description=( '[ClickHouse] SHALL support `aes_encrypt` function to encrypt data using [AES].\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Syntax = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Syntax', @@ -287,9 +2233,9 @@ RQ_SRS008_AES_Encrypt_Function_Syntax = Requirement( '```sql\n' 'aes_encrypt(plaintext, key, mode, [iv, aad])\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_NIST_TestVectors = Requirement( name='RQ.SRS008.AES.Encrypt.Function.NIST.TestVectors', @@ -300,9 +2246,9 @@ RQ_SRS008_AES_Encrypt_Function_NIST_TestVectors = Requirement( uid=None, description=( '[ClickHouse] `aes_encrypt` function output SHALL produce output that matches [NIST test vectors].\n' + '\n' ), - 
link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_PlainText = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.PlainText', @@ -314,9 +2260,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_PlainText = Requirement( description=( '[ClickHouse] SHALL support `plaintext` accepting any data type as\n' 'the first parameter to the `aes_encrypt` function that SHALL specify the data to be encrypted.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Key = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Key', @@ -328,9 +2274,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Key = Requirement( description=( '[ClickHouse] SHALL support `key` with `String` or `FixedString` data types\n' 'as the second parameter to the `aes_encrypt` function that SHALL specify the encryption key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode', @@ -342,9 +2288,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode = Requirement( description=( '[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the third parameter\n' 'to the `aes_encrypt` function that SHALL specify encryption key length and block encryption mode.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_ValuesFormat = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.ValuesFormat', @@ -360,9 +2306,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_ValuesFormat = Requirement( '`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n' 'mode and SHALL accept [ECB], [CBC], [CFB1], [CFB8], [CFB128], or [OFB] as well as\n' '[CTR] and [GCM] as the values. 
For example, `aes-256-ofb`.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_Invalid = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.Invalid', @@ -375,9 +2321,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_Invalid = Requirement( '[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `aes_encrypt`\n' 'function is not valid with the exception where such a mode is supported by the underlying\n' '[OpenSSL] implementation.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_ECB = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-ECB', @@ -389,9 +2335,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_ECB = Requirement( description=( '[ClickHouse] SHALL support `aes-128-ecb` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_ECB = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-ECB', @@ -403,9 +2349,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_ECB = Requirement( description=( '[ClickHouse] SHALL support `aes-192-ecb` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_ECB = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-ECB', @@ -417,9 +2363,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_ECB = Requirement( description=( '[ClickHouse] SHALL support `aes-256-ecb` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_CBC = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CBC', @@ -431,9 +2377,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_CBC = Requirement( description=( '[ClickHouse] SHALL support `aes-128-cbc` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [CBC] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_CBC = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CBC', @@ -445,9 +2391,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_CBC = Requirement( description=( '[ClickHouse] SHALL support `aes-192-cbc` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [CBC] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_CBC = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CBC', @@ -459,9 +2405,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_CBC = Requirement( description=( '[ClickHouse] SHALL support `aes-256-cbc` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [CBC] block mode 
encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_CFB1 = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB1', @@ -473,9 +2419,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_CFB1 = Requirement( description=( '[ClickHouse] SHALL support `aes-128-cfb1` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_CFB1 = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB1', @@ -487,9 +2433,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_CFB1 = Requirement( description=( '[ClickHouse] SHALL support `aes-192-cfb1` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_CFB1 = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB1', @@ -501,9 +2447,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_CFB1 = Requirement( description=( '[ClickHouse] SHALL support `aes-256-cfb1` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_CFB8 = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB8', @@ -515,9 +2461,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_CFB8 = Requirement( description=( '[ClickHouse] SHALL support `aes-128-cfb8` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_CFB8 = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB8', @@ -529,9 +2475,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_CFB8 = Requirement( description=( '[ClickHouse] SHALL support `aes-192-cfb8` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_CFB8 = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB8', @@ -543,9 +2489,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_CFB8 = Requirement( description=( '[ClickHouse] SHALL support `aes-256-cfb8` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_CFB128 = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB128', @@ -557,9 +2503,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_CFB128 = Requiremen description=( '[ClickHouse] SHALL support `aes-128-cfb128` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm 
SHALL use the [CFB128] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_CFB128 = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB128', @@ -571,9 +2517,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_CFB128 = Requiremen description=( '[ClickHouse] SHALL support `aes-192-cfb128` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_CFB128 = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB128', @@ -585,9 +2531,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_CFB128 = Requiremen description=( '[ClickHouse] SHALL support `aes-256-cfb128` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_OFB = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-OFB', @@ -599,9 +2545,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_OFB = Requirement( description=( '[ClickHouse] SHALL support `aes-128-ofb` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_OFB = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-OFB', @@ -613,9 +2559,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_OFB = Requirement( description=( '[ClickHouse] SHALL support `aes-192-ofb` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_OFB = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-OFB', @@ -627,9 +2573,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_OFB = Requirement( description=( '[ClickHouse] SHALL support `aes-256-ofb` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_GCM = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-GCM', @@ -643,9 +2589,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_GCM = Requirement( 'and [AES] algorithm SHALL use the [GCM] block mode encryption with a 128 bit key.\n' 'An `AEAD` 16-byte tag is appended to the resulting ciphertext according to\n' 'the [RFC5116].\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_GCM = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-GCM', @@ -659,9 +2605,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_GCM = Requirement( 'and [AES] algorithm SHALL use the [GCM] block mode encryption with a 192 bit key.\n' 'An `AEAD` 16-byte tag is appended to the resulting ciphertext according 
to\n' 'the [RFC5116].\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_GCM = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-GCM', @@ -675,9 +2621,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_GCM = Requirement( 'and [AES] algorithm SHALL use the [GCM] block mode encryption with a 256 bit key.\n' 'An `AEAD` 16-byte tag is appended to the resulting ciphertext according to\n' 'the [RFC5116].\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_CTR = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-128-CTR', @@ -689,9 +2635,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_128_CTR = Requirement( description=( '[ClickHouse] SHALL support `aes-128-ctr` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [CTR] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_CTR = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-192-CTR', @@ -703,9 +2649,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_192_CTR = Requirement( description=( '[ClickHouse] SHALL support `aes-192-ctr` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [CTR] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_CTR = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.AES-256-CTR', @@ -717,9 +2663,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_AES_256_CTR = Requirement( description=( '[ClickHouse] SHALL support `aes-256-ctr` as the value for the `mode` parameter of the `aes_encrypt` function\n' 'and [AES] algorithm SHALL use the [CTR] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_InitializationVector = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.InitializationVector', @@ -732,9 +2678,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_InitializationVector = Requirement( '[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth\n' 'parameter to the `aes_encrypt` function that SHALL specify the initialization vector for block modes that require\n' 'it.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_AdditionalAuthenticatedData = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.AdditionalAuthenticatedData', @@ -747,9 +2693,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_AdditionalAuthenticatedData = Requirem '[ClickHouse] SHALL support `aad` with `String` or `FixedString` data types as the optional fifth\n' 'parameter to the `aes_encrypt` function that SHALL specify the additional authenticated data\n' 'for block modes that require it.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_Parameters_ReturnValue = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Parameters.ReturnValue', @@ -761,9 +2707,9 @@ RQ_SRS008_AES_Encrypt_Function_Parameters_ReturnValue = Requirement( description=( '[ClickHouse] SHALL return the encrypted value of the data\n' 'using `String` data type as the result of `aes_encrypt` function.\n' + '\n' ), - link=None - ) + link=None) 
RQ_SRS008_AES_Encrypt_Function_Key_Length_InvalidLengthError = Requirement( name='RQ.SRS008.AES.Encrypt.Function.Key.Length.InvalidLengthError', @@ -774,9 +2720,9 @@ RQ_SRS008_AES_Encrypt_Function_Key_Length_InvalidLengthError = Requirement( uid=None, description=( '[ClickHouse] SHALL return an error if the `key` length is not exact for the `aes_encrypt` function for a given block mode.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_InitializationVector_Length_InvalidLengthError = Requirement( name='RQ.SRS008.AES.Encrypt.Function.InitializationVector.Length.InvalidLengthError', @@ -787,9 +2733,9 @@ RQ_SRS008_AES_Encrypt_Function_InitializationVector_Length_InvalidLengthError = uid=None, description=( '[ClickHouse] SHALL return an error if the `iv` length is specified and not of the exact size for the `aes_encrypt` function for a given block mode.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_InitializationVector_NotValidForMode = Requirement( name='RQ.SRS008.AES.Encrypt.Function.InitializationVector.NotValidForMode', @@ -800,9 +2746,9 @@ RQ_SRS008_AES_Encrypt_Function_InitializationVector_NotValidForMode = Requiremen uid=None, description=( '[ClickHouse] SHALL return an error if the `iv` is specified for the `aes_encrypt` function for a mode that does not need it.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AdditionalAuthenticationData_NotValidForMode = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AdditionalAuthenticationData.NotValidForMode', @@ -813,9 +2759,9 @@ RQ_SRS008_AES_Encrypt_Function_AdditionalAuthenticationData_NotValidForMode = Re uid=None, description=( '[ClickHouse] SHALL return an error if the `aad` is specified for the `aes_encrypt` function for a mode that does not need it.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AdditionalAuthenticationData_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AdditionalAuthenticationData.Length', @@ -826,9 +2772,9 @@ RQ_SRS008_AES_Encrypt_Function_AdditionalAuthenticationData_Length = Requirement uid=None, description=( '[ClickHouse] SHALL not limit the size of the `aad` parameter passed to the `aes_encrypt` function.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_128_ECB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-128-ECB.KeyAndInitializationVector.Length', @@ -840,9 +2786,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_128_ECB_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-ecb` and `key` is not 16 bytes\n' 'or `iv` or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_192_ECB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-192-ECB.KeyAndInitializationVector.Length', @@ -854,9 +2800,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_192_ECB_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-ecb` and `key` is not 24 bytes\n' 'or `iv` or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_256_ECB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-256-ECB.KeyAndInitializationVector.Length', @@ -868,9 +2814,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_256_ECB_KeyAndInitializationVector_Length = R 
description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-ecb` and `key` is not 32 bytes\n' 'or `iv` or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_128_CBC_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-128-CBC.KeyAndInitializationVector.Length', @@ -882,9 +2828,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_128_CBC_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-cbc` and `key` is not 16 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_192_CBC_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-192-CBC.KeyAndInitializationVector.Length', @@ -896,9 +2842,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_192_CBC_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-cbc` and `key` is not 24 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_256_CBC_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-256-CBC.KeyAndInitializationVector.Length', @@ -910,9 +2856,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_256_CBC_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-cbc` and `key` is not 32 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_128_CFB1_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-128-CFB1.KeyAndInitializationVector.Length', @@ -924,9 +2870,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_128_CFB1_KeyAndInitializationVector_Length = description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-cfb1` and `key` is not 16 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_192_CFB1_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-192-CFB1.KeyAndInitializationVector.Length', @@ -938,9 +2884,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_192_CFB1_KeyAndInitializationVector_Length = description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-cfb1` and `key` is not 24 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_256_CFB1_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-256-CFB1.KeyAndInitializationVector.Length', @@ -952,9 +2898,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_256_CFB1_KeyAndInitializationVector_Length = description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-cfb1` and `key` is not 32 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_128_CFB8_KeyAndInitializationVector_Length = Requirement( 
name='RQ.SRS008.AES.Encrypt.Function.AES-128-CFB8.KeyAndInitializationVector.Length', @@ -966,9 +2912,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_128_CFB8_KeyAndInitializationVector_Length = description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-cfb8` and `key` is not 16 bytes\n' 'and if specified `iv` is not 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_192_CFB8_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-192-CFB8.KeyAndInitializationVector.Length', @@ -980,9 +2926,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_192_CFB8_KeyAndInitializationVector_Length = description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-cfb8` and `key` is not 24 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_256_CFB8_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-256-CFB8.KeyAndInitializationVector.Length', @@ -994,9 +2940,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_256_CFB8_KeyAndInitializationVector_Length = description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-cfb8` and `key` is not 32 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_128_CFB128_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-128-CFB128.KeyAndInitializationVector.Length', @@ -1008,9 +2954,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_128_CFB128_KeyAndInitializationVector_Length description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-cfb128` and `key` is not 16 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_192_CFB128_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-192-CFB128.KeyAndInitializationVector.Length', @@ -1022,9 +2968,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_192_CFB128_KeyAndInitializationVector_Length description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-cfb128` and `key` is not 24 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_256_CFB128_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-256-CFB128.KeyAndInitializationVector.Length', @@ -1036,9 +2982,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_256_CFB128_KeyAndInitializationVector_Length description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-cfb128` and `key` is not 32 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_128_OFB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-128-OFB.KeyAndInitializationVector.Length', @@ -1050,9 +2996,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_128_OFB_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-ofb` and `key` is not 16 bytes\n' 'or if specified `iv` is not 16 bytes or 
`aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_192_OFB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-192-OFB.KeyAndInitializationVector.Length', @@ -1064,9 +3010,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_192_OFB_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-ofb` and `key` is not 24 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_256_OFB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-256-OFB.KeyAndInitializationVector.Length', @@ -1078,9 +3024,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_256_OFB_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-ofb` and `key` is not 32 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_128_GCM_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-128-GCM.KeyAndInitializationVector.Length', @@ -1092,9 +3038,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_128_GCM_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-gcm` and `key` is not 16 bytes\n' 'or `iv` is not specified or is less than 8 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_192_GCM_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-192-GCM.KeyAndInitializationVector.Length', @@ -1106,9 +3052,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_192_GCM_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-192-gcm` and `key` is not 24 bytes\n' 'or `iv` is not specified or is less than 8 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_256_GCM_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-256-GCM.KeyAndInitializationVector.Length', @@ -1120,9 +3066,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_256_GCM_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-gcm` and `key` is not 32 bytes\n' 'or `iv` is not specified or is less than 8 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_128_CTR_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-128-CTR.KeyAndInitializationVector.Length', @@ -1134,9 +3080,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_128_CTR_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-128-ctr` and `key` is not 16 bytes\n' 'or if specified `iv` is not 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_192_CTR_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-192-CTR.KeyAndInitializationVector.Length', @@ -1148,9 +3094,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_192_CTR_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` 
function is set to `aes-192-ctr` and `key` is not 24 bytes\n' 'or if specified `iv` is not 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Encrypt_Function_AES_256_CTR_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Encrypt.Function.AES-256-CTR.KeyAndInitializationVector.Length', @@ -1162,9 +3108,9 @@ RQ_SRS008_AES_Encrypt_Function_AES_256_CTR_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt` function is set to `aes-256-ctr` and `key` is not 32 bytes\n' 'or if specified `iv` is not 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function = Requirement( name='RQ.SRS008.AES.Decrypt.Function', @@ -1175,9 +3121,9 @@ RQ_SRS008_AES_Decrypt_Function = Requirement( uid=None, description=( '[ClickHouse] SHALL support `aes_decrypt` function to decrypt data using [AES].\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Syntax = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Syntax', @@ -1192,9 +3138,9 @@ RQ_SRS008_AES_Decrypt_Function_Syntax = Requirement( '```sql\n' 'aes_decrypt(ciphertext, key, mode, [iv, aad])\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_CipherText = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.CipherText', @@ -1206,9 +3152,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_CipherText = Requirement( description=( '[ClickHouse] SHALL support `ciphertext` accepting `FixedString` or `String` data types as\n' 'the first parameter to the `aes_decrypt` function that SHALL specify the data to be decrypted.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Key = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Key', @@ -1220,9 +3166,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Key = Requirement( description=( '[ClickHouse] SHALL support `key` with `String` or `FixedString` data types\n' 'as the second parameter to the `aes_decrypt` function that SHALL specify the encryption key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode', @@ -1234,9 +3180,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode = Requirement( description=( '[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the third parameter\n' 'to the `aes_decrypt` function that SHALL specify encryption key length and block encryption mode.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_ValuesFormat = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.ValuesFormat', @@ -1252,9 +3198,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_ValuesFormat = Requirement( '`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n' 'mode and SHALL accept [ECB], [CBC], [CFB1], [CFB8], [CFB128], or [OFB] as well as\n' '[CTR] and [GCM] as the values. 
For example, `aes-256-ofb`.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_Invalid = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.Invalid', @@ -1267,9 +3213,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_Invalid = Requirement( '[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `aes_decrypt`\n' 'function is not valid with the exception where such a mode is supported by the underlying\n' '[OpenSSL] implementation.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_ECB = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-ECB', @@ -1281,9 +3227,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_ECB = Requirement( description=( '[ClickHouse] SHALL support `aes-128-ecb` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_ECB = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-ECB', @@ -1295,9 +3241,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_ECB = Requirement( description=( '[ClickHouse] SHALL support `aes-192-ecb` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_ECB = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-ECB', @@ -1309,9 +3255,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_ECB = Requirement( description=( '[ClickHouse] SHALL support `aes-256-ecb` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_CBC = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CBC', @@ -1323,9 +3269,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_CBC = Requirement( description=( '[ClickHouse] SHALL support `aes-128-cbc` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [CBC] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_CBC = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CBC', @@ -1337,9 +3283,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_CBC = Requirement( description=( '[ClickHouse] SHALL support `aes-192-cbc` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [CBC] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_CBC = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CBC', @@ -1351,9 +3297,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_CBC = Requirement( description=( '[ClickHouse] SHALL support `aes-256-cbc` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [CBC] block mode 
encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_CFB1 = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB1', @@ -1365,9 +3311,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_CFB1 = Requirement( description=( '[ClickHouse] SHALL support `aes-128-cfb1` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_CFB1 = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB1', @@ -1379,9 +3325,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_CFB1 = Requirement( description=( '[ClickHouse] SHALL support `aes-192-cfb1` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_CFB1 = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB1', @@ -1393,9 +3339,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_CFB1 = Requirement( description=( '[ClickHouse] SHALL support `aes-256-cfb1` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_CFB8 = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB8', @@ -1407,9 +3353,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_CFB8 = Requirement( description=( '[ClickHouse] SHALL support `aes-128-cfb8` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_CFB8 = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB8', @@ -1421,9 +3367,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_CFB8 = Requirement( description=( '[ClickHouse] SHALL support `aes-192-cfb8` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_CFB8 = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB8', @@ -1435,9 +3381,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_CFB8 = Requirement( description=( '[ClickHouse] SHALL support `aes-256-cfb8` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_CFB128 = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB128', @@ -1449,9 +3395,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_CFB128 = Requiremen description=( '[ClickHouse] SHALL support `aes-128-cfb128` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] 
algorithm SHALL use the [CFB128] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_CFB128 = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB128', @@ -1463,9 +3409,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_CFB128 = Requiremen description=( '[ClickHouse] SHALL support `aes-192-cfb128` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_CFB128 = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB128', @@ -1477,9 +3423,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_CFB128 = Requiremen description=( '[ClickHouse] SHALL support `aes-256-cfb128` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_OFB = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-OFB', @@ -1491,9 +3437,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_OFB = Requirement( description=( '[ClickHouse] SHALL support `aes-128-ofb` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_OFB = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-OFB', @@ -1505,9 +3451,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_OFB = Requirement( description=( '[ClickHouse] SHALL support `aes-192-ofb` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_OFB = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-OFB', @@ -1519,9 +3465,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_OFB = Requirement( description=( '[ClickHouse] SHALL support `aes-256-ofb` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_GCM = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-GCM', @@ -1535,9 +3481,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_GCM = Requirement( 'and [AES] algorithm SHALL use the [GCM] block mode encryption with a 128 bit key.\n' 'An [AEAD] 16-byte tag is expected to be present at the end of the ciphertext according to\n' 'the [RFC5116].\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_GCM = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-GCM', @@ -1551,9 +3497,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_GCM = Requirement( 'and [AES] algorithm SHALL use the [GCM] block mode encryption with a 192 bit key.\n' 'An [AEAD] 16-byte tag is expected to be present at
the end of the ciphertext according to\n' 'the [RFC5116].\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_GCM = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-GCM', @@ -1567,9 +3513,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_GCM = Requirement( description=( '[ClickHouse] SHALL support `aes-256-gcm` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [GCM] block mode encryption with a 256 bit key.\n' 'An [AEAD] 16-byte tag is expected to be present at the end of the ciphertext according to\n' 'the [RFC5116].\n' + '\n' ), - link=None - ) + link=None)
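The three [GCM] requirements above share the same [RFC5116] payload convention, which is easy to miss in the diff context: the 16-byte [AEAD] tag is simply appended to the raw ciphertext, and a decryptor has to split it back off before verifying and decrypting. A minimal sketch in plain Python (an editorial illustration, not part of this patch; the helper name is hypothetical):

```python
TAG_SIZE = 16  # the 16-byte AEAD tag the GCM requirements above expect

def split_gcm_payload(payload: bytes) -> tuple:
    """Split an RFC 5116 style `ciphertext || tag` payload (illustrative)."""
    if len(payload) < TAG_SIZE:
        raise ValueError("payload is shorter than the 16-byte AEAD tag")
    return payload[:-TAG_SIZE], payload[-TAG_SIZE:]

# 32 bytes of ciphertext followed by the 16-byte tag:
ciphertext, tag = split_gcm_payload(bytes(48))
assert len(ciphertext) == 32 and len(tag) == TAG_SIZE
```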
RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_CTR = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-128-CTR', @@ -1581,9 +3527,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_128_CTR = Requirement( description=( '[ClickHouse] SHALL support `aes-128-ctr` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [CTR] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_CTR = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-192-CTR', @@ -1595,9 +3541,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_192_CTR = Requirement( description=( '[ClickHouse] SHALL support `aes-192-ctr` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [CTR] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_CTR = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.AES-256-CTR', @@ -1609,9 +3555,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_AES_256_CTR = Requirement( description=( '[ClickHouse] SHALL support `aes-256-ctr` as the value for the `mode` parameter of the `aes_decrypt` function\n' 'and [AES] algorithm SHALL use the [CTR] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_InitializationVector = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.InitializationVector', @@ -1624,9 +3570,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_InitializationVector = Requirement( '[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth\n' 'parameter to the `aes_decrypt` function that SHALL specify the initialization vector for block modes that require\n' 'it.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_AdditionalAuthenticatedData = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.AdditionalAuthenticatedData', @@ -1639,9 +3585,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_AdditionalAuthenticatedData = Requirem '[ClickHouse] SHALL support `aad` with `String` or `FixedString` data types as the optional fifth\n' 'parameter to the `aes_decrypt` function that SHALL specify the additional authenticated data\n' 'for block modes that require it.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Parameters_ReturnValue = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Parameters.ReturnValue', @@ -1653,9 +3599,9 @@ RQ_SRS008_AES_Decrypt_Function_Parameters_ReturnValue = Requirement( description=( '[ClickHouse] SHALL return the decrypted value of the data\n' 'using `String` data type as the result of `aes_decrypt` function.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_Key_Length_InvalidLengthError = Requirement( name='RQ.SRS008.AES.Decrypt.Function.Key.Length.InvalidLengthError', @@ -1666,9 +3612,9 @@ RQ_SRS008_AES_Decrypt_Function_Key_Length_InvalidLengthError = Requirement( uid=None, description=( '[ClickHouse] SHALL return an error if the `key` length is not exact for the `aes_decrypt` function for a given block mode.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_InitializationVector_Length_InvalidLengthError = Requirement( name='RQ.SRS008.AES.Decrypt.Function.InitializationVector.Length.InvalidLengthError', @@ -1679,9 +3625,9 @@ RQ_SRS008_AES_Decrypt_Function_InitializationVector_Length_InvalidLengthError = uid=None, description=( '[ClickHouse] SHALL return an error if the `iv` is specified and the length is not exact for the `aes_decrypt` function for a given block mode.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_InitializationVector_NotValidForMode = Requirement( name='RQ.SRS008.AES.Decrypt.Function.InitializationVector.NotValidForMode', @@ -1693,9 +3639,9 @@ RQ_SRS008_AES_Decrypt_Function_InitializationVector_NotValidForMode = Requiremen description=( '[ClickHouse] SHALL return an error if the `iv` is specified for the `aes_decrypt` function\n' 'for a mode that does not need it.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AdditionalAuthenticationData_NotValidForMode = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AdditionalAuthenticationData.NotValidForMode', @@ -1707,9 +3653,9 @@ RQ_SRS008_AES_Decrypt_Function_AdditionalAuthenticationData_NotValidForMode = Re description=( '[ClickHouse] SHALL return an error if the `aad` is specified for the `aes_decrypt` function\n' 'for a mode that does not need it.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AdditionalAuthenticationData_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AdditionalAuthenticationData.Length', @@ -1720,9 +3666,9 @@ RQ_SRS008_AES_Decrypt_Function_AdditionalAuthenticationData_Length = Requirement uid=None, description=( '[ClickHouse] SHALL not limit the size of the `aad` parameter passed to the `aes_decrypt` function.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_128_ECB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-128-ECB.KeyAndInitializationVector.Length', @@ -1734,9 +3680,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_128_ECB_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-ecb` and `key` is not 16 bytes\n' 'or `iv` or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_192_ECB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-192-ECB.KeyAndInitializationVector.Length', @@ -1748,9 +3694,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_192_ECB_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-ecb` and `key` is not 24 bytes\n' 'or `iv` or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_256_ECB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-256-ECB.KeyAndInitializationVector.Length', @@ -1762,9 +3708,9 @@
RQ_SRS008_AES_Decrypt_Function_AES_256_ECB_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-ecb` and `key` is not 32 bytes\n' 'or `iv` or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_128_CBC_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-128-CBC.KeyAndInitializationVector.Length', @@ -1776,9 +3722,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_128_CBC_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-cbc` and `key` is not 16 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_192_CBC_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-192-CBC.KeyAndInitializationVector.Length', @@ -1790,9 +3736,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_192_CBC_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-cbc` and `key` is not 24 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_256_CBC_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-256-CBC.KeyAndInitializationVector.Length', @@ -1804,9 +3750,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_256_CBC_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-cbc` and `key` is not 32 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_128_CFB1_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-128-CFB1.KeyAndInitializationVector.Length', @@ -1818,9 +3764,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_128_CFB1_KeyAndInitializationVector_Length = description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-cfb1` and `key` is not 16 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_192_CFB1_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-192-CFB1.KeyAndInitializationVector.Length', @@ -1832,9 +3778,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_192_CFB1_KeyAndInitializationVector_Length = description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-cfb1` and `key` is not 24 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_256_CFB1_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-256-CFB1.KeyAndInitializationVector.Length', @@ -1846,9 +3792,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_256_CFB1_KeyAndInitializationVector_Length = description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-cfb1` and `key` is not 32 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) 
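Taken together, the long run of `KeyAndInitializationVector.Length` requirements above and below reduces to a small validation matrix: the `aes-[key length]-[mode]` value from the `Mode.ValuesFormat` requirements fixes an exact key size, [ECB] accepts no `iv` at all, the [CBC]/[CFB1]/[CFB8]/[CFB128]/[OFB]/[CTR] modes take an exact 16-byte `iv`, and [GCM] instead demands an `iv` of at least 8 bytes. A minimal sketch in plain Python (an editorial condensation, not ClickHouse code; names are hypothetical and the `aad` checks are omitted for brevity):

```python
from typing import Optional

KEY_BYTES = {"128": 16, "192": 24, "256": 32}  # key size implied by the mode name

def check_decrypt_args(mode: str, key: bytes, iv: Optional[bytes]) -> None:
    """Hypothetical checker condensing the stated `aes_decrypt` error conditions."""
    _, bits, block_mode = mode.lower().split("-")   # e.g. "aes-256-ofb"
    if len(key) != KEY_BYTES[bits]:
        raise ValueError(f"{mode}: key must be exactly {KEY_BYTES[bits]} bytes")
    if block_mode == "ecb":
        if iv is not None:
            raise ValueError(f"{mode}: iv is not valid for this mode")
    elif block_mode == "gcm":
        if iv is None or len(iv) < 8:
            raise ValueError(f"{mode}: iv of at least 8 bytes is required")
    elif iv is not None and len(iv) != 16:          # cbc, cfb1/8/128, ofb, ctr
        raise ValueError(f"{mode}: iv must be exactly 16 bytes")

check_decrypt_args("aes-128-cbc", b"k" * 16, b"i" * 16)  # passes silently
```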
RQ_SRS008_AES_Decrypt_Function_AES_128_CFB8_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-128-CFB8.KeyAndInitializationVector.Length', @@ -1860,9 +3806,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_128_CFB8_KeyAndInitializationVector_Length = description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-cfb8` and `key` is not 16 bytes\n' 'or if specified `iv` is not 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_192_CFB8_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-192-CFB8.KeyAndInitializationVector.Length', @@ -1874,9 +3820,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_192_CFB8_KeyAndInitializationVector_Length = description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-cfb8` and `key` is not 24 bytes\n' 'or `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_256_CFB8_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-256-CFB8.KeyAndInitializationVector.Length', @@ -1888,9 +3834,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_256_CFB8_KeyAndInitializationVector_Length = description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-cfb8` and `key` is not 32 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_128_CFB128_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-128-CFB128.KeyAndInitializationVector.Length', @@ -1902,9 +3848,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_128_CFB128_KeyAndInitializationVector_Length description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-cfb128` and `key` is not 16 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_192_CFB128_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-192-CFB128.KeyAndInitializationVector.Length', @@ -1916,9 +3862,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_192_CFB128_KeyAndInitializationVector_Length description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-cfb128` and `key` is not 24 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_256_CFB128_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-256-CFB128.KeyAndInitializationVector.Length', @@ -1930,9 +3876,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_256_CFB128_KeyAndInitializationVector_Length description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-cfb128` and `key` is not 32 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_128_OFB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-128-OFB.KeyAndInitializationVector.Length', @@ -1944,9 +3890,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_128_OFB_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to
`aes-128-ofb` and `key` is not 16 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_192_OFB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-192-OFB.KeyAndInitializationVector.Length', @@ -1958,9 +3904,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_192_OFB_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-ofb` and `key` is not 24 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_256_OFB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-256-OFB.KeyAndInitializationVector.Length', @@ -1972,9 +3918,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_256_OFB_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-ofb` and `key` is not 32 bytes\n' 'or if specified `iv` is not 16 bytes or `aad` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_128_GCM_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-128-GCM.KeyAndInitializationVector.Length', @@ -1986,9 +3932,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_128_GCM_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-gcm` and `key` is not 16 bytes\n' 'or `iv` is not specified or is less than 8 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_192_GCM_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-192-GCM.KeyAndInitializationVector.Length', @@ -2000,9 +3946,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_192_GCM_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-gcm` and `key` is not 24 bytes\n' 'or `iv` is not specified or is less than 8 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_256_GCM_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-256-GCM.KeyAndInitializationVector.Length', @@ -2014,9 +3960,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_256_GCM_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-gcm` and `key` is not 32 bytes\n' 'or `iv` is not specified or is less than 8 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_128_CTR_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-128-CTR.KeyAndInitializationVector.Length', @@ -2028,9 +3974,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_128_CTR_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-128-ctr` and `key` is not 16 bytes\n' 'or if specified `iv` is not 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_192_CTR_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-192-CTR.KeyAndInitializationVector.Length', @@ -2042,9 +3988,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_192_CTR_KeyAndInitializationVector_Length = R 
description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-192-ctr` and `key` is not 24 bytes\n' 'or if specified `iv` is not 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_Decrypt_Function_AES_256_CTR_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.Decrypt.Function.AES-256-CTR.KeyAndInitializationVector.Length', @@ -2056,9 +4002,9 @@ RQ_SRS008_AES_Decrypt_Function_AES_256_CTR_KeyAndInitializationVector_Length = R description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt` function is set to `aes-256-ctr` and `key` is not 32 bytes\n' 'or if specified `iv` is not 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function', @@ -2069,9 +4015,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function = Requirement( uid=None, description=( '[ClickHouse] SHALL support `aes_encrypt_mysql` function to encrypt data using [AES].\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Syntax = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Syntax', @@ -2086,9 +4032,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Syntax = Requirement( '```sql\n' 'aes_encrypt_mysql(plaintext, key, mode, [iv])\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_PlainText = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.PlainText', @@ -2100,9 +4046,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_PlainText = Requirement( description=( '[ClickHouse] SHALL support `plaintext` accepting any data type as\n' 'the first parameter to the `aes_encrypt_mysql` function that SHALL specify the data to be encrypted.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Key = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Key', @@ -2114,9 +4060,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Key = Requirement( description=( '[ClickHouse] SHALL support `key` with `String` or `FixedString` data types\n' 'as the second parameter to the `aes_encrypt_mysql` function that SHALL specify the encryption key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode', @@ -2128,9 +4074,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode = Requirement( description=( '[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the third parameter\n' 'to the `aes_encrypt_mysql` function that SHALL specify encryption key length and block encryption mode.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_ValuesFormat = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.ValuesFormat', @@ -2145,9 +4091,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_ValuesFormat = Requirement( 'the `key_length` SHALL specify the length of the key and SHALL accept\n' '`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n' 'mode and SHALL accept [ECB], [CBC], [CFB1], [CFB8], [CFB128], or [OFB].
For example, `aes-256-ofb`.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_Invalid = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.Invalid', @@ -2160,9 +4106,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_Invalid = Requirement '[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `aes_encrypt_mysql`\n' 'function is not valid with the exception where such a mode is supported by the underlying\n' '[OpenSSL] implementation.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_128_ECB = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-ECB', @@ -2174,9 +4120,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_128_ECB = Require description=( '[ClickHouse] SHALL support `aes-128-ecb` as the value for the `mode` parameter of the `aes_encrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_192_ECB = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-ECB', @@ -2188,9 +4134,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_192_ECB = Require description=( '[ClickHouse] SHALL support `aes-192-ecb` as the value for the `mode` parameter of the `aes_encrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_256_ECB = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-ECB', @@ -2202,9 +4148,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_256_ECB = Require description=( '[ClickHouse] SHALL support `aes-256-ecb` as the value for the `mode` parameter of the `aes_encrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_128_CBC = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-CBC', @@ -2216,9 +4162,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_128_CBC = Require description=( '[ClickHouse] SHALL support `aes-128-cbc` as the value for the `mode` parameter of the `aes_encrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CBC] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_192_CBC = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-CBC', @@ -2230,9 +4176,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_192_CBC = Require description=( '[ClickHouse] SHALL support `aes-192-cbc` as the value for the `mode` parameter of the `aes_encrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CBC] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_256_CBC = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-CBC', @@ -2244,9 +4190,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_256_CBC = Require description=( '[ClickHouse] SHALL support 
`aes-256-cbc` as the value for the `mode` parameter of the `aes_encrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CBC] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_128_CFB1 = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB1', @@ -2258,9 +4204,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_128_CFB1 = Requir description=( '[ClickHouse] SHALL support `aes-128-cfb1` as the value for the `mode` parameter of the `aes_encrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_192_CFB1 = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB1', @@ -2272,9 +4218,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_192_CFB1 = Requir description=( '[ClickHouse] SHALL support `aes-192-cfb1` as the value for the `mode` parameter of the `aes_encrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_256_CFB1 = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB1', @@ -2286,9 +4232,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_256_CFB1 = Requir description=( '[ClickHouse] SHALL support `aes-256-cfb1` as the value for the `mode` parameter of the `aes_encrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_128_CFB8 = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB8', @@ -2300,9 +4246,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_128_CFB8 = Requir description=( '[ClickHouse] SHALL support `aes-128-cfb8` as the value for the `mode` parameter of the `aes_encrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_192_CFB8 = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB8', @@ -2314,9 +4260,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_192_CFB8 = Requir description=( '[ClickHouse] SHALL support `aes-192-cfb8` as the value for the `mode` parameter of the `aes_encrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_256_CFB8 = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB8', @@ -2328,9 +4274,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_256_CFB8 = Requir description=( '[ClickHouse] SHALL support `aes-256-cfb8` as the value for the `mode` parameter of the `aes_encrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_128_CFB128 = Requirement( 
name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-CFB128', @@ -2342,9 +4288,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_128_CFB128 = Requ description=( '[ClickHouse] SHALL support `aes-128-cfb128` as the value for the `mode` parameter of the `aes_encrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_192_CFB128 = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-CFB128', @@ -2356,9 +4302,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_192_CFB128 = Requ description=( '[ClickHouse] SHALL support `aes-192-cfb128` as the value for the `mode` parameter of the `aes_encrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_256_CFB128 = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-CFB128', @@ -2370,9 +4316,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_256_CFB128 = Requ description=( '[ClickHouse] SHALL support `aes-256-cfb128` as the value for the `mode` parameter of the `aes_encrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_128_OFB = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-OFB', @@ -2384,9 +4330,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_128_OFB = Require description=( '[ClickHouse] SHALL support `aes-128-ofb` as the value for the `mode` parameter of the `aes_encrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_192_OFB = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-OFB', @@ -2398,9 +4344,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_192_OFB = Require description=( '[ClickHouse] SHALL support `aes-192-ofb` as the value for the `mode` parameter of the `aes_encrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_256_OFB = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-OFB', @@ -2412,9 +4358,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_256_OFB = Require description=( '[ClickHouse] SHALL support `aes-256-ofb` as the value for the `mode` parameter of the `aes_encrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_128_GCM_Error = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-GCM.Error', @@ -2426,9 +4372,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_128_GCM_Error = R description=( '[ClickHouse] SHALL return an error if `aes-128-gcm` is specified as the value for the `mode` parameter of the\n' '`aes_encrypt_mysql` function.\n' + '\n' ), 
- link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_192_GCM_Error = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-GCM.Error', @@ -2440,9 +4386,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_192_GCM_Error = R description=( '[ClickHouse] SHALL return an error if `aes-192-gcm` is specified as the value for the `mode` parameter of the\n' '`aes_encrypt_mysql` function.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_256_GCM_Error = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-GCM.Error', @@ -2454,9 +4400,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_256_GCM_Error = R description=( '[ClickHouse] SHALL return an error if `aes-256-gcm` is specified as the value for the `mode` parameter of the\n' '`aes_encrypt_mysql` function.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_128_CTR_Error = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-128-CTR.Error', @@ -2468,9 +4414,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_128_CTR_Error = R description=( '[ClickHouse] SHALL return an error if `aes-128-ctr` is specified as the value for the `mode` parameter of the\n' '`aes_encrypt_mysql` function.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_192_CTR_Error = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-192-CTR.Error', @@ -2482,9 +4428,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_192_CTR_Error = R description=( '[ClickHouse] SHALL return an error if `aes-192-ctr` is specified as the value for the `mode` parameter of the\n' '`aes_encrypt_mysql` function.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_256_CTR_Error = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.AES-256-CTR.Error', @@ -2496,9 +4442,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_AES_256_CTR_Error = R description=( '[ClickHouse] SHALL return an error if `aes-256-ctr` is specified as the value for the `mode` parameter of the\n' '`aes_encrypt_mysql` function.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_InitializationVector = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.InitializationVector', @@ -2511,9 +4457,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_InitializationVector = Requireme '[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth\n' 'parameter to the `aes_encrypt_mysql` function that SHALL specify the initialization vector for block modes that require\n' 'it.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_ReturnValue = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.ReturnValue', @@ -2525,9 +4471,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_ReturnValue = Requirement( description=( '[ClickHouse] SHALL return the encrypted value of the data\n' 'using `String` data type as the result of `aes_encrypt_mysql` function.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Key_Length_TooShortError = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Key.Length.TooShortError', @@ 
-2539,9 +4485,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Key_Length_TooShortError = Requirement( description=( '[ClickHouse] SHALL return an error if the `key` length is less than the minimum for the `aes_encrypt_mysql`\n' 'function for a given block mode.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_Key_Length_TooLong = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.Key.Length.TooLong', @@ -2563,9 +4509,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_Key_Length_TooLong = Requirement( '\t\t folded_key[i % cipher_key_size] ^= key[i]\n' '\t return folded_key\n' '```\n' + '\n' ), - link=None - ) + link=None)
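The hunk above ends inside the key-folding pseudocode quoted by the `Key.Length.TooLong` requirement; only its last lines (`folded_key[i % cipher_key_size] ^= key[i]` and `return folded_key`) survive as diff context. For readability, here is a runnable rendering of that folding rule in plain Python (an editorial sketch of the MySQL-compatible behaviour the requirement describes, not code from this patch):

```python
def fold_key(key: bytes, cipher_key_size: int) -> bytes:
    """XOR-fold a too-long key back onto its first `cipher_key_size` bytes."""
    folded = bytearray(key[:cipher_key_size])
    for i in range(cipher_key_size, len(key)):
        folded[i % cipher_key_size] ^= key[i]  # the rule visible in the hunk above
    return bytes(folded)

# A key of exactly the cipher key size is returned unchanged;
# every byte past that size is folded back onto the start.
assert fold_key(b"0123456789abcdef", 16) == b"0123456789abcdef"
assert fold_key(b"0123456789abcdefZ", 16)[0] == ord("0") ^ ord("Z")
```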
RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_Length_TooShortError = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.Length.TooShortError', @@ -2577,9 +4523,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_Length_TooShortError = description=( '[ClickHouse] SHALL return an error if the `iv` length is specified and is less than the minimum\n' 'that is required for the `aes_encrypt_mysql` function for a given block mode.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_Length_TooLong = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.Length.TooLong', @@ -2591,9 +4537,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_Length_TooLong = Requi description=( '[ClickHouse] SHALL use the first `N` bytes that are required if the `iv` is specified and\n' 'its length is longer than required for the `aes_encrypt_mysql` function for a given block mode.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_NotValidForMode = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.NotValidForMode', @@ -2605,9 +4551,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_NotValidForMode = Requ description=( '[ClickHouse] SHALL return an error if the `iv` is specified for the `aes_encrypt_mysql`\n' 'function for a mode that does not need it.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_AES_128_ECB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-128-ECB.KeyAndInitializationVector.Length', @@ -2619,9 +4565,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_AES_128_ECB_KeyAndInitializationVector_Leng description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-128-ecb` and `key` is less than 16 bytes\n' 'or `iv` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_AES_192_ECB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-192-ECB.KeyAndInitializationVector.Length', @@ -2633,9 +4579,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_AES_192_ECB_KeyAndInitializationVector_Leng description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-192-ecb` and `key` is less than 24 bytes\n' 'or `iv` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_AES_256_ECB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-256-ECB.KeyAndInitializationVector.Length', @@ -2647,9 +4593,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_AES_256_ECB_KeyAndInitializationVector_Leng description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-256-ecb` and `key` is less than 32 bytes\n' 'or `iv` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_AES_128_CBC_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-128-CBC.KeyAndInitializationVector.Length', @@ -2661,9 +4607,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_AES_128_CBC_KeyAndInitializationVector_Leng description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-128-cbc` and `key` is less than 16 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_AES_192_CBC_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-192-CBC.KeyAndInitializationVector.Length', @@ -2675,9 +4621,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_AES_192_CBC_KeyAndInitializationVector_Leng description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-192-cbc` and `key` is less than 24 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_AES_256_CBC_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-256-CBC.KeyAndInitializationVector.Length', @@ -2689,9 +4635,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_AES_256_CBC_KeyAndInitializationVector_Leng description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-256-cbc` and `key` is less than 32 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_AES_128_CFB1_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-128-CFB1.KeyAndInitializationVector.Length', @@ -2703,9 +4649,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_AES_128_CFB1_KeyAndInitializationVector_Len description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-128-cfb1` and `key` is less than 16 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_AES_192_CFB1_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-192-CFB1.KeyAndInitializationVector.Length', @@ -2717,9 +4663,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_AES_192_CFB1_KeyAndInitializationVector_Len description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-192-cfb1` and `key` is less than 24 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_AES_256_CFB1_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-256-CFB1.KeyAndInitializationVector.Length', @@ -2731,9 +4677,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_AES_256_CFB1_KeyAndInitializationVector_Len description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-256-cfb1` and `key` is less than 32 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_AES_128_CFB8_KeyAndInitializationVector_Length = Requirement(
name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-128-CFB8.KeyAndInitializationVector.Length', @@ -2745,9 +4691,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_AES_128_CFB8_KeyAndInitializationVector_Len description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-128-cfb8` and `key` is less than 16 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_AES_192_CFB8_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-192-CFB8.KeyAndInitializationVector.Length', @@ -2759,9 +4705,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_AES_192_CFB8_KeyAndInitializationVector_Len description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-192-cfb8` and `key` is less than 24 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_AES_256_CFB8_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-256-CFB8.KeyAndInitializationVector.Length', @@ -2773,9 +4719,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_AES_256_CFB8_KeyAndInitializationVector_Len description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-256-cfb8` and `key` is less than 32 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_AES_128_CFB128_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-128-CFB128.KeyAndInitializationVector.Length', @@ -2787,9 +4733,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_AES_128_CFB128_KeyAndInitializationVector_L description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-128-cfb128` and `key` is less than 16 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_AES_192_CFB128_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-192-CFB128.KeyAndInitializationVector.Length', @@ -2801,9 +4747,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_AES_192_CFB128_KeyAndInitializationVector_L description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-192-cfb128` and `key` is less than 24 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_AES_256_CFB128_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-256-CFB128.KeyAndInitializationVector.Length', @@ -2815,9 +4761,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_AES_256_CFB128_KeyAndInitializationVector_L description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-256-cfb128` and `key` is less than 32 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_AES_128_OFB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-128-OFB.KeyAndInitializationVector.Length', @@ -2829,9 +4775,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_AES_128_OFB_KeyAndInitializationVector_Leng description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is
set to `aes-128-ofb` and `key` is less than 16 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_AES_192_OFB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-192-OFB.KeyAndInitializationVector.Length', @@ -2843,9 +4789,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_AES_192_OFB_KeyAndInitializationVector_Leng description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-192-ofb` and `key` is less than 24 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Encrypt_Function_AES_256_OFB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Encrypt.Function.AES-256-OFB.KeyAndInitializationVector.Length', @@ -2857,9 +4803,9 @@ RQ_SRS008_AES_MySQL_Encrypt_Function_AES_256_OFB_KeyAndInitializationVector_Leng description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_encrypt_mysql` function is set to `aes-256-ofb` and `key` is less than 32 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function', @@ -2870,9 +4816,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function = Requirement( uid=None, description=( '[ClickHouse] SHALL support `aes_decrypt_mysql` function to decrypt data using [AES].\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Syntax = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Syntax', @@ -2887,9 +4833,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Syntax = Requirement( '```sql\n' 'aes_decrypt_mysql(ciphertext, key, mode, [iv])\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_CipherText = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.CipherText', @@ -2901,9 +4847,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_CipherText = Requirement( description=( '[ClickHouse] SHALL support `ciphertext` accepting any data type as\n' 'the first parameter to the `aes_decrypt_mysql` function that SHALL specify the data to be decrypted.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Key = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Key', @@ -2915,9 +4861,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Key = Requirement( description=( '[ClickHouse] SHALL support `key` with `String` or `FixedString` data types\n' 'as the second parameter to the `aes_decrypt_mysql` function that SHALL specify the encryption key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode', @@ -2929,9 +4875,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode = Requirement( description=( '[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the third parameter\n' 'to the `aes_decrypt_mysql` function that SHALL specify encryption key length and block encryption mode.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_ValuesFormat = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.ValuesFormat', @@ -2946,9 +4892,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_ValuesFormat = Requirement( 'the `key_length` SHALL specify the length of the key
and SHALL accept\n' '`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n' 'mode and SHALL accept [ECB], [CBC], [CFB1], [CFB8], [CFB128], or [OFB]. For example, `aes-256-ofb`.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_Invalid = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.Invalid', @@ -2961,9 +4907,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_Invalid = Requirement '[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `aes_decrypt_mysql`\n' 'function is not valid with the exception where such a mode is supported by the underlying\n' '[OpenSSL] implementation.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_ECB = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-ECB', @@ -2975,9 +4921,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_ECB = Require description=( '[ClickHouse] SHALL support `aes-128-ecb` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_ECB = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-ECB', @@ -2989,9 +4935,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_ECB = Require description=( '[ClickHouse] SHALL support `aes-192-ecb` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_ECB = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-ECB', @@ -3003,9 +4949,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_ECB = Require description=( '[ClickHouse] SHALL support `aes-256-ecb` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [ECB] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_CBC = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CBC', @@ -3017,9 +4963,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_CBC = Require description=( '[ClickHouse] SHALL support `aes-128-cbc` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CBC] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_CBC = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CBC', @@ -3031,9 +4977,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_CBC = Require description=( '[ClickHouse] SHALL support `aes-192-cbc` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CBC] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_CBC = Requirement( 
name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CBC', @@ -3045,9 +4991,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_CBC = Require description=( '[ClickHouse] SHALL support `aes-256-cbc` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CBC] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_CFB1 = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB1', @@ -3059,9 +5005,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_CFB1 = Requir description=( '[ClickHouse] SHALL support `aes-128-cfb1` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_CFB1 = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB1', @@ -3073,9 +5019,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_CFB1 = Requir description=( '[ClickHouse] SHALL support `aes-192-cfb1` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_CFB1 = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB1', @@ -3087,9 +5033,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_CFB1 = Requir description=( '[ClickHouse] SHALL support `aes-256-cfb1` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CFB1] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_CFB8 = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB8', @@ -3101,9 +5047,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_CFB8 = Requir description=( '[ClickHouse] SHALL support `aes-128-cfb8` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_CFB8 = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB8', @@ -3115,9 +5061,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_CFB8 = Requir description=( '[ClickHouse] SHALL support `aes-192-cfb8` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CFB8] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_CFB8 = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB8', @@ -3129,9 +5075,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_CFB8 = Requir description=( '[ClickHouse] SHALL support `aes-256-cfb8` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CFB8] block mode 
encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_CFB128 = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CFB128', @@ -3143,9 +5089,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_CFB128 = Requ description=( '[ClickHouse] SHALL support `aes-128-cfb128` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_CFB128 = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CFB128', @@ -3157,9 +5103,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_CFB128 = Requ description=( '[ClickHouse] SHALL support `aes-192-cfb128` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_CFB128 = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CFB128', @@ -3171,9 +5117,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_CFB128 = Requ description=( '[ClickHouse] SHALL support `aes-256-cfb128` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [CFB128] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_OFB = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-OFB', @@ -3185,9 +5131,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_OFB = Require description=( '[ClickHouse] SHALL support `aes-128-ofb` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 128 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_OFB = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-OFB', @@ -3199,9 +5145,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_OFB = Require description=( '[ClickHouse] SHALL support `aes-192-ofb` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 192 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_OFB = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-OFB', @@ -3213,9 +5159,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_OFB = Require description=( '[ClickHouse] SHALL support `aes-256-ofb` as the value for the `mode` parameter of the `aes_decrypt_mysql` function\n' 'and [AES] algorithm SHALL use the [OFB] block mode encryption with a 256 bit key.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_GCM_Error = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-GCM.Error', @@ -3227,9 +5173,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_GCM_Error = R 
description=( '[ClickHouse] SHALL return an error if `aes-128-gcm` is specified as the value for the `mode` parameter of the\n' '`aes_decrypt_mysql` function.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_GCM_Error = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-GCM.Error', @@ -3241,9 +5187,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_GCM_Error = R description=( '[ClickHouse] SHALL return an error if `aes-192-gcm` is specified as the value for the `mode` parameter of the\n' '`aes_decrypt_mysql` function.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_GCM_Error = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-GCM.Error', @@ -3255,9 +5201,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_GCM_Error = R description=( '[ClickHouse] SHALL return an error if `aes-256-gcm` is specified as the value for the `mode` parameter of the\n' '`aes_decrypt_mysql` function.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_CTR_Error = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-128-CTR.Error', @@ -3269,9 +5215,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_128_CTR_Error = R description=( '[ClickHouse] SHALL return an error if `aes-128-ctr` is specified as the value for the `mode` parameter of the\n' '`aes_decrypt_mysql` function.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_CTR_Error = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-192-CTR.Error', @@ -3283,9 +5229,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_192_CTR_Error = R description=( '[ClickHouse] SHALL return an error if `aes-192-ctr` is specified as the value for the `mode` parameter of the\n' '`aes_decrypt_mysql` function.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_CTR_Error = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.AES-256-CTR.Error', @@ -3297,9 +5243,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_AES_256_CTR_Error = R description=( '[ClickHouse] SHALL return an error if `aes-256-ctr` is specified as the value for the `mode` parameter of the\n' '`aes_decrypt_mysql` function.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_InitializationVector = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.InitializationVector', @@ -3312,9 +5258,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_InitializationVector = Requireme '[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth\n' 'parameter to the `aes_decrypt_mysql` function that SHALL specify the initialization vector for block modes that require\n' 'it.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_ReturnValue = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.ReturnValue', @@ -3326,9 +5272,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_ReturnValue = Requirement( description=( '[ClickHouse] SHALL return the decrypted value of the data\n' 'using `String` data type as the result of `aes_decrypt_mysql` function.\n' + '\n' ), - 
link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Key_Length_TooShortError = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Key.Length.TooShortError', @@ -3340,9 +5286,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Key_Length_TooShortError = Requirement( description=( '[ClickHouse] SHALL return an error if the `key` length is less than the minimum for the `aes_decrypt_mysql`\n' 'function for a given block mode.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_Key_Length_TooLong = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.Key.Length.TooLong', @@ -3364,9 +5310,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_Key_Length_TooLong = Requirement( '\t\t folded_key[i % cipher_key_size] ^= key[i]\n' '\t return folded_key\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_Length_TooShortError = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.Length.TooShortError', @@ -3378,9 +5324,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_Length_TooShortError = description=( '[ClickHouse] SHALL return an error if the `iv` length is specified and is less than the minimum\n' 'that is required for the `aes_decrypt_mysql` function for a given block mode.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_Length_TooLong = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.Length.TooLong', @@ -3392,9 +5338,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_Length_TooLong = Requi description=( '[ClickHouse] SHALL use the first `N` bytes that are required if the `iv` is specified and\n' 'its length is longer than required for the `aes_decrypt_mysql` function for a given block mode.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_NotValidForMode = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.NotValidForMode', @@ -3406,9 +5352,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_NotValidForMode = Requ description=( '[ClickHouse] SHALL return an error if the `iv` is specified for the `aes_decrypt_mysql`\n' 'function for a mode that does not need it.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_AES_128_ECB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-ECB.KeyAndInitializationVector.Length', @@ -3420,9 +5366,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_AES_128_ECB_KeyAndInitializationVector_Leng description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-128-ecb` and `key` is less than 16 bytes\n' 'or `iv` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_AES_192_ECB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-ECB.KeyAndInitializationVector.Length', @@ -3434,9 +5380,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_AES_192_ECB_KeyAndInitializationVector_Leng description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-192-ecb` and `key` is less than 24 bytes\n' 'or `iv` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_AES_256_ECB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-ECB.KeyAndInitializationVector.Length', @@ 
-3448,9 +5394,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_AES_256_ECB_KeyAndInitializationVector_Leng description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-256-ecb` and `key` is less than 32 bytes\n' 'or `iv` is specified.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_AES_128_CBC_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-CBC.KeyAndInitializationVector.Length', @@ -3462,9 +5408,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_AES_128_CBC_KeyAndInitializationVector_Leng description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-128-cbc` and `key` is less than 16 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_AES_192_CBC_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-CBC.KeyAndInitializationVector.Length', @@ -3476,9 +5422,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_AES_192_CBC_KeyAndInitializationVector_Leng description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-192-cbc` and `key` is less than 24 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_AES_256_CBC_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-CBC.KeyAndInitializationVector.Length', @@ -3490,9 +5436,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_AES_256_CBC_KeyAndInitializationVector_Leng description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-256-cbc` and `key` is less than 32 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_AES_128_CFB1_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-CFB1.KeyAndInitializationVector.Length', @@ -3504,9 +5450,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_AES_128_CFB1_KeyAndInitializationVector_Len description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-128-cfb1` and `key` is less than 16 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_AES_192_CFB1_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-CFB1.KeyAndInitializationVector.Length', @@ -3518,9 +5464,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_AES_192_CFB1_KeyAndInitializationVector_Len description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-192-cfb1` and `key` is less than 24 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_AES_256_CFB1_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-CFB1.KeyAndInitializationVector.Length', @@ -3532,9 +5478,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_AES_256_CFB1_KeyAndInitializationVector_Len description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-256-cfb1` and `key` is less than 32 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) 
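The key-folding pseudocode quoted above in `RQ.SRS008.AES.MySQL.Decrypt.Function.Key.Length.TooLong` is compact enough to run almost verbatim. Below is a minimal executable Python sketch of that folding rule; the function name `fold_key` and the 20-byte example key are illustrative only and are not part of the SRS or of this patch.

```python
# Runnable sketch of the MySQL-compatible key folding described in
# RQ.SRS008.AES.MySQL.Decrypt.Function.Key.Length.TooLong: bytes past
# the cipher key size are XOR-folded back onto the first
# cipher_key_size bytes instead of causing an error.

def fold_key(key: bytes, cipher_key_size: int) -> bytes:
    folded = bytearray(key[:cipher_key_size])
    for i in range(cipher_key_size, len(key)):
        folded[i % cipher_key_size] ^= key[i]
    return bytes(folded)

# Example: a 20-byte key used with an aes-128-* mode (16-byte key).
assert len(fold_key(b"0123456789ABCDEF0123", 16)) == 16
assert fold_key(b"0123456789ABCDEF0123", 16) != b"0123456789ABCDEF"
```

Note the asymmetry these requirements spell out: a key shorter than the cipher minimum is an error (`RQ.SRS008.AES.MySQL.Decrypt.Function.Key.Length.TooShortError`), while an over-long key is silently folded, mirroring the behavior of the MySQL functions that `aes_decrypt_mysql` emulates.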
RQ_SRS008_AES_MySQL_Decrypt_Function_AES_128_CFB8_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-CFB8.KeyAndInitializationVector.Length', @@ -3546,9 +5492,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_AES_128_CFB8_KeyAndInitializationVector_Len description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-128-cfb8` and `key` is less than 16 bytes\n' 'and if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_AES_192_CFB8_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-CFB8.KeyAndInitializationVector.Length', @@ -3560,9 +5506,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_AES_192_CFB8_KeyAndInitializationVector_Len description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-192-cfb8` and `key` is less than 24 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_AES_256_CFB8_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-CFB8.KeyAndInitializationVector.Length', @@ -3574,9 +5520,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_AES_256_CFB8_KeyAndInitializationVector_Len description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-256-cfb8` and `key` is less than 32 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_AES_128_CFB128_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-CFB128.KeyAndInitializationVector.Length', @@ -3588,9 +5534,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_AES_128_CFB128_KeyAndInitializationVector_L description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-128-cfb128` and `key` is less than 16 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_AES_192_CFB128_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-CFB128.KeyAndInitializationVector.Length', @@ -3602,9 +5548,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_AES_192_CFB128_KeyAndInitializationVector_L description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-192-cfb128` and `key` is less than 24 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_AES_256_CFB128_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-CFB128.KeyAndInitializationVector.Length', @@ -3616,9 +5562,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_AES_256_CFB128_KeyAndInitializationVector_L description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-256-cfb128` and `key` is less than 32 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_AES_128_OFB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-128-OFB.KeyAndInitializationVector.Length', @@ -3630,9 +5576,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_AES_128_OFB_KeyAndInitializationVector_Leng 
description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-128-ofb` and `key` is less than 16 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_AES_192_OFB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-192-OFB.KeyAndInitializationVector.Length', @@ -3644,9 +5590,9 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_AES_192_OFB_KeyAndInitializationVector_Leng description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-192-ofb` and `key` is less than 24 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS008_AES_MySQL_Decrypt_Function_AES_256_OFB_KeyAndInitializationVector_Length = Requirement( name='RQ.SRS008.AES.MySQL.Decrypt.Function.AES-256-OFB.KeyAndInitializationVector.Length', @@ -3658,6 +5604,6 @@ RQ_SRS008_AES_MySQL_Decrypt_Function_AES_256_OFB_KeyAndInitializationVector_Leng description=( '[ClickHouse] SHALL return an error when `mode` for the `aes_decrypt_mysql` function is set to `aes-256-ofb` and `key` is less than 32 bytes\n' 'or if specified `iv` is less than 16 bytes.\n' + '\n' ), - link=None - ) + link=None) diff --git a/tests/testflows/ldap/authentication/requirements/requirements.md b/tests/testflows/ldap/authentication/requirements/requirements.md index d322db70330..1c65a29fef4 100644 --- a/tests/testflows/ldap/authentication/requirements/requirements.md +++ b/tests/testflows/ldap/authentication/requirements/requirements.md @@ -460,14 +460,14 @@ time user configuration contains any of the `` entries. #### RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.NotDefined version: 1.0 -[ClickHouse] SHALL throw an error during any authentification attempt +[ClickHouse] SHALL throw an error during any authentication attempt if the name of the [LDAP] server used inside the `` entry is not defined in the `` section. #### RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.Empty version: 1.0 -[ClickHouse] SHALL throw an error during any authentification attempt +[ClickHouse] SHALL throw an error during any authentication attempt if the name of the [LDAP] server used inside the `` entry is empty. diff --git a/tests/testflows/ldap/authentication/requirements/requirements.py b/tests/testflows/ldap/authentication/requirements/requirements.py index 967e097d1fa..60fbef9b8cd 100644 --- a/tests/testflows/ldap/authentication/requirements/requirements.py +++ b/tests/testflows/ldap/authentication/requirements/requirements.py @@ -1,10 +1,571 @@ # These requirements were auto generated # from software requirements specification (SRS) -# document by TestFlows v1.6.200811.1124123. +# document by TestFlows v1.6.201026.1232822. # Do not edit by hand but re-generate instead # using 'tfs requirements generate' command. 
+from testflows.core import Specification from testflows.core import Requirement +SRS_007_ClickHouse_Authentication_of_Users_via_LDAP = Specification( + name='SRS-007 ClickHouse Authentication of Users via LDAP', + description=None, + author=None, + date=None, + status=None, + approved_by=None, + approved_date=None, + approved_version=None, + version=None, + group=None, + type=None, + link=None, + uid=None, + parent=None, + children=None, + content=''' +# SRS-007 ClickHouse Authentication of Users via LDAP + +## Table of Contents + +* 1 [Revision History](#revision-history) +* 2 [Introduction](#introduction) +* 3 [Terminology](#terminology) +* 4 [Requirements](#requirements) + * 4.1 [Generic](#generic) + * 4.1.1 [RQ.SRS-007.LDAP.Authentication](#rqsrs-007ldapauthentication) + * 4.1.2 [RQ.SRS-007.LDAP.Authentication.MultipleServers](#rqsrs-007ldapauthenticationmultipleservers) + * 4.1.3 [RQ.SRS-007.LDAP.Authentication.Protocol.PlainText](#rqsrs-007ldapauthenticationprotocolplaintext) + * 4.1.4 [RQ.SRS-007.LDAP.Authentication.Protocol.TLS](#rqsrs-007ldapauthenticationprotocoltls) + * 4.1.5 [RQ.SRS-007.LDAP.Authentication.Protocol.StartTLS](#rqsrs-007ldapauthenticationprotocolstarttls) + * 4.1.6 [RQ.SRS-007.LDAP.Authentication.TLS.Certificate.Validation](#rqsrs-007ldapauthenticationtlscertificatevalidation) + * 4.1.7 [RQ.SRS-007.LDAP.Authentication.TLS.Certificate.SelfSigned](#rqsrs-007ldapauthenticationtlscertificateselfsigned) + * 4.1.8 [RQ.SRS-007.LDAP.Authentication.TLS.Certificate.SpecificCertificationAuthority](#rqsrs-007ldapauthenticationtlscertificatespecificcertificationauthority) + * 4.1.9 [RQ.SRS-007.LDAP.Server.Configuration.Invalid](#rqsrs-007ldapserverconfigurationinvalid) + * 4.1.10 [RQ.SRS-007.LDAP.User.Configuration.Invalid](#rqsrs-007ldapuserconfigurationinvalid) + * 4.1.11 [RQ.SRS-007.LDAP.Authentication.Mechanism.Anonymous](#rqsrs-007ldapauthenticationmechanismanonymous) + * 4.1.12 [RQ.SRS-007.LDAP.Authentication.Mechanism.Unauthenticated](#rqsrs-007ldapauthenticationmechanismunauthenticated) + * 4.1.13 [RQ.SRS-007.LDAP.Authentication.Mechanism.NamePassword](#rqsrs-007ldapauthenticationmechanismnamepassword) + * 4.1.14 [RQ.SRS-007.LDAP.Authentication.Valid](#rqsrs-007ldapauthenticationvalid) + * 4.1.15 [RQ.SRS-007.LDAP.Authentication.Invalid](#rqsrs-007ldapauthenticationinvalid) + * 4.1.16 [RQ.SRS-007.LDAP.Authentication.Invalid.DeletedUser](#rqsrs-007ldapauthenticationinvaliddeleteduser) + * 4.1.17 [RQ.SRS-007.LDAP.Authentication.UsernameChanged](#rqsrs-007ldapauthenticationusernamechanged) + * 4.1.18 [RQ.SRS-007.LDAP.Authentication.PasswordChanged](#rqsrs-007ldapauthenticationpasswordchanged) + * 4.1.19 [RQ.SRS-007.LDAP.Authentication.LDAPServerRestart](#rqsrs-007ldapauthenticationldapserverrestart) + * 4.1.20 [RQ.SRS-007.LDAP.Authentication.ClickHouseServerRestart](#rqsrs-007ldapauthenticationclickhouseserverrestart) + * 4.1.21 [RQ.SRS-007.LDAP.Authentication.Parallel](#rqsrs-007ldapauthenticationparallel) + * 4.1.22 [RQ.SRS-007.LDAP.Authentication.Parallel.ValidAndInvalid](#rqsrs-007ldapauthenticationparallelvalidandinvalid) + * 4.2 [Specific](#specific) + * 4.2.1 [RQ.SRS-007.LDAP.UnreachableServer](#rqsrs-007ldapunreachableserver) + * 4.2.2 [RQ.SRS-007.LDAP.Configuration.Server.Name](#rqsrs-007ldapconfigurationservername) + * 4.2.3 [RQ.SRS-007.LDAP.Configuration.Server.Host](#rqsrs-007ldapconfigurationserverhost) + * 4.2.4 [RQ.SRS-007.LDAP.Configuration.Server.Port](#rqsrs-007ldapconfigurationserverport) + * 4.2.5 
[RQ.SRS-007.LDAP.Configuration.Server.Port.Default](#rqsrs-007ldapconfigurationserverportdefault) + * 4.2.6 [RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Prefix](#rqsrs-007ldapconfigurationserverauthdnprefix) + * 4.2.7 [RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Suffix](#rqsrs-007ldapconfigurationserverauthdnsuffix) + * 4.2.8 [RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Value](#rqsrs-007ldapconfigurationserverauthdnvalue) + * 4.2.9 [RQ.SRS-007.LDAP.Configuration.Server.EnableTLS](#rqsrs-007ldapconfigurationserverenabletls) + * 4.2.10 [RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.Default](#rqsrs-007ldapconfigurationserverenabletlsoptionsdefault) + * 4.2.11 [RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.No](#rqsrs-007ldapconfigurationserverenabletlsoptionsno) + * 4.2.12 [RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.Yes](#rqsrs-007ldapconfigurationserverenabletlsoptionsyes) + * 4.2.13 [RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.StartTLS](#rqsrs-007ldapconfigurationserverenabletlsoptionsstarttls) + * 4.2.14 [RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion](#rqsrs-007ldapconfigurationservertlsminimumprotocolversion) + * 4.2.15 [RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion.Values](#rqsrs-007ldapconfigurationservertlsminimumprotocolversionvalues) + * 4.2.16 [RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion.Default](#rqsrs-007ldapconfigurationservertlsminimumprotocolversiondefault) + * 4.2.17 [RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert](#rqsrs-007ldapconfigurationservertlsrequirecert) + * 4.2.18 [RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Default](#rqsrs-007ldapconfigurationservertlsrequirecertoptionsdefault) + * 4.2.19 [RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Demand](#rqsrs-007ldapconfigurationservertlsrequirecertoptionsdemand) + * 4.2.20 [RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Allow](#rqsrs-007ldapconfigurationservertlsrequirecertoptionsallow) + * 4.2.21 [RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Try](#rqsrs-007ldapconfigurationservertlsrequirecertoptionstry) + * 4.2.22 [RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Never](#rqsrs-007ldapconfigurationservertlsrequirecertoptionsnever) + * 4.2.23 [RQ.SRS-007.LDAP.Configuration.Server.TLSCertFile](#rqsrs-007ldapconfigurationservertlscertfile) + * 4.2.24 [RQ.SRS-007.LDAP.Configuration.Server.TLSKeyFile](#rqsrs-007ldapconfigurationservertlskeyfile) + * 4.2.25 [RQ.SRS-007.LDAP.Configuration.Server.TLSCACertDir](#rqsrs-007ldapconfigurationservertlscacertdir) + * 4.2.26 [RQ.SRS-007.LDAP.Configuration.Server.TLSCACertFile](#rqsrs-007ldapconfigurationservertlscacertfile) + * 4.2.27 [RQ.SRS-007.LDAP.Configuration.Server.TLSCipherSuite](#rqsrs-007ldapconfigurationservertlsciphersuite) + * 4.2.28 [RQ.SRS-007.LDAP.Configuration.Server.Syntax](#rqsrs-007ldapconfigurationserversyntax) + * 4.2.29 [RQ.SRS-007.LDAP.Configuration.User.RBAC](#rqsrs-007ldapconfigurationuserrbac) + * 4.2.30 [RQ.SRS-007.LDAP.Configuration.User.Syntax](#rqsrs-007ldapconfigurationusersyntax) + * 4.2.31 [RQ.SRS-007.LDAP.Configuration.User.Name.Empty](#rqsrs-007ldapconfigurationusernameempty) + * 4.2.32 [RQ.SRS-007.LDAP.Configuration.User.BothPasswordAndLDAP](#rqsrs-007ldapconfigurationuserbothpasswordandldap) + * 4.2.33 [RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.NotDefined](#rqsrs-007ldapconfigurationuserldapinvalidservernamenotdefined) + * 4.2.34 
[RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.Empty](#rqsrs-007ldapconfigurationuserldapinvalidservernameempty) + * 4.2.35 [RQ.SRS-007.LDAP.Configuration.User.OnlyOneServer](#rqsrs-007ldapconfigurationuseronlyoneserver) + * 4.2.36 [RQ.SRS-007.LDAP.Configuration.User.Name.Long](#rqsrs-007ldapconfigurationusernamelong) + * 4.2.37 [RQ.SRS-007.LDAP.Configuration.User.Name.UTF8](#rqsrs-007ldapconfigurationusernameutf8) + * 4.2.38 [RQ.SRS-007.LDAP.Authentication.Username.Empty](#rqsrs-007ldapauthenticationusernameempty) + * 4.2.39 [RQ.SRS-007.LDAP.Authentication.Username.Long](#rqsrs-007ldapauthenticationusernamelong) + * 4.2.40 [RQ.SRS-007.LDAP.Authentication.Username.UTF8](#rqsrs-007ldapauthenticationusernameutf8) + * 4.2.41 [RQ.SRS-007.LDAP.Authentication.Password.Empty](#rqsrs-007ldapauthenticationpasswordempty) + * 4.2.42 [RQ.SRS-007.LDAP.Authentication.Password.Long](#rqsrs-007ldapauthenticationpasswordlong) + * 4.2.43 [RQ.SRS-007.LDAP.Authentication.Password.UTF8](#rqsrs-007ldapauthenticationpasswordutf8) +* 5 [References](#references) + +## Revision History + +This document is stored in an electronic form using [Git] source control management software +hosted in a [GitHub Repository]. +All the updates are tracked using the [Git]'s [Revision History]. + +## Introduction + +[ClickHouse] currently does not have any integration with [LDAP]. +As the initial step in integrating with [LDAP] this software requirements specification covers +only the requirements to enable authentication of users using an [LDAP] server. + +## Terminology + +* **CA** - + Certificate Authority ([CA]) + +* **LDAP** - + Lightweight Directory Access Protocol ([LDAP]) + +## Requirements + +### Generic + +#### RQ.SRS-007.LDAP.Authentication +version: 1.0 + +[ClickHouse] SHALL support user authentication via an [LDAP] server. + +#### RQ.SRS-007.LDAP.Authentication.MultipleServers +version: 1.0 + +[ClickHouse] SHALL support specifying multiple [LDAP] servers that can be used to authenticate +users. + +#### RQ.SRS-007.LDAP.Authentication.Protocol.PlainText +version: 1.0 + +[ClickHouse] SHALL support user authentication using plain text `ldap://` non secure protocol. + +#### RQ.SRS-007.LDAP.Authentication.Protocol.TLS +version: 1.0 + +[ClickHouse] SHALL support user authentication using `SSL/TLS` `ldaps://` secure protocol. + +#### RQ.SRS-007.LDAP.Authentication.Protocol.StartTLS +version: 1.0 + +[ClickHouse] SHALL support user authentication using legacy `StartTLS` protocol which is a +plain text `ldap://` protocol that is upgraded to [TLS]. + +#### RQ.SRS-007.LDAP.Authentication.TLS.Certificate.Validation +version: 1.0 + +[ClickHouse] SHALL support certificate validation used for [TLS] connections. + +#### RQ.SRS-007.LDAP.Authentication.TLS.Certificate.SelfSigned +version: 1.0 + +[ClickHouse] SHALL support self-signed certificates for [TLS] connections. + +#### RQ.SRS-007.LDAP.Authentication.TLS.Certificate.SpecificCertificationAuthority +version: 1.0 + +[ClickHouse] SHALL support certificates signed by specific Certification Authority for [TLS] connections. + +#### RQ.SRS-007.LDAP.Server.Configuration.Invalid +version: 1.0 + +[ClickHouse] SHALL return an error and prohibit user login if [LDAP] server configuration is not valid. + +#### RQ.SRS-007.LDAP.User.Configuration.Invalid +version: 1.0 + +[ClickHouse] SHALL return an error and prohibit user login if user configuration is not valid. 
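As a reading aid for the three protocol requirements above (plain text `ldap://`, `SSL/TLS` `ldaps://`, and legacy `StartTLS`), here is a hedged sketch of what each connection style looks like from a client's point of view, written with the third-party `ldap3` Python package. The host name, bind DN, and password are placeholders; the sketch is not part of this patch or its test suite.

```python
# Illustration only: the three connection styles named in
# RQ.SRS-007.LDAP.Authentication.Protocol.* -- plain text, SSL/TLS,
# and StartTLS (a plain text connection upgraded to TLS).
import ssl
from ldap3 import Server, Connection, Tls

tls = Tls(validate=ssl.CERT_REQUIRED)  # certificate validation
bind_dn, password = "cn=my_user,ou=users,dc=example,dc=com", "secret"

plain = Server("ldap.example.com", port=389, use_ssl=False, tls=tls)
secure = Server("ldap.example.com", port=636, use_ssl=True, tls=tls)

Connection(plain, user=bind_dn, password=password).bind()   # ldap://
Connection(secure, user=bind_dn, password=password).bind()  # ldaps://

conn = Connection(plain, user=bind_dn, password=password)   # StartTLS:
conn.start_tls()                                            # upgrade first,
conn.bind()                                                 # then authenticate
```

Note that each `bind()` above is a name/password simple bind, which is exactly the only mechanism the following requirements allow; anonymous and unauthenticated simple binds must be rejected.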
+ +#### RQ.SRS-007.LDAP.Authentication.Mechanism.Anonymous +version: 1.0 + +[ClickHouse] SHALL return an error and prohibit authentication using [Anonymous Authentication Mechanism of Simple Bind] +authentication mechanism. + +#### RQ.SRS-007.LDAP.Authentication.Mechanism.Unauthenticated +version: 1.0 + +[ClickHouse] SHALL return an error and prohibit authentication using [Unauthenticated Authentication Mechanism of Simple Bind] +authentication mechanism. + +#### RQ.SRS-007.LDAP.Authentication.Mechanism.NamePassword +version: 1.0 + +[ClickHouse] SHALL allow authentication using only [Name/Password Authentication Mechanism of Simple Bind] +authentication mechanism. + +#### RQ.SRS-007.LDAP.Authentication.Valid +version: 1.0 + +[ClickHouse] SHALL only allow user authentication using [LDAP] server if and only if +user name and password match [LDAP] server records for the user. + +#### RQ.SRS-007.LDAP.Authentication.Invalid +version: 1.0 + +[ClickHouse] SHALL return an error and prohibit authentication if either user name or password +do not match [LDAP] server records for the user. + +#### RQ.SRS-007.LDAP.Authentication.Invalid.DeletedUser +version: 1.0 + +[ClickHouse] SHALL return an error and prohibit authentication if the user +has been deleted from the [LDAP] server. + +#### RQ.SRS-007.LDAP.Authentication.UsernameChanged +version: 1.0 + +[ClickHouse] SHALL return an error and prohibit authentication if the username is changed +on the [LDAP] server. + +#### RQ.SRS-007.LDAP.Authentication.PasswordChanged +version: 1.0 + +[ClickHouse] SHALL return an error and prohibit authentication if the password +for the user is changed on the [LDAP] server. + +#### RQ.SRS-007.LDAP.Authentication.LDAPServerRestart +version: 1.0 + +[ClickHouse] SHALL support authenticating users after [LDAP] server is restarted. + +#### RQ.SRS-007.LDAP.Authentication.ClickHouseServerRestart +version: 1.0 + +[ClickHouse] SHALL support authenticating users after server is restarted. + +#### RQ.SRS-007.LDAP.Authentication.Parallel +version: 1.0 + +[ClickHouse] SHALL support parallel authentication of users using [LDAP] server. + +#### RQ.SRS-007.LDAP.Authentication.Parallel.ValidAndInvalid +version: 1.0 + +[ClickHouse] SHALL support authentication of valid users and +prohibit authentication of invalid users using [LDAP] server +in parallel without having invalid attempts affecting valid authentications. + +### Specific + +#### RQ.SRS-007.LDAP.UnreachableServer +version: 1.0 + +[ClickHouse] SHALL return an error and prohibit user login if [LDAP] server is unreachable. + +#### RQ.SRS-007.LDAP.Configuration.Server.Name +version: 1.0 + +[ClickHouse] SHALL not support empty string as a server name. + +#### RQ.SRS-007.LDAP.Configuration.Server.Host +version: 1.0 + +[ClickHouse] SHALL support `` parameter to specify [LDAP] +server hostname or IP, this parameter SHALL be mandatory and SHALL not be empty. + +#### RQ.SRS-007.LDAP.Configuration.Server.Port +version: 1.0 + +[ClickHouse] SHALL support `` parameter to specify [LDAP] server port. + +#### RQ.SRS-007.LDAP.Configuration.Server.Port.Default +version: 1.0 + +[ClickHouse] SHALL use default port number `636` if `enable_tls` is set to `yes` or `389` otherwise. + +#### RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Prefix +version: 1.0 + +[ClickHouse] SHALL support `` parameter to specify the prefix +of value used to construct the DN to bound to during authentication via [LDAP] server. 
+ +#### RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Suffix +version: 1.0 + +[ClickHouse] SHALL support `` parameter to specify the suffix +of value used to construct the DN to bound to during authentication via [LDAP] server. + +#### RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Value +version: 1.0 + +[ClickHouse] SHALL construct DN as `auth_dn_prefix + escape(user_name) + auth_dn_suffix` string. + +> This implies that auth_dn_suffix should usually have comma ',' as its first non-space character. + +#### RQ.SRS-007.LDAP.Configuration.Server.EnableTLS +version: 1.0 + +[ClickHouse] SHALL support `` parameter to trigger the use of secure connection to the [LDAP] server. + +#### RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.Default +version: 1.0 + +[ClickHouse] SHALL use `yes` value as the default for `` parameter +to enable SSL/TLS `ldaps://` protocol. + +#### RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.No +version: 1.0 + +[ClickHouse] SHALL support specifying `no` as the value of `` parameter to enable +plain text `ldap://` protocol. + +#### RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.Yes +version: 1.0 + +[ClickHouse] SHALL support specifying `yes` as the value of `` parameter to enable +SSL/TLS `ldaps://` protocol. + +#### RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.StartTLS +version: 1.0 + +[ClickHouse] SHALL support specifying `starttls` as the value of `` parameter to enable +legacy `StartTLS` protocol that used plain text `ldap://` protocol, upgraded to [TLS]. + +#### RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion +version: 1.0 + +[ClickHouse] SHALL support `` parameter to specify +the minimum protocol version of SSL/TLS. + +#### RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion.Values +version: 1.0 + +[ClickHouse] SHALL support specifying `ssl2`, `ssl3`, `tls1.0`, `tls1.1`, and `tls1.2` +as a value of the `` parameter. + +#### RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion.Default +version: 1.0 + +[ClickHouse] SHALL set `tls1.2` as the default value of the `` parameter. + +#### RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert +version: 1.0 + +[ClickHouse] SHALL support `` parameter to specify [TLS] peer +certificate verification behavior. + +#### RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Default +version: 1.0 + +[ClickHouse] SHALL use `demand` value as the default for the `` parameter. + +#### RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Demand +version: 1.0 + +[ClickHouse] SHALL support specifying `demand` as the value of `` parameter to +enable requesting of client certificate. If no certificate is provided, or a bad certificate is +provided, the session SHALL be immediately terminated. + +#### RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Allow +version: 1.0 + +[ClickHouse] SHALL support specifying `allow` as the value of `` parameter to +enable requesting of client certificate. If no +certificate is provided, the session SHALL proceed normally. +If a bad certificate is provided, it SHALL be ignored and the session SHALL proceed normally. + +#### RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Try +version: 1.0 + +[ClickHouse] SHALL support specifying `try` as the value of `` parameter to +enable requesting of client certificate. If no certificate is provided, the session +SHALL proceed normally. If a bad certificate is provided, the session SHALL be +immediately terminated. 
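The DN construction rule in `RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Value` above is a plain string concatenation around an escaped user name. A minimal sketch follows, assuming RFC 4514-style backslash escaping for the special characters; the SRS itself does not pin down the exact escaping algorithm, so `escape()` here is an assumption.

```python
# Sketch of auth_dn_prefix + escape(user_name) + auth_dn_suffix from
# RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Value. The escape()
# below (RFC 4514 special characters only) is an assumption, not
# taken from ClickHouse source.

def escape(user_name: str) -> str:
    return "".join("\\" + c if c in ',+"\\<>;' else c for c in user_name)

def bind_dn(auth_dn_prefix: str, user_name: str, auth_dn_suffix: str) -> str:
    return auth_dn_prefix + escape(user_name) + auth_dn_suffix

# With the prefix/suffix from the example server configuration below:
print(bind_dn("cn=", "my_user", ", ou=users, dc=example, dc=com"))
# cn=my_user, ou=users, dc=example, dc=com
```

This also motivates the note above that `auth_dn_suffix` should usually have a comma as its first non-space character: the suffix must separate the user's RDN from the rest of the DN.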
+ +#### RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Never +version: 1.0 + +[ClickHouse] SHALL support specifying `never` as the value of `` parameter to +disable requesting of client certificate. + +#### RQ.SRS-007.LDAP.Configuration.Server.TLSCertFile +version: 1.0 + +[ClickHouse] SHALL support `` to specify the path to certificate file used by +[ClickHouse] to establish connection with the [LDAP] server. + +#### RQ.SRS-007.LDAP.Configuration.Server.TLSKeyFile +version: 1.0 + +[ClickHouse] SHALL support `` to specify the path to key file for the certificate +specified by the `` parameter. + +#### RQ.SRS-007.LDAP.Configuration.Server.TLSCACertDir +version: 1.0 + +[ClickHouse] SHALL support `` parameter to specify to a path to +the directory containing [CA] certificates used to verify certificates provided by the [LDAP] server. + +#### RQ.SRS-007.LDAP.Configuration.Server.TLSCACertFile +version: 1.0 + +[ClickHouse] SHALL support `` parameter to specify a path to a specific +[CA] certificate file used to verify certificates provided by the [LDAP] server. + +#### RQ.SRS-007.LDAP.Configuration.Server.TLSCipherSuite +version: 1.0 + +[ClickHouse] SHALL support `tls_cipher_suite` parameter to specify allowed cipher suites. +The value SHALL use the same format as the `ciphersuites` in the [OpenSSL Ciphers]. + +For example, + +```xml +ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384 +``` + +The available suites SHALL depend on the [OpenSSL] library version and variant used to build +[ClickHouse] and therefore might change. + +#### RQ.SRS-007.LDAP.Configuration.Server.Syntax +version: 1.0 + +[ClickHouse] SHALL support the following example syntax to create an entry for an [LDAP] server inside the `config.xml` +configuration file or of any configuration file inside the `config.d` directory. + +```xml + + + localhost + 636 + cn= + , ou=users, dc=example, dc=com + yes + tls1.2 + demand + /path/to/tls_cert_file + /path/to/tls_key_file + /path/to/tls_ca_cert_file + /path/to/tls_ca_cert_dir + ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384 + + +``` + +#### RQ.SRS-007.LDAP.Configuration.User.RBAC +version: 1.0 + +[ClickHouse] SHALL support creating users identified using an [LDAP] server using +the following RBAC command + +```sql +CREATE USER name IDENTIFIED WITH ldap_server BY 'server_name' +``` + +#### RQ.SRS-007.LDAP.Configuration.User.Syntax +version: 1.0 + +[ClickHouse] SHALL support the following example syntax to create a user that is authenticated using +an [LDAP] server inside the `users.xml` file or any configuration file inside the `users.d` directory. + +```xml + + + + + my_ldap_server + + + + +``` + +#### RQ.SRS-007.LDAP.Configuration.User.Name.Empty +version: 1.0 + +[ClickHouse] SHALL not support empty string as a user name. + +#### RQ.SRS-007.LDAP.Configuration.User.BothPasswordAndLDAP +version: 1.0 + +[ClickHouse] SHALL throw an error if `` is specified for the user and at the same +time user configuration contains any of the `` entries. + +#### RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.NotDefined +version: 1.0 + +[ClickHouse] SHALL throw an error during any authentication attempt +if the name of the [LDAP] server used inside the `` entry +is not defined in the `` section. + +#### RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.Empty +version: 1.0 + +[ClickHouse] SHALL throw an error during any authentication attempt +if the name of the [LDAP] server used inside the `` entry +is empty. 
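The four `tls_require_cert` options described earlier in this section differ only in how the session reacts to a missing or bad client certificate. The table-as-code below simply restates those outcomes as a reading aid; it introduces no behavior of its own.

```python
# Outcomes per RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.*:
# value -> (certificate missing, certificate bad)
TLS_REQUIRE_CERT = {
    "demand": ("terminate session",  "terminate session"),  # the default
    "allow":  ("proceed normally",   "ignore cert, proceed normally"),
    "try":    ("proceed normally",   "terminate session"),
    "never":  ("cert not requested", "cert not requested"),
}
```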
+ +#### RQ.SRS-007.LDAP.Configuration.User.OnlyOneServer +version: 1.0 + +[ClickHouse] SHALL support specifying only one [LDAP] server for a given user. + +#### RQ.SRS-007.LDAP.Configuration.User.Name.Long +version: 1.0 + +[ClickHouse] SHALL support long user names of at least 256 bytes +to specify users that can be authenticated using an [LDAP] server. + +#### RQ.SRS-007.LDAP.Configuration.User.Name.UTF8 +version: 1.0 + +[ClickHouse] SHALL support user names that contain [UTF-8] characters. + +#### RQ.SRS-007.LDAP.Authentication.Username.Empty +version: 1.0 + +[ClickHouse] SHALL not support authenticating users with empty username. + +#### RQ.SRS-007.LDAP.Authentication.Username.Long +version: 1.0 + +[ClickHouse] SHALL support authenticating users with a long username of at least 256 bytes. + +#### RQ.SRS-007.LDAP.Authentication.Username.UTF8 +version: 1.0 + +[ClickHouse] SHALL support authentication users with a username that contains [UTF-8] characters. + +#### RQ.SRS-007.LDAP.Authentication.Password.Empty +version: 1.0 + +[ClickHouse] SHALL not support authenticating users with empty passwords +even if an empty password is valid for the user and +is allowed by the [LDAP] server. + +#### RQ.SRS-007.LDAP.Authentication.Password.Long +version: 1.0 + +[ClickHouse] SHALL support long password of at least 256 bytes +that can be used to authenticate users using an [LDAP] server. + +#### RQ.SRS-007.LDAP.Authentication.Password.UTF8 +version: 1.0 + +[ClickHouse] SHALL support [UTF-8] characters in passwords +used to authenticate users using an [LDAP] server. + +## References + +* **ClickHouse:** https://clickhouse.tech + +[Anonymous Authentication Mechanism of Simple Bind]: https://ldapwiki.com/wiki/Simple%20Authentication#section-Simple+Authentication-AnonymousAuthenticationMechanismOfSimpleBind +[Unauthenticated Authentication Mechanism of Simple Bind]: https://ldapwiki.com/wiki/Simple%20Authentication#section-Simple+Authentication-UnauthenticatedAuthenticationMechanismOfSimpleBind +[Name/Password Authentication Mechanism of Simple Bind]: https://ldapwiki.com/wiki/Simple%20Authentication#section-Simple+Authentication-NamePasswordAuthenticationMechanismOfSimpleBind +[UTF-8]: https://en.wikipedia.org/wiki/UTF-8 +[OpenSSL]: https://www.openssl.org/ +[OpenSSL Ciphers]: https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html +[CA]: https://en.wikipedia.org/wiki/Certificate_authority +[TLS]: https://en.wikipedia.org/wiki/Transport_Layer_Security +[LDAP]: https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol +[ClickHouse]: https://clickhouse.tech +[GitHub]: https://github.com +[GitHub Repository]: https://github.com/ClickHouse/ClickHouse/blob/master/tests/testflows/ldap/authentication/requirements/requirements.md +[Revision History]: https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/ldap/authentication/requirements/requirements.md +[Git]: https://git-scm.com/ +''') + RQ_SRS_007_LDAP_Authentication = Requirement( name='RQ.SRS-007.LDAP.Authentication', version='1.0', @@ -14,9 +575,9 @@ RQ_SRS_007_LDAP_Authentication = Requirement( uid=None, description=( '[ClickHouse] SHALL support user authentication via an [LDAP] server.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_MultipleServers = Requirement( name='RQ.SRS-007.LDAP.Authentication.MultipleServers', @@ -28,9 +589,9 @@ RQ_SRS_007_LDAP_Authentication_MultipleServers = Requirement( description=( '[ClickHouse] SHALL support specifying multiple [LDAP] servers that can be used to 
authenticate\n' 'users.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_Protocol_PlainText = Requirement( name='RQ.SRS-007.LDAP.Authentication.Protocol.PlainText', @@ -41,9 +602,9 @@ RQ_SRS_007_LDAP_Authentication_Protocol_PlainText = Requirement( uid=None, description=( '[ClickHouse] SHALL support user authentication using plain text `ldap://` non secure protocol.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_Protocol_TLS = Requirement( name='RQ.SRS-007.LDAP.Authentication.Protocol.TLS', @@ -54,9 +615,9 @@ RQ_SRS_007_LDAP_Authentication_Protocol_TLS = Requirement( uid=None, description=( '[ClickHouse] SHALL support user authentication using `SSL/TLS` `ldaps://` secure protocol.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_Protocol_StartTLS = Requirement( name='RQ.SRS-007.LDAP.Authentication.Protocol.StartTLS', @@ -68,9 +629,9 @@ RQ_SRS_007_LDAP_Authentication_Protocol_StartTLS = Requirement( description=( '[ClickHouse] SHALL support user authentication using legacy `StartTLS` protocol which is a\n' 'plain text `ldap://` protocol that is upgraded to [TLS].\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_TLS_Certificate_Validation = Requirement( name='RQ.SRS-007.LDAP.Authentication.TLS.Certificate.Validation', @@ -81,9 +642,9 @@ RQ_SRS_007_LDAP_Authentication_TLS_Certificate_Validation = Requirement( uid=None, description=( '[ClickHouse] SHALL support certificate validation used for [TLS] connections.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_TLS_Certificate_SelfSigned = Requirement( name='RQ.SRS-007.LDAP.Authentication.TLS.Certificate.SelfSigned', @@ -94,9 +655,9 @@ RQ_SRS_007_LDAP_Authentication_TLS_Certificate_SelfSigned = Requirement( uid=None, description=( '[ClickHouse] SHALL support self-signed certificates for [TLS] connections.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_TLS_Certificate_SpecificCertificationAuthority = Requirement( name='RQ.SRS-007.LDAP.Authentication.TLS.Certificate.SpecificCertificationAuthority', @@ -107,9 +668,9 @@ RQ_SRS_007_LDAP_Authentication_TLS_Certificate_SpecificCertificationAuthority = uid=None, description=( '[ClickHouse] SHALL support certificates signed by specific Certification Authority for [TLS] connections.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Server_Configuration_Invalid = Requirement( name='RQ.SRS-007.LDAP.Server.Configuration.Invalid', @@ -120,9 +681,9 @@ RQ_SRS_007_LDAP_Server_Configuration_Invalid = Requirement( uid=None, description=( '[ClickHouse] SHALL return an error and prohibit user login if [LDAP] server configuration is not valid.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_User_Configuration_Invalid = Requirement( name='RQ.SRS-007.LDAP.User.Configuration.Invalid', @@ -133,9 +694,9 @@ RQ_SRS_007_LDAP_User_Configuration_Invalid = Requirement( uid=None, description=( '[ClickHouse] SHALL return an error and prohibit user login if user configuration is not valid.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_Mechanism_Anonymous = Requirement( name='RQ.SRS-007.LDAP.Authentication.Mechanism.Anonymous', @@ -147,9 +708,9 @@ RQ_SRS_007_LDAP_Authentication_Mechanism_Anonymous = Requirement( description=( '[ClickHouse] SHALL return an error and prohibit authentication using [Anonymous Authentication Mechanism of Simple Bind]\n' 'authentication mechanism.\n' + '\n' ), - link=None - ) + link=None) 
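These auto-generated `Requirement` objects exist so that test code can link scenarios back to the SRS. A hedged sketch of the usual TestFlows consumption pattern follows; the scenario itself is illustrative and is not part of this patch. Requirements are versioned by calling the object, e.g. `("1.0")`.

```python
# Sketch only: attaching an auto-generated requirement to a TestFlows
# scenario. Calling the Requirement with "1.0" pins the SRS version
# the test was written against.
from testflows.core import TestScenario, Requirements


@TestScenario
@Requirements(
    RQ_SRS_007_LDAP_Authentication_Protocol_PlainText("1.0"),
)
def plain_text_login(self):
    """Check that a user can authenticate over plain text ldap://."""
    # test body omitted in this sketch
```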
RQ_SRS_007_LDAP_Authentication_Mechanism_Unauthenticated = Requirement( name='RQ.SRS-007.LDAP.Authentication.Mechanism.Unauthenticated', @@ -161,9 +722,9 @@ RQ_SRS_007_LDAP_Authentication_Mechanism_Unauthenticated = Requirement( description=( '[ClickHouse] SHALL return an error and prohibit authentication using [Unauthenticated Authentication Mechanism of Simple Bind]\n' 'authentication mechanism.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_Mechanism_NamePassword = Requirement( name='RQ.SRS-007.LDAP.Authentication.Mechanism.NamePassword', @@ -175,9 +736,9 @@ RQ_SRS_007_LDAP_Authentication_Mechanism_NamePassword = Requirement( description=( '[ClickHouse] SHALL allow authentication using only [Name/Password Authentication Mechanism of Simple Bind]\n' 'authentication mechanism.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_Valid = Requirement( name='RQ.SRS-007.LDAP.Authentication.Valid', @@ -189,9 +750,9 @@ RQ_SRS_007_LDAP_Authentication_Valid = Requirement( description=( '[ClickHouse] SHALL only allow user authentication using [LDAP] server if and only if\n' 'user name and password match [LDAP] server records for the user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_Invalid = Requirement( name='RQ.SRS-007.LDAP.Authentication.Invalid', @@ -203,9 +764,9 @@ RQ_SRS_007_LDAP_Authentication_Invalid = Requirement( description=( '[ClickHouse] SHALL return an error and prohibit authentication if either user name or password\n' 'do not match [LDAP] server records for the user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_Invalid_DeletedUser = Requirement( name='RQ.SRS-007.LDAP.Authentication.Invalid.DeletedUser', @@ -217,9 +778,9 @@ RQ_SRS_007_LDAP_Authentication_Invalid_DeletedUser = Requirement( description=( '[ClickHouse] SHALL return an error and prohibit authentication if the user\n' 'has been deleted from the [LDAP] server.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_UsernameChanged = Requirement( name='RQ.SRS-007.LDAP.Authentication.UsernameChanged', @@ -231,9 +792,9 @@ RQ_SRS_007_LDAP_Authentication_UsernameChanged = Requirement( description=( '[ClickHouse] SHALL return an error and prohibit authentication if the username is changed\n' 'on the [LDAP] server.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_PasswordChanged = Requirement( name='RQ.SRS-007.LDAP.Authentication.PasswordChanged', @@ -245,9 +806,9 @@ RQ_SRS_007_LDAP_Authentication_PasswordChanged = Requirement( description=( '[ClickHouse] SHALL return an error and prohibit authentication if the password\n' 'for the user is changed on the [LDAP] server.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_LDAPServerRestart = Requirement( name='RQ.SRS-007.LDAP.Authentication.LDAPServerRestart', @@ -258,9 +819,9 @@ RQ_SRS_007_LDAP_Authentication_LDAPServerRestart = Requirement( uid=None, description=( '[ClickHouse] SHALL support authenticating users after [LDAP] server is restarted.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_ClickHouseServerRestart = Requirement( name='RQ.SRS-007.LDAP.Authentication.ClickHouseServerRestart', @@ -271,9 +832,9 @@ RQ_SRS_007_LDAP_Authentication_ClickHouseServerRestart = Requirement( uid=None, description=( '[ClickHouse] SHALL support authenticating users after server is restarted.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_Parallel = Requirement( 
name='RQ.SRS-007.LDAP.Authentication.Parallel', @@ -284,9 +845,9 @@ RQ_SRS_007_LDAP_Authentication_Parallel = Requirement( uid=None, description=( '[ClickHouse] SHALL support parallel authentication of users using [LDAP] server.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_Parallel_ValidAndInvalid = Requirement( name='RQ.SRS-007.LDAP.Authentication.Parallel.ValidAndInvalid', @@ -299,9 +860,9 @@ RQ_SRS_007_LDAP_Authentication_Parallel_ValidAndInvalid = Requirement( '[ClickHouse] SHALL support authentication of valid users and\n' 'prohibit authentication of invalid users using [LDAP] server\n' 'in parallel without having invalid attempts affecting valid authentications.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_UnreachableServer = Requirement( name='RQ.SRS-007.LDAP.UnreachableServer', @@ -312,9 +873,9 @@ RQ_SRS_007_LDAP_UnreachableServer = Requirement( uid=None, description=( '[ClickHouse] SHALL return an error and prohibit user login if [LDAP] server is unreachable.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_Name = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.Name', @@ -325,9 +886,9 @@ RQ_SRS_007_LDAP_Configuration_Server_Name = Requirement( uid=None, description=( '[ClickHouse] SHALL not support empty string as a server name.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_Host = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.Host', @@ -339,9 +900,9 @@ RQ_SRS_007_LDAP_Configuration_Server_Host = Requirement( description=( '[ClickHouse] SHALL support `` parameter to specify [LDAP]\n' 'server hostname or IP, this parameter SHALL be mandatory and SHALL not be empty.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_Port = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.Port', @@ -352,9 +913,9 @@ RQ_SRS_007_LDAP_Configuration_Server_Port = Requirement( uid=None, description=( '[ClickHouse] SHALL support `` parameter to specify [LDAP] server port.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_Port_Default = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.Port.Default', @@ -365,9 +926,9 @@ RQ_SRS_007_LDAP_Configuration_Server_Port_Default = Requirement( uid=None, description=( '[ClickHouse] SHALL use default port number `636` if `enable_tls` is set to `yes` or `389` otherwise.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_AuthDN_Prefix = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Prefix', @@ -379,9 +940,9 @@ RQ_SRS_007_LDAP_Configuration_Server_AuthDN_Prefix = Requirement( description=( '[ClickHouse] SHALL support `` parameter to specify the prefix\n' 'of value used to construct the DN to bound to during authentication via [LDAP] server.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_AuthDN_Suffix = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Suffix', @@ -393,9 +954,9 @@ RQ_SRS_007_LDAP_Configuration_Server_AuthDN_Suffix = Requirement( description=( '[ClickHouse] SHALL support `` parameter to specify the suffix\n' 'of value used to construct the DN to bound to during authentication via [LDAP] server.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_AuthDN_Value = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Value', @@ -408,9 +969,9 @@ RQ_SRS_007_LDAP_Configuration_Server_AuthDN_Value = Requirement( '[ClickHouse] SHALL construct DN as 
`auth_dn_prefix + escape(user_name) + auth_dn_suffix` string.\n' '\n' "> This implies that auth_dn_suffix should usually have comma ',' as its first non-space character.\n" + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_EnableTLS = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS', @@ -421,9 +982,9 @@ RQ_SRS_007_LDAP_Configuration_Server_EnableTLS = Requirement( uid=None, description=( '[ClickHouse] SHALL support `` parameter to trigger the use of secure connection to the [LDAP] server.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_Default = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.Default', @@ -435,9 +996,9 @@ RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_Default = Requirement( description=( '[ClickHouse] SHALL use `yes` value as the default for `` parameter\n' 'to enable SSL/TLS `ldaps://` protocol.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_No = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.No', @@ -449,9 +1010,9 @@ RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_No = Requirement( description=( '[ClickHouse] SHALL support specifying `no` as the value of `` parameter to enable\n' 'plain text `ldap://` protocol.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_Yes = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.Yes', @@ -463,9 +1024,9 @@ RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_Yes = Requirement( description=( '[ClickHouse] SHALL support specifying `yes` as the value of `` parameter to enable\n' 'SSL/TLS `ldaps://` protocol.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_StartTLS = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.StartTLS', @@ -477,9 +1038,9 @@ RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_StartTLS = Requirement( description=( '[ClickHouse] SHALL support specifying `starttls` as the value of `` parameter to enable\n' 'legacy `StartTLS` protocol that used plain text `ldap://` protocol, upgraded to [TLS].\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion', @@ -491,9 +1052,9 @@ RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion = Requirement( description=( '[ClickHouse] SHALL support `` parameter to specify\n' 'the minimum protocol version of SSL/TLS.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion_Values = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion.Values', @@ -505,9 +1066,9 @@ RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion_Values = Requirem description=( '[ClickHouse] SHALL support specifying `ssl2`, `ssl3`, `tls1.0`, `tls1.1`, and `tls1.2`\n' 'as a value of the `` parameter.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion_Default = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion.Default', @@ -518,9 +1079,9 @@ RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion_Default = Require uid=None, description=( '[ClickHouse] SHALL set `tls1.2` as the default value of the `` parameter.\n' + '\n' ), - link=None - ) + link=None) 
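The `enable_tls` options above select the wire protocol, and together with `RQ.SRS-007.LDAP.Configuration.Server.Port.Default` they also determine the default port. The mapping below is inferred from the requirement text alone, not from ClickHouse source:

```python
# enable_tls value -> (protocol, default port), per the
# EnableTLS.Options.* requirements and
# RQ.SRS-007.LDAP.Configuration.Server.Port.Default.
ENABLE_TLS = {
    "yes":      ("ldaps://", 636),  # SSL/TLS; the default option
    "no":       ("ldap://",  389),  # plain text
    "starttls": ("ldap://",  389),  # plain text upgraded to TLS after connect
}
```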
RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert', @@ -532,9 +1093,9 @@ RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert = Requirement( description=( '[ClickHouse] SHALL support `` parameter to specify [TLS] peer\n' 'certificate verification behavior.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Default = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Default', @@ -545,9 +1106,9 @@ RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Default = Requiremen uid=None, description=( '[ClickHouse] SHALL use `demand` value as the default for the `` parameter.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Demand = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Demand', @@ -560,9 +1121,9 @@ RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Demand = Requirement '[ClickHouse] SHALL support specifying `demand` as the value of `` parameter to\n' 'enable requesting of client certificate. If no certificate is provided, or a bad certificate is\n' 'provided, the session SHALL be immediately terminated.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Allow = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Allow', @@ -576,9 +1137,9 @@ RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Allow = Requirement( 'enable requesting of client certificate. If no\n' 'certificate is provided, the session SHALL proceed normally.\n' 'If a bad certificate is provided, it SHALL be ignored and the session SHALL proceed normally.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Try = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Try', @@ -592,9 +1153,9 @@ RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Try = Requirement( 'enable requesting of client certificate. If no certificate is provided, the session\n' 'SHALL proceed normally. 
If a bad certificate is provided, the session SHALL be\n' 'immediately terminated.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Never = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Never', @@ -606,9 +1167,9 @@ RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Never = Requirement( description=( '[ClickHouse] SHALL support specifying `never` as the value of `` parameter to\n' 'disable requesting of client certificate.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_TLSCertFile = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.TLSCertFile', @@ -620,9 +1181,9 @@ RQ_SRS_007_LDAP_Configuration_Server_TLSCertFile = Requirement( description=( '[ClickHouse] SHALL support `` to specify the path to certificate file used by\n' '[ClickHouse] to establish connection with the [LDAP] server.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_TLSKeyFile = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.TLSKeyFile', @@ -634,9 +1195,9 @@ RQ_SRS_007_LDAP_Configuration_Server_TLSKeyFile = Requirement( description=( '[ClickHouse] SHALL support `` to specify the path to key file for the certificate\n' 'specified by the `` parameter.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_TLSCACertDir = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.TLSCACertDir', @@ -648,9 +1209,9 @@ RQ_SRS_007_LDAP_Configuration_Server_TLSCACertDir = Requirement( description=( '[ClickHouse] SHALL support `` parameter to specify to a path to\n' 'the directory containing [CA] certificates used to verify certificates provided by the [LDAP] server.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_TLSCACertFile = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.TLSCACertFile', @@ -662,9 +1223,9 @@ RQ_SRS_007_LDAP_Configuration_Server_TLSCACertFile = Requirement( description=( '[ClickHouse] SHALL support `` parameter to specify a path to a specific\n' '[CA] certificate file used to verify certificates provided by the [LDAP] server.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_TLSCipherSuite = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.TLSCipherSuite', @@ -685,9 +1246,9 @@ RQ_SRS_007_LDAP_Configuration_Server_TLSCipherSuite = Requirement( '\n' 'The available suites SHALL depend on the [OpenSSL] library version and variant used to build\n' '[ClickHouse] and therefore might change.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_Server_Syntax = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.Syntax', @@ -718,9 +1279,9 @@ RQ_SRS_007_LDAP_Configuration_Server_Syntax = Requirement( ' \n' '\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_User_RBAC = Requirement( name='RQ.SRS-007.LDAP.Configuration.User.RBAC', @@ -736,9 +1297,9 @@ RQ_SRS_007_LDAP_Configuration_User_RBAC = Requirement( '```sql\n' "CREATE USER name IDENTIFIED WITH ldap_server BY 'server_name'\n" '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_User_Syntax = Requirement( name='RQ.SRS-007.LDAP.Configuration.User.Syntax', @@ -762,9 +1323,9 @@ RQ_SRS_007_LDAP_Configuration_User_Syntax = Requirement( ' \n' '\n' '```\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_User_Name_Empty = Requirement( name='RQ.SRS-007.LDAP.Configuration.User.Name.Empty', @@ -775,9 +1336,9 @@ 
RQ_SRS_007_LDAP_Configuration_User_Name_Empty = Requirement( uid=None, description=( '[ClickHouse] SHALL not support empty string as a user name.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_User_BothPasswordAndLDAP = Requirement( name='RQ.SRS-007.LDAP.Configuration.User.BothPasswordAndLDAP', @@ -789,9 +1350,9 @@ RQ_SRS_007_LDAP_Configuration_User_BothPasswordAndLDAP = Requirement( description=( '[ClickHouse] SHALL throw an error if `` is specified for the user and at the same\n' 'time user configuration contains any of the `` entries.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_User_LDAP_InvalidServerName_NotDefined = Requirement( name='RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.NotDefined', @@ -801,12 +1362,12 @@ RQ_SRS_007_LDAP_Configuration_User_LDAP_InvalidServerName_NotDefined = Requireme type=None, uid=None, description=( - '[ClickHouse] SHALL throw an error during any authentification attempt\n' + '[ClickHouse] SHALL throw an error during any authentication attempt\n' 'if the name of the [LDAP] server used inside the `` entry\n' 'is not defined in the `` section.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_User_LDAP_InvalidServerName_Empty = Requirement( name='RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.Empty', @@ -816,12 +1377,12 @@ RQ_SRS_007_LDAP_Configuration_User_LDAP_InvalidServerName_Empty = Requirement( type=None, uid=None, description=( - '[ClickHouse] SHALL throw an error during any authentification attempt\n' + '[ClickHouse] SHALL throw an error during any authentication attempt\n' 'if the name of the [LDAP] server used inside the `` entry\n' 'is empty.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_User_OnlyOneServer = Requirement( name='RQ.SRS-007.LDAP.Configuration.User.OnlyOneServer', @@ -832,9 +1393,9 @@ RQ_SRS_007_LDAP_Configuration_User_OnlyOneServer = Requirement( uid=None, description=( '[ClickHouse] SHALL support specifying only one [LDAP] server for a given user.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_User_Name_Long = Requirement( name='RQ.SRS-007.LDAP.Configuration.User.Name.Long', @@ -846,9 +1407,9 @@ RQ_SRS_007_LDAP_Configuration_User_Name_Long = Requirement( description=( '[ClickHouse] SHALL support long user names of at least 256 bytes\n' 'to specify users that can be authenticated using an [LDAP] server.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Configuration_User_Name_UTF8 = Requirement( name='RQ.SRS-007.LDAP.Configuration.User.Name.UTF8', @@ -859,9 +1420,9 @@ RQ_SRS_007_LDAP_Configuration_User_Name_UTF8 = Requirement( uid=None, description=( '[ClickHouse] SHALL support user names that contain [UTF-8] characters.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_Username_Empty = Requirement( name='RQ.SRS-007.LDAP.Authentication.Username.Empty', @@ -872,9 +1433,9 @@ RQ_SRS_007_LDAP_Authentication_Username_Empty = Requirement( uid=None, description=( '[ClickHouse] SHALL not support authenticating users with empty username.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_Username_Long = Requirement( name='RQ.SRS-007.LDAP.Authentication.Username.Long', @@ -885,9 +1446,9 @@ RQ_SRS_007_LDAP_Authentication_Username_Long = Requirement( uid=None, description=( '[ClickHouse] SHALL support authenticating users with a long username of at least 256 bytes.\n' + '\n' ), - link=None - ) + link=None) 
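The user-facing side of these configuration requirements can be illustrated with the RBAC route quoted in RQ.SRS-007.LDAP.Configuration.User.RBAC above. This is a minimal sketch only: `ldap_user` and `my_ldap_server` are hypothetical names, and the server name must match an entry defined in the LDAP servers section of the main server configuration.

```sql
-- Sketch of the RBAC syntax from RQ.SRS-007.LDAP.Configuration.User.RBAC.
-- 'my_ldap_server' (hypothetical) must be defined in the server's LDAP configuration;
-- authentication is then delegated to that LDAP server on every login.
CREATE USER ldap_user IDENTIFIED WITH ldap_server BY 'my_ldap_server';
```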
RQ_SRS_007_LDAP_Authentication_Username_UTF8 = Requirement( name='RQ.SRS-007.LDAP.Authentication.Username.UTF8', @@ -898,9 +1459,9 @@ RQ_SRS_007_LDAP_Authentication_Username_UTF8 = Requirement( uid=None, description=( '[ClickHouse] SHALL support authentication users with a username that contains [UTF-8] characters.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_Password_Empty = Requirement( name='RQ.SRS-007.LDAP.Authentication.Password.Empty', @@ -913,9 +1474,9 @@ RQ_SRS_007_LDAP_Authentication_Password_Empty = Requirement( '[ClickHouse] SHALL not support authenticating users with empty passwords\n' 'even if an empty password is valid for the user and\n' 'is allowed by the [LDAP] server.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_Password_Long = Requirement( name='RQ.SRS-007.LDAP.Authentication.Password.Long', @@ -927,9 +1488,9 @@ RQ_SRS_007_LDAP_Authentication_Password_Long = Requirement( description=( '[ClickHouse] SHALL support long password of at least 256 bytes\n' 'that can be used to authenticate users using an [LDAP] server.\n' + '\n' ), - link=None - ) + link=None) RQ_SRS_007_LDAP_Authentication_Password_UTF8 = Requirement( name='RQ.SRS-007.LDAP.Authentication.Password.UTF8', @@ -941,6 +1502,6 @@ RQ_SRS_007_LDAP_Authentication_Password_UTF8 = Requirement( description=( '[ClickHouse] SHALL support [UTF-8] characters in passwords\n' 'used to authenticate users using an [LDAP] server.\n' + '\n' ), - link=None - ) + link=None) From eee81eef43610da23127c14fbf0d2a28bc936df2 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 28 Oct 2020 01:58:53 +0300 Subject: [PATCH 350/432] Remove dependency on "services" database --- contrib/mariadb-connector-c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/mariadb-connector-c b/contrib/mariadb-connector-c index f5638e954a7..1485b0de3ea 160000 --- a/contrib/mariadb-connector-c +++ b/contrib/mariadb-connector-c @@ -1 +1 @@ -Subproject commit f5638e954a79f50bac7c7a5deaa5a241e0ce8b5f +Subproject commit 1485b0de3eaa1508dfe49a5ba1e4aa2a71fd8335 From 309e1c622f2a6dfcf797d0fa369d6416c804dc15 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 28 Oct 2020 03:13:59 +0300 Subject: [PATCH 351/432] Added a test for #11364 --- .../0_stateless/01534_lambda_array_join.reference | 3 +++ .../queries/0_stateless/01534_lambda_array_join.sql | 12 ++++++++++++ 2 files changed, 15 insertions(+) create mode 100644 tests/queries/0_stateless/01534_lambda_array_join.reference create mode 100644 tests/queries/0_stateless/01534_lambda_array_join.sql diff --git a/tests/queries/0_stateless/01534_lambda_array_join.reference b/tests/queries/0_stateless/01534_lambda_array_join.reference new file mode 100644 index 00000000000..78e098be306 --- /dev/null +++ b/tests/queries/0_stateless/01534_lambda_array_join.reference @@ -0,0 +1,3 @@ +[NULL] +[1,1] +\N 70 diff --git a/tests/queries/0_stateless/01534_lambda_array_join.sql b/tests/queries/0_stateless/01534_lambda_array_join.sql new file mode 100644 index 00000000000..aee9dd1411a --- /dev/null +++ b/tests/queries/0_stateless/01534_lambda_array_join.sql @@ -0,0 +1,12 @@ +SELECT arrayMap(x -> concat(x, concat(arrayJoin([1]), x, NULL), ''), [1]); +SELECT arrayMap(x -> arrayJoin([1]), [1, 2]); + +SELECT + arrayJoin(arrayMap(x -> reinterpretAsUInt8(substring(randomString(range(randomString(1048577), NULL), arrayJoin(arrayMap(x -> reinterpretAsUInt8(substring(randomString(range(NULL), 65537), 255)), range(1))), substring(randomString(NULL), x + 7), 
'257'), 1025)), range(7))) AS byte, + count() AS c + FROM numbers(10) + GROUP BY + arrayMap(x -> reinterpretAsUInt8(substring(randomString(randomString(range(randomString(255), NULL)), NULL))), range(3)), + randomString(range(randomString(1048577), NULL), NULL), + byte + ORDER BY byte ASC; From 05099bebcf77a6c5ea958ad552ac11d07b2955e1 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Wed, 28 Oct 2020 02:50:27 +0300 Subject: [PATCH 352/432] fix --- src/Interpreters/InterpreterDropQuery.h | 2 +- tests/queries/0_stateless/01193_metadata_loading.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/InterpreterDropQuery.h b/src/Interpreters/InterpreterDropQuery.h index 1fc4acffa04..fe5362985de 100644 --- a/src/Interpreters/InterpreterDropQuery.h +++ b/src/Interpreters/InterpreterDropQuery.h @@ -30,7 +30,7 @@ private: Context & context; BlockIO executeToDatabase(const ASTDropQuery & query); - BlockIO executeToDatabaseImpl(const ASTDropQuery & query, DatabasePtr & db, std::vector & uuids_to_wait); + BlockIO executeToDatabaseImpl(const ASTDropQuery & query, DatabasePtr & database, std::vector & uuids_to_wait); BlockIO executeToTable(const ASTDropQuery & query); BlockIO executeToTableImpl(const ASTDropQuery & query, DatabasePtr & db, UUID & uuid_to_wait); diff --git a/tests/queries/0_stateless/01193_metadata_loading.sh b/tests/queries/0_stateless/01193_metadata_loading.sh index 0ee583a7265..319b537e84b 100755 --- a/tests/queries/0_stateless/01193_metadata_loading.sh +++ b/tests/queries/0_stateless/01193_metadata_loading.sh @@ -49,4 +49,4 @@ $CLICKHOUSE_CLIENT -q "SELECT if(quantile(0.5)(query_duration_ms) < $max_time_ms $CLICKHOUSE_CLIENT -q "SELECT count() * $count_multiplier, i, d, s, n.i, n.f FROM $db.table_merge GROUP BY i, d, s, n.i, n.f ORDER BY i" -$CLICKHOUSE_CLIENT -q "DROP DATABASE $db" +$CLICKHOUSE_CLIENT -q "DROP DATABASE $db" --database_atomic_wait_for_drop_and_detach_synchronously=0 From f573aeb97e4373956355c82fe35d12111dc2e7fe Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 28 Oct 2020 03:32:31 +0300 Subject: [PATCH 353/432] Whitespace --- src/Interpreters/DDLWorker.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index 32d0e25bde5..fc7f5c2f765 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -1246,7 +1246,6 @@ public: size_t num_unfinished_hosts = waiting_hosts.size() - num_hosts_finished; size_t num_active_hosts = current_active_hosts.size(); - throw Exception(ErrorCodes::TIMEOUT_EXCEEDED, "Watching task {} is executing longer than distributed_ddl_task_timeout (={}) seconds. 
" "There are {} unfinished hosts ({} of them are currently active), they are going to execute the query in background", From b11229c1b6c37bb11489c1df72e850d5421d7af6 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 28 Oct 2020 04:15:41 +0300 Subject: [PATCH 354/432] Add a test for #13338 --- .../01535_decimal_round_scale_overflow_check.reference | 0 .../0_stateless/01535_decimal_round_scale_overflow_check.sql | 1 + 2 files changed, 1 insertion(+) create mode 100644 tests/queries/0_stateless/01535_decimal_round_scale_overflow_check.reference create mode 100644 tests/queries/0_stateless/01535_decimal_round_scale_overflow_check.sql diff --git a/tests/queries/0_stateless/01535_decimal_round_scale_overflow_check.reference b/tests/queries/0_stateless/01535_decimal_round_scale_overflow_check.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01535_decimal_round_scale_overflow_check.sql b/tests/queries/0_stateless/01535_decimal_round_scale_overflow_check.sql new file mode 100644 index 00000000000..18509221203 --- /dev/null +++ b/tests/queries/0_stateless/01535_decimal_round_scale_overflow_check.sql @@ -0,0 +1 @@ +SELECT round(toDecimal32(1, 0), -9223372036854775806); -- { serverError 69 } From 7254f40ba89bea76242bac643a2ec2140e5b7c8b Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 28 Oct 2020 04:16:52 +0300 Subject: [PATCH 355/432] Add range check for "scale" argument of decimal rounding function to prevent overflow --- src/Functions/FunctionsRound.h | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/src/Functions/FunctionsRound.h b/src/Functions/FunctionsRound.h index 7a8304dbfa9..542463255d3 100644 --- a/src/Functions/FunctionsRound.h +++ b/src/Functions/FunctionsRound.h @@ -31,6 +31,7 @@ namespace ErrorCodes { extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int ARGUMENT_OUT_OF_BOUND; extern const int ILLEGAL_COLUMN; extern const int BAD_ARGUMENTS; } @@ -84,6 +85,9 @@ enum class TieBreakingMode Bankers, // use banker's rounding }; +/// For N, no more than the number of digits in the largest type. +using Scale = Int16; + /** Rounding functions for integer values. 
*/ @@ -416,7 +420,7 @@ private: using Container = typename ColumnDecimal::Container; public: - static NO_INLINE void apply(const Container & in, Container & out, Int64 scale_arg) + static NO_INLINE void apply(const Container & in, Container & out, Scale scale_arg) { scale_arg = in.getScale() - scale_arg; if (scale_arg > 0) @@ -458,7 +462,7 @@ class Dispatcher FloatRoundingImpl, IntegerRoundingImpl>; - static ColumnPtr apply(const ColumnVector * col, Int64 scale_arg) + static ColumnPtr apply(const ColumnVector * col, Scale scale_arg) { auto col_res = ColumnVector::create(); @@ -487,7 +491,7 @@ class Dispatcher return col_res; } - static ColumnPtr apply(const ColumnDecimal * col, Int64 scale_arg) + static ColumnPtr apply(const ColumnDecimal * col, Scale scale_arg) { const typename ColumnDecimal::Container & vec_src = col->getData(); @@ -501,7 +505,7 @@ class Dispatcher } public: - static ColumnPtr apply(const IColumn * column, Int64 scale_arg) + static ColumnPtr apply(const IColumn * column, Scale scale_arg) { if constexpr (IsNumber) return apply(checkAndGetColumn>(column), scale_arg); @@ -544,20 +548,25 @@ public: return arguments[0]; } - static Int64 getScaleArg(ColumnsWithTypeAndName & arguments) + static Scale getScaleArg(ColumnsWithTypeAndName & arguments) { if (arguments.size() == 2) { const IColumn & scale_column = *arguments[1].column; if (!isColumnConst(scale_column)) - throw Exception("Scale argument for rounding functions must be constant.", ErrorCodes::ILLEGAL_COLUMN); + throw Exception("Scale argument for rounding functions must be constant", ErrorCodes::ILLEGAL_COLUMN); Field scale_field = assert_cast(scale_column).getField(); if (scale_field.getType() != Field::Types::UInt64 && scale_field.getType() != Field::Types::Int64) - throw Exception("Scale argument for rounding functions must have integer type.", ErrorCodes::ILLEGAL_COLUMN); + throw Exception("Scale argument for rounding functions must have integer type", ErrorCodes::ILLEGAL_COLUMN); - return scale_field.get(); + Int64 scale64 = scale_field.get(); + if (scale64 > std::numeric_limits::max() + || scale64 < std::numeric_limits::min()) + throw Exception("Scale argument for rounding function is too large", ErrorCodes::ARGUMENT_OUT_OF_BOUND); + + return scale64; } return 0; } @@ -568,7 +577,7 @@ public: ColumnPtr executeImpl(ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override { const ColumnWithTypeAndName & column = arguments[0]; - Int64 scale_arg = getScaleArg(arguments); + Scale scale_arg = getScaleArg(arguments); ColumnPtr res; auto call = [&](const auto & types) -> bool From 690a3b431075f1676c67c661633708451e0908cd Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 28 Oct 2020 04:24:30 +0300 Subject: [PATCH 356/432] Fix test --- tests/queries/0_stateless/01526_client_start_and_exit.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01526_client_start_and_exit.sh b/tests/queries/0_stateless/01526_client_start_and_exit.sh index 0b3a2ee6b4f..c179be79d03 100755 --- a/tests/queries/0_stateless/01526_client_start_and_exit.sh +++ b/tests/queries/0_stateless/01526_client_start_and_exit.sh @@ -9,10 +9,11 @@ ${CLICKHOUSE_CLIENT} -q "SELECT 'CREATE TABLE test_' || hex(randomPrintableASCII function stress() { while true; do - "$CURDIR"/01526_client_start_and_exit.expect | grep -v -P 'ClickHouse client|Connecting|Connected|:\) Bye\.|^\s*$|spawn bash|^0\s*$' + "${CURDIR}"/01526_client_start_and_exit.expect | grep -v -P 'ClickHouse 
client|Connecting|Connected|:\) Bye\.|^\s*$|spawn bash|^0\s*$' done } +export CURDIR export -f stress for _ in {1..10}; do From dec51cdb0b1664660bbb9fea633b245e6df9b3e8 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 28 Oct 2020 04:27:10 +0300 Subject: [PATCH 357/432] Add a test for #13342 --- tests/queries/0_stateless/01536_fuzz_cast.reference | 0 tests/queries/0_stateless/01536_fuzz_cast.sql | 1 + 2 files changed, 1 insertion(+) create mode 100644 tests/queries/0_stateless/01536_fuzz_cast.reference create mode 100644 tests/queries/0_stateless/01536_fuzz_cast.sql diff --git a/tests/queries/0_stateless/01536_fuzz_cast.reference b/tests/queries/0_stateless/01536_fuzz_cast.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01536_fuzz_cast.sql b/tests/queries/0_stateless/01536_fuzz_cast.sql new file mode 100644 index 00000000000..436d76b5c4c --- /dev/null +++ b/tests/queries/0_stateless/01536_fuzz_cast.sql @@ -0,0 +1 @@ +SELECT CAST(arrayJoin([NULL, '', '', NULL, '', NULL, '01.02.2017 03:04\005GMT', '', NULL, '01/02/2017 03:04:05 MSK01/02/\0017 03:04:05 MSK', '', NULL, '03/04/201903/04/201903/04/\001903/04/2019']), 'Enum8(\'a\' = 1, \'b\' = 2)') AS x; -- { serverError 349 } From 81cd172af113911021c207dab81737081afd8e40 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 28 Oct 2020 04:31:47 +0300 Subject: [PATCH 358/432] Add a test for #13893 --- tests/queries/0_stateless/01537_fuzz_count_equal.reference | 1 + tests/queries/0_stateless/01537_fuzz_count_equal.sql | 1 + 2 files changed, 2 insertions(+) create mode 100644 tests/queries/0_stateless/01537_fuzz_count_equal.reference create mode 100644 tests/queries/0_stateless/01537_fuzz_count_equal.sql diff --git a/tests/queries/0_stateless/01537_fuzz_count_equal.reference b/tests/queries/0_stateless/01537_fuzz_count_equal.reference new file mode 100644 index 00000000000..dec7d2fabd2 --- /dev/null +++ b/tests/queries/0_stateless/01537_fuzz_count_equal.reference @@ -0,0 +1 @@ +\N diff --git a/tests/queries/0_stateless/01537_fuzz_count_equal.sql b/tests/queries/0_stateless/01537_fuzz_count_equal.sql new file mode 100644 index 00000000000..fde3fe19124 --- /dev/null +++ b/tests/queries/0_stateless/01537_fuzz_count_equal.sql @@ -0,0 +1 @@ +SELECT DISTINCT NULL = countEqual(materialize([arrayJoin([NULL, NULL, NULL]), NULL AS x, arrayJoin([255, 1025, NULL, NULL]), arrayJoin([2, 1048576, NULL, NULL])]), materialize(x)); From b767efd889667b6a677dc05686c4dec87d052639 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 28 Oct 2020 04:35:40 +0300 Subject: [PATCH 359/432] Add a test for #15540 --- .../queries/0_stateless/01538_fuzz_aggregate.reference | 0 tests/queries/0_stateless/01538_fuzz_aggregate.sql | 10 ++++++++++ 2 files changed, 10 insertions(+) create mode 100644 tests/queries/0_stateless/01538_fuzz_aggregate.reference create mode 100644 tests/queries/0_stateless/01538_fuzz_aggregate.sql diff --git a/tests/queries/0_stateless/01538_fuzz_aggregate.reference b/tests/queries/0_stateless/01538_fuzz_aggregate.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01538_fuzz_aggregate.sql b/tests/queries/0_stateless/01538_fuzz_aggregate.sql new file mode 100644 index 00000000000..13dadabda63 --- /dev/null +++ b/tests/queries/0_stateless/01538_fuzz_aggregate.sql @@ -0,0 +1,10 @@ +SELECT + count(), + sum(ns) +FROM +( + SELECT intDiv(number, NULL) AS k + FROM system.numbers_mt + GROUP BY k +) +ARRAY JOIN ns; -- { serverError 47 } From 
62e7e0637f6efda6f474bc451e3528be64b92d13 Mon Sep 17 00:00:00 2001 From: Denis Zhuravlev Date: Tue, 27 Oct 2020 23:34:19 -0300 Subject: [PATCH 360/432] test for #14144 --- .../01533_distinct_nullable_uuid.reference | 4 ++ .../01533_distinct_nullable_uuid.sql | 38 +++++++++++++++++++ 2 files changed, 42 insertions(+) create mode 100644 tests/queries/0_stateless/01533_distinct_nullable_uuid.reference create mode 100644 tests/queries/0_stateless/01533_distinct_nullable_uuid.sql diff --git a/tests/queries/0_stateless/01533_distinct_nullable_uuid.reference b/tests/queries/0_stateless/01533_distinct_nullable_uuid.reference new file mode 100644 index 00000000000..e02acad09d6 --- /dev/null +++ b/tests/queries/0_stateless/01533_distinct_nullable_uuid.reference @@ -0,0 +1,4 @@ +442d3ff4-842a-45bb-8b02-b616122c0dc6 +05fe40cb-1d0c-45b0-8e60-8e311c2463f1 +2fc89389-4728-4b30-9e51-b5bc3ad215f6 +10000 diff --git a/tests/queries/0_stateless/01533_distinct_nullable_uuid.sql b/tests/queries/0_stateless/01533_distinct_nullable_uuid.sql new file mode 100644 index 00000000000..926739d3f58 --- /dev/null +++ b/tests/queries/0_stateless/01533_distinct_nullable_uuid.sql @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS bug_14144; + +CREATE TABLE bug_14144 +( meta_source_req_uuid Nullable(UUID), + a Int64, + meta_source_type String +) +ENGINE = MergeTree +ORDER BY a; + +INSERT INTO bug_14144 SELECT cast(toUUID('442d3ff4-842a-45bb-8b02-b616122c0dc6'), 'Nullable(UUID)'), number, 'missing' FROM numbers(1000); + +INSERT INTO bug_14144 SELECT cast(toUUIDOrZero('2fc89389-4728-4b30-9e51-b5bc3ad215f6'), 'Nullable(UUID)'), number, 'missing' FROM numbers(1000); + +INSERT INTO bug_14144 SELECT cast(toUUIDOrNull('05fe40cb-1d0c-45b0-8e60-8e311c2463f1'), 'Nullable(UUID)'), number, 'missing' FROM numbers(1000); + +SELECT DISTINCT meta_source_req_uuid +FROM bug_14144 +WHERE meta_source_type = 'missing' +ORDER BY meta_source_req_uuid ASC; + +TRUNCATE TABLE bug_14144; + +INSERT INTO bug_14144 SELECT generateUUIDv4(), number, 'missing' FROM numbers(10000); + +SELECT COUNT() FROM ( + SELECT DISTINCT meta_source_req_uuid + FROM bug_14144 + WHERE meta_source_type = 'missing' + ORDER BY meta_source_req_uuid ASC + LIMIT 100000 +); + +DROP TABLE bug_14144; + + + + From 2b26cbbc97affb8b5678d2dc9dd004b813cf60cf Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 28 Oct 2020 10:01:57 +0300 Subject: [PATCH 361/432] Add missed file --- .../test_concurrent_ttl_merges/configs/log_conf.xml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 tests/integration/test_concurrent_ttl_merges/configs/log_conf.xml diff --git a/tests/integration/test_concurrent_ttl_merges/configs/log_conf.xml b/tests/integration/test_concurrent_ttl_merges/configs/log_conf.xml new file mode 100644 index 00000000000..318a6bca95d --- /dev/null +++ b/tests/integration/test_concurrent_ttl_merges/configs/log_conf.xml @@ -0,0 +1,12 @@ + + 3 + + trace + /var/log/clickhouse-server/log.log + /var/log/clickhouse-server/log.err.log + 1000M + 10 + /var/log/clickhouse-server/stderr.log + /var/log/clickhouse-server/stdout.log + + From a3b151ff5d1846b6172cbe6de1b0ca6389df46e3 Mon Sep 17 00:00:00 2001 From: Vladimir Chebotarev Date: Wed, 28 Oct 2020 13:05:30 +0300 Subject: [PATCH 362/432] Revert "Added redundant empty line." This reverts commit 81a5f540d7dcb949a1221d966490a00acee6de9d. 
--- tests/integration/test_multiple_disks/test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integration/test_multiple_disks/test.py b/tests/integration/test_multiple_disks/test.py index bc60553d005..07478d99657 100644 --- a/tests/integration/test_multiple_disks/test.py +++ b/tests/integration/test_multiple_disks/test.py @@ -1551,4 +1551,3 @@ def test_yes_merges_in_configuration_disallow_from_query_with_reload(start_clust finally: node1.query("SYSTEM START MERGES ON VOLUME {}.external".format(policy)) - From 499b18c240692e92dde5aa94186bb926d5c11d24 Mon Sep 17 00:00:00 2001 From: Vladimir Chebotarev Date: Wed, 28 Oct 2020 13:05:42 +0300 Subject: [PATCH 363/432] Revert "More diagnostics." This reverts commit dd84fb572fa5267472749b2c2f507c907ca6c3d0. --- tests/integration/test_multiple_disks/test.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/integration/test_multiple_disks/test.py b/tests/integration/test_multiple_disks/test.py index 07478d99657..b0159d16501 100644 --- a/tests/integration/test_multiple_disks/test.py +++ b/tests/integration/test_multiple_disks/test.py @@ -449,11 +449,8 @@ def test_jbod_overflow(start_cluster, name, engine): data.append(get_random_string(1024 * 1024)) # 1MB row node1.query("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data]))) - for p in ("/jbod1", "/jbod2", "/external"): - print(node1.exec_in_container([f"bash", "-c", f"find {p} | xargs -n1 du -sh"])) - used_disks = get_used_disks_for_table(node1, name) - assert set(used_disks) == {'jbod1'} + assert all(disk == 'jbod1' for disk in used_disks) # should go to the external disk (jbod is overflown) data = [] # 10MB in total From 8154695aa20e69b61e0225e4045f440e04ce63af Mon Sep 17 00:00:00 2001 From: Vladimir Chebotarev Date: Wed, 28 Oct 2020 13:05:48 +0300 Subject: [PATCH 364/432] Revert "Diagnostics (to be dropped)." This reverts commit fd48d1002914aa2127217cd9de9552d66dffc1f4. 
--- tests/integration/test_multiple_disks/test.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/integration/test_multiple_disks/test.py b/tests/integration/test_multiple_disks/test.py index b0159d16501..1a50e12a3f6 100644 --- a/tests/integration/test_multiple_disks/test.py +++ b/tests/integration/test_multiple_disks/test.py @@ -459,9 +459,6 @@ def test_jbod_overflow(start_cluster, name, engine): node1.query("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data]))) - for p in ("/jbod1", "/jbod2", "/external"): - print(node1.exec_in_container([f"bash", "-c", f"find {p} | xargs -n1 du -sh"])) - used_disks = get_used_disks_for_table(node1, name) assert used_disks[-1] == 'external' From ff686700291f9765db614c3ba1a01d3042415c67 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Thu, 22 Oct 2020 19:43:36 +0300 Subject: [PATCH 365/432] add missing __init__.py --- tests/integration/test_system_flush_logs/__init__.py | 0 .../{test_SYSTEM_FLUSH_LOGS => test_system_flush_logs}/test.py | 0 tests/integration/test_system_queries/test.py | 2 +- 3 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 tests/integration/test_system_flush_logs/__init__.py rename tests/integration/{test_SYSTEM_FLUSH_LOGS => test_system_flush_logs}/test.py (100%) diff --git a/tests/integration/test_system_flush_logs/__init__.py b/tests/integration/test_system_flush_logs/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_SYSTEM_FLUSH_LOGS/test.py b/tests/integration/test_system_flush_logs/test.py similarity index 100% rename from tests/integration/test_SYSTEM_FLUSH_LOGS/test.py rename to tests/integration/test_system_flush_logs/test.py diff --git a/tests/integration/test_system_queries/test.py b/tests/integration/test_system_queries/test.py index 7f5bce97805..b159e8b4cf3 100644 --- a/tests/integration/test_system_queries/test.py +++ b/tests/integration/test_system_queries/test.py @@ -107,7 +107,7 @@ def test_RELOAD_CONFIG_AND_MACROS(started_cluster): assert TSV(instance.query("select * from system.macros")) == TSV("instance\tch1\nmac\tro\n") -def test_SYSTEM_FLUSH_LOGS(started_cluster): +def test_system_flush_logs(started_cluster): instance = cluster.instances['ch1'] instance.query(''' SET log_queries = 0; From 3c31a5134e158d8155ea2a9e0c97c4e9e2585dbf Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 28 Oct 2020 14:03:26 +0300 Subject: [PATCH 366/432] Fix bug --- src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index a1f20016cb3..880ad4dd0d3 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -490,7 +490,7 @@ int32_t ReplicatedMergeTreeQueue::pullLogsToQueue(zkutil::ZooKeeperPtr zookeeper { std::sort(log_entries.begin(), log_entries.end()); - for (size_t entry_idx = 0, num_entries = log_entries.size(); entry_idx < num_entries; entry_idx += current_multi_batch_size) + for (size_t entry_idx = 0, num_entries = log_entries.size(); entry_idx < num_entries;) { auto begin = log_entries.begin() + entry_idx; auto end = entry_idx + current_multi_batch_size >= log_entries.size() @@ -498,6 +498,9 @@ int32_t ReplicatedMergeTreeQueue::pullLogsToQueue(zkutil::ZooKeeperPtr zookeeper : (begin + current_multi_batch_size); auto last = end - 1; + /// Increment entry_idx before batch size increase 
(we copied at most current_multi_batch_size entries) + entry_idx += current_multi_batch_size; + /// Increase the batch size exponentially, so it will saturate to MAX_MULTI_OPS. if (current_multi_batch_size < MAX_MULTI_OPS) current_multi_batch_size = std::min(MAX_MULTI_OPS, current_multi_batch_size * 2); From d126024248f0bd50aa1ce9e1149e5a66a9bcc9e7 Mon Sep 17 00:00:00 2001 From: Mikhail Filimonov Date: Wed, 28 Oct 2020 13:58:38 +0100 Subject: [PATCH 367/432] Docker alpine build --- docker/server/.dockerignore | 8 ++ docker/server/.gitignore | 1 + docker/server/Dockerfile.alpine | 26 +++++ docker/server/alpine-build.sh | 59 +++++++++++ docker/server/entrypoint.alpine.sh | 152 +++++++++++++++++++++++++++++ 5 files changed, 246 insertions(+) create mode 100644 docker/server/.dockerignore create mode 100644 docker/server/.gitignore create mode 100644 docker/server/Dockerfile.alpine create mode 100755 docker/server/alpine-build.sh create mode 100755 docker/server/entrypoint.alpine.sh diff --git a/docker/server/.dockerignore b/docker/server/.dockerignore new file mode 100644 index 00000000000..468a8cafb00 --- /dev/null +++ b/docker/server/.dockerignore @@ -0,0 +1,8 @@ +# post / preinstall scripts (not needed, we do it in Dockerfile) +alpine-root/install/* + +# docs (looks useless) +alpine-root/usr/share/doc/* + +# packages, etc. (used by prepare.sh) +alpine-root/tgz-packages/* \ No newline at end of file diff --git a/docker/server/.gitignore b/docker/server/.gitignore new file mode 100644 index 00000000000..4081b5f124c --- /dev/null +++ b/docker/server/.gitignore @@ -0,0 +1 @@ +alpine-root/* \ No newline at end of file diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine new file mode 100644 index 00000000000..fc2756eac8c --- /dev/null +++ b/docker/server/Dockerfile.alpine @@ -0,0 +1,26 @@ +FROM alpine + +ENV LANG=en_US.UTF-8 \ + LANGUAGE=en_US:en \ + LC_ALL=en_US.UTF-8 \ + TZ=UTC \ + CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml + +COPY alpine-root/ / + +# from https://github.com/ClickHouse/ClickHouse/blob/master/debian/clickhouse-server.postinst +RUN addgroup clickhouse \ + && adduser -S -H -h /nonexistent -s /bin/false -G clickhouse -g "ClickHouse server" clickhouse \ + && chown clickhouse:clickhouse /var/lib/clickhouse \ + && chmod 700 /var/lib/clickhouse \ + && chown root:clickhouse /var/log/clickhouse-server \ + && chmod 775 /var/log/clickhouse-server \ + && chmod +x /entrypoint.sh \ + && apk add --no-cache su-exec + +EXPOSE 9000 8123 9009 + +VOLUME /var/lib/clickhouse \ + /var/log/clickhouse-server + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/server/alpine-build.sh b/docker/server/alpine-build.sh new file mode 100755 index 00000000000..30101225b3e --- /dev/null +++ b/docker/server/alpine-build.sh @@ -0,0 +1,59 @@ +#!/bin/bash +set -x + +REPO_CHANNEL="${REPO_CHANNEL:-stable}" # lts / testing / prestable / etc +REPO_URL="${REPO_URL:-"https://repo.yandex.ru/clickhouse/tgz/${REPO_CHANNEL}"}" +VERSION="${VERSION:-20.9.3.45}" + +# where original files live +DOCKER_BUILD_FOLDER="${BASH_SOURCE%/*}" + +# we will create root for our image here +CONTAINER_ROOT_FOLDER="${DOCKER_BUILD_FOLDER}/alpine-root" + +# where to put downloaded tgz +TGZ_PACKAGES_FOLDER="${CONTAINER_ROOT_FOLDER}/tgz-packages" + +# clean up the root from old runs +rm -rf "$CONTAINER_ROOT_FOLDER" + +mkdir -p "$TGZ_PACKAGES_FOLDER" + +PACKAGES=( "clickhouse-client" "clickhouse-server" "clickhouse-common-static" ) + +# download tars from the repo +for package in "${PACKAGES[@]}" +do + wget -q 
--show-progress "${REPO_URL}/${package}-${VERSION}.tgz" -O "${TGZ_PACKAGES_FOLDER}/${package}-${VERSION}.tgz"
+done
+
+# unpack tars
+for package in "${PACKAGES[@]}"
+do
+    tar xvzf "${TGZ_PACKAGES_FOLDER}/${package}-${VERSION}.tgz" --strip-components=2 -C "$CONTAINER_ROOT_FOLDER"
+done
+
+# prepare a few more folders
+mkdir -p "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/users.d" \
+         "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/config.d" \
+         "${CONTAINER_ROOT_FOLDER}/var/log/clickhouse-server" \
+         "${CONTAINER_ROOT_FOLDER}/var/lib/clickhouse" \
+         "${CONTAINER_ROOT_FOLDER}/docker-entrypoint-initdb.d" \
+         "${CONTAINER_ROOT_FOLDER}/lib64"
+
+cp "${DOCKER_BUILD_FOLDER}/docker_related_config.xml" "${CONTAINER_ROOT_FOLDER}/etc/clickhouse-server/config.d/"
+cp "${DOCKER_BUILD_FOLDER}/entrypoint.alpine.sh" "${CONTAINER_ROOT_FOLDER}/entrypoint.sh"
+
+## get glibc components from ubuntu 20.04 and put them in the expected place
+docker pull ubuntu:20.04
+ubuntu20image=$(docker create --rm ubuntu:20.04)
+docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libc.so.6 "${CONTAINER_ROOT_FOLDER}/lib"
+docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libdl.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
+docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libm.so.6 "${CONTAINER_ROOT_FOLDER}/lib"
+docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libpthread.so.0 "${CONTAINER_ROOT_FOLDER}/lib"
+docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/librt.so.1 "${CONTAINER_ROOT_FOLDER}/lib"
+docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libnss_dns.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
+docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libresolv.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
+docker cp -L ${ubuntu20image}:/lib64/ld-linux-x86-64.so.2 "${CONTAINER_ROOT_FOLDER}/lib64"
+
+docker build "$DOCKER_BUILD_FOLDER" -f Dockerfile.alpine -t "yandex/clickhouse-server:${VERSION}-alpine" --pull
\ No newline at end of file
diff --git a/docker/server/entrypoint.alpine.sh b/docker/server/entrypoint.alpine.sh
new file mode 100755
index 00000000000..e2edda9ca26
--- /dev/null
+++ b/docker/server/entrypoint.alpine.sh
@@ -0,0 +1,152 @@
+#!/bin/sh
+#set -x
+
+DO_CHOWN=1
+if [ "$CLICKHOUSE_DO_NOT_CHOWN" = 1 ]; then
+    DO_CHOWN=0
+fi
+
+CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
+CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
+
+# support --user
+if [ "$(id -u)" = "0" ]; then
+    USER=$CLICKHOUSE_UID
+    GROUP=$CLICKHOUSE_GID
+    # busybox has setuidgid & chpst built in
+    gosu="su-exec $USER:$GROUP"
+else
+    USER="$(id -u)"
+    GROUP="$(id -g)"
+    gosu=""
+    DO_CHOWN=0
+fi
+
+# set some vars
+CLICKHOUSE_CONFIG="${CLICKHOUSE_CONFIG:-/etc/clickhouse-server/config.xml}"
+
+# port is needed to check if clickhouse-server is ready for connections
+HTTP_PORT="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=http_port)"
+
+# get CH directory locations
+DATA_DIR="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=path || true)"
+TMP_DIR="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=tmp_path || true)"
+USER_PATH="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=user_files_path || true)"
+LOG_PATH="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=logger.log || true)"
+LOG_DIR="$(dirname $LOG_PATH || true)"
+ERROR_LOG_PATH="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=logger.errorlog || true)"
+ERROR_LOG_DIR="$(dirname $ERROR_LOG_PATH || true)"
+FORMAT_SCHEMA_PATH="$(clickhouse
extract-from-config --config-file $CLICKHOUSE_CONFIG --key=format_schema_path || true)" + +CLICKHOUSE_USER="${CLICKHOUSE_USER:-default}" +CLICKHOUSE_PASSWORD="${CLICKHOUSE_PASSWORD:-}" +CLICKHOUSE_DB="${CLICKHOUSE_DB:-}" + +for dir in "$DATA_DIR" \ + "$ERROR_LOG_DIR" \ + "$LOG_DIR" \ + "$TMP_DIR" \ + "$USER_PATH" \ + "$FORMAT_SCHEMA_PATH" +do + # check if variable not empty + [ -z "$dir" ] && continue + # ensure directories exist + if ! mkdir -p "$dir"; then + echo "Couldn't create necessary directory: $dir" + exit 1 + fi + + if [ "$DO_CHOWN" = "1" ]; then + # ensure proper directories permissions + chown -R "$USER:$GROUP" "$dir" + elif [ "$(stat -c %u "$dir")" != "$USER" ]; then + echo "Necessary directory '$dir' isn't owned by user with id '$USER'" + exit 1 + fi +done + +# if clickhouse user is defined - create it (user "default" already exists out of box) +if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CLICKHOUSE_PASSWORD" ]; then + echo "$0: create new user '$CLICKHOUSE_USER' instead 'default'" + cat < /etc/clickhouse-server/users.d/default-user.xml + + + + + + + + <${CLICKHOUSE_USER}> + default + + ::/0 + + ${CLICKHOUSE_PASSWORD} + default + + + +EOT +fi + +if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then + # Listen only on localhost until the initialization is done + $gosu /usr/bin/clickhouse-server --config-file=$CLICKHOUSE_CONFIG -- --listen_host=127.0.0.1 & + pid="$!" + + # check if clickhouse is ready to accept connections + # will try to send ping clickhouse via http_port (max 6 retries, with 1 sec timeout and 1 sec delay between retries) + tries=6 + while ! wget --spider -T 1 -q "http://localhost:$HTTP_PORT/ping" 2>/dev/null; do + if [ "$tries" -le "0" ]; then + echo >&2 'ClickHouse init process failed.' + exit 1 + fi + tries=$(( tries-1 )) + sleep 1 + done + + if [ ! -z "$CLICKHOUSE_PASSWORD" ]; then + printf -v WITH_PASSWORD '%s %q' "--password" "$CLICKHOUSE_PASSWORD" + fi + + clickhouseclient="clickhouse-client --multiquery -u $CLICKHOUSE_USER $WITH_PASSWORD " + + # create default database, if defined + if [ -n "$CLICKHOUSE_DB" ]; then + echo "$0: create database '$CLICKHOUSE_DB'" + "$clickhouseclient" -q "CREATE DATABASE IF NOT EXISTS $CLICKHOUSE_DB"; + fi + + for f in /docker-entrypoint-initdb.d/*; do + case "$f" in + *.sh) + if [ -x "$f" ]; then + echo "$0: running $f" + "$f" + else + echo "$0: sourcing $f" + . "$f" + fi + ;; + *.sql) echo "$0: running $f"; cat "$f" | "$clickhouseclient" ; echo ;; + *.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "$clickhouseclient"; echo ;; + *) echo "$0: ignoring $f" ;; + esac + echo + done + + if ! kill -s TERM "$pid" || ! wait "$pid"; then + echo >&2 'Finishing of ClickHouse init process failed.' 
+        exit 1
+    fi
+fi
+
+# if no args passed to `docker run` or first argument starts with `--`, then the user is passing clickhouse-server arguments
+if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
+    exec $gosu /usr/bin/clickhouse-server --config-file=$CLICKHOUSE_CONFIG "$@"
+fi
+
+# Otherwise, we assume the user wants to run their own process, for example a `bash` shell to explore this image
+exec "$@"

From 2d28d97233622beb8d7a97299e8c7a437ee52dfc Mon Sep 17 00:00:00 2001
From: filimonov <1549571+filimonov@users.noreply.github.com>
Date: Wed, 28 Oct 2020 15:13:27 +0100
Subject: [PATCH 368/432] Update other-functions.md

---
 docs/en/sql-reference/functions/other-functions.md | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md
index 518479fb728..2cc80dcffc1 100644
--- a/docs/en/sql-reference/functions/other-functions.md
+++ b/docs/en/sql-reference/functions/other-functions.md
@@ -626,7 +626,12 @@ neighbor(column, offset[, default_value])
 ```
 
 The result of the function depends on the affected data blocks and the order of data in the block.
-If you make a subquery with ORDER BY and call the function from outside the subquery, you can get the expected result.
+
+!!! warning "Warning"
+    It can reach the neighbor rows only inside the currently processed data block.
+
+The order of rows used during the calculation of `neighbor` can differ from the order of rows returned to the user.
+To prevent that, you can make a subquery with ORDER BY and call the function from outside the subquery.
 
 **Parameters**
 
@@ -731,8 +736,13 @@ Result:
 
 Calculates the difference between successive row values in the data block.
 Returns 0 for the first row and the difference from the previous row for each subsequent row.
 
+!!! warning "Warning"
+    It can reach the previous row only inside the currently processed data block.
+
 The result of the function depends on the affected data blocks and the order of data in the block.
-If you make a subquery with ORDER BY and call the function from outside the subquery, you can get the expected result.
+
+The order of rows used during the calculation of `runningDifference` can differ from the order of rows returned to the user.
+To prevent that, you can make a subquery with ORDER BY and call the function from outside the subquery.
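The workaround described above is easier to see with a concrete query. A minimal sketch — the table `events` and column `ts` are hypothetical:

```sql
-- Hypothetical table and column; the inner ORDER BY fixes the row order that
-- runningDifference() sees, instead of relying on the order blocks happen to
-- arrive in. The same shape applies to neighbor().
SELECT
    ts,
    runningDifference(ts) AS delta
FROM
(
    SELECT ts
    FROM events
    ORDER BY ts ASC
);
```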
Example: From a1f1db753b7705177f9472181f816e2a036289f8 Mon Sep 17 00:00:00 2001 From: tavplubix Date: Wed, 28 Oct 2020 18:23:10 +0300 Subject: [PATCH 369/432] Update CompressedReadBufferBase.cpp --- src/Compression/CompressedReadBufferBase.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Compression/CompressedReadBufferBase.cpp b/src/Compression/CompressedReadBufferBase.cpp index be2f697e1b3..7a6b605d015 100644 --- a/src/Compression/CompressedReadBufferBase.cpp +++ b/src/Compression/CompressedReadBufferBase.cpp @@ -185,9 +185,9 @@ void CompressedReadBufferBase::decompress(char * to, size_t size_decompressed, s } else { - throw Exception("Data compressed with different methods, given method byte " + throw Exception("Data compressed with different methods, given method byte 0x" + getHexUIntLowercase(method) - + ", previous method byte " + + ", previous method byte 0x" + getHexUIntLowercase(codec->getMethodByte()), ErrorCodes::CANNOT_DECOMPRESS); } From 885bd847201befc61f0063a113ab2eb8ad575741 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 28 Oct 2020 20:48:02 +0300 Subject: [PATCH 370/432] Remove trash from CMakeLists --- CMakeLists.txt | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 21cc74bbd2b..783a9f80b66 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -59,25 +59,6 @@ set(CMAKE_DEBUG_POSTFIX "d" CACHE STRING "Generate debug library name with a pos # For more info see https://cmake.org/cmake/help/latest/prop_gbl/USE_FOLDERS.html set_property(GLOBAL PROPERTY USE_FOLDERS ON) -# cmake 3.9+ needed. -# Usually impractical. -# See also ${ENABLE_THINLTO} -option(ENABLE_IPO "Full link time optimization") - -if(ENABLE_IPO) - cmake_policy(SET CMP0069 NEW) - include(CheckIPOSupported) - check_ipo_supported(RESULT IPO_SUPPORTED OUTPUT IPO_NOT_SUPPORTED) - if(IPO_SUPPORTED) - message(STATUS "IPO/LTO is supported, enabling") - set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE) - else() - message (${RECONFIGURE_MESSAGE_LEVEL} "IPO/LTO is not supported: <${IPO_NOT_SUPPORTED}>") - endif() -else() - message(STATUS "IPO/LTO not enabled.") -endif() - # Check that submodules are present only if source was downloaded with git if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git" AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/boost/boost") message (FATAL_ERROR "Submodules are not initialized. 
Run\n\tgit submodule update --init --recursive") From 6eaba28e012310ec89827529d054f58bfd538955 Mon Sep 17 00:00:00 2001 From: Kruglov Pavel <48961922+Avogar@users.noreply.github.com> Date: Wed, 28 Oct 2020 22:44:37 +0300 Subject: [PATCH 371/432] Trigger CI --- tests/integration/test_disabled_mysql_server/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_disabled_mysql_server/test.py b/tests/integration/test_disabled_mysql_server/test.py index df5f123c6bd..a2cbcb17534 100644 --- a/tests/integration/test_disabled_mysql_server/test.py +++ b/tests/integration/test_disabled_mysql_server/test.py @@ -27,7 +27,7 @@ class MySQLNodeInstance: self.port = port self.hostname = hostname self.password = password - self.mysql_connection = None # lazy init + self.mysql_connection = None # lazy init def alloc_connection(self): if self.mysql_connection is None: From f995ef9797875354d9d0c3d01756a395b685303f Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Tue, 27 Oct 2020 16:55:57 +0800 Subject: [PATCH 372/432] Balanced reading from JBOD --- src/Core/Settings.h | 2 + .../MergeTree/MergeTreeDataSelectExecutor.cpp | 2 +- src/Storages/MergeTree/MergeTreeReadPool.cpp | 90 +++++++++++++++---- src/Storages/MergeTree/MergeTreeReadPool.h | 11 ++- 4 files changed, 84 insertions(+), 21 deletions(-) diff --git a/src/Core/Settings.h b/src/Core/Settings.h index b94883ca871..0c5d5957dac 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -169,6 +169,8 @@ class IColumn; M(Milliseconds, read_backoff_min_interval_between_events_ms, 1000, "Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time.", 0) \ M(UInt64, read_backoff_min_events, 2, "Settings to reduce the number of threads in case of slow reads. 
The number of events after which the number of threads will be reduced.", 0) \ \ + M(UInt64, read_backoff_min_concurrency, 1, "Settings to try keeping the minimal number of threads in case of slow reads.", 0) \ + \ M(Float, memory_tracker_fault_probability, 0., "For testing of `exception safety` - throw an exception every time you allocate memory with the specified probability.", 0) \ \ M(Bool, enable_http_compression, 0, "Compress the result if the client over HTTP said that it understands data compressed by gzip or deflate.", 0) \ diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 8c1dc845d26..e08ae40a13d 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -898,7 +898,7 @@ Pipe MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreams( num_streams, sum_marks, min_marks_for_concurrent_read, - parts, + std::move(parts), data, metadata_snapshot, query_info.prewhere_info, diff --git a/src/Storages/MergeTree/MergeTreeReadPool.cpp b/src/Storages/MergeTree/MergeTreeReadPool.cpp index d78f72d1dd0..5ab2cbf2b88 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPool.cpp @@ -21,7 +21,7 @@ MergeTreeReadPool::MergeTreeReadPool( const size_t threads_, const size_t sum_marks_, const size_t min_marks_for_concurrent_read_, - RangesInDataParts parts_, + RangesInDataParts && parts_, const MergeTreeData & data_, const StorageMetadataPtr & metadata_snapshot_, const PrewhereInfoPtr & prewhere_info_, @@ -38,11 +38,11 @@ MergeTreeReadPool::MergeTreeReadPool( , do_not_steal_tasks{do_not_steal_tasks_} , predict_block_size_bytes{preferred_block_size_bytes_ > 0} , prewhere_info{prewhere_info_} - , parts_ranges{parts_} + , parts_ranges{std::move(parts_)} { /// parts don't contain duplicate MergeTreeDataPart's. - const auto per_part_sum_marks = fillPerPartInfo(parts_, check_columns_); - fillPerThreadInfo(threads_, sum_marks_, per_part_sum_marks, parts_, min_marks_for_concurrent_read_); + const auto per_part_sum_marks = fillPerPartInfo(parts_ranges, check_columns_); + fillPerThreadInfo(threads_, sum_marks_, per_part_sum_marks, parts_ranges, min_marks_for_concurrent_read_); } @@ -62,7 +62,24 @@ MergeTreeReadTaskPtr MergeTreeReadPool::getTask(const size_t min_marks_to_read, return nullptr; /// Steal task if nothing to do and it's not prohibited - const auto thread_idx = tasks_remaining_for_this_thread ? 
thread : *std::begin(remaining_thread_tasks);
+    auto thread_idx = thread;
+    if (!tasks_remaining_for_this_thread)
+    {
+        auto it = remaining_thread_tasks.lower_bound(backoff_state.current_threads);
+        // Grab all tasks of a thread that was killed by backoff
+        if (it != remaining_thread_tasks.end())
+        {
+            threads_tasks[thread] = std::move(threads_tasks[*it]);
+            remaining_thread_tasks.erase(it);
+        }
+        else // Try to steal tasks from the next thread
+        {
+            it = remaining_thread_tasks.upper_bound(thread);
+            if (it == remaining_thread_tasks.end())
+                it = remaining_thread_tasks.begin();
+            thread_idx = *it;
+        }
+    }
     auto & thread_tasks = threads_tasks[thread_idx];
 
     auto & thread_task = thread_tasks.parts_and_ranges.back();
@@ -163,7 +180,7 @@ void MergeTreeReadPool::profileFeedback(const ReadBufferFromFileBase::ProfileInf
 
     std::lock_guard lock(mutex);
 
-    if (backoff_state.current_threads <= 1)
+    if (backoff_state.current_threads <= backoff_settings.min_concurrency)
         return;
 
     size_t throughput = info.bytes_read * 1000000000 / info.nanoseconds;
@@ -194,14 +211,14 @@ void MergeTreeReadPool::profileFeedback(const ReadBufferFromFileBase::ProfileInf
 
 
 std::vector MergeTreeReadPool::fillPerPartInfo(
-    RangesInDataParts & parts, const bool check_columns)
+    const RangesInDataParts & parts, const bool check_columns)
 {
     std::vector per_part_sum_marks;
     Block sample_block = metadata_snapshot->getSampleBlock();
 
     for (const auto i : ext::range(0, parts.size()))
     {
-        auto & part = parts[i];
+        const auto & part = parts[i];
 
         /// Read marks for every data part.
         size_t sum_marks = 0;
@@ -238,21 +255,63 @@ std::vector MergeTreeReadPool::fillPerPartInfo(
 
 void MergeTreeReadPool::fillPerThreadInfo(
     const size_t threads, const size_t sum_marks, std::vector per_part_sum_marks,
-    RangesInDataParts & parts, const size_t min_marks_for_concurrent_read)
+    const RangesInDataParts & parts, const size_t min_marks_for_concurrent_read)
 {
     threads_tasks.resize(threads);
+    if (parts.empty())
+        return;
+
+    struct PartInfo
+    {
+        RangesInDataPart part;
+        size_t sum_marks;
+        size_t part_idx;
+    };
+    std::map> parts_per_disk;
+
+    for (size_t i = 0; i < parts.size(); ++i)
+    {
+        PartInfo part_info{parts[i], per_part_sum_marks[i], i};
+        if (parts[i].data_part->isStoredOnDisk())
+            parts_per_disk[parts[i].data_part->volume->getDisk()->getName()].push_back(std::move(part_info));
+        else
+            parts_per_disk[""].push_back(std::move(part_info));
+    }
 
     const size_t min_marks_per_thread = (sum_marks - 1) / threads + 1;
 
-    for (size_t i = 0; i < threads && !parts.empty(); ++i)
+    auto it = std::prev(parts_per_disk.end());
+    auto * current_parts = &it->second;
+
+    auto get_next_parts = [&]()
+    {
+        size_t n = parts_per_disk.size();
+        do
+        {
+            ++it;
+            if (it == parts_per_disk.end())
+                it = parts_per_disk.begin();
+            current_parts = &it->second;
+        } while (current_parts->empty() && --n);
+        return !current_parts->empty();
+    };
+
+    auto get_current_parts = [&]()
+    {
+        if (!current_parts->empty())
+            return true;
+        return get_next_parts();
+    };
+
+    for (size_t i = 0; i < threads && get_next_parts(); ++i)
     {
         auto need_marks = min_marks_per_thread;
 
-        while (need_marks > 0 && !parts.empty())
+        while (need_marks > 0 && get_current_parts())
         {
-            const auto part_idx = parts.size() - 1;
-            RangesInDataPart & part = parts.back();
-            size_t & marks_in_part = per_part_sum_marks.back();
+            RangesInDataPart & part = current_parts->back().part;
+            size_t & marks_in_part = current_parts->back().sum_marks;
+            const auto part_idx = current_parts->back().part_idx;
 
             /// Do not get too few rows from part.
            if (marks_in_part >= min_marks_for_concurrent_read &&
@@ -274,8 +333,7 @@ void MergeTreeReadPool::fillPerThreadInfo(
                 marks_in_ranges = marks_in_part;
                 need_marks -= marks_in_part;
-                parts.pop_back();
-                per_part_sum_marks.pop_back();
+                current_parts->pop_back();
             }
             else
             {
diff --git a/src/Storages/MergeTree/MergeTreeReadPool.h b/src/Storages/MergeTree/MergeTreeReadPool.h
index c0b04c6a228..aa6811661e6 100644
--- a/src/Storages/MergeTree/MergeTreeReadPool.h
+++ b/src/Storages/MergeTree/MergeTreeReadPool.h
@@ -36,13 +36,16 @@ public:
         size_t min_interval_between_events_ms = 1000;
         /// Number of events to do backoff - to lower number of threads in pool.
         size_t min_events = 2;
+        /// Try keeping the minimal number of threads in pool.
+        size_t min_concurrency = 1;
 
         /// Constants above is just an example.
         BackoffSettings(const Settings & settings)
             : min_read_latency_ms(settings.read_backoff_min_latency_ms.totalMilliseconds()),
             max_throughput(settings.read_backoff_max_throughput),
             min_interval_between_events_ms(settings.read_backoff_min_interval_between_events_ms.totalMilliseconds()),
-            min_events(settings.read_backoff_min_events)
+            min_events(settings.read_backoff_min_events),
+            min_concurrency(settings.read_backoff_min_concurrency)
         {
         }
 
@@ -68,7 +71,7 @@ private:
 public:
     MergeTreeReadPool(
         const size_t threads_, const size_t sum_marks_, const size_t min_marks_for_concurrent_read_,
-        RangesInDataParts parts_, const MergeTreeData & data_, const StorageMetadataPtr & metadata_snapshot_, const PrewhereInfoPtr & prewhere_info_,
+        RangesInDataParts && parts_, const MergeTreeData & data_, const StorageMetadataPtr & metadata_snapshot_, const PrewhereInfoPtr & prewhere_info_,
         const bool check_columns_, const Names & column_names_,
         const BackoffSettings & backoff_settings_, size_t preferred_block_size_bytes_,
         const bool do_not_steal_tasks_ = false);
@@ -88,11 +91,11 @@ public:
 
 private:
     std::vector fillPerPartInfo(
-        RangesInDataParts & parts, const bool check_columns);
+        const RangesInDataParts & parts, const bool check_columns);
 
     void fillPerThreadInfo(
         const size_t threads, const size_t sum_marks, std::vector per_part_sum_marks,
-        RangesInDataParts & parts, const size_t min_marks_for_concurrent_read);
+        const RangesInDataParts & parts, const size_t min_marks_for_concurrent_read);
 
     const MergeTreeData & data;
     StorageMetadataPtr metadata_snapshot;

From 10bad32fb405cdf9c073875235c678c83e0bcfd8 Mon Sep 17 00:00:00 2001
From: Nikolai Kochetov
Date: Thu, 29 Oct 2020 00:52:26 +0300
Subject: [PATCH 373/432] Refactor code a little bit. Add comment.

---
 src/Storages/MergeTree/MergeTreeReadPool.cpp | 75 ++++++++++----------
 1 file changed, 39 insertions(+), 36 deletions(-)

diff --git a/src/Storages/MergeTree/MergeTreeReadPool.cpp b/src/Storages/MergeTree/MergeTreeReadPool.cpp
index 5ab2cbf2b88..6ce47af87c5 100644
--- a/src/Storages/MergeTree/MergeTreeReadPool.cpp
+++ b/src/Storages/MergeTree/MergeTreeReadPool.cpp
@@ -267,51 +267,41 @@ void MergeTreeReadPool::fillPerThreadInfo(
         size_t sum_marks;
         size_t part_idx;
     };
-    std::map> parts_per_disk;
-    for (size_t i = 0; i < parts.size(); ++i)
+    using PartsInfo = std::vector;
+    std::queue parts_queue;
+
     {
-        PartInfo part_info{parts[i], per_part_sum_marks[i], i};
-        if (parts[i].data_part->isStoredOnDisk())
-            parts_per_disk[parts[i].data_part->volume->getDisk()->getName()].push_back(std::move(part_info));
-        else
-            parts_per_disk[""].push_back(std::move(part_info));
+        /// Group parts by volume name.
+        /// We try to minimize the number of threads concurrently reading from the same volume.
+ /// It improves the performance for JBOD architecture. + std::map> parts_per_disk; + + for (size_t i = 0; i < parts.size(); ++i) + { + PartInfo part_info{parts[i], per_part_sum_marks[i], i}; + if (parts[i].data_part->isStoredOnDisk()) + parts_per_disk[parts[i].data_part->volume->getDisk()->getName()].push_back(std::move(part_info)); + else + parts_per_disk[""].push_back(std::move(part_info)); + } + + for (auto & info : parts_per_disk) + parts_queue.push(std::move(info.second)); } const size_t min_marks_per_thread = (sum_marks - 1) / threads + 1; - auto it = std::prev(parts_per_disk.end()); - auto * current_parts = &it->second; - - auto get_next_parts = [&]() - { - size_t n = parts_per_disk.size(); - do - { - ++it; - if (it == parts_per_disk.end()) - it = parts_per_disk.begin(); - current_parts = &it->second; - } while (current_parts->empty() && --n); - return !current_parts->empty(); - }; - - auto get_current_parts = [&]() - { - if (!current_parts->empty()) - return true; - return get_next_parts(); - }; - - for (size_t i = 0; i < threads && get_next_parts(); ++i) + for (size_t i = 0; i < threads && !parts_queue.empty(); ++i) { auto need_marks = min_marks_per_thread; - while (need_marks > 0 && get_current_parts()) + while (need_marks > 0 && !parts_queue.empty()) { - RangesInDataPart & part = current_parts->back().part; - size_t & marks_in_part = current_parts->back().sum_marks; - const auto part_idx = current_parts->back().part_idx; + auto & current_parts = parts_queue.front(); + RangesInDataPart & part = current_parts.back().part; + size_t & marks_in_part = current_parts.back().sum_marks; + const auto part_idx = current_parts.back().part_idx; /// Do not get too few rows from part. if (marks_in_part >= min_marks_for_concurrent_read && @@ -333,7 +323,9 @@ void MergeTreeReadPool::fillPerThreadInfo( marks_in_ranges = marks_in_part; need_marks -= marks_in_part; - current_parts->pop_back(); + current_parts.pop_back(); + if (current_parts.empty()) + parts_queue.pop(); } else { @@ -362,6 +354,17 @@ void MergeTreeReadPool::fillPerThreadInfo( if (marks_in_ranges != 0) remaining_thread_tasks.insert(i); } + + /// Before processing next thread, change volume if possible. + /// Different threads will likely start reading from different volumes, + /// which may improve read parallelism for JBOD. + /// It also may be helpful in case we have backoff threads. + /// Backoff threads will likely reduce load for different disks, not the same one.
+ if (parts_queue.size() > 1) + { + parts_queue.push(std::move(parts_queue.front())); + parts_queue.pop(); + } } } From 5531bbdc980b32e49bb28f903de7dfad1cbd7f10 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 29 Oct 2020 01:21:32 +0300 Subject: [PATCH 374/432] Update version_date.tsv after release 20.10.3.30 --- utils/list-versions/version_date.tsv | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index f7d6536a890..3b2681a4ec2 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,3 +1,4 @@ +v20.10.3.30-stable 2020-10-29 v20.10.2.20-stable 2020-10-23 v20.9.3.45-stable 2020-10-09 v20.9.2.20-stable 2020-09-22 From e094b001d309a38bc887873923949dd9e72c35e0 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 29 Oct 2020 02:01:30 +0300 Subject: [PATCH 375/432] Update version_date.tsv after release 20.9.4.76 --- utils/list-versions/version_date.tsv | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 3b2681a4ec2..c58c64e249c 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,5 +1,6 @@ v20.10.3.30-stable 2020-10-29 v20.10.2.20-stable 2020-10-23 +v20.9.4.76-stable 2020-10-29 v20.9.3.45-stable 2020-10-09 v20.9.2.20-stable 2020-09-22 v20.8.4.11-lts 2020-10-09 From 3e8f399f0bf5884e49506bd266c5f1f346b49dbc Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 29 Oct 2020 05:15:01 +0300 Subject: [PATCH 376/432] Update test --- tests/queries/0_stateless/00161_rounding_functions.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/00161_rounding_functions.sql b/tests/queries/0_stateless/00161_rounding_functions.sql index 460129d2e9d..cc3542338bb 100644 --- a/tests/queries/0_stateless/00161_rounding_functions.sql +++ b/tests/queries/0_stateless/00161_rounding_functions.sql @@ -44,4 +44,4 @@ SELECT 12345.6789 AS x, floor(x, -1), floor(x, -2), floor(x, -3), floor(x, -4), SELECT roundToExp2(100), roundToExp2(64), roundToExp2(3), roundToExp2(0), roundToExp2(-1); SELECT roundToExp2(0.9), roundToExp2(0), roundToExp2(-0.5), roundToExp2(-0.6), roundToExp2(-0.2); -SELECT ceil(29375422, -54212) --{serverError 36} +SELECT ceil(29375422, -54212) --{serverError 69} From c2ca5d29a85b343d1e2bf558db3b4d07c3d4c2ea Mon Sep 17 00:00:00 2001 From: feng lv Date: Thu, 29 Oct 2020 02:40:39 +0000 Subject: [PATCH 377/432] fix build --- src/Interpreters/InterpreterDropQuery.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index 8bf8675b15d..144e045ecee 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -250,7 +250,7 @@ BlockIO InterpreterDropQuery::executeToDatabase(const ASTDropQuery & query) { if (query.no_delay) { - for (const auto table_uuid : tables_to_wait) + for (const auto & table_uuid : tables_to_wait) waitForTableToBeActuallyDroppedOrDetached(query, database, table_uuid); } throw; @@ -258,7 +258,7 @@ BlockIO InterpreterDropQuery::executeToDatabase(const ASTDropQuery & query) if (query.no_delay) { - for (const auto table_uuid : tables_to_wait) + for (const auto & table_uuid : tables_to_wait) waitForTableToBeActuallyDroppedOrDetached(query, database, table_uuid); } return res; From e9f5eefc8c751e4af55503a6736c3aa4b40c55cf Mon Sep 17 00:00:00 2001 From: 
Vladimir Chebotarev Date: Thu, 29 Oct 2020 08:28:32 +0300 Subject: [PATCH 378/432] Really fixed `test_multiple_disks::test_background_move`. --- tests/integration/test_multiple_disks/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_multiple_disks/test.py b/tests/integration/test_multiple_disks/test.py index 1a50e12a3f6..bb90ce4519d 100644 --- a/tests/integration/test_multiple_disks/test.py +++ b/tests/integration/test_multiple_disks/test.py @@ -493,7 +493,7 @@ def test_background_move(start_cluster, name, engine): SETTINGS storage_policy='moving_jbod_with_external' """.format(name=name, engine=engine)) - node1.query(f"SYSTEM START MERGES {name}") + node1.query(f"SYSTEM STOP MERGES {name}") for i in range(5): data = [] # 5MB in total From 745cb4ab2f8d5b04e2b2481a9f9bc585cb583807 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 29 Oct 2020 09:57:42 +0300 Subject: [PATCH 379/432] Update version_date.tsv after release 20.8.5.45 --- utils/list-versions/version_date.tsv | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index c58c64e249c..37e3e412a63 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -3,6 +3,7 @@ v20.10.2.20-stable 2020-10-23 v20.9.4.76-stable 2020-10-29 v20.9.3.45-stable 2020-10-09 v20.9.2.20-stable 2020-09-22 +v20.8.5.45-lts 2020-10-29 v20.8.4.11-lts 2020-10-09 v20.8.3.18-stable 2020-09-18 v20.8.2.3-stable 2020-09-08 From 671d2b7f1b793b8f233a9685e469277c2a7079dc Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 29 Oct 2020 10:04:23 +0300 Subject: [PATCH 380/432] Update MergeTreeReadPool.cpp Fix comment --- src/Storages/MergeTree/MergeTreeReadPool.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeReadPool.cpp b/src/Storages/MergeTree/MergeTreeReadPool.cpp index 6ce47af87c5..e44ff500c88 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPool.cpp @@ -272,8 +272,8 @@ void MergeTreeReadPool::fillPerThreadInfo( std::queue parts_queue; { - /// Group parts by volume name. - /// We try to minimize the number of threads concurrently reading from the same volume. + /// Group parts by disk name. + /// We try to minimize the number of threads concurrently reading from the same disk. /// It improves the performance for JBOD architecture. std::map> parts_per_disk; @@ -355,8 +355,8 @@ void MergeTreeReadPool::fillPerThreadInfo( remaining_thread_tasks.insert(i); } - /// Before processing next thread, change volume if possible. - /// Different threads will likely start reading from different volumes, + /// Before processing next thread, change disk if possible. + /// Different threads will likely start reading from different disks, which may improve read parallelism for JBOD. It also may be helpful in case we have backoff threads. Backoff threads will likely reduce load for different disks, not the same one. From bb1ac2af8218b606a96f730eb7a1419b06a3c8ab Mon Sep 17 00:00:00 2001 From: taichong Date: Thu, 29 Oct 2020 14:53:32 +0800 Subject: [PATCH 381/432] Shrink sequence GTID set. When using MySQL Master -> MySQL Slave -> ClickHouse MaterializeMySQL Engine, and the MySQL Slave has slave_parallel_worker enabled, the GTID set in .metadata won't shrink.
Like this: https://github.com/ClickHouse/ClickHouse/issues/15951 --- src/Core/MySQL/MySQLGtid.cpp | 12 ++++++++++++ src/Core/MySQL/MySQLGtid.h | 2 ++ src/Core/tests/mysql_protocol.cpp | 11 +++++++++++ 3 files changed, 25 insertions(+) diff --git a/src/Core/MySQL/MySQLGtid.cpp b/src/Core/MySQL/MySQLGtid.cpp index df26eb7aa08..6e50998da62 100644 --- a/src/Core/MySQL/MySQLGtid.cpp +++ b/src/Core/MySQL/MySQLGtid.cpp @@ -85,6 +85,9 @@ void GTIDSets::update(const GTID & other) ErrorCodes::LOGICAL_ERROR); } + /// Try to shrink the Sequence interval. + GTIDSet::tryShrink(set, i, current); + /// Sequence, extend the interval. if (other.seq_no == current.end) { @@ -116,6 +119,15 @@ void GTIDSets::update(const GTID & other) sets.emplace_back(set); } +void GTIDSet::tryShrink(GTIDSet & set, unsigned int i, GTIDSet::Interval & current) +{ + if (i != set.intervals.size() - 1) { + auto & next = set.intervals[i + 1]; + if (current.end == next.start) + set.tryMerge(i); + } +} + String GTIDSets::toString() const { WriteBufferFromOwnString buffer; diff --git a/src/Core/MySQL/MySQLGtid.h b/src/Core/MySQL/MySQLGtid.h index d228e269872..cd8cd0a2e98 100644 --- a/src/Core/MySQL/MySQLGtid.h +++ b/src/Core/MySQL/MySQLGtid.h @@ -26,6 +26,8 @@ public: std::vector intervals; void tryMerge(size_t i); + + static void tryShrink(GTIDSet & set, unsigned int i, Interval & current); }; class GTIDSets diff --git a/src/Core/tests/mysql_protocol.cpp b/src/Core/tests/mysql_protocol.cpp index 6cad095fc85..7e6aae5da23 100644 --- a/src/Core/tests/mysql_protocol.cpp +++ b/src/Core/tests/mysql_protocol.cpp @@ -260,6 +260,17 @@ int main(int argc, char ** argv) "10662d71-9d91-11ea-bbc2-0242ac110003:6-7", "20662d71-9d91-11ea-bbc2-0242ac110003:9", "10662d71-9d91-11ea-bbc2-0242ac110003:6-7,20662d71-9d91-11ea-bbc2-0242ac110003:9"}, + + {"shrink-sequence", + "10662d71-9d91-11ea-bbc2-0242ac110003:1-3:4-5:7", + "10662d71-9d91-11ea-bbc2-0242ac110003:6", + "10662d71-9d91-11ea-bbc2-0242ac110003:1-7"}, + + {"shrink-sequence", + "10662d71-9d91-11ea-bbc2-0242ac110003:1-3:4-5:10", + "10662d71-9d91-11ea-bbc2-0242ac110003:8", + "10662d71-9d91-11ea-bbc2-0242ac110003:1-5:8:10" + } }; for (auto & tc : cases) From d3f08b21de47065de5687401fe4e28fd72ad46f9 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Thu, 29 Oct 2020 11:28:40 +0300 Subject: [PATCH 382/432] CMake: moved the PowerPC check into tools.cmake --- cmake/arch.cmake | 4 ---- cmake/tools.cmake | 6 ++++++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/cmake/arch.cmake b/cmake/arch.cmake index 57ed42295bb..9604ef62b31 100644 --- a/cmake/arch.cmake +++ b/cmake/arch.cmake @@ -16,8 +16,4 @@ endif () if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(ppc64le.*|PPC64LE.*)") set (ARCH_PPC64LE 1) - # FIXME: move this check into tools.cmake - if (COMPILER_CLANG OR (COMPILER_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8)) - message(FATAL_ERROR "Only gcc-8 or higher is supported for powerpc architecture") - endif () endif () diff --git a/cmake/tools.cmake b/cmake/tools.cmake index 6f07cc2439c..12ffa8eb5fd 100644 --- a/cmake/tools.cmake +++ b/cmake/tools.cmake @@ -80,3 +80,9 @@ if (LINKER_NAME) message(STATUS "Using custom linker by name: ${LINKER_NAME}") endif () + +if (ARCH_PPC64LE) + if (COMPILER_CLANG OR (COMPILER_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8)) + message(FATAL_ERROR "Only gcc-8 or higher is supported for powerpc architecture") + endif () +endif () \ No newline at end of file From 4189d1f9c2c7e0100fbef73d51ca0a8b0b0c2edc Mon Sep 17 00:00:00 2001 From: Vladimir Chebotarev Date: Thu, 29 Oct 2020
16:15:19 +0300 Subject: [PATCH 383/432] Another attempt to fix `test_jbod_overflow`. --- .../configs/config.d/storage_configuration.xml | 2 +- tests/integration/test_multiple_disks/test.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml b/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml index b091adf9ec5..5b916dab984 100644 --- a/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml +++ b/tests/integration/test_multiple_disks/configs/config.d/storage_configuration.xml @@ -23,7 +23,7 @@
jbod1 - 0.09 + 0.08
external diff --git a/tests/integration/test_multiple_disks/test.py b/tests/integration/test_multiple_disks/test.py index 1a50e12a3f6..24ee6c0493b 100644 --- a/tests/integration/test_multiple_disks/test.py +++ b/tests/integration/test_multiple_disks/test.py @@ -440,7 +440,7 @@ def test_jbod_overflow(start_cluster, name, engine): SETTINGS storage_policy='small_jbod_with_external' """.format(name=name, engine=engine)) - node1.query("SYSTEM STOP MERGES") + node1.query(f"SYSTEM STOP MERGES {name}") # small jbod size is 40MB, so lets insert 5MB batch 7 times for i in range(7): @@ -463,7 +463,7 @@ def test_jbod_overflow(start_cluster, name, engine): assert used_disks[-1] == 'external' - node1.query("SYSTEM START MERGES") + node1.query(f"SYSTEM START MERGES {name}") time.sleep(1) node1.query("OPTIMIZE TABLE {} FINAL".format(name)) From 2a66c17472387c09afc3ac0556b1c4b4a8c90564 Mon Sep 17 00:00:00 2001 From: tavplubix Date: Thu, 29 Oct 2020 16:37:59 +0300 Subject: [PATCH 384/432] Update InterpreterDropQuery.cpp --- src/Interpreters/InterpreterDropQuery.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp index 8bf8675b15d..144e045ecee 100644 --- a/src/Interpreters/InterpreterDropQuery.cpp +++ b/src/Interpreters/InterpreterDropQuery.cpp @@ -250,7 +250,7 @@ BlockIO InterpreterDropQuery::executeToDatabase(const ASTDropQuery & query) { if (query.no_delay) { - for (const auto table_uuid : tables_to_wait) + for (const auto & table_uuid : tables_to_wait) waitForTableToBeActuallyDroppedOrDetached(query, database, table_uuid); } throw; @@ -258,7 +258,7 @@ BlockIO InterpreterDropQuery::executeToDatabase(const ASTDropQuery & query) if (query.no_delay) { - for (const auto table_uuid : tables_to_wait) + for (const auto & table_uuid : tables_to_wait) waitForTableToBeActuallyDroppedOrDetached(query, database, table_uuid); } return res; From 3627fabfb9e2445a32fbed81eef9e79a00de76ef Mon Sep 17 00:00:00 2001 From: Maxim Akhmedov Date: Thu, 29 Oct 2020 17:37:23 +0300 Subject: [PATCH 385/432] Remove -g0 from Arcadia build settings.
--- src/Access/ya.make | 1 - src/Access/ya.make.in | 1 - src/AggregateFunctions/ya.make | 1 - src/AggregateFunctions/ya.make.in | 1 - src/Client/ya.make | 1 - src/Client/ya.make.in | 1 - src/Columns/ya.make | 1 - src/Common/ya.make | 1 - src/Common/ya.make.in | 1 - src/Compression/ya.make | 1 - src/Compression/ya.make.in | 1 - src/Core/ya.make | 1 - src/Core/ya.make.in | 1 - src/DataStreams/ya.make | 1 - src/DataStreams/ya.make.in | 1 - src/DataTypes/ya.make | 1 - src/DataTypes/ya.make.in | 1 - src/Databases/ya.make | 1 - src/Databases/ya.make.in | 1 - src/Dictionaries/ya.make | 1 - src/Dictionaries/ya.make.in | 1 - src/Disks/S3/ya.make | 1 - src/Disks/ya.make | 1 - src/Disks/ya.make.in | 1 - src/Formats/ya.make | 1 - src/Formats/ya.make.in | 1 - src/Functions/ya.make | 1 - src/Functions/ya.make.in | 1 - src/IO/ya.make | 1 - src/IO/ya.make.in | 1 - src/Interpreters/ya.make | 1 - src/Interpreters/ya.make.in | 1 - src/Parsers/ya.make | 1 - src/Parsers/ya.make.in | 1 - src/Processors/ya.make | 1 - src/Processors/ya.make.in | 1 - src/Server/ya.make | 1 - src/Server/ya.make.in | 1 - src/Storages/ya.make | 1 - src/Storages/ya.make.in | 1 - src/TableFunctions/ya.make | 1 - src/TableFunctions/ya.make.in | 1 - 42 files changed, 42 deletions(-) diff --git a/src/Access/ya.make b/src/Access/ya.make index b945c5a192a..1ec8cb32c97 100644 --- a/src/Access/ya.make +++ b/src/Access/ya.make @@ -5,7 +5,6 @@ PEERDIR( clickhouse/src/Common ) -CFLAGS(-g0) SRCS( AccessControlManager.cpp diff --git a/src/Access/ya.make.in b/src/Access/ya.make.in index e48d0d1bda7..ce7cd88b272 100644 --- a/src/Access/ya.make.in +++ b/src/Access/ya.make.in @@ -4,7 +4,6 @@ PEERDIR( clickhouse/src/Common ) -CFLAGS(-g0) SRCS( diff --git a/src/AggregateFunctions/ya.make b/src/AggregateFunctions/ya.make index 1578e0c80ea..8d27cf986d0 100644 --- a/src/AggregateFunctions/ya.make +++ b/src/AggregateFunctions/ya.make @@ -5,7 +5,6 @@ PEERDIR( clickhouse/src/Common ) -CFLAGS(-g0) SRCS( AggregateFunctionAggThrow.cpp diff --git a/src/AggregateFunctions/ya.make.in b/src/AggregateFunctions/ya.make.in index 4c2943b0539..dd49b679d28 100644 --- a/src/AggregateFunctions/ya.make.in +++ b/src/AggregateFunctions/ya.make.in @@ -4,7 +4,6 @@ PEERDIR( clickhouse/src/Common ) -CFLAGS(-g0) SRCS( diff --git a/src/Client/ya.make b/src/Client/ya.make index b07e5afc0fb..07cc6725308 100644 --- a/src/Client/ya.make +++ b/src/Client/ya.make @@ -6,7 +6,6 @@ PEERDIR( contrib/libs/poco/NetSSL_OpenSSL ) -CFLAGS(-g0) SRCS( Connection.cpp diff --git a/src/Client/ya.make.in b/src/Client/ya.make.in index 704a05d8f3b..d8faff9ae1a 100644 --- a/src/Client/ya.make.in +++ b/src/Client/ya.make.in @@ -5,7 +5,6 @@ PEERDIR( contrib/libs/poco/NetSSL_OpenSSL ) -CFLAGS(-g0) SRCS( diff --git a/src/Columns/ya.make b/src/Columns/ya.make index 78c0e1b992d..1463bbc69e2 100644 --- a/src/Columns/ya.make +++ b/src/Columns/ya.make @@ -13,7 +13,6 @@ PEERDIR( contrib/libs/pdqsort ) -CFLAGS(-g0) SRCS( Collator.cpp diff --git a/src/Common/ya.make b/src/Common/ya.make index fb04ecaa141..b19a5183201 100644 --- a/src/Common/ya.make +++ b/src/Common/ya.make @@ -21,7 +21,6 @@ PEERDIR( INCLUDE(${ARCADIA_ROOT}/clickhouse/cmake/yandex/ya.make.versions.inc) -CFLAGS(-g0) SRCS( ActionLock.cpp diff --git a/src/Common/ya.make.in b/src/Common/ya.make.in index f8b7601e215..49c8baa5eec 100644 --- a/src/Common/ya.make.in +++ b/src/Common/ya.make.in @@ -20,7 +20,6 @@ PEERDIR( INCLUDE(${ARCADIA_ROOT}/clickhouse/cmake/yandex/ya.make.versions.inc) -CFLAGS(-g0) SRCS( diff --git a/src/Compression/ya.make 
b/src/Compression/ya.make index a17e2029b8f..8ffcb6be547 100644 --- a/src/Compression/ya.make +++ b/src/Compression/ya.make @@ -12,7 +12,6 @@ PEERDIR( contrib/libs/zstd ) -CFLAGS(-g0) SRCS( CachedCompressedReadBuffer.cpp diff --git a/src/Compression/ya.make.in b/src/Compression/ya.make.in index 780ea72b3ec..3c46b036aa0 100644 --- a/src/Compression/ya.make.in +++ b/src/Compression/ya.make.in @@ -11,7 +11,6 @@ PEERDIR( contrib/libs/zstd ) -CFLAGS(-g0) SRCS( diff --git a/src/Core/ya.make b/src/Core/ya.make index 424566d212a..626662e992c 100644 --- a/src/Core/ya.make +++ b/src/Core/ya.make @@ -7,7 +7,6 @@ PEERDIR( contrib/restricted/boost/libs ) -CFLAGS(-g0) SRCS( BackgroundSchedulePool.cpp diff --git a/src/Core/ya.make.in b/src/Core/ya.make.in index 95c4e32995d..b2e82663c1e 100644 --- a/src/Core/ya.make.in +++ b/src/Core/ya.make.in @@ -6,7 +6,6 @@ PEERDIR( contrib/restricted/boost/libs ) -CFLAGS(-g0) SRCS( diff --git a/src/DataStreams/ya.make b/src/DataStreams/ya.make index 0c46e42d456..adef8246f33 100644 --- a/src/DataStreams/ya.make +++ b/src/DataStreams/ya.make @@ -8,7 +8,6 @@ PEERDIR( NO_COMPILER_WARNINGS() -CFLAGS(-g0) SRCS( AddingDefaultBlockOutputStream.cpp diff --git a/src/DataStreams/ya.make.in b/src/DataStreams/ya.make.in index 268719112ac..7aa2fe4874e 100644 --- a/src/DataStreams/ya.make.in +++ b/src/DataStreams/ya.make.in @@ -7,7 +7,6 @@ PEERDIR( NO_COMPILER_WARNINGS() -CFLAGS(-g0) SRCS( diff --git a/src/DataTypes/ya.make b/src/DataTypes/ya.make index 20a63bb7727..97b600f70ba 100644 --- a/src/DataTypes/ya.make +++ b/src/DataTypes/ya.make @@ -6,7 +6,6 @@ PEERDIR( clickhouse/src/Formats ) -CFLAGS(-g0) SRCS( convertMySQLDataType.cpp diff --git a/src/DataTypes/ya.make.in b/src/DataTypes/ya.make.in index f1983be1032..05170178925 100644 --- a/src/DataTypes/ya.make.in +++ b/src/DataTypes/ya.make.in @@ -5,7 +5,6 @@ PEERDIR( clickhouse/src/Formats ) -CFLAGS(-g0) SRCS( diff --git a/src/Databases/ya.make b/src/Databases/ya.make index b4173057e03..e3c5daeb6bc 100644 --- a/src/Databases/ya.make +++ b/src/Databases/ya.make @@ -5,7 +5,6 @@ PEERDIR( clickhouse/src/Common ) -CFLAGS(-g0) SRCS( DatabaseAtomic.cpp diff --git a/src/Databases/ya.make.in b/src/Databases/ya.make.in index e48d0d1bda7..ce7cd88b272 100644 --- a/src/Databases/ya.make.in +++ b/src/Databases/ya.make.in @@ -4,7 +4,6 @@ PEERDIR( clickhouse/src/Common ) -CFLAGS(-g0) SRCS( diff --git a/src/Dictionaries/ya.make b/src/Dictionaries/ya.make index 485d8b0a16d..9edf156c015 100644 --- a/src/Dictionaries/ya.make +++ b/src/Dictionaries/ya.make @@ -12,7 +12,6 @@ PEERDIR( NO_COMPILER_WARNINGS() -CFLAGS(-g0) SRCS( CacheDictionary.cpp diff --git a/src/Dictionaries/ya.make.in b/src/Dictionaries/ya.make.in index 3eb8e728643..2c0735d38a4 100644 --- a/src/Dictionaries/ya.make.in +++ b/src/Dictionaries/ya.make.in @@ -11,7 +11,6 @@ PEERDIR( NO_COMPILER_WARNINGS() -CFLAGS(-g0) SRCS( diff --git a/src/Disks/S3/ya.make b/src/Disks/S3/ya.make index b32adee0d26..17425f6e69a 100644 --- a/src/Disks/S3/ya.make +++ b/src/Disks/S3/ya.make @@ -4,7 +4,6 @@ PEERDIR( clickhouse/src/Common ) -CFLAGS(-g0) SRCS( DiskS3.cpp diff --git a/src/Disks/ya.make b/src/Disks/ya.make index f01348ff945..d14bc0d05c8 100644 --- a/src/Disks/ya.make +++ b/src/Disks/ya.make @@ -5,7 +5,6 @@ PEERDIR( clickhouse/src/Common ) -CFLAGS(-g0) SRCS( createVolume.cpp diff --git a/src/Disks/ya.make.in b/src/Disks/ya.make.in index 9ed04e23f83..ee13bb272cd 100644 --- a/src/Disks/ya.make.in +++ b/src/Disks/ya.make.in @@ -4,7 +4,6 @@ PEERDIR( clickhouse/src/Common ) -CFLAGS(-g0) SRCS( diff --git 
a/src/Formats/ya.make b/src/Formats/ya.make index b4f7b073e21..8e797de39f8 100644 --- a/src/Formats/ya.make +++ b/src/Formats/ya.make @@ -7,7 +7,6 @@ PEERDIR( contrib/libs/protoc ) -CFLAGS(-g0) SRCS( FormatFactory.cpp diff --git a/src/Formats/ya.make.in b/src/Formats/ya.make.in index 51c8bfde2f8..f7d03e7b00f 100644 --- a/src/Formats/ya.make.in +++ b/src/Formats/ya.make.in @@ -6,7 +6,6 @@ PEERDIR( contrib/libs/protoc ) -CFLAGS(-g0) SRCS( diff --git a/src/Functions/ya.make b/src/Functions/ya.make index ed03f5175ab..4c2cbaf5b1f 100644 --- a/src/Functions/ya.make +++ b/src/Functions/ya.make @@ -32,7 +32,6 @@ PEERDIR( ) # "Arcadia" build is slightly deficient. It lacks many libraries that we need. -CFLAGS(-g0) SRCS( abs.cpp diff --git a/src/Functions/ya.make.in b/src/Functions/ya.make.in index 2a66aa5553e..be90a8b6c7a 100644 --- a/src/Functions/ya.make.in +++ b/src/Functions/ya.make.in @@ -31,7 +31,6 @@ PEERDIR( ) # "Arcadia" build is slightly deficient. It lacks many libraries that we need. -CFLAGS(-g0) SRCS( diff --git a/src/IO/ya.make b/src/IO/ya.make index 28099818b46..3796494ff33 100644 --- a/src/IO/ya.make +++ b/src/IO/ya.make @@ -8,7 +8,6 @@ PEERDIR( contrib/libs/poco/NetSSL_OpenSSL ) -CFLAGS(-g0) SRCS( AIOContextPool.cpp diff --git a/src/IO/ya.make.in b/src/IO/ya.make.in index 1b3ca7d6d35..b566644f78b 100644 --- a/src/IO/ya.make.in +++ b/src/IO/ya.make.in @@ -7,7 +7,6 @@ PEERDIR( contrib/libs/poco/NetSSL_OpenSSL ) -CFLAGS(-g0) SRCS( diff --git a/src/Interpreters/ya.make b/src/Interpreters/ya.make index 563b7374fdc..1c463eff7e4 100644 --- a/src/Interpreters/ya.make +++ b/src/Interpreters/ya.make @@ -14,7 +14,6 @@ PEERDIR( NO_COMPILER_WARNINGS() -CFLAGS(-g0) SRCS( ActionLocksManager.cpp diff --git a/src/Interpreters/ya.make.in b/src/Interpreters/ya.make.in index da34c1e3680..2445a9ba850 100644 --- a/src/Interpreters/ya.make.in +++ b/src/Interpreters/ya.make.in @@ -13,7 +13,6 @@ PEERDIR( NO_COMPILER_WARNINGS() -CFLAGS(-g0) SRCS( diff --git a/src/Parsers/ya.make b/src/Parsers/ya.make index 4ec97b8b55b..4f8b8a82210 100644 --- a/src/Parsers/ya.make +++ b/src/Parsers/ya.make @@ -5,7 +5,6 @@ PEERDIR( clickhouse/src/Common ) -CFLAGS(-g0) SRCS( ASTAlterQuery.cpp diff --git a/src/Parsers/ya.make.in b/src/Parsers/ya.make.in index 8b214f90a03..5ee7f637941 100644 --- a/src/Parsers/ya.make.in +++ b/src/Parsers/ya.make.in @@ -4,7 +4,6 @@ PEERDIR( clickhouse/src/Common ) -CFLAGS(-g0) SRCS( diff --git a/src/Processors/ya.make b/src/Processors/ya.make index b2f8b9ba7c2..7898576ad2d 100644 --- a/src/Processors/ya.make +++ b/src/Processors/ya.make @@ -7,7 +7,6 @@ PEERDIR( contrib/libs/protobuf ) -CFLAGS(-g0) SRCS( Chunk.cpp diff --git a/src/Processors/ya.make.in b/src/Processors/ya.make.in index 3dc63479238..d1aa7d43b6a 100644 --- a/src/Processors/ya.make.in +++ b/src/Processors/ya.make.in @@ -6,7 +6,6 @@ PEERDIR( contrib/libs/protobuf ) -CFLAGS(-g0) SRCS( diff --git a/src/Server/ya.make b/src/Server/ya.make index cab114871c5..8a9bbd3bbc2 100644 --- a/src/Server/ya.make +++ b/src/Server/ya.make @@ -6,7 +6,6 @@ PEERDIR( contrib/libs/poco/Util ) -CFLAGS(-g0) SRCS( HTTPHandler.cpp diff --git a/src/Server/ya.make.in b/src/Server/ya.make.in index 44a2531208f..9adec7e3685 100644 --- a/src/Server/ya.make.in +++ b/src/Server/ya.make.in @@ -5,7 +5,6 @@ PEERDIR( contrib/libs/poco/Util ) -CFLAGS(-g0) SRCS( diff --git a/src/Storages/ya.make b/src/Storages/ya.make index 107433b5e73..29d9e87bfa0 100644 --- a/src/Storages/ya.make +++ b/src/Storages/ya.make @@ -7,7 +7,6 @@ PEERDIR( contrib/libs/poco/MongoDB ) -CFLAGS(-g0) 
SRCS( AlterCommands.cpp diff --git a/src/Storages/ya.make.in b/src/Storages/ya.make.in index a2fb50a6d61..dbae43aa3fc 100644 --- a/src/Storages/ya.make.in +++ b/src/Storages/ya.make.in @@ -6,7 +6,6 @@ PEERDIR( contrib/libs/poco/MongoDB ) -CFLAGS(-g0) SRCS( diff --git a/src/TableFunctions/ya.make b/src/TableFunctions/ya.make index 03432e2bbbc..50f685fef3e 100644 --- a/src/TableFunctions/ya.make +++ b/src/TableFunctions/ya.make @@ -5,7 +5,6 @@ PEERDIR( clickhouse/src/Common ) -CFLAGS(-g0) SRCS( ITableFunction.cpp diff --git a/src/TableFunctions/ya.make.in b/src/TableFunctions/ya.make.in index 289c831f11a..aedb6209ef8 100644 --- a/src/TableFunctions/ya.make.in +++ b/src/TableFunctions/ya.make.in @@ -4,7 +4,6 @@ PEERDIR( clickhouse/src/Common ) -CFLAGS(-g0) SRCS( From dbfef325e6a4fdb081f6b99a6d4d068237f92135 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov Date: Thu, 29 Oct 2020 20:22:48 +0300 Subject: [PATCH 386/432] Rename NullSink to NullOutput --- src/IO/ReadHelpers.cpp | 10 +++++----- src/IO/ReadHelpers.h | 2 +- src/Processors/Formats/Impl/TSKVRowInputFormat.cpp | 2 +- .../Formats/Impl/TabSeparatedRowInputFormat.cpp | 6 +++--- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/IO/ReadHelpers.cpp b/src/IO/ReadHelpers.cpp index 900e9c7b535..bf41de3959a 100644 --- a/src/IO/ReadHelpers.cpp +++ b/src/IO/ReadHelpers.cpp @@ -480,7 +480,7 @@ void readEscapedString(String & s, ReadBuffer & buf) } template void readEscapedStringInto>(PaddedPODArray & s, ReadBuffer & buf); -template void readEscapedStringInto(NullSink & s, ReadBuffer & buf); +template void readEscapedStringInto(NullOutput & s, ReadBuffer & buf); /** If enable_sql_style_quoting == true, @@ -562,7 +562,7 @@ void readQuotedStringWithSQLStyle(String & s, ReadBuffer & buf) template void readQuotedStringInto(PaddedPODArray & s, ReadBuffer & buf); -template void readDoubleQuotedStringInto(NullSink & s, ReadBuffer & buf); +template void readDoubleQuotedStringInto(NullOutput & s, ReadBuffer & buf); void readDoubleQuotedString(String & s, ReadBuffer & buf) { @@ -742,7 +742,7 @@ void readJSONString(String & s, ReadBuffer & buf) template void readJSONStringInto, void>(PaddedPODArray & s, ReadBuffer & buf); template bool readJSONStringInto, bool>(PaddedPODArray & s, ReadBuffer & buf); -template void readJSONStringInto(NullSink & s, ReadBuffer & buf); +template void readJSONStringInto(NullOutput & s, ReadBuffer & buf); template void readJSONStringInto(String & s, ReadBuffer & buf); @@ -891,7 +891,7 @@ void skipJSONField(ReadBuffer & buf, const StringRef & name_of_field) throw Exception("Unexpected EOF for key '" + name_of_field.toString() + "'", ErrorCodes::INCORRECT_DATA); else if (*buf.position() == '"') /// skip double-quoted string { - NullSink sink; + NullOutput sink; readJSONStringInto(sink, buf); } else if (isNumericASCII(*buf.position()) || *buf.position() == '-' || *buf.position() == '+' || *buf.position() == '.') /// skip number @@ -955,7 +955,7 @@ void skipJSONField(ReadBuffer & buf, const StringRef & name_of_field) // field name if (*buf.position() == '"') { - NullSink sink; + NullOutput sink; readJSONStringInto(sink, buf); } else diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index d79328889f1..9ff1858c723 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -527,7 +527,7 @@ bool tryReadJSONStringInto(Vector & s, ReadBuffer & buf) } /// This could be used as template parameter for functions above, if you want to just skip data. 
-struct NullSink +struct NullOutput { void append(const char *, size_t) {} void push_back(char) {} diff --git a/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp b/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp index 93cd0a623c7..abb468741c5 100644 --- a/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp @@ -130,7 +130,7 @@ bool TSKVRowInputFormat::readRow(MutableColumns & columns, RowReadExtension & ex throw Exception("Unknown field found while parsing TSKV format: " + name_ref.toString(), ErrorCodes::INCORRECT_DATA); /// If the key is not found, skip the value. - NullSink sink; + NullOutput sink; readEscapedStringInto(sink, in); } else diff --git a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp index c7da0e7383e..529b70e4e09 100644 --- a/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp @@ -20,7 +20,7 @@ namespace ErrorCodes static void skipTSVRow(ReadBuffer & in, const size_t num_columns) { - NullSink null_sink; + NullOutput null_sink; for (size_t i = 0; i < num_columns; ++i) { @@ -196,7 +196,7 @@ bool TabSeparatedRowInputFormat::readRow(MutableColumns & columns, RowReadExtens } else { - NullSink null_sink; + NullOutput null_sink; readEscapedStringInto(null_sink, in); } @@ -353,7 +353,7 @@ void TabSeparatedRowInputFormat::tryDeserializeField(const DataTypePtr & type, I } else { - NullSink null_sink; + NullOutput null_sink; readEscapedStringInto(null_sink, in); } } From 68b47f5f617b89ab7b53001b8d430b8488da7f2d Mon Sep 17 00:00:00 2001 From: myrrc Date: Thu, 29 Oct 2020 20:24:54 +0300 Subject: [PATCH 387/432] removing LC in AVRO input format if needed --- src/Processors/Formats/Impl/AvroRowInputFormat.cpp | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/Processors/Formats/Impl/AvroRowInputFormat.cpp b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp index 812bed23451..d756b8191bd 100644 --- a/src/Processors/Formats/Impl/AvroRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp @@ -1,4 +1,5 @@ #include "AvroRowInputFormat.h" +#include "DataTypes/DataTypeLowCardinality.h" #if USE_AVRO #include @@ -174,7 +175,10 @@ static std::string nodeName(avro::NodePtr node) AvroDeserializer::DeserializeFn AvroDeserializer::createDeserializeFn(avro::NodePtr root_node, DataTypePtr target_type) { - WhichDataType target(target_type); + const WhichDataType target = target_type->getTypeId() == TypeIndex::LowCardinality + ? 
removeLowCardinality(target_type) + : target_type; + switch (root_node->type()) { case avro::AVRO_STRING: [[fallthrough]]; @@ -384,7 +388,8 @@ AvroDeserializer::DeserializeFn AvroDeserializer::createDeserializeFn(avro::Node } throw Exception( - "Type " + target_type->getName() + " is not compatible with Avro " + avro::toString(root_node->type()) + ":\n" + nodeToJson(root_node), + "Type " + target_type->getName() + " is not compatible with Avro " + avro::toString(root_node->type()) + ":\n" + + nodeToJson(root_node), ErrorCodes::ILLEGAL_COLUMN); } From 9564a7f6d1c940c8355ef3dd4edebeb0d620b41c Mon Sep 17 00:00:00 2001 From: myrrc Date: Thu, 29 Oct 2020 20:30:24 +0300 Subject: [PATCH 388/432] simplified the cast --- src/Processors/Formats/Impl/AvroRowInputFormat.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/Processors/Formats/Impl/AvroRowInputFormat.cpp b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp index d756b8191bd..cf7a020ee0b 100644 --- a/src/Processors/Formats/Impl/AvroRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/AvroRowInputFormat.cpp @@ -175,9 +175,7 @@ static std::string nodeName(avro::NodePtr node) AvroDeserializer::DeserializeFn AvroDeserializer::createDeserializeFn(avro::NodePtr root_node, DataTypePtr target_type) { - const WhichDataType target = target_type->getTypeId() == TypeIndex::LowCardinality - ? removeLowCardinality(target_type) - : target_type; + const WhichDataType target = removeLowCardinality(target_type); switch (root_node->type()) { From 4e5bfc97848f6290bf14f652b253c8bda6fb2eed Mon Sep 17 00:00:00 2001 From: myrrc Date: Thu, 29 Oct 2020 21:02:48 +0300 Subject: [PATCH 389/432] added the simple test --- .../01543_avro_deserialization_with_lc.reference | 0 .../01543_avro_deserialization_with_lc.sh | 12 ++++++++++++ 2 files changed, 12 insertions(+) create mode 100644 tests/queries/0_stateless/01543_avro_deserialization_with_lc.reference create mode 100755 tests/queries/0_stateless/01543_avro_deserialization_with_lc.sh diff --git a/tests/queries/0_stateless/01543_avro_deserialization_with_lc.reference b/tests/queries/0_stateless/01543_avro_deserialization_with_lc.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01543_avro_deserialization_with_lc.sh b/tests/queries/0_stateless/01543_avro_deserialization_with_lc.sh new file mode 100755 index 00000000000..0971396ec9c --- /dev/null +++ b/tests/queries/0_stateless/01543_avro_deserialization_with_lc.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. 
"$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT --query "CREATE TABLE IF NOT EXISTS test_01543 (value LowCardinality(String)) ENGINE=Memory()" +$CLICKHOUSE_CLIENT --query "INSERT INTO test_01543 SELECT toString(number) FROM numbers(1000)" + +$CLICKHOUSE_CLIENT -q "SELECT * FROM test_01543 FORMAT Avro" | + $CLICKHOUSE_CLIENT -q "INSERT INTO test_01543 FORMAT Avro"; + +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS test_01543" From 0f92fe9ad2003b9c091c3a10db0190ff84d9a10d Mon Sep 17 00:00:00 2001 From: Anna <42538400+adevyatova@users.noreply.github.com> Date: Thu, 29 Oct 2020 21:22:28 +0300 Subject: [PATCH 390/432] DOCSUP-3118: Document the named subqueries WITH AS (#16339) * Update WITH desc * Update docs/en/sql-reference/statements/select/with.md Co-authored-by: BayoNet * Update docs/en/sql-reference/statements/select/with.md Co-authored-by: BayoNet * Update docs/en/sql-reference/statements/select/with.md Co-authored-by: BayoNet * Fixed, ru translated * Update docs/en/sql-reference/statements/select/with.md Co-authored-by: BayoNet * Fixed syntax description * Update docs/ru/sql-reference/statements/select/with.md Co-authored-by: BayoNet * Update Co-authored-by: BayoNet --- .../sql-reference/statements/select/with.md | 45 +++++++---------- .../sql-reference/statements/select/with.md | 48 ++++++++----------- 2 files changed, 37 insertions(+), 56 deletions(-) diff --git a/docs/en/sql-reference/statements/select/with.md b/docs/en/sql-reference/statements/select/with.md index a507d5224aa..6a0564a8ede 100644 --- a/docs/en/sql-reference/statements/select/with.md +++ b/docs/en/sql-reference/statements/select/with.md @@ -4,13 +4,17 @@ toc_title: WITH # WITH Clause {#with-clause} -This section provides support for Common Table Expressions ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)), so the results of `WITH` clause can be used in the rest of `SELECT` query. +Clickhouse supports Common Table Expressions ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)), that is provides to use results of `WITH` clause in the rest of `SELECT` query. Named subqueries can be included to the current and child query context in places where table objects are allowed. Recursion is prevented by hiding the current level CTEs from the WITH expression. -## Limitations {#limitations} +## Syntax -1. Recursive queries are not supported. -2. When subquery is used inside WITH section, it’s result should be scalar with exactly one row. -3. Expression’s results are not available in subqueries. +``` sql +WITH AS +``` +or +``` sql +WITH AS +``` ## Examples {#examples} @@ -22,10 +26,10 @@ SELECT * FROM hits WHERE EventDate = toDate(ts_upper_bound) AND - EventTime <= ts_upper_bound + EventTime <= ts_upper_bound; ``` -**Example 2:** Evicting sum(bytes) expression result from SELECT clause column list +**Example 2:** Evicting a sum(bytes) expression result from the SELECT clause column list ``` sql WITH sum(bytes) as s @@ -34,10 +38,10 @@ SELECT table FROM system.parts GROUP BY table -ORDER BY s +ORDER BY s; ``` -**Example 3:** Using results of scalar subquery +**Example 3:** Using results of a scalar subquery ``` sql /* this example would return TOP 10 of most huge tables */ @@ -53,27 +57,14 @@ SELECT FROM system.parts GROUP BY table ORDER BY table_disk_usage DESC -LIMIT 10 +LIMIT 10; ``` -**Example 4:** Re-using expression in subquery - -As a workaround for current limitation for expression usage in subqueries, you may duplicate it. 
+**Example 4:** Reusing expression in a subquery ``` sql -WITH ['hello'] AS hello -SELECT - hello, - * -FROM -( - WITH ['hello'] AS hello - SELECT hello -) +WITH test1 AS (SELECT i + 1, j + 1 FROM test1) +SELECT * FROM test1; ``` -``` text -┌─hello─────┬─hello─────┐ -│ ['hello'] │ ['hello'] │ -└───────────┴───────────┘ -``` +[Original article](https://clickhouse.tech/docs/en/sql-reference/statements/select/with/) diff --git a/docs/ru/sql-reference/statements/select/with.md b/docs/ru/sql-reference/statements/select/with.md index 4feae232bd7..328b28c27ef 100644 --- a/docs/ru/sql-reference/statements/select/with.md +++ b/docs/ru/sql-reference/statements/select/with.md @@ -2,18 +2,21 @@ toc_title: WITH --- -# Секция WITH {#sektsiia-with} +# Секция WITH {#with-clause} -Данная секция представляет собой [Common Table Expressions](https://ru.wikipedia.org/wiki/Иерархические_и_рекурсивные_запросы_в_SQL), то есть позволяет использовать результаты выражений из секции `WITH` в остальной части `SELECT` запроса. +ClickHouse поддерживает [Общие табличные выражения](https://ru.wikipedia.org/wiki/Иерархические_и_рекурсивные_запросы_в_SQL), то есть позволяет использовать результаты выражений из секции `WITH` в остальной части `SELECT` запроса. Именованные подзапросы могут быть включены в текущий и дочерний контекст запроса в тех местах, где разрешены табличные объекты. Рекурсия предотвращается путем скрытия общего табличного выражения текущего уровня из выражения `WITH`. + +## Синтаксис + +``` sql +WITH <expression> AS <identifier> +``` +или +``` sql +WITH <identifier> AS <subquery expression> +``` - -### Ограничения - -1. Рекурсивные запросы не поддерживаются 2. Если в качестве выражения используется подзапрос, то результат должен содержать ровно одну строку 3. Результаты выражений нельзя переиспользовать во вложенных запросах -В дальнейшем, результаты выражений можно использовать в секции SELECT. - -### Примеры +## Примеры **Пример 1:** Использование константного выражения как «переменной» @@ -23,7 +26,7 @@ SELECT * FROM hits WHERE EventDate = toDate(ts_upper_bound) AND - EventTime <= ts_upper_bound + EventTime <= ts_upper_bound; ``` **Пример 2:** Выкидывание выражения sum(bytes) из списка колонок в SELECT @@ -35,7 +38,7 @@ SELECT table FROM system.parts GROUP BY table -ORDER BY s +ORDER BY s; ``` **Пример 3:** Использование результатов скалярного подзапроса @@ -54,27 +57,14 @@ SELECT FROM system.parts GROUP BY table ORDER BY table_disk_usage DESC -LIMIT 10 +LIMIT 10; ``` **Пример 4:** Переиспользование выражения -В настоящий момент, переиспользование выражения из секции WITH внутри подзапроса возможно только через дублирование. - ``` sql -WITH ['hello'] AS hello -SELECT - hello, - * -FROM -( - WITH ['hello'] AS hello - SELECT hello -) +WITH test1 AS (SELECT i + 1, j + 1 FROM test1) +SELECT * FROM test1; ``` -``` text -┌─hello─────┬─hello─────┐ -│ ['hello'] │ ['hello'] │ -└───────────┴───────────┘ -``` +[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/statements/select/with/) From 19794bee077ed23261930b37c003ad4ae8eedcbd Mon Sep 17 00:00:00 2001 From: Ekaterina <72217280+antarctictardigrade@users.noreply.github.com> Date: Thu, 29 Oct 2020 21:27:37 +0300 Subject: [PATCH 391/432] DOCSUP-2966: Update the INSERT INTO statement (#16404) * Revert "Update version_date.tsv after release 20.7.4.11" This reverts commit 9aff247afed1479f95a4714d699c1ddaefc75499. * Added translation for feature. * Fixed. * Fixed. * Fixed.
* Update docs/en/sql-reference/statements/insert-into.md * Update docs/en/sql-reference/statements/insert-into.md * Update docs/en/sql-reference/statements/insert-into.md * Update docs/en/sql-reference/statements/insert-into.md * Update docs/en/sql-reference/statements/insert-into.md * Update docs/en/sql-reference/statements/insert-into.md * Update docs/ru/sql-reference/statements/insert-into.md * Update docs/ru/sql-reference/statements/insert-into.md * Update docs/ru/sql-reference/statements/insert-into.md * Update version_date.tsv Co-authored-by: BayoNet --- .../sql-reference/statements/insert-into.md | 53 ++++++++++++++++++- .../sql-reference/statements/insert-into.md | 50 ++++++++++++++++- 2 files changed, 100 insertions(+), 3 deletions(-) diff --git a/docs/en/sql-reference/statements/insert-into.md b/docs/en/sql-reference/statements/insert-into.md index b49314a1785..ae5e074fd15 100644 --- a/docs/en/sql-reference/statements/insert-into.md +++ b/docs/en/sql-reference/statements/insert-into.md @@ -13,12 +13,61 @@ Basic query format: INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ... ``` -The query can specify a list of columns to insert `[(c1, c2, c3)]`. In this case, the rest of the columns are filled with: +You can specify a list of columns to insert using the `(c1, c2, c3)` or `COLUMNS(c1,c2,c3)` syntax. + +Instead of listing all the required columns you can use the `(* EXCEPT(column_list))` syntax. + +For example, consider the table: + +``` sql +SHOW CREATE insert_select_testtable; +``` + +``` +┌─statement────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ CREATE TABLE insert_select_testtable ( `a` Int8, `b` String, `c` Int8 ) ENGINE = MergeTree() ORDER BY a SETTINGS index_granularity = 8192 │ +└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +``` sql +INSERT INTO insert_select_testtable (*) VALUES (1, 'a', 1); +``` + +If you want to insert data into all the columns except `b`, you need to pass as many values as the number of columns you listed in parentheses: + +``` sql +INSERT INTO insert_select_testtable (* EXCEPT(b)) VALUES (2, 2); +``` + +``` sql +SELECT * FROM insert_select_testtable; +``` + +``` +┌─a─┬─b─┬─c─┐ +│ 2 │ │ 2 │ +└───┴───┴───┘ +┌─a─┬─b─┬─c─┐ +│ 1 │ a │ 1 │ +└───┴───┴───┘ +``` + +In this example, we see that the second inserted row has its `a` and `c` columns filled with the passed values, and `b` filled with its default value. + +If a list of columns doesn't include all existing columns, the rest of the columns are filled with: - The values calculated from the `DEFAULT` expressions specified in the table definition. - Zeros and empty strings, if `DEFAULT` expressions are not defined. -If [strict_insert_defaults=1](../../operations/settings/settings.md), columns that do not have `DEFAULT` defined must be listed in the query. +If [strict\_insert\_defaults=1](../../operations/settings/settings.md), columns that do not have `DEFAULT` defined must be listed in the query. Data can be passed to the INSERT in any [format](../../interfaces/formats.md#formats) supported by ClickHouse.
The format must be specified explicitly in the query: diff --git a/docs/ru/sql-reference/statements/insert-into.md b/docs/ru/sql-reference/statements/insert-into.md index 8ea7c83bec8..0d38be81ac6 100644 --- a/docs/ru/sql-reference/statements/insert-into.md +++ b/docs/ru/sql-reference/statements/insert-into.md @@ -13,7 +13,55 @@ toc_title: INSERT INTO INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ... ``` -В запросе можно указать список столбцов для вставки `[(c1, c2, c3)]`. В этом случае, в остальные столбцы записываются: +Вы можете указать список столбцов для вставки, используя следующий синтаксис: `(c1, c2, c3)` или `COLUMNS(c1,c2,c3)`. + +Можно не перечислять все необходимые столбцы, а использовать синтаксис `(* EXCEPT(column_list))`. + +В качестве примера рассмотрим таблицу: + +``` sql +SHOW CREATE insert_select_testtable +``` + +``` +┌─statement────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ CREATE TABLE insert_select_testtable +( + `a` Int8, + `b` String, + `c` Int8 +) +ENGINE = MergeTree() +ORDER BY a +SETTINGS index_granularity = 8192 │ +└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +``` sql +INSERT INTO insert_select_testtable (*) VALUES (1, 'a', 1) +``` + +Если вы хотите вставить данные во все столбцы, кроме 'b', вам нужно передать столько значений, сколько столбцов вы указали в скобках: + +``` sql +INSERT INTO insert_select_testtable (* EXCEPT(b)) Values (2, 2) +``` + +``` sql +SELECT * FROM insert_select_testtable +``` + +``` +┌─a─┬─b─┬─c─┐ +│ 2 │ │ 2 │ +└───┴───┴───┘ +┌─a─┬─b─┬─c─┐ +│ 1 │ a │ 1 │ +└───┴───┴───┘ +``` + +В этом примере мы видим, что вторая строка содержит столбцы `a` и `c`, заполненные переданными значениями и `b`, заполненный значением по умолчанию. +Если список столбцов не включает все существующие столбцы, то все остальные столбцы заполняются следующим образом: - Значения, вычисляемые из `DEFAULT` выражений, указанных в определении таблицы. - Нули и пустые строки, если `DEFAULT` не определены. 
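To make the column-list syntax documented in the patch above concrete, here is a minimal, hedged SQL sketch; it is not part of any patch in this series. It reuses `insert_select_testtable` (columns `a Int8, b String, c Int8`) from the patch itself, but pairing `(* EXCEPT(...))` with `INSERT ... SELECT` and the `WHERE` filter are illustrative assumptions, not something the patch adds.

``` sql
-- Assumed table from the patch: insert_select_testtable(a Int8, b String, c Int8).
-- Copy columns a and c from an existing row; the omitted column b
-- falls back to its DEFAULT (the empty string for String).
INSERT INTO insert_select_testtable (* EXCEPT(b))
SELECT a, c
FROM insert_select_testtable
WHERE a = 1;
```

If this runs as sketched, the new row repeats `a` and `c` from the matched row while `b` stays empty, mirroring the behavior the patch describes for columns left out of the list.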
From ba4bb18663d868bee059f844ec871da6e8cc1eee Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 29 Oct 2020 21:57:16 +0300 Subject: [PATCH 392/432] Remove useless sleep in test --- tests/queries/0_stateless/00933_ttl_with_default.sql | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/queries/0_stateless/00933_ttl_with_default.sql b/tests/queries/0_stateless/00933_ttl_with_default.sql index d3f3b62126c..e6c0a6e700c 100644 --- a/tests/queries/0_stateless/00933_ttl_with_default.sql +++ b/tests/queries/0_stateless/00933_ttl_with_default.sql @@ -5,7 +5,6 @@ insert into ttl_00933_2 values (toDateTime('2000-10-10 00:00:00'), 1); insert into ttl_00933_2 values (toDateTime('2000-10-10 00:00:00'), 2); insert into ttl_00933_2 values (toDateTime('2100-10-10 00:00:00'), 3); insert into ttl_00933_2 values (toDateTime('2100-10-10 00:00:00'), 4); -select sleep(0.7) format Null; -- wait if very fast merge happen optimize table ttl_00933_2 final; select a from ttl_00933_2 order by a; @@ -16,7 +15,6 @@ insert into ttl_00933_2 values (toDateTime('2000-10-10 00:00:00'), 1, 100); insert into ttl_00933_2 values (toDateTime('2000-10-10 00:00:00'), 2, 200); insert into ttl_00933_2 values (toDateTime('2100-10-10 00:00:00'), 3, 300); insert into ttl_00933_2 values (toDateTime('2100-10-10 00:00:00'), 4, 400); -select sleep(0.7) format Null; -- wait if very fast merge happen optimize table ttl_00933_2 final; select a, b from ttl_00933_2 order by a; @@ -27,7 +25,6 @@ insert into ttl_00933_2 values (toDateTime('2000-10-10 00:00:00'), 1, 5); insert into ttl_00933_2 values (toDateTime('2000-10-10 00:00:00'), 2, 10); insert into ttl_00933_2 values (toDateTime('2100-10-10 00:00:00'), 3, 15); insert into ttl_00933_2 values (toDateTime('2100-10-10 00:00:00'), 4, 20); -select sleep(0.7) format Null; -- wait if very fast merge happen optimize table ttl_00933_2 final; select a, b from ttl_00933_2 order by a; From 18c5abc44eff7617d7358a6e4fd1d048b6a8a65d Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Fri, 30 Oct 2020 10:55:50 +0800 Subject: [PATCH 393/432] Allow fetching duplicate parts to detach --- src/Storages/StorageReplicatedMergeTree.cpp | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index edc8a9df911..814350294e7 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -3282,12 +3282,15 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora auto zookeeper = zookeeper_ ? zookeeper_ : getZooKeeper(); const auto part_info = MergeTreePartInfo::fromPartName(part_name, format_version); - if (auto part = getPartIfExists(part_info, {IMergeTreeDataPart::State::Outdated, IMergeTreeDataPart::State::Deleting})) + if (!to_detached) { - LOG_DEBUG(log, "Part {} should be deleted after previous attempt before fetch", part->name); - /// Force immediate parts cleanup to delete the part that was left from the previous fetch attempt. - cleanup_thread.wakeup(); - return false; + if (auto part = getPartIfExists(part_info, {IMergeTreeDataPart::State::Outdated, IMergeTreeDataPart::State::Deleting})) + { + LOG_DEBUG(log, "Part {} should be deleted after previous attempt before fetch", part->name); + /// Force immediate parts cleanup to delete the part that was left from the previous fetch attempt. 
+ cleanup_thread.wakeup(); + return false; + } } { From f428675b6e2465f4837e71932b50a4093199a40a Mon Sep 17 00:00:00 2001 From: feng lv Date: Fri, 30 Oct 2020 05:42:10 +0000 Subject: [PATCH 394/432] fix parseDateTimeBestEffortOrNull with empty string --- src/IO/parseDateTimeBestEffort.cpp | 4 ++++ ...rse_datetime_besteffort_or_null_empty_string.reference | 8 ++++++++ ...543_parse_datetime_besteffort_or_null_empty_string.sql | 8 ++++++++ 3 files changed, 20 insertions(+) create mode 100644 tests/queries/0_stateless/01543_parse_datetime_besteffort_or_null_empty_string.reference create mode 100755 tests/queries/0_stateless/01543_parse_datetime_besteffort_or_null_empty_string.sql diff --git a/src/IO/parseDateTimeBestEffort.cpp b/src/IO/parseDateTimeBestEffort.cpp index d84ac76b164..a5e23e7f697 100644 --- a/src/IO/parseDateTimeBestEffort.cpp +++ b/src/IO/parseDateTimeBestEffort.cpp @@ -533,6 +533,10 @@ ReturnType parseDateTimeBestEffortImpl( } } + /// If neither Date nor Time is parsed successfully, it should fail + if (!year && !month && !day_of_month && !has_time) + return on_error("Cannot read DateTime: neither Date nor Time was parsed successfully", ErrorCodes::CANNOT_PARSE_DATETIME); + if (!year) year = 2000; if (!month) diff --git a/tests/queries/0_stateless/01543_parse_datetime_besteffort_or_null_empty_string.reference b/tests/queries/0_stateless/01543_parse_datetime_besteffort_or_null_empty_string.reference new file mode 100644 index 00000000000..14be7c77e23 --- /dev/null +++ b/tests/queries/0_stateless/01543_parse_datetime_besteffort_or_null_empty_string.reference @@ -0,0 +1,8 @@ +2010-01-01 00:00:00 +2010-01-01 01:01:01 +2000-01-01 01:01:01 +\N +\N +\N +\N +\N diff --git a/tests/queries/0_stateless/01543_parse_datetime_besteffort_or_null_empty_string.sql b/tests/queries/0_stateless/01543_parse_datetime_besteffort_or_null_empty_string.sql new file mode 100644 index 00000000000..66d28534b30 --- /dev/null +++ b/tests/queries/0_stateless/01543_parse_datetime_besteffort_or_null_empty_string.sql @@ -0,0 +1,8 @@ +SELECT parseDateTimeBestEffortOrNull('2010-01-01'); +SELECT parseDateTimeBestEffortOrNull('2010-01-01 01:01:01'); +SELECT parseDateTimeBestEffortOrNull('01:01:01'); +SELECT parseDateTimeBestEffortOrNull('20100'); +SELECT parseDateTimeBestEffortOrNull('0100:0100:0000'); +SELECT parseDateTimeBestEffortOrNull('x'); +SELECT parseDateTimeBestEffortOrNull(''); +SELECT parseDateTimeBestEffortOrNull(' '); From 21fcd8aece26fe63e373f6e56d3f1fcfcbd6056e Mon Sep 17 00:00:00 2001 From: feng lv Date: Fri, 30 Oct 2020 06:45:09 +0000 Subject: [PATCH 395/432] fix fix --- .../00569_parse_date_time_best_effort.reference | 4 ++-- .../01313_parse_date_time_best_effort_null_zero.sql | 4 ++-- .../0_stateless/01442_date_time_with_params.reference | 4 ++-- tests/queries/0_stateless/01442_date_time_with_params.sql | 8 ++++---- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/queries/0_stateless/00569_parse_date_time_best_effort.reference b/tests/queries/0_stateless/00569_parse_date_time_best_effort.reference index 6c10720d0b1..ad7c17b7717 100644 --- a/tests/queries/0_stateless/00569_parse_date_time_best_effort.reference +++ b/tests/queries/0_stateless/00569_parse_date_time_best_effort.reference @@ -1,7 +1,7 @@ s a b - 0 2000-01-01 00:00:00 2000-01-01 00:00:00 - 0000 2000-01-01 00:00:00 2000-01-01 00:00:00 + 0 ᴺᵁᴸᴸ 1970-01-01 00:00:00 + 0000 ᴺᵁᴸᴸ 1970-01-01 00:00:00 00:00:00 2000-01-01 00:00:00 2000-01-01 00:00:00 01:00:00 2000-01-01 01:00:00 2000-01-01 01:00:00 02/01/17 010203 MSK
2017-01-01 22:02:03 2017-01-01 22:02:03 diff --git a/tests/queries/0_stateless/01313_parse_date_time_best_effort_null_zero.sql b/tests/queries/0_stateless/01313_parse_date_time_best_effort_null_zero.sql index c58eb722327..ed56aec3fb0 100644 --- a/tests/queries/0_stateless/01313_parse_date_time_best_effort_null_zero.sql +++ b/tests/queries/0_stateless/01313_parse_date_time_best_effort_null_zero.sql @@ -1,8 +1,8 @@ -SELECT parseDateTimeBestEffort(''); -- { serverError 6 } +SELECT parseDateTimeBestEffort(''); -- { serverError 41 } SELECT parseDateTimeBestEffortOrNull(''); SELECT parseDateTimeBestEffortOrZero('', 'UTC'); -SELECT parseDateTime64BestEffort(''); -- { serverError 6 } +SELECT parseDateTime64BestEffort(''); -- { serverError 41 } SELECT parseDateTime64BestEffortOrNull(''); SELECT parseDateTime64BestEffortOrZero('', 0, 'UTC'); diff --git a/tests/queries/0_stateless/01442_date_time_with_params.reference b/tests/queries/0_stateless/01442_date_time_with_params.reference index f55d095d164..94ffcffdd37 100644 --- a/tests/queries/0_stateless/01442_date_time_with_params.reference +++ b/tests/queries/0_stateless/01442_date_time_with_params.reference @@ -15,7 +15,7 @@ 2020-05-14 03:37:03.253 Nullable(DateTime64(3, \'UTC\')) 2020-05-14 06:37:03.253 Nullable(DateTime64(3, \'Europe/Minsk\')) 2020-05-14 03:37:03.253 Nullable(DateTime64(3, \'UTC\')) -1970-01-01 03:00:00.000 DateTime64(3) +1970-01-01 00:00:00.000 DateTime64(3, \'UTC\') 2020-05-14 03:37:03.000 DateTime64(3, \'UTC\') 2020-05-14 03:37:03.000 DateTime64(3, \'UTC\') 2020-05-14 03:37:03.253 DateTime64(3, \'UTC\') @@ -35,7 +35,7 @@ 2020-05-14 03:37:03 Nullable(DateTime(\'UTC\')) 2020-05-14 06:37:03 Nullable(DateTime(\'Europe/Minsk\')) 2020-05-14 03:37:03 Nullable(DateTime(\'UTC\')) -1970-01-01 03:00:00 DateTime +1970-01-01 00:00:00 DateTime(\'UTC\') 2020-05-14 03:37:03 DateTime(\'UTC\') 2020-05-14 03:37:03 DateTime(\'UTC\') 2020-05-14 03:37:03 DateTime(\'UTC\') diff --git a/tests/queries/0_stateless/01442_date_time_with_params.sql b/tests/queries/0_stateless/01442_date_time_with_params.sql index 5ae7fe22699..2eefa47ba8c 100644 --- a/tests/queries/0_stateless/01442_date_time_with_params.sql +++ b/tests/queries/0_stateless/01442_date_time_with_params.sql @@ -12,7 +12,7 @@ SELECT CAST('2020-01-01 00:00:00', 'DateTime') AS a, toTypeName(a), CAST('2020-0 SELECT toDateTime32('2020-01-01 00:00:00') AS a, toTypeName(a); -SELECT parseDateTimeBestEffort('', 3) AS a, toTypeName(a); -- {serverError 6} +SELECT parseDateTimeBestEffort('', 3) AS a, toTypeName(a); -- {serverError 41} SELECT parseDateTimeBestEffort('2020-05-14T03:37:03', 3, 'UTC') AS a, toTypeName(a); SELECT parseDateTimeBestEffort('2020-05-14 03:37:03', 3, 'UTC') AS a, toTypeName(a); SELECT parseDateTimeBestEffort('2020-05-14T03:37:03.253184', 3, 'UTC') AS a, toTypeName(a); @@ -28,7 +28,7 @@ SELECT parseDateTimeBestEffortOrNull('2020-05-14T03:37:03.253184Z', 3, 'UTC') AS SELECT parseDateTimeBestEffortOrNull('2020-05-14T03:37:03.253184Z', 3, 'Europe/Minsk') AS a, toTypeName(a); SELECT parseDateTimeBestEffortOrNull(materialize('2020-05-14T03:37:03.253184Z'), 3, 'UTC') AS a, toTypeName(a); -SELECT parseDateTimeBestEffortOrZero('', 3) AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrZero('', 3, 'UTC') AS a, toTypeName(a); SELECT parseDateTimeBestEffortOrZero('2020-05-14T03:37:03', 3, 'UTC') AS a, toTypeName(a); SELECT parseDateTimeBestEffortOrZero('2020-05-14 03:37:03', 3, 'UTC') AS a, toTypeName(a); SELECT parseDateTimeBestEffortOrZero('2020-05-14T03:37:03.253184', 3, 'UTC') AS a, 
toTypeName(a);
@@ -37,7 +37,7 @@ SELECT parseDateTimeBestEffortOrZero('2020-05-14T03:37:03.253184Z', 3, 'Europe/M
 SELECT parseDateTimeBestEffortOrZero(materialize('2020-05-14T03:37:03.253184Z'), 3, 'UTC') AS a, toTypeName(a);
 
-SELECT parseDateTime32BestEffort('') AS a, toTypeName(a); -- {serverError 6}
+SELECT parseDateTime32BestEffort('') AS a, toTypeName(a); -- {serverError 41}
 SELECT parseDateTime32BestEffort('2020-05-14T03:37:03', 'UTC') AS a, toTypeName(a);
 SELECT parseDateTime32BestEffort('2020-05-14 03:37:03', 'UTC') AS a, toTypeName(a);
 SELECT parseDateTime32BestEffort('2020-05-14T03:37:03.253184', 'UTC') AS a, toTypeName(a);
@@ -53,7 +53,7 @@ SELECT parseDateTime32BestEffortOrNull('2020-05-14T03:37:03.253184Z', 'UTC') AS
 SELECT parseDateTime32BestEffortOrNull('2020-05-14T03:37:03.253184Z', 'Europe/Minsk') AS a, toTypeName(a);
 SELECT parseDateTime32BestEffortOrNull(materialize('2020-05-14T03:37:03.253184Z'), 'UTC') AS a, toTypeName(a);
 
-SELECT parseDateTime32BestEffortOrZero('') AS a, toTypeName(a);
+SELECT parseDateTime32BestEffortOrZero('', 'UTC') AS a, toTypeName(a);
 SELECT parseDateTime32BestEffortOrZero('2020-05-14T03:37:03', 'UTC') AS a, toTypeName(a);
 SELECT parseDateTime32BestEffortOrZero('2020-05-14 03:37:03', 'UTC') AS a, toTypeName(a);
 SELECT parseDateTime32BestEffortOrZero('2020-05-14T03:37:03.253184', 'UTC') AS a, toTypeName(a);

From 15ba4dd85134290b519cf9bf1c9bb6a223e931f9 Mon Sep 17 00:00:00 2001
From: alesapin
Date: Fri, 30 Oct 2020 11:52:11 +0300
Subject: [PATCH 396/432] Review fix

---
 src/Storages/MergeTree/DataPartsExchange.cpp | 12 +++++++++---
 src/Storages/MergeTree/ReplicatedFetchList.h |  2 ++
 2 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp
index eaf32385908..5ab0cc4612f 100644
--- a/src/Storages/MergeTree/DataPartsExchange.cpp
+++ b/src/Storages/MergeTree/DataPartsExchange.cpp
@@ -64,9 +64,15 @@ struct ReplicatedFetchReadCallback
     void operator() (size_t bytes_count)
    {
         replicated_fetch_entry->bytes_read_compressed.store(bytes_count, std::memory_order_relaxed);
-        replicated_fetch_entry->progress.store(
-            static_cast(bytes_count) / replicated_fetch_entry->total_size_bytes_compressed,
-            std::memory_order_relaxed);
+
+        /// It's possible when we fetch a part from a very old ClickHouse version
+        /// which doesn't send the total size.
+        if (replicated_fetch_entry->total_size_bytes_compressed != 0)
+        {
+            replicated_fetch_entry->progress.store(
+                static_cast(bytes_count) / replicated_fetch_entry->total_size_bytes_compressed,
+                std::memory_order_relaxed);
+        }
     }
 };
 
diff --git a/src/Storages/MergeTree/ReplicatedFetchList.h b/src/Storages/MergeTree/ReplicatedFetchList.h
index ed134de3f2c..81d538abf9c 100644
--- a/src/Storages/MergeTree/ReplicatedFetchList.h
+++ b/src/Storages/MergeTree/ReplicatedFetchList.h
@@ -63,6 +63,8 @@ struct ReplicatedFetchListElement : private boost::noncopyable
     /// How many bytes already read
     std::atomic bytes_read_compressed{};
     /// Total bytes to read
+    /// NOTE: can be zero if we are fetching data from an old server.
+    /// In this case progress is not tracked.
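+    /// (bytes_read_compressed above is still updated unconditionally by ReplicatedFetchReadCallback,
+    /// so such fetches remain visible in the fetch list even without a progress ratio.)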
     const UInt64 total_size_bytes_compressed{};
 
     const UInt64 thread_id;

From f52d0919f06eb5448f9c136705c0dcf6a08e66ce Mon Sep 17 00:00:00 2001
From: filimonov <1549571+filimonov@users.noreply.github.com>
Date: Fri, 30 Oct 2020 10:31:05 +0100
Subject: [PATCH 397/432] Update date-time-functions.md

---
 docs/en/sql-reference/functions/date-time-functions.md | 2 --
 1 file changed, 2 deletions(-)

diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md
index 15214cc4c00..a5a347e553a 100644
--- a/docs/en/sql-reference/functions/date-time-functions.md
+++ b/docs/en/sql-reference/functions/date-time-functions.md
@@ -23,8 +23,6 @@ SELECT
 └─────────────────────┴────────────┴────────────┴─────────────────────┘
 ```
 
-Only time zones that differ from UTC by a whole number of hours are supported.
-
 ## toTimeZone {#totimezone}
 
 Convert time or date and time to the specified time zone.

From 182f9fbdca5e7401490e2e5e3dcd5a174d3ec771 Mon Sep 17 00:00:00 2001
From: alesapin
Date: Fri, 30 Oct 2020 12:36:46 +0300
Subject: [PATCH 398/432] Remove wrong assertion from SSDComplexKeyCacheDictionary

---
 src/Dictionaries/SSDComplexKeyCacheDictionary.cpp              | 1 -
 tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sql | 2 ++
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/Dictionaries/SSDComplexKeyCacheDictionary.cpp b/src/Dictionaries/SSDComplexKeyCacheDictionary.cpp
index 44847df48ff..5ac821e5eda 100644
--- a/src/Dictionaries/SSDComplexKeyCacheDictionary.cpp
+++ b/src/Dictionaries/SSDComplexKeyCacheDictionary.cpp
@@ -1467,7 +1467,6 @@ void SSDComplexKeyCacheDictionary::getItemsNumberImpl(
 {
     assert(dict_struct.key);
     assert(key_columns.size() == key_types.size());
-    assert(key_columns.size() == dict_struct.key->size());
 
     dict_struct.validateKeyTypes(key_types);
 
diff --git a/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sql b/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sql
index 9faafb6c0c7..7f2da983525 100644
--- a/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sql
+++ b/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sql
@@ -62,6 +62,8 @@ SELECT dictGetUInt64('database_for_dict.ssd_dict', 'a', tuple('10', toInt32(-20)
 SELECT dictGetInt32('database_for_dict.ssd_dict', 'b', tuple('10', toInt32(-20)));
 SELECT dictGetString('database_for_dict.ssd_dict', 'c', tuple('10', toInt32(-20)));
 
+SELECT dictGetUInt64('database_for_dict.ssd_dict', 'a', tuple(toInt32(3))); --{serverError 53}
+
 DROP DICTIONARY database_for_dict.ssd_dict;
 
 DROP TABLE IF EXISTS database_for_dict.keys_table;

From cf8202d36ebff92e2104859664a24d7547d334d2 Mon Sep 17 00:00:00 2001
From: Alexander Tokmakov
Date: Fri, 30 Oct 2020 15:41:39 +0300
Subject: [PATCH 399/432] better logging, fix flaky test

---
 .../MergeTree/ReplicatedMergeTreeQueue.cpp    | 84 +++++++++++--------
 .../MergeTree/ReplicatedMergeTreeQueue.h      |  1 +
 ...tem_parts_race_condition_drop_zookeeper.sh |  5 +-
 3 files changed, 55 insertions(+), 35 deletions(-)

diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
index 880ad4dd0d3..bf9a94747b6 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp
@@ -965,13 +965,16 @@ void ReplicatedMergeTreeQueue::checkThereAreNoConflictsInRange(const MergeTreePa
 }
 
 
-bool ReplicatedMergeTreeQueue::isNotCoveredByFuturePartsImpl(const String & new_part_name, String & out_reason, std::lock_guard & /*
queue_lock */) const +bool ReplicatedMergeTreeQueue::isNotCoveredByFuturePartsImpl(const String & log_entry_name, const String & new_part_name, + String & out_reason, std::lock_guard & /* queue_lock */) const { /// Let's check if the same part is now being created by another action. if (future_parts.count(new_part_name)) { - out_reason = "Not executing log entry for part " + new_part_name - + " because another log entry for the same part is being processed. This shouldn't happen often."; + const char * format_str = "Not executing log entry {} for part {} " + "because another log entry for the same part is being processed. This shouldn't happen often."; + LOG_INFO(log, format_str, log_entry_name, new_part_name); + out_reason = fmt::format(format_str, log_entry_name, new_part_name); return false; /** When the corresponding action is completed, then `isNotCoveredByFuturePart` next time, will succeed, @@ -992,8 +995,10 @@ bool ReplicatedMergeTreeQueue::isNotCoveredByFuturePartsImpl(const String & new_ if (future_part.contains(result_part)) { - out_reason = "Not executing log entry for part " + new_part_name + " because it is covered by part " - + future_part_elem.first + " that is currently executing"; + const char * format_str = "Not executing log entry {} for part {} " + "because it is covered by part {} that is currently executing."; + LOG_TRACE(log, format_str, log_entry_name, new_part_name, future_part_elem.first); + out_reason = fmt::format(format_str, log_entry_name, new_part_name, future_part_elem.first); return false; } } @@ -1005,7 +1010,7 @@ bool ReplicatedMergeTreeQueue::addFuturePartIfNotCoveredByThem(const String & pa { std::lock_guard lock(state_mutex); - if (isNotCoveredByFuturePartsImpl(part_name, reject_reason, lock)) + if (isNotCoveredByFuturePartsImpl(entry.znode_name, part_name, reject_reason, lock)) { CurrentlyExecuting::setActualPartName(entry, part_name, *this); return true; @@ -1030,12 +1035,8 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry( { for (const String & new_part_name : entry.getBlockingPartNames()) { - if (!isNotCoveredByFuturePartsImpl(new_part_name, out_postpone_reason, state_lock)) - { - if (!out_postpone_reason.empty()) - LOG_DEBUG(log, out_postpone_reason); + if (!isNotCoveredByFuturePartsImpl(entry.znode_name, new_part_name, out_postpone_reason, state_lock)) return false; - } } } @@ -1051,10 +1052,11 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry( { if (future_parts.count(name)) { - String reason = "Not merging into part " + entry.new_part_name - + " because part " + name + " is not ready yet (log entry for that part is being processed)."; - LOG_TRACE(log, reason); - out_postpone_reason = reason; + const char * format_str = "Not executing log entry {} of type {} for part {} " + "because part {} is not ready yet (log entry for that part is being processed)."; + LOG_TRACE(log, format_str, entry.znode_name, entry.typeToString(), entry.new_part_name, name); + /// Copy-paste of above because we need structured logging (instead of already formatted message). 
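+                /// (LOG_TRACE consumes the raw format string and its arguments directly, while the
+                /// postpone reason needs the already formatted text, hence the extra fmt::format call.)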
+ out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.typeToString(), entry.new_part_name, name); return false; } @@ -1070,9 +1072,9 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry( if (merger_mutator.merges_blocker.isCancelled()) { - String reason = "Not executing log entry for part " + entry.new_part_name + " because merges and mutations are cancelled now."; - LOG_DEBUG(log, reason); - out_postpone_reason = reason; + const char * format_str = "Not executing log entry {} of type {} for part {} because merges and mutations are cancelled now."; + LOG_DEBUG(log, format_str, entry.znode_name, entry.typeToString(), entry.new_part_name); + out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.typeToString(), entry.new_part_name); return false; } @@ -1094,17 +1096,19 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry( { if (merger_mutator.ttl_merges_blocker.isCancelled()) { - String reason = "Not executing log entry for part " + entry.new_part_name + " because merges with TTL are cancelled now."; - LOG_DEBUG(log, reason); - out_postpone_reason = reason; + const char * format_str = "Not executing log entry {} for part {} because merges with TTL are cancelled now."; + LOG_DEBUG(log, format_str, + entry.znode_name, entry.new_part_name); + out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.new_part_name); return false; } size_t total_merges_with_ttl = data.getTotalMergesWithTTLInMergeList(); if (total_merges_with_ttl >= data_settings->max_number_of_merges_with_ttl_in_pool) { - const char * format_str = "Not executing log entry for part {}" + const char * format_str = "Not executing log entry {} for part {}" " because {} merges with TTL already executing, maximum {}."; - LOG_DEBUG(log, format_str, entry.new_part_name, total_merges_with_ttl, + LOG_DEBUG(log, format_str, entry.znode_name, + entry.new_part_name, total_merges_with_ttl, data_settings->max_number_of_merges_with_ttl_in_pool); out_postpone_reason = fmt::format(format_str, entry.new_part_name, total_merges_with_ttl, @@ -1116,15 +1120,14 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry( if (!ignore_max_size && sum_parts_size_in_bytes > max_source_parts_size) { - const char * format_str = "Not executing log entry {} for part {}" + const char * format_str = "Not executing log entry {} of type {} for part {}" " because source parts size ({}) is greater than the current maximum ({})."; - LOG_DEBUG(log, format_str, + LOG_DEBUG(log, format_str, entry.znode_name, entry.typeToString(), entry.new_part_name, ReadableSize(sum_parts_size_in_bytes), ReadableSize(max_source_parts_size)); - /// Copy-paste of above because we need structured logging (instead of already formatted message). 
-        out_postpone_reason = fmt::format(format_str,
+        out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.typeToString(),
             entry.new_part_name,
             ReadableSize(sum_parts_size_in_bytes),
             ReadableSize(max_source_parts_size));
@@ -1139,9 +1142,9 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
         if (!alter_sequence.canExecuteMetaAlter(entry.alter_version, state_lock))
         {
             int head_alter = alter_sequence.getHeadAlterVersion(state_lock);
-            out_postpone_reason = "Cannot execute alter metadata with version: " + std::to_string(entry.alter_version)
-                + " because another alter " + std::to_string(head_alter)
-                + " must be executed before";
+            const char * format_str = "Cannot execute alter metadata {} with version {} because another alter {} must be executed before";
+            LOG_TRACE(log, format_str, entry.znode_name, entry.alter_version, head_alter);
+            out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.alter_version);
             return false;
         }
     }
@@ -1153,11 +1156,17 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
         {
             int head_alter = alter_sequence.getHeadAlterVersion(state_lock);
             if (head_alter == entry.alter_version)
-                out_postpone_reason = "Cannot execute alter data with version: "
-                    + std::to_string(entry.alter_version) + " because metadata still not altered";
+            {
+                const char * format_str = "Cannot execute alter data {} with version {} because metadata is still not altered";
+                LOG_TRACE(log, format_str, entry.znode_name, entry.alter_version);
+                out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.alter_version);
+            }
             else
-                out_postpone_reason = "Cannot execute alter data with version: " + std::to_string(entry.alter_version)
-                    + " because another alter " + std::to_string(head_alter) + " must be executed before";
+            {
+                const char * format_str = "Cannot execute alter data {} with version {} because another alter {} must be executed before";
+                LOG_TRACE(log, format_str, entry.znode_name, entry.alter_version, head_alter);
+                out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.alter_version, head_alter);
+            }
 
             return false;
         }
@@ -1170,7 +1179,14 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
         /// Deadlock is possible if multiple DROP/REPLACE RANGE entries are executing in parallel and wait each other.
         /// See also removePartProducingOpsInRange(...) and ReplicatedMergeTreeQueue::CurrentlyExecuting.
         if (currently_executing_drop_or_replace_range)
+        {
+
+            const char * format_str = "Not executing log entry {} of type {} for part {} "
+                "because another DROP_RANGE or REPLACE_RANGE entry is currently executing.";
+            LOG_TRACE(log, format_str, entry.znode_name, entry.typeToString(), entry.new_part_name);
+            out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.typeToString(), entry.new_part_name);
             return false;
+        }
     }
 
     return true;
diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
index 93b79c8336c..8036e66b86b 100644
--- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
+++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
@@ -205,6 +205,7 @@ private:
      * Should be called under state_mutex.
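+     * log_entry_name is used only in log and postpone messages, to reference the originating log entry.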
*/ bool isNotCoveredByFuturePartsImpl( + const String & log_entry_name, const String & new_part_name, String & out_reason, std::lock_guard & state_lock) const; diff --git a/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh index d86631513a1..d4344e6e8bd 100755 --- a/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh +++ b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh @@ -97,4 +97,7 @@ timeout $TIMEOUT bash -c thread6 2>&1 | grep "was not completely removed from Zo wait -for i in {0..9}; do $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS alter_table_$i"; done +for i in {0..9}; do + $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS alter_table_$i" & +done +wait From 83220b09566737d7e8e4acc868c1cdf68cfe19e5 Mon Sep 17 00:00:00 2001 From: alexey-milovidov Date: Fri, 30 Oct 2020 16:00:49 +0300 Subject: [PATCH 400/432] Update parseDateTimeBestEffort.cpp --- src/IO/parseDateTimeBestEffort.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/IO/parseDateTimeBestEffort.cpp b/src/IO/parseDateTimeBestEffort.cpp index a5e23e7f697..8a188d22236 100644 --- a/src/IO/parseDateTimeBestEffort.cpp +++ b/src/IO/parseDateTimeBestEffort.cpp @@ -533,7 +533,7 @@ ReturnType parseDateTimeBestEffortImpl( } } - /// If neigher Date nor Time is parsed successfully, it should fail + /// If neither Date nor Time is parsed successfully, it should fail if (!year && !month && !day_of_month && !has_time) return on_error("Cannot read DateTime: neither Date nor Time was parsed successfully", ErrorCodes::CANNOT_PARSE_DATETIME); From 3529100da9a3351a3482151d65044704e9e09d8e Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Fri, 30 Oct 2020 18:12:15 +0300 Subject: [PATCH 401/432] Adjust ignored perf test changes after NUMA binding --- tests/performance/agg_functions_min_max_any.xml | 2 +- tests/performance/arithmetic.xml | 2 +- tests/performance/array_index_low_cardinality_strings.xml | 2 +- tests/performance/codecs_float_insert.xml | 2 +- tests/performance/codecs_int_insert.xml | 2 +- tests/performance/collations.xml | 2 +- tests/performance/column_column_comparison.xml | 2 +- tests/performance/columns_hashing.xml | 2 +- tests/performance/count.xml | 2 +- tests/performance/cpu_synthetic.xml | 2 +- tests/performance/cryptographic_hashes.xml | 2 +- tests/performance/date_parsing.xml | 2 +- tests/performance/decimal_casts.xml | 2 +- tests/performance/empty_string_serialization.xml | 2 +- tests/performance/entropy.xml | 2 +- tests/performance/extract.xml | 2 +- tests/performance/float_formatting.xml | 2 +- tests/performance/functions_with_hash_tables.xml | 2 +- tests/performance/general_purpose_hashes.xml | 2 +- tests/performance/general_purpose_hashes_on_UUID.xml | 2 +- tests/performance/great_circle_dist.xml | 2 +- tests/performance/insert_parallel.xml | 2 +- tests/performance/jit_large_requests.xml | 2 +- tests/performance/jit_small_requests.xml | 2 +- tests/performance/joins_in_memory.xml | 2 +- tests/performance/joins_in_memory_pmj.xml | 2 +- tests/performance/local_replica.xml | 2 +- tests/performance/logical_functions_large.xml | 2 +- tests/performance/logical_functions_medium.xml | 2 +- tests/performance/logical_functions_small.xml | 2 +- tests/performance/materialized_view_parallel_insert.xml | 2 +- tests/performance/math.xml | 2 +- tests/performance/merge_tree_huge_pk.xml | 2 +- tests/performance/merge_tree_many_partitions.xml | 2 +- 
tests/performance/merge_tree_many_partitions_2.xml | 2 +- tests/performance/merge_tree_simple_select.xml | 2 +- tests/performance/mingroupby-orderbylimit1.xml | 2 +- tests/performance/number_formatting_formats.xml | 2 +- tests/performance/order_by_single_column.xml | 2 +- tests/performance/parallel_insert.xml | 2 +- tests/performance/push_down_limit.xml | 2 +- tests/performance/random_string.xml | 2 +- tests/performance/range.xml | 2 +- tests/performance/read_in_order_many_parts.xml | 2 +- tests/performance/redundant_functions_in_order_by.xml | 2 +- tests/performance/removing_group_by_keys.xml | 2 +- tests/performance/select_format.xml | 2 +- tests/performance/set.xml | 2 +- tests/performance/set_index.xml | 2 +- tests/performance/single_fixed_string_groupby.xml | 2 +- tests/performance/string_set.xml | 2 +- tests/performance/string_sort.xml | 2 +- tests/performance/sum_map.xml | 2 +- tests/performance/uniq.xml | 2 +- tests/performance/website.xml | 2 +- 55 files changed, 55 insertions(+), 55 deletions(-) diff --git a/tests/performance/agg_functions_min_max_any.xml b/tests/performance/agg_functions_min_max_any.xml index e564cdcc609..79c9e2c6976 100644 --- a/tests/performance/agg_functions_min_max_any.xml +++ b/tests/performance/agg_functions_min_max_any.xml @@ -1,4 +1,4 @@ - + hits_100m_single diff --git a/tests/performance/arithmetic.xml b/tests/performance/arithmetic.xml index 45f0d62f227..0be61eb5823 100644 --- a/tests/performance/arithmetic.xml +++ b/tests/performance/arithmetic.xml @@ -1,4 +1,4 @@ - + 30000000000 diff --git a/tests/performance/array_index_low_cardinality_strings.xml b/tests/performance/array_index_low_cardinality_strings.xml index 896a5923a9e..bbfea083f0a 100644 --- a/tests/performance/array_index_low_cardinality_strings.xml +++ b/tests/performance/array_index_low_cardinality_strings.xml @@ -1,4 +1,4 @@ - + DROP TABLE IF EXISTS perf_lc_str CREATE TABLE perf_lc_str( str LowCardinality(String), diff --git a/tests/performance/codecs_float_insert.xml b/tests/performance/codecs_float_insert.xml index 8470df7e35c..a7cb5152c09 100644 --- a/tests/performance/codecs_float_insert.xml +++ b/tests/performance/codecs_float_insert.xml @@ -1,5 +1,5 @@ - + 1 diff --git a/tests/performance/codecs_int_insert.xml b/tests/performance/codecs_int_insert.xml index 662df80ae70..caefaba3725 100644 --- a/tests/performance/codecs_int_insert.xml +++ b/tests/performance/codecs_int_insert.xml @@ -1,4 +1,4 @@ - + 1 diff --git a/tests/performance/collations.xml b/tests/performance/collations.xml index 40153a48d07..17b2d36b7e3 100644 --- a/tests/performance/collations.xml +++ b/tests/performance/collations.xml @@ -1,4 +1,4 @@ - + diff --git a/tests/performance/column_column_comparison.xml b/tests/performance/column_column_comparison.xml index dd77ba24043..2b59a65a54b 100644 --- a/tests/performance/column_column_comparison.xml +++ b/tests/performance/column_column_comparison.xml @@ -1,4 +1,4 @@ - + comparison diff --git a/tests/performance/columns_hashing.xml b/tests/performance/columns_hashing.xml index 3ea2e013acc..147bee93a17 100644 --- a/tests/performance/columns_hashing.xml +++ b/tests/performance/columns_hashing.xml @@ -1,4 +1,4 @@ - + hits_10m_single hits_100m_single diff --git a/tests/performance/count.xml b/tests/performance/count.xml index 4b8b00f48db..b75fd4e4df5 100644 --- a/tests/performance/count.xml +++ b/tests/performance/count.xml @@ -1,4 +1,4 @@ - + CREATE TABLE data(k UInt64, v UInt64) ENGINE = MergeTree ORDER BY k INSERT INTO data SELECT number, 1 from numbers(10000000) diff --git 
a/tests/performance/cpu_synthetic.xml b/tests/performance/cpu_synthetic.xml index 00e8b4e86d8..1a3c0737046 100644 --- a/tests/performance/cpu_synthetic.xml +++ b/tests/performance/cpu_synthetic.xml @@ -1,4 +1,4 @@ - + hits_100m_single hits_10m_single diff --git a/tests/performance/cryptographic_hashes.xml b/tests/performance/cryptographic_hashes.xml index 03d275a7bb7..fbe0babd43c 100644 --- a/tests/performance/cryptographic_hashes.xml +++ b/tests/performance/cryptographic_hashes.xml @@ -1,4 +1,4 @@ - + hash_slow diff --git a/tests/performance/date_parsing.xml b/tests/performance/date_parsing.xml index 15d267dbde5..ffe4ffb9799 100644 --- a/tests/performance/date_parsing.xml +++ b/tests/performance/date_parsing.xml @@ -1,4 +1,4 @@ - + hits_100m_single diff --git a/tests/performance/decimal_casts.xml b/tests/performance/decimal_casts.xml index 582672fa30e..d08aec7a0e4 100644 --- a/tests/performance/decimal_casts.xml +++ b/tests/performance/decimal_casts.xml @@ -1,4 +1,4 @@ - + 15G diff --git a/tests/performance/empty_string_serialization.xml b/tests/performance/empty_string_serialization.xml index d82bcf998aa..303283f08c7 100644 --- a/tests/performance/empty_string_serialization.xml +++ b/tests/performance/empty_string_serialization.xml @@ -1,4 +1,4 @@ - + - + expr diff --git a/tests/performance/functions_with_hash_tables.xml b/tests/performance/functions_with_hash_tables.xml index 2e9a88107bd..18697d7fb07 100644 --- a/tests/performance/functions_with_hash_tables.xml +++ b/tests/performance/functions_with_hash_tables.xml @@ -1,4 +1,4 @@ - + select arrayUniq(range(1 + (number % 100) * 10)) from numbers(100000) format Null select arrayDistinct(range(1 + (number % 100) * 10)) from numbers(100000) format Null select arrayEnumerateUniq(range(1 + (number % 100) * 10)) from numbers(100000) format Null diff --git a/tests/performance/general_purpose_hashes.xml b/tests/performance/general_purpose_hashes.xml index cb99b564f17..e37e1c34496 100644 --- a/tests/performance/general_purpose_hashes.xml +++ b/tests/performance/general_purpose_hashes.xml @@ -1,4 +1,4 @@ - + gp_hash_func diff --git a/tests/performance/general_purpose_hashes_on_UUID.xml b/tests/performance/general_purpose_hashes_on_UUID.xml index 9e8de81c1b1..3694e0a38bd 100644 --- a/tests/performance/general_purpose_hashes_on_UUID.xml +++ b/tests/performance/general_purpose_hashes_on_UUID.xml @@ -1,4 +1,4 @@ - + hash_func diff --git a/tests/performance/great_circle_dist.xml b/tests/performance/great_circle_dist.xml index 13f9e6fde56..b5e271ddfa8 100644 --- a/tests/performance/great_circle_dist.xml +++ b/tests/performance/great_circle_dist.xml @@ -1,4 +1,4 @@ - + SELECT count() FROM numbers(1000000) WHERE NOT ignore(greatCircleDistance((rand(1) % 360) * 1. - 180, (number % 150) * 1.2 - 90, (number % 360) + toFloat64(rand(2)) / 4294967296 - 180, (rand(3) % 180) * 1. 
- 90)) diff --git a/tests/performance/insert_parallel.xml b/tests/performance/insert_parallel.xml index 4c0e3f03e58..4ae50dce29a 100644 --- a/tests/performance/insert_parallel.xml +++ b/tests/performance/insert_parallel.xml @@ -1,4 +1,4 @@ - + 4 diff --git a/tests/performance/jit_large_requests.xml b/tests/performance/jit_large_requests.xml index 46328b64af2..6aed7bea544 100644 --- a/tests/performance/jit_large_requests.xml +++ b/tests/performance/jit_large_requests.xml @@ -1,4 +1,4 @@ - + CREATE TABLE jit_test ( a UInt64, diff --git a/tests/performance/jit_small_requests.xml b/tests/performance/jit_small_requests.xml index c9abec0926b..d8f917fb9af 100644 --- a/tests/performance/jit_small_requests.xml +++ b/tests/performance/jit_small_requests.xml @@ -1,4 +1,4 @@ - + WITH bitXor(number, 0x4CF2D2BAAE6DA887) AS x0, diff --git a/tests/performance/joins_in_memory.xml b/tests/performance/joins_in_memory.xml index ccb4f5d0e20..bac7679930f 100644 --- a/tests/performance/joins_in_memory.xml +++ b/tests/performance/joins_in_memory.xml @@ -1,4 +1,4 @@ - + CREATE TABLE ints (i64 Int64, i32 Int32, i16 Int16, i8 Int8) ENGINE = Memory INSERT INTO ints SELECT number AS i64, i64 AS i32, i64 AS i16, i64 AS i8 FROM numbers(10000) diff --git a/tests/performance/joins_in_memory_pmj.xml b/tests/performance/joins_in_memory_pmj.xml index e8d1d80a12b..2b9596e1591 100644 --- a/tests/performance/joins_in_memory_pmj.xml +++ b/tests/performance/joins_in_memory_pmj.xml @@ -1,4 +1,4 @@ - + CREATE TABLE ints (i64 Int64, i32 Int32, i16 Int16, i8 Int8) ENGINE = Memory SET join_algorithm = 'partial_merge' diff --git a/tests/performance/local_replica.xml b/tests/performance/local_replica.xml index 5a979e82449..4162cb631dd 100644 --- a/tests/performance/local_replica.xml +++ b/tests/performance/local_replica.xml @@ -1,3 +1,3 @@ - + select sum(number) from remote('127.0.0.{{1|2}}', numbers_mt(1000000000)) group by bitAnd(number, 1) diff --git a/tests/performance/logical_functions_large.xml b/tests/performance/logical_functions_large.xml index 51478455c45..e696b506ca5 100644 --- a/tests/performance/logical_functions_large.xml +++ b/tests/performance/logical_functions_large.xml @@ -1,4 +1,4 @@ - + 1 diff --git a/tests/performance/logical_functions_medium.xml b/tests/performance/logical_functions_medium.xml index 5e0dbd9b852..be474894b54 100644 --- a/tests/performance/logical_functions_medium.xml +++ b/tests/performance/logical_functions_medium.xml @@ -1,4 +1,4 @@ - + 1 diff --git a/tests/performance/logical_functions_small.xml b/tests/performance/logical_functions_small.xml index 9876e5188d5..3d70ef6811d 100644 --- a/tests/performance/logical_functions_small.xml +++ b/tests/performance/logical_functions_small.xml @@ -1,4 +1,4 @@ - + 1 diff --git a/tests/performance/materialized_view_parallel_insert.xml b/tests/performance/materialized_view_parallel_insert.xml index 1a34f71e7e6..4b71354dec3 100644 --- a/tests/performance/materialized_view_parallel_insert.xml +++ b/tests/performance/materialized_view_parallel_insert.xml @@ -1,4 +1,4 @@ - + hits_10m_single diff --git a/tests/performance/math.xml b/tests/performance/math.xml index afc20bad927..006e33548c9 100644 --- a/tests/performance/math.xml +++ b/tests/performance/math.xml @@ -1,4 +1,4 @@ - + func_slow diff --git a/tests/performance/merge_tree_huge_pk.xml b/tests/performance/merge_tree_huge_pk.xml index f4012315dbe..84e9ef2e41b 100644 --- a/tests/performance/merge_tree_huge_pk.xml +++ b/tests/performance/merge_tree_huge_pk.xml @@ -1,4 +1,4 @@ - + CREATE TABLE huge_pk ENGINE = 
MergeTree ORDER BY ( c001, c002, c003, c004, c005, c006, c007, c008, c009, c010, c011, c012, c013, c014, c015, c016, c017, c018, c019, c020, diff --git a/tests/performance/merge_tree_many_partitions.xml b/tests/performance/merge_tree_many_partitions.xml index 0d640d242ae..2a8a52943a3 100644 --- a/tests/performance/merge_tree_many_partitions.xml +++ b/tests/performance/merge_tree_many_partitions.xml @@ -1,4 +1,4 @@ - + CREATE TABLE bad_partitions (x UInt64) ENGINE = MergeTree PARTITION BY x ORDER BY x INSERT INTO bad_partitions SELECT * FROM numbers(10000) diff --git a/tests/performance/merge_tree_many_partitions_2.xml b/tests/performance/merge_tree_many_partitions_2.xml index 6799153ed65..0b8a4650835 100644 --- a/tests/performance/merge_tree_many_partitions_2.xml +++ b/tests/performance/merge_tree_many_partitions_2.xml @@ -1,4 +1,4 @@ - + CREATE TABLE bad_partitions (a UInt64, b UInt64, c UInt64, d UInt64, e UInt64, f UInt64, g UInt64, h UInt64, i UInt64, j UInt64, k UInt64, l UInt64, m UInt64, n UInt64, o UInt64, p UInt64, q UInt64, r UInt64, s UInt64, t UInt64, u UInt64, v UInt64, w UInt64, x UInt64, y UInt64, z UInt64) ENGINE = MergeTree PARTITION BY x ORDER BY x INSERT INTO bad_partitions (x) SELECT * FROM numbers_mt(3000) diff --git a/tests/performance/merge_tree_simple_select.xml b/tests/performance/merge_tree_simple_select.xml index 93c1e5a3f33..624e924f59a 100644 --- a/tests/performance/merge_tree_simple_select.xml +++ b/tests/performance/merge_tree_simple_select.xml @@ -1,4 +1,4 @@ - + CREATE TABLE simple_mergetree (EventDate Date, x UInt64) ENGINE = MergeTree ORDER BY x INSERT INTO simple_mergetree SELECT number, today() + intDiv(number, 10000000) FROM numbers_mt(100000000) OPTIMIZE TABLE simple_mergetree FINAL diff --git a/tests/performance/mingroupby-orderbylimit1.xml b/tests/performance/mingroupby-orderbylimit1.xml index 8cf401235a9..ba86f2e3368 100644 --- a/tests/performance/mingroupby-orderbylimit1.xml +++ b/tests/performance/mingroupby-orderbylimit1.xml @@ -1,4 +1,4 @@ - + 1 diff --git a/tests/performance/number_formatting_formats.xml b/tests/performance/number_formatting_formats.xml index c4a17b1f133..92e04a62024 100644 --- a/tests/performance/number_formatting_formats.xml +++ b/tests/performance/number_formatting_formats.xml @@ -1,4 +1,4 @@ - + format diff --git a/tests/performance/order_by_single_column.xml b/tests/performance/order_by_single_column.xml index d58f3b5fff1..9b708ea393c 100644 --- a/tests/performance/order_by_single_column.xml +++ b/tests/performance/order_by_single_column.xml @@ -1,4 +1,4 @@ - + sorting comparison diff --git a/tests/performance/parallel_insert.xml b/tests/performance/parallel_insert.xml index b17d0f8bd64..4050e771cb8 100644 --- a/tests/performance/parallel_insert.xml +++ b/tests/performance/parallel_insert.xml @@ -1,4 +1,4 @@ - + hits_10m_single diff --git a/tests/performance/push_down_limit.xml b/tests/performance/push_down_limit.xml index 02cce9539e9..0dcd9335a52 100644 --- a/tests/performance/push_down_limit.xml +++ b/tests/performance/push_down_limit.xml @@ -1,4 +1,4 @@ - + select number from (select number from numbers(10000000) order by -number) limit 10 select number from (select number from numbers_mt(100000000) order by -number) limit 10 diff --git a/tests/performance/random_string.xml b/tests/performance/random_string.xml index 3b714187be3..13ea35ebaba 100644 --- a/tests/performance/random_string.xml +++ b/tests/performance/random_string.xml @@ -1,4 +1,4 @@ - + SELECT count() FROM zeros(10000000) WHERE NOT 
ignore(randomString(10)) SELECT count() FROM zeros(10000000) WHERE NOT ignore(randomString(100)) SELECT count() FROM zeros(100000) WHERE NOT ignore(randomString(1000)) diff --git a/tests/performance/range.xml b/tests/performance/range.xml index 97c7d4c9c3f..48115db875b 100644 --- a/tests/performance/range.xml +++ b/tests/performance/range.xml @@ -1,4 +1,4 @@ - + SELECT range(number % 100) FROM numbers(10000000) FORMAT Null SELECT range(0, number % 100, 1) FROM numbers(10000000) FORMAT Null diff --git a/tests/performance/read_in_order_many_parts.xml b/tests/performance/read_in_order_many_parts.xml index 5329c45bfdd..b45655a6f21 100644 --- a/tests/performance/read_in_order_many_parts.xml +++ b/tests/performance/read_in_order_many_parts.xml @@ -1,4 +1,4 @@ - + 1 1 diff --git a/tests/performance/redundant_functions_in_order_by.xml b/tests/performance/redundant_functions_in_order_by.xml index b259e08c973..894e8dc1917 100644 --- a/tests/performance/redundant_functions_in_order_by.xml +++ b/tests/performance/redundant_functions_in_order_by.xml @@ -1,4 +1,4 @@ - + hits_100m_single diff --git a/tests/performance/removing_group_by_keys.xml b/tests/performance/removing_group_by_keys.xml index 6db641966ec..8f792c7ccc2 100644 --- a/tests/performance/removing_group_by_keys.xml +++ b/tests/performance/removing_group_by_keys.xml @@ -1,4 +1,4 @@ - + hits_10m_single hits_100m_single diff --git a/tests/performance/select_format.xml b/tests/performance/select_format.xml index 5f9c2e3f73b..bbe489c06c6 100644 --- a/tests/performance/select_format.xml +++ b/tests/performance/select_format.xml @@ -1,4 +1,4 @@ - + 1000000 1 diff --git a/tests/performance/set.xml b/tests/performance/set.xml index be39cac77e1..cbbff1f5bb2 100644 --- a/tests/performance/set.xml +++ b/tests/performance/set.xml @@ -1,4 +1,4 @@ - + table_small diff --git a/tests/performance/set_index.xml b/tests/performance/set_index.xml index 657d831f3ee..76f1087a1bf 100644 --- a/tests/performance/set_index.xml +++ b/tests/performance/set_index.xml @@ -1,4 +1,4 @@ - + CREATE TABLE test_in (`a` UInt32) ENGINE = MergeTree() ORDER BY a INSERT INTO test_in SELECT number FROM numbers(500000000) diff --git a/tests/performance/single_fixed_string_groupby.xml b/tests/performance/single_fixed_string_groupby.xml index 013de70c11a..6bf5821707f 100644 --- a/tests/performance/single_fixed_string_groupby.xml +++ b/tests/performance/single_fixed_string_groupby.xml @@ -1,4 +1,4 @@ - + DROP TABLE IF EXISTS perf_lc_fixed_str_groupby CREATE TABLE perf_lc_fixed_str_groupby( a LowCardinality(FixedString(14)), diff --git a/tests/performance/string_set.xml b/tests/performance/string_set.xml index bbbfe2d3c2b..4d128787d1f 100644 --- a/tests/performance/string_set.xml +++ b/tests/performance/string_set.xml @@ -1,4 +1,4 @@ - + diff --git a/tests/performance/string_sort.xml b/tests/performance/string_sort.xml index e7d06d930be..5d859398ece 100644 --- a/tests/performance/string_sort.xml +++ b/tests/performance/string_sort.xml @@ -1,4 +1,4 @@ - + hits_10m_single hits_100m_single diff --git a/tests/performance/sum_map.xml b/tests/performance/sum_map.xml index b732c150220..bc9f9be2a18 100644 --- a/tests/performance/sum_map.xml +++ b/tests/performance/sum_map.xml @@ -1,4 +1,4 @@ - + 1 diff --git a/tests/performance/uniq.xml b/tests/performance/uniq.xml index 334a513ab1d..7a35c6fb704 100644 --- a/tests/performance/uniq.xml +++ b/tests/performance/uniq.xml @@ -1,4 +1,4 @@ - + hits_100m_single diff --git a/tests/performance/website.xml b/tests/performance/website.xml index 
2127a71c55c..9e7c8cdc536 100644 --- a/tests/performance/website.xml +++ b/tests/performance/website.xml @@ -1,4 +1,4 @@ - + hits_10m_single From 38726847fcd1a7da049790cab65c76f9f42ed370 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Fri, 30 Oct 2020 18:16:30 +0300 Subject: [PATCH 402/432] make the test longer --- tests/performance/round_down.xml | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/tests/performance/round_down.xml b/tests/performance/round_down.xml index c309a767843..32263d69fc7 100644 --- a/tests/performance/round_down.xml +++ b/tests/performance/round_down.xml @@ -1,10 +1,7 @@ - - - - SELECT count() FROM zeros(10000000) WHERE NOT ignore(roundDuration(rand() % 65536)) - SELECT count() FROM zeros(10000000) WHERE NOT ignore(roundDown(rand() % 65536, [0, 1, 10, 30, 60, 120, 180, 240, 300, 600, 1200, 1800, 3600, 7200, 18000, 36000])) - SELECT count() FROM zeros(10000000) WHERE NOT ignore(roundAge(rand() % 100)) - SELECT count() FROM zeros(10000000) WHERE NOT ignore(roundDown(rand() % 100, [0, 1, 18, 25, 35, 45, 55])) - SELECT count() FROM numbers(10000000) WHERE NOT ignore(roundDown(rand() % 65536, (SELECT groupArray(number) FROM numbers(65536)))) + SELECT count() FROM zeros (100000000) WHERE NOT ignore(roundDuration(rand() % 65536)) + SELECT count() FROM zeros (100000000) WHERE NOT ignore(roundDown(rand() % 65536, [0, 1, 10, 30, 60, 120, 180, 240, 300, 600, 1200, 1800, 3600, 7200, 18000, 36000])) + SELECT count() FROM zeros (100000000) WHERE NOT ignore(roundAge(rand() % 100)) + SELECT count() FROM zeros (100000000) WHERE NOT ignore(roundDown(rand() % 100, [0, 1, 18, 25, 35, 45, 55])) + SELECT count() FROM numbers(10000000) WHERE NOT ignore(roundDown(rand() % 65536, (SELECT groupArray(number) FROM numbers(65536)))) From ce6c44eb9fdcdde5c6ed77ea30e1d1bd83fb4aa6 Mon Sep 17 00:00:00 2001 From: tavplubix Date: Fri, 30 Oct 2020 18:47:58 +0300 Subject: [PATCH 403/432] Update ReplicatedMergeTreeQueue.cpp --- src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index bf9a94747b6..ef4d7ebc9c8 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -1144,7 +1144,7 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry( int head_alter = alter_sequence.getHeadAlterVersion(state_lock); const char * format_str = "Cannot execute alter metadata {} with version {} because another alter {} must be executed before"; LOG_TRACE(log, format_str, entry.znode_name, entry.alter_version, head_alter); - out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.alter_version); + out_postpone_reason = fmt::format(format_str, entry.znode_name, entry.alter_version, head_alter); return false; } } From 7e76b7b51090c43e3cbd3459688007db2aeb0e35 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Fri, 30 Oct 2020 18:54:38 +0300 Subject: [PATCH 404/432] more fixes --- tests/performance/any_anyLast.xml | 21 ++++--------------- tests/performance/cpu_synthetic.xml | 2 +- tests/performance/decimal_casts.xml | 2 +- tests/performance/format_readable.xml | 6 +++--- .../functions_with_hash_tables.xml | 2 +- .../inserts_arrays_lowcardinality.xml | 2 +- tests/performance/jit_large_requests.xml | 2 +- tests/performance/order_by_read_in_order.xml | 6 +----- tests/performance/parallel_mv.xml | 3 +-- tests/performance/rand.xml | 12 
+---------- tests/performance/random_string.xml | 14 ++++++------- tests/performance/random_string_utf8.xml | 17 +++++++-------- 12 files changed, 29 insertions(+), 60 deletions(-) diff --git a/tests/performance/any_anyLast.xml b/tests/performance/any_anyLast.xml index 8d75d7556be..659df4a22ac 100644 --- a/tests/performance/any_anyLast.xml +++ b/tests/performance/any_anyLast.xml @@ -1,19 +1,6 @@ - - - - 10 - - - - - SELECT any(-1 * (((-2 * (number * -3)) * -4) * -5)) FROM numbers(120000000) - - SELECT anyLast(-1 * (((-2 * (number * -3)) * -4) * -5)) FROM numbers(120000000) - - SELECT any(number * 2) as n, n * 3 FROM numbers(120000000) - - SELECT any(number * round(toInt64(number), -2)) FROM numbers(120000000) - - + SELECT any(-1 * (((-2 * (number * -3)) * -4) * -5)) FROM numbers(500000000) + SELECT anyLast(-1 * (((-2 * (number * -3)) * -4) * -5)) FROM numbers(500000000) + SELECT any(number * 2) as n, n * 3 FROM numbers(500000000) + SELECT any(number * round(toInt64(number), -2)) FROM numbers(500000000) diff --git a/tests/performance/cpu_synthetic.xml b/tests/performance/cpu_synthetic.xml index 1a3c0737046..85a4e54c752 100644 --- a/tests/performance/cpu_synthetic.xml +++ b/tests/performance/cpu_synthetic.xml @@ -67,7 +67,7 @@ PageCharset тоже почти всегда непуст, но его сред SELECT uniqCombined(17)(UserID) FROM hits_100m_single SELECT uniqExact(UserID) FROM hits_10m_single SETTINGS max_threads = 1 -SELECT uniqExact(UserID) FROM hits_10m_single +SELECT uniqExact(UserID) FROM hits_100m_single SELECT RegionID, uniq(UserID) FROM hits_10m_single GROUP BY RegionID SETTINGS max_threads = 1 SELECT RegionID, uniq(UserID) FROM hits_100m_single GROUP BY RegionID diff --git a/tests/performance/decimal_casts.xml b/tests/performance/decimal_casts.xml index d08aec7a0e4..f087d0938c1 100644 --- a/tests/performance/decimal_casts.xml +++ b/tests/performance/decimal_casts.xml @@ -5,7 +5,7 @@ CREATE TABLE t (x UInt64, d32 Decimal32(3), d64 Decimal64(4), d128 Decimal128(5)) ENGINE = Memory - INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(100000000) SETTINGS max_threads = 8 + INSERT INTO t SELECT number AS x, x % 1000000 AS d32, x AS d64, x d128 FROM numbers_mt(200000000) SETTINGS max_threads = 8 DROP TABLE IF EXISTS t SELECT toUInt32(x) y, toDecimal32(y, 1), toDecimal64(y, 5), toDecimal128(y, 6) FROM t FORMAT Null diff --git a/tests/performance/format_readable.xml b/tests/performance/format_readable.xml index 7040b35da2b..5a76ac67333 100644 --- a/tests/performance/format_readable.xml +++ b/tests/performance/format_readable.xml @@ -1,5 +1,5 @@ -SELECT count() FROM numbers(1000000) WHERE NOT ignore(formatReadableSize(number)) -SELECT count() FROM numbers(1000000) WHERE NOT ignore(formatReadableQuantity(number)) -SELECT count() FROM numbers(1000000) WHERE NOT ignore(formatReadableTimeDelta(number)) +SELECT count() FROM numbers(10000000) WHERE NOT ignore(formatReadableSize(number)) +SELECT count() FROM numbers(10000000) WHERE NOT ignore(formatReadableQuantity(number)) +SELECT count() FROM numbers(10000000) WHERE NOT ignore(formatReadableTimeDelta(number)) diff --git a/tests/performance/functions_with_hash_tables.xml b/tests/performance/functions_with_hash_tables.xml index 18697d7fb07..18f73b54e30 100644 --- a/tests/performance/functions_with_hash_tables.xml +++ b/tests/performance/functions_with_hash_tables.xml @@ -2,7 +2,7 @@ select arrayUniq(range(1 + (number % 100) * 10)) from numbers(100000) format Null select arrayDistinct(range(1 + (number % 100) * 10)) from 
numbers(100000) format Null select arrayEnumerateUniq(range(1 + (number % 100) * 10)) from numbers(100000) format Null - select arrayIntersect(range((1 + number % 100)), range(1, (1 + number % 100) + 1)) from numbers(100000) format Null + select arrayIntersect(range((1 + number % 100)), range(1, (1 + number % 100) + 1)) from numbers(1000000) format Null select groupUniqArray(rand() % 100) from numbers(1000 * 1000) group by number / 1000 format Null select entropy(number / 10) from numbers(1000 * 1000) group by number / 1000 format Null diff --git a/tests/performance/inserts_arrays_lowcardinality.xml b/tests/performance/inserts_arrays_lowcardinality.xml index 40617fb9593..505156d4fd5 100644 --- a/tests/performance/inserts_arrays_lowcardinality.xml +++ b/tests/performance/inserts_arrays_lowcardinality.xml @@ -61,7 +61,7 @@ intDiv( number, 4 ) as rowid, groupArray( base64Encode( left( reinterpretAsString( rand64() ), 6) ) ) as arr FROM - numbers(1000000) + numbers(2000000) GROUP BY rowid ) GROUP BY id optimize table lot_of_string_arrays_src diff --git a/tests/performance/jit_large_requests.xml b/tests/performance/jit_large_requests.xml index 6aed7bea544..fe7d4346152 100644 --- a/tests/performance/jit_large_requests.xml +++ b/tests/performance/jit_large_requests.xml @@ -29,7 +29,7 @@ number FROM system.numbers - LIMIT 10000000 + LIMIT 100000000 SELECT diff --git a/tests/performance/order_by_read_in_order.xml b/tests/performance/order_by_read_in_order.xml index bb3a921c862..b91cd14baf4 100644 --- a/tests/performance/order_by_read_in_order.xml +++ b/tests/performance/order_by_read_in_order.xml @@ -1,13 +1,9 @@ - - - - hits_100m_single -SELECT * FROM hits_100m_single ORDER BY CounterID, EventDate LIMIT 100 +SELECT * FROM hits_100m_single ORDER BY CounterID, EventDate LIMIT 1000 SELECT * FROM hits_100m_single ORDER BY CounterID DESC, toStartOfWeek(EventDate) DESC LIMIT 100 SELECT * FROM hits_100m_single ORDER BY CounterID, EventDate, URL LIMIT 100 SELECT * FROM hits_100m_single WHERE CounterID IN (152220, 168777, 149234, 149234) ORDER BY CounterID DESC, EventDate DESC LIMIT 100 diff --git a/tests/performance/parallel_mv.xml b/tests/performance/parallel_mv.xml index ef50d506708..1e9b92c54df 100644 --- a/tests/performance/parallel_mv.xml +++ b/tests/performance/parallel_mv.xml @@ -13,8 +13,7 @@ create materialized view mv_4 engine = MergeTree order by tuple() as select number, toString(number) from main_table where number % 13 != 4; - - insert into main_table select number from numbers(1000000) + insert into main_table select number from numbers(10000000) drop table if exists main_table; drop table if exists mv_1; diff --git a/tests/performance/rand.xml b/tests/performance/rand.xml index ed629e5a2a7..3f46d152206 100644 --- a/tests/performance/rand.xml +++ b/tests/performance/rand.xml @@ -1,19 +1,9 @@ - - - 10000 - - - 20000 - - - - table - zeros(100000000) + zeros(1000000000) diff --git a/tests/performance/random_string.xml b/tests/performance/random_string.xml index 13ea35ebaba..1a740ae077a 100644 --- a/tests/performance/random_string.xml +++ b/tests/performance/random_string.xml @@ -1,9 +1,9 @@ - SELECT count() FROM zeros(10000000) WHERE NOT ignore(randomString(10)) - SELECT count() FROM zeros(10000000) WHERE NOT ignore(randomString(100)) - SELECT count() FROM zeros(100000) WHERE NOT ignore(randomString(1000)) - SELECT count() FROM zeros(10000) WHERE NOT ignore(randomString(10000)) - SELECT count() FROM zeros(10000000) WHERE NOT ignore(randomString(rand() % 10)) - SELECT count() FROM 
zeros(10000000) WHERE NOT ignore(randomString(rand() % 100)) - SELECT count() FROM zeros(1000000) WHERE NOT ignore(randomString(rand() % 1000)) + SELECT count() FROM zeros(100000000) WHERE NOT ignore(randomString(10)) + SELECT count() FROM zeros(100000000) WHERE NOT ignore(randomString(100)) + SELECT count() FROM zeros(1000000) WHERE NOT ignore(randomString(1000)) + SELECT count() FROM zeros(100000) WHERE NOT ignore(randomString(10000)) + SELECT count() FROM zeros(100000000) WHERE NOT ignore(randomString(rand() % 10)) + SELECT count() FROM zeros(100000000) WHERE NOT ignore(randomString(rand() % 100)) + SELECT count() FROM zeros(10000000) WHERE NOT ignore(randomString(rand() % 1000)) diff --git a/tests/performance/random_string_utf8.xml b/tests/performance/random_string_utf8.xml index 0185f519c31..f501cc31c24 100644 --- a/tests/performance/random_string_utf8.xml +++ b/tests/performance/random_string_utf8.xml @@ -1,12 +1,9 @@ - - - - SELECT count() FROM zeros(1000000) WHERE NOT ignore(randomStringUTF8(10)) - SELECT count() FROM zeros(100000) WHERE NOT ignore(randomStringUTF8(100)) - SELECT count() FROM zeros(10000) WHERE NOT ignore(randomStringUTF8(1000)) - SELECT count() FROM zeros(1000) WHERE NOT ignore(randomStringUTF8(10000)) - SELECT count() FROM zeros(1000000) WHERE NOT ignore(randomStringUTF8(rand() % 10)) - SELECT count() FROM zeros(100000) WHERE NOT ignore(randomStringUTF8(rand() % 100)) - SELECT count() FROM zeros(10000) WHERE NOT ignore(randomStringUTF8(rand() % 1000)) + SELECT count() FROM zeros(10000000) WHERE NOT ignore(randomStringUTF8(10)) + SELECT count() FROM zeros(1000000) WHERE NOT ignore(randomStringUTF8(100)) + SELECT count() FROM zeros(100000) WHERE NOT ignore(randomStringUTF8(1000)) + SELECT count() FROM zeros(10000) WHERE NOT ignore(randomStringUTF8(10000)) + SELECT count() FROM zeros(10000000) WHERE NOT ignore(randomStringUTF8(rand() % 10)) + SELECT count() FROM zeros(1000000) WHERE NOT ignore(randomStringUTF8(rand() % 100)) + SELECT count() FROM zeros(100000) WHERE NOT ignore(randomStringUTF8(rand() % 1000)) From 49d3b65cc276aa743d6964bd420d1eba6bc6cfb0 Mon Sep 17 00:00:00 2001 From: olgarev <56617294+olgarev@users.noreply.github.com> Date: Fri, 30 Oct 2020 20:25:05 +0300 Subject: [PATCH 405/432] DOCSUP-3537: Documented the output_format_pretty_row_numbers setting (#16446) * Docs en, ru * Update docs/en/operations/settings/settings.md Co-authored-by: BayoNet * Docs for allow_experimental_bigint_types translated to Russian. Co-authored-by: Olga Revyakina Co-authored-by: BayoNet --- docs/en/operations/settings/settings.md | 31 +++++++++++++++++- docs/ru/operations/settings/settings.md | 42 +++++++++++++++++++++++++ 2 files changed, 72 insertions(+), 1 deletion(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 1efb7cfa8d6..a4bd7d77bfc 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -2148,7 +2148,34 @@ Result: └───────────────┘ ``` -[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) +## output_format_pretty_row_numbers {#output_format_pretty_row_numbers} + +Adds row numbers to output in the [Pretty](../../interfaces/formats.md#pretty) format. + +Possible values: + +- 0 — Output without row numbers. +- 1 — Output with row numbers. + +Default value: `0`. 
+ +**Example** + +Query: + +```sql +SET output_format_pretty_row_numbers = 1; +SELECT TOP 3 name, value FROM system.settings; +``` + +Result: +```text + ┌─name────────────────────┬─value───┐ +1. │ min_compress_block_size │ 65536 │ +2. │ max_compress_block_size │ 1048576 │ +3. │ max_block_size │ 65505 │ + └─────────────────────────┴─────────┘ +``` ## allow_experimental_bigint_types {#allow_experimental_bigint_types} @@ -2160,3 +2187,5 @@ Possible values: - 0 — The bigint data type is disabled. Default value: `0`. + +[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 3192b523d48..700203e36e1 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -1977,6 +1977,48 @@ SELECT range(number) FROM system.numbers LIMIT 5 FORMAT PrettyCompactNoEscapes; └───────────────┘ ``` +## output_format_pretty_row_numbers {#output_format_pretty_row_numbers} + +Включает режим отображения номеров строк для запросов, выводимых в формате [Pretty](../../interfaces/formats.md#pretty). + +Возможные значения: + +- 0 — номера строк не выводятся. +- 1 — номера строк выводятся. + +Значение по умолчанию: `0`. + +**Пример** + +Запрос: + +```sql +SET output_format_pretty_row_numbers = 1; +SELECT TOP 3 name, value FROM system.settings; +``` + +Результат: + +```text + ┌─name────────────────────┬─value───┐ +1. │ min_compress_block_size │ 65536 │ +2. │ max_compress_block_size │ 1048576 │ +3. │ max_block_size │ 65505 │ + └─────────────────────────┴─────────┘ +``` + +## allow_experimental_bigint_types {#allow_experimental_bigint_types} + +Включает или отключает поддержку целочисленных значений, превышающих максимальное значение, допустимое для типа `int`. + +Возможные значения: + +- 1 — большие целочисленные значения поддерживаются. +- 0 — большие целочисленные значения не поддерживаются. + +Значение по умолчанию: `0`. + + ## lock_acquire_timeout {#lock_acquire_timeout} Устанавливает, сколько секунд сервер ожидает возможности выполнить блокировку таблицы. 
From e15a8d5ac0a509f52f7d3d74e2f219cea05fdf17 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov Date: Fri, 30 Oct 2020 21:03:07 +0300 Subject: [PATCH 406/432] fixup --- tests/performance/parse_engine_file.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance/parse_engine_file.xml b/tests/performance/parse_engine_file.xml index dacd73f5364..f876f314a90 100644 --- a/tests/performance/parse_engine_file.xml +++ b/tests/performance/parse_engine_file.xml @@ -2,7 +2,7 @@ CREATE TABLE IF NOT EXISTS table_{format} ENGINE = File({format}) AS test.hits - INSERT INTO table_{format} SELECT * FROM test.hits LIMIT 100000 + INSERT INTO table_{format} SELECT * FROM test.hits LIMIT 200000 From 5bab7a5bc72ee8bfe7d79f283f265aa0e1d87bb1 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Fri, 30 Oct 2020 21:16:10 +0300 Subject: [PATCH 407/432] Add current_database into query_thread_log --- src/Common/ThreadStatus.h | 2 +- src/Interpreters/QueryThreadLog.cpp | 2 + src/Interpreters/QueryThreadLog.h | 2 + src/Interpreters/ThreadStatusExt.cpp | 5 ++- ...01547_query_log_current_database.reference | 2 + .../01547_query_log_current_database.sql | 37 +++++++++++++++++++ 6 files changed, 47 insertions(+), 3 deletions(-) create mode 100644 tests/queries/0_stateless/01547_query_log_current_database.reference create mode 100644 tests/queries/0_stateless/01547_query_log_current_database.sql diff --git a/src/Common/ThreadStatus.h b/src/Common/ThreadStatus.h index 7d85059f23e..820ea449d66 100644 --- a/src/Common/ThreadStatus.h +++ b/src/Common/ThreadStatus.h @@ -172,7 +172,7 @@ protected: void finalizeQueryProfiler(); - void logToQueryThreadLog(QueryThreadLog & thread_log); + void logToQueryThreadLog(QueryThreadLog & thread_log, const String & current_database); void assertState(const std::initializer_list & permitted_states, const char * description = nullptr) const; diff --git a/src/Interpreters/QueryThreadLog.cpp b/src/Interpreters/QueryThreadLog.cpp index 2ecb03d622a..8fea360085b 100644 --- a/src/Interpreters/QueryThreadLog.cpp +++ b/src/Interpreters/QueryThreadLog.cpp @@ -38,6 +38,7 @@ Block QueryThreadLogElement::createBlock() {std::make_shared(), "thread_name"}, {std::make_shared(), "thread_id"}, {std::make_shared(), "master_thread_id"}, + {std::make_shared(), "current_database"}, {std::make_shared(), "query"}, {std::make_shared(), "is_initial_query"}, @@ -91,6 +92,7 @@ void QueryThreadLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insert(thread_id); columns[i++]->insert(master_thread_id); + columns[i++]->insertData(current_database.data(), current_database.size()); columns[i++]->insertData(query.data(), query.size()); QueryLogElement::appendClientInfo(client_info, columns, i); diff --git a/src/Interpreters/QueryThreadLog.h b/src/Interpreters/QueryThreadLog.h index 715902b29ad..5080bfe6919 100644 --- a/src/Interpreters/QueryThreadLog.h +++ b/src/Interpreters/QueryThreadLog.h @@ -39,7 +39,9 @@ struct QueryThreadLogElement UInt64 thread_id{}; UInt64 master_thread_id{}; + String current_database; String query; + ClientInfo client_info; std::shared_ptr profile_counters; diff --git a/src/Interpreters/ThreadStatusExt.cpp b/src/Interpreters/ThreadStatusExt.cpp index 7f29cfc7e5c..2ce98819a44 100644 --- a/src/Interpreters/ThreadStatusExt.cpp +++ b/src/Interpreters/ThreadStatusExt.cpp @@ -243,7 +243,7 @@ void ThreadStatus::finalizePerformanceCounters() const auto & settings = query_context->getSettingsRef(); if (settings.log_queries && settings.log_query_threads) if 
(auto thread_log = global_context->getQueryThreadLog())
-                logToQueryThreadLog(*thread_log);
+                logToQueryThreadLog(*thread_log, query_context->getCurrentDatabase());
         }
     }
     catch (...)
@@ -322,7 +322,7 @@ void ThreadStatus::detachQuery(bool exit_if_already_detached, bool thread_exits)
 #endif
 }
 
-void ThreadStatus::logToQueryThreadLog(QueryThreadLog & thread_log)
+void ThreadStatus::logToQueryThreadLog(QueryThreadLog & thread_log, const String & current_database)
 {
     QueryThreadLogElement elem;
 
@@ -350,6 +350,7 @@ void ThreadStatus::logToQueryThreadLog(QueryThreadLog & thread_log)
 
     elem.thread_name = getThreadName();
     elem.thread_id = thread_id;
+    elem.current_database = current_database;
     if (thread_group)
     {
         {
diff --git a/tests/queries/0_stateless/01547_query_log_current_database.reference b/tests/queries/0_stateless/01547_query_log_current_database.reference
new file mode 100644
index 00000000000..6ed281c757a
--- /dev/null
+++ b/tests/queries/0_stateless/01547_query_log_current_database.reference
@@ -0,0 +1,2 @@
+1
+1
diff --git a/tests/queries/0_stateless/01547_query_log_current_database.sql b/tests/queries/0_stateless/01547_query_log_current_database.sql
new file mode 100644
index 00000000000..c0ad22163ba
--- /dev/null
+++ b/tests/queries/0_stateless/01547_query_log_current_database.sql
@@ -0,0 +1,37 @@
+--
+-- This is a cleaner approach for writing a test that relies on system.query_log/query_thread_log.
+--
+-- It uses the current database, and since clickhouse-test will generate a
+-- random database for each run, you can run the test multiple times without
+-- worrying about overlaps.
+--
+-- There is still an event_date/event_time filter for better performance
+-- (even though this is not relevant for runs on CI)
+--
+
+set log_query_threads=1;
+set log_queries_min_type='QUERY_FINISH';
+set log_queries=1;
+select '01547_query_log_current_database' from system.one format Null;
+set log_queries=0;
+set log_query_threads=0;
+
+system flush logs;
+
+select count()
+from system.query_log
+where
+    query like '%01547_query_log_current_database%'
+    and current_database = currentDatabase()
+    and event_date = today()
+    and event_time >= now() - interval 1 minute;
+
+-- at least two threads for processing
+-- (but one just waits for another, sigh)
+select count() == 2
+from system.query_thread_log
+where
+    query like '%01547_query_log_current_database%'
+    and current_database = currentDatabase()
+    and event_date = today()
+    and event_time >= now() - interval 1 minute;

From ccddf5e9cfcdb977c2c777b4bbd25d584b612e03 Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Fri, 30 Oct 2020 22:56:11 +0300
Subject: [PATCH 408/432] Add 01547_query_log_current_database into arcadia_skip_list

In an attempt to fix "Yandex synchronization check (only for Yandex
employees)"
---
 tests/queries/0_stateless/arcadia_skip_list.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/queries/0_stateless/arcadia_skip_list.txt b/tests/queries/0_stateless/arcadia_skip_list.txt
index 65cdca80485..928ddb4cf97 100644
--- a/tests/queries/0_stateless/arcadia_skip_list.txt
+++ b/tests/queries/0_stateless/arcadia_skip_list.txt
@@ -155,3 +155,4 @@
 01509_dictionary_preallocate
 01526_max_untracked_memory
 01530_drop_database_atomic_sync
+01547_query_log_current_database

From 067a3f9814186b62c3cd60b97adbee582c32b1b5 Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Sat, 31 Oct 2020 01:23:28 +0300
Subject: [PATCH 409/432] Fix 01541_max_memory_usage_for_user flakiness

---
 .../queries/0_stateless/01541_max_memory_usage_for_user.sh | 7 ++++++-
 1 file changed,
6 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01541_max_memory_usage_for_user.sh b/tests/queries/0_stateless/01541_max_memory_usage_for_user.sh index 945f202a803..ecae442b134 100755 --- a/tests/queries/0_stateless/01541_max_memory_usage_for_user.sh +++ b/tests/queries/0_stateless/01541_max_memory_usage_for_user.sh @@ -35,7 +35,8 @@ function execute_tcp_one_session() # one user's query in background (to avoid resetting max_memory_usage_for_user) # --max_block_size=1 to make it killable (check the state each 1 second, 1 row) # (the test takes ~40 seconds in debug build, so 60 seconds is ok) -${CLICKHOUSE_CLIENT} --max_block_size=1 --format Null -q 'SELECT sleepEachRow(1) FROM numbers(600)' & +query_id=$$-$RANDOM-$SECONDS +${CLICKHOUSE_CLIENT} --max_block_size=1 --format Null --query_id $query_id -q 'SELECT sleepEachRow(1) FROM numbers(600)' & # trap sleep_query_pid=$! function cleanup() @@ -43,6 +44,10 @@ function cleanup() echo 'KILL sleep' # if the timeout is not enough, it will trigger a "No such process" error/message kill $sleep_query_pid + # wait for the query to finish + while ${CLICKHOUSE_CLIENT} -q "SELECT query_id FROM system.processes WHERE query_id = '$query_id'" | grep -xq "$query_id"; do + sleep 0.1 + done } trap cleanup EXIT From 3b188921dd6320289eddb108fe7cdcbaeb28537f Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 31 Oct 2020 11:15:27 +0300 Subject: [PATCH 410/432] Fix query_thread_log.query_duration_ms unit There is a problem similar to the one in [1]: getCurrentTimeNanoseconds() returns time using CLOCK_MONOTONIC. [1]: c0e15ba348dae4030bd592ca1635fb3b09043afc ("Fix RealTimeMicroseconds ProfileEvents")
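An editorial note on the commit above: the message is terse, but the likely failure mode (an assumption based on the message and the one-line fix below) is that a CLOCK_MONOTONIC reading was subtracted from a wall-clock query start timestamp, which yields a meaningless duration. A standalone C++ sketch of the two clocks, not ClickHouse code:

```cpp
#include <cstdint>
#include <cstdio>
#include <ctime>

// Read the given POSIX clock in nanoseconds. CLOCK_MONOTONIC counts from an
// arbitrary epoch (often boot time) and is right for measuring durations;
// CLOCK_REALTIME is wall-clock time and is right for event timestamps.
static uint64_t nowNs(clockid_t clock)
{
    struct timespec ts;
    clock_gettime(clock, &ts);
    return static_cast<uint64_t>(ts.tv_sec) * 1000000000ULL + ts.tv_nsec;
}

int main()
{
    // Mixing the two clocks produces a nonsense "duration" (it may even
    // wrap around, since the two epochs are unrelated):
    uint64_t start_wall = nowNs(CLOCK_REALTIME);
    uint64_t end_mono = nowNs(CLOCK_MONOTONIC);
    std::printf("bogus delta: %llu ns\n",
                static_cast<unsigned long long>(end_mono - start_wall));

    // Correct: subtract two readings of the same clock.
    uint64_t t0 = nowNs(CLOCK_MONOTONIC);
    uint64_t t1 = nowNs(CLOCK_MONOTONIC);
    std::printf("duration: %llu ns\n",
                static_cast<unsigned long long>(t1 - t0));
    return 0;
}
```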
--- src/Interpreters/ThreadStatusExt.cpp | 2 +- ...548_query_log_query_execution_ms.reference | 2 ++ .../01548_query_log_query_execution_ms.sql | 28 +++++++++++++++++++ 3 files changed, 31 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/01548_query_log_query_execution_ms.reference create mode 100644 tests/queries/0_stateless/01548_query_log_query_execution_ms.sql diff --git a/src/Interpreters/ThreadStatusExt.cpp b/src/Interpreters/ThreadStatusExt.cpp index 2ce98819a44..ffb9e140ce6 100644 --- a/src/Interpreters/ThreadStatusExt.cpp +++ b/src/Interpreters/ThreadStatusExt.cpp @@ -336,7 +336,7 @@ void ThreadStatus::logToQueryThreadLog(QueryThreadLog & thread_log, const String elem.event_time_microseconds = current_time_microseconds; elem.query_start_time = query_start_time; elem.query_start_time_microseconds = query_start_time_microseconds; - elem.query_duration_ms = (getCurrentTimeNanoseconds() - query_start_time_nanoseconds) / 1000000U; + elem.query_duration_ms = (time_in_nanoseconds(now) - query_start_time_nanoseconds) / 1000000U; elem.read_rows = progress_in.read_rows.load(std::memory_order_relaxed); elem.read_bytes = progress_in.read_bytes.load(std::memory_order_relaxed); diff --git a/tests/queries/0_stateless/01548_query_log_query_execution_ms.reference b/tests/queries/0_stateless/01548_query_log_query_execution_ms.reference new file mode 100644 index 00000000000..6ed281c757a --- /dev/null +++ b/tests/queries/0_stateless/01548_query_log_query_execution_ms.reference @@ -0,0 +1,2 @@ +1 +1 diff --git a/tests/queries/0_stateless/01548_query_log_query_execution_ms.sql b/tests/queries/0_stateless/01548_query_log_query_execution_ms.sql new file mode 100644 index 00000000000..e80e84646be --- /dev/null +++ b/tests/queries/0_stateless/01548_query_log_query_execution_ms.sql @@ -0,0 +1,28 @@ +set log_query_threads=1; +set log_queries_min_type='QUERY_FINISH'; +set log_queries=1; +select '01548_query_log_query_execution_ms', sleep(0.4) format Null; +set log_queries=0; +set log_query_threads=0; + +system flush logs; + +select count() +from system.query_log +where + query like '%01548_query_log_query_execution_ms%' + and current_database = currentDatabase() + and query_duration_ms between 100 and 800 + and event_date = today() + and event_time >= now() - interval 1 minute; + +-- at least two threads for processing +-- (but one just waits for another, sigh) +select count() == 2 +from system.query_thread_log +where + query like '%01548_query_log_query_execution_ms%' + and current_database = currentDatabase() + and query_duration_ms between 100 and 800 + and event_date = today() + and event_time >= now() - interval 1 minute; From fff6b5549eaa508bfceaf3bdd97fca7e4cb7ad4b Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 31 Oct 2020 11:15:27 +0300 Subject: [PATCH 411/432] Hide getCurrentTimeNanoseconds() to avoid possible errors To avoid further errors like in: - c0e15ba348dae4030bd592ca1635fb3b09043afc ("Fix RealTimeMicroseconds ProfileEvents") - c856d02e0f4a519cd2a29ffbe1135196de0bc65a ("Fix query_thread_log.query_duration_ms unit") --- src/Common/ThreadProfileEvents.h | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/Common/ThreadProfileEvents.h b/src/Common/ThreadProfileEvents.h index 9ebb5fbc2c6..d96339add26 100644 --- a/src/Common/ThreadProfileEvents.h +++ b/src/Common/ThreadProfileEvents.h @@ -75,13 +75,6 @@ inline TUInt safeDiff(TUInt prev, TUInt curr) } -inline UInt64 getCurrentTimeNanoseconds(clockid_t clock_type = CLOCK_MONOTONIC) -{ - struct timespec ts; - clock_gettime(clock_type, &ts); - return ts.tv_sec * 1000000000ULL + ts.tv_nsec; -} - struct RUsageCounters { /// In nanoseconds @@ -108,13 +101,13 @@ struct RUsageCounters hard_page_faults = static_cast<UInt64>(rusage.ru_majflt); } - static RUsageCounters current(UInt64 real_time_ = getCurrentTimeNanoseconds()) + static RUsageCounters current() { ::rusage rusage {}; #if !defined(__APPLE__) ::getrusage(RUSAGE_THREAD, &rusage); #endif - return RUsageCounters(rusage, real_time_); + return RUsageCounters(rusage, getClockMonotonic()); } static void incrementProfileEvents(const RUsageCounters & prev, const RUsageCounters & curr, ProfileEvents::Counters & profile_events) @@ -133,6 +126,14 @@ struct RUsageCounters incrementProfileEvents(last_counters, current_counters, profile_events); last_counters = current_counters; } + +private: + static inline UInt64 getClockMonotonic() + { + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return ts.tv_sec * 1000000000ULL + ts.tv_nsec; + } }; // thread_local is disabled in Arcadia, so we have to use a dummy implementation From d466d147e67b63ed65260a90b4d4eec4e7e49bd3 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 31 Oct 2020 14:20:27 +0300 Subject: [PATCH 412/432] Add 01548_query_log_query_execution_ms into arcadia_skip_list In an attempt to fix "Yandex synchronization check (only for Yandex employees)" --- tests/queries/0_stateless/arcadia_skip_list.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/arcadia_skip_list.txt b/tests/queries/0_stateless/arcadia_skip_list.txt index 928ddb4cf97..f5b81c08520 100644 --- a/tests/queries/0_stateless/arcadia_skip_list.txt +++ b/tests/queries/0_stateless/arcadia_skip_list.txt @@ -156,3 +156,4 @@ 01526_max_untracked_memory 01530_drop_database_atomic_sync
01547_query_log_current_database +01548_query_log_query_execution_ms From 8631b31ae0156602fbc21a751908afbf058ea707 Mon Sep 17 00:00:00 2001 From: tavplubix Date: Sat, 31 Oct 2020 14:43:24 +0300 Subject: [PATCH 413/432] Update MySQLGtid.cpp --- src/Core/MySQL/MySQLGtid.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Core/MySQL/MySQLGtid.cpp b/src/Core/MySQL/MySQLGtid.cpp index 6e50998da62..aac3e01369a 100644 --- a/src/Core/MySQL/MySQLGtid.cpp +++ b/src/Core/MySQL/MySQLGtid.cpp @@ -121,7 +121,8 @@ void GTIDSets::update(const GTID & other) void GTIDSet::tryShirnk(GTIDSet & set, unsigned int i, GTIDSet::Interval & current) { - if (i != set.intervals.size() -1) { + if (i != set.intervals.size() -1) + { auto & next = set.intervals[i+1]; if (current.end == next.start) set.tryMerge(i); From 17ecafe452b237e29a0ec7039da0fa77f2fc36c9 Mon Sep 17 00:00:00 2001 From: tavplubix Date: Sat, 31 Oct 2020 14:43:46 +0300 Subject: [PATCH 414/432] Update MySQLGtid.h --- src/Core/MySQL/MySQLGtid.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/MySQL/MySQLGtid.h b/src/Core/MySQL/MySQLGtid.h index cd8cd0a2e98..27aabdafc11 100644 --- a/src/Core/MySQL/MySQLGtid.h +++ b/src/Core/MySQL/MySQLGtid.h @@ -27,7 +27,7 @@ public: void tryMerge(size_t i); - static void tryShirnk(GTIDSet & set, unsigned int i, Interval & current) ; + static void tryShirnk(GTIDSet & set, unsigned int i, Interval & current); }; class GTIDSets From 90dcc135b4043ecca726adc2dc9470ef48a0fba2 Mon Sep 17 00:00:00 2001 From: Anna Shakhova <72564598+annvsh@users.noreply.github.com> Date: Sun, 1 Nov 2020 03:26:14 +0700 Subject: [PATCH 415/432] DOCSUP-3199: Support default arguments for Replicated engine (#16282) * Init commit * Fixed * Fixed * Fixed --- .../mergetree-family/replication.md | 25 +++++++++++++++++++ .../mergetree-family/replication.md | 25 +++++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/docs/en/engines/table-engines/mergetree-family/replication.md b/docs/en/engines/table-engines/mergetree-family/replication.md index 26f79484d67..932facc9ddc 100644 --- a/docs/en/engines/table-engines/mergetree-family/replication.md +++ b/docs/en/engines/table-engines/mergetree-family/replication.md @@ -148,6 +148,31 @@ You can define the parameters explicitly instead of using substitutions. This mi When working with large clusters, we recommend using substitutions because they reduce the probability of error. +You can specify default arguments for the `Replicated` table engine in the server configuration file. For instance: + +```xml +<default_replica_path>/clickhouse/tables/{shard}/{database}/{table}</default_replica_path> +<default_replica_name>{replica}</default_replica_name> +``` + +In this case, you can omit arguments when creating tables: + +``` sql +CREATE TABLE table_name ( + x UInt32 +) ENGINE = ReplicatedMergeTree +ORDER BY x; +``` + +It is equivalent to: + +``` sql +CREATE TABLE table_name ( + x UInt32 +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/{database}/table_name', '{replica}') +ORDER BY x; +``` + Run the `CREATE TABLE` query on each replica. This query creates a new replicated table, or adds a new replica to an existing one. If you add a new replica after the table already contains some data on other replicas, the data will be copied from the other replicas to the new one after running the query. In other words, the new replica syncs itself with the others.
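One practical aside on the section above (not part of the patch): the `{shard}` and `{replica}` substitutions are resolved from the server's macros configuration, and you can inspect what a given server will substitute with a simple query:

```sql
-- Lists the substitution keys and values this server uses in ZooKeeper paths
SELECT macro, substitution FROM system.macros;
```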
diff --git a/docs/ru/engines/table-engines/mergetree-family/replication.md b/docs/ru/engines/table-engines/mergetree-family/replication.md index 426c456ba3a..6d3930e33ce 100644 --- a/docs/ru/engines/table-engines/mergetree-family/replication.md +++ b/docs/ru/engines/table-engines/mergetree-family/replication.md @@ -149,6 +149,31 @@ CREATE TABLE table_name При работе с большими кластерами мы рекомендуем использовать подстановки, они уменьшают вероятность ошибки. +Можно указать аргументы по умолчанию для движка реплицируемых таблиц в файле конфигурации сервера. + +```xml +<default_replica_path>/clickhouse/tables/{shard}/{database}/{table}</default_replica_path> +<default_replica_name>{replica}</default_replica_name> +``` + +В этом случае можно опустить аргументы при создании таблиц: + +``` sql +CREATE TABLE table_name ( + x UInt32 +) ENGINE = ReplicatedMergeTree +ORDER BY x; +``` + +Это будет эквивалентно следующему запросу: + +``` sql +CREATE TABLE table_name ( + x UInt32 +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/{database}/table_name', '{replica}') +ORDER BY x; +``` + Выполните запрос `CREATE TABLE` на каждой реплике. Запрос создаёт новую реплицируемую таблицу, или добавляет новую реплику к имеющимся. Если вы добавляете новую реплику после того, как таблица на других репликах уже содержит некоторые данные, то после выполнения запроса, данные на новую реплику будут скачаны с других реплик. То есть, новая реплика синхронизирует себя с остальными. From 643ee052885d5ec4fd9eb37aef83ee907b4df4e7 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Sun, 1 Nov 2020 16:02:09 +0800 Subject: [PATCH 416/432] add integration test --- .../__init__.py | 0 .../configs/zookeeper_config.xml | 28 ++++++++++++ .../test.py | 44 +++++++++++++++++++ 3 files changed, 72 insertions(+) create mode 100644 tests/integration/test_fetch_partition_with_outdated_parts/__init__.py create mode 100644 tests/integration/test_fetch_partition_with_outdated_parts/configs/zookeeper_config.xml create mode 100644 tests/integration/test_fetch_partition_with_outdated_parts/test.py diff --git a/tests/integration/test_fetch_partition_with_outdated_parts/__init__.py b/tests/integration/test_fetch_partition_with_outdated_parts/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_fetch_partition_with_outdated_parts/configs/zookeeper_config.xml b/tests/integration/test_fetch_partition_with_outdated_parts/configs/zookeeper_config.xml new file mode 100644 index 00000000000..b2b0667ebbf --- /dev/null +++ b/tests/integration/test_fetch_partition_with_outdated_parts/configs/zookeeper_config.xml @@ -0,0 +1,28 @@ +<yandex> + <zookeeper> + <node> + <host>zoo1</host> + <port>2181</port> + </node> + <node> + <host>zoo2</host> + <port>2181</port> + </node> + <node> + <host>zoo3</host> + <port>2181</port> + </node> + </zookeeper> + + <zookeeper2> + <node> + <host>zoo1</host> + <port>2181</port> + </node> + <node> + <host>zoo2</host> + <port>2181</port> + </node> + </zookeeper2> +</yandex> + diff --git a/tests/integration/test_fetch_partition_with_outdated_parts/test.py b/tests/integration/test_fetch_partition_with_outdated_parts/test.py new file mode 100644 index 00000000000..08d5e53e41e --- /dev/null +++ b/tests/integration/test_fetch_partition_with_outdated_parts/test.py @@ -0,0 +1,44 @@ +from __future__ import print_function +from helpers.cluster import ClickHouseCluster +from helpers.client import QueryRuntimeException +import helpers +import pytest + + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance("node", main_configs=["configs/zookeeper_config.xml"], with_zookeeper=True) + + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + + yield cluster + finally: + cluster.shutdown() + + +def test_fetch_partition_with_outdated_parts(start_cluster): + node.query( + "CREATE TABLE simple (date Date, id
UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', 'node') ORDER BY tuple() PARTITION BY date;" + ) + node.query("INSERT INTO simple VALUES ('2020-08-27', 1)") + + node.query( + "CREATE TABLE simple2 (date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/1/simple', 'node') ORDER BY tuple() PARTITION BY date;" + ) + node.query("INSERT INTO simple2 VALUES ('2020-08-27', 2)") + node.query("INSERT INTO simple2 VALUES ('2020-08-27', 3)") + node.query("OPTIMIZE TABLE simple2 FINAL") + + # until now both tables will have the same part + + node.query( + "ALTER TABLE simple2 FETCH PARTITION '2020-08-27' FROM 'zookeeper2:/clickhouse/tables/0/simple';" + ) + + node.query("ALTER TABLE simple2 ATTACH PARTITION '2020-08-27';") + + assert node.query("SELECT id FROM simple2 order by id").strip() == "1\n2\n3" From 4adef402e6ae82823518712f4901d93093df027a Mon Sep 17 00:00:00 2001 From: Denis Zhuravlev Date: Sun, 1 Nov 2020 12:14:25 -0400 Subject: [PATCH 417/432] Doc/multiple_leaders --- docs/en/development/architecture.md | 2 +- docs/en/operations/system-tables/replicas.md | 4 ++-- docs/ru/development/architecture.md | 2 +- docs/ru/operations/system-tables/replicas.md | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/en/development/architecture.md b/docs/en/development/architecture.md index bea75140606..b4f53207a41 100644 --- a/docs/en/development/architecture.md +++ b/docs/en/development/architecture.md @@ -189,7 +189,7 @@ Replication is implemented in the `ReplicatedMergeTree` storage engine. The path Replication uses an asynchronous multi-master scheme. You can insert data into any replica that has a session with `ZooKeeper`, and data is replicated to all other replicas asynchronously. Because ClickHouse doesn’t support UPDATEs, replication is conflict-free. As there is no quorum acknowledgment of inserts, just-inserted data might be lost if one node fails. -Metadata for replication is stored in ZooKeeper. There is a replication log that lists what actions to do. Actions are: get part; merge parts; drop a partition, and so on. Each replica copies the replication log to its queue and then executes the actions from the queue. For example, on insertion, the “get the part” action is created in the log, and every replica downloads that part. Merges are coordinated between replicas to get byte-identical results. All parts are merged in the same way on all replicas. It is achieved by electing one replica as the leader, and that replica initiates merges and writes “merge parts” actions to the log. +Metadata for replication is stored in ZooKeeper. There is a replication log that lists what actions to do. Actions are: get part; merge parts; drop a partition, and so on. Each replica copies the replication log to its queue and then executes the actions from the queue. For example, on insertion, the “get the part” action is created in the log, and every replica downloads that part. Merges are coordinated between replicas to get byte-identical results. All parts are merged in the same way on all replicas. One of the leaders initiates a new merge first and writes “merge parts” actions to the log. Multiple replicas can be leaders at the same time. A replica can be prevented from becoming a leader using the `merge_tree` setting `replicated_can_become_leader`. The leaders are responsible for scheduling background merges. Replication is physical: only compressed parts are transferred between nodes, not queries. 
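As an editorial aside on the paragraph above (not part of the patch): the `replicated_can_become_leader` setting it mentions is applied per table. A minimal sketch of its usage, with the table name and ZooKeeper path as placeholders:

```sql
-- This replica never schedules background merges itself; it still accepts
-- reads and writes, and executes merges assigned by the leader replicas.
CREATE TABLE example_table
(
    x UInt32
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/example_table', '{replica}')
ORDER BY x
SETTINGS replicated_can_become_leader = 0;
```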
Merges are processed on each replica independently in most cases to lower the network costs by avoiding network amplification. Large merged parts are sent over the network only in cases of significant replication lag. diff --git a/docs/en/operations/system-tables/replicas.md b/docs/en/operations/system-tables/replicas.md index f31f7fd27ea..8da68d2d2ab 100644 --- a/docs/en/operations/system-tables/replicas.md +++ b/docs/en/operations/system-tables/replicas.md @@ -53,9 +53,9 @@ Columns: - `table` (`String`) - Table name - `engine` (`String`) - Table engine name - `is_leader` (`UInt8`) - Whether the replica is the leader. - Only one replica at a time can be the leader. The leader is responsible for selecting background merges to perform. + Multiple replicas can be leaders at the same time. A replica can be prevented from becoming a leader using the `merge_tree` setting `replicated_can_become_leader`. The leaders are responsible for scheduling background merges. Note that writes can be performed to any replica that is available and has a session in ZK, regardless of whether it is a leader. -- `can_become_leader` (`UInt8`) - Whether the replica can be elected as a leader. +- `can_become_leader` (`UInt8`) - Whether the replica can be a leader. - `is_readonly` (`UInt8`) - Whether the replica is in read-only mode. This mode is turned on if the config doesn’t have sections with ZooKeeper, if an unknown error occurred when reinitializing sessions in ZooKeeper, and during session reinitialization in ZooKeeper. - `is_session_expired` (`UInt8`) - the session with ZooKeeper has expired. Basically the same as `is_readonly`. diff --git a/docs/ru/development/architecture.md b/docs/ru/development/architecture.md index 0d8ddb6f795..53c007e078f 100644 --- a/docs/ru/development/architecture.md +++ b/docs/ru/development/architecture.md @@ -190,7 +190,7 @@ ClickHouse имеет сильную типизацию, поэтому нет Репликация использует асинхронную multi-master схему. Вы можете вставить данные в любую реплику, которая имеет открытую сессию в `ZooKeeper`, и данные реплицируются на все другие реплики асинхронно. Поскольку ClickHouse не поддерживает UPDATE, репликация исключает конфликты (conflict-free replication). Поскольку подтверждение вставок кворумом не реализовано, только что вставленные данные могут быть потеряны в случае сбоя одного узла. -Метаданные для репликации хранятся в `ZooKeeper`. Существует журнал репликации, в котором перечислены действия, которые необходимо выполнить. Среди этих действий: получить часть (get the part); объединить части (merge parts); удалить партицию (drop a partition) и так далее. Каждая реплика копирует журнал репликации в свою очередь, а затем выполняет действия из очереди. Например, при вставке в журнале создается действие «получить часть» (get the part), и каждая реплика загружает эту часть. Слияния координируются между репликами, чтобы получить идентичные до байта результаты. Все части объединяются одинаково на всех репликах. Это достигается путем выбора одной реплики в качестве лидера, и эта реплика инициирует слияния и записывает действия «слияния частей» в журнал. +Метаданные для репликации хранятся в `ZooKeeper`. Существует журнал репликации, в котором перечислены действия, которые необходимо выполнить. Среди этих действий: получить часть (get the part); объединить части (merge parts); удалить партицию (drop a partition) и так далее. Каждая реплика копирует журнал репликации в свою очередь, а затем выполняет действия из очереди. 
Например, при вставке в журнале создается действие «получить часть» (get the part), и каждая реплика загружает эту часть. Слияния координируются между репликами, чтобы получить идентичные до байта результаты. Все части объединяются одинаково на всех репликах. Одна из реплик-лидеров инициирует новое слияние кусков первой и записывает действия «слияния частей» в журнал. Несколько реплик (или все) могут быть лидерами одновременно. Реплике можно запретить быть лидером с помощью `merge_tree` настройки `replicated_can_become_leader`. Репликация является физической: между узлами передаются только сжатые части, а не запросы. Слияния обрабатываются на каждой реплике независимо, в большинстве случаев, чтобы снизить затраты на сеть, во избежание усиления роли сети. Крупные объединенные части отправляются по сети только в случае значительной задержки репликации. diff --git a/docs/ru/operations/system-tables/replicas.md b/docs/ru/operations/system-tables/replicas.md index bb2e6a550a0..8d4eb60c56a 100644 --- a/docs/ru/operations/system-tables/replicas.md +++ b/docs/ru/operations/system-tables/replicas.md @@ -53,9 +53,9 @@ active_replicas: 2 - `table` (`String`) - имя таблицы. - `engine` (`String`) - имя движка таблицы. - `is_leader` (`UInt8`) - является ли реплика лидером. - В один момент времени, не более одной из реплик является лидером. Лидер отвечает за выбор фоновых слияний, которые следует произвести. + Несколько реплик могут быть лидерами одновременно. Реплике можно запретить быть лидером с помощью `merge_tree` настройки `replicated_can_become_leader`. Лидеры назначают фоновые слияния, которые следует произвести. Замечу, что запись можно осуществлять на любую реплику (доступную и имеющую сессию в ZK), независимо от лидерства. -- `can_become_leader` (`UInt8`) - может ли реплика быть выбрана лидером. +- `can_become_leader` (`UInt8`) - может ли реплика быть лидером. - `is_readonly` (`UInt8`) - находится ли реплика в режиме «только для чтения» Этот режим включается, если в конфиге нет секции с ZK; если при переинициализации сессии в ZK произошла неизвестная ошибка; во время переинициализации сессии с ZK. - `is_session_expired` (`UInt8`) - истекла ли сессия с ZK. В основном, то же самое, что и `is_readonly`. From de3f88c3fd77150eea64c0a81f018a201137972e Mon Sep 17 00:00:00 2001 From: Denny Crane Date: Sun, 1 Nov 2020 12:17:17 -0400 Subject: [PATCH 418/432] Update architecture.md --- docs/en/development/architecture.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/development/architecture.md b/docs/en/development/architecture.md index b4f53207a41..19caa5241b0 100644 --- a/docs/en/development/architecture.md +++ b/docs/en/development/architecture.md @@ -189,7 +189,7 @@ Replication is implemented in the `ReplicatedMergeTree` storage engine. The path Replication uses an asynchronous multi-master scheme. You can insert data into any replica that has a session with `ZooKeeper`, and data is replicated to all other replicas asynchronously. Because ClickHouse doesn’t support UPDATEs, replication is conflict-free. As there is no quorum acknowledgment of inserts, just-inserted data might be lost if one node fails. -Metadata for replication is stored in ZooKeeper. There is a replication log that lists what actions to do. Actions are: get part; merge parts; drop a partition, and so on. Each replica copies the replication log to its queue and then executes the actions from the queue. 
For example, on insertion, the “get the part” action is created in the log, and every replica downloads that part. Merges are coordinated between replicas to get byte-identical results. All parts are merged in the same way on all replicas. One of the leaders initiates a new merge first and writes “merge parts” actions to the log. Multiple replicas can be leaders at the same time. A replica can be prevented from becoming a leader using the `merge_tree` setting `replicated_can_become_leader`. The leaders are responsible for scheduling background merges. +Metadata for replication is stored in ZooKeeper. There is a replication log that lists what actions to do. Actions are: get part; merge parts; drop a partition, and so on. Each replica copies the replication log to its queue and then executes the actions from the queue. For example, on insertion, the “get the part” action is created in the log, and every replica downloads that part. Merges are coordinated between replicas to get byte-identical results. All parts are merged in the same way on all replicas. One of the leaders initiates a new merge first and writes “merge parts” actions to the log. Multiple replicas (or all) can be leaders at the same time. A replica can be prevented from becoming a leader using the `merge_tree` setting `replicated_can_become_leader`. The leaders are responsible for scheduling background merges. Replication is physical: only compressed parts are transferred between nodes, not queries. Merges are processed on each replica independently in most cases to lower the network costs by avoiding network amplification. Large merged parts are sent over the network only in cases of significant replication lag. From af76657855d3e5cb85f038d98452ab885a0ebcfd Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 2 Nov 2020 06:10:24 +0000 Subject: [PATCH 419/432] Bump tornado from 5.1.1 to 6.1 in /docs/tools Bumps [tornado](https://github.com/tornadoweb/tornado) from 5.1.1 to 6.1. 
- [Release notes](https://github.com/tornadoweb/tornado/releases) - [Changelog](https://github.com/tornadoweb/tornado/blob/master/docs/releases.rst) - [Commits](https://github.com/tornadoweb/tornado/compare/v5.1.1...v6.1.0) Signed-off-by: dependabot-preview[bot] --- docs/tools/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/tools/requirements.txt b/docs/tools/requirements.txt index c4559696b16..3c59a601ff2 100644 --- a/docs/tools/requirements.txt +++ b/docs/tools/requirements.txt @@ -33,6 +33,6 @@ singledispatch==3.4.0.3 six==1.15.0 soupsieve==2.0.1 termcolor==1.1.0 -tornado==5.1.1 +tornado==6.1 Unidecode==1.1.1 urllib3==1.25.10 From 962a89843cc30df136458e3fe8d95efe3bd28537 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 2 Nov 2020 12:01:57 +0300 Subject: [PATCH 420/432] Revert "Try fix IfAggCombinator with NullAggCombinator" --- .../AggregateFunctionCount.cpp | 2 +- .../AggregateFunctionCount.h | 2 +- .../AggregateFunctionIf.cpp | 160 ------------------ src/AggregateFunctions/AggregateFunctionIf.h | 4 - .../AggregateFunctionNull.cpp | 2 +- .../AggregateFunctionWindowFunnel.h | 3 +- src/AggregateFunctions/IAggregateFunction.h | 4 +- ...able_type_with_if_agg_combinator.reference | 3 - ...5_nullable_type_with_if_agg_combinator.sql | 6 - 9 files changed, 5 insertions(+), 181 deletions(-) delete mode 100644 tests/queries/0_stateless/01455_nullable_type_with_if_agg_combinator.reference delete mode 100644 tests/queries/0_stateless/01455_nullable_type_with_if_agg_combinator.sql diff --git a/src/AggregateFunctions/AggregateFunctionCount.cpp b/src/AggregateFunctions/AggregateFunctionCount.cpp index 05824947b87..6ea63bedaf0 100644 --- a/src/AggregateFunctions/AggregateFunctionCount.cpp +++ b/src/AggregateFunctions/AggregateFunctionCount.cpp @@ -8,7 +8,7 @@ namespace DB { AggregateFunctionPtr AggregateFunctionCount::getOwnNullAdapter( - const AggregateFunctionPtr &, const DataTypes & types, const Array & params, const AggregateFunctionProperties & /*properties*/) const + const AggregateFunctionPtr &, const DataTypes & types, const Array & params) const { return std::make_shared(types[0], params); } diff --git a/src/AggregateFunctions/AggregateFunctionCount.h b/src/AggregateFunctions/AggregateFunctionCount.h index eb1583df92a..29c5de0021c 100644 --- a/src/AggregateFunctions/AggregateFunctionCount.h +++ b/src/AggregateFunctions/AggregateFunctionCount.h @@ -69,7 +69,7 @@ public: } AggregateFunctionPtr getOwnNullAdapter( - const AggregateFunctionPtr &, const DataTypes & types, const Array & params, const AggregateFunctionProperties & /*properties*/) const override; + const AggregateFunctionPtr &, const DataTypes & types, const Array & params) const override; }; diff --git a/src/AggregateFunctions/AggregateFunctionIf.cpp b/src/AggregateFunctions/AggregateFunctionIf.cpp index 47afddaf7ff..19a175de911 100644 --- a/src/AggregateFunctions/AggregateFunctionIf.cpp +++ b/src/AggregateFunctions/AggregateFunctionIf.cpp @@ -1,7 +1,6 @@ #include #include #include "registerAggregateFunctions.h" -#include "AggregateFunctionNull.h" namespace DB @@ -9,7 +8,6 @@ namespace DB namespace ErrorCodes { - extern const int LOGICAL_ERROR; extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; } @@ -42,164 +40,6 @@ public: } }; -/** There are two cases: for single argument and variadic. - * Code for single argument is much more efficient. 
- */ -template -class AggregateFunctionIfNullUnary final - : public AggregateFunctionNullBase> -{ -private: - size_t num_arguments; - - using Base = AggregateFunctionNullBase>; -public: - - String getName() const override - { - return Base::getName() + "If"; - } - - AggregateFunctionIfNullUnary(AggregateFunctionPtr nested_function_, const DataTypes & arguments, const Array & params) - : Base(std::move(nested_function_), arguments, params), num_arguments(arguments.size()) - { - if (num_arguments == 0) - throw Exception("Aggregate function " + getName() + " require at least one argument", - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - } - - static inline bool singleFilter(const IColumn ** columns, size_t row_num, size_t num_arguments) - { - const IColumn * filter_column = columns[num_arguments - 1]; - if (const ColumnNullable * nullable_column = typeid_cast(filter_column)) - filter_column = nullable_column->getNestedColumnPtr().get(); - - return assert_cast(*filter_column).getData()[row_num]; - } - - void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena * arena) const override - { - const ColumnNullable * column = assert_cast(columns[0]); - const IColumn * nested_column = &column->getNestedColumn(); - if (!column->isNullAt(row_num) && singleFilter(columns, row_num, num_arguments)) - { - this->setFlag(place); - this->nested_function->add(this->nestedPlace(place), &nested_column, row_num, arena); - } - } -}; - -template -class AggregateFunctionIfNullVariadic final - : public AggregateFunctionNullBase> -{ -public: - - String getName() const override - { - return Base::getName() + "If"; - } - - AggregateFunctionIfNullVariadic(AggregateFunctionPtr nested_function_, const DataTypes & arguments, const Array & params) - : Base(std::move(nested_function_), arguments, params), number_of_arguments(arguments.size()) - { - if (number_of_arguments == 1) - throw Exception("Logical error: single argument is passed to AggregateFunctionIfNullVariadic", ErrorCodes::LOGICAL_ERROR); - - if (number_of_arguments > MAX_ARGS) - throw Exception("Maximum number of arguments for aggregate function with Nullable types is " + toString(size_t(MAX_ARGS)), - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - - for (size_t i = 0; i < number_of_arguments; ++i) - is_nullable[i] = arguments[i]->isNullable(); - } - - static inline bool singleFilter(const IColumn ** columns, size_t row_num, size_t num_arguments) - { - return assert_cast(*columns[num_arguments - 1]).getData()[row_num]; - } - - void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena * arena) const override - { - /// This container stores the columns we really pass to the nested function. - const IColumn * nested_columns[number_of_arguments]; - - for (size_t i = 0; i < number_of_arguments; ++i) - { - if (is_nullable[i]) - { - const ColumnNullable & nullable_col = assert_cast(*columns[i]); - if (null_is_skipped && nullable_col.isNullAt(row_num)) - { - /// If at least one column has a null value in the current row, - /// we don't process this row. 
- return; - } - nested_columns[i] = &nullable_col.getNestedColumn(); - } - else - nested_columns[i] = columns[i]; - } - - if (singleFilter(nested_columns, row_num, number_of_arguments)) - { - this->setFlag(place); - this->nested_function->add(this->nestedPlace(place), nested_columns, row_num, arena); - } - } - -private: - using Base = AggregateFunctionNullBase>; - - enum { MAX_ARGS = 8 }; - size_t number_of_arguments = 0; - std::array is_nullable; /// Plain array is better than std::vector due to one indirection less. -}; - - -AggregateFunctionPtr AggregateFunctionIf::getOwnNullAdapter( - const AggregateFunctionPtr & nested_function, const DataTypes & arguments, - const Array & params, const AggregateFunctionProperties & properties) const -{ - bool return_type_is_nullable = !properties.returns_default_when_only_null && getReturnType()->canBeInsideNullable(); - size_t nullable_size = std::count_if(arguments.begin(), arguments.end(), [](const auto & element) { return element->isNullable(); }); - return_type_is_nullable &= nullable_size != 1 || !arguments.back()->isNullable(); /// If only condition is nullable. we should non-nullable type. - bool serialize_flag = return_type_is_nullable || properties.returns_default_when_only_null; - - if (arguments.size() <= 2 && arguments.front()->isNullable()) - { - if (return_type_is_nullable) - { - return std::make_shared>(nested_func, arguments, params); - } - else - { - if (serialize_flag) - return std::make_shared>(nested_func, arguments, params); - else - return std::make_shared>(nested_func, arguments, params); - } - } - else - { - if (return_type_is_nullable) - { - return std::make_shared>(nested_function, arguments, params); - } - else - { - if (serialize_flag) - return std::make_shared>(nested_function, arguments, params); - else - return std::make_shared>(nested_function, arguments, params); - } - } -} - void registerAggregateFunctionCombinatorIf(AggregateFunctionCombinatorFactory & factory) { factory.registerCombinator(std::make_shared()); diff --git a/src/AggregateFunctions/AggregateFunctionIf.h b/src/AggregateFunctions/AggregateFunctionIf.h index d5d2b9be0dd..f04450c9142 100644 --- a/src/AggregateFunctions/AggregateFunctionIf.h +++ b/src/AggregateFunctions/AggregateFunctionIf.h @@ -109,10 +109,6 @@ public: { return nested_func->isState(); } - - AggregateFunctionPtr getOwnNullAdapter( - const AggregateFunctionPtr & nested_function, const DataTypes & arguments, - const Array & params, const AggregateFunctionProperties & properties) const override; }; } diff --git a/src/AggregateFunctions/AggregateFunctionNull.cpp b/src/AggregateFunctions/AggregateFunctionNull.cpp index f584ae1f34c..5e0d6ee6e21 100644 --- a/src/AggregateFunctions/AggregateFunctionNull.cpp +++ b/src/AggregateFunctions/AggregateFunctionNull.cpp @@ -72,7 +72,7 @@ public: assert(nested_function); - if (auto adapter = nested_function->getOwnNullAdapter(nested_function, arguments, params, properties)) + if (auto adapter = nested_function->getOwnNullAdapter(nested_function, arguments, params)) return adapter; /// If applied to aggregate function with -State combinator, we apply -Null combinator to it's nested_function instead of itself. 
diff --git a/src/AggregateFunctions/AggregateFunctionWindowFunnel.h b/src/AggregateFunctions/AggregateFunctionWindowFunnel.h index fe45fec4b76..3297819a9ff 100644 --- a/src/AggregateFunctions/AggregateFunctionWindowFunnel.h +++ b/src/AggregateFunctions/AggregateFunctionWindowFunnel.h @@ -241,8 +241,7 @@ public: } AggregateFunctionPtr getOwnNullAdapter( - const AggregateFunctionPtr & nested_function, const DataTypes & arguments, const Array & params, - const AggregateFunctionProperties & /*properties*/) const override + const AggregateFunctionPtr & nested_function, const DataTypes & arguments, const Array & params) const override { return std::make_shared>(nested_function, arguments, params); } diff --git a/src/AggregateFunctions/IAggregateFunction.h b/src/AggregateFunctions/IAggregateFunction.h index b5a15eb8cbe..4f9552d2345 100644 --- a/src/AggregateFunctions/IAggregateFunction.h +++ b/src/AggregateFunctions/IAggregateFunction.h @@ -33,7 +33,6 @@ using ConstAggregateDataPtr = const char *; class IAggregateFunction; using AggregateFunctionPtr = std::shared_ptr; -struct AggregateFunctionProperties; /** Aggregate functions interface. * Instances of classes with this interface do not contain the data itself for aggregation, @@ -186,8 +185,7 @@ public: * arguments and params are for nested_function. */ virtual AggregateFunctionPtr getOwnNullAdapter( - const AggregateFunctionPtr & /*nested_function*/, const DataTypes & /*arguments*/, - const Array & /*params*/, const AggregateFunctionProperties & /*properties*/) const + const AggregateFunctionPtr & /*nested_function*/, const DataTypes & /*arguments*/, const Array & /*params*/) const { return nullptr; } diff --git a/tests/queries/0_stateless/01455_nullable_type_with_if_agg_combinator.reference b/tests/queries/0_stateless/01455_nullable_type_with_if_agg_combinator.reference deleted file mode 100644 index 77f38b722ce..00000000000 --- a/tests/queries/0_stateless/01455_nullable_type_with_if_agg_combinator.reference +++ /dev/null @@ -1,3 +0,0 @@ -\N Nullable(UInt8) -\N Nullable(UInt8) -0 UInt8 diff --git a/tests/queries/0_stateless/01455_nullable_type_with_if_agg_combinator.sql b/tests/queries/0_stateless/01455_nullable_type_with_if_agg_combinator.sql deleted file mode 100644 index 852660117f5..00000000000 --- a/tests/queries/0_stateless/01455_nullable_type_with_if_agg_combinator.sql +++ /dev/null @@ -1,6 +0,0 @@ --- Value nullable -SELECT anyIf(CAST(number, 'Nullable(UInt8)'), number = 3) AS a, toTypeName(a) FROM numbers(2); --- Value and condition nullable -SELECT anyIf(number, number = 3) AS a, toTypeName(a) FROM (SELECT CAST(number, 'Nullable(UInt8)') AS number FROM numbers(2)); --- Condition nullable -SELECT anyIf(CAST(number, 'UInt8'), number = 3) AS a, toTypeName(a) FROM (SELECT CAST(number, 'Nullable(UInt8)') AS number FROM numbers(2)); From 85ed639f8e74fc992e30b3c618bec5db43386125 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 2 Nov 2020 12:06:17 +0300 Subject: [PATCH 421/432] Added test. 
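An editorial note before the test files: the expected values in the reference below can be checked by hand (a sketch, assuming `T` contains the five rows 1..5 inserted by the test and that `remote('127.0.0.{1,2}', ...)` scans the same local table twice):

```sql
SELECT sumIf(42, (a % 2) = 0) FROM T;   -- rows 2 and 4 match: 2 * 42 = 84
-- the same query via the two-address remote() doubles the result: 168
SELECT sumIf(42, toNullable(1)) FROM T; -- all five rows match: 5 * 42 = 210
-- and via remote(): 420
```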
--- .../0_stateless/01533_sum_if_nullable_bug.reference | 4 ++++ .../queries/0_stateless/01533_sum_if_nullable_bug.sql | 10 ++++++++++ 2 files changed, 14 insertions(+) create mode 100644 tests/queries/0_stateless/01533_sum_if_nullable_bug.reference create mode 100644 tests/queries/0_stateless/01533_sum_if_nullable_bug.sql diff --git a/tests/queries/0_stateless/01533_sum_if_nullable_bug.reference b/tests/queries/0_stateless/01533_sum_if_nullable_bug.reference new file mode 100644 index 00000000000..8414f885299 --- /dev/null +++ b/tests/queries/0_stateless/01533_sum_if_nullable_bug.reference @@ -0,0 +1,4 @@ +84 +168 +210 +420 diff --git a/tests/queries/0_stateless/01533_sum_if_nullable_bug.sql b/tests/queries/0_stateless/01533_sum_if_nullable_bug.sql new file mode 100644 index 00000000000..6b110748fbe --- /dev/null +++ b/tests/queries/0_stateless/01533_sum_if_nullable_bug.sql @@ -0,0 +1,10 @@ +drop table if exists T; +create table T(a Nullable(Int64)) engine = Memory(); +insert into T values (1), (2), (3), (4), (5); +select sumIf(42, (a % 2) = 0) from T; +select sumIf(42, (a % 2) = 0) from remote('127.0.0.{1,2}', currentDatabase(), T); +select sumIf(42, toNullable(1)) from T; +select sumIf(42, toNullable(1)) from remote('127.0.0.{1,2}', currentDatabase(), T); +-- select sumIf(42, toNullable(toInt64(1))) from T; +-- select sumIf(42, toNullable(toInt64(1))) from remote('127.0.0.{1,2}', currentDatabase(), T); +drop table if exists T; From 67daa37a7ffd201d62312fe8de4913b051f2435c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 2 Nov 2020 13:42:47 +0300 Subject: [PATCH 422/432] Add strange C++ code that actually does not change anything --- src/Common/HashTable/HashTable.h | 5 +++-- src/Common/HashTable/StringHashTable.h | 6 ++++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/Common/HashTable/HashTable.h b/src/Common/HashTable/HashTable.h index 897c84fe951..a569b1c15db 100644 --- a/src/Common/HashTable/HashTable.h +++ b/src/Common/HashTable/HashTable.h @@ -4,6 +4,7 @@ #include +#include #include #include @@ -314,8 +315,8 @@ public: zeroValue()->~Cell(); } - Cell * zeroValue() { return reinterpret_cast(&zero_value_storage); } - const Cell * zeroValue() const { return reinterpret_cast(&zero_value_storage); } + Cell * zeroValue() { return std::launder(reinterpret_cast(&zero_value_storage)); } + const Cell * zeroValue() const { return std::launder(reinterpret_cast(&zero_value_storage)); } }; template diff --git a/src/Common/HashTable/StringHashTable.h b/src/Common/HashTable/StringHashTable.h index e316f9b6520..06389825e60 100644 --- a/src/Common/HashTable/StringHashTable.h +++ b/src/Common/HashTable/StringHashTable.h @@ -3,8 +3,10 @@ #include #include +#include #include + using StringKey8 = UInt64; using StringKey16 = DB::UInt128; struct StringKey24 @@ -106,8 +108,8 @@ public: zeroValue()->~Cell(); } - Cell * zeroValue() { return reinterpret_cast(&zero_value_storage); } - const Cell * zeroValue() const { return reinterpret_cast(&zero_value_storage); } + Cell * zeroValue() { return std::launder(reinterpret_cast(&zero_value_storage)); } + const Cell * zeroValue() const { return std::launder(reinterpret_cast(&zero_value_storage)); } using LookupResult = Cell *; using ConstLookupResult = const Cell *; From 8a68ee2547173cd90827659d2bdc300fa3075eb3 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 2 Nov 2020 13:45:37 +0300 Subject: [PATCH 423/432] Whitespaces --- src/IO/ReadWriteBufferFromHTTP.h | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff 
--git a/src/IO/ReadWriteBufferFromHTTP.h b/src/IO/ReadWriteBufferFromHTTP.h index 6ff99690bb4..ee6fcc58ab0 100644 --- a/src/IO/ReadWriteBufferFromHTTP.h +++ b/src/IO/ReadWriteBufferFromHTTP.h @@ -187,7 +187,7 @@ namespace detail session->updateSession(uri_redirect); - istr = call(uri_redirect,response); + istr = call(uri_redirect, response); } try @@ -241,7 +241,8 @@ class UpdatableSession : public UpdatableSessionBase using Parent = UpdatableSessionBase; public: - explicit UpdatableSession(const Poco::URI uri, + explicit UpdatableSession( + const Poco::URI uri, const ConnectionTimeouts & timeouts_, const UInt64 max_redirects_) : Parent(uri, timeouts_, max_redirects_) @@ -260,7 +261,8 @@ class ReadWriteBufferFromHTTP : public detail::ReadWriteBufferFromHTTPBase>; public: - explicit ReadWriteBufferFromHTTP(Poco::URI uri_, + explicit ReadWriteBufferFromHTTP( + Poco::URI uri_, const std::string & method_, OutStreamCallback out_stream_callback_, const ConnectionTimeouts & timeouts, @@ -269,7 +271,8 @@ public: size_t buffer_size_ = DBMS_DEFAULT_BUFFER_SIZE, const HTTPHeaderEntries & http_header_entries_ = {}, const RemoteHostFilter & remote_host_filter_ = {}) - : Parent(std::make_shared(uri_, timeouts, max_redirects), uri_, method_, out_stream_callback_, credentials_, buffer_size_, http_header_entries_, remote_host_filter_) + : Parent(std::make_shared(uri_, timeouts, max_redirects), + uri_, method_, out_stream_callback_, credentials_, buffer_size_, http_header_entries_, remote_host_filter_) { } }; From f3b3025719815a656d363336e5758572b03effb6 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 2 Nov 2020 14:39:27 +0300 Subject: [PATCH 424/432] Ban non comparable types in primary key --- src/Common/ErrorCodes.cpp | 1 + src/Core/Field.h | 8 +++++--- src/Storages/KeyDescription.cpp | 9 +++++++++ .../01548_uncomparable_columns_in_keys.reference | 0 .../0_stateless/01548_uncomparable_columns_in_keys.sql | 9 +++++++++ 5 files changed, 24 insertions(+), 3 deletions(-) create mode 100644 tests/queries/0_stateless/01548_uncomparable_columns_in_keys.reference create mode 100644 tests/queries/0_stateless/01548_uncomparable_columns_in_keys.sql diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index b14c090c848..371cc6be304 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -512,6 +512,7 @@ namespace ErrorCodes extern const int NO_ROW_DELIMITER = 546; extern const int INVALID_RAID_TYPE = 547; extern const int UNKNOWN_VOLUME = 548; + extern const int DATA_TYPE_CANNOT_BE_USED_IN_KEY = 549; extern const int KEEPER_EXCEPTION = 999; extern const int POCO_EXCEPTION = 1000; diff --git a/src/Core/Field.h b/src/Core/Field.h index 8973d106c0b..be01352c168 100644 --- a/src/Core/Field.h +++ b/src/Core/Field.h @@ -26,6 +26,7 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; extern const int LOGICAL_ERROR; extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int LOGICAL_ERROR; } template @@ -767,9 +768,10 @@ T & Field::get() #ifndef NDEBUG // Disregard signedness when converting between int64 types. 
constexpr Field::Types::Which target = TypeToEnum>::value; - assert(target == which - || (isInt64FieldType(target) && isInt64FieldType(which)) - || target == Field::Types::Decimal64 /* DateTime64 fields */); + if (target != which + && (!isInt64FieldType(target) || !isInt64FieldType(which)) + && target != Field::Types::Decimal64 /* DateTime64 fields */) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid Field get from type {} to type {}", Types::toString(which), Types::toString(target)); #endif ValueType * MAY_ALIAS ptr = reinterpret_cast(&storage); diff --git a/src/Storages/KeyDescription.cpp b/src/Storages/KeyDescription.cpp index 533736d19ed..ebf31f02aab 100644 --- a/src/Storages/KeyDescription.cpp +++ b/src/Storages/KeyDescription.cpp @@ -2,10 +2,12 @@ #include #include +#include #include #include #include #include +#include namespace DB @@ -14,6 +16,7 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; + extern const int DATA_TYPE_CANNOT_BE_USED_IN_KEY; } KeyDescription::KeyDescription(const KeyDescription & other) @@ -115,7 +118,13 @@ KeyDescription KeyDescription::getSortingKeyFromAST( } for (size_t i = 0; i < result.sample_block.columns(); ++i) + { result.data_types.emplace_back(result.sample_block.getByPosition(i).type); + if (!result.data_types.back()->isComparable()) + throw Exception(ErrorCodes::DATA_TYPE_CANNOT_BE_USED_IN_KEY, + "Column {} with type {} is not allowed in key expression, it's not comparable", + backQuote(result.sample_block.getByPosition(i).name), result.data_types.back()->getName()); + } return result; } diff --git a/tests/queries/0_stateless/01548_uncomparable_columns_in_keys.reference b/tests/queries/0_stateless/01548_uncomparable_columns_in_keys.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01548_uncomparable_columns_in_keys.sql b/tests/queries/0_stateless/01548_uncomparable_columns_in_keys.sql new file mode 100644 index 00000000000..ff51085f58c --- /dev/null +++ b/tests/queries/0_stateless/01548_uncomparable_columns_in_keys.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS uncomparable_keys; + +CREATE TABLE foo (id UInt64, key AggregateFunction(max, UInt64)) ENGINE MergeTree ORDER BY key; --{serverError 549} + +CREATE TABLE foo (id UInt64, key AggregateFunction(max, UInt64)) ENGINE MergeTree PARTITION BY key; --{serverError 549} + +CREATE TABLE foo (id UInt64, key AggregateFunction(max, UInt64)) ENGINE MergeTree ORDER BY (key) SAMPLE BY key; --{serverError 549} + +DROP TABLE IF EXISTS uncomparable_keys; From 360548180cea9bff067322cf29444191e14a4114 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 2 Nov 2020 14:42:25 +0300 Subject: [PATCH 425/432] Remove redundant include --- src/Storages/KeyDescription.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/KeyDescription.cpp b/src/Storages/KeyDescription.cpp index ebf31f02aab..32df6398d4c 100644 --- a/src/Storages/KeyDescription.cpp +++ b/src/Storages/KeyDescription.cpp @@ -2,7 +2,6 @@ #include #include -#include #include #include #include From 281bf351d2fa86faac9817a1d99ddd554306ecb3 Mon Sep 17 00:00:00 2001 From: Ivan <5627721+abyss7@users.noreply.github.com> Date: Mon, 2 Nov 2020 15:47:12 +0300 Subject: [PATCH 426/432] Remove ANALYZE and AST queries (#16536) --- programs/client/Client.cpp | 2 +- src/Core/Settings.h | 4 +- src/Interpreters/QueryParameterVisitor.cpp | 2 +- src/Interpreters/executeQuery.cpp | 2 +- src/Parsers/ASTExplainQuery.h | 16 ++- src/Parsers/ParserExplainQuery.cpp | 16 +-- src/Parsers/ParserExplainQuery.h 
| 9 -- src/Parsers/ParserQuery.cpp | 2 +- src/Parsers/ParserQuery.h | 6 +- src/Parsers/ParserQueryWithOutput.cpp | 2 +- src/Parsers/ParserQueryWithOutput.h | 10 -- tests/fuzz/ast.dict | 1 - .../0_stateless/00597_push_down_predicate.sql | 61 ++++++------ .../00808_not_optimize_predicate.sql | 4 +- .../0_stateless/00826_cross_to_inner_join.sql | 17 ++-- .../00849_multiple_comma_join_2.sql | 33 +++---- .../0_stateless/00908_analyze_query.sql | 4 +- .../0_stateless/01021_tuple_parser.sql | 4 +- .../01029_early_constant_folding.sql | 12 +-- .../01056_predicate_optimizer_bugs.sql | 19 ++-- .../01076_predicate_optimizer_with_view.sql | 9 +- .../01083_cross_to_inner_with_like.sql | 7 +- .../0_stateless/01259_combinator_distinct.sql | 5 +- ...ize_arithmetic_operations_in_aggr_func.sql | 97 +++++++++---------- .../0_stateless/01300_group_by_other_keys.sql | 21 ++-- .../01300_group_by_other_keys_having.sql | 13 ++- .../01305_duplicate_order_by_and_distinct.sql | 9 +- ...1_aggregate_functions_of_group_by_keys.sql | 17 ++-- ...01321_monotonous_functions_in_order_by.sql | 53 +++++----- .../0_stateless/01322_any_input_optimize.sql | 17 ++-- .../01323_redundant_functions_in_order_by.sql | 25 +++-- .../01324_if_transform_strings_to_enum.sql | 9 +- .../01372_wrong_order_by_removal.sql | 4 +- .../01390_remove_injective_in_uniq.sql | 33 +++---- .../01455_duplicate_distinct_optimization.sql | 29 +++--- 35 files changed, 254 insertions(+), 320 deletions(-) diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index ace509d6691..005fece3277 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1502,7 +1502,7 @@ private: ASTPtr parseQuery(const char * & pos, const char * end, bool allow_multi_statements) { - ParserQuery parser(end, true); + ParserQuery parser(end); ASTPtr res; const auto & settings = context.getSettingsRef(); diff --git a/src/Core/Settings.h b/src/Core/Settings.h index df6cf5fc85d..896fcaaca82 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -331,7 +331,6 @@ class IColumn; M(Bool, calculate_text_stack_trace, 1, "Calculate text stack trace in case of exceptions during query execution. This is the default. It requires symbol lookups that may slow down fuzzing tests when huge amount of wrong queries are executed. In normal cases you should not disable this option.", 0) \ M(Bool, allow_ddl, true, "If it is set to true, then a user is allowed to executed DDL queries.", 0) \ M(Bool, parallel_view_processing, false, "Enables pushing to attached views concurrently instead of sequentially.", 0) \ - M(Bool, enable_debug_queries, false, "Enables debug queries such as AST.", 0) \ M(Bool, enable_unaligned_array_join, false, "Allow ARRAY JOIN with multiple arrays that have different sizes. When this settings is enabled, arrays will be resized to the longest one.", 0) \ M(Bool, optimize_read_in_order, true, "Enable ORDER BY optimization for reading data in corresponding order in MergeTree tables.", 0) \ M(Bool, optimize_aggregation_in_order, false, "Enable GROUP BY optimization for aggregating data in corresponding order in MergeTree tables.", 0) \ @@ -405,7 +404,8 @@ class IColumn; M(Bool, optimize_trivial_insert_select, true, "Optimize trivial 'INSERT INTO table SELECT ... FROM TABLES' query", 0) \ M(Bool, allow_experimental_database_atomic, true, "Obsolete setting, does nothing. 
Will be removed after 2021-02-12", 0) \
     M(Bool, allow_non_metadata_alters, true, "Allow to execute alters which affects not only tables metadata, but also data on disk", 0) \
-    M(Bool, enable_global_with_statement, false, "Propagate WITH statements to UNION queries and all subqueries", 0)
+    M(Bool, enable_global_with_statement, false, "Propagate WITH statements to UNION queries and all subqueries", 0) \
+    M(Bool, enable_debug_queries, false, "Enables debug queries, but is now obsolete", 0)

 // End of COMMON_SETTINGS
 // Please add settings related to formats into the FORMAT_FACTORY_SETTINGS below.
diff --git a/src/Interpreters/QueryParameterVisitor.cpp b/src/Interpreters/QueryParameterVisitor.cpp
index 297bc211712..389948ecc18 100644
--- a/src/Interpreters/QueryParameterVisitor.cpp
+++ b/src/Interpreters/QueryParameterVisitor.cpp
@@ -43,7 +43,7 @@ NameSet analyzeReceiveQueryParams(const std::string & query)
     const char * query_begin = query.data();
     const char * query_end = query.data() + query.size();

-    ParserQuery parser(query_end, false);
+    ParserQuery parser(query_end);
     ASTPtr extract_query_ast = parseQuery(parser, query_begin, query_end, "analyzeReceiveQueryParams", 0, 0);
     QueryParameterVisitor(query_params).visit(extract_query_ast);
     return query_params;
diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp
index a672b58633d..5feff841ca9 100644
--- a/src/Interpreters/executeQuery.cpp
+++ b/src/Interpreters/executeQuery.cpp
@@ -323,7 +323,7 @@ static std::tuple executeQueryImpl(
     const Settings & settings = context.getSettingsRef();

-    ParserQuery parser(end, settings.enable_debug_queries);
+    ParserQuery parser(end);
     ASTPtr ast;
     const char * query_end;
diff --git a/src/Parsers/ASTExplainQuery.h b/src/Parsers/ASTExplainQuery.h
index 0c376e270d4..95a3a362030 100644
--- a/src/Parsers/ASTExplainQuery.h
+++ b/src/Parsers/ASTExplainQuery.h
@@ -19,12 +19,9 @@ public:
         QueryPipeline,  /// 'EXPLAIN PIPELINE ...'
     };

-    ASTExplainQuery(ExplainKind kind_, bool old_syntax_)
-        : kind(kind_), old_syntax(old_syntax_)
-    {
-    }
+    explicit ASTExplainQuery(ExplainKind kind_) : kind(kind_) {}

-    String getID(char delim) const override { return "Explain" + (delim + toString(kind, old_syntax)); }
+    String getID(char delim) const override { return "Explain" + (delim + toString(kind)); }

     ExplainKind getKind() const { return kind; }

     ASTPtr clone() const override
     {
@@ -53,7 +50,7 @@ public:
 protected:
     void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override
     {
-        settings.ostr << (settings.hilite ? hilite_keyword : "") << toString(kind, old_syntax) << (settings.hilite ? hilite_none : "");
+        settings.ostr << (settings.hilite ? hilite_keyword : "") << toString(kind) << (settings.hilite ? hilite_none : "");

         if (ast_settings)
         {
@@ -67,17 +64,16 @@ protected:
 private:
     ExplainKind kind;
-    bool old_syntax;  /// "EXPLAIN AST" -> "AST", "EXPLAIN SYNTAX" -> "ANALYZE"

     ASTPtr query;
     ASTPtr ast_settings;

-    static String toString(ExplainKind kind, bool old_syntax)
+    static String toString(ExplainKind kind)
     {
         switch (kind)
         {
-            case ParsedAST: return old_syntax ? "AST" : "EXPLAIN AST";
-            case AnalyzedSyntax: return old_syntax ? "ANALYZE" : "EXPLAIN SYNTAX";
+            case ParsedAST: return "EXPLAIN AST";
+            case AnalyzedSyntax: return "EXPLAIN SYNTAX";
             case QueryPlan: return "EXPLAIN";
             case QueryPipeline: return "EXPLAIN PIPELINE";
         }
diff --git a/src/Parsers/ParserExplainQuery.cpp b/src/Parsers/ParserExplainQuery.cpp
index c6792d6094b..69281149dc1 100644
--- a/src/Parsers/ParserExplainQuery.cpp
+++ b/src/Parsers/ParserExplainQuery.cpp
@@ -10,26 +10,14 @@ namespace DB
 bool ParserExplainQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
 {
     ASTExplainQuery::ExplainKind kind;
-    bool old_syntax = false;

     ParserKeyword s_ast("AST");
-    ParserKeyword s_analyze("ANALYZE");
     ParserKeyword s_explain("EXPLAIN");
     ParserKeyword s_syntax("SYNTAX");
     ParserKeyword s_pipeline("PIPELINE");
     ParserKeyword s_plan("PLAN");

-    if (enable_debug_queries && s_ast.ignore(pos, expected))
-    {
-        old_syntax = true;
-        kind = ASTExplainQuery::ExplainKind::ParsedAST;
-    }
-    else if (enable_debug_queries && s_analyze.ignore(pos, expected))
-    {
-        old_syntax = true;
-        kind = ASTExplainQuery::ExplainKind::AnalyzedSyntax;
-    }
-    else if (s_explain.ignore(pos, expected))
+    if (s_explain.ignore(pos, expected))
     {
         kind = ASTExplainQuery::QueryPlan;

@@ -45,7 +33,7 @@ bool ParserExplainQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
     else
         return false;

-    auto explain_query = std::make_shared(kind, old_syntax);
+    auto explain_query = std::make_shared(kind);

     {
         ASTPtr settings;
diff --git a/src/Parsers/ParserExplainQuery.h b/src/Parsers/ParserExplainQuery.h
index 224f466c1a1..b58597b9248 100644
--- a/src/Parsers/ParserExplainQuery.h
+++ b/src/Parsers/ParserExplainQuery.h
@@ -8,18 +8,9 @@ namespace DB
 class ParserExplainQuery : public IParserBase
 {
-public:
-    explicit ParserExplainQuery(bool enable_debug_queries_ = false)
-        : enable_debug_queries(enable_debug_queries_)
-    {
-    }
-
 protected:
     const char * getName() const override { return "EXPLAIN"; }
     bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
-
-private:
-    bool enable_debug_queries;
 };

 }
diff --git a/src/Parsers/ParserQuery.cpp b/src/Parsers/ParserQuery.cpp
index 801f36afa14..89ddd1797c0 100644
--- a/src/Parsers/ParserQuery.cpp
+++ b/src/Parsers/ParserQuery.cpp
@@ -26,7 +26,7 @@ namespace DB
 bool ParserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
 {
-    ParserQueryWithOutput query_with_output_p(enable_explain);
+    ParserQueryWithOutput query_with_output_p;
     ParserInsertQuery insert_p(end);
     ParserUseQuery use_p;
     ParserSetQuery set_p;
diff --git a/src/Parsers/ParserQuery.h b/src/Parsers/ParserQuery.h
index e9a2aae29a7..be72a436be8 100644
--- a/src/Parsers/ParserQuery.h
+++ b/src/Parsers/ParserQuery.h
@@ -10,16 +10,12 @@ class ParserQuery : public IParserBase
 {
 private:
     const char * end;
-    bool enable_explain;  /// Allow queries prefixed with AST and ANALYZE for development purposes.

     const char * getName() const override { return "Query"; }
     bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;

 public:
-    ParserQuery(const char * end_, bool enable_explain_ = false)
-        : end(end_),
-        enable_explain(enable_explain_)
-    {}
+    explicit ParserQuery(const char * end_) : end(end_) {}
 };

 }
diff --git a/src/Parsers/ParserQueryWithOutput.cpp b/src/Parsers/ParserQueryWithOutput.cpp
index c2ebf85b709..76a3e8b95f8 100644
--- a/src/Parsers/ParserQueryWithOutput.cpp
+++ b/src/Parsers/ParserQueryWithOutput.cpp
@@ -48,7 +48,7 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
     ParserShowCreateAccessEntityQuery show_create_access_entity_p;
     ParserShowGrantsQuery show_grants_p;
     ParserShowPrivilegesQuery show_privileges_p;
-    ParserExplainQuery explain_p(enable_debug_queries);
+    ParserExplainQuery explain_p;

     ASTPtr query;
diff --git a/src/Parsers/ParserQueryWithOutput.h b/src/Parsers/ParserQueryWithOutput.h
index d0962862c42..110cf923147 100644
--- a/src/Parsers/ParserQueryWithOutput.h
+++ b/src/Parsers/ParserQueryWithOutput.h
@@ -10,19 +10,9 @@ namespace DB
 /// Parse queries supporting [INTO OUTFILE 'file_name'] [FORMAT format_name] [SETTINGS key1 = value1, key2 = value2, ...] suffix.
 class ParserQueryWithOutput : public IParserBase
 {
-public:
-    /// enable_debug_queries flag enables queries 'AST SELECT' and 'ANALYZE SELECT'
-    explicit ParserQueryWithOutput(bool enable_debug_queries_ = false)
-        : enable_debug_queries(enable_debug_queries_)
-    {}
-
 protected:
     const char * getName() const override { return "Query with output"; }
-
     bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
-
-private:
-    bool enable_debug_queries;
 };

 }
diff --git a/tests/fuzz/ast.dict b/tests/fuzz/ast.dict
index af759b6e805..fff02af1c4d 100644
--- a/tests/fuzz/ast.dict
+++ b/tests/fuzz/ast.dict
@@ -8,7 +8,6 @@
 "ALL"
 "ALTER LIVE VIEW"
 "ALTER TABLE"
-"ANALYZE"
 "and"
 "ANTI"
 "any"
diff --git a/tests/queries/0_stateless/00597_push_down_predicate.sql b/tests/queries/0_stateless/00597_push_down_predicate.sql
index 7fd30a82d8d..ea01bba9f4d 100644
--- a/tests/queries/0_stateless/00597_push_down_predicate.sql
+++ b/tests/queries/0_stateless/00597_push_down_predicate.sql
@@ -12,7 +12,6 @@ INSERT INTO test_00597 VALUES('2000-01-01', 1, 'test string 1', 1);
 INSERT INTO test_00597 VALUES('2000-01-01', 2, 'test string 2', 2);

 SET enable_optimize_predicate_expression = 1;
-SET enable_debug_queries = 1;

 SELECT '-------No need for predicate optimization, but still works-------';
 SELECT 1;
@@ -23,115 +22,115 @@ SELECT * FROM test_00597 WHERE id = 1;

 SELECT '-------Forbid push down-------';

 -- ARRAY JOIN
-ANALYZE SELECT count() FROM (SELECT [number] a, [number * 2] b FROM system.numbers LIMIT 1) AS t ARRAY JOIN a, b WHERE NOT ignore(a + b);
+EXPLAIN SYNTAX SELECT count() FROM (SELECT [number] a, [number * 2] b FROM system.numbers LIMIT 1) AS t ARRAY JOIN a, b WHERE NOT ignore(a + b);
 SELECT count() FROM (SELECT [number] a, [number * 2] b FROM system.numbers LIMIT 1) AS t ARRAY JOIN a, b WHERE NOT ignore(a + b);

 -- LEFT JOIN
-ANALYZE SELECT a, b FROM (SELECT 1 AS a) ANY LEFT JOIN (SELECT 1 AS a, 1 AS b) USING (a) WHERE b = 0;
+EXPLAIN SYNTAX SELECT a, b FROM (SELECT 1 AS a) ANY LEFT JOIN (SELECT 1 AS a, 1 AS b) USING (a) WHERE b = 0;
 SELECT a, b FROM (SELECT 1 AS a) ANY LEFT JOIN (SELECT 1 AS a, 1 AS b) USING (a) WHERE b = 0;

 -- RIGHT JOIN
-ANALYZE SELECT a, b FROM (SELECT 1 AS a, 1 as b) ANY RIGHT JOIN (SELECT 1 AS a) USING (a) WHERE b = 0;
+EXPLAIN SYNTAX SELECT a, b FROM (SELECT 1 AS a, 1 as b) ANY RIGHT JOIN (SELECT 1 AS a) USING (a) WHERE b = 0;
 SELECT a, b FROM (SELECT 1 AS a, 1 as b) ANY RIGHT JOIN (SELECT 1 AS a) USING (a) WHERE b = 0;

 -- FULL JOIN
-ANALYZE SELECT a, b FROM (SELECT 1 AS a) ANY FULL JOIN (SELECT 1 AS a, 1 AS b) USING (a) WHERE b = 0;
+EXPLAIN SYNTAX SELECT a, b FROM (SELECT 1 AS a) ANY FULL JOIN (SELECT 1 AS a, 1 AS b) USING (a) WHERE b = 0;
 SELECT a, b FROM (SELECT 1 AS a) ANY FULL JOIN (SELECT 1 AS a, 1 AS b) USING (a) WHERE b = 0;

-ANALYZE SELECT a, b FROM (SELECT 1 AS a, 1 AS b) ANY FULL JOIN (SELECT 1 AS a) USING (a) WHERE b = 0;
+EXPLAIN SYNTAX SELECT a, b FROM (SELECT 1 AS a, 1 AS b) ANY FULL JOIN (SELECT 1 AS a) USING (a) WHERE b = 0;
 SELECT a, b FROM (SELECT 1 AS a) ANY FULL JOIN (SELECT 1 AS a, 1 AS b) USING (a) WHERE b = 0;

 SELECT '-------Need push down-------';

-ANALYZE SELECT toString(value) AS value FROM (SELECT 1 AS value) WHERE value = '1';
+EXPLAIN SYNTAX SELECT toString(value) AS value FROM (SELECT 1 AS value) WHERE value = '1';
 SELECT toString(value) AS value FROM (SELECT 1 AS value) WHERE value = '1';

-ANALYZE SELECT * FROM (SELECT 1 AS id UNION ALL SELECT 2) WHERE id = 1;
+EXPLAIN SYNTAX SELECT * FROM (SELECT 1 AS id UNION ALL SELECT 2) WHERE id = 1;
 SELECT * FROM (SELECT 1 AS id UNION ALL SELECT 2) WHERE id = 1;

-ANALYZE SELECT * FROM (SELECT arrayJoin([1, 2, 3]) AS id) WHERE id = 1;
+EXPLAIN SYNTAX SELECT * FROM (SELECT arrayJoin([1, 2, 3]) AS id) WHERE id = 1;
 SELECT * FROM (SELECT arrayJoin([1, 2, 3]) AS id) WHERE id = 1;

-ANALYZE SELECT id FROM (SELECT arrayJoin([1, 2, 3]) AS id) WHERE id = 1;
+EXPLAIN SYNTAX SELECT id FROM (SELECT arrayJoin([1, 2, 3]) AS id) WHERE id = 1;
 SELECT id FROM (SELECT arrayJoin([1, 2, 3]) AS id) WHERE id = 1;

-ANALYZE SELECT * FROM (SELECT 1 AS id, (SELECT 1) as subquery) WHERE subquery = 1;
+EXPLAIN SYNTAX SELECT * FROM (SELECT 1 AS id, (SELECT 1) as subquery) WHERE subquery = 1;
 SELECT * FROM (SELECT 1 AS id, (SELECT 1) as subquery) WHERE subquery = 1;

 -- Optimize predicate expressions using tables
-ANALYZE SELECT * FROM (SELECT toUInt64(b) AS a, sum(id) AS b FROM test_00597) WHERE a = 3;
+EXPLAIN SYNTAX SELECT * FROM (SELECT toUInt64(b) AS a, sum(id) AS b FROM test_00597) WHERE a = 3;
 SELECT * FROM (SELECT toUInt64(b) AS a, sum(id) AS b FROM test_00597) WHERE a = 3;

-ANALYZE SELECT date, id, name, value FROM (SELECT date, name, value, min(id) AS id FROM test_00597 GROUP BY date, name, value) WHERE id = 1;
+EXPLAIN SYNTAX SELECT date, id, name, value FROM (SELECT date, name, value, min(id) AS id FROM test_00597 GROUP BY date, name, value) WHERE id = 1;
 SELECT date, id, name, value FROM (SELECT date, name, value, min(id) AS id FROM test_00597 GROUP BY date, name, value) WHERE id = 1;

-ANALYZE SELECT * FROM (SELECT toUInt64(b) AS a, sum(id) AS b FROM test_00597 AS table_alias) AS outer_table_alias WHERE outer_table_alias.b = 3;
+EXPLAIN SYNTAX SELECT * FROM (SELECT toUInt64(b) AS a, sum(id) AS b FROM test_00597 AS table_alias) AS outer_table_alias WHERE outer_table_alias.b = 3;
 SELECT * FROM (SELECT toUInt64(b) AS a, sum(id) AS b FROM test_00597 AS table_alias) AS outer_table_alias WHERE outer_table_alias.b = 3;

 -- Optimize predicate expression with asterisk
-ANALYZE SELECT * FROM (SELECT * FROM test_00597) WHERE id = 1;
+EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597) WHERE id = 1;
 SELECT * FROM (SELECT * FROM test_00597) WHERE id = 1;

 -- Optimize predicate expression with asterisk and nested subquery
-ANALYZE SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597)) WHERE id = 1;
+EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597)) WHERE id = 1;
 SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597)) WHERE id = 1;

 -- Optimize predicate expression with qualified asterisk
-ANALYZE SELECT * FROM (SELECT b.* FROM (SELECT * FROM test_00597) AS b) WHERE id = 1;
+EXPLAIN SYNTAX SELECT * FROM (SELECT b.* FROM (SELECT * FROM test_00597) AS b) WHERE id = 1;
 SELECT * FROM (SELECT b.* FROM (SELECT * FROM test_00597) AS b) WHERE id = 1;

 -- Optimize predicate expression without asterisk
-ANALYZE SELECT * FROM (SELECT date, id, name, value FROM test_00597) WHERE id = 1;
+EXPLAIN SYNTAX SELECT * FROM (SELECT date, id, name, value FROM test_00597) WHERE id = 1;
 SELECT * FROM (SELECT date, id, name, value FROM test_00597) WHERE id = 1;

 -- Optimize predicate expression without asterisk and contains nested subquery
-ANALYZE SELECT * FROM (SELECT date, id, name, value FROM (SELECT date, id, name, value FROM test_00597)) WHERE id = 1;
+EXPLAIN SYNTAX SELECT * FROM (SELECT date, id, name, value FROM (SELECT date, id, name, value FROM test_00597)) WHERE id = 1;
 SELECT * FROM (SELECT date, id, name, value FROM (SELECT date, id, name, value FROM test_00597)) WHERE id = 1;

 -- Optimize predicate expression with qualified
-ANALYZE SELECT * FROM (SELECT * FROM test_00597) AS b WHERE b.id = 1;
+EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597) AS b WHERE b.id = 1;
 SELECT * FROM (SELECT * FROM test_00597) AS b WHERE b.id = 1;

 -- Optimize predicate expression with qualified and nested subquery
-ANALYZE SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) AS a) AS b WHERE b.id = 1;
+EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) AS a) AS b WHERE b.id = 1;
 SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) AS a) AS b WHERE b.id = 1;

 -- Optimize predicate expression with aggregate function
-ANALYZE SELECT * FROM (SELECT id, date, min(value) AS value FROM test_00597 GROUP BY id, date) WHERE id = 1;
+EXPLAIN SYNTAX SELECT * FROM (SELECT id, date, min(value) AS value FROM test_00597 GROUP BY id, date) WHERE id = 1;
 SELECT * FROM (SELECT id, date, min(value) AS value FROM test_00597 GROUP BY id, date) WHERE id = 1;

 -- Optimize predicate expression with union all query
-ANALYZE SELECT * FROM (SELECT * FROM test_00597 UNION ALL SELECT * FROM test_00597) WHERE id = 1;
+EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597 UNION ALL SELECT * FROM test_00597) WHERE id = 1;
 SELECT * FROM (SELECT * FROM test_00597 UNION ALL SELECT * FROM test_00597) WHERE id = 1;

 -- Optimize predicate expression with join query
-ANALYZE SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1;
+EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1;
 SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1;

-ANALYZE SELECT * FROM (SELECT toInt8(1) AS id) ANY LEFT JOIN test_00597 USING id WHERE value = 1;
+EXPLAIN SYNTAX SELECT * FROM (SELECT toInt8(1) AS id) ANY LEFT JOIN test_00597 USING id WHERE value = 1;
 SELECT * FROM (SELECT toInt8(1) AS id) ANY LEFT JOIN test_00597 USING id WHERE value = 1;

 -- FIXME: no support for aliased tables for now.
-ANALYZE SELECT b.value FROM (SELECT toInt8(1) AS id) ANY LEFT JOIN test_00597 AS b USING id WHERE value = 1;
+EXPLAIN SYNTAX SELECT b.value FROM (SELECT toInt8(1) AS id) ANY LEFT JOIN test_00597 AS b USING id WHERE value = 1;
 SELECT b.value FROM (SELECT toInt8(1) AS id) ANY LEFT JOIN test_00597 AS b USING id WHERE value = 1;

 -- Optimize predicate expression with join and nested subquery
-ANALYZE SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1;
+EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1;
 SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1;

 -- Optimize predicate expression with join query and qualified
-ANALYZE SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1;
+EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1;
 SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1;

 -- Compatibility test
-ANALYZE SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01');
+EXPLAIN SYNTAX SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01');
 SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01');

-ANALYZE SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) AS a ANY LEFT JOIN (SELECT * FROM test_00597) AS b ON a.id = b.id) WHERE id = 1;
+EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) AS a ANY LEFT JOIN (SELECT * FROM test_00597) AS b ON a.id = b.id) WHERE id = 1;
 SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) AS a ANY LEFT JOIN (SELECT * FROM test_00597) AS b ON a.id = b.id) WHERE id = 1;

 -- Explain with join subquery
-ANALYZE SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1;
+EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1;
 SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1;

 DROP TABLE IF EXISTS test_00597;
diff --git a/tests/queries/0_stateless/00808_not_optimize_predicate.sql b/tests/queries/0_stateless/00808_not_optimize_predicate.sql
index 579fd00cb8c..7c1e57706e2 100644
--- a/tests/queries/0_stateless/00808_not_optimize_predicate.sql
+++ b/tests/queries/0_stateless/00808_not_optimize_predicate.sql
@@ -49,7 +49,7 @@ SELECT
 FROM numbers(2500)
 GROUP BY n;

-SET force_primary_key = 1, enable_debug_queries = 1, enable_optimize_predicate_expression = 1;
+SET force_primary_key = 1, enable_optimize_predicate_expression = 1;

 SELECT *
 FROM
@@ -61,7 +61,7 @@ FROM
 )
 WHERE (n >= 2) AND (n <= 5);

-ANALYZE SELECT *
+EXPLAIN SYNTAX SELECT *
 FROM
 (
     SELECT
diff --git a/tests/queries/0_stateless/00826_cross_to_inner_join.sql b/tests/queries/0_stateless/00826_cross_to_inner_join.sql
index 618c0374a28..67471864686 100644
--- a/tests/queries/0_stateless/00826_cross_to_inner_join.sql
+++ b/tests/queries/0_stateless/00826_cross_to_inner_join.sql
@@ -1,4 +1,3 @@
-SET enable_debug_queries = 1;
 SET enable_optimize_predicate_expression = 0;

 select * from system.one l cross join system.one r;
@@ -37,22 +36,22 @@ SELECT * FROM t1_00826, t2_00826 where t1_00826.b = t2_00826.b;

 SELECT 'cross';
-ANALYZE SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a;
+EXPLAIN SYNTAX SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a;
 SELECT 'cross nullable';
-ANALYZE SELECT * FROM t1_00826, t2_00826 where t1_00826.a = t2_00826.a;
+EXPLAIN SYNTAX SELECT * FROM t1_00826, t2_00826 where t1_00826.a = t2_00826.a;
 SELECT 'cross nullable vs not nullable';
-ANALYZE SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.b;
+EXPLAIN SYNTAX SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.b;
 SELECT 'cross self';
-ANALYZE SELECT * FROM t1_00826 x cross join t1_00826 y where x.a = y.a and x.b = y.b;
+EXPLAIN SYNTAX SELECT * FROM t1_00826 x cross join t1_00826 y where x.a = y.a and x.b = y.b;
 SELECT 'cross one table expr';
-ANALYZE SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t1_00826.b;
+EXPLAIN SYNTAX SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t1_00826.b;
 SELECT 'cross multiple ands';
-ANALYZE SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a and t1_00826.b = t2_00826.b;
+EXPLAIN SYNTAX SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a and t1_00826.b = t2_00826.b;
 SELECT 'cross and inside and';
-ANALYZE SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a and (t1_00826.a = t2_00826.a and (t1_00826.a = t2_00826.a and t1_00826.b = t2_00826.b));
+EXPLAIN SYNTAX SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a and (t1_00826.a = t2_00826.a and (t1_00826.a = t2_00826.a and t1_00826.b = t2_00826.b));
 SELECT 'cross split conjunction';
-ANALYZE SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a and t1_00826.b = t2_00826.b and t1_00826.a >= 1 and t2_00826.b > 0;
+EXPLAIN SYNTAX SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a and t1_00826.b = t2_00826.b and t1_00826.a >= 1 and t2_00826.b > 0;

 DROP TABLE t1_00826;
 DROP TABLE t2_00826;
diff --git a/tests/queries/0_stateless/00849_multiple_comma_join_2.sql b/tests/queries/0_stateless/00849_multiple_comma_join_2.sql
index 710327ec0e4..5203376befd 100644
--- a/tests/queries/0_stateless/00849_multiple_comma_join_2.sql
+++ b/tests/queries/0_stateless/00849_multiple_comma_join_2.sql
@@ -1,4 +1,3 @@
-SET enable_debug_queries = 1;
 SET enable_optimize_predicate_expression = 0;

 DROP TABLE IF EXISTS t1;
@@ -11,25 +10,25 @@ CREATE TABLE t2 (a UInt32, b Nullable(Int32)) ENGINE = Memory;
 CREATE TABLE t3 (a UInt32, b Nullable(Int32)) ENGINE = Memory;
 CREATE TABLE t4 (a UInt32, b Nullable(Int32)) ENGINE = Memory;

-ANALYZE SELECT t1.a FROM t1, t2;
-ANALYZE SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a;
-ANALYZE SELECT t1.a FROM t1, t2 WHERE t1.b = t2.b;
-ANALYZE SELECT t1.a FROM t1, t2, t3 WHERE t1.a = t2.a AND t1.a = t3.a;
-ANALYZE SELECT t1.a FROM t1, t2, t3 WHERE t1.b = t2.b AND t1.b = t3.b;
-ANALYZE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t1.a = t3.a AND t1.a = t4.a;
-ANALYZE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.b = t2.b AND t1.b = t3.b AND t1.b = t4.b;
+EXPLAIN SYNTAX SELECT t1.a FROM t1, t2;
+EXPLAIN SYNTAX SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a;
+EXPLAIN SYNTAX SELECT t1.a FROM t1, t2 WHERE t1.b = t2.b;
+EXPLAIN SYNTAX SELECT t1.a FROM t1, t2, t3 WHERE t1.a = t2.a AND t1.a = t3.a;
+EXPLAIN SYNTAX SELECT t1.a FROM t1, t2, t3 WHERE t1.b = t2.b AND t1.b = t3.b;
+EXPLAIN SYNTAX SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t1.a = t3.a AND t1.a = t4.a;
+EXPLAIN SYNTAX SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.b = t2.b AND t1.b = t3.b AND t1.b = t4.b;

-ANALYZE SELECT t1.a FROM t1, t2, t3, t4 WHERE t2.a = t1.a AND t2.a = t3.a AND t2.a = t4.a;
-ANALYZE SELECT t1.a FROM t1, t2, t3, t4 WHERE t3.a = t1.a AND t3.a = t2.a AND t3.a = t4.a;
-ANALYZE SELECT t1.a FROM t1, t2, t3, t4 WHERE t4.a = t1.a AND t4.a = t2.a AND t4.a = t3.a;
-ANALYZE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t2.a = t3.a AND t3.a = t4.a;
+EXPLAIN SYNTAX SELECT t1.a FROM t1, t2, t3, t4 WHERE t2.a = t1.a AND t2.a = t3.a AND t2.a = t4.a;
+EXPLAIN SYNTAX SELECT t1.a FROM t1, t2, t3, t4 WHERE t3.a = t1.a AND t3.a = t2.a AND t3.a = t4.a;
+EXPLAIN SYNTAX SELECT t1.a FROM t1, t2, t3, t4 WHERE t4.a = t1.a AND t4.a = t2.a AND t4.a = t3.a;
+EXPLAIN SYNTAX SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t2.a = t3.a AND t3.a = t4.a;

-ANALYZE SELECT t1.a FROM t1, t2, t3, t4;
-ANALYZE SELECT t1.a FROM t1 CROSS JOIN t2 CROSS JOIN t3 CROSS JOIN t4;
+EXPLAIN SYNTAX SELECT t1.a FROM t1, t2, t3, t4;
+EXPLAIN SYNTAX SELECT t1.a FROM t1 CROSS JOIN t2 CROSS JOIN t3 CROSS JOIN t4;

-ANALYZE SELECT t1.a FROM t1, t2 CROSS JOIN t3;
-ANALYZE SELECT t1.a FROM t1 JOIN t2 USING a CROSS JOIN t3; -- { serverError 48 }
-ANALYZE SELECT t1.a FROM t1 JOIN t2 ON t1.a = t2.a CROSS JOIN t3;
+EXPLAIN SYNTAX SELECT t1.a FROM t1, t2 CROSS JOIN t3;
+EXPLAIN SYNTAX SELECT t1.a FROM t1 JOIN t2 USING a CROSS JOIN t3; -- { serverError 48 }
+EXPLAIN SYNTAX SELECT t1.a FROM t1 JOIN t2 ON t1.a = t2.a CROSS JOIN t3;

 INSERT INTO t1 values (1,1), (2,2), (3,3), (4,4);
 INSERT INTO t2 values (1,1), (1, Null);
diff --git a/tests/queries/0_stateless/00908_analyze_query.sql b/tests/queries/0_stateless/00908_analyze_query.sql
index ca8e834e108..6ef1cf48386 100644
--- a/tests/queries/0_stateless/00908_analyze_query.sql
+++ b/tests/queries/0_stateless/00908_analyze_query.sql
@@ -1,8 +1,6 @@
-set enable_debug_queries = 1;
-
 DROP TABLE IF EXISTS a;
 CREATE TABLE a (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a;

-ANALYZE SELECT * FROM a;
+EXPLAIN SYNTAX SELECT * FROM a;

 DROP TABLE a;
diff --git a/tests/queries/0_stateless/01021_tuple_parser.sql b/tests/queries/0_stateless/01021_tuple_parser.sql
index d0c837fae83..4fc7aa1e3ee 100644
--- a/tests/queries/0_stateless/01021_tuple_parser.sql
+++ b/tests/queries/0_stateless/01021_tuple_parser.sql
@@ -1,8 +1,6 @@
 SELECT toTypeName((1,)), (1,);

-SET enable_debug_queries = 1;
-
-ANALYZE SELECT (1,);
+EXPLAIN SYNTAX SELECT (1,);

 DROP TABLE IF EXISTS tuple_values;
diff --git a/tests/queries/0_stateless/01029_early_constant_folding.sql b/tests/queries/0_stateless/01029_early_constant_folding.sql
index 52af4e4d75c..428c3625295 100644
--- a/tests/queries/0_stateless/01029_early_constant_folding.sql
+++ b/tests/queries/0_stateless/01029_early_constant_folding.sql
@@ -1,15 +1,13 @@
-SET enable_debug_queries = 1;
-
 -- constant folding

-ANALYZE SELECT 1 WHERE 1 = 0;
+EXPLAIN SYNTAX SELECT 1 WHERE 1 = 0;

-ANALYZE SELECT 1 WHERE 1 IN (0, 1, 2);
+EXPLAIN SYNTAX SELECT 1 WHERE 1 IN (0, 1, 2);

-ANALYZE SELECT 1 WHERE 1 IN (0, 2) AND 2 = (SELECT 2);
+EXPLAIN SYNTAX SELECT 1 WHERE 1 IN (0, 2) AND 2 = (SELECT 2);

 -- no constant folding

-ANALYZE SELECT 1 WHERE 1 IN ((SELECT arrayJoin([1, 2, 3])) AS subquery);
+EXPLAIN SYNTAX SELECT 1 WHERE 1 IN ((SELECT arrayJoin([1, 2, 3])) AS subquery);

-ANALYZE SELECT 1 WHERE NOT ignore();
+EXPLAIN SYNTAX SELECT 1 WHERE NOT ignore();
diff --git a/tests/queries/0_stateless/01056_predicate_optimizer_bugs.sql b/tests/queries/0_stateless/01056_predicate_optimizer_bugs.sql
index 18552a6591d..d59b8fc30ac 100644
--- a/tests/queries/0_stateless/01056_predicate_optimizer_bugs.sql
+++ b/tests/queries/0_stateless/01056_predicate_optimizer_bugs.sql
@@ -1,18 +1,17 @@
-SET enable_debug_queries = 1;
 SET enable_optimize_predicate_expression = 1;
 SET joined_subquery_requires_alias = 0;

 -- https://github.com/ClickHouse/ClickHouse/issues/3885
 -- https://github.com/ClickHouse/ClickHouse/issues/5485
-ANALYZE SELECT k, v, d, i FROM (SELECT t.1 AS k, t.2 AS v, runningDifference(v) AS d, runningDifference(cityHash64(t.1)) AS i FROM ( SELECT arrayJoin([('a', 1), ('a', 2), ('a', 3), ('b', 11), ('b', 13), ('b', 15)]) AS t)) WHERE i = 0;
+EXPLAIN SYNTAX SELECT k, v, d, i FROM (SELECT t.1 AS k, t.2 AS v, runningDifference(v) AS d, runningDifference(cityHash64(t.1)) AS i FROM ( SELECT arrayJoin([('a', 1), ('a', 2), ('a', 3), ('b', 11), ('b', 13), ('b', 15)]) AS t)) WHERE i = 0;
 SELECT k, v, d, i FROM (SELECT t.1 AS k, t.2 AS v, runningDifference(v) AS d, runningDifference(cityHash64(t.1)) AS i FROM ( SELECT arrayJoin([('a', 1), ('a', 2), ('a', 3), ('b', 11), ('b', 13), ('b', 15)]) AS t)) WHERE i = 0;

 -- https://github.com/ClickHouse/ClickHouse/issues/5682
-ANALYZE SELECT co,co2,co3,num FROM ( SELECT co,co2,co3,count() AS num FROM ( SELECT 1 AS co,2 AS co2 ,3 AS co3 ) GROUP BY cube (co,co2,co3) ) WHERE co!=0 AND co2 !=2;
+EXPLAIN SYNTAX SELECT co,co2,co3,num FROM ( SELECT co,co2,co3,count() AS num FROM ( SELECT 1 AS co,2 AS co2 ,3 AS co3 ) GROUP BY cube (co,co2,co3) ) WHERE co!=0 AND co2 !=2;
 SELECT co,co2,co3,num FROM ( SELECT co,co2,co3,count() AS num FROM ( SELECT 1 AS co,2 AS co2 ,3 AS co3 ) GROUP BY cube (co,co2,co3) ) WHERE co!=0 AND co2 !=2;

 -- https://github.com/ClickHouse/ClickHouse/issues/6734
-ANALYZE SELECT alias AS name FROM ( SELECT name AS alias FROM system.settings ) ANY INNER JOIN ( SELECT name FROM system.settings ) USING (name) WHERE name = 'enable_optimize_predicate_expression';
+EXPLAIN SYNTAX SELECT alias AS name FROM ( SELECT name AS alias FROM system.settings ) ANY INNER JOIN ( SELECT name FROM system.settings ) USING (name) WHERE name = 'enable_optimize_predicate_expression';
 SELECT alias AS name FROM ( SELECT name AS alias FROM system.settings ) ANY INNER JOIN ( SELECT name FROM system.settings ) USING (name) WHERE name = 'enable_optimize_predicate_expression';

 -- https://github.com/ClickHouse/ClickHouse/issues/6767
@@ -38,7 +37,7 @@ DROP TABLE IF EXISTS t3;
 DROP TABLE IF EXISTS view1;

 -- https://github.com/ClickHouse/ClickHouse/issues/7136
-ANALYZE SELECT ccc FROM ( SELECT 1 AS ccc UNION ALL SELECT * FROM ( SELECT 2 AS ccc ) ANY INNER JOIN ( SELECT 2 AS ccc ) USING (ccc) ) WHERE ccc > 1;
+EXPLAIN SYNTAX SELECT ccc FROM ( SELECT 1 AS ccc UNION ALL SELECT * FROM ( SELECT 2 AS ccc ) ANY INNER JOIN ( SELECT 2 AS ccc ) USING (ccc) ) WHERE ccc > 1;
 SELECT ccc FROM ( SELECT 1 AS ccc UNION ALL SELECT * FROM ( SELECT 2 AS ccc ) ANY INNER JOIN ( SELECT 2 AS ccc ) USING (ccc) ) WHERE ccc > 1;

 -- https://github.com/ClickHouse/ClickHouse/issues/5674
@@ -50,8 +49,8 @@ DROP TABLE IF EXISTS B;
 CREATE TABLE A (ts DateTime, id String, id_b String) ENGINE = MergeTree PARTITION BY toStartOfHour(ts) ORDER BY (ts,id);
 CREATE TABLE B (ts DateTime, id String, id_c String) ENGINE = MergeTree PARTITION BY toStartOfHour(ts) ORDER BY (ts,id);

-ANALYZE SELECT ts, id, id_b, b.ts, b.id, id_c FROM (SELECT ts, id, id_b FROM A) AS a ALL LEFT JOIN B AS b ON b.id = a.id_b WHERE a.ts <= toDateTime('1970-01-01 03:00:00');
-ANALYZE SELECT ts AS `--a.ts`, id AS `--a.id`, id_b AS `--a.id_b`, b.ts AS `--b.ts`, b.id AS `--b.id`, id_c AS `--b.id_c` FROM (SELECT ts, id, id_b FROM A) AS a ALL LEFT JOIN B AS b ON `--b.id` = `--a.id_b` WHERE `--a.ts` <= toDateTime('1970-01-01 03:00:00');
+EXPLAIN SYNTAX SELECT ts, id, id_b, b.ts, b.id, id_c FROM (SELECT ts, id, id_b FROM A) AS a ALL LEFT JOIN B AS b ON b.id = a.id_b WHERE a.ts <= toDateTime('1970-01-01 03:00:00');
+EXPLAIN SYNTAX SELECT ts AS `--a.ts`, id AS `--a.id`, id_b AS `--a.id_b`, b.ts AS `--b.ts`, b.id AS `--b.id`, id_c AS `--b.id_c` FROM (SELECT ts, id, id_b FROM A) AS a ALL LEFT JOIN B AS b ON `--b.id` = `--a.id_b` WHERE `--a.ts` <= toDateTime('1970-01-01 03:00:00');

 DROP TABLE IF EXISTS A;
 DROP TABLE IF EXISTS B;
@@ -69,14 +68,14 @@ SELECT B, next_B FROM (SELECT A, B, neighbor(B, 1) AS next_B FROM (SELECT * FROM

 DROP TABLE IF EXISTS test;

-ANALYZE SELECT * FROM (SELECT * FROM system.one) WHERE arrayMap(x -> x + 1, [dummy]) = [1];
+EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM system.one) WHERE arrayMap(x -> x + 1, [dummy]) = [1];
 SELECT * FROM (SELECT * FROM system.one) WHERE arrayMap(x -> x + 1, [dummy]) = [1];

-ANALYZE SELECT * FROM (SELECT 1 AS id, 2 AS value) INNER JOIN (SELECT 1 AS id, 3 AS value_1) USING id WHERE arrayMap(x -> x + value + value_1, [1]) = [6];
+EXPLAIN SYNTAX SELECT * FROM (SELECT 1 AS id, 2 AS value) INNER JOIN (SELECT 1 AS id, 3 AS value_1) USING id WHERE arrayMap(x -> x + value + value_1, [1]) = [6];
 SELECT * FROM (SELECT 1 AS id, 2 AS value) INNER JOIN (SELECT 1 AS id, 3 AS value_1) USING id WHERE arrayMap(x -> x + value + value_1, [1]) = [6];

 -- check order is preserved
-ANALYZE SELECT * FROM system.one HAVING dummy > 0 AND dummy < 0;
+EXPLAIN SYNTAX SELECT * FROM system.one HAVING dummy > 0 AND dummy < 0;

 -- from #10613
 SELECT name, count() AS cnt
diff --git a/tests/queries/0_stateless/01076_predicate_optimizer_with_view.sql b/tests/queries/0_stateless/01076_predicate_optimizer_with_view.sql
index fa55a47e768..d652cbeea5d 100644
--- a/tests/queries/0_stateless/01076_predicate_optimizer_with_view.sql
+++ b/tests/queries/0_stateless/01076_predicate_optimizer_with_view.sql
@@ -4,14 +4,13 @@ DROP TABLE IF EXISTS test_view;
 CREATE TABLE test(date Date, id Int8, name String, value Int64) ENGINE = MergeTree(date, (id, date), 8192);
 CREATE VIEW test_view AS SELECT * FROM test;

-SET enable_debug_queries = 1;
 SET enable_optimize_predicate_expression = 1;

 -- Optimize predicate expression with view
-ANALYZE SELECT * FROM test_view WHERE id = 1;
-ANALYZE SELECT * FROM test_view WHERE id = 2;
-ANALYZE SELECT id FROM test_view WHERE id = 1;
-ANALYZE SELECT s.id FROM test_view AS s WHERE s.id = 1;
+EXPLAIN SYNTAX SELECT * FROM test_view WHERE id = 1;
+EXPLAIN SYNTAX SELECT * FROM test_view WHERE id = 2;
+EXPLAIN SYNTAX SELECT id FROM test_view WHERE id = 1;
+EXPLAIN SYNTAX SELECT s.id FROM test_view AS s WHERE s.id = 1;

 SELECT * FROM (SELECT toUInt64(b), sum(id) AS b FROM test) WHERE `toUInt64(sum(id))` = 3; -- { serverError 47 }
diff --git a/tests/queries/0_stateless/01083_cross_to_inner_with_like.sql b/tests/queries/0_stateless/01083_cross_to_inner_with_like.sql
index 9e03502c11b..644190cbddf 100644
--- a/tests/queries/0_stateless/01083_cross_to_inner_with_like.sql
+++ b/tests/queries/0_stateless/01083_cross_to_inner_with_like.sql
@@ -4,12 +4,11 @@ DROP TABLE IF EXISTS r;
 CREATE TABLE n (k UInt32) ENGINE = Memory;
 CREATE TABLE r (k UInt32, name String) ENGINE = Memory;

-SET enable_debug_queries = 1;
 SET enable_optimize_predicate_expression = 0;

-ANALYZE SELECT * FROM n, r WHERE n.k = r.k AND r.name = 'A';
-ANALYZE SELECT * FROM n, r WHERE n.k = r.k AND r.name LIKE 'A%';
-ANALYZE SELECT * FROM n, r WHERE n.k = r.k AND r.name NOT LIKE 'A%';
+EXPLAIN SYNTAX SELECT * FROM n, r WHERE n.k = r.k AND r.name = 'A';
+EXPLAIN SYNTAX SELECT * FROM n, r WHERE n.k = r.k AND r.name LIKE 'A%';
+EXPLAIN SYNTAX SELECT * FROM n, r WHERE n.k = r.k AND r.name NOT LIKE 'A%';

 DROP TABLE n;
 DROP TABLE r;
diff --git a/tests/queries/0_stateless/01259_combinator_distinct.sql b/tests/queries/0_stateless/01259_combinator_distinct.sql
index 1b6f887ff70..879c95a61fe 100644
--- a/tests/queries/0_stateless/01259_combinator_distinct.sql
+++ b/tests/queries/0_stateless/01259_combinator_distinct.sql
@@ -7,9 +7,8 @@ SELECT round(corrStable(x, y), 5) FROM (SELECT DISTINCT number % 10 AS x, number
 SELECT sum(DISTINCT y) FROM (SELECT number % 5 AS x, number % 15 AS y FROM numbers(1000)) GROUP BY x;

-SET enable_debug_queries = 1;
 SELECT countIf(DISTINCT number % 10, number % 5 = 2) FROM numbers(10000);
-ANALYZE SELECT countIf(DISTINCT number % 10, number % 5 = 2) FROM numbers(10000);
+EXPLAIN SYNTAX SELECT countIf(DISTINCT number % 10, number % 5 = 2) FROM numbers(10000);
 SELECT sumIf(DISTINCT number % 10, number % 5 = 2) FROM numbers(10000);
-ANALYZE SELECT sumIf(DISTINCT number % 10, number % 5 = 2) FROM numbers(10000);
+EXPLAIN SYNTAX SELECT sumIf(DISTINCT number % 10, number % 5 = 2) FROM numbers(10000);
diff --git a/tests/queries/0_stateless/01271_optimize_arithmetic_operations_in_aggr_func.sql b/tests/queries/0_stateless/01271_optimize_arithmetic_operations_in_aggr_func.sql
index df762a0ee6d..ccfa08af7bc 100644
--- a/tests/queries/0_stateless/01271_optimize_arithmetic_operations_in_aggr_func.sql
+++ b/tests/queries/0_stateless/01271_optimize_arithmetic_operations_in_aggr_func.sql
@@ -1,61 +1,60 @@
-SET enable_debug_queries = 1;
 SET optimize_arithmetic_operations_in_aggregate_functions = 1;

-ANALYZE SELECT sum(n + 1), sum(1 + n), sum(n - 1), sum(1 - n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT sum(n * 2), sum(2 * n), sum(n / 2), sum(1 / n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT min(n + 1), min(1 + n), min(n - 1), min(1 - n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT min(n * 2), min(2 * n), min(n / 2), min(1 / n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT max(n + 1), max(1 + n), max(n - 1), max(1 - n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT max(n * 2), max(2 * n), max(n / 2), max(1 / n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT sum(n + 1), sum(1 + n), sum(n - 1), sum(1 - n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT sum(n * 2), sum(2 * n), sum(n / 2), sum(1 / n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT min(n + 1), min(1 + n), min(n - 1), min(1 - n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT min(n * 2), min(2 * n), min(n / 2), min(1 / n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT max(n + 1), max(1 + n), max(n - 1), max(1 - n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT max(n * 2), max(2 * n), max(n / 2), max(1 / n) FROM (SELECT number n FROM numbers(10));

-ANALYZE SELECT sum(n + -1), sum(-1 + n), sum(n - -1), sum(-1 - n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT sum(n * -2), sum(-2 * n), sum(n / -2), sum(-1 / n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT min(n + -1), min(-1 + n), min(n - -1), min(-1 - n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT min(n * -2), min(-2 * n), min(n / -2), min(-1 / n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT max(n + -1), max(-1 + n), max(n - -1), max(-1 - n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT max(n * -2), max(-2 * n), max(n / -2), max(-1 / n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT sum(n + -1), sum(-1 + n), sum(n - -1), sum(-1 - n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT sum(n * -2), sum(-2 * n), sum(n / -2), sum(-1 / n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT min(n + -1), min(-1 + n), min(n - -1), min(-1 - n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT min(n * -2), min(-2 * n), min(n / -2), min(-1 / n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT max(n + -1), max(-1 + n), max(n - -1), max(-1 - n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT max(n * -2), max(-2 * n), max(n / -2), max(-1 / n) FROM (SELECT number n FROM numbers(10));

-ANALYZE SELECT sum(abs(2) + 1), sum(abs(2) + n), sum(n - abs(2)), sum(1 - abs(2)) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT sum(abs(2) * 2), sum(abs(2) * n), sum(n / abs(2)), sum(1 / abs(2)) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT min(abs(2) + 1), min(abs(2) + n), min(n - abs(2)), min(1 - abs(2)) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT min(abs(2) * 2), min(abs(2) * n), min(n / abs(2)), min(1 / abs(2)) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT max(abs(2) + 1), max(abs(2) + n), max(n - abs(2)), max(1 - abs(2)) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT max(abs(2) * 2), max(abs(2) * n), max(n / abs(2)), max(1 / abs(2)) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT sum(abs(2) + 1), sum(abs(2) + n), sum(n - abs(2)), sum(1 - abs(2)) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT sum(abs(2) * 2), sum(abs(2) * n), sum(n / abs(2)), sum(1 / abs(2)) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT min(abs(2) + 1), min(abs(2) + n), min(n - abs(2)), min(1 - abs(2)) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT min(abs(2) * 2), min(abs(2) * n), min(n / abs(2)), min(1 / abs(2)) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT max(abs(2) + 1), max(abs(2) + n), max(n - abs(2)), max(1 - abs(2)) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT max(abs(2) * 2), max(abs(2) * n), max(n / abs(2)), max(1 / abs(2)) FROM (SELECT number n FROM numbers(10));

-ANALYZE SELECT sum(abs(n) + 1), sum(abs(n) + n), sum(n - abs(n)), sum(1 - abs(n)) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT sum(abs(n) * 2), sum(abs(n) * n), sum(n / abs(n)), sum(1 / abs(n)) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT min(abs(n) + 1), min(abs(n) + n), min(n - abs(n)), min(1 - abs(n)) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT min(abs(n) * 2), min(abs(n) * n), min(n / abs(n)), min(1 / abs(n)) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT max(abs(n) + 1), max(abs(n) + n), max(n - abs(n)), max(1 - abs(n)) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT max(abs(n) * 2), max(abs(n) * n), max(n / abs(n)), max(1 / abs(n)) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT sum(abs(n) + 1), sum(abs(n) + n), sum(n - abs(n)), sum(1 - abs(n)) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT sum(abs(n) * 2), sum(abs(n) * n), sum(n / abs(n)), sum(1 / abs(n)) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT min(abs(n) + 1), min(abs(n) + n), min(n - abs(n)), min(1 - abs(n)) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT min(abs(n) * 2), min(abs(n) * n), min(n / abs(n)), min(1 / abs(n)) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT max(abs(n) + 1), max(abs(n) + n), max(n - abs(n)), max(1 - abs(n)) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT max(abs(n) * 2), max(abs(n) * n), max(n / abs(n)), max(1 / abs(n)) FROM (SELECT number n FROM numbers(10));

-ANALYZE SELECT sum(n*n + 1), sum(1 + n*n), sum(n*n - 1), sum(1 - n*n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT sum(n*n * 2), sum(2 * n*n), sum(n*n / 2), sum(1 / n*n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT min(n*n + 1), min(1 + n*n), min(n*n - 1), min(1 - n*n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT min(n*n * 2), min(2 * n*n), min(n*n / 2), min(1 / n*n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT max(n*n + 1), max(1 + n*n), max(n*n - 1), max(1 - n*n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT max(n*n * 2), max(2 * n*n), max(n*n / 2), max(1 / n*n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT sum(n*n + 1), sum(1 + n*n), sum(n*n - 1), sum(1 - n*n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT sum(n*n * 2), sum(2 * n*n), sum(n*n / 2), sum(1 / n*n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT min(n*n + 1), min(1 + n*n), min(n*n - 1), min(1 - n*n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT min(n*n * 2), min(2 * n*n), min(n*n / 2), min(1 / n*n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT max(n*n + 1), max(1 + n*n), max(n*n - 1), max(1 - n*n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT max(n*n * 2), max(2 * n*n), max(n*n / 2), max(1 / n*n) FROM (SELECT number n FROM numbers(10));

-ANALYZE SELECT sum(1 + n + 1), sum(1 + 1 + n), sum(1 + n - 1), sum(1 + 1 - n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT sum(1 + n * 2), sum(1 + 2 * n), sum(1 + n / 2), sum(1 + 1 / n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT min(1 + n + 1), min(1 + 1 + n), min(1 + n - 1), min(1 + 1 - n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT min(1 + n * 2), min(1 + 2 * n), min(1 + n / 2), min(1 + 1 / n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT max(1 + n + 1), max(1 + 1 + n), max(1 + n - 1), max(1 + 1 - n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT max(1 + n * 2), max(1 + 2 * n), max(1 + n / 2), max(1 + 1 / n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT sum(1 + n + 1), sum(1 + 1 + n), sum(1 + n - 1), sum(1 + 1 - n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT sum(1 + n * 2), sum(1 + 2 * n), sum(1 + n / 2), sum(1 + 1 / n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT min(1 + n + 1), min(1 + 1 + n), min(1 + n - 1), min(1 + 1 - n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT min(1 + n * 2), min(1 + 2 * n), min(1 + n / 2), min(1 + 1 / n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT max(1 + n + 1), max(1 + 1 + n), max(1 + n - 1), max(1 + 1 - n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT max(1 + n * 2), max(1 + 2 * n), max(1 + n / 2), max(1 + 1 / n) FROM (SELECT number n FROM numbers(10));

-ANALYZE SELECT sum(n + -1 + -1), sum(-1 + n + -1), sum(n - -1 + -1), sum(-1 - n + -1) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT sum(n * -2 * -1), sum(-2 * n * -1), sum(n / -2 / -1), sum(-1 / n / -1) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT min(n + -1 + -1), min(-1 + n + -1), min(n - -1 + -1), min(-1 - n + -1) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT min(n * -2 * -1), min(-2 * n * -1), min(n / -2 / -1), min(-1 / n / -1) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT max(n + -1 + -1), max(-1 + n + -1), max(n - -1 + -1), max(-1 - n + -1) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT max(n * -2 * -1), max(-2 * n * -1), max(n / -2 / -1), max(-1 / n / -1) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT sum(n + -1 + -1), sum(-1 + n + -1), sum(n - -1 + -1), sum(-1 - n + -1) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT sum(n * -2 * -1), sum(-2 * n * -1), sum(n / -2 / -1), sum(-1 / n / -1) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT min(n + -1 + -1), min(-1 + n + -1), min(n - -1 + -1), min(-1 - n + -1) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT min(n * -2 * -1), min(-2 * n * -1), min(n / -2 / -1), min(-1 / n / -1) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT max(n + -1 + -1), max(-1 + n + -1), max(n - -1 + -1), max(-1 - n + -1) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT max(n * -2 * -1), max(-2 * n * -1), max(n / -2 / -1), max(-1 / n / -1) FROM (SELECT number n FROM numbers(10));

-ANALYZE SELECT sum(n + 1) + sum(1 + n) + sum(n - 1) + sum(1 - n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT sum(n * 2) + sum(2 * n) + sum(n / 2) + sum(1 / n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT min(n + 1) + min(1 + n) + min(n - 1) + min(1 - n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT min(n * 2) + min(2 * n) + min(n / 2) + min(1 / n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT max(n + 1) + max(1 + n) + max(n - 1) + max(1 - n) FROM (SELECT number n FROM numbers(10));
-ANALYZE SELECT max(n * 2) + max(2 * n) + max(n / 2) + max(1 / n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT sum(n + 1) + sum(1 + n) + sum(n - 1) + sum(1 - n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT sum(n * 2) + sum(2 * n) + sum(n / 2) + sum(1 / n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT min(n + 1) + min(1 + n) + min(n - 1) + min(1 - n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT min(n * 2) + min(2 * n) + min(n / 2) + min(1 / n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT max(n + 1) + max(1 + n) + max(n - 1) + max(1 - n) FROM (SELECT number n FROM numbers(10));
+EXPLAIN SYNTAX SELECT max(n * 2) + max(2 * n) + max(n / 2) + max(1 / n) FROM (SELECT number n FROM numbers(10));

 SELECT sum(n + 1), sum(1 + n), sum(n - 1), sum(1 - n) FROM (SELECT number n FROM numbers(10));
diff --git a/tests/queries/0_stateless/01300_group_by_other_keys.sql b/tests/queries/0_stateless/01300_group_by_other_keys.sql
index 4dcd9cf1efe..22cff012e71 100644
--- a/tests/queries/0_stateless/01300_group_by_other_keys.sql
+++ b/tests/queries/0_stateless/01300_group_by_other_keys.sql
@@ -1,5 +1,4 @@
 set optimize_group_by_function_keys = 1;
-set enable_debug_queries = 1;

 SELECT round(max(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY number % 2, number % 3, (number % 2 + number % 3) % 2 ORDER BY k;
 SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) ORDER BY k;
@@ -8,11 +7,11 @@ SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY (numb
 SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY (number % 2) % 3, number % 2 ORDER BY k;

-analyze SELECT max(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 2, number % 3, (number % 2 + number % 3) % 2 ORDER BY k;
-analyze SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) ORDER BY k;
-analyze SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3 ORDER BY k;
-analyze SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 ORDER BY k;
-analyze SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) % 3, number % 2 ORDER BY k;
+EXPLAIN SYNTAX SELECT max(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 2, number % 3, (number % 2 + number % 3) % 2 ORDER BY k;
+EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) ORDER BY k;
+EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3 ORDER BY k;
+EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 ORDER BY k;
+EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) % 3, number % 2 ORDER BY k;

 set optimize_group_by_function_keys = 0;

@@ -22,9 +21,9 @@ SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY (numb
 SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 ORDER BY k;
 SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY (number % 2) % 3, number % 2 ORDER BY k;

-analyze SELECT max(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 2, number % 3, (number % 2 + number % 3) % 2 ORDER BY k;
-analyze SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) ORDER BY k;
-analyze SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3 ORDER BY k;
-analyze SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 ORDER BY k;
-analyze SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) % 3, number % 2 ORDER BY k;
+EXPLAIN SYNTAX SELECT max(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 2, number % 3, (number % 2 + number % 3) % 2 ORDER BY k;
+EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) ORDER BY k;
+EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3 ORDER BY k;
+EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 ORDER BY k;
+EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) % 3, number % 2 ORDER BY k;

 -- TODO - test with similar variables of different tables (collision)
diff --git a/tests/queries/0_stateless/01300_group_by_other_keys_having.sql b/tests/queries/0_stateless/01300_group_by_other_keys_having.sql
index d97d80b2f12..e17438f749a 100644
--- a/tests/queries/0_stateless/01300_group_by_other_keys_having.sql
+++ b/tests/queries/0_stateless/01300_group_by_other_keys_having.sql
@@ -1,14 +1,13 @@
 set optimize_group_by_function_keys = 1;
-set enable_debug_queries = 1;

 SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 HAVING avg(log(2) * number) > 3465735.3 ORDER BY k;
 SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k;
 SELECT (number % 5) * (number % 5) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k;

-analyze SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 HAVING avg(log(2) * number) > 3465735.3 ORDER BY k;
-analyze SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k;
-analyze SELECT (number % 5) * (number % 5) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k;
+EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 HAVING avg(log(2) * number) > 3465735.3 ORDER BY k;
+EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k;
+EXPLAIN SYNTAX SELECT (number % 5) * (number % 5) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k;

 set optimize_group_by_function_keys = 0;

@@ -16,6 +15,6 @@ SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY (numb
 SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k;
 SELECT (number % 5) * (number % 5) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k;

-analyze SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 HAVING avg(log(2) * number) > 3465735.3 ORDER BY k;
-analyze SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k;
-analyze SELECT (number % 5) * (number % 5) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k;
+EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 HAVING avg(log(2) * number) > 3465735.3 ORDER BY k;
+EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k;
+EXPLAIN SYNTAX SELECT (number % 5) * (number % 5) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k;
diff --git a/tests/queries/0_stateless/01305_duplicate_order_by_and_distinct.sql b/tests/queries/0_stateless/01305_duplicate_order_by_and_distinct.sql
index a660e5f0b77..3b13b208eb5 100644
--- a/tests/queries/0_stateless/01305_duplicate_order_by_and_distinct.sql
+++ b/tests/queries/0_stateless/01305_duplicate_order_by_and_distinct.sql
@@ -1,7 +1,6 @@
-set enable_debug_queries = 1;
 set optimize_duplicate_order_by_and_distinct = 1;

-analyze SELECT DISTINCT *
+EXPLAIN SYNTAX SELECT DISTINCT *
 FROM
 (
     SELECT DISTINCT *
@@ -31,7 +30,7 @@ ORDER BY number;

 set optimize_duplicate_order_by_and_distinct = 0;

-analyze SELECT DISTINCT *
+EXPLAIN SYNTAX SELECT DISTINCT *
 FROM
 (
     SELECT DISTINCT *
@@ -61,7 +60,7 @@ ORDER BY number;

 set optimize_duplicate_order_by_and_distinct = 1;

-analyze SELECT DISTINCT *
+EXPLAIN SYNTAX SELECT DISTINCT *
 FROM
 (
     SELECT DISTINCT *
@@ -93,7 +92,7 @@ ORDER BY number;

 set optimize_duplicate_order_by_and_distinct = 0;

-analyze SELECT DISTINCT *
+EXPLAIN SYNTAX SELECT DISTINCT *
 FROM
 (
     SELECT DISTINCT *
diff --git a/tests/queries/0_stateless/01321_aggregate_functions_of_group_by_keys.sql b/tests/queries/0_stateless/01321_aggregate_functions_of_group_by_keys.sql
index fab1179f502..2937e856bf5 100644
--- a/tests/queries/0_stateless/01321_aggregate_functions_of_group_by_keys.sql
+++ b/tests/queries/0_stateless/01321_aggregate_functions_of_group_by_keys.sql
@@ -1,5 +1,4 @@
 set optimize_aggregators_of_group_by_keys = 1;
-set enable_debug_queries = 1;
 set optimize_move_functions_out_of_any = 0;

 SELECT min(number % 2) AS a, max(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b;
@@ -8,10 +7,10 @@ SELECT max((number % 5) * (number % 7)) AS a FROM numbers(10000000) GROUP BY num
 SELECT foo FROM (SELECT anyLast(number) AS foo FROM numbers(1) GROUP BY number);
 SELECT anyLast(number) FROM numbers(1) GROUP BY number;

-analyze SELECT min(number % 2) AS a, max(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b;
-analyze SELECT any(number % 2) AS a, anyLast(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b;
-analyze SELECT max((number % 5) * (number % 7)) AS a FROM numbers(10000000) GROUP BY number % 7, number % 5 ORDER BY a;
-analyze SELECT foo FROM (SELECT anyLast(number) AS foo FROM numbers(1) GROUP BY number);
+EXPLAIN SYNTAX SELECT min(number % 2) AS a, max(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b;
+EXPLAIN SYNTAX SELECT any(number % 2) AS a, anyLast(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b;
+EXPLAIN SYNTAX SELECT max((number % 5) * (number % 7)) AS a FROM numbers(10000000) GROUP BY number % 7, number % 5 ORDER BY a;
+EXPLAIN SYNTAX SELECT foo FROM (SELECT anyLast(number) AS foo FROM numbers(1) GROUP BY number);

 set optimize_aggregators_of_group_by_keys = 0;

@@ -20,7 +19,7 @@ SELECT any(number % 2) AS a, anyLast(number % 3) AS b FROM numbers(10000000) GRO
 SELECT max((number % 5) * (number % 7)) AS a FROM numbers(10000000) GROUP BY number % 7, number % 5 ORDER BY a;
 SELECT foo FROM (SELECT anyLast(number) AS foo FROM numbers(1) GROUP BY number);

-analyze SELECT min(number % 2) AS a, max(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b;
-analyze SELECT any(number % 2) AS a, anyLast(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b;
-analyze SELECT max((number % 5) * (number % 7)) AS a FROM numbers(10000000) GROUP BY number % 7, number % 5 ORDER BY a;
-analyze SELECT foo FROM (SELECT anyLast(number) AS foo FROM numbers(1) GROUP BY number);
+EXPLAIN SYNTAX SELECT min(number % 2) AS a, max(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b;
+EXPLAIN SYNTAX SELECT any(number % 2) AS a, anyLast(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b;
+EXPLAIN SYNTAX SELECT max((number % 5) * (number % 7)) AS a FROM numbers(10000000) GROUP BY number % 7, number % 5 ORDER BY a;
+EXPLAIN SYNTAX SELECT foo FROM (SELECT anyLast(number) AS foo FROM numbers(1) GROUP BY number);
diff --git a/tests/queries/0_stateless/01321_monotonous_functions_in_order_by.sql b/tests/queries/0_stateless/01321_monotonous_functions_in_order_by.sql
index 7f4b5881104..516c3d26a75 100644
--- a/tests/queries/0_stateless/01321_monotonous_functions_in_order_by.sql
+++ b/tests/queries/0_stateless/01321_monotonous_functions_in_order_by.sql
@@ -1,4 +1,3 @@
-SET enable_debug_queries = 1;
 SET optimize_monotonous_functions_in_order_by = 1;

 SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number));
@@ -14,19 +13,19 @@ SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number)) DESC;
 SELECT number FROM numbers(3) ORDER BY -number DESC;
 SELECT number FROM numbers(3) ORDER BY exp(number) DESC;
 SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x DESC, toFloat32(x) DESC;
-analyze SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number));
-analyze SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number));
-analyze SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number));
-analyze SELECT number FROM numbers(3) ORDER BY -number;
-analyze SELECT number FROM numbers(3) ORDER BY exp(number);
-analyze SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x, toFloat32(x);
-analyze SELECT number AS x FROM numbers(3) ORDER BY toFloat32(x) as k, toFloat64(k);
-analyze SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number)) DESC;
-analyze SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number)) DESC;
-analyze SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number)) DESC;
-analyze SELECT number FROM numbers(3) ORDER BY -number DESC;
-analyze SELECT number FROM numbers(3) ORDER BY exp(number) DESC;
-analyze SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x DESC, toFloat32(x) DESC;
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number));
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number));
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number));
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY -number;
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY exp(number);
+EXPLAIN SYNTAX SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x, toFloat32(x);
+EXPLAIN SYNTAX SELECT number AS x FROM numbers(3) ORDER BY toFloat32(x) as k, toFloat64(k);
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number)) DESC;
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number)) DESC;
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number)) DESC;
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY -number DESC;
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY exp(number) DESC;
+EXPLAIN SYNTAX SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x DESC, toFloat32(x) DESC;

 SET optimize_monotonous_functions_in_order_by = 0;

@@ -43,17 +42,17 @@ SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number)) DESC;
 SELECT number FROM numbers(3) ORDER BY -number DESC;
 SELECT number FROM numbers(3) ORDER BY exp(number) DESC;
 SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x DESC, toFloat32(x) DESC;
-analyze SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number));
-analyze SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number));
-analyze SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number));
-analyze SELECT number FROM numbers(3) ORDER BY -number;
-analyze SELECT number FROM numbers(3) ORDER BY exp(number);
-analyze SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x, toFloat32(x);
-analyze SELECT number AS x FROM numbers(3) ORDER BY toFloat32(x) as k, toFloat64(k);
-analyze SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number)) DESC;
-analyze SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number)) DESC;
-analyze SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number)) DESC;
-analyze SELECT number FROM numbers(3) ORDER BY -number DESC;
-analyze SELECT number FROM numbers(3) ORDER BY exp(number) DESC;
-analyze SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x DESC, toFloat32(x) DESC;
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number));
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number));
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number));
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY -number;
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY exp(number);
+EXPLAIN SYNTAX SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x, toFloat32(x);
+EXPLAIN SYNTAX SELECT number AS x FROM numbers(3) ORDER BY toFloat32(x) as k, toFloat64(k);
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY toFloat32(toFloat64(number)) DESC;
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY abs(toFloat32(number)) DESC;
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY toFloat32(abs(number)) DESC;
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY -number DESC;
+EXPLAIN SYNTAX SELECT number FROM numbers(3) ORDER BY exp(number) DESC;
+EXPLAIN SYNTAX SELECT roundToExp2(number) AS x FROM numbers(3) ORDER BY x DESC, toFloat32(x) DESC;

 -- TODO: exp() should be monotonous function
diff --git a/tests/queries/0_stateless/01322_any_input_optimize.sql b/tests/queries/0_stateless/01322_any_input_optimize.sql
index c868bad659e..4c3345f4be4 100644
--- a/tests/queries/0_stateless/01322_any_input_optimize.sql
+++ b/tests/queries/0_stateless/01322_any_input_optimize.sql
@@ -1,32 +1,31 @@
-SET enable_debug_queries = 1;
 SET optimize_move_functions_out_of_any = 1;

-ANALYZE SELECT any(number + number * 2) FROM numbers(1, 2);
+EXPLAIN SYNTAX SELECT any(number + number * 2) FROM numbers(1, 2);
 SELECT any(number + number * 2) FROM numbers(1, 2);

-ANALYZE SELECT anyLast(number + number * 2) FROM numbers(1, 2);
+EXPLAIN SYNTAX SELECT anyLast(number + number * 2) FROM numbers(1, 2);
 SELECT anyLast(number + number * 2) FROM numbers(1, 2);

-ANALYZE WITH any(number * 3) AS x SELECT x FROM numbers(1, 2);
+EXPLAIN SYNTAX WITH any(number * 3) AS x SELECT x FROM numbers(1, 2);
 WITH any(number * 3) AS x SELECT x FROM numbers(1, 2);

-ANALYZE SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2);
+EXPLAIN SYNTAX SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2);
 SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2);

 SELECT any(anyLast(number)) FROM numbers(1); -- { serverError 184 }

 SET optimize_move_functions_out_of_any = 0;

-ANALYZE SELECT any(number + number * 2) FROM numbers(1, 2);
+EXPLAIN SYNTAX SELECT any(number + number * 2) FROM numbers(1, 2);
 SELECT any(number + number * 2) FROM numbers(1, 2);

-ANALYZE SELECT anyLast(number + number * 2) FROM numbers(1, 2);
+EXPLAIN SYNTAX SELECT anyLast(number + number * 2) FROM numbers(1, 2);
 SELECT anyLast(number + number * 2) FROM numbers(1, 2);

-ANALYZE WITH any(number * 3) AS x SELECT x FROM numbers(1, 2);
+EXPLAIN SYNTAX WITH any(number * 3) AS x SELECT x FROM numbers(1, 2);
 WITH any(number * 3) AS x SELECT x FROM numbers(1, 2);

-ANALYZE SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2);
+EXPLAIN SYNTAX SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2);
 SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2);

 SELECT any(anyLast(number)) FROM numbers(1); -- { serverError 184 }
diff --git a/tests/queries/0_stateless/01323_redundant_functions_in_order_by.sql b/tests/queries/0_stateless/01323_redundant_functions_in_order_by.sql
index 89ee996c835..c810567f73a 100644
--- a/tests/queries/0_stateless/01323_redundant_functions_in_order_by.sql
+++ b/tests/queries/0_stateless/01323_redundant_functions_in_order_by.sql
@@ -3,7 +3,6 @@ DROP TABLE IF EXISTS test;
 CREATE TABLE test (key UInt64, a UInt8, b String, c Float64) ENGINE = MergeTree() ORDER BY key;
 INSERT INTO test SELECT number, number, toString(number), number from numbers(4);

-set enable_debug_queries = 1;
 set optimize_redundant_functions_in_order_by = 1;

 SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(x));
@@ -12,12 +11,12 @@ SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY exp(x), x
 SELECT * FROM (SELECT number + 2 AS key FROM numbers(4)) s FULL JOIN test t USING(key) ORDER BY s.key, t.key;
 SELECT key, a FROM test ORDER BY key, a, exp(key + a);
 SELECT key, a FROM test ORDER BY key, exp(key + a);
-analyze SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(x));
-analyze SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(exp(x)));
-analyze SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY exp(x), x);
-analyze SELECT * FROM (SELECT number + 2 AS key FROM numbers(4)) s FULL JOIN test t USING(key) ORDER BY s.key, t.key;
-analyze SELECT key, a FROM test ORDER BY key, a, exp(key + a);
-analyze SELECT key, a FROM test ORDER BY key, exp(key + a);
+EXPLAIN SYNTAX SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(x));
+EXPLAIN SYNTAX SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(exp(x)));
+EXPLAIN SYNTAX SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY exp(x), x);
+EXPLAIN SYNTAX SELECT * FROM (SELECT number + 2 AS key FROM numbers(4)) s FULL JOIN test t USING(key) ORDER BY s.key, t.key;
+EXPLAIN SYNTAX SELECT key, a FROM test ORDER BY key, a, exp(key + a);
+EXPLAIN SYNTAX SELECT key, a FROM test ORDER BY key, exp(key + a);

 set optimize_redundant_functions_in_order_by = 0;

@@ -27,11 +26,11 @@ SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY exp(x), x
 SELECT * FROM (SELECT number + 2 AS key FROM numbers(4)) s FULL JOIN test t USING(key) ORDER BY s.key, t.key;
 SELECT key, a FROM test ORDER BY key, a, exp(key + a);
 SELECT key, a FROM test ORDER BY key, exp(key + a);
-analyze SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(x));
-analyze SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(exp(x)));
-analyze SELECT groupArray(x) from (SELECT number as x FROM
numbers(3) ORDER BY exp(x), x); -analyze SELECT * FROM (SELECT number + 2 AS key FROM numbers(4)) s FULL JOIN test t USING(key) ORDER BY s.key, t.key; -analyze SELECT key, a FROM test ORDER BY key, a, exp(key + a); -analyze SELECT key, a FROM test ORDER BY key, exp(key + a); +EXPLAIN SYNTAX SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(x)); +EXPLAIN SYNTAX SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(exp(x))); +EXPLAIN SYNTAX SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY exp(x), x); +EXPLAIN SYNTAX SELECT * FROM (SELECT number + 2 AS key FROM numbers(4)) s FULL JOIN test t USING(key) ORDER BY s.key, t.key; +EXPLAIN SYNTAX SELECT key, a FROM test ORDER BY key, a, exp(key + a); +EXPLAIN SYNTAX SELECT key, a FROM test ORDER BY key, exp(key + a); DROP TABLE test; diff --git a/tests/queries/0_stateless/01324_if_transform_strings_to_enum.sql b/tests/queries/0_stateless/01324_if_transform_strings_to_enum.sql index e1c38393231..dcb082c650a 100644 --- a/tests/queries/0_stateless/01324_if_transform_strings_to_enum.sql +++ b/tests/queries/0_stateless/01324_if_transform_strings_to_enum.sql @@ -1,14 +1,13 @@ set optimize_if_transform_strings_to_enum = 1; -set enable_debug_queries = 1; SELECT transform(number, [2, 4, 6], ['google', 'yandex', 'yahoo'], 'other') FROM system.numbers LIMIT 10; -analyze SELECT transform(number, [2, 4, 6], ['google', 'yandex', 'yahoo'], 'other') FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT transform(number, [2, 4, 6], ['google', 'yandex', 'yahoo'], 'other') FROM system.numbers LIMIT 10; SELECT number > 5 ? 'yandex' : 'google' FROM system.numbers LIMIT 10; -analyze SELECT number > 5 ? 'yandex' : 'google' FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT number > 5 ? 'yandex' : 'google' FROM system.numbers LIMIT 10; set optimize_if_transform_strings_to_enum = 0; SELECT transform(number, [2, 4, 6], ['google', 'yandex', 'yahoo'], 'other') FROM system.numbers LIMIT 10; -analyze SELECT transform(number, [2, 4, 6], ['google', 'yandex', 'yahoo'], 'other') FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT transform(number, [2, 4, 6], ['google', 'yandex', 'yahoo'], 'other') FROM system.numbers LIMIT 10; SELECT number > 5 ? 'yandex' : 'google' FROM system.numbers LIMIT 10; -analyze SELECT number > 5 ? 'yandex' : 'google' FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT number > 5 ? 'yandex' : 'google' FROM system.numbers LIMIT 10; diff --git a/tests/queries/0_stateless/01372_wrong_order_by_removal.sql b/tests/queries/0_stateless/01372_wrong_order_by_removal.sql index 93f3388676b..3ca5b63f310 100644 --- a/tests/queries/0_stateless/01372_wrong_order_by_removal.sql +++ b/tests/queries/0_stateless/01372_wrong_order_by_removal.sql @@ -5,7 +5,5 @@ CREATE TEMPORARY TABLE moving_sum_num `v` UInt64 ); -SET enable_debug_queries = 1; - -- ORDER BY from subquery shall not be removed. 
-ANALYZE SELECT k, groupArrayMovingSum(v) FROM (SELECT * FROM moving_sum_num ORDER BY k, dt) GROUP BY k ORDER BY k; +EXPLAIN SYNTAX SELECT k, groupArrayMovingSum(v) FROM (SELECT * FROM moving_sum_num ORDER BY k, dt) GROUP BY k ORDER BY k; diff --git a/tests/queries/0_stateless/01390_remove_injective_in_uniq.sql b/tests/queries/0_stateless/01390_remove_injective_in_uniq.sql index ea272262a01..7753a8faea3 100644 --- a/tests/queries/0_stateless/01390_remove_injective_in_uniq.sql +++ b/tests/queries/0_stateless/01390_remove_injective_in_uniq.sql @@ -1,47 +1,46 @@ -set enable_debug_queries = 1; set optimize_injective_functions_inside_uniq = 1; -analyze select uniq(x), uniqExact(x), uniqHLL12(x), uniqCombined(x), uniqCombined64(x) +EXPLAIN SYNTAX select uniq(x), uniqExact(x), uniqHLL12(x), uniqCombined(x), uniqCombined64(x) from (select number % 2 as x from numbers(10)); -analyze select uniq(x + y), uniqExact(x + y), uniqHLL12(x + y), uniqCombined(x + y), uniqCombined64(x + y) +EXPLAIN SYNTAX select uniq(x + y), uniqExact(x + y), uniqHLL12(x + y), uniqCombined(x + y), uniqCombined64(x + y) from (select number % 2 as x, number % 3 y from numbers(10)); -analyze select uniq(-x), uniqExact(-x), uniqHLL12(-x), uniqCombined(-x), uniqCombined64(-x) +EXPLAIN SYNTAX select uniq(-x), uniqExact(-x), uniqHLL12(-x), uniqCombined(-x), uniqCombined64(-x) from (select number % 2 as x from numbers(10)); -analyze select uniq(bitNot(x)), uniqExact(bitNot(x)), uniqHLL12(bitNot(x)), uniqCombined(bitNot(x)), uniqCombined64(bitNot(x)) +EXPLAIN SYNTAX select uniq(bitNot(x)), uniqExact(bitNot(x)), uniqHLL12(bitNot(x)), uniqCombined(bitNot(x)), uniqCombined64(bitNot(x)) from (select number % 2 as x from numbers(10)); -analyze select uniq(bitNot(-x)), uniqExact(bitNot(-x)), uniqHLL12(bitNot(-x)), uniqCombined(bitNot(-x)), uniqCombined64(bitNot(-x)) +EXPLAIN SYNTAX select uniq(bitNot(-x)), uniqExact(bitNot(-x)), uniqHLL12(bitNot(-x)), uniqCombined(bitNot(-x)), uniqCombined64(bitNot(-x)) from (select number % 2 as x from numbers(10)); -analyze select uniq(-bitNot(-x)), uniqExact(-bitNot(-x)), uniqHLL12(-bitNot(-x)), uniqCombined(-bitNot(-x)), uniqCombined64(-bitNot(-x)) +EXPLAIN SYNTAX select uniq(-bitNot(-x)), uniqExact(-bitNot(-x)), uniqHLL12(-bitNot(-x)), uniqCombined(-bitNot(-x)), uniqCombined64(-bitNot(-x)) from (select number % 2 as x from numbers(10)); -analyze select count(distinct -bitNot(-x)) from (select number % 2 as x from numbers(10)); -analyze select uniq(concatAssumeInjective('x', 'y')) from numbers(10); +EXPLAIN SYNTAX select count(distinct -bitNot(-x)) from (select number % 2 as x from numbers(10)); +EXPLAIN SYNTAX select uniq(concatAssumeInjective('x', 'y')) from numbers(10); set optimize_injective_functions_inside_uniq = 0; -analyze select uniq(x), uniqExact(x), uniqHLL12(x), uniqCombined(x), uniqCombined64(x) +EXPLAIN SYNTAX select uniq(x), uniqExact(x), uniqHLL12(x), uniqCombined(x), uniqCombined64(x) from (select number % 2 as x from numbers(10)); -analyze select uniq(x + y), uniqExact(x + y), uniqHLL12(x + y), uniqCombined(x + y), uniqCombined64(x + y) +EXPLAIN SYNTAX select uniq(x + y), uniqExact(x + y), uniqHLL12(x + y), uniqCombined(x + y), uniqCombined64(x + y) from (select number % 2 as x, number % 3 y from numbers(10)); -analyze select uniq(-x), uniqExact(-x), uniqHLL12(-x), uniqCombined(-x), uniqCombined64(-x) +EXPLAIN SYNTAX select uniq(-x), uniqExact(-x), uniqHLL12(-x), uniqCombined(-x), uniqCombined64(-x) from (select number % 2 as x from numbers(10)); -analyze select uniq(bitNot(x)), 
uniqExact(bitNot(x)), uniqHLL12(bitNot(x)), uniqCombined(bitNot(x)), uniqCombined64(bitNot(x)) +EXPLAIN SYNTAX select uniq(bitNot(x)), uniqExact(bitNot(x)), uniqHLL12(bitNot(x)), uniqCombined(bitNot(x)), uniqCombined64(bitNot(x)) from (select number % 2 as x from numbers(10)); -analyze select uniq(bitNot(-x)), uniqExact(bitNot(-x)), uniqHLL12(bitNot(-x)), uniqCombined(bitNot(-x)), uniqCombined64(bitNot(-x)) +EXPLAIN SYNTAX select uniq(bitNot(-x)), uniqExact(bitNot(-x)), uniqHLL12(bitNot(-x)), uniqCombined(bitNot(-x)), uniqCombined64(bitNot(-x)) from (select number % 2 as x from numbers(10)); -analyze select uniq(-bitNot(-x)), uniqExact(-bitNot(-x)), uniqHLL12(-bitNot(-x)), uniqCombined(-bitNot(-x)), uniqCombined64(-bitNot(-x)) +EXPLAIN SYNTAX select uniq(-bitNot(-x)), uniqExact(-bitNot(-x)), uniqHLL12(-bitNot(-x)), uniqCombined(-bitNot(-x)), uniqCombined64(-bitNot(-x)) from (select number % 2 as x from numbers(10)); -analyze select count(distinct -bitNot(-x)) from (select number % 2 as x from numbers(10)); -analyze select uniq(concatAssumeInjective('x', 'y')) from numbers(10); +EXPLAIN SYNTAX select count(distinct -bitNot(-x)) from (select number % 2 as x from numbers(10)); +EXPLAIN SYNTAX select uniq(concatAssumeInjective('x', 'y')) from numbers(10); diff --git a/tests/queries/0_stateless/01455_duplicate_distinct_optimization.sql b/tests/queries/0_stateless/01455_duplicate_distinct_optimization.sql index 10937819de2..6fbf80a4dc3 100644 --- a/tests/queries/0_stateless/01455_duplicate_distinct_optimization.sql +++ b/tests/queries/0_stateless/01455_duplicate_distinct_optimization.sql @@ -1,21 +1,20 @@ -SET enable_debug_queries = 1; SET optimize_duplicate_order_by_and_distinct = 1; -ANALYZE SELECT DISTINCT number FROM numbers(1); -ANALYZE SELECT DISTINCT number FROM (SELECT DISTINCT number FROM numbers(1)); -ANALYZE SELECT DISTINCT number * 2 FROM (SELECT DISTINCT number * 2, number FROM numbers(1)); -ANALYZE SELECT DISTINCT number FROM (SELECT DISTINCT number * 2 AS number FROM numbers(1)); -ANALYZE SELECT DISTINCT b, a FROM (SELECT DISTINCT number % 2 AS a, number % 3 AS b FROM numbers(100)); -ANALYZE SELECT DISTINCT a FROM (SELECT DISTINCT number % 2 AS a, number % 3 AS b FROM numbers(100)); -ANALYZE SELECT DISTINCT a FROM (SELECT DISTINCT a FROM (SELECT DISTINCT number % 2 AS a, number % 3 AS b FROM numbers(100))); -ANALYZE SELECT DISTINCT a FROM (SELECT DISTINCT a, b FROM (SELECT DISTINCT number % 2 AS a, number % 3 AS b FROM numbers(100))); -ANALYZE SELECT DISTINCT a, b FROM (SELECT DISTINCT b, a FROM (SELECT DISTINCT number a, number b FROM numbers(1))); -ANALYZE SELECT DISTINCT a, b FROM (SELECT b, a, a + b FROM (SELECT DISTINCT number % 2 AS a, number % 3 AS b FROM numbers(100))); -ANALYZE SELECT DISTINCT a FROM (SELECT a FROM (SELECT DISTINCT number % 2 AS a, number % 3 AS b FROM numbers(100))); -ANALYZE SELECT DISTINCT number FROM (SELECT DISTINCT number FROM numbers(1)) t1 CROSS JOIN numbers(2) t2; -ANALYZE SELECT DISTINCT number FROM (SELECT DISTINCT number FROM numbers(1) t1 CROSS JOIN numbers(2) t2); +EXPLAIN SYNTAX SELECT DISTINCT number FROM numbers(1); +EXPLAIN SYNTAX SELECT DISTINCT number FROM (SELECT DISTINCT number FROM numbers(1)); +EXPLAIN SYNTAX SELECT DISTINCT number * 2 FROM (SELECT DISTINCT number * 2, number FROM numbers(1)); +EXPLAIN SYNTAX SELECT DISTINCT number FROM (SELECT DISTINCT number * 2 AS number FROM numbers(1)); +EXPLAIN SYNTAX SELECT DISTINCT b, a FROM (SELECT DISTINCT number % 2 AS a, number % 3 AS b FROM numbers(100)); +EXPLAIN SYNTAX SELECT 
DISTINCT a FROM (SELECT DISTINCT number % 2 AS a, number % 3 AS b FROM numbers(100)); +EXPLAIN SYNTAX SELECT DISTINCT a FROM (SELECT DISTINCT a FROM (SELECT DISTINCT number % 2 AS a, number % 3 AS b FROM numbers(100))); +EXPLAIN SYNTAX SELECT DISTINCT a FROM (SELECT DISTINCT a, b FROM (SELECT DISTINCT number % 2 AS a, number % 3 AS b FROM numbers(100))); +EXPLAIN SYNTAX SELECT DISTINCT a, b FROM (SELECT DISTINCT b, a FROM (SELECT DISTINCT number a, number b FROM numbers(1))); +EXPLAIN SYNTAX SELECT DISTINCT a, b FROM (SELECT b, a, a + b FROM (SELECT DISTINCT number % 2 AS a, number % 3 AS b FROM numbers(100))); +EXPLAIN SYNTAX SELECT DISTINCT a FROM (SELECT a FROM (SELECT DISTINCT number % 2 AS a, number % 3 AS b FROM numbers(100))); +EXPLAIN SYNTAX SELECT DISTINCT number FROM (SELECT DISTINCT number FROM numbers(1)) t1 CROSS JOIN numbers(2) t2; +EXPLAIN SYNTAX SELECT DISTINCT number FROM (SELECT DISTINCT number FROM numbers(1) t1 CROSS JOIN numbers(2) t2); -ANALYZE SELECT DISTINCT number FROM +EXPLAIN SYNTAX SELECT DISTINCT number FROM ( (SELECT DISTINCT number FROM numbers(1)) UNION ALL From b689d976f8f10dfadf1a4b901da8872be1dcd390 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 2 Nov 2020 15:52:50 +0300 Subject: [PATCH 427/432] Fix style --- src/Core/Field.h | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Core/Field.h b/src/Core/Field.h index be01352c168..46d7713e1e4 100644 --- a/src/Core/Field.h +++ b/src/Core/Field.h @@ -26,7 +26,6 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; extern const int LOGICAL_ERROR; extern const int ILLEGAL_TYPE_OF_ARGUMENT; - extern const int LOGICAL_ERROR; } template From 2eb76f28b904a2bd999ba5a91e05b455c030c362 Mon Sep 17 00:00:00 2001 From: alesapin Date: Mon, 2 Nov 2020 16:26:55 +0300 Subject: [PATCH 428/432] Fix time in test with TTL --- .../0_stateless/01493_alter_remove_properties_zookeeper.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01493_alter_remove_properties_zookeeper.sql b/tests/queries/0_stateless/01493_alter_remove_properties_zookeeper.sql index c095c4216fd..b810e2a8737 100644 --- a/tests/queries/0_stateless/01493_alter_remove_properties_zookeeper.sql +++ b/tests/queries/0_stateless/01493_alter_remove_properties_zookeeper.sql @@ -28,7 +28,7 @@ TTL column_comment + INTERVAL 2 MONTH; SHOW CREATE TABLE r_prop_table1; SHOW CREATE TABLE r_prop_table2; -INSERT INTO r_prop_table1 (column_codec, column_comment, column_ttl) VALUES ('str', toDate('2020-10-01'), 1); +INSERT INTO r_prop_table1 (column_codec, column_comment, column_ttl) VALUES ('str', toDate('2100-01-01'), 1); SYSTEM SYNC REPLICA r_prop_table2; From 68fc76a751cfef8ae6a64465a59fa1d9247acd60 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Mon, 2 Nov 2020 19:20:27 +0300 Subject: [PATCH 429/432] Update 01455_opentelemetry_distributed.sh --- tests/queries/0_stateless/01455_opentelemetry_distributed.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh index fe52b2b2032..08c8b1ce808 100755 --- a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh +++ b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh @@ -105,7 +105,7 @@ ${CLICKHOUSE_CLIENT} -q "system flush logs" ${CLICKHOUSE_CLIENT} -q " with count(*) as c -- expect 200 * 0.1 = 20 sampled events on average - select if(c > 5 and c < 35, 'OK', 'fail: ' || toString(c)) 
+ select if(c > 1 and c < 50, 'OK', 'fail: ' || toString(c)) from system.opentelemetry_span_log array join attribute.names as name, attribute.values as value where name = 'clickhouse.query_id' From baa8958ad41ef300d9fb3614e50adb68befa0cae Mon Sep 17 00:00:00 2001 From: sevirov <72220289+sevirov@users.noreply.github.com> Date: Mon, 2 Nov 2020 19:32:43 +0300 Subject: [PATCH 430/432] DOCSUP-1830: Describe the system.parts_columns table (#16531) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update order-by.md Documented the OFFSET and FETCH parameters. * Update prewhere.md Fixing broken links. * Update prewhere.md Reverted the changes. * Update order-by.md Trying to fix the broken links. * Create parts_columns.md Added a description of the system.parts_columns table. * Delete changes order-by.md Removed changes to a file that does not belong to this branch. * Update parts_columns.md Fixed the links. * Edit and translate Russian parts_columns.md Completed the translation into Russian. Co-authored-by: Dmitriy --- .../operations/system-tables/parts_columns.md | 148 ++++++++++++++++++ .../operations/system-tables/parts_columns.md | 148 ++++++++++++++++++ 2 files changed, 296 insertions(+) create mode 100644 docs/en/operations/system-tables/parts_columns.md create mode 100644 docs/ru/operations/system-tables/parts_columns.md diff --git a/docs/en/operations/system-tables/parts_columns.md b/docs/en/operations/system-tables/parts_columns.md new file mode 100644 index 00000000000..5c3dd7155f7 --- /dev/null +++ b/docs/en/operations/system-tables/parts_columns.md @@ -0,0 +1,148 @@ +# system.parts_columns {#system_tables-parts_columns} + +Contains information about parts and columns of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) tables. + +Each row describes one data part. + +Columns: + +- `partition` ([String](../../sql-reference/data-types/string.md)) — The partition name. To learn what a partition is, see the description of the [ALTER](../../sql-reference/statements/alter/index.md#query_language_queries_alter) query. + + Formats: + + - `YYYYMM` for automatic partitioning by month. + - `any_string` when partitioning manually. + +- `name` ([String](../../sql-reference/data-types/string.md)) — Name of the data part. + +- `part_type` ([String](../../sql-reference/data-types/string.md)) — The data part storing format. + + Possible values: + + - `Wide` — Each column is stored in a separate file in a filesystem. + - `Compact` — All columns are stored in one file in a filesystem. + + Data storing format is controlled by the `min_bytes_for_wide_part` and `min_rows_for_wide_part` settings of the [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table. + +- `active` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Flag that indicates whether the data part is active. If a data part is active, it’s used in a table. Otherwise, it’s deleted. Inactive data parts remain after merging. + +- `marks` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The number of marks. To get the approximate number of rows in a data part, multiply `marks` by the index granularity (usually 8192; this hint doesn’t work for adaptive granularity). + +- `rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The number of rows. + +- `bytes_on_disk` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Total size of all the data part files in bytes.
+ +- `data_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Total size of compressed data in the data part. All the auxiliary files (for example, files with marks) are not included. + +- `data_uncompressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Total size of uncompressed data in the data part. All the auxiliary files (for example, files with marks) are not included. + +- `marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The size of the file with marks. + +- `modification_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — The time the directory with the data part was modified. This usually corresponds to the time of data part creation. + +- `remove_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — The time when the data part became inactive. + +- `refcount` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of places where the data part is used. A value greater than 2 indicates that the data part is used in queries or merges. + +- `min_date` ([Date](../../sql-reference/data-types/date.md)) — The minimum value of the date key in the data part. + +- `max_date` ([Date](../../sql-reference/data-types/date.md)) — The maximum value of the date key in the data part. + +- `partition_id` ([String](../../sql-reference/data-types/string.md)) — ID of the partition. + +- `min_block_number` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The minimum number of data parts that make up the current part after merging. + +- `max_block_number` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The maximum number of data parts that make up the current part after merging. + +- `level` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Depth of the merge tree. Zero means that the current part was created by insert rather than by merging other parts. + +- `data_version` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number that is used to determine which mutations should be applied to the data part (mutations with a version higher than `data_version`). + +- `primary_key_bytes_in_memory` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The amount of memory (in bytes) used by primary key values. + +- `primary_key_bytes_in_memory_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The amount of memory (in bytes) reserved for primary key values. + +- `database` ([String](../../sql-reference/data-types/string.md)) — Name of the database. + +- `table` ([String](../../sql-reference/data-types/string.md)) — Name of the table. + +- `engine` ([String](../../sql-reference/data-types/string.md)) — Name of the table engine without parameters. + +- `disk_name` ([String](../../sql-reference/data-types/string.md)) — Name of a disk that stores the data part. + +- `path` ([String](../../sql-reference/data-types/string.md)) — Absolute path to the folder with data part files. + +- `column` ([String](../../sql-reference/data-types/string.md)) — Name of the column. + +- `type` ([String](../../sql-reference/data-types/string.md)) — Column type. + +- `column_position` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Ordinal position of a column in a table starting with 1. + +- `default_kind` ([String](../../sql-reference/data-types/string.md)) — Expression type (`DEFAULT`, `MATERIALIZED`, `ALIAS`) for the default value, or an empty string if it is not defined. 
+ +- `default_expression` ([String](../../sql-reference/data-types/string.md)) — Expression for the default value, or an empty string if it is not defined. + +- `column_bytes_on_disk` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Total size of the column in bytes. + +- `column_data_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Total size of compressed data in the column, in bytes. + +- `column_data_uncompressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Total size of the decompressed data in the column, in bytes. + +- `column_marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The size of the column with marks, in bytes. + +- `bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Alias for `bytes_on_disk`. + +- `marks_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Alias for `marks_bytes`. + +**Example** + +``` sql +SELECT * FROM system.parts_columns LIMIT 1 FORMAT Vertical; +``` + +``` text +Row 1: +────── +partition: tuple() +name: all_1_2_1 +part_type: Wide +active: 1 +marks: 2 +rows: 2 +bytes_on_disk: 155 +data_compressed_bytes: 56 +data_uncompressed_bytes: 4 +marks_bytes: 96 +modification_time: 2020-09-23 10:13:36 +remove_time: 2106-02-07 06:28:15 +refcount: 1 +min_date: 1970-01-01 +max_date: 1970-01-01 +partition_id: all +min_block_number: 1 +max_block_number: 2 +level: 1 +data_version: 1 +primary_key_bytes_in_memory: 2 +primary_key_bytes_in_memory_allocated: 64 +database: default +table: 53r93yleapyears +engine: MergeTree +disk_name: default +path: /var/lib/clickhouse/data/default/53r93yleapyears/all_1_2_1/ +column: id +type: Int8 +column_position: 1 +default_kind: +default_expression: +column_bytes_on_disk: 76 +column_data_compressed_bytes: 28 +column_data_uncompressed_bytes: 2 +column_marks_bytes: 48 +``` + +**See Also** + +- [MergeTree family](../../engines/table-engines/mergetree-family/mergetree.md) + +[Original article](https://clickhouse.tech/docs/en/operations/system_tables/parts_columns) diff --git a/docs/ru/operations/system-tables/parts_columns.md b/docs/ru/operations/system-tables/parts_columns.md new file mode 100644 index 00000000000..db4d453e8f1 --- /dev/null +++ b/docs/ru/operations/system-tables/parts_columns.md @@ -0,0 +1,148 @@ +# system.parts_columns {#system_tables-parts_columns} + +Содержит информацию о кусках данных и столбцах таблиц семейства [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md). + +Каждая строка описывает один кусок данных. + +Столбцы: + +- `partition` ([String](../../sql-reference/data-types/string.md)) — имя партиции. Что такое партиция вы можете узнать из описания запроса [ALTER](../../sql-reference/statements/alter/index.md#query_language_queries_alter). + + Форматы: + + - `YYYYMM` для автоматической схемы партиционирования по месяцам. + - `any_string` при партиционировании вручную. + +- `name` ([String](../../sql-reference/data-types/string.md)) — имя куска данных. + +- `part_type` ([String](../../sql-reference/data-types/string.md)) — формат хранения данных. + + Возможные значения: + + - `Wide` — каждая колонка хранится в отдельном файле. + - `Compact` — все колонки хранятся в одном файле. + + Формат хранения данных определяется настройками `min_bytes_for_wide_part` и `min_rows_for_wide_part` таблицы [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md). + +- `active` ([UInt8](../../sql-reference/data-types/int-uint.md)) — признак активности. 
Если кусок данных активен, то он используется таблицей, в противном случае он будет удален. Неактивные куски остаются после слияний. + +- `marks` ([UInt64](../../sql-reference/data-types/int-uint.md)) — количество засечек. Чтобы получить примерное количество строк в куске данных, умножьте `marks` на гранулированность индекса (обычно 8192). + +- `rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — количество строк. + +- `bytes_on_disk` ([UInt64](../../sql-reference/data-types/int-uint.md)) — общий размер всех файлов кусков данных в байтах. + +- `data_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — общий размер сжатой информации в куске данных. Размер всех дополнительных файлов (например, файлов с засечками) не учитывается. + +- `data_uncompressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — общий размер распакованной информации в куске данных. Размер всех дополнительных файлов (например, файлов с засечками) не учитывается. + +- `marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — размер файла с засечками. + +- `modification_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — время модификации директории с куском данных. Обычно соответствует времени создания куска. + +- `remove_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — время, когда кусок данных стал неактивным. + +- `refcount` ([UInt32](../../sql-reference/data-types/int-uint.md)) — количество мест, в котором кусок данных используется. Значение больше 2 говорит о том, что кусок участвует в запросах или в слияниях. + +- `min_date` ([Date](../../sql-reference/data-types/date.md)) — минимальное значение ключа даты в куске данных. + +- `max_date` ([Date](../../sql-reference/data-types/date.md)) — максимальное значение ключа даты в куске данных. + +- `partition_id` ([String](../../sql-reference/data-types/string.md)) — ID партиции. + +- `min_block_number` ([UInt64](../../sql-reference/data-types/int-uint.md)) — минимальное число кусков данных, из которых состоит текущий после слияния. + +- `max_block_number` ([UInt64](../../sql-reference/data-types/int-uint.md)) — максимальное число кусков данных, из которых состоит текущий после слияния. + +- `level` ([UInt32](../../sql-reference/data-types/int-uint.md)) — глубина дерева слияний. Если слияний не было, то `level=0`. + +- `data_version` ([UInt64](../../sql-reference/data-types/int-uint.md)) — число, которое используется для определения того, какие мутации необходимо применить к куску данных (мутации с версией большей, чем `data_version`). + +- `primary_key_bytes_in_memory` ([UInt64](../../sql-reference/data-types/int-uint.md)) — объём памяти в байтах, занимаемой значениями первичных ключей. + +- `primary_key_bytes_in_memory_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md)) — объём памяти в байтах, выделенный для размещения первичных ключей. + +- `database` ([String](../../sql-reference/data-types/string.md)) — имя базы данных. + +- `table` ([String](../../sql-reference/data-types/string.md)) — имя таблицы. + +- `engine` ([String](../../sql-reference/data-types/string.md)) — имя движка таблицы, без параметров. + +- `disk_name` ([String](../../sql-reference/data-types/string.md)) — имя диска, на котором находится кусок данных. + +- `path` ([String](../../sql-reference/data-types/string.md)) — абсолютный путь к папке с файлами кусков данных. + +- `column` ([String](../../sql-reference/data-types/string.md)) — имя столбца. 
+ +- `type` ([String](../../sql-reference/data-types/string.md)) — тип столбца. + +- `column_position` ([UInt64](../../sql-reference/data-types/int-uint.md)) — порядковый номер столбца (нумерация начинается с 1). + +- `default_kind` ([String](../../sql-reference/data-types/string.md)) — тип выражения (`DEFAULT`, `MATERIALIZED`, `ALIAS`) для значения по умолчанию или пустая строка. + +- `default_expression` ([String](../../sql-reference/data-types/string.md)) — выражение для значения по умолчанию или пустая строка. + +- `column_bytes_on_disk` ([UInt64](../../sql-reference/data-types/int-uint.md)) — общий размер столбца в байтах. + +- `column_data_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — общий размер сжатой информации в столбце в байтах. + +- `column_data_uncompressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — общий размер распакованной информации в столбце в байтах. + +- `column_marks_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — размер столбца с засечками в байтах. + +- `bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — алиас для `bytes_on_disk`. + +- `marks_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) — алиас для `marks_bytes`. + +**Пример** + +``` sql +SELECT * FROM system.parts_columns LIMIT 1 FORMAT Vertical; +``` + +``` text +Row 1: +────── +partition: tuple() +name: all_1_2_1 +part_type: Wide +active: 1 +marks: 2 +rows: 2 +bytes_on_disk: 155 +data_compressed_bytes: 56 +data_uncompressed_bytes: 4 +marks_bytes: 96 +modification_time: 2020-09-23 10:13:36 +remove_time: 2106-02-07 06:28:15 +refcount: 1 +min_date: 1970-01-01 +max_date: 1970-01-01 +partition_id: all +min_block_number: 1 +max_block_number: 2 +level: 1 +data_version: 1 +primary_key_bytes_in_memory: 2 +primary_key_bytes_in_memory_allocated: 64 +database: default +table: 53r93yleapyears +engine: MergeTree +disk_name: default +path: /var/lib/clickhouse/data/default/53r93yleapyears/all_1_2_1/ +column: id +type: Int8 +column_position: 1 +default_kind: +default_expression: +column_bytes_on_disk: 76 +column_data_compressed_bytes: 28 +column_data_uncompressed_bytes: 2 +column_marks_bytes: 48 +``` + +**Смотрите также** + +- [Движок MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) + +[Оригинальная статья](https://clickhouse.tech/docs/en/operations/system_tables/parts_columns) From 6db9ee430065e0674d12f49d4c126f95d3bdacb0 Mon Sep 17 00:00:00 2001 From: damozhaeva <68770561+damozhaeva@users.noreply.github.com> Date: Mon, 2 Nov 2020 19:54:23 +0300 Subject: [PATCH 431/432] DOCSUP-3172: Edit and translate to Russian (#16153) * Edit and translate to Russian. * Update docs/ru/engines/table-engines/integrations/kafka.md Co-authored-by: BayoNet * Update docs/ru/interfaces/http.md Co-authored-by: BayoNet * Update docs/ru/interfaces/http.md Co-authored-by: BayoNet * Ticket. * http.
* kafka ru Co-authored-by: Daria Mozhaeva Co-authored-by: BayoNet --- .../mergetree-family/mergetree.md | 2 +- docs/en/interfaces/http.md | 4 +- .../operations/utilities/clickhouse-local.md | 2 +- .../sql-reference/statements/create/view.md | 2 +- .../table-engines/integrations/kafka.md | 16 +++++++ .../mergetree-family/mergetree.md | 1 + docs/ru/interfaces/http.md | 5 ++- .../operations/utilities/clickhouse-local.md | 42 +++++++++++++++---- .../sql-reference/statements/create/view.md | 2 +- 9 files changed, 62 insertions(+), 14 deletions(-) diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index c96e31a6eed..d48c05326c1 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -88,7 +88,7 @@ For a description of parameters, see the [CREATE query description](../../../sql - `index_granularity` — Maximum number of data rows between the marks of an index. Default value: 8192. See [Data Storage](#mergetree-data-storage). - `index_granularity_bytes` — Maximum size of data granules in bytes. Default value: 10Mb. To restrict the granule size only by number of rows, set to 0 (not recommended). See [Data Storage](#mergetree-data-storage). - - `min_index_granularity_bytes` — Min allowed size of data granules in bytes. Default value: 1024b. To provide safeguard against accidentally creating tables with very low index_granularity_bytes. See [Data Storage](#mergetree-data-storage). + - `min_index_granularity_bytes` — Min allowed size of data granules in bytes. Default value: 1024b. To provide a safeguard against accidentally creating tables with very low index_granularity_bytes. See [Data Storage](#mergetree-data-storage). - `enable_mixed_granularity_parts` — Enables or disables transitioning to control the granule size with the `index_granularity_bytes` setting. Before version 19.11, there was only the `index_granularity` setting for restricting granule size. The `index_granularity_bytes` setting improves ClickHouse performance when selecting data from tables with big rows (tens and hundreds of megabytes). If you have tables with big rows, you can enable this setting for the tables to improve the efficiency of `SELECT` queries. - `use_minimalistic_part_header_in_zookeeper` — Storage method of the data parts headers in ZooKeeper. If `use_minimalistic_part_header_in_zookeeper=1`, then ZooKeeper stores less data. For more information, see the [setting description](../../../operations/server-configuration-parameters/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) in “Server configuration parameters”. - `min_merge_bytes_to_use_direct_io` — The minimum data volume for merge operation that is required for using direct I/O access to the storage disk. When merging data parts, ClickHouse calculates the total storage volume of all the data to be merged. If the volume exceeds `min_merge_bytes_to_use_direct_io` bytes, ClickHouse reads and writes the data to the storage disk using the direct I/O interface (`O_DIRECT` option). If `min_merge_bytes_to_use_direct_io = 0`, then direct I/O is disabled. Default value: `10 * 1024 * 1024 * 1024` bytes. 
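The `min_index_granularity_bytes` safeguard documented in the hunk above is easy to exercise. A minimal sketch, assuming a scratch table (the table name and the deliberately small value are illustrative only, not part of this patch): with the default `min_index_granularity_bytes = 1024`, a `CREATE TABLE` that requests a smaller `index_granularity_bytes` should be rejected when the table settings are validated, which is exactly the accidental misconfiguration the new setting guards against.

``` sql
-- Sketch: an index_granularity_bytes below the min_index_granularity_bytes
-- floor (1024 by default) is expected to make table creation fail,
-- preventing accidentally tiny granules.
CREATE TABLE granularity_guard_demo
(
    key UInt64,
    value String
)
ENGINE = MergeTree()
ORDER BY key
SETTINGS index_granularity_bytes = 32;

-- Raising the requested value to at least the floor should succeed, e.g.:
-- SETTINGS index_granularity_bytes = 10485760
```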
diff --git a/docs/en/interfaces/http.md b/docs/en/interfaces/http.md index 4840737fd48..310286e3d44 100644 --- a/docs/en/interfaces/http.md +++ b/docs/en/interfaces/http.md @@ -79,7 +79,7 @@ By default, data is returned in TabSeparated format (for more information, see t You use the FORMAT clause of the query to request any other format. -Also, you can use the ‘default_format’ URL parameter or ‘X-ClickHouse-Format’ header to specify a default format other than TabSeparated. +Also, you can use the ‘default_format’ URL parameter or the ‘X-ClickHouse-Format’ header to specify a default format other than TabSeparated. ``` bash $ echo 'SELECT 1 FORMAT Pretty' | curl 'http://localhost:8123/?' --data-binary @- @@ -170,7 +170,7 @@ $ echo "SELECT 1" | gzip -c | curl -sS --data-binary @- -H 'Content-Encoding: gz !!! note "Note" Some HTTP clients might decompress data from the server by default (with `gzip` and `deflate`) and you might get decompressed data even if you use the compression settings correctly. -You can use the ‘database’ URL parameter or ‘X-ClickHouse-Database’ header to specify the default database. +You can use the ‘database’ URL parameter or the ‘X-ClickHouse-Database’ header to specify the default database. ``` bash $ echo 'SELECT number FROM numbers LIMIT 10' | curl 'http://localhost:8123/?database=system' --data-binary @- diff --git a/docs/en/operations/utilities/clickhouse-local.md b/docs/en/operations/utilities/clickhouse-local.md index 132f50fa24e..af3d06898fd 100644 --- a/docs/en/operations/utilities/clickhouse-local.md +++ b/docs/en/operations/utilities/clickhouse-local.md @@ -16,7 +16,7 @@ By default `clickhouse-local` does not have access to data on the same host, but !!! warning "Warning" It is not recommended to load production server configuration into `clickhouse-local` because data can be damaged in case of human error. -For temporary data an unique temporary data directory is created by default. If you want to override this behavior the data directory can be explicitly specified with the `-- --path` option. +For temporary data, a unique temporary data directory is created by default. If you want to override this behavior, the data directory can be explicitly specified with the `-- --path` option. ## Usage {#usage} diff --git a/docs/en/sql-reference/statements/create/view.md b/docs/en/sql-reference/statements/create/view.md index 17a6c26c084..4370735b8d9 100644 --- a/docs/en/sql-reference/statements/create/view.md +++ b/docs/en/sql-reference/statements/create/view.md @@ -15,7 +15,7 @@ Syntax: CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] AS SELECT ... ``` -Normal views don’t store any data, they just perform a read from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause. +Normal views don’t store any data. They just perform a read from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause. 
As an example, assume you’ve created a view: diff --git a/docs/ru/engines/table-engines/integrations/kafka.md b/docs/ru/engines/table-engines/integrations/kafka.md index bcca349f743..940fee2452b 100644 --- a/docs/ru/engines/table-engines/integrations/kafka.md +++ b/docs/ru/engines/table-engines/integrations/kafka.md @@ -159,6 +159,22 @@ Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format В документе [librdkafka configuration reference](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md) можно увидеть список возможных опций конфигурации. Используйте подчеркивание (`_`) вместо точки в конфигурации ClickHouse. Например, `check.crcs=true` будет соответствовать `true`. +### Поддержка Kerberos {#kafka-kerberos-support} + +Чтобы начать работу с Kafka с поддержкой Kerberos, добавьте дочерний элемент `security_protocol` со значением `sasl_plaintext`. Этого будет достаточно, если получен тикет на получение тикета (ticket-granting ticket) Kerberos и он кэшируется средствами ОС. +ClickHouse может поддерживать учетные данные Kerberos с помощью файла keytab. Рассмотрим дочерние элементы `sasl_kerberos_service_name`, `sasl_kerberos_keytab`, `sasl_kerberos_principal` и `sasl.kerberos.kinit.cmd`. + +Пример: + +``` xml + + + SASL_PLAINTEXT + /home/kafkauser/kafkauser.keytab + kafkauser/kafkahost@EXAMPLE.COM + +``` + ## Виртуальные столбцы {#virtualnye-stolbtsy} - `_topic` — топик Kafka. diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md index ef1e236a95c..bb076d480cd 100644 --- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md @@ -79,6 +79,7 @@ ORDER BY expr - `index_granularity` — максимальное количество строк данных между засечками индекса. По умолчанию — 8192. Смотрите [Хранение данных](#mergetree-data-storage). - `index_granularity_bytes` — максимальный размер гранул данных в байтах. По умолчанию — 10Mb. Чтобы ограничить размер гранул только количеством строк, установите значение 0 (не рекомендовано). Смотрите [Хранение данных](#mergetree-data-storage). + - `min_index_granularity_bytes` — минимально допустимый размер гранул данных в байтах. Значение по умолчанию — 1024b. Для обеспечения защиты от случайного создания таблиц с очень низким значением `index_granularity_bytes`. Смотрите [Хранение данных](#mergetree-data-storage). - `enable_mixed_granularity_parts` — включает или выключает переход к ограничению размера гранул с помощью настройки `index_granularity_bytes`. Настройка `index_granularity_bytes` улучшает производительность ClickHouse при выборке данных из таблиц с большими (десятки и сотни мегабайтов) строками. Если у вас есть таблицы с большими строками, можно включить эту настройку, чтобы повысить эффективность запросов `SELECT`. - `use_minimalistic_part_header_in_zookeeper` — Способ хранения заголовков кусков данных в ZooKeeper. Если `use_minimalistic_part_header_in_zookeeper = 1`, то ZooKeeper хранит меньше данных. Подробнее читайте в [описании настройки](../../../operations/server-configuration-parameters/settings.md#server-settings-use_minimalistic_part_header_in_zookeeper) в разделе "Конфигурационные параметры сервера". - `min_merge_bytes_to_use_direct_io` — минимальный объём данных при слиянии, необходимый для прямого (небуферизованного) чтения/записи (direct I/O) на диск. При слиянии частей данных ClickHouse вычисляет общий объём хранения всех данных, подлежащих слиянию. 
Если общий объём хранения всех данных для чтения превышает `min_bytes_to_use_direct_io` байт, тогда ClickHouse использует флаг `O_DIRECT` при чтении данных с диска. Если `min_merge_bytes_to_use_direct_io = 0`, тогда прямой ввод-вывод отключен. Значение по умолчанию: `10 * 1024 * 1024 * 1024` байтов. diff --git a/docs/ru/interfaces/http.md b/docs/ru/interfaces/http.md index 279f2916c78..096fb6c05bc 100644 --- a/docs/ru/interfaces/http.md +++ b/docs/ru/interfaces/http.md @@ -76,8 +76,11 @@ ECT 1 ``` По умолчанию, данные возвращаются в формате TabSeparated (подробнее смотри раздел «Форматы»). + Можно попросить любой другой формат - с помощью секции FORMAT запроса. +Кроме того, вы можете использовать параметр URL-адреса `default_format` или заголовок `X-ClickHouse-Format`, чтобы указать формат по умолчанию, отличный от `TabSeparated`. + ``` bash $ echo 'SELECT 1 FORMAT Pretty' | curl 'http://localhost:8123/?' --data-binary @- ┏━━━┓ @@ -168,7 +171,7 @@ $ echo "SELECT 1" | gzip -c | curl -sS --data-binary @- -H 'Content-Encoding: gz !!! note "Примечание" Некоторые HTTP-клиенты могут по умолчанию распаковывать данные (`gzip` и `deflate`) с сервера в фоновом режиме и вы можете получить распакованные данные, даже если правильно используете настройки сжатия. -В параметре URL database может быть указана БД по умолчанию. +Вы можете использовать параметр URL `database` или заголовок `X-ClickHouse-Database`, чтобы указать БД по умолчанию. ``` bash $ echo 'SELECT number FROM numbers LIMIT 10' | curl 'http://localhost:8123/?database=system' --data-binary @- diff --git a/docs/ru/operations/utilities/clickhouse-local.md b/docs/ru/operations/utilities/clickhouse-local.md index 962b63e2b70..2b5c9b119e2 100644 --- a/docs/ru/operations/utilities/clickhouse-local.md +++ b/docs/ru/operations/utilities/clickhouse-local.md @@ -14,6 +14,8 @@ toc_title: clickhouse-local !!! warning "Warning" Мы не рекомендуем подключать серверную конфигурацию к `clickhouse-local`, поскольку данные можно легко повредить неосторожными действиями. +Для временных данных по умолчанию создается специальный каталог. Если вы хотите обойти это действие, каталог данных можно указать с помощью опции `-- --path`. + ## Вызов программы {#vyzov-programmy} Основной формат вызова: @@ -39,25 +41,51 @@ $ clickhouse-local --structure "table_structure" --input-format "format_of_incom ## Примеры вызова {#primery-vyzova} ``` bash -$ echo -e "1,2\n3,4" | clickhouse-local -S "a Int64, b Int64" -if "CSV" -q "SELECT * FROM table" +$ echo -e "1,2\n3,4" | clickhouse-local --structure "a Int64, b Int64" \ + --input-format "CSV" --query "SELECT * FROM table" Read 2 rows, 32.00 B in 0.000 sec., 5182 rows/sec., 80.97 KiB/sec. -1 2 -3 4 +1 2 +3 4 ``` Вызов выше эквивалентен следующему: ``` bash -$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table" +$ echo -e "1,2\n3,4" | clickhouse-local --query " + CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); + SELECT a, b FROM table; + DROP TABLE table" Read 2 rows, 32.00 B in 0.000 sec., 4987 rows/sec., 77.93 KiB/sec. -1 2 -3 4 +1 2 +3 4 +``` + + +Необязательно использовать ключи `stdin` или `--file`. 
Вы можете открывать любое количество файлов с помощью [табличной функции `file`](../../sql-reference/table-functions/file.md): + +``` bash +$ echo 1 | tee 1.tsv +1 + +$ echo 2 | tee 2.tsv +2 + +$ clickhouse-local --query " + select * from file('1.tsv', TSV, 'a int') t1 + cross join file('2.tsv', TSV, 'b int') t2" +1 2 ``` А теперь давайте выведем на экран объём оперативной памяти, занимаемой пользователями (Unix): ``` bash -$ ps aux | tail -n +2 | awk '{ printf("%s\t%s\n", $1, $4) }' | clickhouse-local -S "user String, mem Float64" -q "SELECT user, round(sum(mem), 2) as memTotal FROM table GROUP BY user ORDER BY memTotal DESC FORMAT Pretty" +$ ps aux | tail -n +2 | awk '{ printf("%s\t%s\n", $1, $4) }' \ + | clickhouse-local --structure "user String, mem Float64" \ + --query "SELECT user, round(sum(mem), 2) as memTotal + FROM table GROUP BY user ORDER BY memTotal DESC FORMAT Pretty" +``` + +``` text Read 186 rows, 4.15 KiB in 0.035 sec., 5302 rows/sec., 118.34 KiB/sec. ┏━━━━━━━━━━┳━━━━━━━━━━┓ ┃ user ┃ memTotal ┃ diff --git a/docs/ru/sql-reference/statements/create/view.md b/docs/ru/sql-reference/statements/create/view.md index 891e33bc9b3..09026874948 100644 --- a/docs/ru/sql-reference/statements/create/view.md +++ b/docs/ru/sql-reference/statements/create/view.md @@ -13,7 +13,7 @@ toc_title: "\u041f\u0440\u0435\u0434\u0441\u0442\u0430\u0432\u043b\u0435\u043d\u CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] AS SELECT ... ``` -Normal views don’t store any data, they just perform a read from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause. +Обычные представления не хранят никаких данных, они выполняют чтение данных из другой таблицы при каждом доступе. Другими словами, обычное представление - это не что иное, как сохраненный запрос. При чтении данных из представления этот сохраненный запрос используется как подзапрос в секции [FROM](../../../sql-reference/statements/select/from.md). Для примера, пусть вы создали представление: From 70982fdc540ae0be874811c9c0b88ed36c297286 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 2 Nov 2020 20:45:51 +0300 Subject: [PATCH 432/432] Update version_date.tsv after release 20.3.21.2 --- utils/list-versions/version_date.tsv | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 37e3e412a63..a6ca642b985 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -28,6 +28,7 @@ v20.4.5.36-stable 2020-06-10 v20.4.4.18-stable 2020-05-26 v20.4.3.16-stable 2020-05-23 v20.4.2.9-stable 2020-05-12 +v20.3.21.2-lts 2020-11-02 v20.3.20.6-lts 2020-10-09 v20.3.19.4-lts 2020-09-18 v20.3.18.10-lts 2020-09-08
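A closing note on the test migration that dominates this series: `EXPLAIN SYNTAX` prints a query after AST-level rewrites instead of executing it, which is how the tests above assert that each `optimize_*` setting actually fired, replacing the old `enable_debug_queries` / `ANALYZE` debug path. A minimal sketch using one query taken verbatim from 01322_any_input_optimize.sql; the rewritten form in the comment is the effect the test checks for, and the exact output text may differ between ClickHouse versions.

``` sql
SET optimize_move_functions_out_of_any = 1;

-- Prints the rewritten query. With the setting enabled, the deterministic
-- arithmetic is expected to move out of any(), giving something close to:
--   SELECT any(number) + (any(number) * 2) FROM numbers(1, 2)
EXPLAIN SYNTAX SELECT any(number + number * 2) FROM numbers(1, 2);

SET optimize_move_functions_out_of_any = 0;

-- With the setting disabled, the printed query should be essentially unchanged.
EXPLAIN SYNTAX SELECT any(number + number * 2) FROM numbers(1, 2);
```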