Merge pull request #45961 from ClickHouse/vdimir/sparkbar-fix

Vladimir C 2023-02-06 11:52:15 +01:00 committed by GitHub
commit e3712bb2c6
6 changed files with 229 additions and 214 deletions

View File

@@ -1,5 +1,5 @@
---
slug: /en/sql-reference/aggregate-functions/reference/sparkbar
sidebar_position: 311
sidebar_label: sparkbar
---
@@ -7,9 +7,11 @@ sidebar_label: sparkbar
# sparkbar
The function plots a frequency histogram for values `x` and the repetition rate `y` of these values over the interval `[min_x, max_x]`.
Repetitions for all `x` falling into the same bucket are averaged, so data should be pre-aggregated.
Negative repetitions are ignored.
If no interval is specified, then the minimum `x` is used as the interval start, and the maximum `x` — as the interval end.
Otherwise, values outside the interval are ignored.
**Syntax**
@@ -37,29 +39,24 @@ sparkbar(width[, min_x, max_x])(x, y)
Query:
``` sql
CREATE TABLE spark_bar_data (`cnt` UInt64,`event_date` Date) ENGINE = MergeTree ORDER BY event_date SETTINGS index_granularity = 8192;
INSERT INTO spark_bar_data VALUES(1,'2020-01-01'),(4,'2020-01-02'),(5,'2020-01-03'),(2,'2020-01-04'),(3,'2020-01-05'),(7,'2020-01-06'),(6,'2020-01-07'),(8,'2020-01-08'),(2,'2020-01-11');
CREATE TABLE spark_bar_data (`value` Int64, `event_date` Date) ENGINE = MergeTree ORDER BY event_date;
SELECT sparkbar(9)(event_date,cnt) FROM spark_bar_data;
INSERT INTO spark_bar_data VALUES (1,'2020-01-01'), (3,'2020-01-02'), (4,'2020-01-02'), (-3,'2020-01-02'), (5,'2020-01-03'), (2,'2020-01-04'), (3,'2020-01-05'), (7,'2020-01-06'), (6,'2020-01-07'), (8,'2020-01-08'), (2,'2020-01-11');
SELECT sparkbar(9,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_data;
SELECT sparkbar(9)(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_data GROUP BY event_date);
SELECT sparkbar(9, toDate('2020-01-01'), toDate('2020-01-10'))(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_data GROUP BY event_date);
```
Result:
``` text
┌─sparkbar(9)(event_date, cnt)─┐
│ ▁▅▄▃██▅ ▁ │
│ ▂▅▂▃▆█ ▂ │
└──────────────────────────────┘
┌─sparkbar(9, toDate('2020-01-01'), toDate('2020-01-10'))(event_date, cnt)─┐
│▁▄▄▂▅▇█▁ │
│ ▂▅▂▃▇▆█ │
└──────────────────────────────────────────────────────────────────────────┘
```

View File

@@ -1,14 +1,15 @@
---
slug: /ru/sql-reference/aggregate-functions/reference/sparkbar
sidebar_position: 311
sidebar_label: sparkbar
---
# sparkbar {#sparkbar}
The function plots a frequency histogram for the given values `x` and the repetition rate `y` of those values over the interval `[min_x, max_x]`.
The function plots a frequency histogram for the given values `x` and the repetition rate `y` of those values over the interval `[min_x, max_x]`. Repetitions for all `x` falling into the same bucket are averaged, so the data should be pre-aggregated. Negative repetitions are ignored.
If no interval is specified, the minimum `x` is used as the lower bound of the interval, and the maximum `x` as the upper bound.
Values of `x` outside the specified interval are ignored.
**Syntax**
@@ -39,29 +40,23 @@ sparkbar(width[, min_x, max_x])(x, y)
Query:
``` sql
CREATE TABLE spark_bar_data (`cnt` UInt64,`event_date` Date) ENGINE = MergeTree ORDER BY event_date SETTINGS index_granularity = 8192;
INSERT INTO spark_bar_data VALUES(1,'2020-01-01'),(4,'2020-01-02'),(5,'2020-01-03'),(2,'2020-01-04'),(3,'2020-01-05'),(7,'2020-01-06'),(6,'2020-01-07'),(8,'2020-01-08'),(2,'2020-01-11');
CREATE TABLE spark_bar_data (`value` Int64, `event_date` Date) ENGINE = MergeTree ORDER BY event_date;
SELECT sparkbar(9)(event_date,cnt) FROM spark_bar_data;
INSERT INTO spark_bar_data VALUES (1,'2020-01-01'), (3,'2020-01-02'), (4,'2020-01-02'), (-3,'2020-01-02'), (5,'2020-01-03'), (2,'2020-01-04'), (3,'2020-01-05'), (7,'2020-01-06'), (6,'2020-01-07'), (8,'2020-01-08'), (2,'2020-01-11');
SELECT sparkbar(9,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_data;
SELECT sparkbar(9)(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_data GROUP BY event_date);
SELECT sparkbar(9, toDate('2020-01-01'), toDate('2020-01-10'))(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_data GROUP BY event_date);
```
Result:
``` text
┌─sparkbar(9)(event_date, cnt)─┐
│ ▁▅▄▃██▅ ▁ │
│ ▂▅▂▃▆█ ▂ │
└──────────────────────────────┘
┌─sparkbar(9, toDate('2020-01-01'), toDate('2020-01-10'))(event_date, cnt)─┐
│▁▄▄▂▅▇█▁ │
│ ▂▅▂▃▇▆█ │
└──────────────────────────────────────────────────────────────────────────┘
```

View File

@@ -50,11 +50,13 @@ AggregateFunctionPtr createAggregateFunctionSparkbar(const std::string & name, c
assertBinary(name, arguments);
if (params.size() != 1 && params.size() != 3)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "The number of params does not match for aggregate function {}", name);
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"The number of params does not match for aggregate function '{}', expected 1 or 3, got {}", name, params.size());
if (params.size() == 3)
{
if (params.at(1).getType() != arguments[0]->getDefault().getType() || params.at(2).getType() != arguments[0]->getDefault().getType())
if (params.at(1).getType() != arguments[0]->getDefault().getType() ||
params.at(2).getType() != arguments[0]->getDefault().getType())
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"The second and third parameters are not the same type as the first arguments for aggregate function {}", name);
@@ -63,7 +65,6 @@ AggregateFunctionPtr createAggregateFunctionSparkbar(const std::string & name, c
return createAggregateFunctionSparkbarImpl(name, *arguments[0], *arguments[1], arguments, params);
}
}
void registerAggregateFunctionSparkbar(AggregateFunctionFactory & factory)

View File

@@ -18,10 +18,15 @@
namespace DB
{
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
}
template<typename X, typename Y>
struct AggregateFunctionSparkbarData
{
/// TODO: calculate histogram instead of storing all points
using Points = HashMap<X, Y>;
Points points;
@@ -31,20 +36,26 @@ struct AggregateFunctionSparkbarData
Y min_y = std::numeric_limits<Y>::max();
Y max_y = std::numeric_limits<Y>::lowest();
void insert(const X & x, const Y & y)
Y insert(const X & x, const Y & y)
{
auto result = points.insert({x, y});
if (!result.second)
result.first->getMapped() += y;
if (isNaN(y) || y <= 0)
return 0;
auto [it, inserted] = points.insert({x, y});
if (!inserted)
it->getMapped() += y;
return it->getMapped();
}
void add(X x, Y y)
{
insert(x, y);
auto new_y = insert(x, y);
min_x = std::min(x, min_x);
max_x = std::max(x, max_x);
min_y = std::min(y, min_y);
max_y = std::max(y, max_y);
max_y = std::max(new_y, max_y);
}
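A minimal sketch of the accumulation semantics above, using `std::map` in place of ClickHouse's `HashMap` (the struct name and types here are illustrative, not the actual ClickHouse code): negative repetitions are dropped on insert, repetitions for the same `x` are summed, and `max_y` now tracks the accumulated total rather than the raw input value.

``` cpp
#include <algorithm>
#include <cstdint>
#include <limits>
#include <map>

// Illustrative stand-in for AggregateFunctionSparkbarData (hypothetical name).
struct SparkbarDataSketch
{
    std::map<int64_t, int64_t> points;
    int64_t min_x = std::numeric_limits<int64_t>::max();
    int64_t max_x = std::numeric_limits<int64_t>::lowest();
    int64_t max_y = std::numeric_limits<int64_t>::lowest();

    // Returns the accumulated y for this x, or 0 if the value was ignored.
    int64_t insert(int64_t x, int64_t y)
    {
        if (y <= 0)
            return 0;                    // negative repetitions are ignored
        auto [it, inserted] = points.try_emplace(x, y);
        if (!inserted)
            it->second += y;             // same x: sum the repetitions
        return it->second;
    }

    void add(int64_t x, int64_t y)
    {
        auto new_y = insert(x, y);
        min_x = std::min(x, min_x);
        max_x = std::max(x, max_x);
        max_y = std::max(new_y, max_y);  // track accumulated, not raw, y
    }
};
```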
void merge(const AggregateFunctionSparkbarData & other)
@@ -53,10 +64,14 @@ struct AggregateFunctionSparkbarData
return;
for (auto & point : other.points)
insert(point.getKey(), point.getMapped());
{
auto new_y = insert(point.getKey(), point.getMapped());
max_y = std::max(new_y, max_y);
}
min_x = std::min(other.min_x, min_x);
max_x = std::max(other.max_x, max_x);
min_y = std::min(other.min_y, min_y);
max_y = std::max(other.max_y, max_y);
}
@@ -85,7 +100,6 @@ struct AggregateFunctionSparkbarData
size_t size;
readVarUInt(size, buf);
/// TODO Protection against huge size
X x;
Y y;
for (size_t i = 0; i < size; ++i)
@@ -95,7 +109,6 @@ struct AggregateFunctionSparkbarData
insert(x, y);
}
}
};
template<typename X, typename Y>
@@ -104,16 +117,17 @@ class AggregateFunctionSparkbar final
{
private:
size_t width;
X min_x;
X max_x;
bool specified_min_max_x;
const size_t width = 0;
template <class T>
size_t updateFrame(ColumnString::Chars & frame, const T value) const
/// Range for x specified in parameters.
const bool is_specified_range_x = false;
const X begin_x = std::numeric_limits<X>::min();
const X end_x = std::numeric_limits<X>::max();
size_t updateFrame(ColumnString::Chars & frame, Y value) const
{
static constexpr std::array<std::string_view, 9> bars{" ", "▁", "▂", "▃", "▄", "▅", "▆", "▇", "█"};
const auto & bar = (isNaN(value) || value > 8 || value < 1) ? bars[0] : bars[static_cast<UInt8>(value)];
const auto & bar = (isNaN(value) || value < 1 || 8 < value) ? bars[0] : bars[static_cast<UInt8>(value)];
frame.insert(bar.begin(), bar.end());
return bar.size();
}
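As a standalone sketch of the glyph mapping above (the function name `barGlyph` is made up for illustration): a level in `[1, 8]` selects one of the eight block characters, and anything else, including NaN, renders as a blank cell.

``` cpp
#include <array>
#include <cmath>
#include <cstddef>
#include <string_view>

// Hypothetical helper mirroring updateFrame's glyph selection.
std::string_view barGlyph(double level)
{
    static constexpr std::array<std::string_view, 9> bars{
        " ", "▁", "▂", "▃", "▄", "▅", "▆", "▇", "█"};
    if (std::isnan(level) || level < 1 || 8 < level)
        return bars[0];                       // out of range: blank cell
    return bars[static_cast<std::size_t>(level)];
}
```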
@@ -122,161 +136,108 @@ private:
* The minimum value of y is rendered as the lowest height "▁",
* the maximum value of y is rendered as the highest height "█", and the middle value will be rendered proportionally.
* If a bucket has no y value, it will be rendered as " ".
* If the actual number of buckets is greater than the specified number of buckets, the histogram is compressed by width.
* For example, there are actually 11 buckets, specify 10 buckets, and divide the 11 buckets as follows (11/10):
* 0.0-1.1, 1.1-2.2, 2.2-3.3, 3.3-4.4, 4.4-5.5, 5.5-6.6, 6.6-7.7, 7.7-8.8, 8.8-9.9, 9.9-11.
* The y value of the first bucket will be calculated as follows:
* the actual y value of the first position + the actual second position y*0.1, and the remaining y*0.9 is reserved for the next bucket.
* The next bucket will use the last y*0.9 + the actual third position y*0.2, and the remaining y*0.8 will be reserved for the next bucket. And so on.
*/
void render(ColumnString & to_column, const AggregateFunctionSparkbarData<X, Y> & data) const
{
size_t sz = 0;
auto & values = to_column.getChars();
auto & offsets = to_column.getOffsets();
auto update_column = [&] ()
if (data.points.empty())
{
values.push_back('\0');
offsets.push_back(offsets.empty() ? 1 : offsets.back() + 1);
return;
}
auto from_x = is_specified_range_x ? begin_x : data.min_x;
auto to_x = is_specified_range_x ? end_x : data.max_x;
if (from_x >= to_x)
{
size_t sz = updateFrame(values, 8);
values.push_back('\0');
offsets.push_back(offsets.empty() ? sz + 1 : offsets.back() + sz + 1);
};
if (data.points.empty() || !width)
return update_column();
size_t diff_x;
X min_x_local;
if (specified_min_max_x)
{
diff_x = max_x - min_x;
min_x_local = min_x;
}
else
{
diff_x = data.max_x - data.min_x;
min_x_local = data.min_x;
return;
}
if ((diff_x + 1) <= width)
{
Y min_y = data.min_y;
Y max_y = data.max_y;
Float64 diff_y = max_y - min_y;
PaddedPODArray<Y> histogram(width, 0);
PaddedPODArray<UInt64> fhistogram(width, 0);
if (diff_y != 0.0)
for (const auto & point : data.points)
{
if (point.getKey() < from_x || to_x < point.getKey())
continue;
X delta = to_x - from_x;
if (delta < std::numeric_limits<X>::max())
delta = delta + 1;
X value = point.getKey() - from_x;
Float64 w = histogram.size();
size_t index = std::min<size_t>(static_cast<size_t>(w / delta * value), histogram.size() - 1);
if (std::numeric_limits<Y>::max() - histogram[index] > point.getMapped())
{
for (size_t i = 0; i <= diff_x; ++i)
{
auto it = data.points.find(static_cast<X>(min_x_local + i));
bool found = it != data.points.end();
sz += updateFrame(values, found ? std::round(((it->getMapped() - min_y) / diff_y) * 7) + 1 : 0.0);
}
histogram[index] += point.getMapped();
fhistogram[index] += 1;
}
else
{
for (size_t i = 0; i <= diff_x; ++i)
sz += updateFrame(values, data.points.has(min_x_local + static_cast<X>(i)) ? 1 : 0);
/// In case of overflow, just saturate
histogram[index] = std::numeric_limits<Y>::max();
}
}
else
for (size_t i = 0; i < histogram.size(); ++i)
{
// begin reshapes to width buckets
Float64 multiple_d = (diff_x + 1) / static_cast<Float64>(width);
std::optional<Float64> min_y;
std::optional<Float64> max_y;
std::optional<Float64> new_y;
std::vector<std::optional<Float64>> new_points;
new_points.reserve(width);
std::pair<size_t, Float64> bound{0, 0.0};
size_t cur_bucket_num = 0;
// upper bound for bucket
auto upper_bound = [&](size_t bucket_num)
{
bound.second = (bucket_num + 1) * multiple_d;
bound.first = static_cast<size_t>(std::floor(bound.second));
};
upper_bound(cur_bucket_num);
for (size_t i = 0; i <= (diff_x + 1); ++i)
{
if (i == bound.first) // is bound
{
Float64 proportion = bound.second - bound.first;
auto it = data.points.find(min_x_local + static_cast<X>(i));
bool found = (it != data.points.end());
if (found && proportion > 0)
new_y = new_y.value_or(0) + it->getMapped() * proportion;
if (new_y)
{
Float64 avg_y = new_y.value() / multiple_d;
new_points.emplace_back(avg_y);
// If min_y has no value, or if the avg_y of the current bucket is less than min_y, update it.
if (!min_y || avg_y < min_y)
min_y = avg_y;
if (!max_y || avg_y > max_y)
max_y = avg_y;
}
else
{
new_points.emplace_back();
}
// next bucket
new_y = found ? ((1 - proportion) * it->getMapped()) : std::optional<Float64>();
upper_bound(++cur_bucket_num);
}
else
{
auto it = data.points.find(min_x_local + static_cast<X>(i));
if (it != data.points.end())
new_y = new_y.value_or(0) + it->getMapped();
}
}
if (!min_y || !max_y) // No value is set
return update_column();
Float64 diff_y = max_y.value() - min_y.value();
auto update_frame = [&] (const std::optional<Float64> & point_y)
{
sz += updateFrame(values, point_y ? std::round(((point_y.value() - min_y.value()) / diff_y) * 7) + 1 : 0);
};
auto update_frame_for_constant = [&] (const std::optional<Float64> & point_y)
{
sz += updateFrame(values, point_y ? 1 : 0);
};
if (diff_y != 0.0)
std::for_each(new_points.begin(), new_points.end(), update_frame);
else
std::for_each(new_points.begin(), new_points.end(), update_frame_for_constant);
if (fhistogram[i] > 0)
histogram[i] /= fhistogram[i];
}
update_column();
Y y_max = 0;
for (auto & y : histogram)
{
if (isNaN(y) || y <= 0)
continue;
y_max = std::max(y_max, y);
}
if (y_max == 0)
{
values.push_back('\0');
offsets.push_back(offsets.empty() ? 1 : offsets.back() + 1);
return;
}
for (auto & y : histogram)
{
if (isNaN(y) || y <= 0)
y = 0;
else
y = y * 7 / y_max + 1;
}
size_t sz = 0;
for (const auto & y : histogram)
sz += updateFrame(values, y);
values.push_back('\0');
offsets.push_back(offsets.empty() ? sz + 1 : offsets.back() + sz + 1);
}
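Putting the new pipeline together, a self-contained sketch (plain `std::map`/`std::vector` instead of ClickHouse's `HashMap`/`PaddedPODArray`, overflow saturation omitted; the function name is hypothetical): points are binned into `width` buckets over `[from_x, to_x]`, each bucket is averaged over the number of points it received, and bucket heights are rescaled into `[1, 8]` relative to the maximum.

``` cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <map>
#include <string>
#include <vector>

// Hypothetical stand-in for render(): returns the sparkbar frame as UTF-8.
std::string renderSketch(const std::map<int64_t, int64_t> & points,
                         int64_t from_x, int64_t to_x, std::size_t width)
{
    static const char * bars[] = {" ", "▁", "▂", "▃", "▄", "▅", "▆", "▇", "█"};
    std::vector<int64_t> histogram(width, 0);
    std::vector<uint64_t> counts(width, 0);

    const long double delta = static_cast<long double>(to_x) - from_x + 1;
    for (const auto & [x, y] : points)
    {
        if (x < from_x || to_x < x)
            continue;                               // outside the interval
        auto index = std::min<std::size_t>(
            static_cast<std::size_t>(width / delta * (x - from_x)), width - 1);
        histogram[index] += y;                      // sum per bucket ...
        counts[index] += 1;
    }
    for (std::size_t i = 0; i < width; ++i)
        if (counts[i] > 0)
            histogram[i] /= counts[i];              // ... then average

    int64_t y_max = 0;
    for (auto y : histogram)
        y_max = std::max(y_max, y);
    if (y_max == 0)
        return std::string(width, ' ');             // nothing positive to draw

    std::string frame;
    for (auto y : histogram)
        frame += bars[y <= 0 ? 0 : y * 7 / y_max + 1];
    return frame;
}
```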
public:
AggregateFunctionSparkbar(const DataTypes & arguments, const Array & params)
: IAggregateFunctionDataHelper<AggregateFunctionSparkbarData<X, Y>, AggregateFunctionSparkbar>(
arguments, params, std::make_shared<DataTypeString>())
: IAggregateFunctionDataHelper<AggregateFunctionSparkbarData<X, Y>, AggregateFunctionSparkbar>(arguments, params, std::make_shared<DataTypeString>())
, width(params.empty() ? 0 : params.at(0).safeGet<UInt64>())
, is_specified_range_x(params.size() >= 3)
, begin_x(is_specified_range_x ? static_cast<X>(params.at(1).safeGet<X>()) : std::numeric_limits<X>::min())
, end_x(is_specified_range_x ? static_cast<X>(params.at(2).safeGet<X>()) : std::numeric_limits<X>::max())
{
width = params.at(0).safeGet<UInt64>();
if (params.size() == 3)
{
specified_min_max_x = true;
min_x = static_cast<X>(params.at(1).safeGet<X>());
max_x = static_cast<X>(params.at(2).safeGet<X>());
}
else
{
specified_min_max_x = false;
min_x = std::numeric_limits<X>::min();
max_x = std::numeric_limits<X>::max();
}
if (width < 2 || 1024 < width)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter width must be in range [2, 1024]");
if (begin_x >= end_x)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter `min_x` must be less than `max_x`");
}
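The same two constructor-time checks, as a standalone sketch with plain integer parameters (the function name is illustrative):

``` cpp
#include <cstddef>
#include <stdexcept>

// Hypothetical mirror of the constructor validation added in this PR.
void checkSparkbarSettings(std::size_t width, long long begin_x, long long end_x)
{
    // A frame narrower than 2 cells cannot show a shape, and very large
    // widths would allocate oversized frames, so width is bounded.
    if (width < 2 || 1024 < width)
        throw std::invalid_argument("Parameter width must be in range [2, 1024]");

    // An empty or inverted interval would leave every point out of range.
    if (begin_x >= end_x)
        throw std::invalid_argument("Parameter `min_x` must be less than `max_x`");
}
```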
String getName() const override
@@ -287,7 +248,7 @@ public:
void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * /*arena*/) const override
{
X x = assert_cast<const ColumnVector<X> *>(columns[0])->getData()[row_num];
if (min_x <= x && x <= max_x)
if (begin_x <= x && x <= end_x)
{
Y y = assert_cast<const ColumnVector<Y> *>(columns[1])->getData()[row_num];
this->data(place).add(x, y);

View File

@@ -1,26 +1,69 @@
▁█
▃█▁
▄▅█▁
▄▄█▇▁
▃▄▅█▃▁
▂▅▃▇█▁▂
▂▅▃▅██ ▁
▁▅▄▃██▅ ▁
▁▄▄▂▅▇█▂ ▂
-- { echoOn }
SELECT sparkbar(2)(event_date,cnt) FROM spark_bar_test;
▅█
SELECT sparkbar(3)(event_date,cnt) FROM spark_bar_test;
▄█▃
SELECT sparkbar(4)(event_date,cnt) FROM spark_bar_test;
▄▅█▃
SELECT sparkbar(5)(event_date,cnt) FROM spark_bar_test;
▃▂▆█▂
SELECT sparkbar(6)(event_date,cnt) FROM spark_bar_test;
▃▄▆█ ▃
SELECT sparkbar(7)(event_date,cnt) FROM spark_bar_test;
▂▃▃▆█ ▂
SELECT sparkbar(8)(event_date,cnt) FROM spark_bar_test;
▂▅▂▇▆█ ▂
SELECT sparkbar(9)(event_date,cnt) FROM spark_bar_test;
▂▅▂▃▆█ ▂
SELECT sparkbar(10)(event_date,cnt) FROM spark_bar_test;
▂▅▂▃▇▆█ ▂
SELECT sparkbar(11)(event_date,cnt) FROM spark_bar_test;
▁▄▅▂▃▇▆█ ▂
SELECT sparkbar(11,2,5)(event_date,cnt) FROM spark_bar_test;
SELECT sparkbar(11,3,7)(event_date,cnt) FROM spark_bar_test;
SELECT sparkbar(11,4,11)(event_date,cnt) FROM spark_bar_test;
SELECT sparkbar(11,toDate('2020-01-02'),toDate('2020-01-05'))(event_date,cnt) FROM spark_bar_test;
▆ █ ▃ ▅
SELECT sparkbar(11,toDate('2020-01-03'),toDate('2020-01-07'))(event_date,cnt) FROM spark_bar_test;
▆ ▃ ▄ █ ▇
SELECT sparkbar(11,toDate('2020-01-04'),toDate('2020-01-11'))(event_date,cnt) FROM spark_bar_test;
▂▃▇ ▆█ ▂
SELECT sparkbar(2,toDate('2020-01-01'),toDate('2020-01-08'))(event_date,cnt) FROM spark_bar_test;
▄█
SELECT sparkbar(2,toDate('2020-01-02'),toDate('2020-01-09'))(event_date,cnt) FROM spark_bar_test;
▄█
SELECT sparkbar(3,toDate('2020-01-01'),toDate('2020-01-09'))(event_date,cnt) FROM spark_bar_test;
▄▅█
SELECT sparkbar(3,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_test;
▃▅█
SELECT sparkbar(4,toDate('2020-01-01'),toDate('2020-01-08'))(event_date,cnt) FROM spark_bar_test;
▃▄▆█
SELECT sparkbar(5,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_test;
▃▄▆█
SELECT sparkbar(9,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_test;
▂▅▂▃▇▆█
WITH number DIV 50 AS k, number % 50 AS value SELECT k, sparkbar(50, 0, 99)(number, value) FROM numbers(100) GROUP BY k ORDER BY k;
0 ▁▁▁▁▂▂▂▃▃▃▃▄▄▄▅▅▅▅▆▆▆▇▇▇█
1 ▁▁▁▁▂▂▂▃▃▃▃▄▄▄▅▅▅▅▆▆▆▇▇▇█
SELECT sparkbar(128, 0, 9223372036854775806)(toUInt64(9223372036854775806), number % 65535) FROM numbers(100);
SELECT sparkbar(128)(toUInt64(9223372036854775806), number % 65535) FROM numbers(100);
SELECT sparkbar(9)(x, y) FROM (SELECT * FROM Values('x UInt64, y UInt8', (18446744073709551615,255), (0,0), (0,0), (4036797895307271799,254)));
▇ █
SELECT sparkbar(8, 0, 7)((number + 1) % 8, 1), sparkbar(8, 0, 7)((number + 2) % 8, 1), sparkbar(8, 0, 7)((number + 3) % 8, 1) FROM numbers(7);
███████ █ ██████ ██ █████
SELECT sparkbar(2)(number, -number) FROM numbers(10);
SELECT sparkbar(10)(number, number - 7) FROM numbers(10);
▄█
SELECT sparkbar(1024)(number, number) FROM numbers(1024);
▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▂▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▅▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇█
SELECT sparkbar(1024)(number, 1) FROM numbers(1024);
████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████
SELECT sparkbar(1024)(number, 0) FROM numbers(1024);
▆█▁▃
▅▁▂█▇
▁▂▇▆█ ▁
▁█
▁█
▁▄█
▂█▁
▁▃▅█
▁▃▅█
0 ▁▁▂▂▂▂▃▃▃▄▄▄▅▅▅▅▆▆▆▇▇▇▇██
1 ▁▁▂▂▂▂▃▃▃▄▄▄▅▅▅▅▆▆▆▇▇▇▇██

View File

@@ -4,7 +4,8 @@ CREATE TABLE spark_bar_test (`cnt` UInt64,`event_date` Date) ENGINE = MergeTree
INSERT INTO spark_bar_test VALUES(1,'2020-01-01'),(4,'2020-01-02'),(5,'2020-01-03'),(2,'2020-01-04'),(3,'2020-01-05'),(7,'2020-01-06'),(6,'2020-01-07'),(8,'2020-01-08'),(2,'2020-01-11');
SELECT sparkbar(1)(event_date,cnt) FROM spark_bar_test;
-- { echoOn }
SELECT sparkbar(2)(event_date,cnt) FROM spark_bar_test;
SELECT sparkbar(3)(event_date,cnt) FROM spark_bar_test;
SELECT sparkbar(4)(event_date,cnt) FROM spark_bar_test;
@@ -20,7 +21,6 @@ SELECT sparkbar(11,2,5)(event_date,cnt) FROM spark_bar_test;
SELECT sparkbar(11,3,7)(event_date,cnt) FROM spark_bar_test;
SELECT sparkbar(11,4,11)(event_date,cnt) FROM spark_bar_test;
SELECT sparkbar(11,toDate('2020-01-02'),toDate('2020-01-02'))(event_date,cnt) FROM spark_bar_test;
SELECT sparkbar(11,toDate('2020-01-02'),toDate('2020-01-05'))(event_date,cnt) FROM spark_bar_test;
SELECT sparkbar(11,toDate('2020-01-03'),toDate('2020-01-07'))(event_date,cnt) FROM spark_bar_test;
SELECT sparkbar(11,toDate('2020-01-04'),toDate('2020-01-11'))(event_date,cnt) FROM spark_bar_test;
@@ -31,14 +31,32 @@ SELECT sparkbar(3,toDate('2020-01-01'),toDate('2020-01-09'))(event_date,cnt) FRO
SELECT sparkbar(3,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_test;
SELECT sparkbar(4,toDate('2020-01-01'),toDate('2020-01-08'))(event_date,cnt) FROM spark_bar_test;
SELECT sparkbar(5,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_test;
DROP TABLE IF EXISTS spark_bar_test;
SELECT sparkbar(9,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_test;
WITH number DIV 50 AS k, number % 50 AS value SELECT k, sparkbar(50, 0, 99)(number, value) FROM numbers(100) GROUP BY k ORDER BY k;
-- OOM guard
DROP TABLE IF EXISTS spark_bar_oom;
CREATE TABLE spark_bar_oom (x UInt64, y UInt8) Engine=MergeTree ORDER BY tuple();
INSERT INTO spark_bar_oom VALUES (18446744073709551615,255),(0,0),(0,0),(4036797895307271799,163);
SELECT sparkbar(9)(x,y) FROM spark_bar_oom SETTINGS max_memory_usage = 100000000; -- { serverError 241 }
DROP TABLE IF EXISTS spark_bar_oom;
SELECT sparkbar(128, 0, 9223372036854775806)(toUInt64(9223372036854775806), number % 65535) FROM numbers(100);
SELECT sparkbar(128)(toUInt64(9223372036854775806), number % 65535) FROM numbers(100);
SELECT sparkbar(9)(x, y) FROM (SELECT * FROM Values('x UInt64, y UInt8', (18446744073709551615,255), (0,0), (0,0), (4036797895307271799,254)));
SELECT sparkbar(8, 0, 7)((number + 1) % 8, 1), sparkbar(8, 0, 7)((number + 2) % 8, 1), sparkbar(8, 0, 7)((number + 3) % 8, 1) FROM numbers(7);
SELECT sparkbar(2)(number, -number) FROM numbers(10);
SELECT sparkbar(10)(number, number - 7) FROM numbers(10);
SELECT sparkbar(1024)(number, number) FROM numbers(1024);
SELECT sparkbar(1024)(number, 1) FROM numbers(1024);
SELECT sparkbar(1024)(number, 0) FROM numbers(1024);
-- { echoOff }
SELECT sparkbar(0)(number, number) FROM numbers(10); -- { serverError BAD_ARGUMENTS }
SELECT sparkbar(1)(number, number) FROM numbers(10); -- { serverError BAD_ARGUMENTS }
SELECT sparkbar(1025)(number, number) FROM numbers(10); -- { serverError BAD_ARGUMENTS }
SELECT sparkbar(2, 10, 9)(number, number) FROM numbers(10); -- { serverError BAD_ARGUMENTS }
SELECT sparkbar(2, -5, -1)(number, number) FROM numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT sparkbar(2, -5, 1)(number, number) FROM numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT sparkbar(2)(toInt32(number), number) FROM numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT sparkbar(2, 0)(number, number) FROM numbers(10); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
SELECT sparkbar(2, 0, 5, 8)(number, number) FROM numbers(10); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
DROP TABLE IF EXISTS spark_bar_test;