Fix fuzz errors in sumMap

commit fbed8bb426 (parent 20f6c9933d)
mirror of https://github.com/ClickHouse/ClickHouse.git
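sumMap and its relatives (minMap, maxMap, sumMapWithOverflow) could be crashed by query fuzzing when a Nullable value column fed NULLs into the aggregation state. This commit makes the state NULL-safe: a NULL input is skipped when merging into an existing key, a key survives the final compaction only if it has at least one non-NULL, non-default value, and any NULL still present when the result is written out is replaced by the value type's default.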
src/AggregateFunctions/AggregateFunctionSumMap.h

@@ -54,6 +54,8 @@ struct AggregateFunctionMapData
   * ([1,2,3,4,5,6,7,8,9,10],[10,10,45,20,35,20,15,30,20,20])
   *
   * minMap and maxMap share the same idea, but calculate min and max correspondingly.
+  *
+  * NOTE: The implementation of these functions is "amateur grade" - not efficient and low quality.
   */
 
 template <typename T, typename Derived, typename Visitor, bool overflow, bool tuple_argument, bool compact>
@@ -129,9 +131,9 @@ public:
         }
     }
 
-    void add(AggregateDataPtr place, const IColumn** _columns, const size_t row_num, Arena *) const override
+    void add(AggregateDataPtr place, const IColumn ** columns_, const size_t row_num, Arena *) const override
     {
-        const auto & columns = getArgumentColumns(_columns);
+        const auto & columns = getArgumentColumns(columns_);
 
         // Column 0 contains array of keys of known type
         const ColumnArray & array_column0 = assert_cast<const ColumnArray &>(*columns[0]);
@@ -157,13 +159,13 @@ public:
         // Insert column values for all keys
         for (size_t i = 0; i < keys_vec_size; ++i)
         {
-            auto value = value_column.operator[](values_vec_offset + i);
-            auto key = key_column.operator[](keys_vec_offset + i).get<T>();
+            auto value = value_column[values_vec_offset + i];
+            auto key = key_column[keys_vec_offset + i].get<T>();
 
             if (!keepKey(key))
                 continue;
 
-            typename std::decay_t<decltype(merged_maps)>::iterator it;
+            decltype(merged_maps.begin()) it;
             if constexpr (IsDecimalNumber<T>)
             {
                 // FIXME why is storing NearestFieldType not enough, and we
@@ -174,10 +176,16 @@ public:
             else
                 it = merged_maps.find(key);
 
-            if (it != merged_maps.end() && !it->second[col].isNull())
+            if (it != merged_maps.end())
             {
-                applyVisitor(Visitor(value), it->second[col]);
+                if (!value.isNull())
+                {
+                    if (it->second[col].isNull())
+                        it->second[col] = value;
+                    else
+                        applyVisitor(Visitor(value), it->second[col]);
+                }
             }
             else
             {
                 // Create a value array for this key
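The heart of the fix: instead of refusing to merge whenever the stored Field is NULL, the merge now ignores NULL inputs and lets a non-NULL input overwrite a stored NULL, so the Visitor (sum, min, or max) is only ever applied to two non-NULL values.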
@@ -185,7 +193,7 @@ public:
                 new_values.resize(values_types.size());
                 for (size_t k = 0; k < new_values.size(); ++k)
                 {
-                    new_values[k] = (k == col && !value.isNull()) ? value : values_types[k]->getDefault();
+                    new_values[k] = (k == col) ? value : values_types[k]->getDefault();
                 }
 
                 if constexpr (IsDecimalNumber<T>)
@@ -260,6 +268,8 @@ public:
 
     void insertResultInto(AggregateDataPtr place, IColumn & to, Arena *) const override
     {
+        size_t num_columns = values_types.size();
+
         // Final step does compaction of keys that have zero values, this mutates the state
         auto & merged_maps = this->data(place).merged_maps;
 
@@ -270,9 +280,9 @@ public:
         {
             // Key is not compacted if it has at least one non-zero value
             bool erase = true;
-            for (size_t col = 0; col < values_types.size(); ++col)
+            for (size_t col = 0; col < num_columns; ++col)
             {
-                if (it->second[col] != values_types[col]->getDefault())
+                if (!it->second[col].isNull() && it->second[col] != values_types[col]->getDefault())
                 {
                     erase = false;
                     break;
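Because the state can now hold NULLs, the compaction check treats a stored NULL like the type default: a key survives only if at least one of its columns holds a non-NULL, non-default value.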
@@ -297,7 +307,7 @@ public:
         to_keys_offsets.push_back(to_keys_offsets.back() + size);
         to_keys_col.reserve(size);
 
-        for (size_t col = 0; col < values_types.size(); ++col)
+        for (size_t col = 0; col < num_columns; ++col)
         {
             auto & to_values_arr = assert_cast<ColumnArray &>(to_tuple.getColumn(col + 1));
             auto & to_values_offsets = to_values_arr.getOffsets();
@@ -312,9 +322,12 @@ public:
             to_keys_col.insert(elem.first);
 
             // Write 0..n arrays of values
-            for (size_t col = 0; col < values_types.size(); ++col)
+            for (size_t col = 0; col < num_columns; ++col)
             {
                 auto & to_values_col = assert_cast<ColumnArray &>(to_tuple.getColumn(col + 1)).getData();
-                to_values_col.insert(elem.second[col]);
+                if (elem.second[col].isNull())
+                    to_values_col.insertDefault();
+                else
+                    to_values_col.insert(elem.second[col]);
             }
         }
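The new tests exercise these rules end to end. Taking the first 01634_sum_map_nulls query below as a worked example (its expected output is the first line of the .reference file):

SELECT initializeAggregation('sumMap', [1, 2, 1], [1, 1, 1], [-1, null, 10]);
-- key 1 appears twice: first value column 1 + 1 = 2, second value column -1 + 10 = 9
-- key 2 appears once: first value column 1; its second value is NULL and is emitted as the default 0
-- expected result: ([1,2],[2,1],[9,0])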
tests/queries/0_stateless/01634_sum_map_nulls.reference (new file)

@@ -0,0 +1,4 @@
+([1,2],[2,1],[9,0])
+([1,2],[2,1],[-1,0])
+([1,2],[2,1],[9,10])
+([1,2],[2,1],[-1,10])
tests/queries/0_stateless/01634_sum_map_nulls.sql (new file)

@@ -0,0 +1,5 @@
+SELECT initializeAggregation('sumMap', [1, 2, 1], [1, 1, 1], [-1, null, 10]);
+SELECT initializeAggregation('sumMap', [1, 2, 1], [1, 1, 1], [-1, null, null]);
+SELECT initializeAggregation('sumMap', [1, 2, 1], [1, 1, 1], [null, null, null]); -- { serverError 43 }
+SELECT initializeAggregation('sumMap', [1, 2, 1], [1, 1, 1], [-1, 10, 10]);
+SELECT initializeAggregation('sumMap', [1, 2, 1], [1, 1, 1], [-1, 10, null]);
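The third query is the deliberate failure case: [null, null, null] carries no concrete value type (its literal type is Array(Nullable(Nothing))), so there is nothing sumMap could sum, and the test pins the resulting serverError 43 (ILLEGAL_TYPE_OF_ARGUMENT in ClickHouse's error-code list).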
tests/queries/0_stateless/01635_sum_map_fuzz.reference (new file)

@@ -0,0 +1 @@
+([1,2],[1,2],[1,0])
tests/queries/0_stateless/01635_sum_map_fuzz.sql (new file)

@@ -0,0 +1,6 @@
+SELECT finalizeAggregation(*) FROM (select initializeAggregation('sumMapState', [1, 2], [1, 2], [1, null]));
+
+DROP TABLE IF EXISTS sum_map_overflow;
+CREATE TABLE sum_map_overflow(events Array(UInt8), counts Array(UInt8)) ENGINE = Log;
+SELECT [NULL], sumMapWithOverflow(events, [NULL], [[(NULL)]], counts) FROM sum_map_overflow; -- { serverError 43 }
+DROP TABLE sum_map_overflow;
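The first query is the fuzzer-style reproducer: a NULL inside a sumMapState that is finalized afterwards. With this fix it yields ([1,2],[1,2],[1,0]), the stored NULL coming out as the default 0 rather than breaking finalization. The second query feeds sumMapWithOverflow arguments of mismatched types, which is now pinned to a clean serverError 43 instead of a fuzz failure.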