Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 15:12:02 +00:00)
Revert "optimize aggfunc column data copy (#19407)"

This reverts commit 7e3186c826.

Parent: f6dc005ce2
Commit: 00e6b6232f
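For context, the change being reverted deduplicated aggregate-state copies in ColumnAggregateFunction: a hash map (copiedDataInfo, removed in the diff below) remembered, per source state pointer, which row already holds that state's merged result, so repeated inserts of the same source could reuse it instead of merging again. The following is a minimal standalone C++ sketch of that idea only; ToyAggregateColumn, StatePtr, rows and copied are simplified stand-ins invented here, not the actual ClickHouse classes.

#include <cstddef>
#include <iostream>
#include <unordered_map>
#include <vector>

// Stand-in for ConstAggregateDataPtr: an opaque pointer to some aggregate state.
using StatePtr = const int *;

struct ToyAggregateColumn
{
    std::vector<int> rows;                           // simplified per-row "states"
    std::unordered_map<StatePtr, size_t> copied;     // src pointer -> row that already merged it

    // Plain path (what the code returns to after the revert): merge every time.
    void insertMergeFrom(StatePtr src) { rows.push_back(*src); }

    // Deduplicating path (the idea being reverted): merge a given source state
    // only once, then reuse the previously produced row for repeated sources.
    void insertCopyFrom(StatePtr src)
    {
        auto [it, first_time] = copied.try_emplace(src, rows.size());
        if (first_time)
            rows.push_back(*src);                    // "merge" the new source state
        else
            rows.push_back(rows[it->second]);        // reuse the earlier merge result
    }
};

int main()
{
    int shared_state = 42;
    ToyAggregateColumn col;
    for (int i = 0; i < 3; ++i)
        col.insertCopyFrom(&shared_state);           // the expensive step runs once, not three times
    std::cout << col.rows.size() << " rows, " << col.copied.size() << " distinct sources\n";
}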
@@ -75,28 +75,8 @@ void ColumnAggregateFunction::set(const AggregateFunctionPtr & func_)
 ColumnAggregateFunction::~ColumnAggregateFunction()
 {
     if (!func->hasTrivialDestructor() && !src)
-    {
-        if (copiedDataInfo.empty())
-        {
-            for (auto * val : data)
-            {
-                func->destroy(val);
-            }
-        }
-        else
-        {
-            size_t pos;
-            for (Map::iterator it = copiedDataInfo.begin(), it_end = copiedDataInfo.end(); it != it_end; ++it)
-            {
-                pos = it->getValue().second;
-                if (data[pos] != nullptr)
-                {
-                    func->destroy(data[pos]);
-                    data[pos] = nullptr;
-                }
-            }
-        }
-    }
+        for (auto * val : data)
+            func->destroy(val);
 }

 void ColumnAggregateFunction::addArena(ConstArenaPtr arena_)
@@ -475,37 +455,14 @@ void ColumnAggregateFunction::insertFrom(const IColumn & from, size_t n)
     /// (only as a whole, see comment above).
     ensureOwnership();
     insertDefault();
-    insertCopyFrom(assert_cast<const ColumnAggregateFunction &>(from).data[n]);
+    insertMergeFrom(from, n);
 }

 void ColumnAggregateFunction::insertFrom(ConstAggregateDataPtr place)
 {
     ensureOwnership();
     insertDefault();
-    insertCopyFrom(place);
-}
-
-void ColumnAggregateFunction::insertCopyFrom(ConstAggregateDataPtr place)
-{
-    Map::LookupResult result;
-    result = copiedDataInfo.find(place);
-    if (result == nullptr)
-    {
-        copiedDataInfo[place] = data.size()-1;
-        func->merge(data.back(), place, &createOrGetArena());
-    }
-    else
-    {
-        size_t pos = result->getValue().second;
-        if (pos != data.size() - 1)
-        {
-            data[data.size() - 1] = data[pos];
-        }
-        else /// insert same data to same pos, merge them.
-        {
-            func->merge(data.back(), place, &createOrGetArena());
-        }
-    }
+    insertMergeFrom(place);
 }

 void ColumnAggregateFunction::insertMergeFrom(ConstAggregateDataPtr place)
@@ -740,4 +697,5 @@ MutableColumnPtr ColumnAggregateFunction::cloneResized(size_t size) const
         return cloned_col;
     }
 }
+
 }
@@ -13,8 +13,6 @@

 #include <Functions/FunctionHelpers.h>

-#include <Common/HashTable/HashMap.h>
-
 namespace DB
 {

@@ -84,17 +82,6 @@ private:
     /// Name of the type to distinguish different aggregation states.
     String type_string;

-    /// MergedData records, used to avoid duplicated data copy.
-    ///key: src pointer, val: pos in current column.
-    using Map = HashMap<
-        ConstAggregateDataPtr,
-        size_t,
-        DefaultHash<ConstAggregateDataPtr>,
-        HashTableGrower<3>,
-        HashTableAllocatorWithStackMemory<sizeof(std::pair<ConstAggregateDataPtr, size_t>) * (1 << 3)>>;
-
-    Map copiedDataInfo;
-
     ColumnAggregateFunction() {}

     /// Create a new column that has another column as a source.
@@ -153,8 +140,6 @@ public:

     void insertFrom(ConstAggregateDataPtr place);

-    void insertCopyFrom(ConstAggregateDataPtr place);
-
     /// Merge state at last row with specified state in another column.
     void insertMergeFrom(ConstAggregateDataPtr place);

@@ -69,16 +69,11 @@ namespace ZeroTraits
 {

 template <typename T>
-inline bool check(const T x) { return x == 0; }
+bool check(const T x) { return x == 0; }

 template <typename T>
-inline void set(T & x) { x = 0; }
+void set(T & x) { x = 0; }

-template <>
-inline bool check(const char * x) { return x == nullptr; }
-
-template <>
-inline void set(const char *& x){ x = nullptr; }
 }


@@ -1,24 +0,0 @@
-<test max_ignored_relative_change="0.2">
-    <create_query>drop table if EXISTS test_bm2;</create_query>
-    <create_query>drop table if EXISTS test_bm_join2;</create_query>
-    <create_query>create table test_bm2(
-        dim UInt64,
-        id UInt64)
-        ENGINE = MergeTree()
-        ORDER BY( dim )
-        SETTINGS index_granularity = 8192;
-    </create_query>
-    <create_query>
-        create table test_bm_join2(
-        dim UInt64,
-        ids AggregateFunction(groupBitmap, UInt64) )
-        ENGINE = MergeTree()
-        ORDER BY(dim)
-        SETTINGS index_granularity = 8192;
-    </create_query>
-    <fill_query>insert into test_bm2 SELECT 1,number FROM numbers(0, 1000)</fill_query>
-    <fill_query>insert into test_bm_join2 SELECT 1, bitmapBuild(range(toUInt64(0),toUInt64(11000000)))</fill_query>
-    <query>select a.dim,bitmapCardinality(b.ids) from test_bm2 a left join test_bm_join2 b using(dim)</query>
-    <drop_query>drop table if exists test_bm2</drop_query>
-    <drop_query>drop table if exists test_bm_join2</drop_query>
-</test>
@@ -19,7 +19,7 @@
             toInt256(number) as d,
             toString(number) as f,
             toFixedString(f, 20) as g
-        FROM numbers_mt(20000000)
+        FROM numbers_mt(200000000)
         SETTINGS max_threads = 8
         FORMAT Null
     </query>
@@ -38,7 +38,7 @@
             toInt256(number) as d,
             toString(number) as f,
             toFixedString(f, 20) as g
-        FROM numbers_mt(20000000)
+        FROM numbers_mt(200000000)
         SETTINGS max_threads = 8
         FORMAT Null
     </query>
@@ -76,7 +76,7 @@
             toInt256(number) as d,
             toString(number) as f,
             toFixedString(f, 20) as g
-        FROM numbers_mt(20000000)
+        FROM numbers_mt(200000000)
         SETTINGS max_threads = 8
         FORMAT Null
     </query>
@@ -115,7 +115,7 @@
             toInt256(number) as d,
             toString(number) as f,
             toFixedString(f, 20) as g
-        FROM numbers_mt(20000000)
+        FROM numbers_mt(200000000)
         SETTINGS max_threads = 8
         FORMAT Null
     </query>
@@ -134,7 +134,7 @@
             toInt256(number) as d,
             toString(number) as f,
             toFixedString(f, 20) as g
-        FROM numbers_mt(20000000)
+        FROM numbers_mt(200000000)
         SETTINGS max_threads = 8
         FORMAT Null
     </query>
@@ -153,7 +153,7 @@
             toInt256(number) as d,
             toString(number) as f,
             toFixedString(f, 20) as g
-        FROM numbers_mt(20000000)
+        FROM numbers_mt(200000000)
         SETTINGS max_threads = 8
         FORMAT Null
     </query>
@@ -172,7 +172,7 @@
             toInt256(number) as d,
             toString(number) as f,
             toFixedString(f, 20) as g
-        FROM numbers_mt(20000000)
+        FROM numbers_mt(200000000)
         SETTINGS max_threads = 8
         FORMAT Null
     </query>
@@ -191,7 +191,7 @@
             toInt256(number) as d,
             toString(number) as f,
             toFixedString(f, 20) as g
-        FROM numbers_mt(20000000)
+        FROM numbers_mt(200000000)
         SETTINGS max_threads = 8
         FORMAT Null
     </query>
@@ -230,7 +230,7 @@
             toInt256(number) as d,
             toString(number) as f,
             toFixedString(f, 20) as g
-        FROM numbers_mt(2000000)
+        FROM numbers_mt(20000000)
         SETTINGS max_threads = 8
         FORMAT Null
     </query>
@@ -249,7 +249,7 @@
             toInt256(number) as d,
             toString(number) as f,
             toFixedString(f, 20) as g
-        FROM numbers_mt(20000000)
+        FROM numbers_mt(100000000)
         SETTINGS max_threads = 8
         FORMAT Null
     </query>