2015-01-03 03:18:49 +00:00
|
|
|
#pragma once
|
|
|
|
|
2018-07-04 17:02:47 +00:00
|
|
|
#include <unordered_map>
#include <vector>
|
|
|
|
|
2017-04-01 09:19:00 +00:00
|
|
|
#include <Core/Types.h>
|
2015-01-03 03:18:49 +00:00
|
|
|
|
|
|
|
|
|
|
|
namespace DB
|
|
|
|
{
|
|
|
|
|
2016-02-16 16:39:39 +00:00
|
|
|
class ReadBuffer;
|
|
|
|
class WriteBuffer;
|
2016-01-11 21:46:36 +00:00
|
|
|
|
2017-04-30 13:50:16 +00:00
|
|
|
/** More information about the block.
|
2015-01-03 03:18:49 +00:00
|
|
|
*/
|
|
|
|
struct BlockInfo
|
|
|
|
{
|
2017-04-01 07:20:54 +00:00
|
|
|
/** is_overflows:
|
2017-04-30 13:50:16 +00:00
|
|
|
* After running GROUP BY ... WITH TOTALS with the max_rows_to_group_by and group_by_overflow_mode = 'any' settings,
|
2017-05-09 19:07:35 +00:00
|
|
|
* a row is inserted in the separate block with aggregated values that have not passed max_rows_to_group_by.
|
2017-04-30 13:50:16 +00:00
|
|
|
* If it is such a block, then is_overflows is set to true for it.
|
2017-04-01 07:20:54 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
/** bucket_num:
|
2017-04-30 13:50:16 +00:00
|
|
|
* When using the two-level aggregation method, data with different key groups are scattered across different buckets.
|
|
|
|
* In this case, the bucket number is indicated here. It is used to optimize the merge for distributed aggregation.
|
|
|
|
* Otherwise -1.
|
2017-04-01 07:20:54 +00:00
|
|
|
*/
|
2015-01-03 03:18:49 +00:00
|
|
|
|
2016-02-16 16:39:39 +00:00
|
|
|
#define APPLY_FOR_BLOCK_INFO_FIELDS(M) \
|
2017-04-01 07:20:54 +00:00
|
|
|
M(bool, is_overflows, false, 1) \
|
|
|
|
M(Int32, bucket_num, -1, 2)
|
2015-01-03 03:18:49 +00:00
|
|
|
|
|
|
|
#define DECLARE_FIELD(TYPE, NAME, DEFAULT, FIELD_NUM) \
|
2017-04-01 07:20:54 +00:00
|
|
|
TYPE NAME = DEFAULT;
|
2015-01-03 03:18:49 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
APPLY_FOR_BLOCK_INFO_FIELDS(DECLARE_FIELD)
|
2015-01-03 03:18:49 +00:00
|
|
|
|
|
|
|
#undef DECLARE_FIELD
|
|
|
|
|
2017-05-09 19:07:35 +00:00
|
|
|
/// Write the values in binary form. NOTE: You could use protobuf, but it would be overkill for this case.
|
2017-04-01 07:20:54 +00:00
|
|
|
void write(WriteBuffer & out) const;
|
2015-01-03 03:18:49 +00:00
|
|
|
|
2017-05-09 19:07:35 +00:00
|
|
|
/// Read the values in binary form.
|
2017-04-01 07:20:54 +00:00
|
|
|
void read(ReadBuffer & in);
|
2015-01-03 03:18:49 +00:00
|
|
|
};
|
|
|
|
|
2018-07-04 17:02:47 +00:00
|
|
|
/// Block extention to support delayed defaults.
|
|
|
|
/// It's expected that it would be lots unset defaults or none.
|
|
|
|
/// NOTE It's possible to make better solution for sparse values.
|
|
|
|
class BlockDelayedDefaults
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
using BitMask = std::vector<bool>;
|
|
|
|
using MaskById = std::unordered_map<size_t, BitMask>;
|
|
|
|
|
|
|
|
const BitMask & getColumnBitmask(size_t column_idx) const;
|
|
|
|
void setBit(size_t column_idx, size_t row_idx);
|
|
|
|
bool empty() const { return columns_defaults.empty(); }
|
|
|
|
size_t size() const { return columns_defaults.size(); }
|
|
|
|
|
|
|
|
private:
|
|
|
|
MaskById columns_defaults;
|
|
|
|
};
|
|
|
|
|
2015-01-03 03:18:49 +00:00
|
|
|
}
|