#pragma once

#include <unordered_map>
#include <vector>

#include <common/types.h>
namespace DB
{
class ReadBuffer;
class WriteBuffer;
/** More information about the block.
  */
struct BlockInfo
{
    /** is_overflows:
      * After running GROUP BY ... WITH TOTALS with the max_rows_to_group_by and group_by_overflow_mode = 'any' settings,
      * a row is inserted in the separate block with aggregated values that have not passed max_rows_to_group_by.
      * If it is such a block, then is_overflows is set to true for it.
      */

    /** bucket_num:
      * When using the two-level aggregation method, data with different key groups are scattered across different buckets.
      * In this case, the bucket number is indicated here. It is used to optimize the merge for distributed aggregation.
      * Otherwise -1.
      */

/// X-macro describing every field: M(type, name, default value, field number).
/// Keeping the list in one place guarantees the declarations below and the
/// binary (de)serialization in write()/read() stay in sync.
#define APPLY_FOR_BLOCK_INFO_FIELDS(M) \
    M(bool, is_overflows, false, 1) \
    M(Int32, bucket_num, -1, 2)

/// Expands each field entry into a member declaration with its default.
#define DECLARE_FIELD(TYPE, NAME, DEFAULT, FIELD_NUM) \
    TYPE NAME = DEFAULT;

    APPLY_FOR_BLOCK_INFO_FIELDS(DECLARE_FIELD)

#undef DECLARE_FIELD

    /// Write the values in binary form. NOTE: You could use protobuf, but it would be overkill for this case.
    void write(WriteBuffer & out) const;

    /// Read the values in binary form.
    void read(ReadBuffer & in);
};
/// Block extension to support delayed defaults. AddingDefaultsBlockInputStream uses it to replace missing values with column defaults.
class BlockMissingValues
{
public:
    using RowsBitMask = std::vector<bool>; /// a bit per row for a column

    /// Get mask for column, column_idx is index inside corresponding block
    const RowsBitMask & getDefaultsBitmask(size_t column_idx) const;
    /// Check that we have to replace default value at least in one of columns
    bool hasDefaultBits(size_t column_idx) const;
    /// Mark the value at (column_idx, row_idx) as missing, i.e. to be replaced
    /// with the column default (see rows_mask_by_column_id below).
    void setBit(size_t column_idx, size_t row_idx);
    bool empty() const { return rows_mask_by_column_id.empty(); }
    size_t size() const { return rows_mask_by_column_id.size(); }
    void clear() { rows_mask_by_column_id.clear(); }

private:
    using RowsMaskByColumnId = std::unordered_map<size_t, RowsBitMask>;

    /// If rows_mask_by_column_id[column_id][row_id] is true related value in Block should be replaced with column default.
    /// It may contain fewer columns and rows than the related block.
    RowsMaskByColumnId rows_mask_by_column_id;
};
}