Mirror of https://github.com/ClickHouse/ClickHouse.git
Improve multiIf columnar execution by using memcpy
parent 89a733a2f5
commit e29b78d20b
@@ -521,7 +521,9 @@ class IColumn;
     M(Bool, formatdatetime_parsedatetime_m_is_month_name, true, "Formatter '%M' in functions 'formatDateTime()' and 'parseDateTime()' print/parse the month name instead of minutes.", 0) \
     M(Bool, parsedatetime_parse_without_leading_zeros, true, "Formatters '%c', '%l' and '%k' in function 'parseDateTime()' parse months and hours without leading zeros.", 0) \
     M(Bool, formatdatetime_format_without_leading_zeros, false, "Formatters '%c', '%l' and '%k' in function 'formatDateTime()' print months and hours without leading zeros.", 0) \
+    \
+    M(Bool, allow_execute_multiif_columnar_by_memcpy, false, "Allow executing the multiIf function in a columnar fashion using memcpy", 0) \
     M(Bool, formatdatetime_f_prints_single_zero, false, "Formatter '%f' in function 'formatDateTime()' produces a single zero instead of six zeros if the formatted value has no fractional seconds.", 0) \
     M(Bool, formatdatetime_parsedatetime_m_is_month_name, true, "Formatter '%M' in functions 'formatDateTime()' and 'parseDateTime()' produces the month name instead of minutes.", 0) \
     M(UInt64, max_partitions_per_insert_block, 100, "Limit maximum number of partitions in single INSERTed block. Zero means unlimited. Throw exception if the block contains too many partitions. This setting is a safety threshold, because using large number of partitions is a common misconception.", 0) \
     M(Bool, throw_on_max_partitions_per_insert_block, true, "Used with max_partitions_per_insert_block. If true (default), an exception will be thrown when max_partitions_per_insert_block is reached. If false, details of the insert query reaching this limit with the number of partitions will be logged. This can be useful if you're trying to understand the impact on users when changing max_partitions_per_insert_block.", 0) \
     M(Int64, max_partitions_to_read, -1, "Limit the max number of partitions that can be accessed in one query. <= 0 means unlimited.", 0) \
@@ -256,13 +256,13 @@ public:
         MutableColumnPtr res = ColumnVector<TYPE>::create(rows); \
         if (!result_type->isNullable()) \
         { \
-            executeInstructionsColumnar<TYPE, INDEX>(instructions, rows, res); \
+            executeInstructionsColumnar<TYPE, INDEX>(instructions, rows, res, settings.allow_execute_multiif_columnar_by_memcpy); \
             return std::move(res); \
         } \
         else \
         { \
             MutableColumnPtr null_map = ColumnUInt8::create(rows); \
-            executeInstructionsColumnarForNullable<TYPE, INDEX>(instructions, rows, res, null_map); \
+            executeInstructionsColumnarForNullable<TYPE, INDEX>(instructions, rows, res, null_map, settings.allow_execute_multiif_columnar_by_memcpy); \
             return ColumnNullable::create(std::move(res), std::move(null_map)); \
         } \
     }
@@ -383,7 +383,8 @@ private:
     }

     template<typename T, typename S>
-    static void executeInstructionsColumnarForNullable(std::vector<Instruction> & instructions, size_t rows, const MutableColumnPtr & res, const MutableColumnPtr & null_map)
+    static void executeInstructionsColumnarForNullable(std::vector<Instruction> & instructions, size_t rows,
+        const MutableColumnPtr & res, const MutableColumnPtr & null_map, bool execute_by_memcpy)
     {
         PaddedPODArray<S> inserts(rows, static_cast<S>(instructions.size()));
         calculateInserts(instructions, rows, inserts);
@@ -414,27 +415,87 @@ private:
             }
         }

-        for (size_t row_i = 0; row_i < rows; ++row_i)
+        if (!execute_by_memcpy)
         {
-            auto & instruction = instructions[inserts[row_i]];
-            size_t index = instruction.source_is_constant ? 0 : row_i;
-            res_data[row_i] = data_cols[inserts[row_i]]->getData()[index];
-            null_map_data[row_i] = null_map_cols[inserts[row_i]] ? null_map_cols[inserts[row_i]]->getData()[index] : 0;
+            for (size_t row_i = 0; row_i < rows; ++row_i)
+            {
+                auto & instruction = instructions[inserts[row_i]];
+                size_t index = instruction.source_is_constant ? 0 : row_i;
+                res_data[row_i] = data_cols[inserts[row_i]]->getData()[index];
+                null_map_data[row_i] = null_map_cols[inserts[row_i]] ? null_map_cols[inserts[row_i]]->getData()[index] : 0;
+            }
         }
+        else
+        {
+            size_t insert_start_pos = 0;
+            for (size_t row_i = 0; row_i < rows; ++row_i)
+            {
+                S curr_insert = inserts[row_i];
+                if (row_i != rows - 1 && curr_insert == inserts[row_i + 1])
+                {
+                    continue;
+                }
+                else
+                {
+                    if (instructions[curr_insert].source_is_constant)
+                    {
+                        for (size_t i = insert_start_pos; i <= row_i; ++i)
+                        {
+                            res_data[i] = data_cols[curr_insert]->getData()[0];
+                            null_map_data[i] = null_map_cols[curr_insert]->getData()[0];
+                        }
+                    }
+                    else
+                    {
+                        memcpy(res_data.data() + insert_start_pos,
+                            data_cols[curr_insert]->getData().data() + insert_start_pos, sizeof(T) * (row_i + 1 - insert_start_pos));
+                        memcpy(null_map_data.data() + insert_start_pos,
+                            null_map_cols[curr_insert]->getData().data() + insert_start_pos, sizeof(UInt8) * (row_i + 1 - insert_start_pos));
+                    }
+                    insert_start_pos = row_i + 1;
+                }
+            }
+        }
     }

     template <typename T, typename S>
-    static void executeInstructionsColumnar(std::vector<Instruction> & instructions, size_t rows, const MutableColumnPtr & res)
+    static void executeInstructionsColumnar(std::vector<Instruction> & instructions, size_t rows, const MutableColumnPtr & res, bool execute_by_memcpy)
     {
         PaddedPODArray<S> inserts(rows, static_cast<S>(instructions.size()));
         calculateInserts(instructions, rows, inserts);

         PaddedPODArray<T> & res_data = assert_cast<ColumnVector<T> &>(*res).getData();
-        for (size_t row_i = 0; row_i < rows; ++row_i)
+        if (!execute_by_memcpy)
         {
-            auto & instruction = instructions[inserts[row_i]];
-            auto ref = instruction.source->getDataAt(row_i);
-            res_data[row_i] = *reinterpret_cast<const T*>(ref.data);
+            for (size_t row_i = 0; row_i < rows; ++row_i)
+            {
+                auto & instruction = instructions[inserts[row_i]];
+                auto ref = instruction.source->getDataAt(row_i);
+                res_data[row_i] = *reinterpret_cast<const T*>(ref.data);
+            }
+        }
+        else
+        {
+            size_t insert_start_pos = 0;
+            std::vector<const ColumnVector<T> *> data_cols(instructions.size());
+            for (size_t i = 0; i < instructions.size(); ++i)
+            {
+                data_cols[i] = assert_cast<const ColumnVector<T> *>(instructions[i].source.get());
+            }
+            for (size_t row_i = 0; row_i < rows; ++row_i)
+            {
+                S curr_insert = inserts[row_i];
+                if (row_i != rows - 1 && curr_insert == inserts[row_i + 1])
+                {
+                    continue;
+                }
+                else
+                {
+                    memcpy(res_data.data() + insert_start_pos,
+                        data_cols[curr_insert]->getData().data() + insert_start_pos, sizeof(T) * (row_i + 1 - insert_start_pos));
+                    insert_start_pos = row_i + 1;
+                }
+            }
+        }
     }
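The heart of the change is the run-coalescing copy in the memcpy branch. Below is a minimal, self-contained sketch of that idea in plain C++ (not ClickHouse code; the branch columns, the inserts array and all names are illustrative, and it assumes every source is a full, non-constant column): scan the per-row branch indices, skip ahead while consecutive rows select the same branch, then copy the whole run from the selected source with a single memcpy instead of element by element.

// Minimal sketch of the run-coalescing copy used above (illustrative names,
// plain C++, no ClickHouse dependencies). 'inserts' holds, for every row, the
// index of the branch column whose value multiIf would pick for that row.
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

int main()
{
    // Two hypothetical branch columns, aligned with the result rows.
    std::vector<int64_t> branch0 = {10, 11, 12, 13, 14, 15};
    std::vector<int64_t> branch1 = {20, 21, 22, 23, 24, 25};
    std::vector<const std::vector<int64_t> *> sources = {&branch0, &branch1};

    std::vector<uint8_t> inserts = {0, 0, 0, 1, 1, 0};  // branch chosen per row
    const size_t rows = inserts.size();
    std::vector<int64_t> res(rows);

    size_t run_start = 0;
    for (size_t row = 0; row < rows; ++row)
    {
        // Keep extending the current run while the next row picks the same branch.
        if (row != rows - 1 && inserts[row] == inserts[row + 1])
            continue;

        // Copy the whole run [run_start, row] from the chosen branch at once.
        const auto & src = *sources[inserts[row]];
        std::memcpy(res.data() + run_start, src.data() + run_start,
                    sizeof(int64_t) * (row + 1 - run_start));
        run_start = row + 1;
    }

    for (int64_t v : res)
        std::cout << v << ' ';  // prints: 10 11 12 23 24 15
    std::cout << '\n';
    return 0;
}

The win depends on the data producing long runs of the same branch (for example, rows clustered or sorted by the condition key); with alternating branches every run has length 1 and the path degenerates to the per-row copy it replaces, which is presumably why the commit gates it behind allow_execute_multiif_columnar_by_memcpy and leaves it off by default. In the nullable variant above, the null mask is copied the same way as the data, and constant sources (which have no per-row layout to copy from) fall back to the scalar loop.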