Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 15:12:02 +00:00)
Add test and set setting to 0 in some other tests

commit 53b8ff42c2
parent 264e9daf6f
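Context for the change (not part of the diff itself): `optimize_on_insert` makes the table engine's merging algorithm run on each inserted block, so engines such as ReplacingMergeTree can already collapse rows at INSERT time. The pre-existing tests touched below pin the setting to 0 so their expected row counts stay valid, while the new 01560_optimize_on_insert test exercises the merge-on-insert behaviour itself. A minimal illustrative sketch (the table name `t` is hypothetical; values mirror the new test):

CREATE TABLE t (key UInt32, date Datetime) ENGINE = ReplacingMergeTree() PARTITION BY date ORDER BY key;

SET optimize_on_insert = 1; -- apply the Replacing merge to the block before it is written
INSERT INTO t VALUES (1, '2020-01-01'), (1, '2020-01-01');
SELECT count() FROM t; -- 1 row: the duplicate was collapsed at insert time

SET optimize_on_insert = 0; -- store the block as-is
INSERT INTO t VALUES (2, '2020-01-01'), (2, '2020-01-01');
SELECT count() FROM t WHERE key = 2; -- 2 rows: duplicates remain until a background merge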
@@ -204,6 +204,9 @@ BlocksWithPartition MergeTreeDataWriter::splitBlockIntoParts(const Block & block
 
 Block MergeTreeDataWriter::mergeBlock(const Block & block, SortDescription sort_description, Names & partition_key_columns, IColumn::Permutation ** permutation)
 {
+
+    LOG_DEBUG(log, "Apply merging algorithm on inserted data");
+
     size_t block_size = block.rows();
 
     auto get_merging_algorithm = [&]() -> std::shared_ptr<IMergingAlgorithm>
@@ -215,23 +218,23 @@ Block MergeTreeDataWriter::mergeBlock(const Block & block, SortDescription sort_
                 return nullptr;
             case MergeTreeData::MergingParams::Replacing:
                 return std::make_shared<ReplacingSortedAlgorithm>(
-                    block, 1, sort_description, data.merging_params.version_column, block_size);
+                    block, 1, sort_description, data.merging_params.version_column, block_size + 1);
             case MergeTreeData::MergingParams::Collapsing:
                 return std::make_shared<CollapsingSortedAlgorithm>(
                     block, 1, sort_description, data.merging_params.sign_column,
-                    false, block_size, &Poco::Logger::get("MergeTreeBlockOutputStream"));
+                    false, block_size + 1, &Poco::Logger::get("MergeTreeBlockOutputStream"));
             case MergeTreeData::MergingParams::Summing:
                 return std::make_shared<SummingSortedAlgorithm>(
                     block, 1, sort_description, data.merging_params.columns_to_sum,
-                    partition_key_columns, block_size);
+                    partition_key_columns, block_size + 1);
             case MergeTreeData::MergingParams::Aggregating:
-                return std::make_shared<AggregatingSortedAlgorithm>(block, 1, sort_description, block_size);
+                return std::make_shared<AggregatingSortedAlgorithm>(block, 1, sort_description, block_size + 1);
             case MergeTreeData::MergingParams::VersionedCollapsing:
                 return std::make_shared<VersionedCollapsingAlgorithm>(
-                    block, 1, sort_description, data.merging_params.sign_column, block_size);
+                    block, 1, sort_description, data.merging_params.sign_column, block_size + 1);
             case MergeTreeData::MergingParams::Graphite:
                 return std::make_shared<GraphiteRollupSortedAlgorithm>(
-                    block, 1, sort_description, block_size, data.merging_params.graphite_params, time(nullptr));
+                    block, 1, sort_description, block_size + 1, data.merging_params.graphite_params, time(nullptr));
         }
 
         __builtin_unreachable();
@@ -243,6 +246,8 @@ Block MergeTreeDataWriter::mergeBlock(const Block & block, SortDescription sort_
 
     Chunk chunk(block.getColumns(), block_size);
 
+    LOG_DEBUG(log, "chunk size before merge {}, block rows {}", chunk.getNumRows(), block_size);
+
     IMergingAlgorithm::Input input;
     input.set(std::move(chunk));
     input.permutation = *permutation;
@@ -255,6 +260,9 @@ Block MergeTreeDataWriter::mergeBlock(const Block & block, SortDescription sort_
     while (!status.is_finished)
         status = merging_algorithm->merge();
 
+
+    LOG_DEBUG(log, "chunk size after merge {}", status.chunk.getNumRows());
+
     /// Merged Block is sorted and we don't need to use permutation anymore
     *permutation = nullptr;
 
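The algorithms constructed above are what fold an inserted block when `optimize_on_insert` is enabled; the hunks that follow pin the setting to 0 in older tests whose expectations assume unmerged inserts. A hedged SQL illustration of the Collapsing case (schema and values borrowed from the new test added at the end of this commit; the table name is a demo placeholder):

CREATE TABLE collapsing_demo (key UInt32, sign Int8, date Datetime)
    ENGINE = CollapsingMergeTree(sign) PARTITION BY date ORDER BY key;

-- With optimize_on_insert = 1, the (+1, -1) pair for 2020-01-02 cancels inside the
-- inserted block itself, so only the unmatched 2020-01-01 row is written:
INSERT INTO collapsing_demo VALUES (1, 1, '2020-01-01'), (1, 1, '2020-01-02'), (1, -1, '2020-01-02');
SELECT * FROM collapsing_demo; -- 1  1  2020-01-01 00:00:00

-- With optimize_on_insert = 0, all three rows would still be visible until a background
-- merge, which is why the pre-existing tests below disable the setting.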
@@ -1,3 +1,5 @@
+SET optimize_on_insert = 0;
+
 DROP TABLE IF EXISTS merge_tree;
 DROP TABLE IF EXISTS collapsing_merge_tree;
 DROP TABLE IF EXISTS versioned_collapsing_merge_tree;
@@ -1,3 +1,5 @@
+SET optimize_on_insert = 0;
+
 DROP TABLE IF EXISTS summing_composite_key;
 CREATE TABLE summing_composite_key (d Date, k UInt64, FirstMap Nested(k1 UInt32, k2ID Int8, s Float64), SecondMap Nested(k1ID UInt64, k2Key String, k3Type Int32, s Int64)) ENGINE = SummingMergeTree(d, k, 1);
 
@@ -43,13 +43,13 @@ $CLICKHOUSE_CLIENT -q "INSERT INTO $name (date, Sign, ki) SELECT
 toDate(0) AS date,
 toInt8(1) AS Sign,
 toUInt64(0) AS ki
-FROM system.numbers LIMIT 9000"
+FROM system.numbers LIMIT 9000" --server_logs_file=/dev/null
 
 $CLICKHOUSE_CLIENT -q "INSERT INTO $name (date, Sign, ki) SELECT
 toDate(0) AS date,
 toInt8(1) AS Sign,
 number AS ki
-FROM system.numbers LIMIT 9000, 9000"
+FROM system.numbers LIMIT 9000, 9000" --server_logs_file=/dev/null
 
 $CLICKHOUSE_CLIENT -q "INSERT INTO $name SELECT
 toDate(0) AS date,
@@ -68,7 +68,7 @@ number AS di09,
 number AS di10,
 [number, number+1] AS \`n.i\`,
 [hex(number), hex(number+1)] AS \`n.s\`
-FROM system.numbers LIMIT $res_rows"
+FROM system.numbers LIMIT $res_rows" --server_logs_file=/dev/null
 
 while [[ $(get_num_parts) -ne 1 ]] ; do $CLICKHOUSE_CLIENT -q "OPTIMIZE TABLE $name PARTITION 197001" --server_logs_file=/dev/null; done
 
@@ -1,3 +1,5 @@
+SET optimize_on_insert = 0;
+
 SELECT '*** Replicated with sampling ***';
 
 DROP TABLE IF EXISTS test.replicated_with_sampling;
@@ -1,3 +1,5 @@
+set optimize_on_insert = 0;
+
 drop table if exists mult_tab;
 create table mult_tab (date Date, value String, version UInt64, sign Int8) engine = VersionedCollapsingMergeTree(date, (date), 8192, sign, version);
 insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 10;
@@ -1,3 +1,5 @@
+set optimize_on_insert = 0;
+
 drop table if exists tab_00577;
 create table tab_00577 (date Date, version UInt64, val UInt64) engine = ReplacingMergeTree(version) partition by date order by date settings enable_vertical_merge_algorithm = 1, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 0;
 insert into tab_00577 values ('2018-01-01', 2, 2), ('2018-01-01', 1, 1);
@@ -1,3 +1,5 @@
+SET optimize_on_insert = 0;
+
 DROP TABLE IF EXISTS test_00616;
 DROP TABLE IF EXISTS replacing_00616;
 
@@ -1,3 +1,5 @@
+SET optimize_on_insert = 0;
+
 DROP TABLE IF EXISTS partitioned_by_tuple;
 
 CREATE TABLE partitioned_by_tuple (d Date, x UInt8, w String, y UInt8) ENGINE SummingMergeTree (y) PARTITION BY (d, x) ORDER BY (d, x, w);
@@ -1,3 +1,5 @@
+SET optimize_on_insert = 0;
+
 DROP TABLE IF EXISTS partitioned_by_tuple_replica1_00661;
 DROP TABLE IF EXISTS partitioned_by_tuple_replica2_00661;
 CREATE TABLE partitioned_by_tuple_replica1_00661(d Date, x UInt8, w String, y UInt8) ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/test/partitioned_by_tuple_00661', '1') PARTITION BY (d, x) ORDER BY (d, x, w);
@@ -1,4 +1,5 @@
 SET send_logs_level = 'fatal';
+SET optimize_on_insert = 0;
 
 DROP TABLE IF EXISTS old_style;
 CREATE TABLE old_style(d Date, x UInt32) ENGINE MergeTree(d, x, 8192);
@@ -1,3 +1,5 @@
+SET optimize_on_insert = 0;
+
 SET send_logs_level = 'fatal';
 
 DROP TABLE IF EXISTS old_style;
@@ -1,3 +1,5 @@
+SET optimize_on_insert = 0;
+
 select '-- SummingMergeTree with Nullable column without duplicates.';
 
 drop table if exists tst;
@@ -1,3 +1,5 @@
+SET optimize_on_insert = 0;
+
 DROP TABLE IF EXISTS data_01285;
 
 SET max_threads=1;
@@ -1,3 +1,5 @@
+SET optimize_on_insert = 0;
+
 DROP TABLE IF EXISTS tags;
 
 CREATE TABLE tags (
@@ -1,3 +1,5 @@
+SET optimize_on_insert = 0;
+
 DROP TABLE IF EXISTS tt_01373;
 
 CREATE TABLE tt_01373
tests/queries/0_stateless/01560_optimize_on_insert.reference (new file, 13 lines)
@@ -0,0 +1,13 @@
+Replacing Merge Tree
+1 2020-01-02 00:00:00
+1 2020-01-01 00:00:00
+Collapsing Merge Tree
+1 1 2020-01-01 00:00:00
+Versioned Collapsing Merge Tree
+1 1 2 2020-01-01 00:00:00
+Summing Merge Tree
+1 6 2020-01-02 00:00:00
+1 6 2020-01-01 00:00:00
+Aggregating Merge Tree
+1 5 2020-01-02 00:00:00
+1 5 2020-01-01 00:00:00
tests/queries/0_stateless/01560_optimize_on_insert.sql (new file, 35 lines)
@@ -0,0 +1,35 @@
+SELECT 'Replacing Merge Tree';
+DROP TABLE IF EXISTS replacing_merge_tree;
+CREATE TABLE replacing_merge_tree (key UInt32, date Datetime) ENGINE=ReplacingMergeTree() PARTITION BY date ORDER BY key;
+INSERT INTO replacing_merge_tree VALUES (1, '2020-01-01'), (1, '2020-01-02'), (1, '2020-01-01'), (1, '2020-01-02');
+SELECT * FROM replacing_merge_tree;
+DROP TABLE replacing_merge_tree;
+
+SELECT 'Collapsing Merge Tree';
+DROP TABLE IF EXISTS collapsing_merge_tree;
+CREATE TABLE collapsing_merge_tree (key UInt32, sign Int8, date Datetime) ENGINE=CollapsingMergeTree(sign) PARTITION BY date ORDER BY key;
+INSERT INTO collapsing_merge_tree VALUES (1, 1, '2020-01-01'), (1, 1, '2020-01-02'), (1, -1, '2020-01-01'), (1, -1, '2020-01-02'), (1, 1, '2020-01-01');
+SELECT * FROM collapsing_merge_tree;
+DROP TABLE collapsing_merge_tree;
+
+SELECT 'Versioned Collapsing Merge Tree';
+DROP TABLE IF EXISTS versioned_collapsing_merge_tree;
+CREATE TABLE versioned_collapsing_merge_tree (key UInt32, sign Int8, version Int32, date Datetime) ENGINE=VersionedCollapsingMergeTree(sign, version) PARTITION BY date ORDER BY (key, version);
+INSERT INTO versioned_collapsing_merge_tree VALUES (1, 1, 1, '2020-01-01'), (1, -1, 1, '2020-01-01'), (1, 1, 2, '2020-01-01');
+SELECT * FROM versioned_collapsing_merge_tree;
+DROP TABLE versioned_collapsing_merge_tree;
+
+SELECT 'Summing Merge Tree';
+DROP TABLE IF EXISTS summing_merge_tree;
+CREATE TABLE summing_merge_tree (key UInt32, val UInt32, date Datetime) ENGINE=SummingMergeTree(val) PARTITION BY date ORDER BY key;
+INSERT INTO summing_merge_tree VALUES (1, 1, '2020-01-01'), (1, 1, '2020-01-02'), (1, 5, '2020-01-01'), (1, 5, '2020-01-02');
+SELECT * FROM summing_merge_tree;
+DROP TABLE summing_merge_tree;
+
+SELECT 'Aggregating Merge Tree';
+DROP TABLE IF EXISTS aggregating_merge_tree;
+CREATE TABLE aggregating_merge_tree (key UInt32, val SimpleAggregateFunction(max, UInt32), date Datetime) ENGINE=AggregatingMergeTree() PARTITION BY date ORDER BY key;
+INSERT INTO aggregating_merge_tree VALUES (1, 1, '2020-01-01'), (1, 1, '2020-01-02'), (1, 5, '2020-01-01'), (1, 5, '2020-01-02');
+SELECT * FROM aggregating_merge_tree;
+DROP TABLE aggregating_merge_tree;
+