Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-25 17:12:03 +00:00)

make 01275_parallel_mv.sql.j2 stable

parent 5daa28d5c5
commit ef4f34545e
@@ -794,7 +794,7 @@ PushingToLiveViewSink::PushingToLiveViewSink(const Block & header, StorageLiveVi
 void PushingToLiveViewSink::consume(Chunk & chunk)
 {
     Progress local_progress(chunk.getNumRows(), chunk.bytes(), 0);
-    live_view.writeBlock(live_view, getHeader().cloneWithColumns(chunk.detachColumns()), std::move(chunk.getChunkInfos()), context);
+    live_view.writeBlock(live_view, getHeader().cloneWithColumns(chunk.getColumns()), std::move(chunk.getChunkInfos()), context);
 
     if (auto process = context->getProcessListElement())
         process->updateProgressIn(local_progress);
@@ -818,7 +818,7 @@ void PushingToWindowViewSink::consume(Chunk & chunk)
 {
     Progress local_progress(chunk.getNumRows(), chunk.bytes(), 0);
     StorageWindowView::writeIntoWindowView(
-        window_view, getHeader().cloneWithColumns(chunk.detachColumns()), std::move(chunk.getChunkInfos()), context);
+        window_view, getHeader().cloneWithColumns(chunk.getColumns()), std::move(chunk.getChunkInfos()), context);
 
     if (auto process = context->getProcessListElement())
         process->updateProgressIn(local_progress);

@@ -2,13 +2,18 @@
 
 
 
-select 'optimize_trivial_insert_select=0', 'max_insert_threads=0';
-optimize_trivial_insert_select=0 max_insert_threads=0
+select 'optimize_trivial_insert_select=0', 'max_insert_threads=0', 'iteration_num=';
+optimize_trivial_insert_select=0 max_insert_threads=0 iteration_num=
 insert into testX select number from numbers(200) settings
+    send_logs_level='fatal',
+    insert_deduplication_token='UT_1',
     log_queries=1,
     parallel_view_processing=0,
     optimize_trivial_insert_select=0,
-    max_insert_threads=0; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO }
+    max_insert_threads=0;
 system flush logs;
 select peak_threads_usage from system.query_log where
     current_database = currentDatabase() and
@@ -22,17 +27,17 @@ select count() from testX;
 200
 select count() from testXA;
 200
-select count() from testXB;
-0
 select count() from testXC;
 200
-select 'optimize_trivial_insert_select=0', 'max_insert_threads=5';
-optimize_trivial_insert_select=0 max_insert_threads=5
+select 'optimize_trivial_insert_select=0', 'max_insert_threads=5', 'iteration_num=';
+optimize_trivial_insert_select=0 max_insert_threads=5 iteration_num=
 insert into testX select number from numbers(200) settings
+    send_logs_level='fatal',
+    insert_deduplication_token='UT_2',
     log_queries=1,
     parallel_view_processing=0,
     optimize_trivial_insert_select=0,
-    max_insert_threads=5; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO }
+    max_insert_threads=5;
 system flush logs;
 select peak_threads_usage from system.query_log where
     current_database = currentDatabase() and
@@ -46,17 +51,17 @@ select count() from testX;
 400
 select count() from testXA;
 400
-select count() from testXB;
-0
 select count() from testXC;
 400
-select 'optimize_trivial_insert_select=1', 'max_insert_threads=0';
-optimize_trivial_insert_select=1 max_insert_threads=0
+select 'optimize_trivial_insert_select=1', 'max_insert_threads=0', 'iteration_num=';
+optimize_trivial_insert_select=1 max_insert_threads=0 iteration_num=
 insert into testX select number from numbers(200) settings
+    send_logs_level='fatal',
+    insert_deduplication_token='UT_3',
     log_queries=1,
     parallel_view_processing=0,
     optimize_trivial_insert_select=1,
-    max_insert_threads=0; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO }
+    max_insert_threads=0;
 system flush logs;
 select peak_threads_usage from system.query_log where
     current_database = currentDatabase() and
@@ -70,17 +75,17 @@ select count() from testX;
 600
 select count() from testXA;
 600
-select count() from testXB;
-0
 select count() from testXC;
 600
-select 'optimize_trivial_insert_select=1', 'max_insert_threads=5';
-optimize_trivial_insert_select=1 max_insert_threads=5
+select 'optimize_trivial_insert_select=1', 'max_insert_threads=5', 'iteration_num=';
+optimize_trivial_insert_select=1 max_insert_threads=5 iteration_num=
 insert into testX select number from numbers(200) settings
+    send_logs_level='fatal',
+    insert_deduplication_token='UT_4',
     log_queries=1,
     parallel_view_processing=0,
     optimize_trivial_insert_select=1,
-    max_insert_threads=5; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO }
+    max_insert_threads=5;
 system flush logs;
 select peak_threads_usage from system.query_log where
     current_database = currentDatabase() and
@@ -94,17 +99,17 @@ select count() from testX;
 800
 select count() from testXA;
 800
-select count() from testXB;
-0
 select count() from testXC;
 800
-select 'optimize_trivial_insert_select=0', 'max_insert_threads=0';
-optimize_trivial_insert_select=0 max_insert_threads=0
+select 'optimize_trivial_insert_select=0', 'max_insert_threads=0', 'iteration_num=';
+optimize_trivial_insert_select=0 max_insert_threads=0 iteration_num=
 insert into testX select number from numbers(200) settings
+    send_logs_level='fatal',
+    insert_deduplication_token='UT_5',
     log_queries=1,
     parallel_view_processing=1,
     optimize_trivial_insert_select=0,
-    max_insert_threads=0; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO }
+    max_insert_threads=0;
 system flush logs;
 select peak_threads_usage from system.query_log where
     current_database = currentDatabase() and
@@ -118,17 +123,17 @@ select count() from testX;
 1000
 select count() from testXA;
 1000
-select count() from testXB;
-0
 select count() from testXC;
 1000
-select 'optimize_trivial_insert_select=0', 'max_insert_threads=5';
-optimize_trivial_insert_select=0 max_insert_threads=5
+select 'optimize_trivial_insert_select=0', 'max_insert_threads=5', 'iteration_num=';
+optimize_trivial_insert_select=0 max_insert_threads=5 iteration_num=
 insert into testX select number from numbers(200) settings
+    send_logs_level='fatal',
+    insert_deduplication_token='UT_6',
     log_queries=1,
     parallel_view_processing=1,
     optimize_trivial_insert_select=0,
-    max_insert_threads=5; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO }
+    max_insert_threads=5;
 system flush logs;
 select peak_threads_usage from system.query_log where
     current_database = currentDatabase() and
@@ -139,20 +144,20 @@ select peak_threads_usage from system.query_log where
     Settings['max_insert_threads'] = '5';
 12
 select count() from testX;
-1190
+1200
 select count() from testXA;
-1130
+1200
-select count() from testXB;
-60
 select count() from testXC;
-1130
+1200
-select 'optimize_trivial_insert_select=1', 'max_insert_threads=0';
-optimize_trivial_insert_select=1 max_insert_threads=0
+select 'optimize_trivial_insert_select=1', 'max_insert_threads=0', 'iteration_num=';
+optimize_trivial_insert_select=1 max_insert_threads=0 iteration_num=
 insert into testX select number from numbers(200) settings
+    send_logs_level='fatal',
+    insert_deduplication_token='UT_7',
     log_queries=1,
     parallel_view_processing=1,
     optimize_trivial_insert_select=1,
-    max_insert_threads=0; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO }
+    max_insert_threads=0;
 system flush logs;
 select peak_threads_usage from system.query_log where
     current_database = currentDatabase() and
@@ -163,20 +168,20 @@ select peak_threads_usage from system.query_log where
     Settings['max_insert_threads'] = '0';
 2
 select count() from testX;
-1390
+1400
 select count() from testXA;
-1330
+1400
-select count() from testXB;
-60
 select count() from testXC;
-1330
+1400
-select 'optimize_trivial_insert_select=1', 'max_insert_threads=5';
-optimize_trivial_insert_select=1 max_insert_threads=5
+select 'optimize_trivial_insert_select=1', 'max_insert_threads=5', 'iteration_num=';
+optimize_trivial_insert_select=1 max_insert_threads=5 iteration_num=
 insert into testX select number from numbers(200) settings
+    send_logs_level='fatal',
+    insert_deduplication_token='UT_8',
     log_queries=1,
     parallel_view_processing=1,
     optimize_trivial_insert_select=1,
-    max_insert_threads=5; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO }
+    max_insert_threads=5;
 system flush logs;
 select peak_threads_usage from system.query_log where
     current_database = currentDatabase() and
@@ -187,10 +192,8 @@ select peak_threads_usage from system.query_log where
     Settings['max_insert_threads'] = '5';
 7
 select count() from testX;
-1590
+1600
 select count() from testXA;
-1480
+1600
-select count() from testXB;
-160
 select count() from testXC;
-1490
+1600

@@ -5,11 +5,12 @@
 -- avoid settings randomization by clickhouse-test
 set max_threads = 10;
 
 -- more blocks to process
 set max_block_size = 10;
 set min_insert_block_size_rows = 10;
 
+set materialized_views_ignore_errors = 1;
 
 drop table if exists testX;
 drop table if exists testXA;
 drop table if exists testXB;
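
A minimal, self-contained sketch (illustrative table names src and bad_mv, not part of this commit) of the behaviour the new setting provides: with materialized_views_ignore_errors = 1, an exception thrown inside one materialized view is logged and ignored instead of failing the whole INSERT, so the source table and the healthy views still receive every row and the expected counts stay deterministic.

    -- sketch only: src and bad_mv are hypothetical names
    drop table if exists bad_mv;
    drop table if exists src;
    create table src (x UInt64) engine = MergeTree order by x;
    create materialized view bad_mv engine = MergeTree order by tuple() as
        select throwIf(x = 1) from src;

    set materialized_views_ignore_errors = 1;
    insert into src select number from numbers(10); -- no longer aborts when bad_mv throws
    select count() from src;                        -- 10: every row reached the source table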
@@ -21,17 +22,25 @@ create materialized view testXA engine=MergeTree order by tuple() as select slee
 create materialized view testXB engine=MergeTree order by tuple() as select sleep(0.2), throwIf(A=1) from testX;
 create materialized view testXC engine=MergeTree order by tuple() as select sleep(0.1) from testX;
 
+{% set count = namespace(value=0) %}
+
 -- { echoOn }
 {% for parallel_view_processing in [0, 1] %}
 {% for optimize_trivial_insert_select in [0, 1] %}
 {% for max_insert_threads in [0, 5] %}
-select 'optimize_trivial_insert_select={{ optimize_trivial_insert_select }}', 'max_insert_threads={{ max_insert_threads }}';
+
+{% set count.value = count.value + 1 %}
+
+select 'optimize_trivial_insert_select={{ optimize_trivial_insert_select }}', 'max_insert_threads={{ max_insert_threads }}', 'iteration_num={{ iteration_num }}';
 insert into testX select number from numbers(200) settings
+    send_logs_level='fatal',
+    insert_deduplication_token='UT_{{ count.value }}',
     log_queries=1,
     parallel_view_processing={{ parallel_view_processing }},
     optimize_trivial_insert_select={{ optimize_trivial_insert_select }},
-    max_insert_threads={{ max_insert_threads }}; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO }
+    max_insert_threads={{ max_insert_threads }};
 system flush logs;
 select peak_threads_usage from system.query_log where
     current_database = currentDatabase() and
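
The namespace counter above is standard Jinja2: an ordinary {% set %} inside a {% for %} block does not persist across iterations, while an attribute on a namespace object does, so count.value can number every generated INSERT. A minimal template sketch (illustrative only, not part of the test) of the expansion:

    {% set count = namespace(value=0) %}
    {% for i in [0, 1] %}
    {% set count.value = count.value + 1 %}
    -- first pass renders UT_1, second pass renders UT_2:
    insert into testX select number from numbers(200) settings insert_deduplication_token='UT_{{ count.value }}';
    {% endfor %}

Giving every iteration its own token presumably keeps otherwise-identical inserts from being collapsed by insert deduplication, whichever deduplication settings the test harness happens to pick.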
@@ -43,7 +52,6 @@ select peak_threads_usage from system.query_log where
 
 select count() from testX;
 select count() from testXA;
-select count() from testXB;
 select count() from testXC;
 {% endfor %}
 {% endfor %}

tests/result (new file, 12 lines)
@@ -0,0 +1,12 @@
+Using queries from 'queries' directory
+Connecting to ClickHouse server...... OK
+Connected to server 24.7.1.1 @ 246f421f2402799fd11b22a608b4d0d497cb8438 chesema-processor-onCancel
+
+Running 1 stateless tests (MainProcess).
+
+00993_system_parts_race_condition_drop_zookeeper: [ OK ]
+
+1 tests passed. 0 tests skipped. 124.59 s elapsed (MainProcess).
+
+0 tests passed. 0 tests skipped. 0.00 s elapsed (MainProcess).
+All tests have finished.