Mirror of https://github.com/ClickHouse/ClickHouse.git
Synced: 2024-11-18 05:32:52 +00:00
Commit: b4afc49d3c
sed -i s'/^<test.*$/<test>/g' tests/performance/*.xml WITH ceil(max(q[3]), 1) AS h SELECT concat('sed -i s\'/^<test.*$/<test max_ignored_relative_change="', toString(h), '">/g\' tests/performance/', test, '.xml') AS s FROM ( SELECT test, query_index, count(*), min(event_time), max(event_time) AS t, arrayMap(x -> floor(x, 3), quantiles(0, 0.5, 0.95, 1)(stat_threshold)) AS q, median(stat_threshold) AS m FROM perftest.query_metrics_v2 WHERE ((pr_number != 0) AND (event_date > '2021-01-01')) AND (metric = 'client_time') AND (abs(diff) < 0.05) AND (old_value > 0.1) GROUP BY test, query_index, query_display_name HAVING (t > '2021-01-01 00:00:00') AND ((q[3]) > 0.1) ORDER BY test DESC ) GROUP BY test ORDER BY h DESC FORMAT PrettySpace
44 lines
1.9 KiB
XML
<!--
    Performance test: polymorphic MergeTree parts.
    Measures many-parts insertion against tables configured to produce
    different part formats (wide vs. compact), plus a Buffer front-end.
    All tables are cloned from hits_10m_single.
-->
<test max_ignored_relative_change="0.2">
    <!-- Forces every part to be written in the "wide" format. -->
    <create_query>
        CREATE TABLE hits_wide AS hits_10m_single ENGINE = MergeTree()
        PARTITION BY toYYYYMM(EventDate)
        ORDER BY (CounterID, EventDate, intHash32(UserID))
        SAMPLE BY intHash32(UserID)
        SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0
    </create_query>

    <!-- Parts below 10M stay in the "compact" format. -->
    <create_query>
        CREATE TABLE hits_compact AS hits_10m_single ENGINE = MergeTree()
        PARTITION BY toYYYYMM(EventDate)
        ORDER BY (CounterID, EventDate, intHash32(UserID))
        SAMPLE BY intHash32(UserID)
        SETTINGS min_bytes_for_wide_part = '10M'
    </create_query>

    <!-- Parts below 1M are kept in memory (WAL-backed); 1M-10M compact; above 10M wide. -->
    <!-- NOTE(review): hits_memory is created and dropped but no <query> inserts into it
         in this file — confirm whether an insert was dropped or the table is intentionally unused. -->
    <create_query>
        CREATE TABLE hits_memory AS hits_10m_single ENGINE = MergeTree()
        PARTITION BY toYYYYMM(EventDate)
        ORDER BY (CounterID, EventDate, intHash32(UserID))
        SAMPLE BY intHash32(UserID)
        SETTINGS min_bytes_for_compact_part = '1M', min_bytes_for_wide_part = '10M', in_memory_parts_enable_wal = 1
    </create_query>

    <!-- Buffer engine in front of hits_wide; flushes at 10000 rows/bytes thresholds. -->
    <create_query>
        CREATE TABLE hits_buffer AS hits_10m_single
        ENGINE = Buffer(default, hits_wide, 1, 0, 0, 10000, 10000, 0, 0)
    </create_query>

    <!-- Emulate writing many parts with 1 row, because single insert query is too fast -->
    <settings>
        <max_block_size>1</max_block_size>
        <min_insert_block_size_rows>1</min_insert_block_size_rows>
    </settings>

    <!-- 100 parts -->
    <query>INSERT INTO hits_wide(UserID) SELECT rand() FROM numbers(100)</query>
    <query>INSERT INTO hits_compact(UserID) SELECT rand() FROM numbers(1000)</query>
    <query>INSERT INTO hits_buffer(UserID) SELECT rand() FROM numbers(100)</query>

    <drop_query>DROP TABLE IF EXISTS hits_wide</drop_query>
    <drop_query>DROP TABLE IF EXISTS hits_compact</drop_query>
    <drop_query>DROP TABLE IF EXISTS hits_memory</drop_query>
    <drop_query>DROP TABLE IF EXISTS hits_buffer</drop_query>
</test>