ClickHouse/tests/performance/codecs_int_insert.xml

<test>
    <settings>
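        <!-- Specialized codecs (Delta, DoubleDelta, Gorilla, T64) used without a general-purpose
             codec are normally rejected as "suspicious"; allow them for this benchmark. -->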
        <allow_suspicious_codecs>1</allow_suspicious_codecs>
    </settings>

    <substitutions>
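        <!-- The test framework expands every {codec} x {type} x {seq_type} combination into its own
             table and queries, e.g. codec=Delta, type=UInt64, seq_type=mon yields codec_mon_UInt64_Delta. -->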
        <substitution>
            <name>codec</name>
            <values>
                <value>NONE</value> <!-- as a baseline -->
                <value>LZ4</value>
                <value>ZSTD</value>
                <value>Delta</value>
                <value>T64</value>
                <value>DoubleDelta</value>
                <value>Gorilla</value>
            </values>
        </substitution>
        <substitution>
            <name>type</name>
            <values>
                <value>UInt64</value>
            </values>
        </substitution>
        <substitution>
            <name>seq_type</name>
            <values>
                <value>seq</value>
                <value>mon</value>
                <value>rnd</value>
            </values>
        </substitution>
        <substitution>
            <name>num_rows</name>
            <values>
                <value>20000000</value>
            </values>
        </substitution>
    </substitutions>

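    <!-- parts_to_delay_insert / parts_to_throw_insert are raised so that the many small parts
         accumulated while merges are stopped do not throttle or abort the repeated inserts. -->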
    <create_query>CREATE TABLE IF NOT EXISTS codec_{seq_type}_{type}_{codec} (n {type} CODEC({codec}))
        ENGINE = MergeTree PARTITION BY tuple() ORDER BY tuple()
        SETTINGS parts_to_delay_insert = 5000, parts_to_throw_insert = 5000;</create_query>
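    <!-- Stop background merges so that merge activity does not distort the insert timings. -->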
    <create_query>SYSTEM STOP MERGES</create_query>
    <!-- Using LIMIT makes each query finite, so it can be run multiple times in a loop, reducing the mean error. -->
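    <!-- Data shapes per seq_type: seq is strictly sequential (0, 1, 2, ...); mon is increasing with
         pseudo-random noise in the low 9 bits; rnd is pseudo-random 64-bit values.
         max_threads=1 keeps each insert single-threaded for stable, comparable timings. -->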
    <query>INSERT INTO codec_seq_{type}_{codec} (n) SELECT number FROM system.numbers LIMIT {num_rows} SETTINGS max_threads=1</query>
    <query>INSERT INTO codec_mon_{type}_{codec} (n) SELECT number*512+(intHash64(number)%512) FROM system.numbers LIMIT {num_rows} SETTINGS max_threads=1</query>
    <query>INSERT INTO codec_rnd_{type}_{codec} (n) SELECT intHash64(number) FROM system.numbers LIMIT {num_rows} SETTINGS max_threads=1</query>
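    <!-- Cleanup: re-enable merges and drop the per-combination tables. -->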
    <drop_query>SYSTEM START MERGES</drop_query>
    <drop_query>DROP TABLE IF EXISTS codec_{seq_type}_{type}_{codec}</drop_query>
</test>