-- ClickHouse/tests/queries/0_stateless/01083_expressions_in_engine_arguments.sql
-- Tags: no-parallel, no-fasttest
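-- Engine and table function arguments below are written as constant expressions
-- (concatenation, upper()/lower(), currentDatabase(), ...) instead of plain literals;
-- they must be evaluated when the tables are created.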
DROP TABLE IF EXISTS file;
DROP TABLE IF EXISTS url;
DROP TABLE IF EXISTS view;
DROP TABLE IF EXISTS buffer;
DROP TABLE IF EXISTS merge;
DROP TABLE IF EXISTS merge_tf;
DROP TABLE IF EXISTS distributed;
DROP TABLE IF EXISTS distributed_tf;
DROP TABLE IF EXISTS rich_syntax;
DROP DICTIONARY IF EXISTS dict;
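-- upper('tsv') || 'WithNames' || 'AndTypes' evaluates to the format name 'TSVWithNamesAndTypes'.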
CREATE TABLE file (n Int8) ENGINE = File(upper('tsv') || 'WithNames' || 'AndTypes');
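-- Buffer arguments after the target table: num_layers, then the min/max time, rows and bytes thresholds for flushing.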
CREATE TABLE buffer (n Int8) ENGINE = Buffer(currentDatabase(), file, 16, 10, 200, 10000, 1000000, 10000000, 1000000000);
CREATE TABLE merge (n Int8) ENGINE = Merge('', lower('DISTRIBUTED'));
CREATE TABLE merge_tf as merge(currentDatabase(), '.*');
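-- 'fi' || 'le' and 'buf' || 'fer' below evaluate to the table names 'file' and 'buffer';
-- test_shard_localhost is a single-shard localhost cluster from the test configuration.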
CREATE TABLE distributed (n Int8) ENGINE = Distributed(test_shard_localhost, currentDatabase(), 'fi' || 'le');
CREATE TABLE distributed_tf as cluster('test' || '_' || 'shard_localhost', '', 'buf' || 'fer');
INSERT INTO buffer VALUES (1);
DETACH TABLE buffer; -- trigger flushing
ATTACH TABLE buffer;
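-- The URL engine argument is built from an expression: a SELECT over the merge table is
-- embedded into the query string, and spaces are replaced with '+' to keep the URL valid.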
CREATE TABLE url (n UInt64, col String) ENGINE=URL
(
replace
(
'https://localhost:8443/?query=' || 'select n, _table from ' || currentDatabase() || '.merge format CSV', ' ', '+'
),
CSV
);
CREATE VIEW view AS SELECT toInt64(n) as n FROM (SELECT toString(n) as n from merge WHERE _table != 'qwerty' ORDER BY _table) UNION ALL SELECT * FROM file;
-- The following line is needed just to disable checking stderr for emptiness
SELECT nonexistentsomething; -- { serverError 47 }
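-- The dictionary reads from the url table over the secure native port (9440) with a tiny cache layout.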
CREATE DICTIONARY dict (n UInt64, col String DEFAULT '42') PRIMARY KEY n
SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9440 SECURE 1 USER 'default' TABLE 'url')) LIFETIME(1) LAYOUT(CACHE(SIZE_IN_CELLS 1));
-- dict --> url --> merge |-> distributed -> file (1)
--                        |-> distributed_tf -> buffer -> file (1)
-- TODO make fuzz test from this
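-- All arguments of the nested remote()/cluster() calls are expressions; the if() is expected
-- to pick currentDatabase() rather than 'FAIL', and extract('123view456', '[a-z]+') yields 'view'.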
CREATE TABLE rich_syntax as remote
(
'localhos{x|y|t}',
cluster
(
'test' || '_' || 'shard_localhost',
remote
(
'127.0.0.{1..4}',
if
(
toString(40 + 2) NOT IN ('hello', dictGetString(currentDatabase() || '.dict', 'col', toUInt64('0001'))),
currentDatabase(),
'FAIL'
),
extract('123view456', '[a-z]+')
)
)
);
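-- SHOW CREATE should print the evaluated argument values rather than the original expressions.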
SHOW CREATE file;
SHOW CREATE buffer;
SHOW CREATE merge;
SHOW CREATE merge_tf;
SHOW CREATE distributed;
SHOW CREATE distributed_tf;
SHOW CREATE url;
SHOW CREATE rich_syntax;
SHOW CREATE VIEW view;
SHOW CREATE dict;
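-- Insert one more row that stays in the Buffer, so both the flushed file and the
-- in-memory buffer contribute to sum(n) below.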
INSERT INTO buffer VALUES (1);
-- remote(localhost) --> cluster(test_shard_localhost) |-> remote(127.0.0.1) --> view |-> subquery --> merge |-> distributed --> file (1)
--                                                     |                             |                      |-> distributed_tf -> buffer (1) -> file (1)
--                                                     |                             |-> file (1)
--                                                     |-> remote(127.0.0.2) --> ...
SELECT sum(n) from rich_syntax;
-- Clear the DNS cache to avoid future errors in the logs (the 'localhos{x|y|t}' pattern above also produces host names that do not resolve)
SYSTEM DROP DNS CACHE;
DROP TABLE file;
DROP TABLE url;
DROP TABLE view;
DROP TABLE buffer;
DROP TABLE merge;
DROP TABLE merge_tf;
DROP TABLE distributed;
DROP TABLE distributed_tf;
DROP TABLE rich_syntax;
DROP DICTIONARY dict;