Merge branch 'master' into add_conversion_stream

commit 5f3bcf198b
alesapin, 2020-04-07 14:57:58 +03:00
21 changed files with 154 additions and 72 deletions


@@ -148,7 +148,7 @@ function run_tests
 TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n")
 # the grep is to filter out set -x output and keep only time output
-{ time "$script_dir/perf.py" "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; } 2>&1 >/dev/null | grep -v ^+ >> "wall-clock-times.tsv" || continue
+{ time "$script_dir/perf.py" --host=localhost --port=9001 --host=localhost --port=9002 "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; } 2>&1 >/dev/null | grep -v ^+ >> "wall-clock-times.tsv" || continue
 # The test completed with zero status, so we treat stderr as warnings
 mv "$test_name-err.log" "$test_name-warn.log"


@@ -23,8 +23,8 @@ report_stage_end('start')
 parser = argparse.ArgumentParser(description='Run performance test.')
 # Explicitly decode files as UTF-8 because sometimes we have Russian characters in queries, and LANG=C is set.
 parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, help='test description file')
-parser.add_argument('--host', nargs='*', default=['127.0.0.1', '127.0.0.1'], help="Server hostname. Parallel to '--port'.")
-parser.add_argument('--port', nargs='*', default=[9001, 9002], help="Server port. Parallel to '--host'.")
+parser.add_argument('--host', nargs='*', default=['localhost'], help="Server hostname(s). Corresponds to '--port' options.")
+parser.add_argument('--port', nargs='*', default=[9000], help="Server port(s). Corresponds to '--host' options.")
 parser.add_argument('--runs', type=int, default=int(os.environ.get('CHPC_RUNS', 7)), help='Number of query runs per server. Defaults to CHPC_RUNS environment variable.')
 parser.add_argument('--no-long', type=bool, default=True, help='Skip the tests tagged as long.')
 args = parser.parse_args()


@@ -62,12 +62,14 @@ CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \
 ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/; \
 ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \
 ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/; \
+ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/; \
 ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/; \
 ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/; \
 ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/; \
 ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/; \
 ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/; \
 ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/; \
+ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/; \
 ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/; \
 ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/; \
 ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/; \


@@ -34,6 +34,7 @@ toc_title: Client Libraries
 - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
 - Ruby
 - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
+- [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
 - R
 - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
 - [RClickhouse](https://github.com/IMSMWU/RClickhouse)


@@ -36,6 +36,7 @@ toc_title: Client Libraries
 - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
 - Ruby
 - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
+- [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
 - R
 - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
 - [RClickhouse](https://github.com/IMSMWU/RClickhouse)


@@ -37,6 +37,7 @@ toc_title: "\u06A9\u062A\u0627\u0628\u062E\u0627\u0646\u0647 \u0647\u0627\u06CC
 - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
 - Ruby
 - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
+- [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
 - R
 - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
 - [RClickhouse](https://github.com/IMSMWU/RClickhouse)


@@ -36,6 +36,7 @@ toc_title: "Biblioth\xE8ques Clientes"
 - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
 - Ruby
 - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
+- [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
 - R
 - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
 - [RClickhouse](https://github.com/IMSMWU/RClickhouse)


@@ -36,6 +36,7 @@ toc_title: "\u30AF\u30E9\u30A4\u30A2\u30F3\u30C8"
 - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
 - Ruby
 - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
+- [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
 - R
 - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
 - [RClickhouse](https://github.com/IMSMWU/RClickhouse)


@@ -29,6 +29,7 @@
 - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
 - Ruby
 - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
+- [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
 - R
 - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
 - [RClickhouse](https://github.com/IMSMWU/RClickhouse)


@@ -28,6 +28,7 @@
 - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
 - Ruby
 - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
+- [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
 - R
 - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
 - [RClickhouse](https://github.com/IMSMWU/RClickhouse)


@@ -275,15 +275,20 @@ void GroupingAggregatedTransform::work()
 {
     if (!single_level_chunks.empty())
     {
-        auto & header = getOutputs().front().getHeader();
+        auto & header = getInputs().front().getHeader(); /// Take header from input port. Output header is empty.
         auto block = header.cloneWithColumns(single_level_chunks.back().detachColumns());
         single_level_chunks.pop_back();
         auto blocks = params->aggregator.convertBlockToTwoLevel(block);
         for (auto & cur_block : blocks)
         {
+            if (!cur_block)
+                continue;
             Int32 bucket = cur_block.info.bucket_num;
-            chunks_map[bucket].emplace_back(Chunk(cur_block.getColumns(), cur_block.rows()));
+            auto chunk_info = std::make_shared<AggregatedChunkInfo>();
+            chunk_info->bucket_num = bucket;
+            chunks_map[bucket].emplace_back(Chunk(cur_block.getColumns(), cur_block.rows(), std::move(chunk_info)));
         }
     }
 }


@@ -0,0 +1,7 @@
<yandex>
<users>
<default>
<access_management>1</access_management>
</default>
</users>
</yandex>
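
A note on what this enables: `access_management` is the per-user setting that allows SQL-driven access control, so with this users.d override the `default` user in the stateless-test image can manage users and grants. A minimal, hypothetical smoke check (the user name is a placeholder, not part of this commit), assuming a server started with this config:

-- These statements require access_management to be enabled for the current
-- user; with the override above they succeed for `default`.
CREATE USER IF NOT EXISTS test_user_placeholder;
GRANT SELECT ON system.* TO test_user_placeholder;
SHOW CREATE USER test_user_placeholder;
DROP USER IF EXISTS test_user_placeholder;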

tests/config/clusters.xml (new file)

@@ -0,0 +1,20 @@
<yandex>
<remote_servers>
<test_cluster_two_shards_different_databases>
<shard>
<replica>
<default_database>shard_0</default_database>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<default_database>shard_1</default_database>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
</test_cluster_two_shards_different_databases>
</remote_servers>
</yandex>
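
This cluster is consumed by the 01231 test added later in this commit; the empty database argument of the Distributed engine is what makes each shard resolve its own `<default_database>` from the cluster definition. A condensed usage sketch (the table names here are illustrative, not the test's):

-- Assumes a single local server with the clusters.xml above installed.
create database if not exists shard_0;
create database if not exists shard_1;
create table shard_0.t_example (x UInt64) engine = MergeTree order by x;
create table shard_1.t_example (x UInt64) engine = MergeTree order by x;
-- '' as the database argument: each shard uses its <default_database>.
create table t_example_dist (x UInt64) engine = Distributed(test_cluster_two_shards_different_databases, '', 't_example');
select count() from t_example_dist;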


@@ -7,7 +7,7 @@
 <port>9000</port>
 <user>default</user>
 <password></password>
-<db>test_00950</db>
+<db>system</db>
 <table>decimals</table>
 </clickhouse>
 </source>
@@ -45,7 +45,7 @@
 <port>9000</port>
 <user>default</user>
 <password></password>
-<db>test_00950</db>
+<db>system</db>
 <table>decimals</table>
 </clickhouse>
 </source>
@@ -83,7 +83,7 @@
 <port>9000</port>
 <user>default</user>
 <password></password>
-<db>test_00950</db>
+<db>system</db>
 <table>decimals</table>
 </clickhouse>
 </source>
@@ -121,7 +121,7 @@
 <port>9000</port>
 <user>default</user>
 <password></password>
-<db>test_00950</db>
+<db>system</db>
 <table>decimals</table>
 </clickhouse>
 </source>
@@ -162,7 +162,7 @@
 <port>9000</port>
 <user>default</user>
 <password></password>
-<db>test_00950</db>
+<db>system</db>
 <table>decimals</table>
 </clickhouse>
 </source>


@@ -7,7 +7,7 @@
 <port>9000</port>
 <user>default</user>
 <password></password>
-<db>test_00950</db>
+<db>system</db>
 <table>ints</table>
 </clickhouse>
 </source>
@@ -70,7 +70,7 @@
 <port>9000</port>
 <user>default</user>
 <password></password>
-<db>test_00950</db>
+<db>system</db>
 <table>ints</table>
 </clickhouse>
 </source>
@@ -133,7 +133,7 @@
 <port>9000</port>
 <user>default</user>
 <password></password>
-<db>test_00950</db>
+<db>system</db>
 <table>ints</table>
 </clickhouse>
 </source>
@@ -196,7 +196,7 @@
 <port>9000</port>
 <user>default</user>
 <password></password>
-<db>test_00950</db>
+<db>system</db>
 <table>ints</table>
 </clickhouse>
 </source>
@@ -259,7 +259,7 @@
 <port>9000</port>
 <user>default</user>
 <password></password>
-<db>test_00950</db>
+<db>system</db>
 <table>ints</table>
 </clickhouse>
 </source>
@@ -325,7 +325,7 @@
 <port>9000</port>
 <user>default</user>
 <password></password>
-<db>test_00950</db>
+<db>system</db>
 <table>ints</table>
 </clickhouse>
 </source>


@@ -7,7 +7,7 @@
 <port>9000</port>
 <user>default</user>
 <password></password>
-<db>test_00950</db>
+<db>system</db>
 <table>strings</table>
 </clickhouse>
 </source>
@@ -35,7 +35,7 @@
 <port>9000</port>
 <user>default</user>
 <password></password>
-<db>test_00950</db>
+<db>system</db>
 <table>strings</table>
 </clickhouse>
 </source>
@@ -63,7 +63,7 @@
 <port>9000</port>
 <user>default</user>
 <password></password>
-<db>test_00950</db>
+<db>system</db>
 <table>strings</table>
 </clickhouse>
 </source>
@@ -91,7 +91,7 @@
 <port>9000</port>
 <user>default</user>
 <password></password>
-<db>test_00950</db>
+<db>system</db>
 <table>strings</table>
 </clickhouse>
 </source>
@@ -122,7 +122,7 @@
 <port>9000</port>
 <user>default</user>
 <password></password>
-<db>test_00950</db>
+<db>system</db>
 <table>strings</table>
 </clickhouse>
 </source>
@@ -153,7 +153,7 @@
 <port>9000</port>
 <user>default</user>
 <password></password>
-<db>test_00950</db>
+<db>system</db>
 <table>strings</table>
 </clickhouse>
 </source>
@@ -184,7 +184,7 @@
 <port>9000</port>
 <user>default</user>
 <password></password>
-<db>test_00950</db>
+<db>system</db>
 <table>strings</table>
 </clickhouse>
 </source>


@@ -1,6 +1,5 @@
--- Must use `test_00950` database and these tables - they're configured in tests/*_dictionary.xml
-create database if not exists test_00950;
-use test_00950;
+-- Must use `system` database and these tables - they're configured in tests/*_dictionary.xml
+use system;
 drop table if exists ints;
 drop table if exists strings;
 drop table if exists decimals;
@@ -270,7 +269,14 @@ select 'dictGetOrDefault', 'complex_cache_decimals' as dict_name, tuple(toUInt64
 dictGetOrDefault(dict_name, 'd64', k, toDecimal64(42, 6)),
 dictGetOrDefault(dict_name, 'd128', k, toDecimal128(42, 1));
-drop table ints;
-drop table strings;
-drop table decimals;
-drop database test_00950;
+--
+-- Keep the tables, so that the dictionaries can be reloaded correctly and
+-- SYSTEM RELOAD DICTIONARIES doesn't break.
+-- We could also:
+-- * drop the dictionaries -- not possible, they are configured in a .xml;
+-- * switch dictionaries to DDL syntax so that they can be dropped -- tedious,
+--   because there are a couple dozen of them, and also we need to have some
+--   .xml dictionaries in tests so that we test backward compatibility with this
+--   format;
+-- * unload dictionaries -- no command for that.
+--
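
The reload path the new comment refers to can be exercised directly; a minimal check, offered as an illustration and assuming the .xml-configured dictionaries above are installed on the server:

-- With the source tables kept in the system database, reloading the
-- externally configured dictionaries should not fail on a missing table.
SYSTEM RELOAD DICTIONARIES;
SELECT name, status FROM system.dictionaries WHERE name LIKE '%decimals%';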


@@ -1,44 +1,44 @@
-EXISTS database_for_dict.t;
-EXISTS TABLE database_for_dict.t;
-EXISTS DICTIONARY database_for_dict.t;
-DROP DATABASE IF EXISTS database_for_dict;
-CREATE DATABASE database_for_dict Engine = Ordinary;
-DROP TABLE IF EXISTS database_for_dict.t;
-EXISTS database_for_dict.t;
-EXISTS TABLE database_for_dict.t;
-EXISTS DICTIONARY database_for_dict.t;
-CREATE TABLE database_for_dict.t (x UInt8) ENGINE = Memory;
-EXISTS database_for_dict.t;
-EXISTS TABLE database_for_dict.t;
-EXISTS DICTIONARY database_for_dict.t;
-DROP TABLE database_for_dict.t;
-EXISTS database_for_dict.t;
-EXISTS TABLE database_for_dict.t;
-EXISTS DICTIONARY database_for_dict.t;
-DROP DICTIONARY IF EXISTS t;
-CREATE TEMPORARY TABLE t (x UInt8);
-EXISTS t; -- Does not work for temporary tables. Maybe have to fix.
-EXISTS TABLE t;
-EXISTS DICTIONARY t;
-CREATE DICTIONARY database_for_dict.t (k UInt64, v String) PRIMARY KEY k LAYOUT(FLAT()) SOURCE(HTTP(URL 'http://example.test/' FORMAT TSV)) LIFETIME(1000);
-EXISTS database_for_dict.t;
-EXISTS TABLE database_for_dict.t; -- Dictionaries are tables as well. But not all tables are dictionaries.
-EXISTS DICTIONARY database_for_dict.t;
+EXISTS db_01048.t_01048;
+EXISTS TABLE db_01048.t_01048;
+EXISTS DICTIONARY db_01048.t_01048;
+DROP DATABASE IF EXISTS db_01048;
+CREATE DATABASE db_01048 Engine = Ordinary;
+DROP TABLE IF EXISTS db_01048.t_01048;
+EXISTS db_01048.t_01048;
+EXISTS TABLE db_01048.t_01048;
+EXISTS DICTIONARY db_01048.t_01048;
+CREATE TABLE db_01048.t_01048 (x UInt8) ENGINE = Memory;
+EXISTS db_01048.t_01048;
+EXISTS TABLE db_01048.t_01048;
+EXISTS DICTIONARY db_01048.t_01048;
+DROP TABLE db_01048.t_01048;
+EXISTS db_01048.t_01048;
+EXISTS TABLE db_01048.t_01048;
+EXISTS DICTIONARY db_01048.t_01048;
+DROP DICTIONARY IF EXISTS t_01048;
+CREATE TEMPORARY TABLE t_01048 (x UInt8);
+EXISTS t_01048; -- Does not work for temporary tables. Maybe have to fix.
+EXISTS TABLE t_01048;
+EXISTS DICTIONARY t_01048;
+CREATE DICTIONARY db_01048.t_01048 (k UInt64, v String) PRIMARY KEY k LAYOUT(FLAT()) SOURCE(HTTP(URL 'http://example.test/' FORMAT TSV)) LIFETIME(1000);
+EXISTS db_01048.t_01048;
+EXISTS TABLE db_01048.t_01048; -- Dictionaries are tables as well. But not all tables are dictionaries.
+EXISTS DICTIONARY db_01048.t_01048;
 -- But dictionary-tables cannot be dropped as usual tables.
-DROP TABLE database_for_dict.t; -- { serverError 60 }
-DROP DICTIONARY database_for_dict.t;
-EXISTS database_for_dict.t;
-EXISTS TABLE database_for_dict.t;
-EXISTS DICTIONARY database_for_dict.t;
-DROP DATABASE database_for_dict;
-EXISTS database_for_dict.t;
-EXISTS TABLE database_for_dict.t;
-EXISTS DICTIONARY database_for_dict.t;
+DROP TABLE db_01048.t_01048; -- { serverError 60 }
+DROP DICTIONARY db_01048.t_01048;
+EXISTS db_01048.t_01048;
+EXISTS TABLE db_01048.t_01048;
+EXISTS DICTIONARY db_01048.t_01048;
+DROP DATABASE db_01048;
+EXISTS db_01048.t_01048;
+EXISTS TABLE db_01048.t_01048;
+EXISTS DICTIONARY db_01048.t_01048;


@@ -1,7 +1,7 @@
-CREATE TABLE t (a Int) ENGINE = Log;
-ATTACH TABLE t; -- { serverError 57 }
-ATTACH TABLE IF NOT EXISTS t;
-DETACH TABLE t;
-ATTACH TABLE IF NOT EXISTS t;
-EXISTS TABLE t;
-DROP TABLE t;
+CREATE TABLE aine (a Int) ENGINE = Log;
+ATTACH TABLE aine; -- { serverError 57 }
+ATTACH TABLE IF NOT EXISTS aine;
+DETACH TABLE aine;
+ATTACH TABLE IF NOT EXISTS aine;
+EXISTS TABLE aine;
+DROP TABLE aine;


@@ -0,0 +1,10 @@
0 2
1 1
2 1
3 1
4 1
5 1
6 1
7 1
8 1
9 1


@@ -0,0 +1,25 @@
set send_logs_level = 'error';
create database if not exists shard_0;
create database if not exists shard_1;
drop table if exists shard_0.shard_01231_distributed_aggregation_memory_efficient;
drop table if exists shard_1.shard_01231_distributed_aggregation_memory_efficient;
drop table if exists ma_dist;
create table shard_0.shard_01231_distributed_aggregation_memory_efficient (x UInt64) engine = MergeTree order by x;
create table shard_1.shard_01231_distributed_aggregation_memory_efficient (x UInt64) engine = MergeTree order by x;
insert into shard_0.shard_01231_distributed_aggregation_memory_efficient select * from numbers(1);
insert into shard_1.shard_01231_distributed_aggregation_memory_efficient select * from numbers(10);
create table ma_dist (x UInt64) ENGINE = Distributed(test_cluster_two_shards_different_databases, '', 'shard_01231_distributed_aggregation_memory_efficient');
set distributed_aggregation_memory_efficient = 1;
set group_by_two_level_threshold = 2;
set max_bytes_before_external_group_by = 16;
select x, count() from ma_dist group by x order by x;
drop table if exists shard_0.shard_01231_distributed_aggregation_memory_efficient;
drop table if exists shard_1.shard_01231_distributed_aggregation_memory_efficient;
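
For readers cross-checking the .reference file above: shard_0 holds numbers(1) = {0} and shard_1 holds numbers(10) = {0..9}, so x = 0 is counted on both shards (count 2) while 1..9 appear once each. The same arithmetic on a single server, as an illustration only and not part of the test:

-- 0 appears in both inputs, 1..9 only in the second one.
select x, count()
from (select number as x from numbers(1) union all select number as x from numbers(10))
group by x
order by x;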