Merge branch 'master' into small_fixes_in_dict_parser
Commit 9bc0ab2eed
@@ -15,8 +15,6 @@ ClickHouse is an open-source column-oriented database management system that all

## Upcoming Events

* [ClickHouse Online Meetup (in Russian)](https://events.yandex.ru/events/click-house-onlajn-vs-03-04-2020) on April 3, 2020.
* [Talk on Saint HighLoad++ (online in Russian)](https://www.highload.ru/spb/2020/abstracts/6647) on April 6, 2020.
* [ClickHouse in Avito (online in Russian)](https://avitotech.timepad.ru/event/1290051/) on April 9, 2020.
* [ClickHouse Workshop in Novosibirsk](https://2020.codefest.ru/lecture/1628) on TBD date.
* [Yandex C++ Open-Source Sprints in Moscow](https://events.yandex.ru/events/otkrytyj-kod-v-yandek-28-03-2020) on TBD date.
@@ -148,7 +148,7 @@ function run_tests

TIMEFORMAT=$(printf "$test_name\t%%3R\t%%3U\t%%3S\n")
# the grep is to filter out set -x output and keep only time output
{ time "$script_dir/perf.py" "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; } 2>&1 >/dev/null | grep -v ^+ >> "wall-clock-times.tsv" || continue
{ time "$script_dir/perf.py" --host=localhost --port=9001 --host=localhost --port=9002 "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; } 2>&1 >/dev/null | grep -v ^+ >> "wall-clock-times.tsv" || continue

# The test completed with zero status, so we treat stderr as warnings
mv "$test_name-err.log" "$test_name-warn.log"
@@ -23,8 +23,8 @@ report_stage_end('start')

parser = argparse.ArgumentParser(description='Run performance test.')
# Explicitly decode files as UTF-8 because sometimes we have Russian characters in queries, and LANG=C is set.
parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, help='test description file')
parser.add_argument('--host', nargs='*', default=['127.0.0.1', '127.0.0.1'], help="Server hostname. Parallel to '--port'.")
parser.add_argument('--port', nargs='*', default=[9001, 9002], help="Server port. Parallel to '--host'.")
parser.add_argument('--host', nargs='*', default=['localhost'], help="Server hostname(s). Corresponds to '--port' options.")
parser.add_argument('--port', nargs='*', default=[9000], help="Server port(s). Corresponds to '--host' options.")
parser.add_argument('--runs', type=int, default=int(os.environ.get('CHPC_RUNS', 7)), help='Number of query runs per server. Defaults to CHPC_RUNS environment variable.')
parser.add_argument('--no-long', type=bool, default=True, help='Skip the tests tagged as long.')
args = parser.parse_args()
@@ -62,12 +62,14 @@ CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \
ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \
ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/; \
ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/; \
ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/; \
ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/; \
ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/; \
@@ -34,6 +34,7 @@ toc_title: Client Libraries

    - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
- Ruby
    - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
    - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
- R
    - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
    - [RClickhouse](https://github.com/IMSMWU/RClickhouse)
@@ -837,30 +837,57 @@ WHERE

If this query doesn’t return anything, it means that everything is fine.

## system.settings {#system-settings}
## system.settings {#system-tables-system-settings}

Contains information about settings that are currently in use.
I.e. used for executing the query you are using to read from the system.settings table.
Contains information about session settings for the current user.

Columns:

- `name` (String) — Setting name.
- `value` (String) — Setting value.
- `description` (String) — Setting description.
- `type` (String) — Setting type (implementation-specific string value).
- `changed` (UInt8) — Whether the setting was explicitly defined in the config or explicitly changed.
- `min` (Nullable(String)) — Minimum allowed value (if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings)).
- `max` (Nullable(String)) — Maximum allowed value (if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings)).
- `readonly` (UInt8) — Whether the user can change this setting (for more info, see [constraints](settings/constraints_on_settings.md#constraints-on-settings)).
- `name` ([String](../data_types/string.md)) — Setting name.
- `value` ([String](../data_types/string.md)) — Setting value.
- `changed` ([UInt8](../data_types/int_uint.md#uint-ranges)) — Shows whether a setting is changed from its default value.
- `description` ([String](../data_types/string.md)) — Short setting description.
- `min` ([Nullable](../data_types/nullable.md)([String](../data_types/string.md))) — Minimum value of the setting, if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings). If the setting has no minimum value, contains [NULL](../query_language/syntax.md#null-literal).
- `max` ([Nullable](../data_types/nullable.md)([String](../data_types/string.md))) — Maximum value of the setting, if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings). If the setting has no maximum value, contains [NULL](../query_language/syntax.md#null-literal).
- `readonly` ([UInt8](../data_types/int_uint.md#uint-ranges)) — Shows whether the current user can change the setting:
    - `0` — Current user can change the setting.
    - `1` — Current user can't change the setting.

Example:

**Example**

The following example shows how to get information about settings whose name contains `min_i`.

```sql
SELECT name, value
SELECT *
FROM system.settings
WHERE changed
WHERE name LIKE '%min_i%'
```

```text
┌─name────────────────────────────────────────┬─value─────┬─changed─┬─description────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─min──┬─max──┬─readonly─┐
│ min_insert_block_size_rows                  │ 1048576   │       0 │ Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough.                                                         │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │
│ min_insert_block_size_bytes                 │ 268435456 │       0 │ Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough.                                                        │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │
│ read_backoff_min_interval_between_events_ms │ 1000      │       0 │ Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │
└─────────────────────────────────────────────┴───────────┴─────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────┴──────┴──────────┘
```

Using `WHERE changed` can be useful, for example, when you want to check:

- Whether settings in configuration files are loaded correctly and are in use.
- Settings that changed in the current session.

```sql
SELECT * FROM system.settings WHERE changed AND name='load_balancing'
```
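A similar filter can be applied to the `readonly` column described above; the following query is an illustrative sketch rather than part of the original page:

```sql
-- Settings the current user is not allowed to change, with any constraint bounds
-- (min/max contain NULL when no constraint is set).
SELECT name, value, min, max
FROM system.settings
WHERE readonly = 1
ORDER BY name
```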
**See also**

- [Settings](settings/index.md#settings)
- [Permissions for Queries](settings/permissions_for_queries.md#settings_readonly)
- [Constraints on Settings](settings/constraints_on_settings.md)
## system.table_engines

``` text
┌─name───────────────────┬─value───────┐
│ max_threads            │ 8           │
@@ -527,6 +527,49 @@ Calculates the average.

Only works for numbers.
The result is always Float64.


## avgWeighted {#avgweighted}

Calculates the [weighted arithmetic mean](https://en.wikipedia.org/wiki/Weighted_arithmetic_mean).

**Syntax**

```sql
avgWeighted(x, weight)
```

**Parameters**

- `x` — Values. [Integer](../data_types/int_uint.md) or [floating-point](../data_types/float.md).
- `weight` — Weights of the values. [Integer](../data_types/int_uint.md) or [floating-point](../data_types/float.md).

The types of `x` and `weight` must be the same.

**Returned value**

- Weighted mean.
- `NaN` if all the weights are equal to 0.

Type: [Float64](../data_types/float.md).

**Example**

Query:

``` sql
SELECT avgWeighted(x, w)
FROM values('x Int8, w Int8', (4, 1), (1, 0), (10, 2))
```

Result:

```text
┌─avgWeighted(x, weight)─┐
│                      8 │
└────────────────────────┘
```
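As a small illustration of the `NaN` case listed under "Returned value" (a sketch, not taken from the original page), passing only zero weights makes the denominator of the weighted mean zero:

```sql
-- Every weight is 0, so avgWeighted returns nan.
SELECT avgWeighted(x, w)
FROM values('x Int8, w Int8', (1, 0), (2, 0), (3, 0))
```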

## uniq {#agg_function-uniq}

Calculates the approximate number of different values of the argument.
@@ -36,6 +36,7 @@ toc_title: Bibliotecas de clientes

    - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
- Ruby
    - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
    - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
- R
    - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
    - [RClickhouse](https://github.com/IMSMWU/RClickhouse)
@@ -37,6 +37,7 @@ toc_title: "\u06A9\u062A\u0627\u0628\u062E\u0627\u0646\u0647 \u0647\u0627\u06CC

    - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
- Ruby
    - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
    - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
- R
    - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
    - [RClickhouse](https://github.com/IMSMWU/RClickhouse)
@@ -36,6 +36,7 @@ toc_title: "Biblioth\xE8ques Clientes"

    - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
- Ruby
    - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
    - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
- R
    - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
    - [RClickhouse](https://github.com/IMSMWU/RClickhouse)
@@ -36,6 +36,7 @@ toc_title: "\u30AF\u30E9\u30A4\u30A2\u30F3\u30C8"

    - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
- Ruby
    - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
    - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
- R
    - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
    - [RClickhouse](https://github.com/IMSMWU/RClickhouse)
@@ -29,6 +29,7 @@

    - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
- Ruby
    - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
    - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
- R
    - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
    - [RClickhouse](https://github.com/IMSMWU/RClickhouse)
@@ -882,27 +882,57 @@ WHERE

If this query doesn’t return anything, it means that everything is fine.

## system.settings {#system-settings}
## system.settings {#system-tables-system-settings}

Contains information about the settings that are currently in use.
That is, the settings used for executing the query you are using to read from the system.settings table.
Contains information about session settings for the current user.

Columns:

``` text
name String    - setting name
value String   - setting value
changed UInt8  - whether the setting was explicitly defined in the config or explicitly changed
```
- `name` ([String](../data_types/string.md)) — Setting name.
- `value` ([String](../data_types/string.md)) — Setting value.
- `changed` ([UInt8](../data_types/int_uint.md#uint-ranges)) — Shows whether a setting is changed from its default value.
- `description` ([String](../data_types/string.md)) — Short setting description.
- `min` ([Nullable](../data_types/nullable.md)([String](../data_types/string.md))) — Minimum value of the setting, if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings). If the setting has no minimum value, contains [NULL](../query_language/syntax.md#null-literal).
- `max` ([Nullable](../data_types/nullable.md)([String](../data_types/string.md))) — Maximum value of the setting, if any is set via [constraints](settings/constraints_on_settings.md#constraints-on-settings). If the setting has no maximum value, contains [NULL](../query_language/syntax.md#null-literal).
- `readonly` ([UInt8](../data_types/int_uint.md#uint-ranges)) — Shows whether the current user can change the setting:
    - `0` — Current user can change the setting.
    - `1` — Current user can't change the setting.

Example:
**Example**

The example shows how to get information about settings whose names contain `min_i`.

```sql
SELECT *
FROM system.settings
WHERE changed
WHERE name LIKE '%min_i%'
```

```text
┌─name────────────────────────────────────────┬─value─────┬─changed─┬─description────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─min──┬─max──┬─readonly─┐
│ min_insert_block_size_rows                  │ 1048576   │       0 │ Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough.                                                         │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │
│ min_insert_block_size_bytes                 │ 268435456 │       0 │ Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough.                                                        │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │
│ read_backoff_min_interval_between_events_ms │ 1000      │       0 │ Settings to reduce the number of threads in case of slow reads. Do not pay attention to the event, if the previous one has passed less than a certain amount of time. │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │        0 │
└─────────────────────────────────────────────┴───────────┴─────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────┴──────┴──────────┘
```

Using `WHERE changed` can be useful, for example, when you want to check:

- Whether settings in the configuration file were loaded correctly and are in use.
- Settings changed in the current session.

```sql
SELECT * FROM system.settings WHERE changed AND name='load_balancing'
```
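A companion query (an illustrative sketch, not part of the original page) lists only the settings that have a constraint attached, i.e. a non-NULL `min` or `max`:

```sql
-- Settings with at least one constraint bound set.
SELECT name, min, max
FROM system.settings
WHERE min IS NOT NULL OR max IS NOT NULL
```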
**See also**

- [Settings](settings/index.md#settings)
- [Permissions for Queries](settings/permissions_for_queries.md#settings_readonly)
- [Constraints on Settings](settings/constraints_on_settings.md)
## system.table_engines

``` text
┌─name───────────────────┬─value───────┬─changed─┐
│ max_threads            │ 8           │       1 │
@@ -523,6 +523,49 @@ FROM (

Only works for numbers.
The result is always Float64.


## avgWeighted {#avgweighted}

Calculates the [weighted arithmetic mean](https://ru.wikipedia.org/wiki/Среднее_арифметическое_взвешенное).

**Syntax**

```sql
avgWeighted(x, weight)
```

**Parameters**

- `x` — Values. [Integers](../../data_types/int_uint.md) or [floating-point numbers](../../data_types/float.md).
- `weight` — Weights of the values. [Integers](../../data_types/int_uint.md) or [floating-point numbers](../../data_types/float.md).

The parameter types must match.

**Returned value**

- Weighted arithmetic mean.
- `NaN` if all the weights are equal to 0.

Type: [Float64](../../data_types/float.md).

**Example**

Query:

```sql
SELECT avgWeighted(x, w)
FROM values('x Int8, w Int8', (4, 1), (1, 0), (10, 2))
```

Result:

```text
┌─avgWeighted(x, weight)─┐
│                      8 │
└────────────────────────┘
```
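Because the parameter types must match (see above), arguments of different types can be cast to a common type first; a minimal sketch, not taken from the original page:

```sql
-- Cast both arguments to Float64 so that their types match.
SELECT avgWeighted(toFloat64(x), toFloat64(w))
FROM values('x Int8, w UInt16', (4, 1), (1, 0), (10, 2))
```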

## uniq {#agg_function-uniq}

Calculates the approximate number of different values of the argument.
@@ -1,8 +1,8 @@
Babel==2.8.0
backports-abc==0.5
backports.functools-lru-cache==1.6.1
beautifulsoup4==4.8.2
certifi==2019.11.28
beautifulsoup4==4.9.0
certifi==2020.4.5.1
chardet==3.0.4
click==7.1.1
closure==20191111
@@ -1,5 +1,5 @@
Babel==2.8.0
certifi==2019.11.28
certifi==2020.4.5.1
chardet==3.0.4
googletrans==2.4.0
idna==2.9
@@ -28,6 +28,7 @@

    - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse)
- Ruby
    - [ClickHouse (Ruby)](https://github.com/shlima/click_house)
    - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord)
- R
    - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
    - [RClickhouse](https://github.com/IMSMWU/RClickhouse)
@@ -244,14 +244,17 @@ DatabasePtr DatabaseCatalog::detachDatabase(const String & database_name, bool d
if (database_name == TEMPORARY_DATABASE)
throw Exception("Cannot detach database with temporary tables.", ErrorCodes::DATABASE_ACCESS_DENIED);

std::shared_ptr<IDatabase> db;
{
std::lock_guard lock{databases_mutex};
assertDatabaseExistsUnlocked(database_name);
auto db = databases.find(database_name)->second;
db = databases.find(database_name)->second;

if (check_empty && !db->empty(*global_context))
throw Exception("New table appeared in database being dropped or detached. Try again.", ErrorCodes::DATABASE_NOT_EMPTY);

databases.erase(database_name);
}

db->shutdown();
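For context, this function is reached when a database is dropped or detached: it refuses to detach the database that holds temporary tables, re-checks emptiness under the lock, and only then erases the entry and shuts the database down. A hedged SQL sketch of a statement that exercises this path (the database name is illustrative):

```sql
-- DETACH DATABASE ends up in DatabaseCatalog::detachDatabase.
CREATE DATABASE IF NOT EXISTS db_for_detach;
DETACH DATABASE db_for_detach;
```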
@@ -893,7 +893,13 @@ void InterpreterSelectQuery::executeImpl(TPipeline & pipeline, const BlockInputS
default_totals = true;
}

bool inflating_join = join && !typeid_cast<Join *>(join.get());
bool inflating_join = false;
if (join)
{
inflating_join = true;
if (auto * hash_join = typeid_cast<Join *>(join.get()))
inflating_join = isCross(hash_join->getKind());
}

pipeline.addSimpleTransform([&](const Block & header, QueryPipeline::StreamType type)
{
@@ -37,6 +37,16 @@ namespace ErrorCodes
extern const int TYPE_MISMATCH;
}

namespace
{

struct NotProcessedCrossJoin : public ExtraBlock
{
size_t left_position;
size_t right_block;
};

}

static ColumnPtr filterWithBlanks(ColumnPtr src_column, const IColumn::Filter & filter, bool inverse_filter = false)
{
@@ -1055,53 +1065,81 @@ void Join::joinBlockImpl(
}
}


void Join::joinBlockImplCross(Block & block) const
void Join::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) const
{
/// Add new columns to the block.
size_t max_joined_block_rows = table_join->maxJoinedBlockRows();
size_t start_left_row = 0;
size_t start_right_block = 0;
if (not_processed)
{
auto & continuation = static_cast<NotProcessedCrossJoin &>(*not_processed);
start_left_row = continuation.left_position;
start_right_block = continuation.right_block;
not_processed.reset();
}

size_t num_existing_columns = block.columns();
size_t num_columns_to_add = sample_block_with_columns_to_add.columns();

ColumnRawPtrs src_left_columns;
MutableColumns dst_columns;

{
src_left_columns.reserve(num_existing_columns);
dst_columns.reserve(num_existing_columns + num_columns_to_add);

for (const ColumnWithTypeAndName & left_column : block)
{
src_left_columns.push_back(left_column.column.get());
dst_columns.emplace_back(src_left_columns.back()->cloneEmpty());
}

for (const ColumnWithTypeAndName & right_column : sample_block_with_columns_to_add)
dst_columns.emplace_back(right_column.column->cloneEmpty());

for (auto & dst : dst_columns)
dst->reserve(max_joined_block_rows);
}

size_t rows_left = block.rows();
size_t rows_added = 0;

ColumnRawPtrs src_left_columns(num_existing_columns);
MutableColumns dst_columns(num_existing_columns + num_columns_to_add);

for (size_t i = 0; i < num_existing_columns; ++i)
{
src_left_columns[i] = block.getByPosition(i).column.get();
dst_columns[i] = src_left_columns[i]->cloneEmpty();
}

for (size_t i = 0; i < num_columns_to_add; ++i)
{
const ColumnWithTypeAndName & src_column = sample_block_with_columns_to_add.getByPosition(i);
dst_columns[num_existing_columns + i] = src_column.column->cloneEmpty();
block.insert(src_column);
}

/// NOTE It would be better to use `reserve`, as well as `replicate` methods to duplicate the values of the left block.

for (size_t i = 0; i < rows_left; ++i)
for (size_t left_row = start_left_row; left_row < rows_left; ++left_row)
{
size_t block_number = 0;
for (const Block & block_right : data->blocks)
{
++block_number;
if (block_number < start_right_block)
continue;

size_t rows_right = block_right.rows();
rows_added += rows_right;

for (size_t col_num = 0; col_num < num_existing_columns; ++col_num)
for (size_t j = 0; j < rows_right; ++j)
dst_columns[col_num]->insertFrom(*src_left_columns[col_num], i);
dst_columns[col_num]->insertManyFrom(*src_left_columns[col_num], left_row, rows_right);

for (size_t col_num = 0; col_num < num_columns_to_add; ++col_num)
{
const IColumn * column_right = block_right.getByPosition(col_num).column.get();
const IColumn & column_right = *block_right.getByPosition(col_num).column;
dst_columns[num_existing_columns + col_num]->insertRangeFrom(column_right, 0, rows_right);
}
}

for (size_t j = 0; j < rows_right; ++j)
dst_columns[num_existing_columns + col_num]->insertFrom(*column_right, j);
}
start_right_block = 0;

if (rows_added > max_joined_block_rows)
{
not_processed = std::make_shared<NotProcessedCrossJoin>(
NotProcessedCrossJoin{{block.cloneEmpty()}, left_row, block_number + 1});
not_processed->block.swap(block);
break;
}
}

for (const ColumnWithTypeAndName & src_column : sample_block_with_columns_to_add)
block.insert(src_column);

block = block.cloneWithColumns(std::move(dst_columns));
}
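The rewritten loop remembers `left_row` and `block_number`, so once `rows_added` exceeds `max_joined_block_rows` the rest of the CROSS JOIN is handed back through `not_processed` and continued on the next call instead of being materialized in one huge block. The setting that drives this is exercised by the new stateless test added later in this commit; a compact sketch of the same idea:

```sql
-- Produce the cross join in blocks of at most 1000 rows instead of a single block.
SET max_joined_block_size_rows = 1000;
SELECT count(*) FROM numbers(10000) n1 CROSS JOIN numbers(1000) n2;
```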
@@ -1160,7 +1198,7 @@ void Join::joinGet(Block & block, const String & column_name) const
}


void Join::joinBlock(Block & block, ExtraBlockPtr &)
void Join::joinBlock(Block & block, ExtraBlockPtr & not_processed)
{
std::shared_lock lock(data->rwlock);

@@ -1175,7 +1213,7 @@ void Join::joinBlock(Block & block, ExtraBlockPtr &)
/// Joined
}
else if (kind == ASTTableJoin::Kind::Cross)
joinBlockImplCross(block);
joinBlockImplCross(block, not_processed);
else
throw Exception("Logical error: unknown combination of JOIN", ErrorCodes::LOGICAL_ERROR);
}
@@ -379,7 +379,7 @@ private:
const Block & block_with_columns_to_add,
const Maps & maps) const;

void joinBlockImplCross(Block & block) const;
void joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) const;

template <typename Maps>
void joinGetImpl(Block & block, const String & column_name, const Maps & maps) const;
@@ -275,15 +275,20 @@ void GroupingAggregatedTransform::work()
{
if (!single_level_chunks.empty())
{
auto & header = getOutputs().front().getHeader();
auto & header = getInputs().front().getHeader(); /// Take header from input port. Output header is empty.
auto block = header.cloneWithColumns(single_level_chunks.back().detachColumns());
single_level_chunks.pop_back();
auto blocks = params->aggregator.convertBlockToTwoLevel(block);

for (auto & cur_block : blocks)
{
if (!cur_block)
continue;

Int32 bucket = cur_block.info.bucket_num;
chunks_map[bucket].emplace_back(Chunk(cur_block.getColumns(), cur_block.rows()));
auto chunk_info = std::make_shared<AggregatedChunkInfo>();
chunk_info->bucket_num = bucket;
chunks_map[bucket].emplace_back(Chunk(cur_block.getColumns(), cur_block.rows(), std::move(chunk_info)));
}
}
}
tests/config/access_management.xml (new file)

@@ -0,0 +1,7 @@
<yandex>
    <users>
        <default>
            <access_management>1</access_management>
        </default>
    </users>
</yandex>
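This new config turns on SQL-driven access management for the `default` user in the test environment. A hypothetical sketch of statements that become available once it is enabled (the user name and grant below are illustrative, not taken from this commit):

```sql
-- Requires access_management = 1 for the current user.
CREATE USER IF NOT EXISTS test_user IDENTIFIED WITH no_password;
GRANT SELECT ON system.settings TO test_user;
```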
tests/config/clusters.xml (new file)

@@ -0,0 +1,20 @@
<yandex>
    <remote_servers>
        <test_cluster_two_shards_different_databases>
            <shard>
                <replica>
                    <default_database>shard_0</default_database>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <default_database>shard_1</default_database>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
            </shard>
        </test_cluster_two_shards_different_databases>
    </remote_servers>
</yandex>
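The new `test_cluster_two_shards_different_databases` cluster points both shards at the local server but with different default databases, so a Distributed table can fan a query out over `shard_0` and `shard_1`. The stateless test 01231 added later in this commit uses it exactly this way; a condensed sketch:

```sql
-- The empty database argument makes each shard use its own default_database.
CREATE TABLE ma_dist (x UInt64)
ENGINE = Distributed(test_cluster_two_shards_different_databases, '', 'shard_01231_distributed_aggregation_memory_efficient');

SELECT x, count() FROM ma_dist GROUP BY x ORDER BY x;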
@@ -7,7 +7,7 @@
<port>9000</port>
<user>default</user>
<password></password>
<db>test_00950</db>
<db>system</db>
<table>decimals</table>
</clickhouse>
</source>
@@ -45,7 +45,7 @@
<port>9000</port>
<user>default</user>
<password></password>
<db>test_00950</db>
<db>system</db>
<table>decimals</table>
</clickhouse>
</source>
@@ -83,7 +83,7 @@
<port>9000</port>
<user>default</user>
<password></password>
<db>test_00950</db>
<db>system</db>
<table>decimals</table>
</clickhouse>
</source>
@@ -121,7 +121,7 @@
<port>9000</port>
<user>default</user>
<password></password>
<db>test_00950</db>
<db>system</db>
<table>decimals</table>
</clickhouse>
</source>
@@ -162,7 +162,7 @@
<port>9000</port>
<user>default</user>
<password></password>
<db>test_00950</db>
<db>system</db>
<table>decimals</table>
</clickhouse>
</source>
@@ -7,7 +7,7 @@
<port>9000</port>
<user>default</user>
<password></password>
<db>test_00950</db>
<db>system</db>
<table>ints</table>
</clickhouse>
</source>
@@ -70,7 +70,7 @@
<port>9000</port>
<user>default</user>
<password></password>
<db>test_00950</db>
<db>system</db>
<table>ints</table>
</clickhouse>
</source>
@@ -133,7 +133,7 @@
<port>9000</port>
<user>default</user>
<password></password>
<db>test_00950</db>
<db>system</db>
<table>ints</table>
</clickhouse>
</source>
@@ -196,7 +196,7 @@
<port>9000</port>
<user>default</user>
<password></password>
<db>test_00950</db>
<db>system</db>
<table>ints</table>
</clickhouse>
</source>
@@ -259,7 +259,7 @@
<port>9000</port>
<user>default</user>
<password></password>
<db>test_00950</db>
<db>system</db>
<table>ints</table>
</clickhouse>
</source>
@@ -325,7 +325,7 @@
<port>9000</port>
<user>default</user>
<password></password>
<db>test_00950</db>
<db>system</db>
<table>ints</table>
</clickhouse>
</source>
@@ -7,7 +7,7 @@
<port>9000</port>
<user>default</user>
<password></password>
<db>test_00950</db>
<db>system</db>
<table>strings</table>
</clickhouse>
</source>
@@ -35,7 +35,7 @@
<port>9000</port>
<user>default</user>
<password></password>
<db>test_00950</db>
<db>system</db>
<table>strings</table>
</clickhouse>
</source>
@@ -63,7 +63,7 @@
<port>9000</port>
<user>default</user>
<password></password>
<db>test_00950</db>
<db>system</db>
<table>strings</table>
</clickhouse>
</source>
@@ -91,7 +91,7 @@
<port>9000</port>
<user>default</user>
<password></password>
<db>test_00950</db>
<db>system</db>
<table>strings</table>
</clickhouse>
</source>
@@ -122,7 +122,7 @@
<port>9000</port>
<user>default</user>
<password></password>
<db>test_00950</db>
<db>system</db>
<table>strings</table>
</clickhouse>
</source>
@@ -153,7 +153,7 @@
<port>9000</port>
<user>default</user>
<password></password>
<db>test_00950</db>
<db>system</db>
<table>strings</table>
</clickhouse>
</source>
@@ -184,7 +184,7 @@
<port>9000</port>
<user>default</user>
<password></password>
<db>test_00950</db>
<db>system</db>
<table>strings</table>
</clickhouse>
</source>
@@ -10,7 +10,7 @@ import subprocess
import sys

CUR_FILE_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT_CLICKHOUSE_ROOT = os.path.abspath(os.path.join(CUR_FILE_DIR, "../../../"))
DEFAULT_CLICKHOUSE_ROOT = os.path.abspath(os.path.join(CUR_FILE_DIR, "../../"))
CURRENT_WORK_DIR = os.getcwd()
CONTAINER_NAME = "clickhouse_integration_tests"
@@ -1,6 +1,5 @@
-- Must use `test_00950` database and these tables - they're configured in tests/*_dictionary.xml
create database if not exists test_00950;
use test_00950;
-- Must use `system` database and these tables - they're configured in tests/*_dictionary.xml
use system;
drop table if exists ints;
drop table if exists strings;
drop table if exists decimals;
@@ -270,7 +269,14 @@ select 'dictGetOrDefault', 'complex_cache_decimals' as dict_name, tuple(toUInt64
dictGetOrDefault(dict_name, 'd64', k, toDecimal64(42, 6)),
dictGetOrDefault(dict_name, 'd128', k, toDecimal128(42, 1));

drop table ints;
drop table strings;
drop table decimals;
drop database test_00950;
--
-- Keep the tables, so that the dictionaries can be reloaded correctly and
-- SYSTEM RELOAD DICTIONARIES doesn't break.
-- We could also:
-- * drop the dictionaries -- not possible, they are configured in a .xml;
-- * switch dictionaries to DDL syntax so that they can be dropped -- tedious,
--   because there are a couple dozen of them, and also we need to have some
--   .xml dictionaries in tests so that we test backward compatibility with this
--   format;
-- * unload dictionaries -- no command for that.
--
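Since the source tables now stay behind in the `system` database, the XML-configured dictionaries keep valid sources and can still be refreshed, which is what the comment above is protecting. A one-line sketch of that reload (not part of the test itself):

```sql
-- Works because the dictionaries' source tables were kept.
SYSTEM RELOAD DICTIONARIES;
```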
@@ -1,44 +1,44 @@
EXISTS database_for_dict.t;
EXISTS TABLE database_for_dict.t;
EXISTS DICTIONARY database_for_dict.t;
EXISTS db_01048.t_01048;
EXISTS TABLE db_01048.t_01048;
EXISTS DICTIONARY db_01048.t_01048;

DROP DATABASE IF EXISTS database_for_dict;
CREATE DATABASE database_for_dict Engine = Ordinary;
DROP DATABASE IF EXISTS db_01048;
CREATE DATABASE db_01048 Engine = Ordinary;

DROP TABLE IF EXISTS database_for_dict.t;
EXISTS database_for_dict.t;
EXISTS TABLE database_for_dict.t;
EXISTS DICTIONARY database_for_dict.t;
DROP TABLE IF EXISTS db_01048.t_01048;
EXISTS db_01048.t_01048;
EXISTS TABLE db_01048.t_01048;
EXISTS DICTIONARY db_01048.t_01048;

CREATE TABLE database_for_dict.t (x UInt8) ENGINE = Memory;
EXISTS database_for_dict.t;
EXISTS TABLE database_for_dict.t;
EXISTS DICTIONARY database_for_dict.t;
CREATE TABLE db_01048.t_01048 (x UInt8) ENGINE = Memory;
EXISTS db_01048.t_01048;
EXISTS TABLE db_01048.t_01048;
EXISTS DICTIONARY db_01048.t_01048;

DROP TABLE database_for_dict.t;
EXISTS database_for_dict.t;
EXISTS TABLE database_for_dict.t;
EXISTS DICTIONARY database_for_dict.t;
DROP TABLE db_01048.t_01048;
EXISTS db_01048.t_01048;
EXISTS TABLE db_01048.t_01048;
EXISTS DICTIONARY db_01048.t_01048;

DROP DICTIONARY IF EXISTS t;
CREATE TEMPORARY TABLE t (x UInt8);
EXISTS t; -- Does not work for temporary tables. Maybe have to fix.
EXISTS TABLE t;
EXISTS DICTIONARY t;
DROP DICTIONARY IF EXISTS t_01048;
CREATE TEMPORARY TABLE t_01048 (x UInt8);
EXISTS t_01048; -- Does not work for temporary tables. Maybe have to fix.
EXISTS TABLE t_01048;
EXISTS DICTIONARY t_01048;

CREATE DICTIONARY database_for_dict.t (k UInt64, v String) PRIMARY KEY k LAYOUT(FLAT()) SOURCE(HTTP(URL 'http://example.test/' FORMAT TSV)) LIFETIME(1000);
EXISTS database_for_dict.t;
EXISTS TABLE database_for_dict.t; -- Dictionaries are tables as well. But not all tables are dictionaries.
EXISTS DICTIONARY database_for_dict.t;
CREATE DICTIONARY db_01048.t_01048 (k UInt64, v String) PRIMARY KEY k LAYOUT(FLAT()) SOURCE(HTTP(URL 'http://example.test/' FORMAT TSV)) LIFETIME(1000);
EXISTS db_01048.t_01048;
EXISTS TABLE db_01048.t_01048; -- Dictionaries are tables as well. But not all tables are dictionaries.
EXISTS DICTIONARY db_01048.t_01048;

-- But dictionary-tables cannot be dropped as usual tables.
DROP TABLE database_for_dict.t; -- { serverError 60 }
DROP DICTIONARY database_for_dict.t;
EXISTS database_for_dict.t;
EXISTS TABLE database_for_dict.t;
EXISTS DICTIONARY database_for_dict.t;
DROP TABLE db_01048.t_01048; -- { serverError 60 }
DROP DICTIONARY db_01048.t_01048;
EXISTS db_01048.t_01048;
EXISTS TABLE db_01048.t_01048;
EXISTS DICTIONARY db_01048.t_01048;

DROP DATABASE database_for_dict;
EXISTS database_for_dict.t;
EXISTS TABLE database_for_dict.t;
EXISTS DICTIONARY database_for_dict.t;
DROP DATABASE db_01048;
EXISTS db_01048.t_01048;
EXISTS TABLE db_01048.t_01048;
EXISTS DICTIONARY db_01048.t_01048;
@@ -1,7 +1,7 @@
CREATE TABLE t (a Int) ENGINE = Log;
ATTACH TABLE t; -- { serverError 57 }
ATTACH TABLE IF NOT EXISTS t;
DETACH TABLE t;
ATTACH TABLE IF NOT EXISTS t;
EXISTS TABLE t;
DROP TABLE t;
CREATE TABLE aine (a Int) ENGINE = Log;
ATTACH TABLE aine; -- { serverError 57 }
ATTACH TABLE IF NOT EXISTS aine;
DETACH TABLE aine;
ATTACH TABLE IF NOT EXISTS aine;
EXISTS TABLE aine;
DROP TABLE aine;
@@ -0,0 +1 @@
10000000
tests/queries/0_stateless/01109_inflating_cross_join.sql (new file)

@@ -0,0 +1,7 @@
SET max_memory_usage = 16000000;

SET max_joined_block_size_rows = 10000000;
SELECT count(*) FROM numbers(10000) n1 CROSS JOIN numbers(1000) n2; -- { serverError 241 }

SET max_joined_block_size_rows = 1000;
SELECT count(*) FROM numbers(10000) n1 CROSS JOIN numbers(1000) n2;
@@ -0,0 +1,10 @@
0 2
1 1
2 1
3 1
4 1
5 1
6 1
7 1
8 1
9 1
@@ -0,0 +1,25 @@
set send_logs_level = 'error';

create database if not exists shard_0;
create database if not exists shard_1;

drop table if exists shard_0.shard_01231_distributed_aggregation_memory_efficient;
drop table if exists shard_1.shard_01231_distributed_aggregation_memory_efficient;
drop table if exists ma_dist;

create table shard_0.shard_01231_distributed_aggregation_memory_efficient (x UInt64) engine = MergeTree order by x;
create table shard_1.shard_01231_distributed_aggregation_memory_efficient (x UInt64) engine = MergeTree order by x;

insert into shard_0.shard_01231_distributed_aggregation_memory_efficient select * from numbers(1);
insert into shard_1.shard_01231_distributed_aggregation_memory_efficient select * from numbers(10);

create table ma_dist (x UInt64) ENGINE = Distributed(test_cluster_two_shards_different_databases, '', 'shard_01231_distributed_aggregation_memory_efficient');

set distributed_aggregation_memory_efficient = 1;
set group_by_two_level_threshold = 2;
set max_bytes_before_external_group_by = 16;

select x, count() from ma_dist group by x order by x;

drop table if exists shard_0.shard_01231_distributed_aggregation_memory_efficient;
drop table if exists shard_1.shard_01231_distributed_aggregation_memory_efficient;