Merge branch 'master' into reset-connection-s3
commit 20995fcb13
@ -512,7 +512,7 @@ Both the cache for `local_disk`, and temporary data will be stored in `/tiny_loc
<type>cache</type>
<disk>local_disk</disk>
<path>/tiny_local_cache/</path>
<max_size>10M</max_size>
<max_size_rows>10M</max_size_rows>
<max_file_segment_size>1M</max_file_segment_size>
<cache_on_write_operations>1</cache_on_write_operations>
<do_not_evict_index_and_mark_files>0</do_not_evict_index_and_mark_files>
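For context, the fragment above normally lives inside a `storage_configuration` block. A minimal sketch of how such a cached disk might be declared, reusing the `local_disk` and cache settings shown in the hunk (the exact nesting and disk names in your own config may differ):

```xml
<clickhouse>
    <storage_configuration>
        <disks>
            <!-- plain local disk that backs the cache -->
            <local_disk>
                <path>/local_disk/</path>
            </local_disk>
            <!-- cache layered on top of local_disk, as in the example above -->
            <tiny_local_cache>
                <type>cache</type>
                <disk>local_disk</disk>
                <path>/tiny_local_cache/</path>
                <max_size>10M</max_size>
                <max_file_segment_size>1M</max_file_segment_size>
                <cache_on_write_operations>1</cache_on_write_operations>
            </tiny_local_cache>
        </disks>
    </storage_configuration>
</clickhouse>
```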
@ -1592,6 +1592,10 @@ To manually turn on metrics history collection [`system.metric_log`](../../opera
<table>metric_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<collect_interval_milliseconds>1000</collect_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</metric_log>
</clickhouse>
```
@ -1695,6 +1699,14 @@ Use the following parameters to configure logging:
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` is defined.
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` is defined.
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
- `max_size_rows` – Maximum size of the log buffer in rows. When the number of unflushed log rows reaches `max_size_rows`, the logs are dumped to disk.
    Default: 1048576.
- `reserved_size_rows` – Number of rows pre-allocated in memory for the log buffer.
    Default: 8192.
- `buffer_size_rows_flush_threshold` – Row count threshold; when it is reached, the logs are flushed to disk in the background.
    Default: `max_size_rows / 2`.
- `flush_on_crash` - Whether the logs should be dumped to disk if the server crashes.
    Default: false.
- `storage_policy` – Name of the storage policy to use for the table (optional).
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
@ -1706,6 +1718,10 @@ Use the following parameters to configure logging:
<table>part_log</table>
<partition_by>toMonday(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</part_log>
```
@ -1773,6 +1789,14 @@ Use the following parameters to configure logging:
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` is defined.
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` is defined.
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
- `max_size_rows` – Maximum size of the log buffer in rows. When the number of unflushed log rows reaches `max_size_rows`, the logs are dumped to disk.
    Default: 1048576.
- `reserved_size_rows` – Number of rows pre-allocated in memory for the log buffer.
    Default: 8192.
- `buffer_size_rows_flush_threshold` – Row count threshold; when it is reached, the logs are flushed to disk in the background.
    Default: `max_size_rows / 2`.
- `flush_on_crash` - Whether the logs should be dumped to disk if the server crashes.
    Default: false.
- `storage_policy` – Name of the storage policy to use for the table (optional).
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
@ -1786,6 +1810,10 @@ If the table does not exist, ClickHouse will create it. If the structure of the
<table>query_log</table>
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</query_log>
```
@ -1831,6 +1859,14 @@ Use the following parameters to configure logging:
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` is defined.
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` is defined.
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
- `max_size_rows` – Maximum size of the log buffer in rows. When the number of unflushed log rows reaches `max_size_rows`, the logs are dumped to disk.
    Default: 1048576.
- `reserved_size_rows` – Number of rows pre-allocated in memory for the log buffer.
    Default: 8192.
- `buffer_size_rows_flush_threshold` – Row count threshold; when it is reached, the logs are flushed to disk in the background.
    Default: `max_size_rows / 2`.
- `flush_on_crash` - Whether the logs should be dumped to disk if the server crashes.
    Default: false.
- `storage_policy` – Name of the storage policy to use for the table (optional).
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
@ -1844,6 +1880,10 @@ If the table does not exist, ClickHouse will create it. If the structure of the
<table>query_thread_log</table>
<partition_by>toMonday(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</query_thread_log>
```
@ -1861,6 +1901,14 @@ Use the following parameters to configure logging:
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` is defined.
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` is defined.
- `flush_interval_milliseconds` – Interval for flushing data from the buffer in memory to the table.
- `max_size_rows` – Maximum size of the log buffer in rows. When the number of unflushed log rows reaches `max_size_rows`, the logs are dumped to disk.
    Default: 1048576.
- `reserved_size_rows` – Number of rows pre-allocated in memory for the log buffer.
    Default: 8192.
- `buffer_size_rows_flush_threshold` – Row count threshold; when it is reached, the logs are flushed to disk in the background.
    Default: `max_size_rows / 2`.
- `flush_on_crash` - Whether the logs should be dumped to disk if the server crashes.
    Default: false.
- `storage_policy` – Name of the storage policy to use for the table (optional).
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
@ -1874,6 +1922,10 @@ If the table does not exist, ClickHouse will create it. If the structure of the
<table>query_views_log</table>
<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</query_views_log>
```
@ -1890,6 +1942,14 @@ Parameters:
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` is defined.
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` or `order_by` is defined.
- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.
- `max_size_rows` – Maximum size of the log buffer in rows. When the number of unflushed log rows reaches `max_size_rows`, the logs are dumped to disk.
    Default: 1048576.
- `reserved_size_rows` – Number of rows pre-allocated in memory for the log buffer.
    Default: 8192.
- `buffer_size_rows_flush_threshold` – Row count threshold; when it is reached, the logs are flushed to disk in the background.
    Default: `max_size_rows / 2`.
- `flush_on_crash` - Whether the logs should be dumped to disk if the server crashes.
    Default: false.
- `storage_policy` – Name of the storage policy to use for the table (optional).
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
@ -1901,13 +1961,16 @@ Parameters:
<database>system</database>
<table>text_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
<!-- <partition_by>event_date</partition_by> -->
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine>
</text_log>
</clickhouse>
```

## trace_log {#server_configuration_parameters-trace_log}

Settings for the [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) system table operation.
@ -1920,6 +1983,12 @@ Parameters:
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` is defined.
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/index.md) for a system table. Can't be used if `partition_by` or `order_by` is defined.
- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.
- `max_size_rows` – Maximum size of the log buffer in rows. When the number of unflushed log rows reaches `max_size_rows`, the logs are dumped to disk.
    Default: 1048576.
- `reserved_size_rows` – Number of rows pre-allocated in memory for the log buffer.
    Default: 8192.
- `buffer_size_rows_flush_threshold` – Row count threshold; when it is reached, the logs are flushed to disk in the background.
    Default: `max_size_rows / 2`.
- `storage_policy` – Name of the storage policy to use for the table (optional).
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).
@ -1931,6 +2000,10 @@ The default server configuration file `config.xml` contains the following settin
<table>trace_log</table>
<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</trace_log>
```
@ -1945,9 +2018,18 @@ Parameters:
- `partition_by` — [Custom partitioning key](../../engines/table-engines/mergetree-family/custom-partitioning-key.md) for a system table. Can't be used if `engine` is defined.
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for a system table. Can't be used if `partition_by` is defined.
- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.
- `max_size_rows` – Maximum size of the log buffer in rows. When the number of unflushed log rows reaches `max_size_rows`, the logs are dumped to disk.
    Default: 1048576.
- `reserved_size_rows` – Number of rows pre-allocated in memory for the log buffer.
    Default: 8192.
- `buffer_size_rows_flush_threshold` – Row count threshold; when it is reached, the logs are flushed to disk in the background.
    Default: `max_size_rows / 2`.
- `flush_on_crash` - Whether the logs should be dumped to disk if the server crashes.
    Default: false.
- `storage_policy` – Name of the storage policy to use for the table (optional).

**Example**

```xml
<clickhouse>
<asynchronous_insert_log>
@ -1955,11 +2037,53 @@ Parameters:
<table>asynchronous_insert_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<partition_by>toYYYYMM(event_date)</partition_by>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
<!-- <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine> -->
</asynchronous_insert_log>
</clickhouse>
```

## crash_log {#server_configuration_parameters-crash_log}

Settings for the [crash_log](../../operations/system-tables/crash-log.md) system table operation.

Parameters:

- `database` — Database for storing a table.
- `table` — Table name.
- `partition_by` — [Custom partitioning key](../../engines/table-engines/mergetree-family/custom-partitioning-key.md) for a system table. Can't be used if `engine` is defined.
- `order_by` - [Custom sorting key](../../engines/table-engines/mergetree-family/mergetree.md#order_by) for a system table. Can't be used if `engine` is defined.
- `engine` - [MergeTree Engine Definition](../../engines/table-engines/mergetree-family/index.md) for a system table. Can't be used if `partition_by` or `order_by` is defined.
- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.
- `max_size_rows` – Maximum size of the log buffer in rows. When the number of unflushed log rows reaches `max_size_rows`, the logs are dumped to disk.
    Default: 1048576.
- `reserved_size_rows` – Number of rows pre-allocated in memory for the log buffer.
    Default: 8192.
- `buffer_size_rows_flush_threshold` – Row count threshold; when it is reached, the logs are flushed to disk in the background.
    Default: `max_size_rows / 2`.
- `flush_on_crash` - Whether the logs should be dumped to disk if the server crashes.
    Default: false.
- `storage_policy` – Name of the storage policy to use for the table (optional).
- `settings` - [Additional parameters](../../engines/table-engines/mergetree-family/mergetree.md/#settings) that control the behavior of the MergeTree (optional).

The default server configuration file `config.xml` contains the following settings section:

``` xml
<crash_log>
<database>system</database>
<table>crash_log</table>
<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1024</max_size_rows>
<reserved_size_rows>1024</reserved_size_rows>
<buffer_size_rows_flush_threshold>512</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</crash_log>
```

## query_masking_rules {#query-masking-rules}

Regexp-based rules, which will be applied to queries as well as all log messages before storing them in server logs,
@ -1164,7 +1164,7 @@ Enabled by default.

Compression method used in output Arrow format. Supported codecs: `lz4_frame`, `zstd`, `none` (uncompressed)

Default value: `none`.
Default value: `lz4_frame`.
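The diff above only shows the changed default. The format setting involved here is presumably `output_format_arrow_compression_method`; a hedged sketch of pinning it for the default user profile (so the changed default is irrelevant), assuming the setting name and that it can be set in a profile like other format settings:

```xml
<!-- users.xml fragment (illustrative): force a specific Arrow output codec
     for the default profile; supported values per the text above are
     lz4_frame, zstd, and none. -->
<clickhouse>
    <profiles>
        <default>
            <output_format_arrow_compression_method>zstd</output_format_arrow_compression_method>
        </default>
    </profiles>
</clickhouse>
```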
## ORC format settings {#orc-format-settings}
@ -47,6 +47,10 @@ An example:
<engine>ENGINE = MergeTree PARTITION BY toYYYYMM(event_date) ORDER BY (event_date, event_time) SETTINGS index_granularity = 1024</engine>
-->
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</query_log>
</clickhouse>
```
@ -1058,6 +1058,10 @@ ClickHouse uses threads from the global thread pool
<table>metric_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<collect_interval_milliseconds>1000</collect_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</metric_log>
</clickhouse>
```
@ -1160,7 +1164,14 @@ ClickHouse uses threads from the global thread pool
- `partition_by` — sets a [custom partitioning key](../../operations/server-configuration-parameters/settings.md). Cannot be used if `engine` is used.
- `engine` - sets the [MergeTree Engine settings](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for the system table. Cannot be used if `partition_by` is used.
- `flush_interval_milliseconds` — interval for flushing data from the in-memory buffer to the table.

- `max_size_rows` – maximum size of the log buffer in rows. When the buffer is completely full, the logs are dumped to disk.
    Default value: 1048576.
- `reserved_size_rows` – number of rows pre-allocated for the log buffer.
    Default value: 8192.
- `buffer_size_bytes_flush_threshold` – number of log rows at which the logs start being flushed to disk in non-blocking mode.
    Default value: `max_size / 2`.
- `flush_on_crash` - whether the logs should be dumped to disk in case of an abnormal program termination.
    Default value: false.

**Example**

``` xml
@ -1169,6 +1180,10 @@ ClickHouse uses threads from the global thread pool
<table>part_log</table>
<partition_by>toMonday(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</part_log>
```
@ -1219,10 +1234,18 @@ ClickHouse uses threads from the global thread pool
The following parameters are used to configure logging:

- `database` — database name;
- `table` — name of the table the log will be written to;
- `table` — table name;
- `partition_by` — sets a [custom partitioning key](../../operations/server-configuration-parameters/settings.md). Cannot be used if `engine` is used.
- `engine` - sets the [MergeTree Engine settings](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for the system table. Cannot be used if `partition_by` is used.
- `flush_interval_milliseconds` — interval for flushing data from the in-memory buffer to the table.
- `max_size_rows` – maximum size of the log buffer in rows. When the buffer is completely full, the logs are dumped to disk.
    Default value: 1048576.
- `reserved_size_rows` – number of rows pre-allocated for the log buffer.
    Default value: 8192.
- `buffer_size_bytes_flush_threshold` – number of log rows at which the logs start being flushed to disk in non-blocking mode.
    Default value: `max_size / 2`.
- `flush_on_crash` - whether the logs should be dumped to disk in case of an abnormal program termination.
    Default value: false.

If the table does not exist, ClickHouse will create it. If the structure of the query log changed when the ClickHouse server was updated, the table with the old structure is renamed and a new table is created automatically.
@ -1234,6 +1257,10 @@ ClickHouse uses threads from the global thread pool
<table>query_log</table>
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</query_log>
```
@ -1246,10 +1273,18 @@ ClickHouse uses threads from the global thread pool
The following parameters are used to configure logging:

- `database` — database name;
- `table` — name of the table the log will be written to;
- `table` — table name;
- `partition_by` — sets a [custom partitioning key](../../operations/server-configuration-parameters/settings.md). Cannot be used if `engine` is used.
- `engine` - sets the [MergeTree Engine settings](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for the system table. Cannot be used if `partition_by` is used.
- `flush_interval_milliseconds` — interval for flushing data from the in-memory buffer to the table.
- `max_size_rows` – maximum size of the log buffer in rows. When the buffer is completely full, the logs are dumped to disk.
    Default value: 1048576.
- `reserved_size_rows` – number of rows pre-allocated for the log buffer.
    Default value: 8192.
- `buffer_size_bytes_flush_threshold` – number of log rows at which the logs start being flushed to disk in non-blocking mode.
    Default value: `max_size / 2`.
- `flush_on_crash` - whether the logs should be dumped to disk in case of an abnormal program termination.
    Default value: false.

If the table does not exist, ClickHouse will create it. If the structure of the query log changed when the ClickHouse server was updated, the table with the old structure is renamed and a new table is created automatically.
@ -1261,6 +1296,10 @@ ClickHouse uses threads from the global thread pool
<table>query_thread_log</table>
<partition_by>toMonday(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</query_thread_log>
```
@ -1272,11 +1311,19 @@ ClickHouse uses threads from the global thread pool

The following parameters are used to configure logging:

- `database` – database name.
- `table` – name of the system table where the queries will be logged.
- `partition_by` — sets a [custom partitioning key](../../engines/table-engines/mergetree-family/custom-partitioning-key.md). Cannot be used if the `engine` parameter is set.
- `engine` — sets the [MergeTree Engine settings](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for the system table. Cannot be used if the `partition_by` parameter is set.
- `database` — database name;
- `table` — table name;
- `partition_by` — sets a [custom partitioning key](../../operations/server-configuration-parameters/settings.md). Cannot be used if `engine` is used.
- `engine` - sets the [MergeTree Engine settings](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for the system table. Cannot be used if `partition_by` is used.
- `flush_interval_milliseconds` — interval for flushing data from the in-memory buffer to the table.
- `max_size_rows` – maximum size of the log buffer in rows. When the buffer is completely full, the logs are dumped to disk.
    Default value: 1048576.
- `reserved_size_rows` – number of rows pre-allocated for the log buffer.
    Default value: 8192.
- `buffer_size_bytes_flush_threshold` – number of log rows at which the logs start being flushed to disk in non-blocking mode.
    Default value: `max_size / 2`.
- `flush_on_crash` - whether the logs should be dumped to disk in case of an abnormal program termination.
    Default value: false.

If the table does not exist, ClickHouse will create it. If the structure of the query log changed when the ClickHouse server was updated, the table with the old structure is renamed and a new table is created automatically.
@ -1288,6 +1335,10 @@ ClickHouse uses threads from the global thread pool
<table>query_views_log</table>
<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</query_views_log>
```
@ -1298,11 +1349,19 @@ ClickHouse uses threads from the global thread pool
Parameters:

- `level` — maximum message level (default `Trace`) that will be stored in the table.
- `database` — name of the database for storing the table.
- `table` — name of the table the text messages will be written to.
- `partition_by` — sets a [custom partitioning key](../../engines/table-engines/mergetree-family/custom-partitioning-key.md). Cannot be used if `engine` is used.
- `database` — database name;
- `table` — table name;
- `partition_by` — sets a [custom partitioning key](../../operations/server-configuration-parameters/settings.md). Cannot be used if `engine` is used.
- `engine` - sets the [MergeTree Engine settings](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for the system table. Cannot be used if `partition_by` is used.
- `flush_interval_milliseconds` — interval for flushing data from the in-memory buffer to the table.
- `max_size_rows` – maximum size of the log buffer in rows. When the buffer is completely full, the logs are dumped to disk.
    Default value: 1048576.
- `reserved_size_rows` – number of rows pre-allocated for the log buffer.
    Default value: 8192.
- `buffer_size_bytes_flush_threshold` – number of log rows at which the logs start being flushed to disk in non-blocking mode.
    Default value: `max_size / 2`.
- `flush_on_crash` - whether the logs should be dumped to disk in case of an abnormal program termination.
    Default value: false.

**Example**
```xml
@ -1312,6 +1371,10 @@ ClickHouse uses threads from the global thread pool
<database>system</database>
<table>text_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
<!-- <partition_by>event_date</partition_by> -->
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine>
</text_log>
@ -1323,13 +1386,21 @@ ClickHouse uses threads from the global thread pool

Settings for the [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) system table operation.

Parameters:

- `database` — Database for storing a table.
- `table` — Table name.
- `database` — database name;
- `table` — table name;
- `partition_by` — sets a [custom partitioning key](../../operations/server-configuration-parameters/settings.md). Cannot be used if `engine` is used.
- `engine` - sets the [MergeTree Engine settings](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for the system table. Cannot be used if `partition_by` is used.
- `flush_interval_milliseconds` — Interval for flushing data from the buffer in memory to the table.
- `flush_interval_milliseconds` — interval for flushing data from the in-memory buffer to the table.
- `max_size_rows` – maximum size of the log buffer in rows. When the buffer is completely full, the logs are dumped to disk.
    Default value: 1048576.
- `reserved_size_rows` – number of rows pre-allocated for the log buffer.
    Default value: 8192.
- `buffer_size_bytes_flush_threshold` – number of log rows at which the logs start being flushed to disk in non-blocking mode.
    Default value: `max_size / 2`.
- `flush_on_crash` - whether the logs should be dumped to disk in case of an abnormal program termination.
    Default value: false.

By default, the server configuration file `config.xml` contains the following settings:
@ -1339,9 +1410,84 @@ Parameters:
<table>trace_log</table>
<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
</trace_log>
```
## asynchronous_insert_log {#server_configuration_parameters-asynchronous_insert_log}

Settings for the asynchronous_insert_log system table, which logs asynchronous inserts.

Parameters:

- `database` — database name;
- `table` — table name;
- `partition_by` — sets a [custom partitioning key](../../operations/server-configuration-parameters/settings.md). Cannot be used if `engine` is used.
- `engine` - sets the [MergeTree Engine settings](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for the system table. Cannot be used if `partition_by` is used.
- `flush_interval_milliseconds` — interval for flushing data from the in-memory buffer to the table.
- `max_size_rows` – maximum size of the log buffer in rows. When the buffer is completely full, the logs are dumped to disk.
    Default value: 1048576.
- `reserved_size_rows` – number of rows pre-allocated for the log buffer.
    Default value: 8192.
- `buffer_size_bytes_flush_threshold` – number of log rows at which the logs start being flushed to disk in non-blocking mode.
    Default value: `max_size / 2`.
- `flush_on_crash` - whether the logs should be dumped to disk in case of an abnormal program termination.
    Default value: false.

**Example**

```xml
<clickhouse>
<asynchronous_insert_log>
<database>system</database>
<table>asynchronous_insert_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<partition_by>toYYYYMM(event_date)</partition_by>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<!-- <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine> -->
</asynchronous_insert_log>
</clickhouse>
```
## crash_log {#server_configuration_parameters-crash_log}

Settings for the [crash_log](../../operations/system-tables/crash-log.md) system table.

Parameters:

- `database` — database name;
- `table` — table name;
- `partition_by` — sets a [custom partitioning key](../../operations/server-configuration-parameters/settings.md). Cannot be used if `engine` is used.
- `engine` - sets the [MergeTree Engine settings](../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table) for the system table. Cannot be used if `partition_by` is used.
- `flush_interval_milliseconds` — interval for flushing data from the in-memory buffer to the table.
- `max_size_rows` – maximum size of the log buffer in rows. When the buffer is completely full, the logs are dumped to disk.
    Default value: 1024.
- `reserved_size_rows` – number of rows pre-allocated for the log buffer.
    Default value: 1024.
- `buffer_size_bytes_flush_threshold` – number of log rows at which the logs start being flushed to disk in non-blocking mode.
    Default value: `max_size / 2`.
- `flush_on_crash` - whether the logs should be dumped to disk in case of an abnormal program termination.
    Default value: true.

**Example**

``` xml
<crash_log>
<database>system</database>
<table>crash_log</table>
<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1024</max_size_rows>
<reserved_size_rows>1024</reserved_size_rows>
<buffer_size_rows_flush_threshold>512</buffer_size_rows_flush_threshold>
<flush_on_crash>true</flush_on_crash>
</crash_log>
```
## query_masking_rules {#query-masking-rules}

Regexp-based rules that will be applied to all queries, as well as to all log messages, before they are stored in the server logs,
@ -45,6 +45,10 @@ sidebar_label: "System tables"
<engine>ENGINE = MergeTree PARTITION BY toYYYYMM(event_date) ORDER BY (event_date, event_time) SETTINGS index_granularity = 1024</engine>
-->
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</query_log>
</clickhouse>
```
@ -1035,6 +1035,11 @@ try
/// Initialize merge tree metadata cache
if (config().has("merge_tree_metadata_cache"))
{
global_context->addWarningMessage("The setting 'merge_tree_metadata_cache' is enabled."
" But the feature of 'metadata cache in RocksDB' is experimental and is not ready for production."
" The usage of this feature can lead to data corruption and loss. The setting should be disabled in production."
" See the corresponding report at https://github.com/ClickHouse/ClickHouse/issues/51182");

fs::create_directories(path / "rocksdb/");
size_t size = config().getUInt64("merge_tree_metadata_cache.lru_cache_size", 256 << 20);
bool continue_if_corrupted = config().getBool("merge_tree_metadata_cache.continue_if_corrupted", false);
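For reference, a minimal sketch of the `merge_tree_metadata_cache` block that this code path reads. The key names come from the `config()` calls above and from the commented example removed from `config.xml` later in this commit; the values are only illustrative:

```xml
<clickhouse>
    <!-- Experimental: metadata cache in RocksDB; see the warning added above. -->
    <merge_tree_metadata_cache>
        <!-- read via merge_tree_metadata_cache.lru_cache_size (default 256 MiB) -->
        <lru_cache_size>268435456</lru_cache_size>
        <!-- read via merge_tree_metadata_cache.continue_if_corrupted (default false) -->
        <continue_if_corrupted>false</continue_if_corrupted>
    </merge_tree_metadata_cache>
</clickhouse>
```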
@ -1026,6 +1026,14 @@

<!-- Interval of flushing data. -->
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<!-- Maximal size in lines for the logs. When non-flushed logs amount reaches max_size, logs dumped to the disk. -->
<max_size_rows>1048576</max_size_rows>
<!-- Pre-allocated size in lines for the logs. -->
<reserved_size_rows>8192</reserved_size_rows>
<!-- Lines amount threshold, reaching it launches flushing logs to the disk in background. -->
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<!-- Indication whether logs should be dumped to the disk in case of a crash -->
<flush_on_crash>false</flush_on_crash>

<!-- example of using a different storage policy for a system table -->
<!-- storage_policy>local_ssd</storage_policy -->
@ -1039,6 +1047,11 @@

<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<!-- Indication whether logs should be dumped to the disk in case of a crash -->
<flush_on_crash>false</flush_on_crash>
</trace_log>

<!-- Query thread log. Has information about all threads participated in query execution.
@ -1048,6 +1061,10 @@
<table>query_thread_log</table>
<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</query_thread_log>

<!-- Query views log. Has information about all dependent views associated with a query.
@ -1066,6 +1083,10 @@
<table>part_log</table>
<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</part_log>

<!-- Uncomment to write text log into table.
@ -1075,6 +1096,10 @@
<database>system</database>
<table>text_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
<level></level>
</text_log>
-->
@ -1084,7 +1109,11 @@
<database>system</database>
<table>metric_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<collect_interval_milliseconds>1000</collect_interval_milliseconds>
<flush_on_crash>false</flush_on_crash>
</metric_log>

<!--
@ -1095,6 +1124,10 @@
<database>system</database>
<table>asynchronous_metric_log</table>
<flush_interval_milliseconds>7000</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</asynchronous_metric_log>

<!--
@ -1119,6 +1152,10 @@
<database>system</database>
<table>opentelemetry_span_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</opentelemetry_span_log>

@ -1130,6 +1167,10 @@

<partition_by />
<flush_interval_milliseconds>1000</flush_interval_milliseconds>
<max_size_rows>1024</max_size_rows>
<reserved_size_rows>1024</reserved_size_rows>
<buffer_size_rows_flush_threshold>512</buffer_size_rows_flush_threshold>
<flush_on_crash>true</flush_on_crash>
</crash_log>

<!-- Session log. Stores user log in (successful or not) and log out events.
@ -1142,6 +1183,10 @@

<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</session_log> -->

<!-- Profiling on Processors level. -->
@ -1151,6 +1196,10 @@

<partition_by>toYYYYMM(event_date)</partition_by>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
</processors_profile_log>

<!-- Log of asynchronous inserts. It allows to check status
@ -1161,6 +1210,10 @@
<table>asynchronous_insert_log</table>

<flush_interval_milliseconds>7500</flush_interval_milliseconds>
<max_size_rows>1048576</max_size_rows>
<reserved_size_rows>8192</reserved_size_rows>
<buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
<flush_on_crash>false</flush_on_crash>
<partition_by>event_date</partition_by>
<ttl>event_date + INTERVAL 3 DAY</ttl>
</asynchronous_insert_log>
@ -1418,12 +1471,6 @@
<max_entry_size_in_rows>30000000</max_entry_size_in_rows>
</query_cache>

<!-- Uncomment if enable merge tree metadata cache -->
<!--merge_tree_metadata_cache>
<lru_cache_size>268435456</lru_cache_size>
<continue_if_corrupted>true</continue_if_corrupted>
</merge_tree_metadata_cache-->

<!-- This allows to disable exposing addresses in stack traces for security reasons.
Please be aware that it does not improve security much, but makes debugging much harder.
The addresses that are small offsets from zero will be displayed nevertheless to show nullptr dereferences.
@ -41,9 +41,25 @@ namespace DB
}
}

std::mutex CaresPTRResolver::mutex;
struct AresChannelRAII
{
AresChannelRAII()
{
if (ares_init(&channel) != ARES_SUCCESS)
{
throw DB::Exception(DB::ErrorCodes::DNS_ERROR, "Failed to initialize c-ares channel");
}
}

CaresPTRResolver::CaresPTRResolver(CaresPTRResolver::provider_token) : channel(nullptr)
~AresChannelRAII()
{
ares_destroy(channel);
}

ares_channel channel;
};

CaresPTRResolver::CaresPTRResolver(CaresPTRResolver::provider_token)
{
/*
* ares_library_init is not thread safe. Currently, the only other usage of c-ares seems to be in grpc.
@ -57,34 +73,22 @@ namespace DB
* */
static const auto library_init_result = ares_library_init(ARES_LIB_INIT_ALL);

if (library_init_result != ARES_SUCCESS || ares_init(&channel) != ARES_SUCCESS)
if (library_init_result != ARES_SUCCESS)
{
throw DB::Exception(DB::ErrorCodes::DNS_ERROR, "Failed to initialize c-ares");
}
}

CaresPTRResolver::~CaresPTRResolver()
{
ares_destroy(channel);
/*
* Library initialization is currently done only once in the constructor. Multiple instances of CaresPTRResolver
* will be used in the lifetime of ClickHouse, thus it's problematic to have de-init here.
* In a practical view, it makes little to no sense to de-init a DNS library since DNS requests will happen
* until the end of the program. Hence, ares_library_cleanup() will not be called.
* */
}

std::unordered_set<std::string> CaresPTRResolver::resolve(const std::string & ip)
{
std::lock_guard guard(mutex);
AresChannelRAII channel_raii;

std::unordered_set<std::string> ptr_records;

resolve(ip, ptr_records);
resolve(ip, ptr_records, channel_raii.channel);

if (!wait_and_process())
if (!wait_and_process(channel_raii.channel))
{
cancel_requests();
throw DB::Exception(DB::ErrorCodes::DNS_ERROR, "Failed to complete reverse DNS query for IP {}", ip);
}
@ -93,22 +97,21 @@ namespace DB

std::unordered_set<std::string> CaresPTRResolver::resolve_v6(const std::string & ip)
{
std::lock_guard guard(mutex);
AresChannelRAII channel_raii;

std::unordered_set<std::string> ptr_records;

resolve_v6(ip, ptr_records);
resolve_v6(ip, ptr_records, channel_raii.channel);

if (!wait_and_process())
if (!wait_and_process(channel_raii.channel))
{
cancel_requests();
throw DB::Exception(DB::ErrorCodes::DNS_ERROR, "Failed to complete reverse DNS query for IP {}", ip);
}

return ptr_records;
}

void CaresPTRResolver::resolve(const std::string & ip, std::unordered_set<std::string> & response)
void CaresPTRResolver::resolve(const std::string & ip, std::unordered_set<std::string> & response, ares_channel channel)
{
in_addr addr;

@ -117,7 +120,7 @@ namespace DB
ares_gethostbyaddr(channel, reinterpret_cast<const void*>(&addr), sizeof(addr), AF_INET, callback, &response);
}

void CaresPTRResolver::resolve_v6(const std::string & ip, std::unordered_set<std::string> & response)
void CaresPTRResolver::resolve_v6(const std::string & ip, std::unordered_set<std::string> & response, ares_channel channel)
{
in6_addr addr;
inet_pton(AF_INET6, ip.c_str(), &addr);
@ -125,15 +128,15 @@ namespace DB
ares_gethostbyaddr(channel, reinterpret_cast<const void*>(&addr), sizeof(addr), AF_INET6, callback, &response);
}

bool CaresPTRResolver::wait_and_process()
bool CaresPTRResolver::wait_and_process(ares_channel channel)
{
int sockets[ARES_GETSOCK_MAXNUM];
pollfd pollfd[ARES_GETSOCK_MAXNUM];

while (true)
{
auto readable_sockets = get_readable_sockets(sockets, pollfd);
auto timeout = calculate_timeout();
auto readable_sockets = get_readable_sockets(sockets, pollfd, channel);
auto timeout = calculate_timeout(channel);

int number_of_fds_ready = 0;
if (!readable_sockets.empty())
@ -158,11 +161,11 @@ namespace DB

if (number_of_fds_ready > 0)
{
process_readable_sockets(readable_sockets);
process_readable_sockets(readable_sockets, channel);
}
else
{
process_possible_timeout();
process_possible_timeout(channel);
break;
}
}
@ -170,12 +173,12 @@ namespace DB
return true;
}

void CaresPTRResolver::cancel_requests()
void CaresPTRResolver::cancel_requests(ares_channel channel)
{
ares_cancel(channel);
}

std::span<pollfd> CaresPTRResolver::get_readable_sockets(int * sockets, pollfd * pollfd)
std::span<pollfd> CaresPTRResolver::get_readable_sockets(int * sockets, pollfd * pollfd, ares_channel channel)
{
int sockets_bitmask = ares_getsock(channel, sockets, ARES_GETSOCK_MAXNUM);

@ -205,7 +208,7 @@ namespace DB
return std::span<struct pollfd>(pollfd, number_of_sockets_to_poll);
}

int64_t CaresPTRResolver::calculate_timeout()
int64_t CaresPTRResolver::calculate_timeout(ares_channel channel)
{
timeval tv;
if (auto * tvp = ares_timeout(channel, nullptr, &tv))
@ -218,14 +221,14 @@ namespace DB
return 0;
}

void CaresPTRResolver::process_possible_timeout()
void CaresPTRResolver::process_possible_timeout(ares_channel channel)
{
/* Call ares_process() unconditonally here, even if we simply timed out
above, as otherwise the ares name resolve won't timeout! */
ares_process_fd(channel, ARES_SOCKET_BAD, ARES_SOCKET_BAD);
}

void CaresPTRResolver::process_readable_sockets(std::span<pollfd> readable_sockets)
void CaresPTRResolver::process_readable_sockets(std::span<pollfd> readable_sockets, ares_channel channel)
{
for (auto readable_socket : readable_sockets)
{
@ -28,32 +28,35 @@ namespace DB

public:
explicit CaresPTRResolver(provider_token);
~CaresPTRResolver() override;

/*
* Library initialization is currently done only once in the constructor. Multiple instances of CaresPTRResolver
* will be used in the lifetime of ClickHouse, thus it's problematic to have de-init here.
* In a practical view, it makes little to no sense to de-init a DNS library since DNS requests will happen
* until the end of the program. Hence, ares_library_cleanup() will not be called.
* */
~CaresPTRResolver() override = default;

std::unordered_set<std::string> resolve(const std::string & ip) override;

std::unordered_set<std::string> resolve_v6(const std::string & ip) override;

private:
bool wait_and_process();
bool wait_and_process(ares_channel channel);

void cancel_requests();
void cancel_requests(ares_channel channel);

void resolve(const std::string & ip, std::unordered_set<std::string> & response);
void resolve(const std::string & ip, std::unordered_set<std::string> & response, ares_channel channel);

void resolve_v6(const std::string & ip, std::unordered_set<std::string> & response);
void resolve_v6(const std::string & ip, std::unordered_set<std::string> & response, ares_channel channel);

std::span<pollfd> get_readable_sockets(int * sockets, pollfd * pollfd);
std::span<pollfd> get_readable_sockets(int * sockets, pollfd * pollfd, ares_channel channel);

int64_t calculate_timeout();
int64_t calculate_timeout(ares_channel channel);

void process_possible_timeout();
void process_possible_timeout(ares_channel channel);

void process_readable_sockets(std::span<pollfd> readable_sockets);

ares_channel channel;

static std::mutex mutex;
void process_readable_sockets(std::span<pollfd> readable_sockets, ares_channel channel);
};
}

@ -31,30 +31,25 @@ namespace ErrorCodes
extern const int TIMEOUT_EXCEEDED;
}

namespace
{
constexpr size_t DBMS_SYSTEM_LOG_QUEUE_SIZE = 1048576;
}

ISystemLog::~ISystemLog() = default;


template <typename LogElement>
SystemLogQueue<LogElement>::SystemLogQueue(
const String & table_name_,
size_t flush_interval_milliseconds_,
bool turn_off_logger_)
: log(&Poco::Logger::get("SystemLogQueue (" + table_name_ + ")"))
, flush_interval_milliseconds(flush_interval_milliseconds_)
SystemLogQueue<LogElement>::SystemLogQueue(const SystemLogQueueSettings & settings_)
: log(&Poco::Logger::get("SystemLogQueue (" + settings_.database + "." +settings_.table + ")"))
, settings(settings_)

{
if (turn_off_logger_)
queue.reserve(settings.reserved_size_rows);

if (settings.turn_off_logger)
log->setLevel(0);
}

static thread_local bool recursive_push_call = false;

template <typename LogElement>
void SystemLogQueue<LogElement>::push(const LogElement & element)
void SystemLogQueue<LogElement>::push(LogElement&& element)
{
/// It is possible that the method will be called recursively.
/// Better to drop these events to avoid complications.
@ -70,7 +65,7 @@ void SystemLogQueue<LogElement>::push(const LogElement & element)
MemoryTrackerBlockerInThread temporarily_disable_memory_tracker;

/// Should not log messages under mutex.
bool queue_is_half_full = false;
bool buffer_size_rows_flush_threshold_exceeded = false;

{
std::unique_lock lock(mutex);
@ -78,9 +73,9 @@ void SystemLogQueue<LogElement>::push(const LogElement & element)
if (is_shutdown)
return;

if (queue.size() == DBMS_SYSTEM_LOG_QUEUE_SIZE / 2)
if (queue.size() == settings.buffer_size_rows_flush_threshold)
{
queue_is_half_full = true;
buffer_size_rows_flush_threshold_exceeded = true;

// The queue more than half full, time to flush.
// We only check for strict equality, because messages are added one
@ -94,7 +89,7 @@ void SystemLogQueue<LogElement>::push(const LogElement & element)
flush_event.notify_all();
}

if (queue.size() >= DBMS_SYSTEM_LOG_QUEUE_SIZE)
if (queue.size() >= settings.max_size_rows)
{
// Ignore all further entries until the queue is flushed.
// Log a message about that. Don't spam it -- this might be especially
@ -108,27 +103,28 @@ void SystemLogQueue<LogElement>::push(const LogElement & element)
// TextLog sets its logger level to 0, so this log is a noop and
// there is no recursive logging.
lock.unlock();
LOG_ERROR(log, "Queue is full for system log '{}' at {}", demangle(typeid(*this).name()), queue_front_index);
LOG_ERROR(log, "Queue is full for system log '{}' at {}. max_size_rows {}",
demangle(typeid(*this).name()),
queue_front_index,
settings.max_size_rows);
}

return;
}

queue.push_back(element);
queue.push_back(std::move(element));
}

if (queue_is_half_full)
LOG_INFO(log, "Queue is half full for system log '{}'.", demangle(typeid(*this).name()));
if (buffer_size_rows_flush_threshold_exceeded)
LOG_INFO(log, "Queue is half full for system log '{}'. buffer_size_rows_flush_threshold {}",
demangle(typeid(*this).name()), settings.buffer_size_rows_flush_threshold);
}

template <typename LogElement>
void SystemLogBase<LogElement>::flush(bool force)
void SystemLogQueue<LogElement>::handleCrash()
{
uint64_t this_thread_requested_offset = queue->notifyFlush(force);
if (this_thread_requested_offset == uint64_t(-1))
return;

queue->waitFlush(this_thread_requested_offset);
if (settings.notify_flush_on_crash)
notifyFlush(/* force */ true);
}

template <typename LogElement>
@ -185,11 +181,13 @@ void SystemLogQueue<LogElement>::confirm(uint64_t to_flush_end)
|
||||
}
|
||||
|
||||
template <typename LogElement>
|
||||
typename SystemLogQueue<LogElement>::Index SystemLogQueue<LogElement>::pop(std::vector<LogElement>& output, bool& should_prepare_tables_anyway, bool& exit_this_thread)
|
||||
typename SystemLogQueue<LogElement>::Index SystemLogQueue<LogElement>::pop(std::vector<LogElement> & output,
|
||||
bool & should_prepare_tables_anyway,
|
||||
bool & exit_this_thread)
|
||||
{
|
||||
std::unique_lock lock(mutex);
|
||||
flush_event.wait_for(lock,
|
||||
std::chrono::milliseconds(flush_interval_milliseconds),
|
||||
std::chrono::milliseconds(settings.flush_interval_milliseconds),
|
||||
[&] ()
|
||||
{
|
||||
return requested_flush_up_to > flushed_up_to || is_shutdown || is_force_prepare_tables;
|
||||
@ -219,13 +217,28 @@ void SystemLogQueue<LogElement>::shutdown()
|
||||
|
||||
template <typename LogElement>
|
||||
SystemLogBase<LogElement>::SystemLogBase(
|
||||
const String& table_name_,
|
||||
size_t flush_interval_milliseconds_,
|
||||
const SystemLogQueueSettings & settings_,
|
||||
std::shared_ptr<SystemLogQueue<LogElement>> queue_)
|
||||
: queue(queue_ ? queue_ : std::make_shared<SystemLogQueue<LogElement>>(table_name_, flush_interval_milliseconds_))
|
||||
: queue(queue_ ? queue_ : std::make_shared<SystemLogQueue<LogElement>>(settings_))
|
||||
{
|
||||
}
|
||||
|
||||
template <typename LogElement>
|
||||
void SystemLogBase<LogElement>::flush(bool force)
|
||||
{
|
||||
uint64_t this_thread_requested_offset = queue->notifyFlush(force);
|
||||
if (this_thread_requested_offset == uint64_t(-1))
|
||||
return;
|
||||
|
||||
queue->waitFlush(this_thread_requested_offset);
|
||||
}
|
||||
|
||||
template <typename LogElement>
|
||||
void SystemLogBase<LogElement>::handleCrash()
|
||||
{
|
||||
queue->handleCrash();
|
||||
}
|
||||
|
||||
template <typename LogElement>
|
||||
void SystemLogBase<LogElement>::startup()
|
||||
{
|
||||
@ -234,9 +247,9 @@ void SystemLogBase<LogElement>::startup()
|
||||
}
|
||||
|
||||
template <typename LogElement>
|
||||
void SystemLogBase<LogElement>::add(const LogElement & element)
|
||||
void SystemLogBase<LogElement>::add(LogElement element)
|
||||
{
|
||||
queue->push(element);
|
||||
queue->push(std::move(element));
|
||||
}
|
||||
|
||||
template <typename LogElement>
|
||||
|
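Editor's note: the push() path above distinguishes a soft threshold (buffer_size_rows_flush_threshold, which only wakes the flushing thread) from a hard cap (max_size_rows, beyond which entries are dropped rather than blocking the producer). A minimal standalone sketch of that two-threshold policy, with made-up names and none of the real SystemLogQueue machinery, might look like this:

    #include <condition_variable>
    #include <cstddef>
    #include <mutex>
    #include <vector>

    // Illustrative only: a bounded buffer with a "notify" threshold and a hard cap.
    template <typename T>
    class BoundedLogBuffer
    {
    public:
        BoundedLogBuffer(size_t flush_threshold_, size_t max_size_)
            : flush_threshold(flush_threshold_), max_size(max_size_) {}

        /// Returns false when the element had to be dropped because the cap was reached.
        bool push(T element)
        {
            std::unique_lock lock(mutex);
            if (items.size() >= max_size)
                return false;               // hard cap: drop, never block the caller
            items.push_back(std::move(element));
            if (items.size() == flush_threshold)
                flush_event.notify_all();   // soft threshold: ask the consumer to flush early
            return true;
        }

    private:
        std::mutex mutex;
        std::condition_variable flush_event;
        std::vector<T> items;
        const size_t flush_threshold;
        const size_t max_size;
    };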
@@ -62,6 +62,9 @@ public:

    virtual void stopFlushThread() = 0;

    /// Handles crash, flushes log without blocking if notify_flush_on_crash is set
    virtual void handleCrash() = 0;

    virtual ~ISystemLog();

    virtual void savingThreadFunction() = 0;
@@ -73,24 +76,36 @@ protected:
    bool is_shutdown = false;
};

struct SystemLogQueueSettings
{
    String database;
    String table;
    size_t reserved_size_rows;
    size_t max_size_rows;
    size_t buffer_size_rows_flush_threshold;
    size_t flush_interval_milliseconds;
    bool notify_flush_on_crash;
    bool turn_off_logger;
};

template <typename LogElement>
class SystemLogQueue
{
    using Index = uint64_t;

public:
    SystemLogQueue(
        const String & table_name_,
        size_t flush_interval_milliseconds_,
        bool turn_off_logger_ = false);
    SystemLogQueue(const SystemLogQueueSettings & settings_);

    void shutdown();

    // producer methods
    void push(const LogElement & element);
    void push(LogElement && element);
    Index notifyFlush(bool should_prepare_tables_anyway);
    void waitFlush(Index expected_flushed_up_to);

    /// Handles crash, flushes log without blocking if notify_flush_on_crash is set
    void handleCrash();

    // consumer methods
    Index pop(std::vector<LogElement> & output, bool & should_prepare_tables_anyway, bool & exit_this_thread);
    void confirm(Index to_flush_end);
@@ -120,7 +135,8 @@ private:
    bool is_shutdown = false;

    std::condition_variable flush_event;
    const size_t flush_interval_milliseconds;

    const SystemLogQueueSettings settings;
};


@@ -131,8 +147,7 @@ public:
    using Self = SystemLogBase;

    SystemLogBase(
        const String & table_name_,
        size_t flush_interval_milliseconds_,
        const SystemLogQueueSettings & settings_,
        std::shared_ptr<SystemLogQueue<LogElement>> queue_ = nullptr);

    void startup() override;
@@ -140,17 +155,25 @@ public:
    /** Append a record into log.
      * Writing to table will be done asynchronously and in case of failure, record could be lost.
      */
    void add(const LogElement & element);
    void add(LogElement element);

    /// Flush data in the buffer to disk. Block the thread until the data is stored on disk.
    void flush(bool force) override;

    /// Handles crash, flushes log without blocking if notify_flush_on_crash is set
    void handleCrash() override;

    /// Non-blocking flush data in the buffer to disk.
    void notifyFlush(bool force);

    String getName() const override { return LogElement::name(); }

    static const char * getDefaultOrderBy() { return "event_date, event_time"; }
    static consteval size_t getDefaultMaxSize() { return 1048576; }
    static consteval size_t getDefaultReservedSize() { return 8192; }
    static consteval size_t getDefaultFlushIntervalMilliseconds() { return 7500; }
    static consteval bool shouldNotifyFlushOnCrash() { return false; }
    static consteval bool shouldTurnOffLogger() { return false; }

protected:
    std::shared_ptr<SystemLogQueue<LogElement>> queue;
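Editor's note: the new SystemLogQueueSettings struct above bundles everything the queue previously received as separate constructor arguments. A sketch of filling it by hand and handing it to a queue; in the server the values come from the config, the literals below are only the defaults quoted in this diff:

    // Sketch only: populating SystemLogQueueSettings directly instead of via config.
    DB::SystemLogQueueSettings settings;
    settings.database = "system";
    settings.table = "text_log";
    settings.reserved_size_rows = 8192;                  // pre-allocated capacity
    settings.max_size_rows = 1048576;                    // hard cap, entries dropped beyond it
    settings.buffer_size_rows_flush_threshold = 524288;  // background flush trigger
    settings.flush_interval_milliseconds = 7500;
    settings.notify_flush_on_crash = false;
    settings.turn_off_logger = false;

    auto queue = std::make_shared<DB::SystemLogQueue<DB::TextLogElement>>(settings);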
@@ -1440,7 +1440,7 @@ void ZooKeeper::logOperationIfNeeded(const ZooKeeperRequestPtr & request, const
            elem.thread_id = request->thread_id;
            elem.query_id = request->query_id;
        }
        maybe_zk_log->add(elem);
        maybe_zk_log->add(std::move(elem));
    }
}
#else
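Editor's note: many hunks in this commit switch call sites from add(elem) to add(std::move(elem)); since add() now takes the element by value, a caller that is done with the element can hand over its buffers instead of copying them. A fully standalone sketch of the convention, with hypothetical types unrelated to ClickHouse:

    #include <string>
    #include <utility>
    #include <vector>

    // Standalone sketch of the by-value + std::move convention used by add() above.
    struct ExampleElement { std::string payload; };

    struct ExampleLog
    {
        void add(ExampleElement element) { queue.push_back(std::move(element)); }
        std::vector<ExampleElement> queue;
    };

    int main()
    {
        ExampleLog log;
        ExampleElement elem{std::string(1024, 'x')};
        log.add(std::move(elem));   // the string buffer is moved, not copied
    }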
@ -9,34 +9,35 @@ namespace DB
|
||||
{
|
||||
TEST(Common, ReverseDNS)
|
||||
{
|
||||
auto addresses = std::vector<std::string>({
|
||||
"8.8.8.8", "2001:4860:4860::8888", // dns.google
|
||||
"142.250.219.35", // google.com
|
||||
"157.240.12.35", // facebook
|
||||
"208.84.244.116", "2600:1419:c400::214:c410", //www.terra.com.br,
|
||||
"127.0.0.1", "::1"
|
||||
});
|
||||
|
||||
auto func = [&]()
|
||||
{
|
||||
// Good random seed, good engine
|
||||
auto rnd1 = std::mt19937(std::random_device{}());
|
||||
|
||||
for (int i = 0; i < 50; ++i)
|
||||
for (int i = 0; i < 10; ++i)
|
||||
{
|
||||
auto & dns_resolver_instance = DNSResolver::instance();
|
||||
// unfortunately, DNS cache can't be disabled because we might end up causing a DDoS attack
|
||||
// dns_resolver_instance.setDisableCacheFlag();
|
||||
dns_resolver_instance.setDisableCacheFlag();
|
||||
|
||||
auto addr_index = rnd1() % addresses.size();
|
||||
auto val1 = rnd1() % static_cast<uint32_t>((pow(2, 31) - 1));
|
||||
auto val2 = rnd1() % static_cast<uint32_t>((pow(2, 31) - 1));
|
||||
auto val3 = rnd1() % static_cast<uint32_t>((pow(2, 31) - 1));
|
||||
auto val4 = rnd1() % static_cast<uint32_t>((pow(2, 31) - 1));
|
||||
|
||||
[[maybe_unused]] auto result = dns_resolver_instance.reverseResolve(Poco::Net::IPAddress{ addresses[addr_index] });
|
||||
uint32_t ipv4_buffer[1] = {
|
||||
static_cast<uint32_t>(val1)
|
||||
};
|
||||
|
||||
// will not assert either because some of the IP addresses might change in the future and
|
||||
// this test will become flaky
|
||||
// ASSERT_TRUE(!result.empty());
|
||||
uint32_t ipv6_buffer[4] = {
|
||||
static_cast<uint32_t>(val1),
|
||||
static_cast<uint32_t>(val2),
|
||||
static_cast<uint32_t>(val3),
|
||||
static_cast<uint32_t>(val4)
|
||||
};
|
||||
|
||||
dns_resolver_instance.reverseResolve(Poco::Net::IPAddress{ ipv4_buffer, sizeof(ipv4_buffer)});
|
||||
dns_resolver_instance.reverseResolve(Poco::Net::IPAddress{ ipv6_buffer, sizeof(ipv6_buffer)});
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
auto number_of_threads = 200u;
|
||||
|
@ -805,21 +805,10 @@ protected:
|
||||
const String & user_name,
|
||||
const String & password,
|
||||
Session & session,
|
||||
Messaging::MessageTransport & mt,
|
||||
const Poco::Net::SocketAddress & address)
|
||||
{
|
||||
try
|
||||
{
|
||||
session.authenticate(user_name, password, address);
|
||||
}
|
||||
catch (const Exception &)
|
||||
{
|
||||
mt.send(
|
||||
Messaging::ErrorOrNoticeResponse(Messaging::ErrorOrNoticeResponse::ERROR, "28P01", "Invalid user or password"),
|
||||
true);
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
virtual void authenticate(
|
||||
@ -839,10 +828,10 @@ public:
|
||||
void authenticate(
|
||||
const String & user_name,
|
||||
Session & session,
|
||||
Messaging::MessageTransport & mt,
|
||||
[[maybe_unused]] Messaging::MessageTransport & mt,
|
||||
const Poco::Net::SocketAddress & address) override
|
||||
{
|
||||
return setPassword(user_name, "", session, mt, address);
|
||||
return setPassword(user_name, "", session, address);
|
||||
}
|
||||
|
||||
AuthenticationType getType() const override
|
||||
@ -866,7 +855,7 @@ public:
|
||||
if (type == Messaging::FrontMessageType::PASSWORD_MESSAGE)
|
||||
{
|
||||
std::unique_ptr<Messaging::PasswordMessage> password = mt.receive<Messaging::PasswordMessage>();
|
||||
return setPassword(user_name, password->password, session, mt, address);
|
||||
return setPassword(user_name, password->password, session, address);
|
||||
}
|
||||
else
|
||||
throw Exception(ErrorCodes::UNEXPECTED_PACKET_FROM_CLIENT,
|
||||
@ -901,7 +890,10 @@ public:
|
||||
Messaging::MessageTransport & mt,
|
||||
const Poco::Net::SocketAddress & address)
|
||||
{
|
||||
const AuthenticationType user_auth_type = session.getAuthenticationTypeOrLogInFailure(user_name);
|
||||
AuthenticationType user_auth_type;
|
||||
try
|
||||
{
|
||||
user_auth_type = session.getAuthenticationTypeOrLogInFailure(user_name);
|
||||
if (type_to_method.find(user_auth_type) != type_to_method.end())
|
||||
{
|
||||
type_to_method[user_auth_type]->authenticate(user_name, session, mt, address);
|
||||
@ -909,12 +901,19 @@ public:
|
||||
LOG_DEBUG(log, "Authentication for user {} was successful.", user_name);
|
||||
return;
|
||||
}
|
||||
|
||||
mt.send(
|
||||
Messaging::ErrorOrNoticeResponse(Messaging::ErrorOrNoticeResponse::ERROR, "0A000", "Authentication method is not supported"),
|
||||
}
|
||||
catch (const Exception&)
|
||||
{
|
||||
mt.send(Messaging::ErrorOrNoticeResponse(Messaging::ErrorOrNoticeResponse::ERROR, "28P01", "Invalid user or password"),
|
||||
true);
|
||||
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Authentication type {} is not supported.", user_auth_type);
|
||||
throw;
|
||||
}
|
||||
|
||||
mt.send(Messaging::ErrorOrNoticeResponse(Messaging::ErrorOrNoticeResponse::ERROR, "0A000", "Authentication method is not supported"),
|
||||
true);
|
||||
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Authentication method is not supported: {}", user_auth_type);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
@@ -466,6 +466,10 @@ private:
        if (collectCrashLog)
            collectCrashLog(sig, thread_num, query_id, stack_trace);

#ifndef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
        Context::getGlobalContextInstance()->handleCrash();
#endif

        /// Send crash report to developers (if configured)
        if (sig != SanitizerTrap)
        {
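Editor's note: the chain added above runs signal handler -> Context::handleCrash() -> SystemLogs::handleCrash() -> each queue's handleCrash(), which only notifies the flushing thread and never blocks inside the signal handler. A rough, simplified sketch of that control flow with hypothetical types:

    #include <vector>

    // Illustrative only: the crash-time flush chain, not the real classes.
    struct SketchQueue
    {
        bool notify_flush_on_crash = false;
        void notifyFlush(bool force) { (void)force; /* wake the saving thread without blocking */ }
        void handleCrash() { if (notify_flush_on_crash) notifyFlush(/* force */ true); }
    };

    struct SketchSystemLogs
    {
        std::vector<SketchQueue *> logs;
        void handleCrash() { for (auto * log : logs) log->handleCrash(); }
    };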
@ -147,7 +147,7 @@ void AsynchronousBoundedReadBuffer::appendToPrefetchLog(
|
||||
};
|
||||
|
||||
if (prefetches_log)
|
||||
prefetches_log->add(elem);
|
||||
prefetches_log->add(std::move(elem));
|
||||
}
|
||||
|
||||
|
||||
|
@ -108,7 +108,7 @@ void CachedOnDiskReadBufferFromFile::appendFilesystemCacheLog(
|
||||
break;
|
||||
}
|
||||
|
||||
cache_log->add(elem);
|
||||
cache_log->add(std::move(elem));
|
||||
}
|
||||
|
||||
void CachedOnDiskReadBufferFromFile::initialize(size_t offset, size_t size)
|
||||
|
@ -171,7 +171,7 @@ void FileSegmentRangeWriter::appendFilesystemCacheLog(const FileSegment & file_s
|
||||
.profile_counters = nullptr,
|
||||
};
|
||||
|
||||
cache_log->add(elem);
|
||||
cache_log->add(std::move(elem));
|
||||
}
|
||||
|
||||
void FileSegmentRangeWriter::completeFileSegment()
|
||||
|
@ -112,7 +112,7 @@ void ReadBufferFromRemoteFSGather::appendUncachedReadInfo()
|
||||
.file_segment_size = current_object.bytes_size,
|
||||
.read_from_cache_attempted = false,
|
||||
};
|
||||
cache_log->add(elem);
|
||||
cache_log->add(std::move(elem));
|
||||
}
|
||||
|
||||
IAsynchronousReader::Result ReadBufferFromRemoteFSGather::readInto(char * data, size_t size, size_t offset, size_t ignore)
|
||||
|
@ -22,7 +22,14 @@ namespace ErrorCodes
|
||||
}
|
||||
|
||||
class IMetadataStorage;
|
||||
struct UnlinkMetadataFileOperationOutcome;
|
||||
|
||||
/// Return the result of operation to the caller.
|
||||
/// It is used in `IDiskObjectStorageOperation::finalize` after metadata transaction executed to make decision on blob removal.
|
||||
struct UnlinkMetadataFileOperationOutcome
|
||||
{
|
||||
UInt32 num_hardlinks = std::numeric_limits<UInt32>::max();
|
||||
};
|
||||
|
||||
using UnlinkMetadataFileOperationOutcomePtr = std::shared_ptr<UnlinkMetadataFileOperationOutcome>;
|
||||
|
||||
/// Tries to provide some "transactions" interface, which allow
|
||||
|
@ -244,15 +244,6 @@ private:
|
||||
std::unique_ptr<WriteFileOperation> write_operation;
|
||||
};
|
||||
|
||||
/// Return the result of operation to the caller.
|
||||
/// It is used in `IDiskObjectStorageOperation::finalize` after metadata transaction executed to make decision on blob removal.
|
||||
struct UnlinkMetadataFileOperationOutcome
|
||||
{
|
||||
UInt32 num_hardlinks = std::numeric_limits<UInt32>::max();
|
||||
};
|
||||
|
||||
using UnlinkMetadataFileOperationOutcomePtr = std::shared_ptr<UnlinkMetadataFileOperationOutcome>;
|
||||
|
||||
struct UnlinkMetadataFileOperation final : public IMetadataOperation
|
||||
{
|
||||
const UnlinkMetadataFileOperationOutcomePtr outcome = std::make_shared<UnlinkMetadataFileOperationOutcome>();
|
||||
|
@ -1919,25 +1919,6 @@ ColumnPtr executeStringInteger(const ColumnsWithTypeAndName & arguments, const A
|
||||
return executeAggregateAddition(arguments, result_type, input_rows_count);
|
||||
}
|
||||
|
||||
/// Special case - one or both arguments are IPv4
|
||||
if (isIPv4(arguments[0].type) || isIPv4(arguments[1].type))
|
||||
{
|
||||
ColumnsWithTypeAndName new_arguments {
|
||||
{
|
||||
isIPv4(arguments[0].type) ? castColumn(arguments[0], std::make_shared<DataTypeUInt32>()) : arguments[0].column,
|
||||
isIPv4(arguments[0].type) ? std::make_shared<DataTypeUInt32>() : arguments[0].type,
|
||||
arguments[0].name,
|
||||
},
|
||||
{
|
||||
isIPv4(arguments[1].type) ? castColumn(arguments[1], std::make_shared<DataTypeUInt32>()) : arguments[1].column,
|
||||
isIPv4(arguments[1].type) ? std::make_shared<DataTypeUInt32>() : arguments[1].type,
|
||||
arguments[1].name
|
||||
}
|
||||
};
|
||||
|
||||
return executeImpl(new_arguments, result_type, input_rows_count);
|
||||
}
|
||||
|
||||
/// Special case when the function is plus or minus, one of arguments is Date/DateTime and another is Interval.
|
||||
if (auto function_builder = getFunctionForIntervalArithmetic(arguments[0].type, arguments[1].type, context))
|
||||
{
|
||||
@ -1991,6 +1972,25 @@ ColumnPtr executeStringInteger(const ColumnsWithTypeAndName & arguments, const A
|
||||
return wrapInNullable(res, arguments, result_type, input_rows_count);
|
||||
}
|
||||
|
||||
/// Special case - one or both arguments are IPv4
|
||||
if (isIPv4(arguments[0].type) || isIPv4(arguments[1].type))
|
||||
{
|
||||
ColumnsWithTypeAndName new_arguments {
|
||||
{
|
||||
isIPv4(arguments[0].type) ? castColumn(arguments[0], std::make_shared<DataTypeUInt32>()) : arguments[0].column,
|
||||
isIPv4(arguments[0].type) ? std::make_shared<DataTypeUInt32>() : arguments[0].type,
|
||||
arguments[0].name,
|
||||
},
|
||||
{
|
||||
isIPv4(arguments[1].type) ? castColumn(arguments[1], std::make_shared<DataTypeUInt32>()) : arguments[1].column,
|
||||
isIPv4(arguments[1].type) ? std::make_shared<DataTypeUInt32>() : arguments[1].type,
|
||||
arguments[1].name
|
||||
}
|
||||
};
|
||||
|
||||
return executeImpl2(new_arguments, result_type, input_rows_count, right_nullmap);
|
||||
}
|
||||
|
||||
const auto * const left_generic = left_argument.type.get();
|
||||
const auto * const right_generic = right_argument.type.get();
|
||||
ColumnPtr res;
|
||||
|
@ -438,7 +438,7 @@ try
|
||||
elem.flush_query_id = flush_query_id;
|
||||
elem.exception = flush_exception;
|
||||
elem.status = flush_exception.empty() ? Status::Ok : Status::FlushError;
|
||||
log.add(elem);
|
||||
log.add(std::move(elem));
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
@ -608,7 +608,7 @@ try
|
||||
if (!elem.exception.empty())
|
||||
{
|
||||
elem.status = AsynchronousInsertLogElement::ParsingError;
|
||||
insert_log->add(elem);
|
||||
insert_log->add(std::move(elem));
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -2903,16 +2903,6 @@ std::map<String, zkutil::ZooKeeperPtr> Context::getAuxiliaryZooKeepers() const
|
||||
}
|
||||
|
||||
#if USE_ROCKSDB
|
||||
MergeTreeMetadataCachePtr Context::getMergeTreeMetadataCache() const
|
||||
{
|
||||
auto cache = tryGetMergeTreeMetadataCache();
|
||||
if (!cache)
|
||||
throw Exception(
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"Merge tree metadata cache is not initialized, please add config merge_tree_metadata_cache in config.xml and restart");
|
||||
return cache;
|
||||
}
|
||||
|
||||
MergeTreeMetadataCachePtr Context::tryGetMergeTreeMetadataCache() const
|
||||
{
|
||||
return shared->merge_tree_metadata_cache;
|
||||
@ -3210,6 +3200,12 @@ void Context::initializeMergeTreeMetadataCache(const String & dir, size_t size)
|
||||
}
|
||||
#endif
|
||||
|
||||
/// Call after unexpected crash happen.
|
||||
void Context::handleCrash() const
|
||||
{
|
||||
shared->system_logs->handleCrash();
|
||||
}
|
||||
|
||||
bool Context::hasTraceCollector() const
|
||||
{
|
||||
return shared->hasTraceCollector();
|
||||
|
@ -889,7 +889,6 @@ public:
|
||||
void setClientProtocolVersion(UInt64 version);
|
||||
|
||||
#if USE_ROCKSDB
|
||||
MergeTreeMetadataCachePtr getMergeTreeMetadataCache() const;
|
||||
MergeTreeMetadataCachePtr tryGetMergeTreeMetadataCache() const;
|
||||
#endif
|
||||
|
||||
@ -998,6 +997,9 @@ public:
|
||||
void initializeMergeTreeMetadataCache(const String & dir, size_t size);
|
||||
#endif
|
||||
|
||||
/// Call after unexpected crash happen.
|
||||
void handleCrash() const;
|
||||
|
||||
bool hasTraceCollector() const;
|
||||
|
||||
/// Nullptr if the query log is not ready for this moment.
|
||||
|
@@ -83,9 +83,6 @@ void collectCrashLog(Int32 signal, UInt64 thread_id, const String & query_id, co
        stack_trace.toStringEveryLine([&trace_full](std::string_view line) { trace_full.push_back(line); });

        CrashLogElement element{static_cast<time_t>(time / 1000000000), time, signal, thread_id, query_id, trace, trace_full};
        crash_log_owned->add(element);
        /// Notify savingThreadFunction to start flushing crash log
        /// Crash log is storing in parallel with the signal processing thread.
        crash_log_owned->notifyFlush(true);
        crash_log_owned->add(std::move(element));
    }
}
@@ -45,6 +45,11 @@ public:
    {
        crash_log = crash_log_;
    }

    static consteval size_t getDefaultMaxSize() { return 1024; }
    static consteval size_t getDefaultReservedSize() { return 1024; }
    static consteval size_t getDefaultFlushIntervalMilliseconds() { return 1000; }
    static consteval size_t shouldNotifyFlushOnCrash() { return true; }
};

}
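Editor's note: CrashLog keeps a tiny buffer and flushes on crash by shadowing the consteval defaults declared in SystemLogBase. A compilable sketch of that pattern, using hypothetical struct names rather than the real classes:

    #include <cstddef>

    // Illustrative pattern only: a derived type shadowing the base's consteval defaults.
    struct BaseDefaults
    {
        static consteval size_t getDefaultMaxSize() { return 1048576; }
        static consteval bool shouldNotifyFlushOnCrash() { return false; }
    };

    struct TinyCrashDefaults : BaseDefaults
    {
        static consteval size_t getDefaultMaxSize() { return 1024; }      // keep the buffer small
        static consteval bool shouldNotifyFlushOnCrash() { return true; } // dump it when we crash
    };

    static_assert(TinyCrashDefaults::getDefaultMaxSize() == 1024);
    static_assert(TinyCrashDefaults::shouldNotifyFlushOnCrash());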
@@ -113,7 +113,7 @@ void MetricLog::metricThreadFunction()
            elem.current_metrics[i] = CurrentMetrics::values[i];
        }

        this->add(elem);
        this->add(std::move(elem));

        /// We will record current time into table but align it to regular time intervals to avoid time drift.
        /// We may drop some time points if the server is overloaded and recording took too much time.
@@ -242,7 +242,7 @@ bool PartLog::addNewParts(

            elem.profile_counters = part_log_entry.profile_counters;

            part_log->add(elem);
            part_log->add(std::move(elem));
        }
    }
    catch (...)
@ -73,12 +73,5 @@ void ProcessorProfileLogElement::appendToBlock(MutableColumns & columns) const
|
||||
columns[i++]->insert(output_bytes);
|
||||
}
|
||||
|
||||
ProcessorsProfileLog::ProcessorsProfileLog(ContextPtr context_, const String & database_name_,
|
||||
const String & table_name_, const String & storage_def_,
|
||||
size_t flush_interval_milliseconds_)
|
||||
: SystemLog<ProcessorProfileLogElement>(context_, database_name_, table_name_,
|
||||
storage_def_, flush_interval_milliseconds_)
|
||||
{
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -45,12 +45,7 @@ struct ProcessorProfileLogElement
|
||||
class ProcessorsProfileLog : public SystemLog<ProcessorProfileLogElement>
|
||||
{
|
||||
public:
|
||||
ProcessorsProfileLog(
|
||||
ContextPtr context_,
|
||||
const String & database_name_,
|
||||
const String & table_name_,
|
||||
const String & storage_def_,
|
||||
size_t flush_interval_milliseconds_);
|
||||
using SystemLog<ProcessorProfileLogElement>::SystemLog;
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -240,7 +240,7 @@ private:
|
||||
|
||||
if (session != sessions.end() && session->second->close_cycle <= current_cycle)
|
||||
{
|
||||
if (!session->second.unique())
|
||||
if (session->second.use_count() != 1)
|
||||
{
|
||||
LOG_TEST(log, "Delay closing session with session_id: {}, user_id: {}", key.second, key.first);
|
||||
|
||||
|
@ -227,7 +227,7 @@ void SessionLog::addLoginSuccess(const UUID & auth_id, std::optional<String> ses
|
||||
for (const auto & s : settings.allChanged())
|
||||
log_entry.settings.emplace_back(s.getName(), s.getValueString());
|
||||
|
||||
add(log_entry);
|
||||
add(std::move(log_entry));
|
||||
}
|
||||
|
||||
void SessionLog::addLoginFailure(
|
||||
@ -243,7 +243,7 @@ void SessionLog::addLoginFailure(
|
||||
log_entry.client_info = info;
|
||||
log_entry.user_identified_with = AuthenticationType::NO_PASSWORD;
|
||||
|
||||
add(log_entry);
|
||||
add(std::move(log_entry));
|
||||
}
|
||||
|
||||
void SessionLog::addLogOut(const UUID & auth_id, const UserPtr & login_user, const ClientInfo & client_info)
|
||||
@ -257,7 +257,7 @@ void SessionLog::addLogOut(const UUID & auth_id, const UserPtr & login_user, con
|
||||
log_entry.external_auth_server = login_user ? login_user->auth_data.getLDAPServerName() : "";
|
||||
log_entry.client_info = client_info;
|
||||
|
||||
add(log_entry);
|
||||
add(std::move(log_entry));
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -101,7 +101,6 @@ namespace
namespace
{

constexpr size_t DEFAULT_SYSTEM_LOG_FLUSH_INTERVAL_MILLISECONDS = 7500;
constexpr size_t DEFAULT_METRIC_LOG_COLLECT_INTERVAL_MILLISECONDS = 1000;

/// Creates a system log with MergeTree engine using parameters from config
@@ -124,18 +123,23 @@ std::shared_ptr<TSystemLog> createSystemLog(
    LOG_DEBUG(&Poco::Logger::get("SystemLog"),
        "Creating {}.{} from {}", default_database_name, default_table_name, config_prefix);

    String database = config.getString(config_prefix + ".database", default_database_name);
    String table = config.getString(config_prefix + ".table", default_table_name);
    SystemLogSettings log_settings;
    log_settings.queue_settings.database = config.getString(config_prefix + ".database", default_database_name);
    log_settings.queue_settings.table = config.getString(config_prefix + ".table", default_table_name);

    if (database != default_database_name)
    if (log_settings.queue_settings.database != default_database_name)
    {
        /// System tables must be loaded before other tables, but loading order is undefined for all databases except `system`
        LOG_ERROR(&Poco::Logger::get("SystemLog"), "Custom database name for a system table specified in config."
            " Table `{}` will be created in `system` database instead of `{}`", table, database);
        database = default_database_name;
        LOG_ERROR(
            &Poco::Logger::get("SystemLog"),
            "Custom database name for a system table specified in config."
            " Table `{}` will be created in `system` database instead of `{}`",
            log_settings.queue_settings.table,
            log_settings.queue_settings.database);

        log_settings.queue_settings.database = default_database_name;
    }

    String engine;
    if (config.has(config_prefix + ".engine"))
    {
        if (config.has(config_prefix + ".partition_by"))
@@ -159,26 +163,26 @@ std::shared_ptr<TSystemLog> createSystemLog(
                "If 'engine' is specified for system table, SETTINGS parameters should "
                "be specified directly inside 'engine' and 'settings' setting doesn't make sense");

        engine = config.getString(config_prefix + ".engine");
        log_settings.engine = config.getString(config_prefix + ".engine");
    }
    else
    {
        /// ENGINE expr is necessary.
        engine = "ENGINE = MergeTree";
        log_settings.engine = "ENGINE = MergeTree";

        /// PARTITION expr is not necessary.
        String partition_by = config.getString(config_prefix + ".partition_by", "toYYYYMM(event_date)");
        if (!partition_by.empty())
            engine += " PARTITION BY (" + partition_by + ")";
            log_settings.engine += " PARTITION BY (" + partition_by + ")";

        /// TTL expr is not necessary.
        String ttl = config.getString(config_prefix + ".ttl", "");
        if (!ttl.empty())
            engine += " TTL " + ttl;
            log_settings.engine += " TTL " + ttl;

        /// ORDER BY expr is necessary.
        String order_by = config.getString(config_prefix + ".order_by", TSystemLog::getDefaultOrderBy());
        engine += " ORDER BY (" + order_by + ")";
        log_settings.engine += " ORDER BY (" + order_by + ")";

        /// SETTINGS expr is not necessary.
        /// https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree#settings
@@ -188,24 +192,52 @@ std::shared_ptr<TSystemLog> createSystemLog(
        String settings = config.getString(config_prefix + ".settings", "");
        if (!storage_policy.empty() || !settings.empty())
        {
            engine += " SETTINGS";
            log_settings.engine += " SETTINGS";
            /// If 'storage_policy' is repeated, the 'settings' configuration is preferred.
            if (!storage_policy.empty())
                engine += " storage_policy = " + quoteString(storage_policy);
                log_settings.engine += " storage_policy = " + quoteString(storage_policy);
            if (!settings.empty())
                engine += (storage_policy.empty() ? " " : ", ") + settings;
                log_settings.engine += (storage_policy.empty() ? " " : ", ") + settings;
        }
    }

    /// Validate engine definition syntax to prevent some configuration errors.
    ParserStorageWithComment storage_parser;
    parseQuery(storage_parser, engine.data(), engine.data() + engine.size(),
    parseQuery(storage_parser, log_settings.engine.data(), log_settings.engine.data() + log_settings.engine.size(),
        "Storage to create table for " + config_prefix, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);

    size_t flush_interval_milliseconds = config.getUInt64(config_prefix + ".flush_interval_milliseconds",
        DEFAULT_SYSTEM_LOG_FLUSH_INTERVAL_MILLISECONDS);
    log_settings.queue_settings.flush_interval_milliseconds = config.getUInt64(config_prefix + ".flush_interval_milliseconds",
        TSystemLog::getDefaultFlushIntervalMilliseconds());

    return std::make_shared<TSystemLog>(context, database, table, engine, flush_interval_milliseconds);
    log_settings.queue_settings.max_size_rows = config.getUInt64(config_prefix + ".max_size_rows",
        TSystemLog::getDefaultMaxSize());

    if (log_settings.queue_settings.max_size_rows < 1)
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "{0}.max_size_rows {1} should be 1 at least",
            config_prefix,
            log_settings.queue_settings.max_size_rows);

    log_settings.queue_settings.reserved_size_rows = config.getUInt64(config_prefix + ".reserved_size_rows",
        TSystemLog::getDefaultReservedSize());

    if (log_settings.queue_settings.max_size_rows < log_settings.queue_settings.reserved_size_rows)
    {
        throw Exception(ErrorCodes::BAD_ARGUMENTS,
            "{0}.max_size_rows {1} should be greater or equal to {0}.reserved_size_rows {2}",
            config_prefix,
            log_settings.queue_settings.max_size_rows,
            log_settings.queue_settings.reserved_size_rows);
    }

    log_settings.queue_settings.buffer_size_rows_flush_threshold = config.getUInt64(config_prefix + ".buffer_size_rows_flush_threshold",
        log_settings.queue_settings.max_size_rows / 2);

    log_settings.queue_settings.notify_flush_on_crash = config.getBool(config_prefix + ".flush_on_crash",
        TSystemLog::shouldNotifyFlushOnCrash());

    log_settings.queue_settings.turn_off_logger = TSystemLog::shouldTurnOffLogger();

    return std::make_shared<TSystemLog>(context, log_settings);
}
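Editor's note: the two sanity checks above are the ones that later make the integration tests expect a failed restart (max_size_rows of 0, or reserved_size_rows larger than max_size_rows). A standalone restatement of just that validation rule, not the actual ClickHouse code:

    #include <stdexcept>
    #include <string>

    // Sketch: max_size_rows must be at least 1 and must not be smaller than reserved_size_rows.
    void validateQueueSizes(const std::string & prefix, size_t max_size_rows, size_t reserved_size_rows)
    {
        if (max_size_rows < 1)
            throw std::invalid_argument(prefix + ".max_size_rows must be at least 1");
        if (max_size_rows < reserved_size_rows)
            throw std::invalid_argument(prefix + ".max_size_rows must be >= " + prefix + ".reserved_size_rows");
    }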
@@ -325,23 +357,25 @@ void SystemLogs::shutdown()
        log->shutdown();
}

void SystemLogs::handleCrash()
{
    for (auto & log : logs)
        log->handleCrash();
}

template <typename LogElement>
SystemLog<LogElement>::SystemLog(
    ContextPtr context_,
    const String & database_name_,
    const String & table_name_,
    const String & storage_def_,
    size_t flush_interval_milliseconds_,
    const SystemLogSettings & settings_,
    std::shared_ptr<SystemLogQueue<LogElement>> queue_)
    : Base(database_name_ + "." + table_name_, flush_interval_milliseconds_, queue_)
    : Base(settings_.queue_settings, queue_)
    , WithContext(context_)
    , log(&Poco::Logger::get("SystemLog (" + database_name_ + "." + table_name_ + ")"))
    , table_id(database_name_, table_name_)
    , storage_def(storage_def_)
    , log(&Poco::Logger::get("SystemLog (" + settings_.queue_settings.database + "." + settings_.queue_settings.table + ")"))
    , table_id(settings_.queue_settings.database, settings_.queue_settings.table)
    , storage_def(settings_.engine)
    , create_query(serializeAST(*getCreateTableQuery()))
{
    assert(database_name_ == DatabaseCatalog::SYSTEM_DATABASE);
    assert(settings_.queue_settings.database == DatabaseCatalog::SYSTEM_DATABASE);
}

template <typename LogElement>
@@ -58,6 +58,7 @@ struct SystemLogs
    ~SystemLogs();

    void shutdown();
    void handleCrash();

    std::shared_ptr<QueryLog> query_log;                /// Used to log queries.
    std::shared_ptr<QueryThreadLog> query_thread_log;   /// Used to log query threads.
@@ -87,6 +88,12 @@ struct SystemLogs
    std::vector<ISystemLog *> logs;
};

struct SystemLogSettings
{
    SystemLogQueueSettings queue_settings;

    String engine;
};

template <typename LogElement>
class SystemLog : public SystemLogBase<LogElement>, private boost::noncopyable, WithContext
@@ -103,12 +110,8 @@ public:
      * where N - is a minimal number from 1, for that table with corresponding name doesn't exist yet;
      * and new table get created - as if previous table was not exist.
      */
    SystemLog(
        ContextPtr context_,
        const String & database_name_,
        const String & table_name_,
        const String & storage_def_,
        size_t flush_interval_milliseconds_,
    SystemLog(ContextPtr context_,
        const SystemLogSettings & settings_,
        std::shared_ptr<SystemLogQueue<LogElement>> queue_ = nullptr);

    /** Append a record into log.
@@ -80,15 +80,10 @@ void TextLogElement::appendToBlock(MutableColumns & columns) const
    columns[i++]->insert(message_format_string);
}

TextLog::TextLog(ContextPtr context_, const String & database_name_,
        const String & table_name_, const String & storage_def_,
        size_t flush_interval_milliseconds_)
    : SystemLog<TextLogElement>(context_, database_name_, table_name_,
        storage_def_, flush_interval_milliseconds_, getLogQueue(flush_interval_milliseconds_))
TextLog::TextLog(ContextPtr context_,
        const SystemLogSettings & settings)
    : SystemLog<TextLogElement>(context_, settings, getLogQueue(settings.queue_settings))
{
    // SystemLog methods may write text logs, so we disable logging for the text
    // log table to avoid recursion.
    log->setLevel(0);
}

}
@@ -42,18 +42,15 @@ class TextLog : public SystemLog<TextLogElement>
public:
    using Queue = SystemLogQueue<TextLogElement>;

    TextLog(
        ContextPtr context_,
        const String & database_name_,
        const String & table_name_,
        const String & storage_def_,
        size_t flush_interval_milliseconds_);
    explicit TextLog(ContextPtr context_, const SystemLogSettings & settings);

    static std::shared_ptr<Queue> getLogQueue(size_t flush_interval_milliseconds)
    static std::shared_ptr<Queue> getLogQueue(const SystemLogQueueSettings & settings)
    {
        static std::shared_ptr<Queue> queue = std::make_shared<Queue>("text_log", flush_interval_milliseconds, true);
        static std::shared_ptr<Queue> queue = std::make_shared<Queue>(settings);
        return queue;
    }

    static consteval bool shouldTurnOffLogger() { return true; }
};

}
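Editor's note: TextLog::getLogQueue() above keeps one static queue so that the logging channel (built in Loggers before system tables exist) and the system.text_log table end up sharing the same buffer. A minimal sketch of that function-local-static sharing pattern, with hypothetical names:

    #include <memory>

    // Illustrative only: every caller receives the same queue, whichever subsystem asks first.
    struct SketchQueue { /* buffered log elements would live here */ };

    std::shared_ptr<SketchQueue> getSharedQueue()
    {
        static std::shared_ptr<SketchQueue> queue = std::make_shared<SketchQueue>();
        return queue;
    }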
@@ -513,7 +513,7 @@ void ThreadStatus::logToQueryThreadLog(QueryThreadLog & thread_log, const String
        }
    }

    thread_log.add(elem);
    thread_log.add(std::move(elem));
}

static String getCleanQueryAst(const ASTPtr q, ContextPtr context)
@@ -573,7 +573,7 @@ void ThreadStatus::logToQueryViewsLog(const ViewRuntimeData & vinfo)
        element.stack_trace = getExceptionStackTraceString(vinfo.exception);
    }

    views_log->add(element);
    views_log->add(std::move(element));
}

void CurrentThread::attachToGroup(const ThreadGroupPtr & thread_group)
@@ -128,7 +128,7 @@ void TraceCollector::run()
            UInt64 time = static_cast<UInt64>(ts.tv_sec * 1000000000LL + ts.tv_nsec);
            UInt64 time_in_microseconds = static_cast<UInt64>((ts.tv_sec * 1000000LL) + (ts.tv_nsec / 1000));
            TraceLogElement element{time_t(time / 1000000000), time_in_microseconds, time, trace_type, thread_id, query_id, trace, size, event, increment};
            trace_log->add(element);
            trace_log->add(std::move(element));
        }
    }
}
@@ -34,7 +34,7 @@ try
        elem.tid = tid;
        elem.csn = csn;
        elem.fillCommonFields(nullptr);
        system_log->add(elem);
        system_log->add(std::move(elem));
    }
    catch (...)
    {
@@ -101,7 +101,7 @@ try
        elem.type = type;
        elem.tid = tid;
        elem.fillCommonFields(&context);
        system_log->add(elem);
        system_log->add(std::move(elem));
    }
    catch (...)
    {
@@ -21,6 +21,12 @@ namespace fs = std::filesystem;
namespace DB
{
class SensitiveDataMasker;

namespace ErrorCodes
{
    extern const int BAD_ARGUMENTS;
}

}

@@ -44,10 +50,6 @@ static std::string renderFileNameTemplate(time_t now, const std::string & file_p
    return path.replace_filename(ss.str());
}

#ifndef WITHOUT_TEXT_LOG
constexpr size_t DEFAULT_SYSTEM_LOG_FLUSH_INTERVAL_MILLISECONDS = 7500;
#endif

void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Logger & logger /*_root*/, const std::string & cmd_name)
{
    auto current_logger = config.getString("logger", "");
@@ -271,9 +273,37 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
    {
        String text_log_level_str = config.getString("text_log.level", "trace");
        int text_log_level = Poco::Logger::parseLevel(text_log_level_str);
        size_t flush_interval_milliseconds = config.getUInt64("text_log.flush_interval_milliseconds",
            DEFAULT_SYSTEM_LOG_FLUSH_INTERVAL_MILLISECONDS);
        split->addTextLog(DB::TextLog::getLogQueue(flush_interval_milliseconds), text_log_level);

        DB::SystemLogQueueSettings log_settings;
        log_settings.flush_interval_milliseconds = config.getUInt64("text_log.flush_interval_milliseconds",
            DB::TextLog::getDefaultFlushIntervalMilliseconds());

        log_settings.max_size_rows = config.getUInt64("text_log.max_size_rows",
            DB::TextLog::getDefaultMaxSize());

        if (log_settings.max_size_rows < 1)
            throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "text_log.max_size_rows {} should be 1 at least",
                log_settings.max_size_rows);

        log_settings.reserved_size_rows = config.getUInt64("text_log.reserved_size_rows", DB::TextLog::getDefaultReservedSize());

        if (log_settings.max_size_rows < log_settings.reserved_size_rows)
        {
            throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS,
                "text_log.max_size {0} should be greater or equal to text_log.reserved_size_rows {1}",
                log_settings.max_size_rows,
                log_settings.reserved_size_rows);
        }

        log_settings.buffer_size_rows_flush_threshold = config.getUInt64("text_log.buffer_size_rows_flush_threshold",
            log_settings.max_size_rows / 2);

        log_settings.notify_flush_on_crash = config.getBool("text_log.flush_on_crash",
            DB::TextLog::shouldNotifyFlushOnCrash());

        log_settings.turn_off_logger = DB::TextLog::shouldTurnOffLogger();

        split->addTextLog(DB::TextLog::getLogQueue(log_settings), text_log_level);
    }
#endif
}
@@ -138,7 +138,7 @@ void OwnSplitChannel::logSplit(const Poco::Message & msg)
        std::shared_ptr<SystemLogQueue<TextLogElement>> text_log_locked{};
        text_log_locked = text_log.lock();
        if (text_log_locked)
            text_log_locked->push(elem);
            text_log_locked->push(std::move(elem));
    }
#endif
}
@ -1674,8 +1674,8 @@ std::pair<bool, NameSet> IMergeTreeDataPart::canRemovePart() const
|
||||
void IMergeTreeDataPart::initializePartMetadataManager()
|
||||
{
|
||||
#if USE_ROCKSDB
|
||||
if (use_metadata_cache)
|
||||
metadata_manager = std::make_shared<PartMetadataManagerWithCache>(this, storage.getContext()->getMergeTreeMetadataCache());
|
||||
if (auto metadata_cache = storage.getContext()->tryGetMergeTreeMetadataCache(); metadata_cache && use_metadata_cache)
|
||||
metadata_manager = std::make_shared<PartMetadataManagerWithCache>(this, metadata_cache);
|
||||
else
|
||||
metadata_manager = std::make_shared<PartMetadataManagerOrdinary>(this);
|
||||
#else
|
||||
|
@ -7775,7 +7775,7 @@ try
|
||||
LOG_WARNING(log, "Profile counters are not set");
|
||||
}
|
||||
|
||||
part_log->add(part_log_elem);
|
||||
part_log->add(std::move(part_log_elem));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
|
@ -1,145 +0,0 @@
|
||||
#include <Storages/System/StorageSystemMergeTreeMetadataCache.h>
|
||||
|
||||
#if USE_ROCKSDB
|
||||
#include <DataTypes/DataTypeDateTime.h>
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Parsers/ASTExpressionList.h>
|
||||
#include <Parsers/ASTFunction.h>
|
||||
#include <Parsers/ASTIdentifier.h>
|
||||
#include <Parsers/ASTLiteral.h>
|
||||
#include <Parsers/ASTSelectQuery.h>
|
||||
#include <Storages/MergeTree/KeyCondition.h>
|
||||
#include <Storages/MergeTree/MergeTreeMetadataCache.h>
|
||||
#include <Common/typeid_cast.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int BAD_ARGUMENTS;
|
||||
}
|
||||
|
||||
NamesAndTypesList StorageSystemMergeTreeMetadataCache::getNamesAndTypes()
|
||||
{
|
||||
return {
|
||||
{"key", std::make_shared<DataTypeString>()},
|
||||
{"value", std::make_shared<DataTypeString>()},
|
||||
};
|
||||
}
|
||||
|
||||
static bool extractKeyImpl(const IAST & elem, String & res, bool & precise)
|
||||
{
|
||||
const auto * function = elem.as<ASTFunction>();
|
||||
if (!function)
|
||||
return false;
|
||||
|
||||
if (function->name == "and")
|
||||
{
|
||||
for (const auto & child : function->arguments->children)
|
||||
{
|
||||
bool tmp_precise = false;
|
||||
if (extractKeyImpl(*child, res, tmp_precise))
|
||||
{
|
||||
precise = tmp_precise;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
if (function->name == "equals" || function->name == "like")
|
||||
{
|
||||
const auto & args = function->arguments->as<ASTExpressionList &>();
|
||||
const IAST * value;
|
||||
|
||||
if (args.children.size() != 2)
|
||||
return false;
|
||||
|
||||
const ASTIdentifier * ident;
|
||||
if ((ident = args.children.at(0)->as<ASTIdentifier>()))
|
||||
value = args.children.at(1).get();
|
||||
else if ((ident = args.children.at(1)->as<ASTIdentifier>()))
|
||||
value = args.children.at(0).get();
|
||||
else
|
||||
return false;
|
||||
|
||||
if (ident->name() != "key")
|
||||
return false;
|
||||
|
||||
const auto * literal = value->as<ASTLiteral>();
|
||||
if (!literal)
|
||||
return false;
|
||||
|
||||
if (literal->value.getType() != Field::Types::String)
|
||||
return false;
|
||||
|
||||
res = literal->value.safeGet<String>();
|
||||
precise = function->name == "equals";
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
/// Retrieve from the query a condition of the form `key= 'key'`, from conjunctions in the WHERE clause.
|
||||
static String extractKey(const ASTPtr & query, bool& precise)
|
||||
{
|
||||
const auto & select = query->as<ASTSelectQuery &>();
|
||||
if (!select.where())
|
||||
return "";
|
||||
|
||||
String res;
|
||||
return extractKeyImpl(*select.where(), res, precise) ? res : "";
|
||||
}
|
||||
|
||||
|
||||
void StorageSystemMergeTreeMetadataCache::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo & query_info) const
|
||||
{
|
||||
bool precise = false;
|
||||
String key = extractKey(query_info.query, precise);
|
||||
if (key.empty())
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
||||
"SELECT from system.merge_tree_metadata_cache table must contain condition like key = 'key' "
|
||||
"or key LIKE 'prefix%' in WHERE clause.");
|
||||
|
||||
auto cache = context->getMergeTreeMetadataCache();
|
||||
if (precise)
|
||||
{
|
||||
String value;
|
||||
if (cache->get(key, value) != MergeTreeMetadataCache::Status::OK())
|
||||
return;
|
||||
|
||||
size_t col_num = 0;
|
||||
res_columns[col_num++]->insert(key);
|
||||
res_columns[col_num++]->insert(value);
|
||||
}
|
||||
else
|
||||
{
|
||||
String target = extractFixedPrefixFromLikePattern(key, /*requires_perfect_prefix*/ false);
|
||||
if (target.empty())
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
||||
"SELECT from system.merge_tree_metadata_cache table must contain condition like key = 'key' "
|
||||
"or key LIKE 'prefix%' in WHERE clause.");
|
||||
|
||||
Strings keys;
|
||||
Strings values;
|
||||
keys.reserve(4096);
|
||||
values.reserve(4096);
|
||||
cache->getByPrefix(target, keys, values);
|
||||
if (keys.empty())
|
||||
return;
|
||||
|
||||
assert(keys.size() == values.size());
|
||||
for (size_t i = 0; i < keys.size(); ++i)
|
||||
{
|
||||
size_t col_num = 0;
|
||||
res_columns[col_num++]->insert(keys[i]);
|
||||
res_columns[col_num++]->insert(values[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
#endif
|
@ -1,29 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#if USE_ROCKSDB
|
||||
#include <Storages/System/IStorageSystemOneBlock.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
class Context;
|
||||
|
||||
|
||||
/// Implements `merge_tree_metadata_cache` system table, which allows you to view the metadata cache data in rocksdb for testing purposes.
|
||||
class StorageSystemMergeTreeMetadataCache : public IStorageSystemOneBlock<StorageSystemMergeTreeMetadataCache>
|
||||
{
|
||||
public:
|
||||
std::string getName() const override { return "SystemMergeTreeMetadataCache"; }
|
||||
|
||||
static NamesAndTypesList getNamesAndTypes();
|
||||
|
||||
protected:
|
||||
using IStorageSystemOneBlock::IStorageSystemOneBlock;
|
||||
|
||||
void fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo & query_info) const override;
|
||||
};
|
||||
|
||||
}
|
||||
#endif
|
@ -90,7 +90,6 @@
|
||||
|
||||
#if USE_ROCKSDB
|
||||
#include <Storages/RocksDB/StorageSystemRocksDB.h>
|
||||
#include <Storages/System/StorageSystemMergeTreeMetadataCache.h>
|
||||
#endif
|
||||
|
||||
|
||||
@ -150,7 +149,6 @@ void attachSystemTablesLocal(ContextPtr context, IDatabase & system_database)
|
||||
#endif
|
||||
#if USE_ROCKSDB
|
||||
attach<StorageSystemRocksDB>(context, system_database, "rocksdb");
|
||||
attach<StorageSystemMergeTreeMetadataCache>(context, system_database, "merge_tree_metadata_cache");
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -17,7 +17,7 @@
|
||||
|
||||
<users>
|
||||
<session_log_test_xml_user>
|
||||
<password></password>
|
||||
<no_password></no_password>
|
||||
<networks incl="networks" replace="replace">
|
||||
<ip>::1</ip>
|
||||
<ip>127.0.0.1</ip>
|
||||
|
@ -74,5 +74,12 @@
|
||||
|
||||
"test_http_failover/test.py::test_url_destination_host_with_multiple_addrs",
|
||||
"test_http_failover/test.py::test_url_invalid_hostname",
|
||||
"test_http_failover/test.py::test_url_ip_change"
|
||||
"test_http_failover/test.py::test_url_ip_change",
|
||||
|
||||
"test_system_logs/test_system_logs.py::test_max_size_0",
|
||||
"test_system_logs/test_system_logs.py::test_reserved_size_greater_max_size",
|
||||
"test_system_flush_logs/test.py::test_log_buffer_size_rows_flush_threshold",
|
||||
"test_system_flush_logs/test.py::test_log_max_size",
|
||||
"test_crash_log/test.py::test_pkill_query_log",
|
||||
"test_crash_log/test.py::test_pkill"
|
||||
]
|
||||
|
tests/integration/test_crash_log/configs/crash_log.xml (new file, 16 lines)
@@ -0,0 +1,16 @@
<clickhouse>
    <crash_log replace="replace">
        <flush_interval_milliseconds>1000000</flush_interval_milliseconds>
        <max_size_rows>1</max_size_rows>
        <reserved_size_rows>1</reserved_size_rows>
        <buffer_size_rows_flush_threshold>1</buffer_size_rows_flush_threshold>
        <flush_on_crash>true</flush_on_crash>
    </crash_log>
    <query_log replace="replace">
        <flush_interval_milliseconds>1000000</flush_interval_milliseconds>
        <max_size_rows>100</max_size_rows>
        <reserved_size_rows>100</reserved_size_rows>
        <buffer_size_rows_flush_threshold>100</buffer_size_rows_flush_threshold>
        <flush_on_crash>true</flush_on_crash>
    </query_log>
</clickhouse>
@@ -12,7 +12,9 @@ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def started_node():
    cluster = helpers.cluster.ClickHouseCluster(__file__)
    try:
        node = cluster.add_instance("node", stay_alive=True)
        node = cluster.add_instance(
            "node", main_configs=["configs/crash_log.xml"], stay_alive=True
        )

        cluster.start()
        yield node
@@ -55,3 +57,18 @@ def test_pkill(started_node):
            started_node.query("SELECT COUNT(*) FROM system.crash_log")
            == f"{crashes_count}\n"
        )


def test_pkill_query_log(started_node):
    for signal in ["SEGV", "4"]:
        # force create query_log if it was not created
        started_node.query("SYSTEM FLUSH LOGS")
        started_node.query("TRUNCATE TABLE IF EXISTS system.query_log")
        started_node.query("SELECT COUNT(*) FROM system.query_log")
        # logs don't flush
        assert started_node.query("SELECT COUNT(*) FROM system.query_log") == f"{0}\n"

        send_signal(started_node, signal)
        wait_for_clickhouse_stop(started_node)
        started_node.restart_clickhouse()
        assert started_node.query("SELECT COUNT(*) FROM system.query_log") >= f"3\n"
@ -14,8 +14,4 @@
|
||||
<table>part_log</table>
|
||||
<flush_interval_milliseconds>500</flush_interval_milliseconds>
|
||||
</part_log>
|
||||
<merge_tree_metadata_cache>
|
||||
<lru_cache_size>268435456</lru_cache_size>
|
||||
<continue_if_corrupted>true</continue_if_corrupted>
|
||||
</merge_tree_metadata_cache>
|
||||
</clickhouse>
|
||||
|
@ -889,15 +889,12 @@ def get_paths_for_partition_from_part_log(node, table, partition_id):
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"name,engine,use_metadata_cache",
|
||||
"name,engine",
|
||||
[
|
||||
pytest.param("altering_mt", "MergeTree()", "false", id="mt"),
|
||||
pytest.param("altering_mt", "MergeTree()", "true", id="mt_use_metadata_cache"),
|
||||
# ("altering_replicated_mt","ReplicatedMergeTree('/clickhouse/altering_replicated_mt', '1')",),
|
||||
# SYSTEM STOP MERGES doesn't disable merges assignments
|
||||
pytest.param("altering_mt", "MergeTree()", id="mt"),
|
||||
],
|
||||
)
|
||||
def test_alter_move(start_cluster, name, engine, use_metadata_cache):
|
||||
def test_alter_move(start_cluster, name, engine):
|
||||
try:
|
||||
node1.query(
|
||||
"""
|
||||
@ -907,9 +904,9 @@ def test_alter_move(start_cluster, name, engine, use_metadata_cache):
|
||||
) ENGINE = {engine}
|
||||
ORDER BY tuple()
|
||||
PARTITION BY toYYYYMM(EventDate)
|
||||
SETTINGS storage_policy='jbods_with_external', use_metadata_cache={use_metadata_cache}
|
||||
SETTINGS storage_policy='jbods_with_external'
|
||||
""".format(
|
||||
name=name, engine=engine, use_metadata_cache=use_metadata_cache
|
||||
name=name, engine=engine
|
||||
)
|
||||
)
|
||||
|
||||
|
@ -843,24 +843,7 @@ def test_kafka_formats(kafka_cluster):
|
||||
extra_settings=format_opts.get("extra_settings") or "",
|
||||
)
|
||||
)
|
||||
|
||||
instance.wait_for_log_line(
|
||||
"kafka.*Committed offset [0-9]+.*format_tests_",
|
||||
repetitions=len(all_formats.keys()),
|
||||
look_behind_lines=12000,
|
||||
)
|
||||
|
||||
for format_name, format_opts in list(all_formats.items()):
|
||||
logging.debug(("Checking {}".format(format_name)))
|
||||
topic_name = f"format_tests_{format_name}"
|
||||
# shift offsets by 1 if format supports empty value
|
||||
offsets = (
|
||||
[1, 2, 3] if format_opts.get("supports_empty_value", False) else [0, 1, 2]
|
||||
)
|
||||
result = instance.query(
|
||||
"SELECT * FROM test.kafka_{format_name}_mv;".format(format_name=format_name)
|
||||
)
|
||||
expected = """\
|
||||
raw_expected = """\
|
||||
0 0 AM 0.5 1 {topic_name} 0 {offset_0}
|
||||
1 0 AM 0.5 1 {topic_name} 0 {offset_1}
|
||||
2 0 AM 0.5 1 {topic_name} 0 {offset_1}
|
||||
@ -878,7 +861,27 @@ def test_kafka_formats(kafka_cluster):
|
||||
14 0 AM 0.5 1 {topic_name} 0 {offset_1}
|
||||
15 0 AM 0.5 1 {topic_name} 0 {offset_1}
|
||||
0 0 AM 0.5 1 {topic_name} 0 {offset_2}
|
||||
""".format(
|
||||
"""
|
||||
|
||||
expected_rows_count = raw_expected.count("\n")
|
||||
instance.query_with_retry(
|
||||
f"SELECT * FROM test.kafka_{list(all_formats.keys())[-1]}_mv;",
|
||||
retry_count=30,
|
||||
sleep_time=1,
|
||||
check_callback=lambda res: res.count("\n") == expected_rows_count,
|
||||
)
|
||||
|
||||
for format_name, format_opts in list(all_formats.items()):
|
||||
logging.debug(("Checking {}".format(format_name)))
|
||||
topic_name = f"format_tests_{format_name}"
|
||||
# shift offsets by 1 if format supports empty value
|
||||
offsets = (
|
||||
[1, 2, 3] if format_opts.get("supports_empty_value", False) else [0, 1, 2]
|
||||
)
|
||||
result = instance.query(
|
||||
"SELECT * FROM test.kafka_{format_name}_mv;".format(format_name=format_name)
|
||||
)
|
||||
expected = raw_expected.format(
|
||||
topic_name=topic_name,
|
||||
offset_0=offsets[0],
|
||||
offset_1=offsets[1],
|
||||
@ -3755,19 +3758,7 @@ def test_kafka_formats_with_broken_message(kafka_cluster):
|
||||
)
|
||||
)
|
||||
|
||||
for format_name, format_opts in list(all_formats.items()):
|
||||
logging.debug("Checking {format_name}")
|
||||
topic_name = f"{topic_name_prefix}{format_name}"
|
||||
# shift offsets by 1 if format supports empty value
|
||||
offsets = (
|
||||
[1, 2, 3] if format_opts.get("supports_empty_value", False) else [0, 1, 2]
|
||||
)
|
||||
result = instance.query(
|
||||
"SELECT * FROM test.kafka_data_{format_name}_mv;".format(
|
||||
format_name=format_name
|
||||
)
|
||||
)
|
||||
expected = """\
|
||||
raw_expected = """\
|
||||
0 0 AM 0.5 1 {topic_name} 0 {offset_0}
|
||||
1 0 AM 0.5 1 {topic_name} 0 {offset_1}
|
||||
2 0 AM 0.5 1 {topic_name} 0 {offset_1}
|
||||
@ -3785,7 +3776,29 @@ def test_kafka_formats_with_broken_message(kafka_cluster):
|
||||
14 0 AM 0.5 1 {topic_name} 0 {offset_1}
|
||||
15 0 AM 0.5 1 {topic_name} 0 {offset_1}
|
||||
0 0 AM 0.5 1 {topic_name} 0 {offset_2}
|
||||
""".format(
|
||||
"""
|
||||
|
||||
expected_rows_count = raw_expected.count("\n")
|
||||
instance.query_with_retry(
|
||||
f"SELECT * FROM test.kafka_data_{list(all_formats.keys())[-1]}_mv;",
|
||||
retry_count=30,
|
||||
sleep_time=1,
|
||||
check_callback=lambda res: res.count("\n") == expected_rows_count,
|
||||
)
|
||||
|
||||
for format_name, format_opts in list(all_formats.items()):
|
||||
logging.debug(f"Checking {format_name}")
|
||||
topic_name = f"{topic_name_prefix}{format_name}"
|
||||
# shift offsets by 1 if format supports empty value
|
||||
offsets = (
|
||||
[1, 2, 3] if format_opts.get("supports_empty_value", False) else [0, 1, 2]
|
||||
)
|
||||
result = instance.query(
|
||||
"SELECT * FROM test.kafka_data_{format_name}_mv;".format(
|
||||
format_name=format_name
|
||||
)
|
||||
)
|
||||
expected = raw_expected.format(
|
||||
topic_name=topic_name,
|
||||
offset_0=offsets[0],
|
||||
offset_1=offsets[1],
|
||||
|
@ -2,11 +2,16 @@
|
||||
# pylint: disable=unused-argument
|
||||
# pylint: disable=redefined-outer-name
|
||||
|
||||
import time
|
||||
import pytest
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.test_tools import assert_eq_with_retry
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
node = cluster.add_instance("node_default")
|
||||
node = cluster.add_instance(
|
||||
"node_default",
|
||||
stay_alive=True,
|
||||
)
|
||||
|
||||
system_logs = [
|
||||
# disabled by default
|
||||
@ -64,3 +69,95 @@ def test_system_suspend():
|
||||
node.query("SYSTEM SUSPEND FOR 1 SECOND;")
|
||||
node.query("INSERT INTO t VALUES (now());")
|
||||
assert "1\n" == node.query("SELECT max(x) - min(x) >= 1 FROM t;")
|
||||
|
||||
|
||||
def test_log_max_size(start_cluster):
|
||||
node.exec_in_container(
|
||||
[
|
||||
"bash",
|
||||
"-c",
|
||||
f"""echo "
|
||||
<clickhouse>
|
||||
<query_log>
|
||||
<flush_interval_milliseconds replace=\\"replace\\">1000000</flush_interval_milliseconds>
|
||||
<max_size_rows replace=\\"replace\\">10</max_size_rows>
|
||||
<reserved_size_rows replace=\\"replace\\">10</reserved_size_rows>
|
||||
</query_log>
|
||||
</clickhouse>
|
||||
" > /etc/clickhouse-server/config.d/yyy-override-query_log.xml
|
||||
""",
|
||||
]
|
||||
)
|
||||
node.restart_clickhouse()
|
||||
for i in range(10):
|
||||
node.query(f"select {i}")
|
||||
|
||||
assert node.query("select count() >= 10 from system.query_log") == "1\n"
|
||||
node.exec_in_container(
|
||||
["rm", f"/etc/clickhouse-server/config.d/yyy-override-query_log.xml"]
|
||||
)
|
||||
|
||||
|
||||
def test_log_buffer_size_rows_flush_threshold(start_cluster):
|
||||
node.exec_in_container(
|
||||
[
|
||||
"bash",
|
||||
"-c",
|
||||
f"""echo "
|
||||
<clickhouse>
|
||||
<query_log>
|
||||
<flush_interval_milliseconds replace=\\"replace\\">1000000</flush_interval_milliseconds>
|
||||
<buffer_size_rows_flush_threshold replace=\\"replace\\">10</buffer_size_rows_flush_threshold>
|
||||
<max_size_rows replace=\\"replace\\">10000</max_size_rows>
|
||||
</query_log>
|
||||
</clickhouse>
|
||||
" > /etc/clickhouse-server/config.d/yyy-override-query_log.xml
|
||||
""",
|
||||
]
|
||||
)
|
||||
node.restart_clickhouse()
|
||||
node.query(f"TRUNCATE TABLE IF EXISTS system.query_log")
|
||||
for i in range(10):
|
||||
node.query(f"select {i}")
|
||||
|
||||
assert_eq_with_retry(
|
||||
node,
|
||||
f"select count() >= 11 from system.query_log",
|
||||
"1",
|
||||
sleep_time=0.2,
|
||||
retry_count=100,
|
||||
)
|
||||
|
||||
node.query(f"TRUNCATE TABLE IF EXISTS system.query_log")
|
||||
node.exec_in_container(
|
||||
[
|
||||
"bash",
|
||||
"-c",
|
||||
f"""echo "
|
||||
<clickhouse>
|
||||
<query_log>
|
||||
<flush_interval_milliseconds replace=\\"replace\\">1000000</flush_interval_milliseconds>
|
||||
<buffer_size_rows_flush_threshold replace=\\"replace\\">10000</buffer_size_rows_flush_threshold>
|
||||
<max_size_rows replace=\\"replace\\">10000</max_size_rows>
|
||||
</query_log>
|
||||
</clickhouse>
|
||||
" > /etc/clickhouse-server/config.d/yyy-override-query_log.xml
|
||||
""",
|
||||
]
|
||||
)
|
||||
node.restart_clickhouse()
|
||||
for i in range(10):
|
||||
node.query(f"select {i}")
|
||||
|
||||
# Logs aren't flushed
|
||||
assert_eq_with_retry(
|
||||
node,
|
||||
f"select count() < 10 from system.query_log",
|
||||
"1",
|
||||
sleep_time=0.2,
|
||||
retry_count=100,
|
||||
)
|
||||
|
||||
node.exec_in_container(
|
||||
["rm", f"/etc/clickhouse-server/config.d/yyy-override-query_log.xml"]
|
||||
)
|
||||
|
@ -88,3 +88,53 @@ def test_system_logs_settings_expr(start_cluster):
|
||||
assert expected in node3.query(
|
||||
"SELECT engine_full FROM system.tables WHERE database='system' and name='query_log'"
|
||||
)
|
||||
|
||||
|
||||
def test_max_size_0(start_cluster):
|
||||
node1.exec_in_container(
|
||||
[
|
||||
"bash",
|
||||
"-c",
|
||||
f"""echo "
|
||||
<clickhouse>
|
||||
<query_log>
|
||||
<max_size_rows replace=\\"replace\\">0</max_size_rows>
|
||||
<reserved_size_rows replace=\\"replace\\">0</reserved_size_rows>
|
||||
</query_log>
|
||||
</clickhouse>
|
||||
" > /etc/clickhouse-server/config.d/yyy-override-query_log.xml
|
||||
""",
|
||||
]
|
||||
)
|
||||
with pytest.raises(Exception):
|
||||
node1.restart_clickhouse()
|
||||
|
||||
node1.exec_in_container(
|
||||
["rm", f"/etc/clickhouse-server/config.d/yyy-override-query_log.xml"]
|
||||
)
|
||||
node1.restart_clickhouse()
|
||||
|
||||
|
||||
def test_reserved_size_greater_max_size(start_cluster):
|
||||
node1.exec_in_container(
|
||||
[
|
||||
"bash",
|
||||
"-c",
|
||||
f"""echo "
|
||||
<clickhouse>
|
||||
<query_log>
|
||||
<max_size_rows replace=\\"replace\\">10</max_size_rows>
|
||||
<reserved_size_rows replace=\\"replace\\">11</reserved_size_rows>
|
||||
</query_log>
|
||||
</clickhouse>
|
||||
" > /etc/clickhouse-server/config.d/yyy-override-query_log.xml
|
||||
""",
|
||||
]
|
||||
)
|
||||
with pytest.raises(Exception):
|
||||
node1.restart_clickhouse()
|
||||
|
||||
node1.exec_in_container(
|
||||
["rm", f"/etc/clickhouse-server/config.d/yyy-override-query_log.xml"]
|
||||
)
|
||||
node1.restart_clickhouse()
|
||||
|
@ -18,7 +18,7 @@ function run_selects()
|
||||
{
|
||||
thread_num=$1
|
||||
readarray -t tables_arr < <(${CLICKHOUSE_CLIENT} -q "SELECT database || '.' || name FROM system.tables
|
||||
WHERE database in ('system', 'information_schema', 'INFORMATION_SCHEMA') and name!='zookeeper' and name!='merge_tree_metadata_cache' and name!='models'
|
||||
WHERE database in ('system', 'information_schema', 'INFORMATION_SCHEMA') and name != 'zookeeper' and name != 'models'
|
||||
AND sipHash64(name || toString($RAND)) % $THREADS = $thread_num")
|
||||
|
||||
for t in "${tables_arr[@]}"
|
||||
|
@ -1,672 +0,0 @@
|
||||
database engine:Ordinary; table engine:ReplicatedMergeTree; use metadata cache:false; use projection:false; use_compact_data_part:false
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v1 = 2001 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v2 = 4002 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 8 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-06-15', 5, 1000, 2000), ('2018-06-16', 6, 3000, 4000), ('2018-06-17', 7, 5000, 6000), ('2018-06-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop partition 201805 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache add column v3 UInt64 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop column v3 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 10 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 15 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
TRUNCATE TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
database engine:Ordinary; table engine:ReplicatedMergeTree; use metadata cache:false; use projection:false; use_compact_data_part:true
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v1 = 2001 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v2 = 4002 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 8 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-06-15', 5, 1000, 2000), ('2018-06-16', 6, 3000, 4000), ('2018-06-17', 7, 5000, 6000), ('2018-06-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop partition 201805 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache add column v3 UInt64 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop column v3 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 10 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 15 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
TRUNCATE TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
database engine:Ordinary; table engine:ReplicatedMergeTree; use metadata cache:false; use projection:true; use_compact_data_part:false
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v1 = 2001 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v2 = 4002 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 8 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-06-15', 5, 1000, 2000), ('2018-06-16', 6, 3000, 4000), ('2018-06-17', 7, 5000, 6000), ('2018-06-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop partition 201805 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache add column v3 UInt64 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop column v3 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 10 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 15 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
TRUNCATE TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
database engine:Ordinary; table engine:ReplicatedMergeTree; use metadata cache:false; use projection:true; use_compact_data_part:true
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v1 = 2001 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v2 = 4002 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 8 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-06-15', 5, 1000, 2000), ('2018-06-16', 6, 3000, 4000), ('2018-06-17', 7, 5000, 6000), ('2018-06-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop partition 201805 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache add column v3 UInt64 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop column v3 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 10 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 15 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
TRUNCATE TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
database engine:Ordinary; table engine:ReplicatedMergeTree; use metadata cache:true; use projection:false; use_compact_data_part:false
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v1 = 2001 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v2 = 4002 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 8 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-06-15', 5, 1000, 2000), ('2018-06-16', 6, 3000, 4000), ('2018-06-17', 7, 5000, 6000), ('2018-06-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop partition 201805 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache add column v3 UInt64 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop column v3 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 10 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 15 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
TRUNCATE TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
database engine:Ordinary; table engine:ReplicatedMergeTree; use metadata cache:true; use projection:false; use_compact_data_part:true
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v1 = 2001 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v2 = 4002 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 8 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-06-15', 5, 1000, 2000), ('2018-06-16', 6, 3000, 4000), ('2018-06-17', 7, 5000, 6000), ('2018-06-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop partition 201805 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache add column v3 UInt64 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop column v3 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 10 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 15 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
TRUNCATE TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
database engine:Ordinary; table engine:ReplicatedMergeTree; use metadata cache:true; use projection:true; use_compact_data_part:false
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v1 = 2001 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v2 = 4002 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 8 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-06-15', 5, 1000, 2000), ('2018-06-16', 6, 3000, 4000), ('2018-06-17', 7, 5000, 6000), ('2018-06-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop partition 201805 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache add column v3 UInt64 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop column v3 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 10 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 15 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
TRUNCATE TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
database engine:Ordinary; table engine:ReplicatedMergeTree; use metadata cache:true; use projection:true; use_compact_data_part:true
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v1 = 2001 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v2 = 4002 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 8 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-06-15', 5, 1000, 2000), ('2018-06-16', 6, 3000, 4000), ('2018-06-17', 7, 5000, 6000), ('2018-06-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop partition 201805 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache add column v3 UInt64 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop column v3 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 10 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 15 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
TRUNCATE TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
database engine:Atomic; table engine:ReplicatedMergeTree; use metadata cache:false; use projection:false; use_compact_data_part:false
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v1 = 2001 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v2 = 4002 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 8 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-06-15', 5, 1000, 2000), ('2018-06-16', 6, 3000, 4000), ('2018-06-17', 7, 5000, 6000), ('2018-06-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop partition 201805 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache add column v3 UInt64 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop column v3 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 10 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 15 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
TRUNCATE TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
database engine:Atomic; table engine:ReplicatedMergeTree; use metadata cache:false; use projection:false; use_compact_data_part:true
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v1 = 2001 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v2 = 4002 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 8 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-06-15', 5, 1000, 2000), ('2018-06-16', 6, 3000, 4000), ('2018-06-17', 7, 5000, 6000), ('2018-06-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop partition 201805 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache add column v3 UInt64 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop column v3 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 10 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 15 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
TRUNCATE TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
database engine:Atomic; table engine:ReplicatedMergeTree; use metadata cache:false; use projection:true; use_compact_data_part:false
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v1 = 2001 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v2 = 4002 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 8 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-06-15', 5, 1000, 2000), ('2018-06-16', 6, 3000, 4000), ('2018-06-17', 7, 5000, 6000), ('2018-06-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop partition 201805 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache add column v3 UInt64 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop column v3 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 10 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 15 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
TRUNCATE TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
database engine:Atomic; table engine:ReplicatedMergeTree; use metadata cache:false; use projection:true; use_compact_data_part:true
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v1 = 2001 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v2 = 4002 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 8 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-06-15', 5, 1000, 2000), ('2018-06-16', 6, 3000, 4000), ('2018-06-17', 7, 5000, 6000), ('2018-06-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop partition 201805 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache add column v3 UInt64 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop column v3 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 10 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 15 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
TRUNCATE TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
database engine:Atomic; table engine:ReplicatedMergeTree; use metadata cache:true; use projection:false; use_compact_data_part:false
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v1 = 2001 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v2 = 4002 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 8 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-06-15', 5, 1000, 2000), ('2018-06-16', 6, 3000, 4000), ('2018-06-17', 7, 5000, 6000), ('2018-06-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop partition 201805 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache add column v3 UInt64 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop column v3 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 10 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 15 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
TRUNCATE TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
database engine:Atomic; table engine:ReplicatedMergeTree; use metadata cache:true; use projection:false; use_compact_data_part:true
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v1 = 2001 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v2 = 4002 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 8 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-06-15', 5, 1000, 2000), ('2018-06-16', 6, 3000, 4000), ('2018-06-17', 7, 5000, 6000), ('2018-06-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop partition 201805 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache add column v3 UInt64 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop column v3 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 10 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 15 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
TRUNCATE TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
database engine:Atomic; table engine:ReplicatedMergeTree; use metadata cache:true; use projection:true; use_compact_data_part:false
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v1 = 2001 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v2 = 4002 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 8 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-06-15', 5, 1000, 2000), ('2018-06-16', 6, 3000, 4000), ('2018-06-17', 7, 5000, 6000), ('2018-06-18', 8, 7000, 8000);
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop partition 201805 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
|
||||
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
|
||||
1
|
||||
ALTER TABLE test_metadata_cache.check_part_metadata_cache add column v3 UInt64 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
1
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop column v3 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
1
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 10 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
1
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 15 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
1
TRUNCATE TABLE test_metadata_cache.check_part_metadata_cache;
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
1
database engine:Atomic; table engine:ReplicatedMergeTree; use metadata cache:true; use projection:true; use_compact_data_part:true
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
1
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
1
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-18', 8, 7000, 8000);
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
1
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v1 = 2001 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
1
ALTER TABLE test_metadata_cache.check_part_metadata_cache update v2 = 4002 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
1
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
1
ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 8 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
1
INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-06-15', 5, 1000, 2000), ('2018-06-16', 6, 3000, 4000), ('2018-06-17', 7, 5000, 6000), ('2018-06-18', 8, 7000, 8000);
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
1
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop partition 201805 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
1
ALTER TABLE test_metadata_cache.check_part_metadata_cache add column v3 UInt64 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
1
ALTER TABLE test_metadata_cache.check_part_metadata_cache drop column v3 settings mutations_sync = 1, replication_alter_partitions_sync = 1;
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
1
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 10 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
1
ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 15 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
1
TRUNCATE TABLE test_metadata_cache.check_part_metadata_cache;
CHECK TABLE test_metadata_cache.check_part_metadata_cache;
1
@ -1,104 +0,0 @@
#!/usr/bin/env bash
# Tags: no-fasttest, long, no-s3-storage, no-random-settings, no-parallel
# Tag no-fasttest: setting use_metadata_cache=true is not supported in fasttest, because the clickhouse binary in fasttest is built without RocksDB.
# Tag no-random-settings: random settings significantly slow down test with debug build (alternative: add no-debug tag)
# To suppress Warning messages from CHECK TABLE
CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=error
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

set -e

table_engines=(ReplicatedMergeTree)
database_engines=(Ordinary Atomic)
use_metadata_caches=(false true)
use_projections=(false true)
use_compact_data_parts=(false true)

for table_engine in "${table_engines[@]}"; do
for database_engine in "${database_engines[@]}"; do
for use_metadata_cache in "${use_metadata_caches[@]}"; do
for use_projection in "${use_projections[@]}"; do
for use_compact_data_part in "${use_compact_data_parts[@]}"; do
echo "database engine:${database_engine}; table engine:${table_engine}; use metadata cache:${use_metadata_cache}; use projection:${use_projection}; use_compact_data_part:${use_compact_data_part}"

${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS test_metadata_cache.check_part_metadata_cache SYNC;"
${CLICKHOUSE_CLIENT} --query "DROP DATABASE IF EXISTS test_metadata_cache;"
${CLICKHOUSE_CLIENT} --allow_deprecated_database_ordinary=1 --query "CREATE DATABASE test_metadata_cache ENGINE = ${database_engine};"

table_engine_clause=""
if [[ "$table_engine" == "ReplicatedMergeTree" ]]; then
table_engine_clause="ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_metadata_cache/check_part_metadata_cache', 'r1')"
elif [[ "$table_engine" == "MergeTree" ]]; then
table_engine_clause="ENGINE MergeTree()"
fi

projection_clause=""
if [[ "$use_projection" == "true" ]]; then
projection_clause=", projection p1 (select p, sum(k), sum(v1), sum(v2) group by p)"
fi

compact_data_part_clause=", min_bytes_for_wide_part = 10485760"
if [[ $use_compact_data_part == "true" ]]; then
compact_data_part_clause=", min_bytes_for_wide_part = 0"
fi
${CLICKHOUSE_CLIENT} --query "CREATE TABLE test_metadata_cache.check_part_metadata_cache (p Date, k UInt64, v1 UInt64, v2 Int64${projection_clause}) $table_engine_clause PARTITION BY toYYYYMM(p) ORDER BY k settings use_metadata_cache = ${use_metadata_cache} ${compact_data_part_clause}"
${CLICKHOUSE_CLIENT} --echo --query "CHECK TABLE test_metadata_cache.check_part_metadata_cache;"

# Insert first batch of data.
${CLICKHOUSE_CLIENT} --echo --query "INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);"
${CLICKHOUSE_CLIENT} --echo --query "CHECK TABLE test_metadata_cache.check_part_metadata_cache;"

# Insert second batch of data.
${CLICKHOUSE_CLIENT} --echo --query "INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-18', 8, 7000, 8000);"
${CLICKHOUSE_CLIENT} --echo --query "CHECK TABLE test_metadata_cache.check_part_metadata_cache;"

# First update.
${CLICKHOUSE_CLIENT} --echo --query "ALTER TABLE test_metadata_cache.check_part_metadata_cache update v1 = 2001 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;"
${CLICKHOUSE_CLIENT} --echo --query "CHECK TABLE test_metadata_cache.check_part_metadata_cache;"

# Second update.
${CLICKHOUSE_CLIENT} --echo --query "ALTER TABLE test_metadata_cache.check_part_metadata_cache update v2 = 4002 where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;"
${CLICKHOUSE_CLIENT} --echo --query "CHECK TABLE test_metadata_cache.check_part_metadata_cache;"

# First delete.
${CLICKHOUSE_CLIENT} --echo --query "ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 1 settings mutations_sync = 1, replication_alter_partitions_sync = 1;"
${CLICKHOUSE_CLIENT} --echo --query "CHECK TABLE test_metadata_cache.check_part_metadata_cache;"

# Second delete.
${CLICKHOUSE_CLIENT} --echo --query "ALTER TABLE test_metadata_cache.check_part_metadata_cache delete where k = 8 settings mutations_sync = 1, replication_alter_partitions_sync = 1;"
${CLICKHOUSE_CLIENT} --echo --query "CHECK TABLE test_metadata_cache.check_part_metadata_cache;"

# Insert third batch of data.
${CLICKHOUSE_CLIENT} --echo --query "INSERT INTO test_metadata_cache.check_part_metadata_cache (p, k, v1, v2) VALUES ('2018-06-15', 5, 1000, 2000), ('2018-06-16', 6, 3000, 4000), ('2018-06-17', 7, 5000, 6000), ('2018-06-18', 8, 7000, 8000);"
${CLICKHOUSE_CLIENT} --echo --query "CHECK TABLE test_metadata_cache.check_part_metadata_cache;"

# Drop one partition.
${CLICKHOUSE_CLIENT} --echo --query "ALTER TABLE test_metadata_cache.check_part_metadata_cache drop partition 201805 settings mutations_sync = 1, replication_alter_partitions_sync = 1;"
${CLICKHOUSE_CLIENT} --echo --query "CHECK TABLE test_metadata_cache.check_part_metadata_cache;"

# Add column.
${CLICKHOUSE_CLIENT} --echo --query "ALTER TABLE test_metadata_cache.check_part_metadata_cache add column v3 UInt64 settings mutations_sync = 1, replication_alter_partitions_sync = 1;"
${CLICKHOUSE_CLIENT} --echo --query "CHECK TABLE test_metadata_cache.check_part_metadata_cache;"

# Delete column.
${CLICKHOUSE_CLIENT} --echo --query "ALTER TABLE test_metadata_cache.check_part_metadata_cache drop column v3 settings mutations_sync = 1, replication_alter_partitions_sync = 1;"
${CLICKHOUSE_CLIENT} --echo --query "CHECK TABLE test_metadata_cache.check_part_metadata_cache;"

# Add TTL.
${CLICKHOUSE_CLIENT} --echo --query "ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 10 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;"
${CLICKHOUSE_CLIENT} --echo --query "CHECK TABLE test_metadata_cache.check_part_metadata_cache;"

# Modify TTL.
${CLICKHOUSE_CLIENT} --echo --query "ALTER TABLE test_metadata_cache.check_part_metadata_cache modify TTL p + INTERVAL 15 YEAR settings mutations_sync = 1, replication_alter_partitions_sync = 1;"
${CLICKHOUSE_CLIENT} --echo --query "CHECK TABLE test_metadata_cache.check_part_metadata_cache;"

# Truncate table.
${CLICKHOUSE_CLIENT} --echo --query "TRUNCATE TABLE test_metadata_cache.check_part_metadata_cache;"
${CLICKHOUSE_CLIENT} --echo --query "CHECK TABLE test_metadata_cache.check_part_metadata_cache;"
done
done
done
done
done
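For orientation, here is roughly what the templated CREATE TABLE in the deleted loop above expands to for one combination reported in the reference output (Atomic database, ReplicatedMergeTree, metadata cache, projection, compact-part setting). This is a sketch, not verbatim test output; the ZooKeeper path prefix is environment-specific and shown as a placeholder:

```sql
-- Expansion of the loop's CREATE TABLE template for one settings combination (sketch).
CREATE TABLE test_metadata_cache.check_part_metadata_cache
(
    p Date,
    k UInt64,
    v1 UInt64,
    v2 Int64,
    projection p1 (select p, sum(k), sum(v1), sum(v2) group by p)
)
ENGINE ReplicatedMergeTree('/clickhouse/tables/<zookeeper_prefix>/test_metadata_cache/check_part_metadata_cache', 'r1')
PARTITION BY toYYYYMM(p)
ORDER BY k
-- min_bytes_for_wide_part value taken from the script's compact_data_part_clause when use_compact_data_part=true
SETTINGS use_metadata_cache = true, min_bytes_for_wide_part = 0;
```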
@ -4,215 +4,291 @@ TCP endpoint
TCP 'wrong password' case is skipped for no_password.
HTTP endpoint
HTTP 'wrong password' case is skipped for no_password.
MySQL endpoint
HTTP endpoint with named session
HTTP 'wrong password' case is skipped for no_password.
MySQL endpoint no_password
Wrong username
Wrong password
MySQL 'wrong password' case is skipped for no_password.
PostrgreSQL endpoint
PostgreSQL 'wrong password' case is skipped for no_password.

# no_password - No profiles no roles
TCP endpoint
TCP 'wrong password' case is skipped for no_password.
HTTP endpoint
HTTP 'wrong password' case is skipped for no_password.
MySQL endpoint
HTTP endpoint with named session
HTTP 'wrong password' case is skipped for no_password.
MySQL endpoint no_password
Wrong username
Wrong password
MySQL 'wrong password' case is skipped for no_password.
PostrgreSQL endpoint
PostgreSQL 'wrong password' case is skipped for no_password.

# no_password - Two profiles, no roles
TCP endpoint
TCP 'wrong password' case is skipped for no_password.
HTTP endpoint
HTTP 'wrong password' case is skipped for no_password.
MySQL endpoint
HTTP endpoint with named session
HTTP 'wrong password' case is skipped for no_password.
MySQL endpoint no_password
Wrong username
Wrong password
MySQL 'wrong password' case is skipped for no_password.
PostrgreSQL endpoint
PostgreSQL 'wrong password' case is skipped for no_password.

# no_password - Two profiles and two simple roles
TCP endpoint
TCP 'wrong password' case is skipped for no_password.
HTTP endpoint
HTTP 'wrong password' case is skipped for no_password.
MySQL endpoint
HTTP endpoint with named session
HTTP 'wrong password' case is skipped for no_password.
MySQL endpoint no_password
Wrong username
Wrong password
MySQL 'wrong password' case is skipped for no_password.
PostrgreSQL endpoint
PostgreSQL 'wrong password' case is skipped for no_password.

# plaintext_password - No profiles no roles
TCP endpoint
HTTP endpoint
MySQL endpoint
HTTP endpoint with named session
MySQL endpoint plaintext_password
Wrong username
Wrong password
PostrgreSQL endpoint

# plaintext_password - Two profiles, no roles
TCP endpoint
HTTP endpoint
MySQL endpoint
HTTP endpoint with named session
MySQL endpoint plaintext_password
Wrong username
Wrong password
PostrgreSQL endpoint

# plaintext_password - Two profiles and two simple roles
TCP endpoint
HTTP endpoint
MySQL endpoint
HTTP endpoint with named session
MySQL endpoint plaintext_password
Wrong username
Wrong password
PostrgreSQL endpoint

# sha256_password - No profiles no roles
TCP endpoint
HTTP endpoint
MySQL endpoint
HTTP endpoint with named session
MySQL endpoint sha256_password
MySQL 'successful login' case is skipped for sha256_password.
Wrong username
Wrong password
PostrgreSQL endpoint
PostgreSQL tests are skipped for sha256_password

# sha256_password - Two profiles, no roles
TCP endpoint
HTTP endpoint
MySQL endpoint
HTTP endpoint with named session
MySQL endpoint sha256_password
MySQL 'successful login' case is skipped for sha256_password.
Wrong username
Wrong password
PostrgreSQL endpoint
PostgreSQL tests are skipped for sha256_password

# sha256_password - Two profiles and two simple roles
TCP endpoint
HTTP endpoint
MySQL endpoint
HTTP endpoint with named session
MySQL endpoint sha256_password
MySQL 'successful login' case is skipped for sha256_password.
Wrong username
Wrong password
PostrgreSQL endpoint
PostgreSQL tests are skipped for sha256_password

# double_sha1_password - No profiles no roles
TCP endpoint
HTTP endpoint
MySQL endpoint
HTTP endpoint with named session
MySQL endpoint double_sha1_password
Wrong username
Wrong password
PostrgreSQL endpoint
PostgreSQL tests are skipped for double_sha1_password

# double_sha1_password - Two profiles, no roles
TCP endpoint
HTTP endpoint
MySQL endpoint
HTTP endpoint with named session
MySQL endpoint double_sha1_password
Wrong username
Wrong password
PostrgreSQL endpoint
PostgreSQL tests are skipped for double_sha1_password

# double_sha1_password - Two profiles and two simple roles
TCP endpoint
HTTP endpoint
MySQL endpoint
HTTP endpoint with named session
MySQL endpoint double_sha1_password
Wrong username
Wrong password
PostrgreSQL endpoint
PostgreSQL tests are skipped for double_sha1_password
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles TCP LoginFailure 1
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles TCP LoginSuccess 1
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles TCP Logout 1
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles HTTP LoginFailure 1
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles HTTP LoginSuccess 1
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles HTTP Logout 1
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles HTTP LoginFailure many
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles HTTP LoginSuccess many
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles HTTP Logout many
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles MySQL LoginFailure many
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles MySQL LoginSuccess 1
${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles MySQL Logout 1
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles TCP LoginFailure 1
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles TCP LoginSuccess 1
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles TCP Logout 1
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles HTTP LoginFailure 1
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles HTTP LoginSuccess 1
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles HTTP Logout 1
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles HTTP LoginFailure many
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles HTTP LoginSuccess many
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles HTTP Logout many
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles MySQL LoginFailure many
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles MySQL LoginSuccess 1
${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles MySQL Logout 1
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles TCP LoginFailure 1
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles TCP LoginSuccess 1
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles TCP Logout 1
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles HTTP LoginFailure 1
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles HTTP LoginSuccess 1
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles HTTP Logout 1
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles HTTP LoginFailure many
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles HTTP LoginSuccess many
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles HTTP Logout many
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles MySQL LoginFailure many
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles MySQL LoginSuccess 1
${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles MySQL Logout 1
${BASE_USERNAME}_no_password_no_profiles_no_roles TCP LoginSuccess 1
${BASE_USERNAME}_no_password_no_profiles_no_roles TCP Logout 1
${BASE_USERNAME}_no_password_no_profiles_no_roles HTTP LoginSuccess 1
${BASE_USERNAME}_no_password_no_profiles_no_roles HTTP Logout 1
${BASE_USERNAME}_no_password_no_profiles_no_roles HTTP LoginSuccess many
${BASE_USERNAME}_no_password_no_profiles_no_roles HTTP Logout many
${BASE_USERNAME}_no_password_no_profiles_no_roles MySQL LoginSuccess 1
${BASE_USERNAME}_no_password_no_profiles_no_roles MySQL Logout 1
${BASE_USERNAME}_no_password_two_profiles_no_roles TCP LoginSuccess 1
${BASE_USERNAME}_no_password_two_profiles_no_roles TCP Logout 1
${BASE_USERNAME}_no_password_two_profiles_no_roles HTTP LoginSuccess 1
${BASE_USERNAME}_no_password_two_profiles_no_roles HTTP Logout 1
${BASE_USERNAME}_no_password_two_profiles_no_roles HTTP LoginSuccess many
${BASE_USERNAME}_no_password_two_profiles_no_roles HTTP Logout many
${BASE_USERNAME}_no_password_two_profiles_no_roles MySQL LoginSuccess 1
${BASE_USERNAME}_no_password_two_profiles_no_roles MySQL Logout 1
${BASE_USERNAME}_no_password_two_profiles_two_roles TCP LoginSuccess 1
${BASE_USERNAME}_no_password_two_profiles_two_roles TCP Logout 1
${BASE_USERNAME}_no_password_two_profiles_two_roles HTTP LoginSuccess 1
${BASE_USERNAME}_no_password_two_profiles_two_roles HTTP Logout 1
${BASE_USERNAME}_no_password_two_profiles_two_roles HTTP LoginSuccess many
${BASE_USERNAME}_no_password_two_profiles_two_roles HTTP Logout many
${BASE_USERNAME}_no_password_two_profiles_two_roles MySQL LoginSuccess 1
${BASE_USERNAME}_no_password_two_profiles_two_roles MySQL Logout 1
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles TCP LoginFailure 1
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles TCP LoginSuccess 1
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles TCP Logout 1
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles HTTP LoginFailure 1
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles HTTP LoginSuccess 1
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles HTTP Logout 1
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles HTTP LoginFailure many
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles HTTP LoginSuccess many
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles HTTP Logout many
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles MySQL LoginFailure many
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles MySQL LoginSuccess 1
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles MySQL Logout 1
${BASE_USERNAME}_plaintext_password_no_profiles_no_roles PostgreSQL LoginFailure many
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles TCP LoginFailure 1
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles TCP LoginSuccess 1
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles TCP Logout 1
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles HTTP LoginFailure 1
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles HTTP LoginSuccess 1
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles HTTP Logout 1
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles HTTP LoginFailure many
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles HTTP LoginSuccess many
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles HTTP Logout many
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles MySQL LoginFailure many
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles MySQL LoginSuccess 1
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles MySQL Logout 1
${BASE_USERNAME}_plaintext_password_two_profiles_no_roles PostgreSQL LoginFailure many
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles TCP LoginFailure 1
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles TCP LoginSuccess 1
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles TCP Logout 1
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles HTTP LoginFailure 1
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles HTTP LoginSuccess 1
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles HTTP Logout 1
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles HTTP LoginFailure many
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles HTTP LoginSuccess many
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles HTTP Logout many
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles MySQL LoginFailure many
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles MySQL LoginSuccess 1
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles MySQL Logout 1
${BASE_USERNAME}_plaintext_password_two_profiles_two_roles PostgreSQL LoginFailure many
${BASE_USERNAME}_sha256_password_no_profiles_no_roles TCP LoginFailure 1
${BASE_USERNAME}_sha256_password_no_profiles_no_roles TCP LoginSuccess 1
${BASE_USERNAME}_sha256_password_no_profiles_no_roles TCP Logout 1
${BASE_USERNAME}_sha256_password_no_profiles_no_roles HTTP LoginFailure 1
${BASE_USERNAME}_sha256_password_no_profiles_no_roles HTTP LoginSuccess 1
${BASE_USERNAME}_sha256_password_no_profiles_no_roles HTTP Logout 1
${BASE_USERNAME}_sha256_password_no_profiles_no_roles HTTP LoginFailure many
${BASE_USERNAME}_sha256_password_no_profiles_no_roles HTTP LoginSuccess many
${BASE_USERNAME}_sha256_password_no_profiles_no_roles HTTP Logout many
${BASE_USERNAME}_sha256_password_no_profiles_no_roles MySQL LoginFailure many
${BASE_USERNAME}_sha256_password_two_profiles_no_roles TCP LoginFailure 1
${BASE_USERNAME}_sha256_password_two_profiles_no_roles TCP LoginSuccess 1
${BASE_USERNAME}_sha256_password_two_profiles_no_roles TCP Logout 1
${BASE_USERNAME}_sha256_password_two_profiles_no_roles HTTP LoginFailure 1
${BASE_USERNAME}_sha256_password_two_profiles_no_roles HTTP LoginSuccess 1
${BASE_USERNAME}_sha256_password_two_profiles_no_roles HTTP Logout 1
${BASE_USERNAME}_sha256_password_two_profiles_no_roles HTTP LoginFailure many
${BASE_USERNAME}_sha256_password_two_profiles_no_roles HTTP LoginSuccess many
${BASE_USERNAME}_sha256_password_two_profiles_no_roles HTTP Logout many
${BASE_USERNAME}_sha256_password_two_profiles_no_roles MySQL LoginFailure many
${BASE_USERNAME}_sha256_password_two_profiles_two_roles TCP LoginFailure 1
${BASE_USERNAME}_sha256_password_two_profiles_two_roles TCP LoginSuccess 1
${BASE_USERNAME}_sha256_password_two_profiles_two_roles TCP Logout 1
${BASE_USERNAME}_sha256_password_two_profiles_two_roles HTTP LoginFailure 1
${BASE_USERNAME}_sha256_password_two_profiles_two_roles HTTP LoginSuccess 1
${BASE_USERNAME}_sha256_password_two_profiles_two_roles HTTP Logout 1
${BASE_USERNAME}_sha256_password_two_profiles_two_roles HTTP LoginFailure many
${BASE_USERNAME}_sha256_password_two_profiles_two_roles HTTP LoginSuccess many
${BASE_USERNAME}_sha256_password_two_profiles_two_roles HTTP Logout many
${BASE_USERNAME}_sha256_password_two_profiles_two_roles MySQL LoginFailure many
invalid_${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles TCP LoginFailure 1
invalid_${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles HTTP LoginFailure 1
invalid_${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles HTTP LoginFailure many
invalid_${BASE_USERNAME}_double_sha1_password_no_profiles_no_roles MySQL LoginFailure many
invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles TCP LoginFailure 1
invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles HTTP LoginFailure 1
invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles HTTP LoginFailure many
invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_no_roles MySQL LoginFailure many
invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles TCP LoginFailure 1
invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles HTTP LoginFailure 1
invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles HTTP LoginFailure many
invalid_${BASE_USERNAME}_double_sha1_password_two_profiles_two_roles MySQL LoginFailure many
invalid_${BASE_USERNAME}_no_password_no_profiles_no_roles TCP LoginFailure 1
invalid_${BASE_USERNAME}_no_password_no_profiles_no_roles HTTP LoginFailure 1
invalid_${BASE_USERNAME}_no_password_no_profiles_no_roles HTTP LoginFailure many
invalid_${BASE_USERNAME}_no_password_no_profiles_no_roles MySQL LoginFailure many
invalid_${BASE_USERNAME}_no_password_no_profiles_no_roles PostgreSQL LoginFailure many
invalid_${BASE_USERNAME}_no_password_two_profiles_no_roles TCP LoginFailure 1
invalid_${BASE_USERNAME}_no_password_two_profiles_no_roles HTTP LoginFailure 1
invalid_${BASE_USERNAME}_no_password_two_profiles_no_roles HTTP LoginFailure many
invalid_${BASE_USERNAME}_no_password_two_profiles_no_roles MySQL LoginFailure many
invalid_${BASE_USERNAME}_no_password_two_profiles_no_roles PostgreSQL LoginFailure many
invalid_${BASE_USERNAME}_no_password_two_profiles_two_roles TCP LoginFailure 1
invalid_${BASE_USERNAME}_no_password_two_profiles_two_roles HTTP LoginFailure 1
invalid_${BASE_USERNAME}_no_password_two_profiles_two_roles HTTP LoginFailure many
invalid_${BASE_USERNAME}_no_password_two_profiles_two_roles MySQL LoginFailure many
invalid_${BASE_USERNAME}_no_password_two_profiles_two_roles PostgreSQL LoginFailure many
invalid_${BASE_USERNAME}_plaintext_password_no_profiles_no_roles TCP LoginFailure 1
invalid_${BASE_USERNAME}_plaintext_password_no_profiles_no_roles HTTP LoginFailure 1
invalid_${BASE_USERNAME}_plaintext_password_no_profiles_no_roles HTTP LoginFailure many
invalid_${BASE_USERNAME}_plaintext_password_no_profiles_no_roles MySQL LoginFailure many
invalid_${BASE_USERNAME}_plaintext_password_no_profiles_no_roles PostgreSQL LoginFailure many
invalid_${BASE_USERNAME}_plaintext_password_two_profiles_no_roles TCP LoginFailure 1
invalid_${BASE_USERNAME}_plaintext_password_two_profiles_no_roles HTTP LoginFailure 1
invalid_${BASE_USERNAME}_plaintext_password_two_profiles_no_roles HTTP LoginFailure many
invalid_${BASE_USERNAME}_plaintext_password_two_profiles_no_roles MySQL LoginFailure many
invalid_${BASE_USERNAME}_plaintext_password_two_profiles_no_roles PostgreSQL LoginFailure many
invalid_${BASE_USERNAME}_plaintext_password_two_profiles_two_roles TCP LoginFailure 1
invalid_${BASE_USERNAME}_plaintext_password_two_profiles_two_roles HTTP LoginFailure 1
invalid_${BASE_USERNAME}_plaintext_password_two_profiles_two_roles HTTP LoginFailure many
invalid_${BASE_USERNAME}_plaintext_password_two_profiles_two_roles MySQL LoginFailure many
invalid_${BASE_USERNAME}_plaintext_password_two_profiles_two_roles PostgreSQL LoginFailure many
invalid_${BASE_USERNAME}_sha256_password_no_profiles_no_roles TCP LoginFailure 1
invalid_${BASE_USERNAME}_sha256_password_no_profiles_no_roles HTTP LoginFailure 1
invalid_${BASE_USERNAME}_sha256_password_no_profiles_no_roles HTTP LoginFailure many
invalid_${BASE_USERNAME}_sha256_password_no_profiles_no_roles MySQL LoginFailure many
invalid_${BASE_USERNAME}_sha256_password_two_profiles_no_roles TCP LoginFailure 1
invalid_${BASE_USERNAME}_sha256_password_two_profiles_no_roles HTTP LoginFailure 1
invalid_${BASE_USERNAME}_sha256_password_two_profiles_no_roles HTTP LoginFailure many
invalid_${BASE_USERNAME}_sha256_password_two_profiles_no_roles MySQL LoginFailure many
invalid_${BASE_USERNAME}_sha256_password_two_profiles_two_roles TCP LoginFailure 1
invalid_${BASE_USERNAME}_sha256_password_two_profiles_two_roles HTTP LoginFailure 1
invalid_${BASE_USERNAME}_sha256_password_two_profiles_two_roles HTTP LoginFailure many
invalid_${BASE_USERNAME}_sha256_password_two_profiles_two_roles MySQL LoginFailure many
invalid_session_log_test_xml_user TCP LoginFailure 1
invalid_session_log_test_xml_user HTTP LoginFailure 1
invalid_session_log_test_xml_user HTTP LoginFailure many
invalid_session_log_test_xml_user MySQL LoginFailure many
invalid_session_log_test_xml_user PostgreSQL LoginFailure many
session_log_test_xml_user TCP LoginSuccess 1
session_log_test_xml_user TCP Logout 1
session_log_test_xml_user HTTP LoginSuccess 1
session_log_test_xml_user HTTP Logout 1
session_log_test_xml_user HTTP LoginSuccess many
session_log_test_xml_user HTTP Logout many
session_log_test_xml_user MySQL LoginSuccess 1
session_log_test_xml_user MySQL Logout 1
@ -1,6 +1,5 @@
#!/usr/bin/env bash
# Tags: long, no-parallel, no-fasttest
# Tag no-fasttest: Accesses CH via mysql table function (which is unavailable)

##################################################################################################
# Verify that login, logout, and login failure events are properly stored in system.session_log
@ -11,9 +10,8 @@
# Using multiple protocols
# * native TCP protocol with CH client
# * HTTP with CURL
# * MySQL - CH server accesses itself via mysql table function, query typically fails (unrelated)
# but auth should be performed properly.
# * PostgreSQL - CH server accesses itself via postgresql table function (currently out of order).
# * MySQL - CH server accesses itself via mysql table function.
# * PostgreSQL - CH server accesses itself via postgresql table function, but can't execute query (No LOGIN SUCCESS entry).
# * gRPC - not done yet
#
# There is a way to control how many times a query (e.g. via mysql table function) is retried
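The per-user lines earlier in the reference file (user, interface, event type, count) correspond to an aggregation over system.session_log. A minimal sketch of that kind of query is shown below; it assumes the standard system.session_log columns user, interface, and type, and the exact query used by the test may differ:

```sql
-- Summarise login/logout events per user, interface and event type;
-- repeated events collapse to 'many', mirroring the reference output.
SELECT
    user,
    interface,
    type,
    if(count() > 1, 'many', toString(count())) AS times
FROM system.session_log
WHERE user LIKE '%session_log_test%'
GROUP BY user, interface, type
ORDER BY user, interface, type;
```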
@ -53,7 +51,7 @@ function reportError()

function executeQuery()
{
## Execute query (provided via heredoc or herestring) and print query in case of error.
# Execute query (provided via heredoc or herestring) and print query in case of error.
trap 'rm -f ${TMP_QUERY_FILE}; trap - ERR RETURN' RETURN
# Since we want to report with current values supplied to this function call
# shellcheck disable=SC2064
@ -82,7 +80,7 @@ trap "cleanup" EXIT
function executeQueryExpectError()
{
cat - > "${TMP_QUERY_FILE}"
! ${CLICKHOUSE_CLIENT} "${@}" --multiquery --queries-file "${TMP_QUERY_FILE}" 2>&1 | tee -a ${TMP_QUERY_FILE}
! ${CLICKHOUSE_CLIENT} --multiquery --queries-file "${TMP_QUERY_FILE}" "${@}" 2>&1 | tee -a ${TMP_QUERY_FILE}
}

function createUser()
@ -121,6 +119,8 @@ function createUser()
executeQuery <<EOF
DROP USER IF EXISTS '${username}';
CREATE USER '${username}' IDENTIFIED WITH ${auth_type} ${password};
GRANT SELECT ON system.one TO ${username};
GRANT SELECT ON INFORMATION_SCHEMA.* TO ${username};
EOF
ALL_USERNAMES+=("${username}")
}
@ -144,7 +144,7 @@ function testTCP()
# Wrong username
executeQueryExpectError -u "invalid_${username}" \
<<< "SELECT 1 Format Null" \
| grep -Eq "Code: 516. .+ invalid_${username}: Authentication failed: password is incorrect or there is no user with such name"
| grep -Eq "Code: 516. .+ invalid_${username}: Authentication failed*"

# Wrong password
if [[ "${auth_type}" == "no_password" ]]
@ -154,7 +154,7 @@ function testTCP()
# user with `no_password` is able to login with any password, so it makes sense to skip this testcase.
executeQueryExpectError -u "${username}" --password "invalid_${password}" \
<<< "SELECT 1 Format Null" \
| grep -Eq "Code: 516. .+ ${username}: Authentication failed: password is incorrect or there is no user with such name"
| grep -Eq "Code: 516. .+ ${username}: Authentication failed: password is incorrect, or there is no user with such name"
fi
}

@ -173,8 +173,7 @@ function testHTTPWithURL()
# Wrong username
${CLICKHOUSE_CURL} -sS "${clickhouse_url}" \
-H "X-ClickHouse-User: invalid_${username}" -H "X-ClickHouse-Key: ${password}" \
-d 'SELECT 1 Format Null' \
| grep -Eq "Code: 516. .+ invalid_${username}: Authentication failed: password is incorrect or there is no user with such name"
-d 'SELECT 1 Format Null' | grep -Eq "Code: 516. DB::Exception: invalid_${username}: Authentication failed: password is incorrect, or there is no user with such name"

# Wrong password
if [[ "${auth_type}" == "no_password" ]]
@ -185,7 +184,7 @@ function testHTTPWithURL()
${CLICKHOUSE_CURL} -sS "${clickhouse_url}" \
-H "X-ClickHouse-User: ${username}" -H "X-ClickHouse-Key: invalid_${password}" \
-d 'SELECT 1 Format Null' \
| grep -Eq "Code: 516. .+ ${username}: Authentication failed: password is incorrect or there is no user with such name"
| grep -Eq "Code: 516. .+ ${username}: Authentication failed: password is incorrect, or there is no user with such name"
fi
}

@ -197,7 +196,7 @@ function testHTTP()

function testHTTPNamedSession()
{
# echo "HTTP endpoint with named session"
echo "HTTP endpoint with named session"
local HTTP_SESSION_ID
HTTP_SESSION_ID="session_id_$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 32)"
if [ -v CLICKHOUSE_URL_PARAMS ]
@ -212,7 +211,7 @@ function testHTTPNamedSession()

function testMySQL()
{
echo "MySQL endpoint"
echo "MySQL endpoint ${auth_type}"
local auth_type="${1}"
local username="${2}"
local password="${3}"
@ -225,58 +224,64 @@ function testMySQL()
then
echo "MySQL 'successful login' case is skipped for ${auth_type}."
else
# CH is able to log into itself via MySQL protocol but query fails.
executeQueryExpectError \
<<< "SELECT 1 FROM mysql('127.0.0.1:9004', 'system', 'numbers', '${username}', '${password}') LIMIT 1 \
FORMAT NUll" \
| grep -Eq "Code: 1000\. DB::Exception: .*"
executeQuery \
<<< "SELECT 1 FROM mysql('127.0.0.1:9004', 'system', 'one', '${username}', '${password}') LIMIT 1 \
FORMAT Null"
fi

# echo 'Wrong username'
echo 'Wrong username'
executeQueryExpectError \
<<< "SELECT 1 FROM mysql('127.0.0.1:9004', 'system', 'numbers', 'invalid_${username}', '${password}') LIMIT 1 \
FORMAT NUll" \
<<< "SELECT 1 FROM mysql('127.0.0.1:9004', 'system', 'one', 'invalid_${username}', '${password}') LIMIT 1 \
FORMAT Null" \
| grep -Eq "Code: 1000\. DB::Exception: .* invalid_${username}"

# echo 'Wrong password'

echo 'Wrong password'
if [[ "${auth_type}" == "no_password" ]]
then
# user with `no_password` is able to login with any password, so it makes sense to skip this testcase.
echo "MySQL 'wrong password' case is skipped for ${auth_type}."
else
# user with `no_password` is able to login with any password, so it makes sense to skip this testcase.
executeQueryExpectError \
<<< "SELECT 1 FROM mysql('127.0.0.1:9004', 'system', 'numbers', '${username}', 'invalid_${password}') LIMIT 1 \
FORMAT NUll" \
| grep -Eq "Code: 1000\. DB::Exception: .* ${username}"
<<< "SELECT 1 FROM mysql('127.0.0.1:9004', 'system', 'one', '${username}', 'invalid_${password}') LIMIT 1 \
FORMAT Null" | grep -Eq "Code: 1000\. DB::Exception: .* ${username}"
fi
}

# function testPostgreSQL()
# {
# local auth_type="${1}"
#
# # Right now it is impossible to log into CH via PostgreSQL protocol without a password.
# if [[ "${auth_type}" == "no_password" ]]
# then
# return 0
# fi
#
function testPostgreSQL()
{
echo "PostrgreSQL endpoint"
local auth_type="${1}"

if [[ "${auth_type}" == "sha256_password" || "${auth_type}" == "double_sha1_password" ]]
then
echo "PostgreSQL tests are skipped for ${auth_type}"
return 0
fi

# TODO: Uncomment this case after implementation of postgresql function
# Connecting to ClickHouse server
## Loging\Logout
## CH is being able to log into itself via PostgreSQL protocol but query fails.
#executeQueryExpectError \
# <<< "SELECT 1 FROM postgresql('localhost:9005', 'system', 'numbers', '${username}', '${password}') LIMIT 1 FORMAT NUll" \
# | grep -Eq "Code: 1001. DB::Exception: .* pqxx::broken_connection: .*"
#
# # Wrong username
# executeQueryExpectError \
# <<< "SELECT 1 FROM postgresql('localhost:9005', 'system', 'numbers', 'invalid_${username}', '${password}') LIMIT 1 FORMAT NUll" \
# | grep -Eq "Code: 1001. DB::Exception: .* pqxx::broken_connection: .*"
#
# # Wrong password
# executeQueryExpectError \
# <<< "SELECT 1 FROM postgresql('localhost:9005', 'system', 'numbers', '${username}', 'invalid_${password}') LIMIT 1 FORMAT NUll" \
# | grep -Eq "Code: 1001. DB::Exception: .* pqxx::broken_connection: .*"
# }
# <<< "SELECT 1 FROM postgresql('localhost:9005', 'system', 'one', '${username}', '${password}') LIMIT 1 FORMAT Null" \

# Wrong username
executeQueryExpectError \
<<< "SELECT 1 FROM postgresql('localhost:9005', 'system', 'one', 'invalid_${username}', '${password}') LIMIT 1 FORMAT Null" \
| grep -Eq "Invalid user or password"

if [[ "${auth_type}" == "no_password" ]]
then
# user with `no_password` is able to login with any password, so it makes sense to skip this testcase.
echo "PostgreSQL 'wrong password' case is skipped for ${auth_type}."
else
# Wrong password
executeQueryExpectError \
<<< "SELECT 1 FROM postgresql('localhost:9005', 'system', 'one', '${username}', 'invalid_${password}') LIMIT 1 FORMAT Null" \
| grep -Eq "Invalid user or password"
fi
}

function runEndpointTests()
{
@ -301,9 +306,9 @@ function runEndpointTests()
testTCP "${auth_type}" "${username}" "${password}"
testHTTP "${auth_type}" "${username}" "${password}"

# testHTTPNamedSession "${auth_type}" "${username}" "${password}"
testHTTPNamedSession "${auth_type}" "${username}" "${password}"
testMySQL "${auth_type}" "${username}" "${password}"
# testPostgreSQL "${auth_type}" "${username}" "${password}"
testPostgreSQL "${auth_type}" "${username}" "${password}"
}

function testAsUserIdentifiedBy()
@ -1,3 +1,5 @@
-- { echoOn }
SELECT number, ip, ip % number FROM (SELECT number, toIPv4('1.2.3.4') as ip FROM numbers(10, 20));
10 1.2.3.4 0
11 1.2.3.4 3
12 1.2.3.4 4
@ -18,3 +20,24 @@
27 1.2.3.4 13
28 1.2.3.4 0
29 1.2.3.4 1
SELECT number, ip, number % ip FROM (SELECT number, toIPv4OrNull('0.0.0.3') as ip FROM numbers(10, 20));
10 0.0.0.3 1
11 0.0.0.3 2
12 0.0.0.3 0
13 0.0.0.3 1
14 0.0.0.3 2
15 0.0.0.3 0
16 0.0.0.3 1
17 0.0.0.3 2
18 0.0.0.3 0
19 0.0.0.3 1
20 0.0.0.3 2
21 0.0.0.3 0
22 0.0.0.3 1
23 0.0.0.3 2
24 0.0.0.3 0
25 0.0.0.3 1
26 0.0.0.3 2
27 0.0.0.3 0
28 0.0.0.3 1
29 0.0.0.3 2
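As a quick sanity check on the rows above: an IPv4 value takes part in % as its underlying UInt32, so toIPv4('1.2.3.4') behaves as 16909060 (0x01020304) and the remainders are plain integer arithmetic. A small illustration:

```sql
-- 16909060 % 11 = 3, which matches the row '11 1.2.3.4 3' above.
SELECT IPv4StringToNum('1.2.3.4') AS ip_num, ip_num % 11 AS remainder;
```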
@ -1 +1,4 @@
-- { echoOn }
SELECT number, ip, ip % number FROM (SELECT number, toIPv4('1.2.3.4') as ip FROM numbers(10, 20));
SELECT number, ip, number % ip FROM (SELECT number, toIPv4OrNull('0.0.0.3') as ip FROM numbers(10, 20));
