Merge pull request #25773 from ClickHouse/system-query-log-map-type-merge
Merging #23934
Commit: 878a159538
@@ -409,10 +409,10 @@ create view right_query_log as select *
'$(cat "right-query-log.tsv.columns")');

create view query_logs as
select 0 version, query_id, ProfileEvents.Names, ProfileEvents.Values,
select 0 version, query_id, ProfileEvents.keys, ProfileEvents.values,
query_duration_ms, memory_usage from left_query_log
union all
select 1 version, query_id, ProfileEvents.Names, ProfileEvents.Values,
select 1 version, query_id, ProfileEvents.keys, ProfileEvents.values,
query_duration_ms, memory_usage from right_query_log
;

@@ -424,7 +424,7 @@ create table query_run_metric_arrays engine File(TSV, 'analyze/query-run-metric-
with (
-- sumMapState with the list of all keys with '-0.' values. Negative zero is because
-- sumMap removes keys with positive zeros.
with (select groupUniqArrayArray(ProfileEvents.Names) from query_logs) as all_names
with (select groupUniqArrayArray(ProfileEvents.keys) from query_logs) as all_names
select arrayReduce('sumMapState', [(all_names, arrayMap(x->-0., all_names))])
) as all_metrics
select test, query_index, version, query_id,
@@ -433,8 +433,8 @@ create table query_run_metric_arrays engine File(TSV, 'analyze/query-run-metric-
[
all_metrics,
arrayReduce('sumMapState',
[(ProfileEvents.Names,
arrayMap(x->toFloat64(x), ProfileEvents.Values))]
[(ProfileEvents.keys,
arrayMap(x->toFloat64(x), ProfileEvents.values))]
),
arrayReduce('sumMapState', [(
['client_time', 'server_time', 'memory_usage'],
@@ -1005,7 +1005,7 @@ create table unstable_run_metrics engine File(TSVWithNamesAndTypes,
'unstable-run-metrics.$version.rep') as
select
test, query_index, query_id,
ProfileEvents.Values value, ProfileEvents.Names metric
ProfileEvents.values value, ProfileEvents.keys metric
from query_log array join ProfileEvents
join unstable_query_runs using (query_id)
;
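The script changes above reflect that `ProfileEvents` in the query log is now a single `Map(String, UInt64)` column instead of two parallel `Nested` arrays. A minimal, illustrative sketch of the two access styles, assuming a server that already includes this change (column and event names are taken from examples elsewhere in this diff):

```sql
-- Read individual counters by key from the Map column...
SELECT
    query_id,
    ProfileEvents['SelectQuery'] AS select_queries,
    ProfileEvents['RealTimeMicroseconds'] AS real_time_us
FROM system.query_log
WHERE type = 'QueryFinish'
LIMIT 10;

-- ...or decompose it into parallel arrays via the .keys/.values subcolumns,
-- which is what the updated performance-comparison script does.
SELECT query_id, ProfileEvents.keys, ProfileEvents.values
FROM system.query_log
LIMIT 10;
```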
@@ -53,10 +53,8 @@ memory_usage: 0
peak_memory_usage: 0
query: SELECT * from system.processes LIMIT 10 FORMAT Vertical;
thread_ids: [67]
ProfileEvents.Names: ['Query','SelectQuery','ReadCompressedBytes','CompressedReadBufferBlocks','CompressedReadBufferBytes','IOBufferAllocs','IOBufferAllocBytes','ContextLock','RWLockAcquiredReadLocks']
ProfileEvents.Values: [1,1,36,1,10,1,89,16,1]
Settings.Names: ['use_uncompressed_cache','load_balancing','log_queries','max_memory_usage']
Settings.Values: ['0','in_order','1','10000000000']
ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
Settings: {'background_pool_size':'32','load_balancing':'random','allow_suspicious_low_cardinality_types':'1','distributed_aggregation_memory_efficient':'1','skip_unavailable_shards':'1','log_queries':'1','max_bytes_before_external_group_by':'20000000000','max_bytes_before_external_sort':'20000000000','allow_introspection_functions':'1'}

1 rows in set. Elapsed: 0.002 sec.
```
@@ -84,12 +84,10 @@ Columns:
- `forwarded_for` ([String](../../sql-reference/data-types/string.md)) — HTTP header `X-Forwarded-For` passed in the HTTP query.
- `quota_key` ([String](../../sql-reference/data-types/string.md)) — The `quota key` specified in the [quotas](../../operations/quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/map.md)) — ProfileEvents that measure different metrics. Their descriptions can be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events).
- `Settings` ([Map(String, String)](../../sql-reference/data-types/map.md)) — Settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `log_comment` ([String](../../sql-reference/data-types/string.md)) — Log comment. It can be set to an arbitrary string no longer than [max_query_size](../../operations/settings/settings.md#settings-max_query_size). An empty string if it is not defined.
- `thread_ids` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Thread ids that are participating in query execution.
- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics. The description of them could be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events)
- `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Values of metrics that are listed in the `ProfileEvents.Names` column.
- `Settings.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `Settings.Values` ([Array(String)](../../sql-reference/data-types/array.md)) — Values of settings that are listed in the `Settings.Names` column.
- `used_aggregate_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `aggregate functions`, which were used during query execution.
- `used_aggregate_function_combinators` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `aggregate functions combinators`, which were used during query execution.
- `used_database_engines` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `database engines`, which were used during query execution.
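Since the `ProfileEvents.Names`/`ProfileEvents.Values` and `Settings.Names`/`Settings.Values` pairs removed above are replaced by single Map columns, the old array shape can still be reproduced with `mapKeys`/`mapValues`. A hedged sketch, not part of the documented example:

```sql
-- Equivalent of the removed Nested-style columns, built from the Map columns.
SELECT
    query_id,
    mapKeys(ProfileEvents)   AS profile_event_names,
    mapValues(ProfileEvents) AS profile_event_values,
    mapKeys(Settings)        AS setting_names,
    mapValues(Settings)      AS setting_values
FROM system.query_log
WHERE event_date = today()
LIMIT 5;
```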
@@ -109,68 +107,49 @@ SELECT * FROM system.query_log WHERE type = 'QueryFinish' AND (query LIKE '%toDa
``` text
Row 1:
──────
type: QueryFinish
event_date: 2021-03-18
event_time: 2021-03-18 20:54:18
event_time_microseconds: 2021-03-18 20:54:18.676686
query_start_time: 2021-03-18 20:54:18
query_start_time_microseconds: 2021-03-18 20:54:18.673934
query_duration_ms: 2
read_rows: 100
read_bytes: 800
type: QueryStart
event_date: 2020-09-11
event_time: 2020-09-11 10:08:17
event_time_microseconds: 2020-09-11 10:08:17.063321
query_start_time: 2020-09-11 10:08:17
query_start_time_microseconds: 2020-09-11 10:08:17.063321
query_duration_ms: 0
read_rows: 0
read_bytes: 0
written_rows: 0
written_bytes: 0
result_rows: 2
result_bytes: 4858
result_rows: 0
result_bytes: 0
memory_usage: 0
current_database: default
query: SELECT uniqArray([1, 1, 2]), SUBSTRING('Hello, world', 7, 5), flatten([[[BIT_AND(123)]], [[mod(3, 2)], [CAST('1' AS INTEGER)]]]), week(toDate('2000-12-05')), CAST(arrayJoin([NULL, NULL]) AS Nullable(TEXT)), avgOrDefaultIf(number, number % 2), sumOrNull(number), toTypeName(sumOrNull(number)), countIf(toDate('2000-12-05') + number as d, toDayOfYear(d) % 2) FROM numbers(100)
normalized_query_hash: 17858008518552525706
query_kind: Select
databases: ['_table_function']
tables: ['_table_function.numbers']
columns: ['_table_function.numbers.number']
query: INSERT INTO test1 VALUES
exception_code: 0
exception:
stack_trace:
is_initial_query: 1
user: default
query_id: 58f3d392-0fa0-4663-ae1d-29917a1a9c9c
query_id: 50a320fd-85a8-49b8-8761-98a86bcbacef
address: ::ffff:127.0.0.1
port: 37486
port: 33452
initial_user: default
initial_query_id: 58f3d392-0fa0-4663-ae1d-29917a1a9c9c
initial_query_id: 50a320fd-85a8-49b8-8761-98a86bcbacef
initial_address: ::ffff:127.0.0.1
initial_port: 37486
initial_port: 33452
interface: 1
os_user: sevirov
client_hostname: clickhouse.ru-central1.internal
os_user: bharatnc
client_hostname: tower
client_name: ClickHouse
client_revision: 54447
client_version_major: 21
client_version_minor: 4
client_version_patch: 1
client_revision: 54437
client_version_major: 20
client_version_minor: 7
client_version_patch: 2
http_method: 0
http_user_agent:
http_referer:
forwarded_for:
quota_key:
revision: 54449
log_comment:
thread_ids: [587,11939]
ProfileEvents.Names: ['Query','SelectQuery','ReadCompressedBytes','CompressedReadBufferBlocks','CompressedReadBufferBytes','IOBufferAllocs','IOBufferAllocBytes','ArenaAllocChunks','ArenaAllocBytes','FunctionExecute','TableFunctionExecute','NetworkSendElapsedMicroseconds','SelectedRows','SelectedBytes','ContextLock','RWLockAcquiredReadLocks','RealTimeMicroseconds','UserTimeMicroseconds','SystemTimeMicroseconds','SoftPageFaults','OSCPUVirtualTimeMicroseconds','OSWriteBytes']
ProfileEvents.Values: [1,1,36,1,10,2,1048680,1,4096,36,1,110,100,800,77,1,3137,1476,1101,8,2577,8192]
Settings.Names: ['load_balancing','max_memory_usage']
Settings.Values: ['random','10000000000']
used_aggregate_functions: ['groupBitAnd','avg','sum','count','uniq']
used_aggregate_function_combinators: ['OrDefault','If','OrNull','Array']
used_database_engines: []
used_data_type_families: ['String','Array','Int32','Nullable']
used_dictionaries: []
used_formats: []
used_functions: ['toWeek','CAST','arrayFlatten','toTypeName','toDayOfYear','addDays','array','toDate','modulo','substring','plus']
used_storages: []
used_table_functions: ['numbers']
revision: 54440
thread_ids: []
ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
Settings: {'background_pool_size':'32','load_balancing':'random','allow_suspicious_low_cardinality_types':'1','distributed_aggregation_memory_efficient':'1','skip_unavailable_shards':'1','log_queries':'1','max_bytes_before_external_group_by':'20000000000','max_bytes_before_external_sort':'20000000000','allow_introspection_functions':'1'}
```

**See Also**
@@ -58,8 +58,7 @@ Columns:
- `http_user_agent` ([String](../../sql-reference/data-types/string.md)) — The `UserAgent` header passed in the HTTP request.
- `quota_key` ([String](../../sql-reference/data-types/string.md)) — The “quota key” specified in the [quotas](../../operations/quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics for this thread. The description of them could be found in the table [system.events](#system_tables-events).
- `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` column.
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/map.md)) — ProfileEvents that measure different metrics for this thread. Their descriptions can be found in the table [system.events](#system_tables-events).

**Example**

@@ -107,8 +106,7 @@ http_method: 0
http_user_agent:
quota_key:
revision: 54440
ProfileEvents.Names: ['Query','InsertQuery','FileOpen','WriteBufferFromFileDescriptorWrite','WriteBufferFromFileDescriptorWriteBytes','ReadCompressedBytes','CompressedReadBufferBlocks','CompressedReadBufferBytes','IOBufferAllocs','IOBufferAllocBytes','FunctionExecute','CreatedWriteBufferOrdinary','DiskWriteElapsedMicroseconds','NetworkReceiveElapsedMicroseconds','NetworkSendElapsedMicroseconds','InsertedRows','InsertedBytes','SelectedRows','SelectedBytes','MergeTreeDataWriterRows','MergeTreeDataWriterUncompressedBytes','MergeTreeDataWriterCompressedBytes','MergeTreeDataWriterBlocks','MergeTreeDataWriterBlocksAlreadySorted','ContextLock','RWLockAcquiredReadLocks','RealTimeMicroseconds','UserTimeMicroseconds','SoftPageFaults','OSCPUVirtualTimeMicroseconds','OSWriteBytes','OSReadChars','OSWriteChars']
ProfileEvents.Values: [1,1,11,11,591,148,3,71,29,6533808,1,11,72,18,47,1,12,1,12,1,12,189,1,1,10,2,70853,2748,49,2747,45056,422,1520]
ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
```

**See Also**
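Because `system.query_thread_log` now stores one `ProfileEvents` map per thread, per-query totals can be assembled with `sumMap` over the `.keys`/`.values` subcolumns. A hedged sketch, not taken from the documentation:

```sql
-- Aggregate per-thread counters into one map per query.
SELECT
    query_id,
    sumMap(ProfileEvents.keys, ProfileEvents.values) AS profile_events_total
FROM system.query_thread_log
WHERE event_date = today()
GROUP BY query_id
LIMIT 10;
```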
@@ -625,10 +625,8 @@ ClickHouse creates this table. [query_log](server-configur
- `quota_key` (String) — The “quota key” specified in the [quotas](quotas.md) setting (see `keyed`).
- `revision` (UInt32) — ClickHouse revision.
- `thread_numbers` (Array(UInt32)) — Number of threads that are participating in query execution.
- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. The description of them can be found in the table [system.events](#system_tables-events).
- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` column.
- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `Settings.Values` (Array(String)) — Values of settings that are listed in the `Settings.Names` column.
- `ProfileEvents` (Map(String, UInt64)) — ProfileEvents that measure different metrics. The description of them can be found in the table [system.events](#system_tables-events).
- `Settings` (Map(String, String)) — Settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.

Depending on the status of the query, one or two rows are added to the `query_log` table for each query:

@@ -698,8 +696,7 @@ ClickHouse creates this table. [query_thread_log](server-c
- `http_user_agent` (String) — The `UserAgent` header passed in the HTTP request.
- `quota_key` (String) — The “quota key” specified in the [quotas](quotas.md) setting (see `keyed`).
- `revision` (UInt32) — ClickHouse revision.
- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. The description of them can be found in the table [system.events](#system_tables-events).
- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` column.
- `ProfileEvents` (Map(String, UInt64)) — ProfileEvents that measure different metrics for this thread. The description of them can be found in the table [system.events](#system_tables-events).

By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query_thread_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.
@@ -84,12 +84,10 @@ ClickHouse does not delete data from the table automatically
- `forwarded_for` ([String](../../sql-reference/data-types/string.md)) — HTTP header `X-Forwarded-For`.
- `quota_key` ([String](../../sql-reference/data-types/string.md)) — The `quota key` specified in the [quotas](quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/map.md)) — Counters that measure different metrics. Their descriptions can be found in the table [system.events](#system_tables-events).
- `Settings` ([Map(String, String)](../../sql-reference/data-types/map.md)) — Settings that were changed when the client ran the query. To enable logging of changes to settings, set the `log_query_settings` parameter to 1.
- `log_comment` ([String](../../sql-reference/data-types/string.md)) — Log comment. An arbitrary string no longer than [max_query_size](../../operations/settings/settings.md#settings-max_query_size). An empty string if it is not defined.
- `thread_ids` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — IDs of the threads participating in query execution.
- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics. Their descriptions can be found in the table [system.events](#system_tables-events).
- `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Values of the metrics listed in the `ProfileEvents.Names` column.
- `Settings.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Names of settings that were changed when the client ran the query. To enable logging of changes to settings, set the `log_query_settings` parameter to 1.
- `Settings.Values` ([Array(String)](../../sql-reference/data-types/array.md)) — Values of the settings listed in the `Settings.Names` column.
- `used_aggregate_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of the `aggregate functions` used during query execution.
- `used_aggregate_function_combinators` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of the `aggregate function combinators` used during query execution.
- `used_database_engines` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of the `database engines` used during query execution.
@@ -109,68 +107,49 @@ SELECT * FROM system.query_log WHERE type = 'QueryFinish' AND (query LIKE '%toDa
``` text
Row 1:
──────
type: QueryFinish
event_date: 2021-03-18
event_time: 2021-03-18 20:54:18
event_time_microseconds: 2021-03-18 20:54:18.676686
query_start_time: 2021-03-18 20:54:18
query_start_time_microseconds: 2021-03-18 20:54:18.673934
query_duration_ms: 2
read_rows: 100
read_bytes: 800
type: QueryStart
event_date: 2020-09-11
event_time: 2020-09-11 10:08:17
event_time_microseconds: 2020-09-11 10:08:17.063321
query_start_time: 2020-09-11 10:08:17
query_start_time_microseconds: 2020-09-11 10:08:17.063321
query_duration_ms: 0
read_rows: 0
read_bytes: 0
written_rows: 0
written_bytes: 0
result_rows: 2
result_bytes: 4858
result_rows: 0
result_bytes: 0
memory_usage: 0
current_database: default
query: SELECT uniqArray([1, 1, 2]), SUBSTRING('Hello, world', 7, 5), flatten([[[BIT_AND(123)]], [[mod(3, 2)], [CAST('1' AS INTEGER)]]]), week(toDate('2000-12-05')), CAST(arrayJoin([NULL, NULL]) AS Nullable(TEXT)), avgOrDefaultIf(number, number % 2), sumOrNull(number), toTypeName(sumOrNull(number)), countIf(toDate('2000-12-05') + number as d, toDayOfYear(d) % 2) FROM numbers(100)
normalized_query_hash: 17858008518552525706
query_kind: Select
databases: ['_table_function']
tables: ['_table_function.numbers']
columns: ['_table_function.numbers.number']
query: INSERT INTO test1 VALUES
exception_code: 0
exception:
stack_trace:
is_initial_query: 1
user: default
query_id: 58f3d392-0fa0-4663-ae1d-29917a1a9c9c
query_id: 50a320fd-85a8-49b8-8761-98a86bcbacef
address: ::ffff:127.0.0.1
port: 37486
port: 33452
initial_user: default
initial_query_id: 58f3d392-0fa0-4663-ae1d-29917a1a9c9c
initial_query_id: 50a320fd-85a8-49b8-8761-98a86bcbacef
initial_address: ::ffff:127.0.0.1
initial_port: 37486
initial_port: 33452
interface: 1
os_user: sevirov
client_hostname: clickhouse.ru-central1.internal
os_user: bharatnc
client_hostname: tower
client_name: ClickHouse
client_revision: 54447
client_version_major: 21
client_version_minor: 4
client_version_patch: 1
client_revision: 54437
client_version_major: 20
client_version_minor: 7
client_version_patch: 2
http_method: 0
http_user_agent:
http_referer:
forwarded_for:
quota_key:
revision: 54449
log_comment:
thread_ids: [587,11939]
ProfileEvents.Names: ['Query','SelectQuery','ReadCompressedBytes','CompressedReadBufferBlocks','CompressedReadBufferBytes','IOBufferAllocs','IOBufferAllocBytes','ArenaAllocChunks','ArenaAllocBytes','FunctionExecute','TableFunctionExecute','NetworkSendElapsedMicroseconds','SelectedRows','SelectedBytes','ContextLock','RWLockAcquiredReadLocks','RealTimeMicroseconds','UserTimeMicroseconds','SystemTimeMicroseconds','SoftPageFaults','OSCPUVirtualTimeMicroseconds','OSWriteBytes']
ProfileEvents.Values: [1,1,36,1,10,2,1048680,1,4096,36,1,110,100,800,77,1,3137,1476,1101,8,2577,8192]
Settings.Names: ['load_balancing','max_memory_usage']
Settings.Values: ['random','10000000000']
used_aggregate_functions: ['groupBitAnd','avg','sum','count','uniq']
used_aggregate_function_combinators: ['OrDefault','If','OrNull','Array']
used_database_engines: []
used_data_type_families: ['String','Array','Int32','Nullable']
used_dictionaries: []
used_formats: []
used_functions: ['toWeek','CAST','arrayFlatten','toTypeName','toDayOfYear','addDays','array','toDate','modulo','substring','plus']
used_storages: []
used_table_functions: ['numbers']
revision: 54440
thread_ids: []
ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
Settings: {'background_pool_size':'32','load_balancing':'random','allow_suspicious_low_cardinality_types':'1','distributed_aggregation_memory_efficient':'1','skip_unavailable_shards':'1','log_queries':'1','max_bytes_before_external_group_by':'20000000000','max_bytes_before_external_sort':'20000000000','allow_introspection_functions':'1'}
```

**See Also**
@@ -57,8 +57,7 @@ ClickHouse does not delete data from the table automatically
- `http_user_agent` ([String](../../sql-reference/data-types/string.md)) — HTTP header `UserAgent`.
- `quota_key` ([String](../../sql-reference/data-types/string.md)) — The “quota key” specified in the [quotas](quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics for this thread. Their descriptions can be found in the table [system.events](#system_tables-events).
- `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Values of the metrics for this thread listed in the `ProfileEvents.Names` column.
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/map.md)) — Counters that measure different metrics for this thread. Their descriptions can be found in the table [system.events](#system_tables-events).

**Example**

@@ -106,8 +105,7 @@ http_method: 0
http_user_agent:
quota_key:
revision: 54440
ProfileEvents.Names: ['Query','InsertQuery','FileOpen','WriteBufferFromFileDescriptorWrite','WriteBufferFromFileDescriptorWriteBytes','ReadCompressedBytes','CompressedReadBufferBlocks','CompressedReadBufferBytes','IOBufferAllocs','IOBufferAllocBytes','FunctionExecute','CreatedWriteBufferOrdinary','DiskWriteElapsedMicroseconds','NetworkReceiveElapsedMicroseconds','NetworkSendElapsedMicroseconds','InsertedRows','InsertedBytes','SelectedRows','SelectedBytes','MergeTreeDataWriterRows','MergeTreeDataWriterUncompressedBytes','MergeTreeDataWriterCompressedBytes','MergeTreeDataWriterBlocks','MergeTreeDataWriterBlocksAlreadySorted','ContextLock','RWLockAcquiredReadLocks','RealTimeMicroseconds','UserTimeMicroseconds','SoftPageFaults','OSCPUVirtualTimeMicroseconds','OSWriteBytes','OSReadChars','OSWriteChars']
ProfileEvents.Values: [1,1,11,11,591,148,3,71,29,6533808,1,11,72,18,47,1,12,1,12,1,12,189,1,1,10,2,70853,2748,49,2747,45056,422,1520]
ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
```

**See Also**
@@ -81,9 +81,9 @@ ClickHouse does not delete data from the table automatically. See [introduction](
- `http_user_agent` ([String](../../sql-reference/data-types/string.md)) — The `UserAgent` header passed in the HTTP request.
- `quota_key` ([String](../../sql-reference/data-types/string.md)) — The “quota key” specified in the [quotas](../../operations/quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `thread_numbers` ([Array(UInt32)](../../sql-reference/data-types/array.md)) — Number of threads participating in query execution.
- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics. Their descriptions can be found in [system.events](../../operations/system-tables/events.md#system_tables-events).
- `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Values of the metrics listed in the `ProfileEvents.Names` column.
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/map.md)) — Counters that measure different metrics. Their descriptions can be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events).
- `Settings` ([Map(String, String)](../../sql-reference/data-types/map.md)) — Settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `thread_ids` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — IDs of the threads participating in query execution.
- `Settings.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Names of settings that were changed when the client ran the query. To enable logging of changes to settings, set the `log_query_settings` parameter to 1.
- `Settings.Values` ([Array(String)](../../sql-reference/data-types/array.md)) — Values of the settings listed in the `Settings.Names` column.
**Example**

@@ -133,10 +133,8 @@ http_user_agent:
quota_key:
revision: 54434
thread_ids: []
ProfileEvents.Names: []
ProfileEvents.Values: []
Settings.Names: ['use_uncompressed_cache','load_balancing','log_queries','max_memory_usage']
Settings.Values: ['0','random','1','10000000000']
ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
Settings: {'background_pool_size':'32','load_balancing':'random','allow_suspicious_low_cardinality_types':'1','distributed_aggregation_memory_efficient':'1','skip_unavailable_shards':'1','log_queries':'1','max_bytes_before_external_group_by':'20000000000','max_bytes_before_external_sort':'20000000000','allow_introspection_functions':'1'}
```

**See Also**
@@ -61,8 +61,7 @@ ClickHouse does not delete data from the table automatically. See [Introduction](../../operations/sys
- `http_user_agent` ([String](../../sql-reference/data-types/string.md)) — The `UserAgent` header passed in the HTTP request.
- `quota_key` ([String](../../sql-reference/data-types/string.md)) — The “quota key” specified in the [quotas](../../operations/quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `ProfileEvents.Names` ([Array(String)](../../sql-reference/data-types/array.md)) — Counters that measure different metrics for this thread. The description of them can be found in the table [system.events](#system_tables-events).
- `ProfileEvents.Values` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` column.
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/map.md)) — Counters that measure different metrics for this thread. The description of them can be found in the table [system.events](#system_tables-events).

**Example**

@@ -108,8 +107,7 @@ http_method: 0
http_user_agent:
quota_key:
revision: 54434
ProfileEvents.Names: ['ContextLock','RealTimeMicroseconds','UserTimeMicroseconds','OSCPUWaitMicroseconds','OSCPUVirtualTimeMicroseconds']
ProfileEvents.Values: [1,97,81,5,81]
ProfileEvents: {'Query':1,'SelectQuery':1,'ReadCompressedBytes':36,'CompressedReadBufferBlocks':1,'CompressedReadBufferBytes':10,'IOBufferAllocs':1,'IOBufferAllocBytes':89,'ContextLock':15,'RWLockAcquiredReadLocks':1}
...
```
src/Core/NamesAndAliases.h (new file, 53 lines)
@@ -0,0 +1,53 @@
#pragma once

#include <map>
#include <list>
#include <optional>
#include <string>
#include <set>
#include <initializer_list>

#include <DataTypes/IDataType.h>
#include <Core/Names.h>

namespace DB
{

class NameAndAliasPair
{
public:
NameAndAliasPair(const String & name_, const DataTypePtr & type_, const String & expression_)
: name(name_)
, type(type_)
, expression(expression_)
{}

String name;
DataTypePtr type;
String expression;
};

/// This needed to use structured bindings for NameAndTypePair
/// const auto & [name, type] = name_and_type
template <int I>
decltype(auto) get(const NameAndAliasPair & name_and_alias)
{
if constexpr (I == 0)
return name_and_alias.name;
else if constexpr (I == 1)
return name_and_alias.type;
else if constexpr (I == 2)
return name_and_alias.expression;
}

using NamesAndAliases = std::vector<NameAndAliasPair>;

}

namespace std
{
template <> struct tuple_size<DB::NameAndAliasPair> : std::integral_constant<size_t, 2> {};
template <> struct tuple_element<0, DB::NameAndAliasPair> { using type = DB::String; };
template <> struct tuple_element<1, DB::NameAndAliasPair> { using type = DB::DataTypePtr; };
template <> struct tuple_element<2, DB::NameAndAliasPair> { using type = DB::String; };
}
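Each `NameAndAliasPair` above is a (name, type, expression) triple that, via `InterpreterCreateQuery::formatColumns` further down in this diff, becomes a `name Type ALIAS expression` column declaration in the CREATE TABLE of a system log table. A hedged SQL sketch of the resulting shape (the table name is made up for illustration; the alias shown is the one this diff adds for the OpenTelemetry span log):

```sql
-- Illustrative only: the kind of declaration generated from a NameAndAliasPair.
CREATE TABLE alias_column_sketch
(
    `attribute` Map(String, String),
    `attribute.names` Array(String) ALIAS mapKeys(attribute)
)
ENGINE = MergeTree
ORDER BY tuple();
```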
@@ -2,6 +2,7 @@

#include <Poco/Util/AbstractConfiguration.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnMap.h>
#include <Common/typeid_cast.h>
#include <string.h>
#include <boost/program_options/options_description.hpp>
@@ -56,40 +57,28 @@ void Settings::loadSettingsFromConfig(const String & path, const Poco::Util::Abs
}
}

void Settings::dumpToArrayColumns(IColumn * column_names_, IColumn * column_values_, bool changed_only)
void Settings::dumpToMapColumn(IColumn * column, bool changed_only)
{
/// Convert ptr and make simple check
auto * column_names = (column_names_) ? &typeid_cast<ColumnArray &>(*column_names_) : nullptr;
auto * column_values = (column_values_) ? &typeid_cast<ColumnArray &>(*column_values_) : nullptr;
auto * column_map = column ? &typeid_cast<ColumnMap &>(*column) : nullptr;
if (!column_map)
return;

size_t count = 0;
auto & offsets = column_map->getNestedColumn().getOffsets();
auto & tuple_column = column_map->getNestedData();
auto & key_column = tuple_column.getColumn(0);
auto & value_column = tuple_column.getColumn(1);

size_t size = 0;
for (const auto & setting : all(changed_only ? SKIP_UNCHANGED : SKIP_NONE))
{
if (column_names)
{
auto name = setting.getName();
column_names->getData().insertData(name.data(), name.size());
}
if (column_values)
column_values->getData().insert(setting.getValueString());
++count;
key_column.insertData(name.data(), name.size());
value_column.insert(setting.getValueString());
size++;
}

if (column_names)
{
auto & offsets = column_names->getOffsets();
offsets.push_back(offsets.back() + count);
}

/// Nested columns case
bool the_same_offsets = column_names && column_values && column_names->getOffsetsPtr() == column_values->getOffsetsPtr();

if (column_values && !the_same_offsets)
{
auto & offsets = column_values->getOffsets();
offsets.push_back(offsets.back() + count);
}
offsets.push_back(offsets.back() + size);
}

void Settings::addProgramOptions(boost::program_options::options_description & options)
@@ -441,7 +441,7 @@ class IColumn;
M(Bool, asterisk_include_alias_columns, false, "Include ALIAS columns for wildcard query", 0) \
M(Bool, optimize_skip_merged_partitions, false, "Skip partitions with one part with level > 0 in optimize final", 0) \
M(Bool, optimize_on_insert, true, "Do the same transformation for inserted block of data as if merge was done on this block.", 0) \
M(Bool, allow_experimental_map_type, false, "Allow data type Map", 0) \
M(Bool, allow_experimental_map_type, true, "Obsolete setting, does nothing.", 0) \
M(Bool, allow_experimental_window_functions, false, "Allow experimental window functions", 0) \
M(Bool, allow_experimental_projection_optimization, false, "Enable projection optimization when processing SELECT queries", 0) \
M(Bool, force_optimize_projection, false, "If projection optimization is enabled, SELECT queries need to use projection", 0) \
@@ -596,8 +596,8 @@ struct Settings : public BaseSettings<SettingsTraits>
/// Load settings from configuration file, at "path" prefix in configuration.
void loadSettingsFromConfig(const String & path, const Poco::Util::AbstractConfiguration & config);

/// Dumps profile events to two columns of type Array(String)
void dumpToArrayColumns(IColumn * column_names, IColumn * column_values, bool changed_only = true);
/// Dumps profile events to column of type Map(String, String)
void dumpToMapColumn(IColumn * column, bool changed_only = true);

/// Adds program options to set the settings from a command line.
/// (Don't forget to call notify() on the `variables_map` after parsing it!)
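The `Settings` column of `system.query_log` is now a `Map(String, String)` filled by `dumpToMapColumn` above; reading a missing key yields an empty string. A hedged usage sketch (setting names are taken from the examples earlier in this diff):

```sql
-- Filter query_log rows by the value of an individual changed setting.
SELECT
    query_id,
    Settings['max_memory_usage'] AS max_memory_usage,
    Settings['load_balancing']   AS load_balancing
FROM system.query_log
WHERE Settings['max_memory_usage'] != ''
LIMIT 5;
```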
@@ -11,17 +11,16 @@
namespace DB
{

Block AsynchronousMetricLogElement::createBlock()
NamesAndTypesList AsynchronousMetricLogElement::getNamesAndTypes()
{
ColumnsWithTypeAndName columns;

columns.emplace_back(std::make_shared<DataTypeDate>(), "event_date");
columns.emplace_back(std::make_shared<DataTypeDateTime>(), "event_time");
columns.emplace_back(std::make_shared<DataTypeDateTime64>(6), "event_time_microseconds");
columns.emplace_back(std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()), "name");
columns.emplace_back(std::make_shared<DataTypeFloat64>(), "value");

return Block(columns);
return
{
{"event_date", std::make_shared<DataTypeDate>()},
{"event_time", std::make_shared<DataTypeDateTime>()},
{"event_time_microseconds", std::make_shared<DataTypeDateTime64>(6)},
{"name", std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>())},
{"value", std::make_shared<DataTypeFloat64>(),}
};
}

@@ -12,8 +12,8 @@
namespace DB
{

typedef double AsynchronousMetricValue;
typedef std::unordered_map<std::string, AsynchronousMetricValue> AsynchronousMetricValues;
using AsynchronousMetricValue = double;
using AsynchronousMetricValues = std::unordered_map<std::string, AsynchronousMetricValue>;

/** AsynchronousMetricLog is a log of metric values measured at regular time interval.
*/
@@ -27,7 +27,8 @@ struct AsynchronousMetricLogElement
double value;

static std::string name() { return "AsynchronousMetricLog"; }
static Block createBlock();
static NamesAndTypesList getNamesAndTypes();
static NamesAndAliases getNamesAndAliases() { return {}; }
void appendToBlock(MutableColumns & columns) const;
};

@@ -18,21 +18,21 @@ namespace DB
std::weak_ptr<CrashLog> CrashLog::crash_log;


Block CrashLogElement::createBlock()
NamesAndTypesList CrashLogElement::getNamesAndTypes()
{
return
{
{std::make_shared<DataTypeDate>(), "event_date"},
{std::make_shared<DataTypeDateTime>(), "event_time"},
{std::make_shared<DataTypeUInt64>(), "timestamp_ns"},
{std::make_shared<DataTypeInt32>(), "signal"},
{std::make_shared<DataTypeUInt64>(), "thread_id"},
{std::make_shared<DataTypeString>(), "query_id"},
{std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>()), "trace"},
{std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "trace_full"},
{std::make_shared<DataTypeString>(), "version"},
{std::make_shared<DataTypeUInt32>(), "revision"},
{std::make_shared<DataTypeString>(), "build_id"},
{"event_date", std::make_shared<DataTypeDate>()},
{"event_time", std::make_shared<DataTypeDateTime>()},
{"timestamp_ns", std::make_shared<DataTypeUInt64>()},
{"signal", std::make_shared<DataTypeInt32>()},
{"thread_id", std::make_shared<DataTypeUInt64>()},
{"query_id", std::make_shared<DataTypeString>()},
{"trace", std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>())},
{"trace_full", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
{"version", std::make_shared<DataTypeString>()},
{"revision", std::make_shared<DataTypeUInt32>()},
{"build_id", std::make_shared<DataTypeString>()},
};
}

@@ -60,7 +60,6 @@ void CrashLogElement::appendToBlock(MutableColumns & columns) const

}


void collectCrashLog(Int32 signal, UInt64 thread_id, const String & query_id, const StackTrace & stack_trace)
{
using namespace DB;
@@ -24,7 +24,8 @@ struct CrashLogElement
Array trace_full;

static std::string name() { return "CrashLog"; }
static Block createBlock();
static NamesAndTypesList getNamesAndTypes();
static NamesAndAliases getNamesAndAliases() { return {}; }
void appendToBlock(MutableColumns & columns) const;
};

@@ -302,6 +302,35 @@ ASTPtr InterpreterCreateQuery::formatColumns(const NamesAndTypesList & columns)
return columns_list;
}

ASTPtr InterpreterCreateQuery::formatColumns(const NamesAndTypesList & columns, const NamesAndAliases & alias_columns)
{
std::shared_ptr<ASTExpressionList> columns_list = std::static_pointer_cast<ASTExpressionList>(formatColumns(columns));

for (const auto & alias_column : alias_columns)
{
const auto column_declaration = std::make_shared<ASTColumnDeclaration>();
column_declaration->name = alias_column.name;

ParserDataType type_parser;
String type_name = alias_column.type->getName();
const char * type_pos = type_name.data();
const char * type_end = type_pos + type_name.size();
column_declaration->type = parseQuery(type_parser, type_pos, type_end, "data type", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);

column_declaration->default_specifier = "ALIAS";

const auto & alias = alias_column.expression;
const char * alias_pos = alias.data();
const char * alias_end = alias_pos + alias.size();
ParserExpression expression_parser;
column_declaration->default_expression = parseQuery(expression_parser, alias_pos, alias_end, "expression", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);

columns_list->children.emplace_back(column_declaration);
}

return columns_list;
}

ASTPtr InterpreterCreateQuery::formatColumns(const ColumnsDescription & columns)
{
auto columns_list = std::make_shared<ASTExpressionList>();
@@ -646,23 +675,6 @@ void InterpreterCreateQuery::validateTableStructure(const ASTCreateQuery & creat
}
}
}

if (!create.attach && !settings.allow_experimental_map_type)
{
for (const auto & name_and_type_pair : properties.columns.getAllPhysical())
{
WhichDataType which(*name_and_type_pair.type);
if (which.isMap())
{
const auto & type_name = name_and_type_pair.type->getName();
String message = "Cannot create table with column '" + name_and_type_pair.name + "' which type is '"
+ type_name + "' because experimental Map type is not allowed. "
+ "Set 'allow_experimental_map_type = 1' setting to enable";
throw Exception(message, ErrorCodes::ILLEGAL_COLUMN);
}
}

}
}

void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const
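The hunk above removes the guard that rejected `Map` columns unless `allow_experimental_map_type` was enabled, and the Settings.h hunk earlier marks that setting as obsolete. A hedged sketch of the user-visible effect (the table name is illustrative):

```sql
-- Previously rejected with "Set 'allow_experimental_map_type = 1' setting to enable";
-- after this change it should succeed with default settings.
CREATE TABLE map_example
(
    `event_time` DateTime,
    `counters` Map(String, UInt64)
)
ENGINE = MergeTree
ORDER BY event_time;
```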
@@ -1,12 +1,12 @@
#pragma once

#include <Core/NamesAndAliases.h>
#include <Access/AccessRightsElement.h>
#include <Interpreters/IInterpreter.h>
#include <Storages/ColumnsDescription.h>
#include <Storages/ConstraintsDescription.h>
#include <Storages/IStorage_fwd.h>
#include <Storages/StorageInMemoryMetadata.h>
#include <Common/ThreadPool.h>


namespace DB
@@ -31,8 +31,8 @@ public:

/// List of columns and their types in AST.
static ASTPtr formatColumns(const NamesAndTypesList & columns);
static ASTPtr formatColumns(const NamesAndTypesList & columns, const NamesAndAliases & alias_columns);
static ASTPtr formatColumns(const ColumnsDescription & columns);

static ASTPtr formatIndices(const IndicesDescription & indices);
static ASTPtr formatConstraints(const ConstraintsDescription & constraints);
static ASTPtr formatProjections(const ProjectionsDescription & projections);
@@ -8,21 +8,21 @@
namespace DB
{

Block MetricLogElement::createBlock()
NamesAndTypesList MetricLogElement::getNamesAndTypes()
{
ColumnsWithTypeAndName columns_with_type_and_name;
NamesAndTypesList columns_with_type_and_name;

columns_with_type_and_name.emplace_back(std::make_shared<DataTypeDate>(), "event_date");
columns_with_type_and_name.emplace_back(std::make_shared<DataTypeDateTime>(), "event_time");
columns_with_type_and_name.emplace_back(std::make_shared<DataTypeDateTime64>(6), "event_time_microseconds");
columns_with_type_and_name.emplace_back(std::make_shared<DataTypeUInt64>(), "milliseconds");
columns_with_type_and_name.emplace_back("event_date", std::make_shared<DataTypeDate>());
columns_with_type_and_name.emplace_back("event_time", std::make_shared<DataTypeDateTime>());
columns_with_type_and_name.emplace_back("event_time_microseconds", std::make_shared<DataTypeDateTime64>(6));
columns_with_type_and_name.emplace_back("milliseconds", std::make_shared<DataTypeUInt64>());

for (size_t i = 0, end = ProfileEvents::end(); i < end; ++i)
{
std::string name;
name += "ProfileEvent_";
name += ProfileEvents::getName(ProfileEvents::Event(i));
columns_with_type_and_name.emplace_back(std::make_shared<DataTypeUInt64>(), std::move(name));
columns_with_type_and_name.emplace_back(std::move(name), std::make_shared<DataTypeUInt64>());
}

for (size_t i = 0, end = CurrentMetrics::end(); i < end; ++i)
@@ -30,10 +30,10 @@ Block MetricLogElement::createBlock()
std::string name;
name += "CurrentMetric_";
name += CurrentMetrics::getName(CurrentMetrics::Metric(i));
columns_with_type_and_name.emplace_back(std::make_shared<DataTypeInt64>(), std::move(name));
columns_with_type_and_name.emplace_back(std::move(name), std::make_shared<DataTypeInt64>());
}

return Block(columns_with_type_and_name);
return columns_with_type_and_name;
}

@@ -25,7 +25,8 @@ struct MetricLogElement
std::vector<CurrentMetrics::Metric> current_metrics;

static std::string name() { return "MetricLog"; }
static Block createBlock();
static NamesAndTypesList getNamesAndTypes();
static NamesAndAliases getNamesAndAliases() { return {}; }
void appendToBlock(MutableColumns & columns) const;
};

@@ -6,6 +6,7 @@
#include <DataTypes/DataTypeDateTime64.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeMap.h>
#include <DataTypes/DataTypeUUID.h>

#include <Common/hex.h>
@@ -14,13 +15,13 @@
namespace DB
{

Block OpenTelemetrySpanLogElement::createBlock()
NamesAndTypesList OpenTelemetrySpanLogElement::getNamesAndTypes()
{
return {
{std::make_shared<DataTypeUUID>(), "trace_id"},
{std::make_shared<DataTypeUInt64>(), "span_id"},
{std::make_shared<DataTypeUInt64>(), "parent_span_id"},
{std::make_shared<DataTypeString>(), "operation_name"},
{"trace_id", std::make_shared<DataTypeUUID>()},
{"span_id", std::make_shared<DataTypeUInt64>()},
{"parent_span_id", std::make_shared<DataTypeUInt64>()},
{"operation_name", std::make_shared<DataTypeString>()},
// DateTime64 is really unwieldy -- there is no "normal" way to convert
// it to an UInt64 count of microseconds, except:
// 1) reinterpretAsUInt64(reinterpretAsFixedString(date)), which just
@@ -31,16 +32,21 @@ Block OpenTelemetrySpanLogElement::createBlock()
// Also subtraction of two DateTime64 points doesn't work, so you can't
// get duration.
// It is much less hassle to just use UInt64 of microseconds.
{std::make_shared<DataTypeUInt64>(), "start_time_us"},
{std::make_shared<DataTypeUInt64>(), "finish_time_us"},
{std::make_shared<DataTypeDate>(), "finish_date"},
{std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()),
"attribute.names"},
{std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()),
"attribute.values"}
{"start_time_us", std::make_shared<DataTypeUInt64>()},
{"finish_time_us", std::make_shared<DataTypeUInt64>()},
{"finish_date", std::make_shared<DataTypeDate>()},
{"attribute", std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeString>())},
};
}

NamesAndAliases OpenTelemetrySpanLogElement::getNamesAndAliases()
{
return
{
{"attribute.names", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "mapKeys(attribute)"},
{"attribute.values", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "mapKeys(attribute)"}
};
}

void OpenTelemetrySpanLogElement::appendToBlock(MutableColumns & columns) const
{
@@ -53,17 +59,16 @@ void OpenTelemetrySpanLogElement::appendToBlock(MutableColumns & columns) const
columns[i++]->insert(start_time_us);
columns[i++]->insert(finish_time_us);
columns[i++]->insert(DateLUT::instance().toDayNum(finish_time_us / 1000000).toUnderType());
columns[i++]->insert(attribute_names);
// The user might add some ints values, and we will have Int Field, and the
// insert will fail because the column requires Strings. Convert the fields
// here, because it's hard to remember to convert them in all other places.
Array string_values;
string_values.reserve(attribute_values.size());
for (const auto & value : attribute_values)

Map map(attribute_names.size());
for (size_t attr_idx = 0; attr_idx < map.size(); ++attr_idx)
{
string_values.push_back(toString(value));
map[attr_idx] = Tuple{attribute_names[attr_idx], toString(attribute_values[attr_idx])};
}
columns[i++]->insert(string_values);
columns[i++]->insert(map);
}

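With this change, span attributes are stored in a single `attribute` map, while `attribute.names` remains available as an ALIAS column (defined in `getNamesAndAliases` above). A hedged sketch of querying the table, assuming tracing is enabled on the server:

```sql
-- Map access and the backward-compatible alias, side by side.
SELECT
    operation_name,
    mapKeys(attribute) AS attribute_keys,
    `attribute.names`  AS attribute_names_alias
FROM system.opentelemetry_span_log
LIMIT 10;
```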
@@ -27,7 +27,8 @@ struct OpenTelemetrySpanLogElement : public OpenTelemetrySpan
: OpenTelemetrySpan(span) {}

static std::string name() { return "OpenTelemetrySpanLog"; }
static Block createBlock();
static NamesAndTypesList getNamesAndTypes();
static NamesAndAliases getNamesAndAliases();
void appendToBlock(MutableColumns & columns) const;
};

@@ -16,7 +16,7 @@
namespace DB
{

Block PartLogElement::createBlock()
NamesAndTypesList PartLogElement::getNamesAndTypes()
{
auto event_type_datatype = std::make_shared<DataTypeEnum8>(
DataTypeEnum8::Values
@@ -33,35 +33,34 @@ Block PartLogElement::createBlock()
ColumnsWithTypeAndName columns_with_type_and_name;

return {
{"query_id", std::make_shared<DataTypeString>()},
{"event_type", std::move(event_type_datatype)},
{"event_date", std::make_shared<DataTypeDate>()},

columns_with_type_and_name.emplace_back(std::make_shared<DataTypeString>(), "query_id"),
columns_with_type_and_name.emplace_back(std::move(event_type_datatype), "event_type"),
columns_with_type_and_name.emplace_back(std::make_shared<DataTypeDate>(), "event_date"),
{"event_time", std::make_shared<DataTypeDateTime>()},
{"event_time_microseconds", std::make_shared<DataTypeDateTime64>(6)},

columns_with_type_and_name.emplace_back(std::make_shared<DataTypeDateTime>(), "event_time"),
columns_with_type_and_name.emplace_back(std::make_shared<DataTypeDateTime64>(6), "event_time_microseconds"),
{"duration_ms", std::make_shared<DataTypeUInt64>()},

columns_with_type_and_name.emplace_back(std::make_shared<DataTypeUInt64>(), "duration_ms"),
{"database", std::make_shared<DataTypeString>()},
{"table", std::make_shared<DataTypeString>()},
{"part_name", std::make_shared<DataTypeString>()},
{"partition_id", std::make_shared<DataTypeString>()},
{"path_on_disk", std::make_shared<DataTypeString>()},

columns_with_type_and_name.emplace_back(std::make_shared<DataTypeString>(), "database"),
columns_with_type_and_name.emplace_back(std::make_shared<DataTypeString>(), "table"),
columns_with_type_and_name.emplace_back(std::make_shared<DataTypeString>(), "part_name"),
columns_with_type_and_name.emplace_back(std::make_shared<DataTypeString>(), "partition_id"),
columns_with_type_and_name.emplace_back(std::make_shared<DataTypeString>(), "path_on_disk"),

columns_with_type_and_name.emplace_back(std::make_shared<DataTypeUInt64>(), "rows"),
columns_with_type_and_name.emplace_back(std::make_shared<DataTypeUInt64>(), "size_in_bytes"), // On disk
{"rows", std::make_shared<DataTypeUInt64>()},
{"size_in_bytes", std::make_shared<DataTypeUInt64>()}, // On disk

/// Merge-specific info
columns_with_type_and_name.emplace_back(std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "merged_from"),
columns_with_type_and_name.emplace_back(std::make_shared<DataTypeUInt64>(), "bytes_uncompressed"), // Result bytes
columns_with_type_and_name.emplace_back(std::make_shared<DataTypeUInt64>(), "read_rows"),
columns_with_type_and_name.emplace_back(std::make_shared<DataTypeUInt64>(), "read_bytes"),
columns_with_type_and_name.emplace_back(std::make_shared<DataTypeUInt64>(), "peak_memory_usage"),
{"merged_from", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
{"bytes_uncompressed", std::make_shared<DataTypeUInt64>()}, // Result bytes
{"read_rows", std::make_shared<DataTypeUInt64>()},
{"read_bytes", std::make_shared<DataTypeUInt64>()},
{"peak_memory_usage", std::make_shared<DataTypeUInt64>()},

/// Is there an error during the execution or commit
columns_with_type_and_name.emplace_back(std::make_shared<DataTypeUInt16>(), "error"),
columns_with_type_and_name.emplace_back(std::make_shared<DataTypeString>(), "exception"),
{"error", std::make_shared<DataTypeUInt16>()},
{"exception", std::make_shared<DataTypeString>()},
};
}

@@ -52,7 +52,8 @@ struct PartLogElement

static std::string name() { return "PartLog"; }

static Block createBlock();
static NamesAndTypesList getNamesAndTypes();
static NamesAndAliases getNamesAndAliases() { return {}; }
void appendToBlock(MutableColumns & columns) const;
};

@@ -3,6 +3,7 @@
#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnMap.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeArray.h>
@@ -11,14 +12,18 @@ namespace ProfileEvents
{

/// Put implementation here to avoid extra linking dependencies for clickhouse_common_io
void dumpToArrayColumns(const Counters & counters, DB::IColumn * column_names_, DB::IColumn * column_values_, bool nonzero_only)
void dumpToMapColumn(const Counters & counters, DB::IColumn * column, bool nonzero_only)
{
/// Convert ptr and make simple check
auto * column_names = (column_names_) ? &typeid_cast<DB::ColumnArray &>(*column_names_) : nullptr;
auto * column_values = (column_values_) ? &typeid_cast<DB::ColumnArray &>(*column_values_) : nullptr;
auto * column_map = column ? &typeid_cast<DB::ColumnMap &>(*column) : nullptr;
if (!column_map)
return;

auto & offsets = column_map->getNestedColumn().getOffsets();
auto & tuple_column = column_map->getNestedData();
auto & key_column = tuple_column.getColumn(0);
auto & value_column = tuple_column.getColumn(1);

size_t size = 0;

for (Event event = 0; event < Counters::num_counters; ++event)
{
UInt64 value = counters[event].load(std::memory_order_relaxed);
@@ -26,34 +31,13 @@ void dumpToArrayColumns(const Counters & counters, DB::IColumn * column_names_,
if (nonzero_only && 0 == value)
continue;

++size;

if (column_names)
{
const char * desc = ProfileEvents::getName(event);
column_names->getData().insertData(desc, strlen(desc));
key_column.insertData(desc, strlen(desc));
value_column.insert(value);
size++;
}

if (column_values)
column_values->getData().insert(value);
}

if (column_names)
{
auto & offsets = column_names->getOffsets();
offsets.push_back(offsets.back() + size);
}

if (column_values)
{
/// Nested columns case
bool the_same_offsets = column_names && column_names->getOffsetsPtr().get() == column_values->getOffsetsPtr().get();
if (!the_same_offsets)
{
auto & offsets = column_values->getOffsets();
offsets.push_back(offsets.back() + size);
}
}
}

}
@ -6,7 +6,7 @@
|
||||
namespace ProfileEvents
|
||||
{
|
||||
|
||||
/// Dumps profile events to two columns Array(String) and Array(UInt64)
|
||||
void dumpToArrayColumns(const Counters & counters, DB::IColumn * column_names, DB::IColumn * column_value, bool nonzero_only = true);
|
||||
/// Dumps profile events to columns Map(String, UInt64)
|
||||
void dumpToMapColumn(const Counters & counters, DB::IColumn * column, bool nonzero_only = true);
|
||||
|
||||
}
|
||||
|

@ -10,6 +10,7 @@
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeEnum.h>
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeMap.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypesNumber.h>

@ -19,12 +20,13 @@
#include <Common/ClickHouseRevision.h>
#include <Common/IPv6ToBinary.h>
#include <Common/ProfileEvents.h>
#include <Common/typeid_cast.h>

namespace DB
{

Block QueryLogElement::createBlock()
NamesAndTypesList QueryLogElement::getNamesAndTypes()
{
auto query_status_datatype = std::make_shared<DataTypeEnum8>(
DataTypeEnum8::Values

@ -37,86 +39,94 @@ Block QueryLogElement::createBlock()

return
{
{std::move(query_status_datatype), "type"},
{std::make_shared<DataTypeDate>(), "event_date"},
{std::make_shared<DataTypeDateTime>(), "event_time"},
{std::make_shared<DataTypeDateTime64>(6), "event_time_microseconds"},
{std::make_shared<DataTypeDateTime>(), "query_start_time"},
{std::make_shared<DataTypeDateTime64>(6), "query_start_time_microseconds"},
{std::make_shared<DataTypeUInt64>(), "query_duration_ms"},
{"type", std::move(query_status_datatype)},
{"event_date", std::make_shared<DataTypeDate>()},
{"event_time", std::make_shared<DataTypeDateTime>()},
{"event_time_microseconds", std::make_shared<DataTypeDateTime64>(6)},
{"query_start_time", std::make_shared<DataTypeDateTime>()},
{"query_start_time_microseconds", std::make_shared<DataTypeDateTime64>(6)},
{"query_duration_ms", std::make_shared<DataTypeUInt64>()},

{std::make_shared<DataTypeUInt64>(), "read_rows"},
{std::make_shared<DataTypeUInt64>(), "read_bytes"},
{std::make_shared<DataTypeUInt64>(), "written_rows"},
{std::make_shared<DataTypeUInt64>(), "written_bytes"},
{std::make_shared<DataTypeUInt64>(), "result_rows"},
{std::make_shared<DataTypeUInt64>(), "result_bytes"},
{std::make_shared<DataTypeUInt64>(), "memory_usage"},
{"read_rows", std::make_shared<DataTypeUInt64>()},
{"read_bytes", std::make_shared<DataTypeUInt64>()},
{"written_rows", std::make_shared<DataTypeUInt64>()},
{"written_bytes", std::make_shared<DataTypeUInt64>()},
{"result_rows", std::make_shared<DataTypeUInt64>()},
{"result_bytes", std::make_shared<DataTypeUInt64>()},
{"memory_usage", std::make_shared<DataTypeUInt64>()},

{std::make_shared<DataTypeString>(), "current_database"},
{std::make_shared<DataTypeString>(), "query"},
{std::make_shared<DataTypeUInt64>(), "normalized_query_hash"},
{std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()), "query_kind"},
{std::make_shared<DataTypeArray>(
std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>())), "databases"},
{std::make_shared<DataTypeArray>(
std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>())), "tables"},
{std::make_shared<DataTypeArray>(
std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>())), "columns"},
{std::make_shared<DataTypeArray>(
std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>())), "projections"},
{std::make_shared<DataTypeInt32>(), "exception_code"},
{std::make_shared<DataTypeString>(), "exception"},
{std::make_shared<DataTypeString>(), "stack_trace"},
{"current_database", std::make_shared<DataTypeString>()},
{"query", std::make_shared<DataTypeString>()},
{"normalized_query_hash", std::make_shared<DataTypeUInt64>()},
{"query_kind", std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>())},
{"databases", std::make_shared<DataTypeArray>(
std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()))},
{"tables", std::make_shared<DataTypeArray>(
std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()))},
{"columns", std::make_shared<DataTypeArray>(
std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()))},
{"projections", std::make_shared<DataTypeArray>(
std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()))},
{"exception_code", std::make_shared<DataTypeInt32>()},
{"exception", std::make_shared<DataTypeString>()},
{"stack_trace", std::make_shared<DataTypeString>()},

{std::make_shared<DataTypeUInt8>(), "is_initial_query"},
{std::make_shared<DataTypeString>(), "user"},
{std::make_shared<DataTypeString>(), "query_id"},
{DataTypeFactory::instance().get("IPv6"), "address"},
{std::make_shared<DataTypeUInt16>(), "port"},
{std::make_shared<DataTypeString>(), "initial_user"},
{std::make_shared<DataTypeString>(), "initial_query_id"},
{DataTypeFactory::instance().get("IPv6"), "initial_address"},
{std::make_shared<DataTypeUInt16>(), "initial_port"},
{std::make_shared<DataTypeDateTime>(), "initial_query_start_time"},
{std::make_shared<DataTypeDateTime64>(6), "initial_query_start_time_microseconds"},
{std::make_shared<DataTypeUInt8>(), "interface"},
{std::make_shared<DataTypeString>(), "os_user"},
{std::make_shared<DataTypeString>(), "client_hostname"},
{std::make_shared<DataTypeString>(), "client_name"},
{std::make_shared<DataTypeUInt32>(), "client_revision"},
{std::make_shared<DataTypeUInt32>(), "client_version_major"},
{std::make_shared<DataTypeUInt32>(), "client_version_minor"},
{std::make_shared<DataTypeUInt32>(), "client_version_patch"},
{std::make_shared<DataTypeUInt8>(), "http_method"},
{std::make_shared<DataTypeString>(), "http_user_agent"},
{std::make_shared<DataTypeString>(), "http_referer"},
{std::make_shared<DataTypeString>(), "forwarded_for"},
{std::make_shared<DataTypeString>(), "quota_key"},
{"is_initial_query", std::make_shared<DataTypeUInt8>()},
{"user", std::make_shared<DataTypeString>()},
{"query_id", std::make_shared<DataTypeString>()},
{"address", DataTypeFactory::instance().get("IPv6")},
{"port", std::make_shared<DataTypeUInt16>()},
{"initial_user", std::make_shared<DataTypeString>()},
{"initial_query_id", std::make_shared<DataTypeString>()},
{"initial_address", DataTypeFactory::instance().get("IPv6")},
{"initial_port", std::make_shared<DataTypeUInt16>()},
{"initial_query_start_time", std::make_shared<DataTypeDateTime>()},
{"initial_query_start_time_microseconds", std::make_shared<DataTypeDateTime64>(6)},
{"interface", std::make_shared<DataTypeUInt8>()},
{"os_user", std::make_shared<DataTypeString>()},
{"client_hostname", std::make_shared<DataTypeString>()},
{"client_name", std::make_shared<DataTypeString>()},
{"client_revision", std::make_shared<DataTypeUInt32>()},
{"client_version_major", std::make_shared<DataTypeUInt32>()},
{"client_version_minor", std::make_shared<DataTypeUInt32>()},
{"client_version_patch", std::make_shared<DataTypeUInt32>()},
{"http_method", std::make_shared<DataTypeUInt8>()},
{"http_user_agent", std::make_shared<DataTypeString>()},
{"http_referer", std::make_shared<DataTypeString>()},
{"forwarded_for", std::make_shared<DataTypeString>()},
{"quota_key", std::make_shared<DataTypeString>()},

{std::make_shared<DataTypeUInt32>(), "revision"},
{"revision", std::make_shared<DataTypeUInt32>()},

{std::make_shared<DataTypeString>(), "log_comment"},
{"log_comment", std::make_shared<DataTypeString>()},

{std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>()), "thread_ids"},
{std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "ProfileEvents.Names"},
{std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>()), "ProfileEvents.Values"},
{std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "Settings.Names"},
{std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "Settings.Values"},
{"thread_ids", std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>())},
{"ProfileEvents", std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeUInt64>())},
{"Settings", std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeString>())},

{std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "used_aggregate_functions"},
{std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "used_aggregate_function_combinators"},
{std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "used_database_engines"},
{std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "used_data_type_families"},
{std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "used_dictionaries"},
{std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "used_formats"},
{std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "used_functions"},
{std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "used_storages"},
{std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "used_table_functions"}
{"used_aggregate_functions", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
{"used_aggregate_function_combinators", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
{"used_database_engines", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
{"used_data_type_families", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
{"used_dictionaries", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
{"used_formats", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
{"used_functions", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
{"used_storages", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
{"used_table_functions", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())}
};

}

NamesAndAliases QueryLogElement::getNamesAndAliases()
{
return
{
{"ProfileEvents.Names", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())}, "mapKeys(ProfileEvents)"},
{"ProfileEvents.Values", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>())}, "mapValues(ProfileEvents)"},
{"Settings.Names", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())}, "mapKeys(Settings)" },
{"Settings.Values", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())}, "mapValues(Settings)"}
};
}
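
The aliases above are what keep pre-Map queries working: `ProfileEvents.Names`, `ProfileEvents.Values`, `Settings.Names` and `Settings.Values` are no longer stored columns but ALIAS columns that resolve to `mapKeys`/`mapValues` of the new Map columns. A minimal sketch of that equivalence, assuming a server built with this change (column choices are illustrative only):

```sql
-- Old-style dotted columns still parse; they read through to the Map.
SELECT
    ProfileEvents.Names  = mapKeys(ProfileEvents)   AS names_match,
    ProfileEvents.Values = mapValues(ProfileEvents) AS values_match,
    Settings['max_threads']                         AS max_threads
FROM system.query_log
WHERE type = 'QueryFinish'
ORDER BY event_time DESC
LIMIT 1;
```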

void QueryLogElement::appendToBlock(MutableColumns & columns) const
{

@ -188,26 +198,22 @@ void QueryLogElement::appendToBlock(MutableColumns & columns) const

if (profile_counters)
{
auto * column_names = columns[i++].get();
auto * column_values = columns[i++].get();
ProfileEvents::dumpToArrayColumns(*profile_counters, column_names, column_values, true);
auto * column = columns[i++].get();
ProfileEvents::dumpToMapColumn(*profile_counters, column, true);
}
else
{
columns[i++]->insertDefault();
columns[i++]->insertDefault();
}

if (query_settings)
{
auto * column_names = columns[i++].get();
auto * column_values = columns[i++].get();
query_settings->dumpToArrayColumns(column_names, column_values, true);
auto * column = columns[i++].get();
query_settings->dumpToMapColumn(column, true);
}
else
{
columns[i++]->insertDefault();
columns[i++]->insertDefault();
}

{

@ -1,9 +1,9 @@
#pragma once

#include <Core/NamesAndAliases.h>
#include <Interpreters/SystemLog.h>
#include <Interpreters/ClientInfo.h>

namespace ProfileEvents
{
class Counters;

@ -83,7 +83,8 @@ struct QueryLogElement

static std::string name() { return "QueryLog"; }

static Block createBlock();
static NamesAndTypesList getNamesAndTypes();
static NamesAndAliases getNamesAndAliases();
void appendToBlock(MutableColumns & columns) const;

static void appendClientInfo(const ClientInfo & client_info, MutableColumns & columns, size_t & i);

@ -5,6 +5,7 @@
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeMap.h>
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeDateTime64.h>
#include <DataTypes/DataTypeFactory.h>

@ -18,59 +19,68 @@

namespace DB
{
Block QueryThreadLogElement::createBlock()

NamesAndTypesList QueryThreadLogElement::getNamesAndTypes()
{
return {
{std::make_shared<DataTypeDate>(), "event_date"},
{std::make_shared<DataTypeDateTime>(), "event_time"},
{std::make_shared<DataTypeDateTime64>(6), "event_time_microseconds"},
{std::make_shared<DataTypeDateTime>(), "query_start_time"},
{std::make_shared<DataTypeDateTime64>(6), "query_start_time_microseconds"},
{std::make_shared<DataTypeUInt64>(), "query_duration_ms"},
{"event_date", std::make_shared<DataTypeDate>()},
{"event_time", std::make_shared<DataTypeDateTime>()},
{"event_time_microseconds", std::make_shared<DataTypeDateTime64>(6)},
{"query_start_time", std::make_shared<DataTypeDateTime>()},
{"query_start_time_microseconds", std::make_shared<DataTypeDateTime64>(6)},
{"query_duration_ms", std::make_shared<DataTypeUInt64>()},

{std::make_shared<DataTypeUInt64>(), "read_rows"},
{std::make_shared<DataTypeUInt64>(), "read_bytes"},
{std::make_shared<DataTypeUInt64>(), "written_rows"},
{std::make_shared<DataTypeUInt64>(), "written_bytes"},
{std::make_shared<DataTypeInt64>(), "memory_usage"},
{std::make_shared<DataTypeInt64>(), "peak_memory_usage"},
{"read_rows", std::make_shared<DataTypeUInt64>()},
{"read_bytes", std::make_shared<DataTypeUInt64>()},
{"written_rows", std::make_shared<DataTypeUInt64>()},
{"written_bytes", std::make_shared<DataTypeUInt64>()},
{"memory_usage", std::make_shared<DataTypeInt64>()},
{"peak_memory_usage", std::make_shared<DataTypeInt64>()},

{std::make_shared<DataTypeString>(), "thread_name"},
{std::make_shared<DataTypeUInt64>(), "thread_id"},
{std::make_shared<DataTypeUInt64>(), "master_thread_id"},
{std::make_shared<DataTypeString>(), "current_database"},
{std::make_shared<DataTypeString>(), "query"},
{std::make_shared<DataTypeUInt64>(), "normalized_query_hash"},
{"thread_name", std::make_shared<DataTypeString>()},
{"thread_id", std::make_shared<DataTypeUInt64>()},
{"master_thread_id", std::make_shared<DataTypeUInt64>()},
{"current_database", std::make_shared<DataTypeString>()},
{"query", std::make_shared<DataTypeString>()},
{"normalized_query_hash", std::make_shared<DataTypeUInt64>()},

{std::make_shared<DataTypeUInt8>(), "is_initial_query"},
{std::make_shared<DataTypeString>(), "user"},
{std::make_shared<DataTypeString>(), "query_id"},
{DataTypeFactory::instance().get("IPv6"), "address"},
{std::make_shared<DataTypeUInt16>(), "port"},
{std::make_shared<DataTypeString>(), "initial_user"},
{std::make_shared<DataTypeString>(), "initial_query_id"},
{DataTypeFactory::instance().get("IPv6"), "initial_address"},
{std::make_shared<DataTypeUInt16>(), "initial_port"},
{std::make_shared<DataTypeDateTime>(), "initial_query_start_time"},
{std::make_shared<DataTypeDateTime64>(6), "initial_query_start_time_microseconds"},
{std::make_shared<DataTypeUInt8>(), "interface"},
{std::make_shared<DataTypeString>(), "os_user"},
{std::make_shared<DataTypeString>(), "client_hostname"},
{std::make_shared<DataTypeString>(), "client_name"},
{std::make_shared<DataTypeUInt32>(), "client_revision"},
{std::make_shared<DataTypeUInt32>(), "client_version_major"},
{std::make_shared<DataTypeUInt32>(), "client_version_minor"},
{std::make_shared<DataTypeUInt32>(), "client_version_patch"},
{std::make_shared<DataTypeUInt8>(), "http_method"},
{std::make_shared<DataTypeString>(), "http_user_agent"},
{std::make_shared<DataTypeString>(), "http_referer"},
{std::make_shared<DataTypeString>(), "forwarded_for"},
{std::make_shared<DataTypeString>(), "quota_key"},
{"is_initial_query", std::make_shared<DataTypeUInt8>()},
{"user", std::make_shared<DataTypeString>()},
{"query_id", std::make_shared<DataTypeString>()},
{"address", DataTypeFactory::instance().get("IPv6")},
{"port", std::make_shared<DataTypeUInt16>()},
{"initial_user", std::make_shared<DataTypeString>()},
{"initial_query_id", std::make_shared<DataTypeString>()},
{"initial_address", DataTypeFactory::instance().get("IPv6")},
{"initial_port", std::make_shared<DataTypeUInt16>()},
{"initial_query_start_time", std::make_shared<DataTypeDateTime>()},
{"initial_query_start_time_microseconds", std::make_shared<DataTypeDateTime64>(6)},
{"interface", std::make_shared<DataTypeUInt8>()},
{"os_user", std::make_shared<DataTypeString>()},
{"client_hostname", std::make_shared<DataTypeString>()},
{"client_name", std::make_shared<DataTypeString>()},
{"client_revision", std::make_shared<DataTypeUInt32>()},
{"client_version_major", std::make_shared<DataTypeUInt32>()},
{"client_version_minor", std::make_shared<DataTypeUInt32>()},
{"client_version_patch", std::make_shared<DataTypeUInt32>()},
{"http_method", std::make_shared<DataTypeUInt8>()},
{"http_user_agent", std::make_shared<DataTypeString>()},
{"http_referer", std::make_shared<DataTypeString>()},
{"forwarded_for", std::make_shared<DataTypeString>()},
{"quota_key", std::make_shared<DataTypeString>()},

{std::make_shared<DataTypeUInt32>(), "revision"},
{"revision", std::make_shared<DataTypeUInt32>()},

{std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "ProfileEvents.Names"},
{std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>()), "ProfileEvents.Values"}
{"ProfileEvents", std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeUInt64>())},
};
}

NamesAndAliases QueryThreadLogElement::getNamesAndAliases()
{
return
{
{"ProfileEvents.Names", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())}, "mapKeys(ProfileEvents)"},
{"ProfileEvents.Values", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>())}, "mapValues(ProfileEvents)"}
};
}

@ -107,14 +117,12 @@ void QueryThreadLogElement::appendToBlock(MutableColumns & columns) const

if (profile_counters)
{
auto * column_names = columns[i++].get();
auto * column_values = columns[i++].get();
dumpToArrayColumns(*profile_counters, column_names, column_values, true);
auto * column = columns[i++].get();
ProfileEvents::dumpToMapColumn(*profile_counters, column, true);
}
else
{
columns[i++]->insertDefault();
columns[i++]->insertDefault();
}
}

@ -49,7 +49,8 @@ struct QueryThreadLogElement

static std::string name() { return "QueryThreadLog"; }

static Block createBlock();
static NamesAndTypesList getNamesAndTypes();
static NamesAndAliases getNamesAndAliases();
void appendToBlock(MutableColumns & columns) const;
};

@ -52,7 +52,8 @@ namespace DB
/// fields

static std::string name();
static Block createBlock();
static NamesAndTypesList getNamesAndTypes();
static NamesAndAliases getNamesAndAliases();
void appendToBlock(MutableColumns & columns) const;
};
*/

@ -451,10 +452,18 @@ void SystemLog<LogElement>::flushImpl(const std::vector<LogElement> & to_flush,
/// is called from single thread.
prepareTable();

Block block = LogElement::createBlock();
ColumnsWithTypeAndName log_element_columns;
auto log_element_names_and_types = LogElement::getNamesAndTypes();

for (auto name_and_type : log_element_names_and_types)
log_element_columns.emplace_back(name_and_type.type, name_and_type.name);

Block block(std::move(log_element_columns));

MutableColumns columns = block.mutateColumns();
for (const auto & elem : to_flush)
elem.appendToBlock(columns);

block.setColumns(std::move(columns));

/// We write to table indirectly, using InterpreterInsertQuery.

@ -500,11 +509,14 @@ void SystemLog<LogElement>::prepareTable()

if (table)
{
auto metadata_snapshot = table->getInMemoryMetadataPtr();
const Block expected = LogElement::createBlock();
const Block actual = metadata_snapshot->getSampleBlockNonMaterialized();
auto metadata_columns = table->getInMemoryMetadataPtr()->getColumns();
auto old_query = InterpreterCreateQuery::formatColumns(metadata_columns);

if (!blocksHaveEqualStructure(actual, expected))
auto ordinary_columns = LogElement::getNamesAndTypes();
auto alias_columns = LogElement::getNamesAndAliases();
auto current_query = InterpreterCreateQuery::formatColumns(ordinary_columns, alias_columns);

if (old_query->getTreeHash() != current_query->getTreeHash())
{
/// Rename the existing table.
int suffix = 0;

@ -575,10 +587,10 @@ ASTPtr SystemLog<LogElement>::getCreateTableQuery()
create->database = table_id.database_name;
create->table = table_id.table_name;

Block sample = LogElement::createBlock();

auto ordinary_columns = LogElement::getNamesAndTypes();
auto alias_columns = LogElement::getNamesAndAliases();
auto new_columns_list = std::make_shared<ASTColumns>();
new_columns_list->set(new_columns_list->columns, InterpreterCreateQuery::formatColumns(sample.getNamesAndTypesList()));
new_columns_list->set(new_columns_list->columns, InterpreterCreateQuery::formatColumns(ordinary_columns, alias_columns));
create->set(create->columns_list, new_columns_list);

ParserStorage storage_parser;

@ -14,7 +14,7 @@
namespace DB
{

Block TextLogElement::createBlock()
NamesAndTypesList TextLogElement::getNamesAndTypes()
{
auto priority_datatype = std::make_shared<DataTypeEnum8>(
DataTypeEnum8::Values

@ -31,23 +31,23 @@ Block TextLogElement::createBlock()

return
{
{std::make_shared<DataTypeDate>(), "event_date"},
{std::make_shared<DataTypeDateTime>(), "event_time"},
{std::make_shared<DataTypeDateTime64>(6), "event_time_microseconds"},
{std::make_shared<DataTypeUInt32>(), "microseconds"},
{"event_date", std::make_shared<DataTypeDate>()},
{"event_time", std::make_shared<DataTypeDateTime>()},
{"event_time_microseconds", std::make_shared<DataTypeDateTime64>(6)},
{"microseconds", std::make_shared<DataTypeUInt32>()},

{std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()), "thread_name"},
{std::make_shared<DataTypeUInt64>(), "thread_id"},
{"thread_name", std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>())},
{"thread_id", std::make_shared<DataTypeUInt64>()},

{std::move(priority_datatype), "level"},
{std::make_shared<DataTypeString>(), "query_id"},
{std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()), "logger_name"},
{std::make_shared<DataTypeString>(), "message"},
{"level", std::move(priority_datatype)},
{"query_id", std::make_shared<DataTypeString>()},
{"logger_name", std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>())},
{"message", std::make_shared<DataTypeString>()},

{std::make_shared<DataTypeUInt32>(), "revision"},
{"revision", std::make_shared<DataTypeUInt32>()},

{std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()), "source_file"},
{std::make_shared<DataTypeUInt64>(), "source_line"}
{"source_file", std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>())},
{"source_line", std::make_shared<DataTypeUInt64>()}
};
}

@ -25,7 +25,8 @@ struct TextLogElement
UInt64 source_line{};

static std::string name() { return "TextLog"; }
static Block createBlock();
static NamesAndTypesList getNamesAndTypes();
static NamesAndAliases getNamesAndAliases() { return {}; }
void appendToBlock(MutableColumns & columns) const;
};

@ -21,20 +21,20 @@ const TraceDataType::Values TraceLogElement::trace_values =
{"MemorySample", static_cast<UInt8>(TraceType::MemorySample)},
};

Block TraceLogElement::createBlock()
NamesAndTypesList TraceLogElement::getNamesAndTypes()
{
return
{
{std::make_shared<DataTypeDate>(), "event_date"},
{std::make_shared<DataTypeDateTime>(), "event_time"},
{std::make_shared<DataTypeDateTime64>(6), "event_time_microseconds"},
{std::make_shared<DataTypeUInt64>(), "timestamp_ns"},
{std::make_shared<DataTypeUInt32>(), "revision"},
{std::make_shared<TraceDataType>(trace_values), "trace_type"},
{std::make_shared<DataTypeUInt64>(), "thread_id"},
{std::make_shared<DataTypeString>(), "query_id"},
{std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>()), "trace"},
{std::make_shared<DataTypeInt64>(), "size"},
{"event_date", std::make_shared<DataTypeDate>()},
{"event_time", std::make_shared<DataTypeDateTime>()},
{"event_time_microseconds", std::make_shared<DataTypeDateTime64>(6)},
{"timestamp_ns", std::make_shared<DataTypeUInt64>()},
{"revision", std::make_shared<DataTypeUInt32>()},
{"trace_type", std::make_shared<TraceDataType>(trace_values)},
{"thread_id", std::make_shared<DataTypeUInt64>()},
{"query_id", std::make_shared<DataTypeString>()},
{"trace", std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>())},
{"size", std::make_shared<DataTypeInt64>()},
};
}

@ -27,7 +27,8 @@ struct TraceLogElement
Int64 size{}; /// Allocation size in bytes for TraceType::Memory

static std::string name() { return "TraceLog"; }
static Block createBlock();
static NamesAndTypesList getNamesAndTypes();
static NamesAndAliases getNamesAndAliases() { return {}; }
void appendToBlock(MutableColumns & columns) const;
};

@ -145,6 +145,25 @@ ColumnsDescription::ColumnsDescription(NamesAndTypesList ordinary)
add(ColumnDescription(std::move(elem.name), std::move(elem.type)));
}

ColumnsDescription::ColumnsDescription(NamesAndTypesList ordinary, NamesAndAliases aliases)
{
for (auto & elem : ordinary)
add(ColumnDescription(std::move(elem.name), std::move(elem.type)));

for (auto & alias : aliases)
{
ColumnDescription description(std::move(alias.name), std::move(alias.type));
description.default_desc.kind = ColumnDefaultKind::Alias;

const char * alias_expression_pos = alias.expression.data();
const char * alias_expression_end = alias_expression_pos + alias.expression.size();
ParserExpression expression_parser;
description.default_desc.expression = parseQuery(expression_parser, alias_expression_pos, alias_expression_end, "expression", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);

add(std::move(description));
}
}
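
With the alias-aware `ColumnsDescription`, the compatibility columns end up in the generated table definition as ALIAS columns over the Map. A minimal, self-contained illustration of the same pattern on a hypothetical table (not from this commit; `allow_experimental_map_type` may be needed on older releases):

```sql
SET allow_experimental_map_type = 1;  -- only required on builds where Map is still experimental

CREATE TABLE alias_over_map_demo
(
    `ProfileEvents` Map(String, UInt64),
    `ProfileEvents.Names` Array(String) ALIAS mapKeys(ProfileEvents),
    `ProfileEvents.Values` Array(UInt64) ALIAS mapValues(ProfileEvents)
)
ENGINE = MergeTree
ORDER BY tuple();

INSERT INTO alias_over_map_demo VALUES (map('Query', 1, 'SelectQuery', 1));

-- The dotted ALIAS columns read through to the Map column.
SELECT ProfileEvents.Names, ProfileEvents.Values FROM alias_over_map_demo;
```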

/// We are trying to find first column from end with name `column_name` or with a name beginning with `column_name` and ".".
/// For example "fruits.bananas"

@ -4,6 +4,7 @@
#include <Core/Block.h>
#include <Core/Names.h>
#include <Core/NamesAndTypes.h>
#include <Core/NamesAndAliases.h>
#include <Interpreters/Context_fwd.h>
#include <Storages/ColumnCodec.h>
#include <Storages/ColumnDefault.h>

@ -54,7 +55,9 @@ class ColumnsDescription
{
public:
ColumnsDescription() = default;
explicit ColumnsDescription(NamesAndTypesList ordinary_);
explicit ColumnsDescription(NamesAndTypesList ordinary);

explicit ColumnsDescription(NamesAndTypesList ordinary, NamesAndAliases aliases);

/// `after_column` can be a Nested column name;
void add(ColumnDescription column, const String & after_column = String(), bool first = false);

@ -1,4 +1,6 @@
#pragma once

#include <Core/NamesAndAliases.h>
#include <DataTypes/DataTypeString.h>
#include <Storages/ColumnsDescription.h>
#include <Storages/IStorage.h>

@ -29,6 +31,7 @@ class IStorageSystemOneBlock : public IStorage
protected:
virtual void fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo & query_info) const = 0;

public:
#if defined(ARCADIA_BUILD)
IStorageSystemOneBlock(const String & name_) : IStorageSystemOneBlock(StorageID{"system", name_}) {}

@ -37,7 +40,7 @@ public:
IStorageSystemOneBlock(const StorageID & table_id_) : IStorage(table_id_)
{
StorageInMemoryMetadata metadata_;
metadata_.setColumns(ColumnsDescription(Self::getNamesAndTypes()));
metadata_.setColumns(ColumnsDescription(Self::getNamesAndTypes(), Self::getNamesAndAliases()));
setInMemoryMetadata(metadata_);
}

@ -62,6 +65,8 @@ public:

return Pipe(std::make_shared<SourceFromSingleChunk>(sample_block, std::move(chunk)));
}

static NamesAndAliases getNamesAndAliases() { return {}; }
};

}

@ -1,6 +1,7 @@
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeMap.h>
#include <DataTypes/DataTypeFactory.h>
#include <Interpreters/ProcessList.h>
#include <Storages/System/StorageSystemProcesses.h>

@ -60,15 +61,21 @@ NamesAndTypesList StorageSystemProcesses::getNamesAndTypes()
{"query", std::make_shared<DataTypeString>()},

{"thread_ids", std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>())},
{"ProfileEvents.Names", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
{"ProfileEvents.Values", std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>())},
{"Settings.Names", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
{"Settings.Values", std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())},
{"ProfileEvents", std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeUInt64>())},
{"Settings", std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeString>())},

{"current_database", std::make_shared<DataTypeString>()},
};
}

NamesAndAliases StorageSystemProcesses::getNamesAndAliases()
{
return
{
{"ProfileEvents.Names", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())}, "mapKeys(ProfileEvents)"},
{"ProfileEvents.Values", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>())}, "mapValues(ProfileEvents)"}
};
}
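
`system.processes` gets the same treatment: `ProfileEvents` and `Settings` become Map columns, with the old dotted pair kept as aliases. A small sketch of how a monitoring query might read them after this change (the particular counters and setting names are illustrative):

```sql
SELECT
    query_id,
    ProfileEvents['SelectedRows']  AS selected_rows,
    Settings['max_memory_usage']   AS max_memory_usage,
    mapKeys(ProfileEvents)         AS event_names  -- same data as the old ProfileEvents.Names
FROM system.processes
WHERE ProfileEvents['Query'] > 0;
```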

void StorageSystemProcesses::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const
{

@ -127,28 +134,24 @@ void StorageSystemProcesses::fillData(MutableColumns & res_columns, ContextPtr c
}

{
IColumn * column_profile_events_names = res_columns[i++].get();
IColumn * column_profile_events_values = res_columns[i++].get();
IColumn * column = res_columns[i++].get();

if (process.profile_counters)
ProfileEvents::dumpToArrayColumns(*process.profile_counters, column_profile_events_names, column_profile_events_values, true);
ProfileEvents::dumpToMapColumn(*process.profile_counters, column, true);
else
{
column_profile_events_names->insertDefault();
column_profile_events_values->insertDefault();
column->insertDefault();
}
}

{
IColumn * column_settings_names = res_columns[i++].get();
IColumn * column_settings_values = res_columns[i++].get();
IColumn * column = res_columns[i++].get();

if (process.query_settings)
process.query_settings->dumpToArrayColumns(column_settings_names, column_settings_values, true);
process.query_settings->dumpToMapColumn(column, true);
else
{
column_settings_names->insertDefault();
column_settings_values->insertDefault();
column->insertDefault();
}
}

@ -20,6 +20,8 @@ public:

static NamesAndTypesList getNamesAndTypes();

static NamesAndAliases getNamesAndAliases();

protected:
using IStorageSystemOneBlock::IStorageSystemOneBlock;

@ -83,7 +83,7 @@ def get_query_user_info(node, query_pattern):
def get_query_setting_on_shard(node, query_pattern, setting):
node.query("SYSTEM FLUSH LOGS")
return node.query("""
SELECT (arrayFilter(x -> ((x.1) = '{}'), arrayZip(Settings.Names, Settings.Values))[1]).2
SELECT Settings['{}']
FROM system.query_log
WHERE
query LIKE '%{}%' AND

@ -24,7 +24,7 @@ def get_query_stat(instance, hint):
result = {}
instance.query("SYSTEM FLUSH LOGS")
events = instance.query('''
SELECT ProfileEvents.Names, ProfileEvents.Values
SELECT ProfileEvents.keys, ProfileEvents.values
FROM system.query_log
ARRAY JOIN ProfileEvents
WHERE type != 1 AND query LIKE '%{}%'

@ -83,7 +83,7 @@ def get_query_stat(instance, hint):
result = init_list.copy()
instance.query("SYSTEM FLUSH LOGS")
events = instance.query('''
SELECT ProfileEvents.Names, ProfileEvents.Values
SELECT ProfileEvents.keys, ProfileEvents.values
FROM system.query_log
ARRAY JOIN ProfileEvents
WHERE type != 1 AND query LIKE '%{}%'

@ -40,16 +40,16 @@ $CLICKHOUSE_CLIENT $settings -q "SYSTEM FLUSH LOGS"
$CLICKHOUSE_CLIENT $settings -q "
WITH
any(query_duration_ms*1000) AS duration,
sumIf(PV, PN = 'RealTimeMicroseconds') AS threads_realtime,
sumIf(PV, PN IN ('UserTimeMicroseconds', 'SystemTimeMicroseconds', 'OSIOWaitMicroseconds', 'OSCPUWaitMicroseconds')) AS threads_time_user_system_io
sum(ProfileEvents['RealTimeMicroseconds']) AS threads_realtime,
sum(ProfileEvents['UserTimeMicroseconds'] + ProfileEvents['SystemTimeMicroseconds'] + ProfileEvents['OSIOWaitMicroseconds'] + ProfileEvents['OSCPUWaitMicroseconds']) AS threads_time_user_system_io
SELECT
-- duration, threads_realtime, threads_time_user_system_io,
threads_realtime >= 0.99 * duration,
threads_realtime >= threads_time_user_system_io,
any(length(thread_ids)) >= 1
FROM
(SELECT * FROM system.query_log PREWHERE query='$heavy_cpu_query' WHERE event_date >= today()-2 AND current_database = currentDatabase() AND type=2 ORDER BY event_time DESC LIMIT 1)
ARRAY JOIN ProfileEvents.Names AS PN, ProfileEvents.Values AS PV"
(SELECT * FROM system.query_log PREWHERE query='$heavy_cpu_query' WHERE event_date >= today()-1 AND current_database = currentDatabase() AND type=2 ORDER BY event_time DESC LIMIT 1)
"

# Clean
rm "$server_logs_file"

@ -28,6 +28,7 @@ $CLICKHOUSE_CLIENT $settings -q "$touching_many_parts_query" &> /dev/null

$CLICKHOUSE_CLIENT $settings -q "SYSTEM FLUSH LOGS"

$CLICKHOUSE_CLIENT $settings -q "SELECT pi.Values FROM system.query_log ARRAY JOIN ProfileEvents as pi WHERE query='$touching_many_parts_query' and current_database = currentDatabase() and pi.Names = 'FileOpen' ORDER BY event_time DESC LIMIT 1;"

$CLICKHOUSE_CLIENT $settings -q "SELECT ProfileEvents['FileOpen'] FROM system.query_log WHERE query='$touching_many_parts_query' and current_database = currentDatabase() ORDER BY event_time DESC LIMIT 1;"

$CLICKHOUSE_CLIENT $settings -q "DROP TABLE IF EXISTS merge_tree_table;"

@ -20,7 +20,8 @@ $CLICKHOUSE_CLIENT --use_uncompressed_cache=1 --query_id="test-query-uncompresse

$CLICKHOUSE_CLIENT --query="SYSTEM FLUSH LOGS"

$CLICKHOUSE_CLIENT --query="SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'Seek')], ProfileEvents.Values[indexOf(ProfileEvents.Names, 'ReadCompressedBytes')], ProfileEvents.Values[indexOf(ProfileEvents.Names, 'UncompressedCacheHits')] AS hit FROM system.query_log WHERE (query_id = 'test-query-uncompressed-cache') AND current_database = currentDatabase() AND (type = 2) AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1"

$CLICKHOUSE_CLIENT --query="SELECT ProfileEvents['Seek'], ProfileEvents['ReadCompressedBytes'], ProfileEvents['UncompressedCacheHits'] AS hit FROM system.query_log WHERE (query_id = 'test-query-uncompressed-cache') and current_database = currentDatabase() AND (type = 2) AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1"

$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS small_table"

@ -2,9 +2,7 @@ SET log_queries = 1;
SELECT 1 LIMIT 0;
SYSTEM FLUSH LOGS;

SELECT arrayJoin AS kv_key
FROM system.query_log
ARRAY JOIN ProfileEvents.Names AS arrayJoin
PREWHERE current_database = currentDatabase() AND has(arrayMap(key -> key, ProfileEvents.Names), 'Query')
WHERE arrayJoin = 'Query'
SELECT * FROM system.query_log
PREWHERE ProfileEvents['Query'] > 0 and current_database = currentDatabase()

LIMIT 0;

@ -29,6 +29,7 @@ set max_rows_to_read=0;
select count() from system.query_log where
current_database = currentDatabase() and
query like 'select \'01231_log_queries_min_type w/ Settings/EXCEPTION_WHILE_PROCESSING%' and
query not like '%system.query_log%' and
event_date >= yesterday() and
type = 'ExceptionWhileProcessing' and
has(Settings.Names, 'max_rows_to_read');
Settings['max_rows_to_read'] != '';
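
The rewritten check works because subscripting a Map with a missing key returns the value type's default (an empty string for `Map(String, String)`), so `Settings['max_rows_to_read'] != ''` takes over the role of the old `has(Settings.Names, ...)`. Where key presence is all that matters, `mapContains` should be an equivalent, if available in the build (sketch):

```sql
-- Both forms express "the query ran with max_rows_to_read set".
SELECT count()
FROM system.query_log
WHERE type = 'ExceptionWhileProcessing'
  AND mapContains(Settings, 'max_rows_to_read');
```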

@ -24,7 +24,7 @@ function show_processes_func()
# These two system metrics for the generating query above are guaranteed to be nonzero when ProcFS is mounted at /proc
$CLICKHOUSE_CLIENT -q "
SELECT count() > 0 FROM system.processes\
WHERE has(ProfileEvents.Names, 'OSCPUVirtualTimeMicroseconds') AND has(ProfileEvents.Names, 'OSReadChars')\
WHERE ProfileEvents['OSCPUVirtualTimeMicroseconds'] > 0 AND ProfileEvents['OSReadChars'] > 0 \
SETTINGS max_threads = 1
" | grep '1' && break;
done

@ -6,6 +6,6 @@ SET min_bytes_to_use_mmap_io = 1;
SELECT * FROM test_01343;

SYSTEM FLUSH LOGS;
SELECT PE.Values FROM system.query_log ARRAY JOIN ProfileEvents AS PE WHERE current_database = currentDatabase() AND event_date >= yesterday() AND event_time >= now() - 300 AND query LIKE 'SELECT * FROM test_01343%' AND PE.Names = 'CreatedReadBufferMMap' AND type = 2 ORDER BY event_time DESC LIMIT 1;
SELECT ProfileEvents['CreatedReadBufferMMap'] AS value FROM system.query_log WHERE current_database = currentDatabase() AND event_date >= yesterday() AND event_time >= now() - 300 AND query LIKE 'SELECT * FROM test_01343%' AND type = 2 ORDER BY event_time DESC LIMIT 1;

DROP TABLE test_01343;

@ -6,6 +6,7 @@ SET min_bytes_to_use_mmap_io = 1;
SELECT * FROM test_01344 WHERE x = 'Hello, world';

SYSTEM FLUSH LOGS;
SELECT PE.Values FROM system.query_log ARRAY JOIN ProfileEvents AS PE WHERE current_database = currentDatabase() AND event_date >= yesterday() AND query LIKE 'SELECT * FROM test_01344 WHERE x = ''Hello, world''%' AND PE.Names = 'CreatedReadBufferMMap' AND type = 2 ORDER BY event_time DESC LIMIT 1;
SELECT ProfileEvents['CreatedReadBufferMMap'] as value FROM system.query_log
WHERE current_database = currentDatabase() AND event_date >= yesterday() AND query LIKE 'SELECT * FROM test_01344 WHERE x = ''Hello, world''%' AND type = 2 ORDER BY event_time DESC LIMIT 1;

DROP TABLE test_01344;

@ -8,7 +8,9 @@ SET log_queries=1;
SELECT 1;
SYSTEM FLUSH LOGS;

-- NOTE: can be rewritten using log_queries_min_query_duration_ms

CREATE MATERIALIZED VIEW slow_log Engine=Memory AS
(
SELECT * FROM

@ -6,23 +6,24 @@ SYSTEM FLUSH LOGS;

SELECT written_rows FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE 'INSERT INTO /* test 01413, query 1 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1;

SELECT ProfileEvents.Values as value FROM system.query_log ARRAY JOIN ProfileEvents
WHERE current_database = currentDatabase() AND ProfileEvents.Names = 'InsertedRows' AND query LIKE 'INSERT INTO /* test 01413, query 1 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1;
SELECT ProfileEvents['InsertedRows'] as value FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE 'INSERT INTO /* test 01413, query 1 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1;

INSERT INTO /* test 01413, query 2 */ rows_events_test VALUES (2,2), (3,3);
SYSTEM FLUSH LOGS;

SELECT written_rows FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE 'INSERT INTO /* test 01413, query 2 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1;

SELECT ProfileEvents.Values as value FROM system.query_log ARRAY JOIN ProfileEvents
WHERE current_database = currentDatabase() AND ProfileEvents.Names = 'InsertedRows' AND query LIKE 'INSERT INTO /* test 01413, query 2 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1;
SELECT ProfileEvents['InsertedRows'] as value FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE 'INSERT INTO /* test 01413, query 2 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1;

SELECT * FROM /* test 01413, query 3 */ rows_events_test WHERE v = 2;
SYSTEM FLUSH LOGS;

SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE 'SELECT * FROM /* test 01413, query 3 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1;

SELECT ProfileEvents.Values as value FROM system.query_log ARRAY JOIN ProfileEvents
WHERE current_database = currentDatabase() AND ProfileEvents.Names = 'SelectedRows' AND query LIKE 'SELECT * FROM /* test 01413, query 3 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1;

SELECT ProfileEvents['SelectedRows'] as value FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE 'SELECT * FROM /* test 01413, query 3 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1;

DROP TABLE rows_events_test;

@ -28,8 +28,8 @@ select count(*) "'"'"initial query spans with proper parent"'"'"
from
(select *, attribute_name, attribute_value
from system.opentelemetry_span_log
array join attribute.names as attribute_name,
attribute.values as attribute_value) o
array join mapKeys(attribute) as attribute_name,
mapValues(attribute) as attribute_value) o
join system.query_log on query_id = o.attribute_value
where
trace_id = reinterpretAsUUID(reverse(unhex('$trace_id')))

@ -45,7 +45,7 @@ select count(*) "'"'"initial query spans with proper parent"'"'"
-- same non-empty value for all 'query' spans in this trace.
select uniqExact(value) "'"'"unique non-empty tracestate values"'"'"
from system.opentelemetry_span_log
array join attribute.names as name, attribute.values as value
array join mapKeys(attribute) as name, mapValues(attribute) as value
where
trace_id = reinterpretAsUUID(reverse(unhex('$trace_id')))
and operation_name = 'query'

@ -108,14 +108,11 @@ wait

${CLICKHOUSE_CLIENT} -q "system flush logs"
${CLICKHOUSE_CLIENT} -q "
with count(*) as c
-- expect 200 * 0.1 = 20 sampled events on average
select if(c > 1 and c < 50, 'OK', 'fail: ' || toString(c))
select if(count() > 1 and count() < 50, 'OK', 'Fail')
from system.opentelemetry_span_log
array join attribute.names as name, attribute.values as value
where name = 'clickhouse.query_id'
and operation_name = 'query'
where operation_name = 'query'
and parent_span_id = 0 -- only account for the initial queries
and value like '$query_id-%'
and attribute['clickhouse.query_id'] like '$query_id-%'
;
"

@ -7,7 +7,7 @@ SYSTEM DROP MARK CACHE;
SELECT a.size0 FROM t_arr;

SYSTEM FLUSH LOGS;
SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'FileOpen')]
SELECT ProfileEvents['FileOpen']
FROM system.query_log
WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT a.size0 FROM %t_arr%'))
AND current_database = currentDatabase();

@ -24,7 +24,7 @@ SYSTEM DROP MARK CACHE;
SELECT t.u FROM t_tup;

SYSTEM FLUSH LOGS;
SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'FileOpen')]
SELECT ProfileEvents['FileOpen']
FROM system.query_log
WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT t._ FROM %t_tup%'))
AND current_database = currentDatabase();

@ -38,7 +38,7 @@ SYSTEM DROP MARK CACHE;
SELECT n.null FROM t_nul;

SYSTEM FLUSH LOGS;
SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'FileOpen')]
SELECT ProfileEvents['FileOpen']
FROM system.query_log
WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT n.null FROM %t_nul%'))
AND current_database = currentDatabase();

@ -57,7 +57,7 @@ SYSTEM DROP MARK CACHE;
SELECT m.values FROM t_map;

SYSTEM FLUSH LOGS;
SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'FileOpen')]
SELECT ProfileEvents['FileOpen']
FROM system.query_log
WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT m.% FROM %t_map%'))
AND current_database = currentDatabase();

@ -33,7 +33,7 @@ SELECT col1.a FROM nested FORMAT Null;

-- 4 files: (col1.size0, col1.a) x2
SYSTEM FLUSH LOGS;
SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'FileOpen')]
SELECT ProfileEvents['FileOpen']
FROM system.query_log
WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT col1.a FROM %nested%'))
AND event_date >= yesterday() AND current_database = currentDatabase();

@ -43,7 +43,7 @@ SELECT col3.n2.s FROM nested FORMAT Null;

-- 6 files: (col3.size0, col3.n2.size1, col3.n2.s) x2
SYSTEM FLUSH LOGS;
SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'FileOpen')]
SELECT ProfileEvents['FileOpen']
FROM system.query_log
WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT col3.n2.s FROM %nested%'))
AND event_date >= yesterday() AND current_database = currentDatabase();