Merge branch 'master' into ldap-per-user-authentication

Commit: e8144976fe
@@ -15,15 +15,27 @@ using Poco::Message;
 using DB::LogsLevel;
 using DB::CurrentThread;
 
-/// Logs a message to a specified logger with that level.
-
-#define LOG_IMPL(logger, priority, PRIORITY, ...) do \
+namespace
+{
+    template <typename... Ts> constexpr size_t numArgs(Ts &&...) { return sizeof...(Ts); }
+    template <typename T, typename... Ts> constexpr auto firstArg(T && x, Ts &&...) { return std::forward<T>(x); }
+}
+
+
+/// Logs a message to a specified logger with that level.
+/// If more than one argument is provided,
+/// the first argument is interpreted as a template with {}-substitutions
+/// and the latter arguments are treated as values to substitute.
+/// If only one argument is provided, it is treated as a message without substitutions.
+
+#define LOG_IMPL(logger, priority, PRIORITY, ...) do \
 { \
     const bool is_clients_log = (CurrentThread::getGroup() != nullptr) && \
         (CurrentThread::getGroup()->client_logs_level >= (priority)); \
     if ((logger)->is((PRIORITY)) || is_clients_log) \
     { \
-        std::string formatted_message = fmt::format(__VA_ARGS__); \
+        std::string formatted_message = numArgs(__VA_ARGS__) > 1 ? fmt::format(__VA_ARGS__) : firstArg(__VA_ARGS__); \
        if (auto channel = (logger)->getChannel()) \
        { \
            std::string file_function; \
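As an aside, here is a minimal sketch of what the `numArgs`/`firstArg` dispatch above buys at call sites: a single argument is taken verbatim (so stray `{}` in a plain message is never parsed), while several arguments go through `fmt::format`. This is an illustration only; `FORMAT_MESSAGE` is a hypothetical macro standing in for the message-building part of `LOG_IMPL`, and fmt is assumed available as in the code above.

``` cpp
#include <string>
#include <utility>
#include <fmt/format.h>

namespace
{
    /// Same helpers as in the diff above.
    template <typename... Ts> constexpr size_t numArgs(Ts &&...) { return sizeof...(Ts); }
    template <typename T, typename... Ts> constexpr auto firstArg(T && x, Ts &&...) { return std::forward<T>(x); }
}

/// Hypothetical stand-in for the message-building part of LOG_IMPL.
#define FORMAT_MESSAGE(...) \
    (numArgs(__VA_ARGS__) > 1 ? fmt::format(__VA_ARGS__) : firstArg(__VA_ARGS__))

int main()
{
    std::string with_args = FORMAT_MESSAGE("Loaded {} parts", 42); /// formatted: "Loaded 42 parts"
    std::string plain = FORMAT_MESSAGE("Shutting down");           /// single argument: taken verbatim
    return with_args.empty() || plain.empty();
}
```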
@@ -55,6 +55,7 @@ function(protobuf_generate_cpp_impl SRCS HDRS MODES OUTPUT_FILE_EXTS PLUGIN)
     endif()
 
     set (intermediate_dir ${CMAKE_CURRENT_BINARY_DIR}/intermediate)
+    file (MAKE_DIRECTORY ${intermediate_dir})
 
     set (protoc_args)
     foreach (mode ${MODES})
@@ -112,7 +113,7 @@ if (PROTOBUF_GENERATE_CPP_SCRIPT_MODE)
     set (intermediate_dir ${DIR}/intermediate)
     set (intermediate_output "${intermediate_dir}/${FILENAME}")
 
-    if (COMPILER_ID STREQUAL "Clang")
+    if (COMPILER_ID MATCHES "Clang")
        set (pragma_push "#pragma clang diagnostic push\n")
        set (pragma_pop "#pragma clang diagnostic pop\n")
        set (pragma_disable_warnings "#pragma clang diagnostic ignored \"-Weverything\"\n")
@@ -80,7 +80,9 @@ RUN apt-get --allow-unauthenticated update -y \
            pigz \
            moreutils \
            libcctz-dev \
-           libldap2-dev
+           libldap2-dev \
+           libsasl2-dev \
+           heimdal-multidev
 
@@ -32,8 +32,8 @@ CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \
    ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/; \
    ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/; \
    ln -s /usr/lib/llvm-9/bin/llvm-symbolizer /usr/bin/llvm-symbolizer; \
-    if [ -n $USE_DATABASE_ATOMIC ] && [ $USE_DATABASE_ATOMIC -eq 1 ]; then ln -s /usr/share/clickhouse-test/config/database_atomic_configd.xml /etc/clickhouse-server/config.d/; fi; \
-    if [ -n $USE_DATABASE_ATOMIC ] && [ $USE_DATABASE_ATOMIC -eq 1 ]; then ln -s /usr/share/clickhouse-test/config/database_atomic_usersd.xml /etc/clickhouse-server/users.d/; fi; \
+    if [[ -n "$USE_DATABASE_ATOMIC" ]] && [[ "$USE_DATABASE_ATOMIC" -eq 1 ]]; then ln -s /usr/share/clickhouse-test/config/database_atomic_configd.xml /etc/clickhouse-server/config.d/; fi; \
+    if [[ -n "$USE_DATABASE_ATOMIC" ]] && [[ "$USE_DATABASE_ATOMIC" -eq 1 ]]; then ln -s /usr/share/clickhouse-test/config/database_atomic_usersd.xml /etc/clickhouse-server/users.d/; fi; \
    echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 history_size=7'" >> /etc/environment; \
    echo "TSAN_SYMBOLIZER_PATH=/usr/lib/llvm-8/bin/llvm-symbolizer" >> /etc/environment; \
    echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment; \
@@ -78,9 +78,9 @@ CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \
    ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/; \
    ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/; \
    ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/; \
-    if [ -n $USE_POLYMORPHIC_PARTS ] && [ $USE_POLYMORPHIC_PARTS -eq 1 ]; then ln -s /usr/share/clickhouse-test/config/polymorphic_parts.xml /etc/clickhouse-server/config.d/; fi; \
-    if [ -n $USE_DATABASE_ATOMIC ] && [ $USE_DATABASE_ATOMIC -eq 1 ]; then ln -s /usr/share/clickhouse-test/config/database_atomic_configd.xml /etc/clickhouse-server/config.d/; fi; \
-    if [ -n $USE_DATABASE_ATOMIC ] && [ $USE_DATABASE_ATOMIC -eq 1 ]; then ln -s /usr/share/clickhouse-test/config/database_atomic_usersd.xml /etc/clickhouse-server/users.d/; fi; \
+    if [[ -n "$USE_POLYMORPHIC_PARTS" ]] && [[ "$USE_POLYMORPHIC_PARTS" -eq 1 ]]; then ln -s /usr/share/clickhouse-test/config/polymorphic_parts.xml /etc/clickhouse-server/config.d/; fi; \
+    if [[ -n "$USE_DATABASE_ATOMIC" ]] && [[ "$USE_DATABASE_ATOMIC" -eq 1 ]]; then ln -s /usr/share/clickhouse-test/config/database_atomic_configd.xml /etc/clickhouse-server/config.d/; fi; \
+    if [[ -n "$USE_DATABASE_ATOMIC" ]] && [[ "$USE_DATABASE_ATOMIC" -eq 1 ]]; then ln -s /usr/share/clickhouse-test/config/database_atomic_usersd.xml /etc/clickhouse-server/users.d/; fi; \
    ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml; \
    service zookeeper start; sleep 5; \
    service clickhouse-server start && sleep 5 && clickhouse-test --testname --shard --zookeeper $ADDITIONAL_OPTIONS $SKIP_TESTS_OPTION 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
@@ -586,11 +586,11 @@ If the table doesn’t exist, ClickHouse will create it. If the structure of the
 </query_log>
 ```
 
-## query\_thread\_log {#server_configuration_parameters-query-thread-log}
+## query\_thread\_log {#server_configuration_parameters-query_thread_log}
 
 Setting for logging threads of queries received with the [log\_query\_threads=1](../settings/settings.md#settings-log-query-threads) setting.
 
-Queries are logged in the [system.query\_thread\_log](../../operations/system-tables.md#system_tables-query-thread-log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below).
+Queries are logged in the [system.query\_thread\_log](../../operations/system-tables.md#system_tables-query_thread_log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below).
 
 Use the following parameters to configure logging:
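The parameter list itself is cut off by the diff. For orientation, a typical `query_thread_log` section could look like the sketch below. It is assembled from parameters mentioned on this page (`table`, `partition_by`, `flush_interval_milliseconds`) plus the conventional `database` companion parameter; it is an illustration, not text from the commit.

``` xml
<query_thread_log>
    <database>system</database>
    <table>query_thread_log</table>
    <partition_by>toMonday(event_date)</partition_by>
    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
</query_thread_log>
```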
@@ -598,7 +598,7 @@ log_queries_min_type='EXCEPTION_WHILE_PROCESSING'
 
 Setting up query threads logging.
 
-Query threads run by ClickHouse with this setup are logged according to the rules in the [query\_thread\_log](../server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) server configuration parameter.
+Query threads run by ClickHouse with this setup are logged according to the rules in the [query\_thread\_log](../server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server configuration parameter.
 
 Example:
 
@@ -5,7 +5,7 @@ toc_title: System Tables
 
 # System Tables {#system-tables}
 
-## Introduction
+## Introduction {#system-tables-introduction}
 
 System tables provide information about:
 
@@ -18,9 +18,12 @@ System tables:
 - Available only for reading data.
 - Can't be dropped or altered, but can be detached.
 
-The `metric_log`, `query_log`, `query_thread_log`, `trace_log` system tables store data in a storage filesystem. Other system tables store their data in RAM. ClickHouse server creates such system tables at the start.
+Most system tables store their data in RAM. The ClickHouse server creates such system tables at startup.
 
-### Sources of System Metrics
+The [metric_log](#system_tables-metric_log), [query_log](#system_tables-query_log), [query_thread_log](#system_tables-query_thread_log), [trace_log](#system_tables-trace_log) system tables store data in a storage filesystem. You can alter them or remove them from a disk manually. If you remove one of these tables from a disk, the ClickHouse server creates it again at the time of the next data recording. The storage period for these tables is not limited, and the ClickHouse server doesn't delete their data automatically; you need to arrange the removal of outdated logs yourself. For example, you can use [TTL](../sql-reference/statements/alter.md#manipulations-with-table-ttl) settings to remove outdated log records.
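As a hedged illustration of that TTL suggestion (the table name and the 30-day window are only an example, not from the commit):

``` sql
ALTER TABLE system.query_log MODIFY TTL event_date + INTERVAL 30 DAY;
```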
+
+### Sources of System Metrics {#system-tables-sources-of-system-metrics}
 
 For collecting system metrics ClickHouse server uses:
 
@@ -587,97 +590,150 @@ Columns:
 - `source_file` (LowCardinality(String)) — Source file from which the logging was done.
 - `source_line` (UInt64) — Source line from which the logging was done.
 
-## system.query\_log {#system_tables-query_log}
+## system.query_log {#system_tables-query_log}
 
-Contains information about execution of queries. For each query, you can see processing start time, duration of processing, error messages and other information.
+Contains information about executed queries, for example, start time, duration of processing, error messages.
 
 !!! note "Note"
     The table doesn’t contain input data for `INSERT` queries.
 
-ClickHouse creates this table only if the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.
+You can change the settings of query logging in the [query_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) section of the server configuration.
 
-To enable query logging, set the [log\_queries](settings/settings.md#settings-log-queries) parameter to 1. For details, see the [Settings](settings/settings.md) section.
+You can disable query logging by setting [log_queries = 0](settings/settings.md#settings-log-queries). We don't recommend turning off logging, because information in this table is important for solving issues.
 
+The flushing period of logs is set in the `flush_interval_milliseconds` parameter of the [query_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server settings section. To force flushing of logs, use the [SYSTEM FLUSH LOGS](../sql-reference/statements/system.md#query_language-system-flush_logs) query.
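For instance, flushing the in-memory buffer on demand is a single statement (shown here as an aside; the statement itself is the one named in the paragraph above):

``` sql
SYSTEM FLUSH LOGS;
```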
+
+ClickHouse doesn't delete logs from the table automatically. See [Introduction](#system-tables-introduction) for more details.
 
 The `system.query_log` table registers two kinds of queries:
 
 1. Initial queries that were run directly by the client.
 2. Child queries that were initiated by other queries (for distributed query execution). For these types of queries, information about the parent queries is shown in the `initial_*` columns.
 
+Each query creates one or two rows in the `query_log` table, depending on the status (see the `type` column) of the query:
+
+1. If the query execution was successful, two rows with the `QueryStart` and `QueryFinish` types are created.
+2. If an error occurred during query processing, two events with the `QueryStart` and `ExceptionWhileProcessing` types are created.
+3. If an error occurred before launching the query, a single event with the `ExceptionBeforeStart` type is created.
 
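A quick way to see that status logic in the data is a hypothetical query like this (invented for illustration, not part of the page):

``` sql
SELECT type, count() AS events
FROM system.query_log
GROUP BY type
ORDER BY type;
```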
 Columns:
 
-- `type` (`Enum8`) — Type of event that occurred when executing the query. Values:
+- `type` ([Enum8](../sql-reference/data-types/enum.md)) — Type of an event that occurred when executing the query. Values:
     - `'QueryStart' = 1` — Successful start of query execution.
     - `'QueryFinish' = 2` — Successful end of query execution.
     - `'ExceptionBeforeStart' = 3` — Exception before the start of query execution.
     - `'ExceptionWhileProcessing' = 4` — Exception during the query execution.
-- `event_date` (Date) — Query starting date.
-- `event_time` (DateTime) — Query starting time.
-- `query_start_time` (DateTime) — Start time of query execution.
-- `query_duration_ms` (UInt64) — Duration of query execution.
-- `read_rows` (UInt64) — Number of read rows.
-- `read_bytes` (UInt64) — Number of read bytes.
-- `written_rows` (UInt64) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
-- `written_bytes` (UInt64) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0.
-- `result_rows` (UInt64) — Number of rows in the result.
-- `result_bytes` (UInt64) — Number of bytes in the result.
-- `memory_usage` (UInt64) — Memory consumption by the query.
-- `query` (String) — Query string.
-- `exception` (String) — Exception message.
-- `stack_trace` (String) — Stack trace (a list of methods called before the error occurred). An empty string, if the query is completed successfully.
-- `is_initial_query` (UInt8) — Query type. Possible values:
+- `event_date` ([Date](../sql-reference/data-types/date.md)) — Query starting date.
+- `event_time` ([DateTime](../sql-reference/data-types/datetime.md)) — Query starting time.
+- `query_start_time` ([DateTime](../sql-reference/data-types/datetime.md)) — Start time of query execution.
+- `query_duration_ms` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Duration of query execution in milliseconds.
+- `read_rows` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of rows read from all tables and table functions participating in the query. It includes usual subqueries and subqueries for `IN` and `JOIN`. For distributed queries `read_rows` includes the total number of rows read at all replicas. Each replica sends its `read_rows` value, and the server-initiator of the query summarizes all received and local values. Cache volumes don't affect this value.
+- `read_bytes` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of bytes read from all tables and table functions participating in the query. It includes usual subqueries and subqueries for `IN` and `JOIN`. For distributed queries `read_bytes` includes the total number of bytes read at all replicas. Each replica sends its `read_bytes` value, and the server-initiator of the query summarizes all received and local values. Cache volumes don't affect this value.
+- `written_rows` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
+- `written_bytes` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0.
+- `result_rows` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of rows in the result of a `SELECT` query, or the number of rows in an `INSERT` query.
+- `result_bytes` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — RAM volume in bytes used to store the query result.
+- `memory_usage` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Memory consumption by the query.
+- `query` ([String](../sql-reference/data-types/string.md)) — Query string.
+- `exception` ([String](../sql-reference/data-types/string.md)) — Exception message.
+- `exception_code` ([Int32](../sql-reference/data-types/int-uint.md)) — Code of an exception.
+- `stack_trace` ([String](../sql-reference/data-types/string.md)) — [Stack trace](https://en.wikipedia.org/wiki/Stack_trace). An empty string, if the query was completed successfully.
+- `is_initial_query` ([UInt8](../sql-reference/data-types/int-uint.md)) — Query type. Possible values:
     - 1 — Query was initiated by the client.
-    - 0 — Query was initiated by another query for distributed query execution.
-- `user` (String) — Name of the user who initiated the current query.
-- `query_id` (String) — ID of the query.
-- `address` (IPv6) — IP address that was used to make the query.
-- `port` (UInt16) — The client port that was used to make the query.
-- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution).
-- `initial_query_id` (String) — ID of the initial query (for distributed query execution).
-- `initial_address` (IPv6) — IP address that the parent query was launched from.
-- `initial_port` (UInt16) — The client port that was used to make the parent query.
-- `interface` (UInt8) — Interface that the query was initiated from. Possible values:
+    - 0 — Query was initiated by another query as part of distributed query execution.
+- `user` ([String](../sql-reference/data-types/string.md)) — Name of the user who initiated the current query.
+- `query_id` ([String](../sql-reference/data-types/string.md)) — ID of the query.
+- `address` ([IPv6](../sql-reference/data-types/domains/ipv6.md)) — IP address that was used to make the query.
+- `port` ([UInt16](../sql-reference/data-types/int-uint.md)) — The client port that was used to make the query.
+- `initial_user` ([String](../sql-reference/data-types/string.md)) — Name of the user who ran the initial query (for distributed query execution).
+- `initial_query_id` ([String](../sql-reference/data-types/string.md)) — ID of the initial query (for distributed query execution).
+- `initial_address` ([IPv6](../sql-reference/data-types/domains/ipv6.md)) — IP address that the parent query was launched from.
+- `initial_port` ([UInt16](../sql-reference/data-types/int-uint.md)) — The client port that was used to make the parent query.
+- `interface` ([UInt8](../sql-reference/data-types/int-uint.md)) — Interface that the query was initiated from. Possible values:
     - 1 — TCP.
     - 2 — HTTP.
-- `os_user` (String) — OS’s username who runs [clickhouse-client](../interfaces/cli.md).
-- `client_hostname` (String) — Hostname of the client machine where the [clickhouse-client](../interfaces/cli.md) or another TCP client is run.
-- `client_name` (String) — The [clickhouse-client](../interfaces/cli.md) or another TCP client name.
-- `client_revision` (UInt32) — Revision of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `client_version_major` (UInt32) — Major version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `client_version_minor` (UInt32) — Minor version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `client_version_patch` (UInt32) — Patch component of the [clickhouse-client](../interfaces/cli.md) or another TCP client version.
+- `os_user` ([String](../sql-reference/data-types/string.md)) — Operating system username of the user who runs [clickhouse-client](../interfaces/cli.md).
+- `client_hostname` ([String](../sql-reference/data-types/string.md)) — Hostname of the client machine where the [clickhouse-client](../interfaces/cli.md) or another TCP client is run.
+- `client_name` ([String](../sql-reference/data-types/string.md)) — The [clickhouse-client](../interfaces/cli.md) or another TCP client name.
+- `client_revision` ([UInt32](../sql-reference/data-types/int-uint.md)) — Revision of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
+- `client_version_major` ([UInt32](../sql-reference/data-types/int-uint.md)) — Major version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
+- `client_version_minor` ([UInt32](../sql-reference/data-types/int-uint.md)) — Minor version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
+- `client_version_patch` ([UInt32](../sql-reference/data-types/int-uint.md)) — Patch component of the [clickhouse-client](../interfaces/cli.md) or another TCP client version.
 - `http_method` (UInt8) — HTTP method that initiated the query. Possible values:
     - 0 — The query was launched from the TCP interface.
     - 1 — `GET` method was used.
     - 2 — `POST` method was used.
-- `http_user_agent` (String) — The `UserAgent` header passed in the HTTP request.
-- `quota_key` (String) — The “quota key” specified in the [quotas](quotas.md) setting (see `keyed`).
-- `revision` (UInt32) — ClickHouse revision.
-- `thread_numbers` (Array(UInt32)) — Number of threads that are participating in query execution.
-- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. Their description can be found in the [system.events](#system_tables-events) table.
-- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` column.
-- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
-- `Settings.Values` (Array(String)) — Values of settings that are listed in the `Settings.Names` column.
+- `http_user_agent` ([String](../sql-reference/data-types/string.md)) — The `UserAgent` header passed in the HTTP request.
+- `quota_key` ([String](../sql-reference/data-types/string.md)) — The “quota key” specified in the [quotas](quotas.md) setting (see `keyed`).
+- `revision` ([UInt32](../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
+- `thread_numbers` ([Array(UInt32)](../sql-reference/data-types/array.md)) — Number of threads that are participating in query execution.
+- `ProfileEvents.Names` ([Array(String)](../sql-reference/data-types/array.md)) — Counters that measure different metrics. Their description can be found in the [system.events](#system_tables-events) table.
+- `ProfileEvents.Values` ([Array(UInt64)](../sql-reference/data-types/array.md)) — Values of metrics that are listed in the `ProfileEvents.Names` column.
+- `Settings.Names` ([Array(String)](../sql-reference/data-types/array.md)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
+- `Settings.Values` ([Array(String)](../sql-reference/data-types/array.md)) — Values of settings that are listed in the `Settings.Names` column.
 
-Each query creates one or two rows in the `query_log` table, depending on the status of the query:
-
-1. If the query execution is successful, two events with types 1 and 2 are created (see the `type` column).
-2. If an error occurred during query processing, two events with types 1 and 4 are created.
-3. If an error occurred before launching the query, a single event with type 3 is created.
-
-By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.
-
-When the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted.
-
-!!! note "Note"
-    The storage period for logs is unlimited. Logs aren’t automatically deleted from the table. You need to organize the removal of outdated logs yourself.
+**Example**
+
+``` sql
+SELECT * FROM system.query_log LIMIT 1 FORMAT Vertical;
+```
+
+``` text
+Row 1:
+──────
+type: QueryStart
+event_date: 2020-05-13
+event_time: 2020-05-13 14:02:28
+query_start_time: 2020-05-13 14:02:28
+query_duration_ms: 0
+read_rows: 0
+read_bytes: 0
+written_rows: 0
+written_bytes: 0
+result_rows: 0
+result_bytes: 0
+memory_usage: 0
+query: SELECT 1
+exception_code: 0
+exception:
+stack_trace:
+is_initial_query: 1
+user: default
+query_id: 5e834082-6f6d-4e34-b47b-cd1934f4002a
+address: ::ffff:127.0.0.1
+port: 57720
+initial_user: default
+initial_query_id: 5e834082-6f6d-4e34-b47b-cd1934f4002a
+initial_address: ::ffff:127.0.0.1
+initial_port: 57720
+interface: 1
+os_user: bayonet
+client_hostname: clickhouse.ru-central1.internal
+client_name: ClickHouse client
+client_revision: 54434
+client_version_major: 20
+client_version_minor: 4
+client_version_patch: 1
+http_method: 0
+http_user_agent:
+quota_key:
+revision: 54434
+thread_ids: []
+ProfileEvents.Names: []
+ProfileEvents.Values: []
+Settings.Names: ['use_uncompressed_cache','load_balancing','log_queries','max_memory_usage']
+Settings.Values: ['0','random','1','10000000000']
+```
+
+**See Also**
+
+- [system.query_thread_log](#system_tables-query_thread_log) — This table contains information about each query execution thread.
 
 You can specify an arbitrary partitioning key for the `system.query_log` table in the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server setting (see the `partition_by` parameter).
 
-## system.query\_thread\_log {#system_tables-query-thread-log}
+## system.query_thread_log {#system_tables-query_thread_log}
 
 The table contains information about each query execution thread.
 
-ClickHouse creates this table only if the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.
+ClickHouse creates this table only if the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.
 
 To enable query logging, set the [log\_query\_threads](settings/settings.md#settings-log-query-threads) parameter to 1. For details, see the [Settings](settings/settings.md) section.
 
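For example, enabling it for the current session is a one-liner (an aside for illustration, not from the commit):

``` sql
SET log_query_threads = 1;
```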
@@ -729,14 +785,14 @@ Columns:
 - `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics for this thread. Their description can be found in the [system.events](#system_tables-events) table.
 - `ProfileEvents.Values` (Array(UInt64)) — Values of metrics for this thread that are listed in the `ProfileEvents.Names` column.
 
-By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.
+By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.
 
 When the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted.
 
 !!! note "Note"
     The storage period for logs is unlimited. Logs aren’t automatically deleted from the table. You need to organize the removal of outdated logs yourself.
 
-You can specify an arbitrary partitioning key for the `system.query_thread_log` table in the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) server setting (see the `partition_by` parameter).
+You can specify an arbitrary partitioning key for the `system.query_thread_log` table in the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server setting (see the `partition_by` parameter).
 
 ## system.trace\_log {#system_tables-trace_log}
 
@@ -574,11 +574,11 @@ ClickHouse will check the `min_part_size` and `min_part_size_ratio` conditions
 </query_log>
 ```
 
-## query\_thread\_log {#server_configuration_parameters-query-thread-log}
+## query\_thread\_log {#server_configuration_parameters-query_thread_log}
 
 Settings for logging threads of queries received with the [log\_query\_threads=1](../settings/settings.md#settings-log-query-threads) setting.
 
-Queries are logged in the [system.query\_thread\_log](../../operations/server-configuration-parameters/settings.md#system_tables-query-thread-log) system table, not in a separate file. You can change the name of this table in the `table` parameter (see below).
+Queries are logged in the [system.query\_thread\_log](../../operations/server-configuration-parameters/settings.md#system_tables-query_thread_log) system table, not in a separate file. You can change the name of this table in the `table` parameter (see below).
 
 The following parameters are used to configure logging:
 
@@ -536,7 +536,7 @@ log_queries=1
 
 Enables logging of information about query execution threads.
 
-With this setting enabled, the log of information about query execution threads passed to ClickHouse is written according to the rules of the [query\_thread\_log](../server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) server configuration parameter.
+With this setting enabled, the log of information about query execution threads passed to ClickHouse is written according to the rules of the [query\_thread\_log](../server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server configuration parameter.
 
 Example:
 
@@ -1,4 +1,7 @@
-# System Tables {#sistemnye-tablitsy}
+# System Tables {#system-tables}
 
+## Introduction {#system-tables-introduction}
+
 System tables are used to implement part of the system's functionality and to provide access to information about how the system works.
 You can't delete a system table (though you can perform DETACH).
@@ -544,182 +547,156 @@ CurrentMetric_ReplicatedChecks: 0
 - `source_file` (LowCardinality(String)) — Source file from which the logging was done.
 - `source_line` (UInt64) — Source line from which the logging was done.
 
-## system.query\_log {#system_tables-query_log}
+## system.query_log {#system_tables-query_log}
 
-Contains information about the execution of queries. For each query, you can see the processing start time, the duration of processing, error messages and other information.
+Contains information about executed queries, for example, start time, duration of processing, error messages.
 
 !!! note "Note"
     The table doesn’t contain input data for `INSERT` queries.
 
-ClickHouse creates this table only if the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server configuration parameter is set. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.
+The logging settings can be changed in the [query_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) section of the server configuration.
 
-To enable logging, set the [log\_queries](settings/settings.md#settings-log-queries) parameter to 1. For details, see the [Settings](settings/settings.md#settings) section.
+Logging can be disabled with the [log_queries = 0](settings/settings.md#settings-log-queries) setting. If possible, don't turn logging off, because the information in this table is important for solving issues.
 
+The flushing period of logs into the table is set by the `flush_interval_milliseconds` parameter in the [query_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) configuration section. To force flushing of logs from the memory buffer into the table, use the [SYSTEM FLUSH LOGS](../sql-reference/statements/system.md#query_language-system-flush_logs) query.
+
+ClickHouse doesn't delete logs from the table automatically. See [Introduction](#system-tables-introduction).
+
+You can specify an arbitrary partitioning key for the `system.query_log` table in the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) configuration (the `partition_by` parameter).
+
-If the table is deleted manually, it will be recreated automatically on the fly. Note that all logs accumulated up to the moment of deletion will be removed.
-
 The `system.query_log` table registers two kinds of queries:
 
 1. Initial queries that were run directly by the client.
 2. Child queries that were initiated by other queries (for distributed query execution). For child queries, information about the initial query is contained in the `initial_*` columns.
 
+Depending on the status (the `type` column), each query creates one or two rows in the `query_log` table:
+
+1. If the query executed successfully, two events of types `QueryStart` and `QueryFinish` are created.
+2. If an error occurred during query processing, two events of types `QueryStart` and `ExceptionWhileProcessing` are created.
+3. If an error occurred before the query was launched, a single event of type `ExceptionBeforeStart` is created.
 
 Columns:
 
-- `type` (`Enum8`) — Type of the event that occurred when executing the query. Values:
+- `type` ([Enum8](../sql-reference/data-types/enum.md)) — Type of the event that occurred when executing the query. Values:
     - `'QueryStart' = 1` — Successful start of query execution.
     - `'QueryFinish' = 2` — Successful end of query execution.
     - `'ExceptionBeforeStart' = 3` — Exception before the start of query processing.
     - `'ExceptionWhileProcessing' = 4` — Exception during query processing.
-- `event_date` (Date) — Query starting date.
-- `event_time` (DateTime) — Query starting time.
-- `query_start_time` (DateTime) — Start time of query processing.
-- `query_duration_ms` (UInt64) — Duration of query processing.
-- `read_rows` (UInt64) — Number of rows read.
-- `read_bytes` (UInt64) — Number of bytes read.
-- `written_rows` (UInt64) — Number of written rows for `INSERT` queries. For other queries, the column value is 0.
-- `written_bytes` (UInt64) — Volume of written data in bytes for `INSERT` queries. For other queries, the column value is 0.
-- `result_rows` (UInt64) — Number of rows in the result.
-- `result_bytes` (UInt64) — Size of the result in bytes.
-- `memory_usage` (UInt64) — RAM consumption by the query.
-- `query` (String) — Query text.
-- `exception` (String) — Exception message, if the query terminated with an exception.
-- `stack_trace` (String) — Stack trace (the list of functions called successively before the error). An empty string, if the query completed successfully.
-- `is_initial_query` (UInt8) — Kind of query. Possible values:
+- `event_date` ([Date](../sql-reference/data-types/date.md)) — Query starting date.
+- `event_time` ([DateTime](../sql-reference/data-types/datetime.md)) — Query starting time.
+- `query_start_time` ([DateTime](../sql-reference/data-types/datetime.md)) — Start time of query processing.
+- `query_duration_ms` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Duration of query execution in milliseconds.
+- `read_rows` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of rows read from all tables and table functions participating in the query. Includes usual subqueries and subqueries for `IN` and `JOIN`. For distributed queries `read_rows` includes the total number of rows read at all replicas. Each replica sends its own `read_rows` value, and the server that initiated the query sums all received and local values. Cache volumes are not taken into account.
+- `read_bytes` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Total number of bytes read from all tables and table functions participating in the query. Includes usual subqueries and subqueries for `IN` and `JOIN`. For distributed queries `read_bytes` includes the total number of bytes read at all replicas. Each replica sends its own `read_bytes` value, and the server that initiated the query sums all received and local values. Cache volumes are not taken into account.
+- `written_rows` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of written rows for `INSERT` queries. For other queries, the column value is 0.
+- `written_bytes` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Volume of written data in bytes for `INSERT` queries. For other queries, the column value is 0.
+- `result_rows` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of rows in the result of a `SELECT` query, or the number of rows in an `INSERT` query.
+- `result_bytes` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — Volume of RAM in bytes used to store the query result.
+- `memory_usage` ([UInt64](../sql-reference/data-types/int-uint.md#uint-ranges)) — RAM consumption by the query.
+- `query` ([String](../sql-reference/data-types/string.md)) — Query text.
+- `exception` ([String](../sql-reference/data-types/string.md)) — Exception message, if the query terminated with an exception.
+- `exception_code` ([Int32](../sql-reference/data-types/int-uint.md)) — Exception code.
+- `stack_trace` ([String](../sql-reference/data-types/string.md)) — [Stack trace](https://en.wikipedia.org/wiki/Stack_trace). An empty string, if the query completed successfully.
+- `is_initial_query` ([UInt8](../sql-reference/data-types/int-uint.md)) — Kind of query. Possible values:
     - 1 — The query was initiated by the client.
-    - 0 — The query was initiated by another query in a distributed query.
-- `user` (String) — The user who ran the current query.
-- `query_id` (String) — Query ID.
-- `address` (IPv6) — IP address the query came from.
-- `port` (UInt16) — The client port the query was made from.
-- `initial_user` (String) — The user who ran the initial query (for distributed queries).
-- `initial_query_id` (String) — ID of the parent query.
-- `initial_address` (IPv6) — IP address the parent query came from.
-- `initial_port` (UInt16) — The client port the parent query was made from.
-- `interface` (UInt8) — The interface the query was initiated from. Possible values:
+    - 0 — The query was initiated by another query during distributed query execution.
+- `user` ([String](../sql-reference/data-types/string.md)) — The user who ran the current query.
+- `query_id` ([String](../sql-reference/data-types/string.md)) — Query ID.
+- `address` ([IPv6](../sql-reference/data-types/domains/ipv6.md)) — IP address the query came from.
+- `port` ([UInt16](../sql-reference/data-types/int-uint.md)) — The client port the query was made from.
+- `initial_user` ([String](../sql-reference/data-types/string.md)) — The user who ran the initial query (for distributed queries).
+- `initial_query_id` ([String](../sql-reference/data-types/string.md)) — ID of the parent query.
+- `initial_address` ([IPv6](../sql-reference/data-types/domains/ipv6.md)) — IP address the parent query came from.
+- `initial_port` ([UInt16](../sql-reference/data-types/int-uint.md)) — The client port the parent query was made from.
+- `interface` ([UInt8](../sql-reference/data-types/int-uint.md)) — The interface the query was initiated from. Possible values:
     - 1 — TCP.
     - 2 — HTTP.
-- `os_user` (String) — Name of the OS user who ran [clickhouse-client](../interfaces/cli.md).
-- `client_hostname` (String) — Hostname of the machine the [clickhouse-client](../interfaces/cli.md) or another TCP client connected from.
-- `client_name` (String) — The [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `client_revision` (UInt32) — Revision of [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `client_version_major` (UInt32) — Major version of [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `client_version_minor` (UInt32) — Minor version of [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `client_version_patch` (UInt32) — Patch version of [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `http_method` (UInt8) — HTTP method that initiated the query. Possible values:
+- `os_user` ([String](../sql-reference/data-types/string.md)) — Operating system username of the user who ran [clickhouse-client](../interfaces/cli.md).
+- `client_hostname` ([String](../sql-reference/data-types/string.md)) — Hostname of the machine the [clickhouse-client](../interfaces/cli.md) or another TCP client connected from.
+- `client_name` ([String](../sql-reference/data-types/string.md)) — The [clickhouse-client](../interfaces/cli.md) or another TCP client.
+- `client_revision` ([UInt32](../sql-reference/data-types/int-uint.md)) — Revision of [clickhouse-client](../interfaces/cli.md) or another TCP client.
+- `client_version_major` ([UInt32](../sql-reference/data-types/int-uint.md)) — Major version of [clickhouse-client](../interfaces/cli.md) or another TCP client.
+- `client_version_minor` ([UInt32](../sql-reference/data-types/int-uint.md)) — Minor version of [clickhouse-client](../interfaces/cli.md) or another TCP client.
+- `client_version_patch` ([UInt32](../sql-reference/data-types/int-uint.md)) — Patch version of [clickhouse-client](../interfaces/cli.md) or another TCP client.
+- `http_method` ([UInt8](../sql-reference/data-types/int-uint.md)) — HTTP method that initiated the query. Possible values:
     - 0 — The query was launched from the TCP interface.
     - 1 — `GET`.
     - 2 — `POST`.
-- `http_user_agent` (String) — The `UserAgent` HTTP header.
-- `quota_key` (String) — The "quota key" from the [quotas](quotas.md) settings (see `keyed`).
-- `revision` (UInt32) — ClickHouse revision.
-- `thread_numbers` (Array(UInt32)) — Number of threads participating in query processing.
-- `ProfileEvents.Names` (Array(String)) — Counters that measure various metrics. Their description can be found in the [system.events](#system_tables-events) table.
-- `ProfileEvents.Values` (Array(UInt64)) — Metrics listed in the `ProfileEvents.Names` column.
-- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging of settings changes, set the `log_query_settings` parameter to 1.
-- `Settings.Values` (Array(String)) — Values of the settings listed in the `Settings.Names` column.
+- `http_user_agent` ([String](../sql-reference/data-types/string.md)) — The `UserAgent` HTTP header.
+- `quota_key` ([String](../sql-reference/data-types/string.md)) — The "quota key" from the [quotas](quotas.md) settings (see `keyed`).
+- `revision` ([UInt32](../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
+- `thread_numbers` ([Array(UInt32)](../sql-reference/data-types/array.md)) — Number of threads participating in query processing.
+- `ProfileEvents.Names` ([Array(String)](../sql-reference/data-types/array.md)) — Counters that measure various metrics. Their description can be found in the [system.events](#system_tables-events) table.
+- `ProfileEvents.Values` ([Array(UInt64)](../sql-reference/data-types/array.md)) — Metrics listed in the `ProfileEvents.Names` column.
+- `Settings.Names` ([Array(String)](../sql-reference/data-types/array.md)) — Names of settings that were changed when the client ran the query. To enable logging of settings changes, set the `log_query_settings` parameter to 1.
+- `Settings.Values` ([Array(String)](../sql-reference/data-types/array.md)) — Values of the settings listed in the `Settings.Names` column.
 
-Each query creates one or two rows in the `query_log` table, depending on the status of the query:
-
-1. If the query executed successfully, two events of types 1 and 2 are created (see the `type` column).
-2. If an error occurred during query processing, two events of types 1 and 4 are created.
-3. If an error occurred before the query was launched, a single event of type 3 is created.
-
-By default, rows are added to the logging table at intervals of 7.5 seconds. The interval can be set in the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server configuration parameter (see the `flush_interval_milliseconds` parameter). To force flushing of logs from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.
-
-If the table is deleted manually, it will be recreated automatically on the fly. Note that all logs accumulated up to the moment of deletion will be removed.
-
-!!! note "Note"
-    The storage period for logs is unlimited. Logs are not deleted from the table automatically. You need to arrange the removal of outdated logs yourself.
-
-You can specify an arbitrary partitioning key for the `system.query_log` table in the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) configuration (the `partition_by` parameter).
+**Example**
+
+``` sql
+SELECT * FROM system.query_log LIMIT 1 FORMAT Vertical;
+```
+
+``` text
+Row 1:
+──────
+type: QueryStart
+event_date: 2020-05-13
+event_time: 2020-05-13 14:02:28
+query_start_time: 2020-05-13 14:02:28
+query_duration_ms: 0
+read_rows: 0
+read_bytes: 0
+written_rows: 0
+written_bytes: 0
+result_rows: 0
+result_bytes: 0
+memory_usage: 0
+query: SELECT 1
+exception_code: 0
+exception:
+stack_trace:
+is_initial_query: 1
+user: default
+query_id: 5e834082-6f6d-4e34-b47b-cd1934f4002a
+address: ::ffff:127.0.0.1
+port: 57720
+initial_user: default
+initial_query_id: 5e834082-6f6d-4e34-b47b-cd1934f4002a
+initial_address: ::ffff:127.0.0.1
+initial_port: 57720
+interface: 1
+os_user: bayonet
+client_hostname: clickhouse.ru-central1.internal
+client_name: ClickHouse client
+client_revision: 54434
+client_version_major: 20
+client_version_minor: 4
+client_version_patch: 1
+http_method: 0
+http_user_agent:
+quota_key:
+revision: 54434
+thread_ids: []
+ProfileEvents.Names: []
+ProfileEvents.Values: []
+Settings.Names: ['use_uncompressed_cache','load_balancing','log_queries','max_memory_usage']
+Settings.Values: ['0','random','1','10000000000']
+```
+
+**See Also**
+
+- [system.query_thread_log](#system_tables-query_thread_log) — This table contains information about each query execution thread.
 
-## system.query\_log {#system_tables-query_log}
-
-Contains information about execution of queries. For each query, you can see processing start time, duration of processing, error messages and other information.
-
-!!! note "Note"
-    The table doesn’t contain input data for `INSERT` queries.
-
-ClickHouse creates this table only if the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server parameter is specified. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.
-
-To enable query logging, set the [log\_queries](settings/settings.md#settings-log-queries) parameter to 1. For details, see the [Settings](settings/settings.md) section.
-
-The `system.query_log` table registers two kinds of queries:
-
-1. Initial queries that were run directly by the client.
-2. Child queries that were initiated by other queries (for distributed query execution). For these types of queries, information about the parent queries is shown in the `initial_*` columns.
-
-Columns:
-
-- `type` (`Enum8`) — Type of event that occurred when executing the query. Values:
-    - `'QueryStart' = 1` — Successful start of query execution.
-    - `'QueryFinish' = 2` — Successful end of query execution.
-    - `'ExceptionBeforeStart' = 3` — Exception before the start of query execution.
-    - `'ExceptionWhileProcessing' = 4` — Exception during the query execution.
-- `event_date` (Date) — Query starting date.
-- `event_time` (DateTime) — Query starting time.
-- `query_start_time` (DateTime) — Start time of query execution.
-- `query_duration_ms` (UInt64) — Duration of query execution.
-- `read_rows` (UInt64) — Number of read rows.
-- `read_bytes` (UInt64) — Number of read bytes.
-- `written_rows` (UInt64) — For `INSERT` queries, the number of written rows. For other queries, the column value is 0.
-- `written_bytes` (UInt64) — For `INSERT` queries, the number of written bytes. For other queries, the column value is 0.
-- `result_rows` (UInt64) — Number of rows in the result.
-- `result_bytes` (UInt64) — Number of bytes in the result.
-- `memory_usage` (UInt64) — Memory consumption by the query.
-- `query` (String) — Query string.
-- `exception` (String) — Exception message.
-- `stack_trace` (String) — Stack trace (a list of methods called before the error occurred). An empty string, if the query is completed successfully.
-- `is_initial_query` (UInt8) — Query type. Possible values:
-    - 1 — Query was initiated by the client.
-    - 0 — Query was initiated by another query for distributed query execution.
-- `user` (String) — Name of the user who initiated the current query.
-- `query_id` (String) — ID of the query.
-- `address` (IPv6) — IP address that was used to make the query.
-- `port` (UInt16) — The client port that was used to make the query.
-- `initial_user` (String) — Name of the user who ran the initial query (for distributed query execution).
-- `initial_query_id` (String) — ID of the initial query (for distributed query execution).
-- `initial_address` (IPv6) — IP address that the parent query was launched from.
-- `initial_port` (UInt16) — The client port that was used to make the parent query.
-- `interface` (UInt8) — Interface that the query was initiated from. Possible values:
-    - 1 — TCP.
-    - 2 — HTTP.
-- `os_user` (String) — OS’s username who runs [clickhouse-client](../interfaces/cli.md).
-- `client_hostname` (String) — Hostname of the client machine where the [clickhouse-client](../interfaces/cli.md) or another TCP client is run.
-- `client_name` (String) — The [clickhouse-client](../interfaces/cli.md) or another TCP client name.
-- `client_revision` (UInt32) — Revision of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `client_version_major` (UInt32) — Major version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `client_version_minor` (UInt32) — Minor version of the [clickhouse-client](../interfaces/cli.md) or another TCP client.
-- `client_version_patch` (UInt32) — Patch component of the [clickhouse-client](../interfaces/cli.md) or another TCP client version.
-- `http_method` (UInt8) — HTTP method that initiated the query. Possible values:
-    - 0 — The query was launched from the TCP interface.
-    - 1 — `GET` method was used.
-    - 2 — `POST` method was used.
-- `http_user_agent` (String) — The `UserAgent` header passed in the HTTP request.
-- `quota_key` (String) — The «quota key» specified in the [quotas](quotas.md) setting (see `keyed`).
-- `revision` (UInt32) — ClickHouse revision.
-- `thread_numbers` (Array(UInt32)) — Number of threads that are participating in query execution.
-- `ProfileEvents.Names` (Array(String)) — Counters that measure different metrics. The description of them could be found in the table [system.events](#system_tables-events)
-- `ProfileEvents.Values` (Array(UInt64)) — Values of metrics that are listed in the `ProfileEvents.Names` column.
-- `Settings.Names` (Array(String)) — Names of settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
-- `Settings.Values` (Array(String)) — Values of settings that are listed in the `Settings.Names` column.
-
-Each query creates one or two rows in the `query_log` table, depending on the status of the query:
-
-1. If the query execution is successful, two events with types 1 and 2 are created (see the `type` column).
-2. If an error occurred during query processing, two events with types 1 and 4 are created.
-3. If an error occurred before launching the query, a single event with type 3 is created.
-
-By default, logs are added to the table at intervals of 7.5 seconds. You can set this interval in the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server setting (see the `flush_interval_milliseconds` parameter). To flush the logs forcibly from the memory buffer into the table, use the `SYSTEM FLUSH LOGS` query.
-
-When the table is deleted manually, it will be automatically created on the fly. Note that all the previous logs will be deleted.
-
-!!! note "Note"
-    The storage period for logs is unlimited. Logs aren’t automatically deleted from the table. You need to organize the removal of outdated logs yourself.
-
-You can specify an arbitrary partitioning key for the `system.query_log` table in the [query\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server setting (see the `partition_by` parameter).
-
-\#\# system.query\_thread\_log {\#system\_tables-query-thread-log}
+## system.query_thread_log {#system_tables-query_thread_log}
 
 Contains information about each query execution thread.
 
-ClickHouse creates this table only if the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) server configuration parameter is set. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.
+ClickHouse creates this table only if the [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server configuration parameter is set. This parameter sets the logging rules, such as the logging interval or the name of the table the queries will be logged in.
 
 To enable logging, set the [log\_query\_threads](settings/settings.md#settings-log-query-threads) parameter to 1. For details, see the [Settings](settings/settings.md#settings) section.
 
@ -770,16 +747,16 @@ ClickHouse создаёт таблицу только в том случае, к
|
||||
- `ProfileEvents.Names` (Array(String)) — Счетчики для изменения различных метрик для данного потока. Описание метрик можно получить из таблицы [system.events](#system_tables-events)(\#system\_tables-events
|
||||
- `ProfileEvents.Values` (Array(UInt64)) — метрики для данного потока, перечисленные в столбце `ProfileEvents.Names`.
|
||||
|
||||
По умолчанию, строки добавляются в таблицу логирования с интервалом в 7,5 секунд. Можно задать интервал в конфигурационном параметре сервера [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) (смотрите параметр `flush_interval_milliseconds`). Чтобы принудительно записать логи из буффера памяти в таблицу, используйте запрос `SYSTEM FLUSH LOGS`.
|
||||
По умолчанию, строки добавляются в таблицу логирования с интервалом в 7,5 секунд. Можно задать интервал в конфигурационном параметре сервера [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) (смотрите параметр `flush_interval_milliseconds`). Чтобы принудительно записать логи из буффера памяти в таблицу, используйте запрос `SYSTEM FLUSH LOGS`.
|
||||
|
||||
Если таблицу удалить вручную, она пересоздастся автоматически «на лету». При этом все логи на момент удаления таблицы будут удалены.
|
||||
|
||||
!!! note "Примечание"
|
||||
Срок хранения логов не ограничен. Логи не удаляются из таблицы автоматически. Вам необходимо самостоятельно организовать удаление устаревших логов.
|
||||
|
||||
Можно указать произвольный ключ партиционирования для таблицы `system.query_log` в конфигурации [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query-thread-log) (параметр `partition_by`).
|
||||
Можно указать произвольный ключ партиционирования для таблицы `system.query_log` в конфигурации [query\_thread\_log](server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) (параметр `partition_by`).
|
||||
|
||||
## system.query_thread_log {#system_tables-query-thread-log}
|
||||
## system.query_thread_log {#system_tables-query_thread_log}
|
||||
|
||||
Содержит информацию о каждом потоке исполнения запроса.
|
||||
|
||||
|
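By analogy with `system.query_log`, a minimal check of the per-thread log might look like this (a hypothetical session; `thread_name` and `memory_usage` are columns documented for this table):

``` sql
-- Enable per-thread logging for the current session.
SET log_query_threads = 1;

-- Run any query, then flush and inspect the per-thread records.
SYSTEM FLUSH LOGS;
SELECT query, thread_name, memory_usage
FROM system.query_thread_log
ORDER BY event_time DESC
LIMIT 10;
```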
@ -1293,7 +1293,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
        local_context.setSettings(task_cluster->settings_pull);
        local_context.setSetting("skip_unavailable_shards", true);

        Block block = getBlockWithAllStreamData(InterpreterFactory::get(query_select_ast, local_context)->execute().in);
        Block block = getBlockWithAllStreamData(InterpreterFactory::get(query_select_ast, local_context)->execute().getInputStream());
        count = (block) ? block.safeGetByPosition(0).column->getUInt(0) : 0;
    }

@ -1403,7 +1403,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
        BlockIO io_select = InterpreterFactory::get(query_select_ast, context_select)->execute();
        BlockIO io_insert = InterpreterFactory::get(query_insert_ast, context_insert)->execute();

        input = io_select.in;
        input = io_select.getInputStream();
        output = io_insert.out;
    }

@ -1690,7 +1690,7 @@ std::set<String> ClusterCopier::getShardPartitions(const ConnectionTimeouts & ti

    Context local_context = context;
    local_context.setSettings(task_cluster->settings_pull);
    Block block = getBlockWithAllStreamData(InterpreterFactory::get(query_ast, local_context)->execute().in);
    Block block = getBlockWithAllStreamData(InterpreterFactory::get(query_ast, local_context)->execute().getInputStream());

    std::set<String> res;
    if (block)

@ -1735,7 +1735,7 @@ const auto & settings = context.getSettingsRef();

    Context local_context = context;
    local_context.setSettings(task_cluster->settings_pull);
    return InterpreterFactory::get(query_ast, local_context)->execute().in->read().rows() != 0;
    return InterpreterFactory::get(query_ast, local_context)->execute().getInputStream()->read().rows() != 0;
}

bool ClusterCopier::checkPresentPartitionPiecesOnCurrentShard(const ConnectionTimeouts & timeouts,

@ -1774,7 +1774,7 @@ bool ClusterCopier::checkPresentPartitionPiecesOnCurrentShard(const ConnectionTi

    Context local_context = context;
    local_context.setSettings(task_cluster->settings_pull);
    auto result = InterpreterFactory::get(query_ast, local_context)->execute().in->read().rows();
    auto result = InterpreterFactory::get(query_ast, local_context)->execute().getInputStream()->read().rows();
    if (result != 0)
        LOG_DEBUG(log, "Partition {} piece number {} is PRESENT on shard {}", partition_quoted_name, std::to_string(current_piece_number), task_shard.getDescription());
    else
@ -114,7 +114,7 @@ void ClusterCopierApp::mainImpl()
    registerDisks();

    static const std::string default_database = "_local";
    DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared<DatabaseMemory>(default_database));
    DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared<DatabaseMemory>(default_database, *context));
    context->setCurrentDatabase(default_database);

    /// Initialize query scope just in case.
@ -118,13 +118,13 @@ void LocalServer::tryInitPath()
}


static void attachSystemTables()
static void attachSystemTables(const Context & context)
{
    DatabasePtr system_database = DatabaseCatalog::instance().tryGetDatabase(DatabaseCatalog::SYSTEM_DATABASE);
    if (!system_database)
    {
        /// TODO: add attachTableDelayed into DatabaseMemory to speedup loading
        system_database = std::make_shared<DatabaseMemory>(DatabaseCatalog::SYSTEM_DATABASE);
        system_database = std::make_shared<DatabaseMemory>(DatabaseCatalog::SYSTEM_DATABASE, context);
        DatabaseCatalog::instance().attachDatabase(DatabaseCatalog::SYSTEM_DATABASE, system_database);
    }

@ -202,7 +202,7 @@ try
     * if such tables will not be dropped, clickhouse-server will not be able to load them due to security reasons.
     */
    std::string default_database = config().getString("default_database", "_local");
    DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared<DatabaseMemory>(default_database));
    DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared<DatabaseMemory>(default_database, *context));
    context->setCurrentDatabase(default_database);
    applyCmdOptions();

@ -213,14 +213,14 @@ try

        LOG_DEBUG(log, "Loading metadata from {}", context->getPath());
        loadMetadataSystem(*context);
        attachSystemTables();
        attachSystemTables(*context);
        loadMetadata(*context);
        DatabaseCatalog::instance().loadDatabases();
        LOG_DEBUG(log, "Loaded metadata.");
    }
    else
    {
        attachSystemTables();
        attachSystemTables(*context);
    }

    processQueries();
@ -236,6 +236,14 @@ int Server::main(const std::vector<std::string> & /*args*/)
    if (ThreadFuzzer::instance().isEffective())
        LOG_WARNING(log, "ThreadFuzzer is enabled. Application will run slowly and unstable.");

#if !defined(NDEBUG) || !defined(__OPTIMIZE__)
    LOG_WARNING(log, "Server was built in debug mode. It will work slowly.");
#endif

#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER)
    LOG_WARNING(log, "Server was built with sanitizer. It will work slowly.");
#endif

    /** Context contains all that query execution is dependent:
      * settings, available functions, data types, aggregate functions, databases...
      */
@ -68,15 +68,27 @@ void ExtendedRoleSet::init(const ASTExtendedRoleSet & ast, const AccessControlMa
{
    all = ast.all;

    auto name_to_id = [id_mode{ast.id_mode}, manager](const String & name) -> UUID
    auto name_to_id = [&ast, manager](const String & name) -> UUID
    {
        if (id_mode)
        if (ast.id_mode)
            return parse<UUID>(name);
        assert(manager);
        auto id = manager->find<User>(name);
        if (id)
            return *id;
        return manager->getID<Role>(name);
        if (ast.can_contain_users && ast.can_contain_roles)
        {
            auto id = manager->find<User>(name);
            if (id)
                return *id;
            return manager->getID<Role>(name);
        }
        else if (ast.can_contain_users)
        {
            return manager->getID<User>(name);
        }
        else
        {
            assert(ast.can_contain_roles);
            return manager->getID<Role>(name);
        }
    };

    if (!ast.names.empty() && !all)
@ -9,6 +9,7 @@
#include <Common/Exception.h>
#include <IO/ReadBufferFromMemory.h>
#include <IO/ReadHelpers.h>
#include <common/logger_useful.h>


namespace DB

@ -19,6 +20,7 @@ namespace ErrorCodes
    extern const int FILE_DOESNT_EXIST;
    extern const int CANNOT_OPEN_FILE;
    extern const int CANNOT_READ_FROM_FILE_DESCRIPTOR;
    extern const int CANNOT_CLOSE_FILE;
}

static constexpr auto filename = "/proc/self/statm";

@ -35,7 +37,18 @@ MemoryStatisticsOS::MemoryStatisticsOS()
MemoryStatisticsOS::~MemoryStatisticsOS()
{
    if (0 != ::close(fd))
        tryLogCurrentException(__PRETTY_FUNCTION__);
    {
        try
        {
            throwFromErrno(
                    "File descriptor for \"" + std::string(filename) + "\" could not be closed. "
                    "Something seems to have gone wrong. Inspect errno.", ErrorCodes::CANNOT_CLOSE_FILE);
        }
        catch (const ErrnoException &)
        {
            DB::tryLogCurrentException(__PRETTY_FUNCTION__);
        }
    }
}

MemoryStatisticsOS::Data MemoryStatisticsOS::get() const
@ -49,11 +49,13 @@ MemoryTracker::~MemoryTracker()

void MemoryTracker::logPeakMemoryUsage() const
{
    const auto * description = description_ptr.load(std::memory_order_relaxed);
    LOG_DEBUG(&Logger::get("MemoryTracker"), "Peak memory usage{}: {}.", (description ? " " + std::string(description) : ""), formatReadableSizeWithBinarySuffix(peak));
}

void MemoryTracker::logMemoryUsage(Int64 current) const
{
    const auto * description = description_ptr.load(std::memory_order_relaxed);
    LOG_DEBUG(&Logger::get("MemoryTracker"), "Current memory usage{}: {}.", (description ? " " + std::string(description) : ""), formatReadableSizeWithBinarySuffix(current));
}

@ -85,7 +87,7 @@ void MemoryTracker::alloc(Int64 size)

        std::stringstream message;
        message << "Memory tracker";
        if (description)
        if (const auto * description = description_ptr.load(std::memory_order_relaxed))
            message << " " << description;
        message << ": fault injected. Would use " << formatReadableSizeWithBinarySuffix(will_be)
            << " (attempt to allocate chunk of " << size << " bytes)"

@ -117,7 +119,7 @@ void MemoryTracker::alloc(Int64 size)

        std::stringstream message;
        message << "Memory limit";
        if (description)
        if (const auto * description = description_ptr.load(std::memory_order_relaxed))
            message << " " << description;
        message << " exceeded: would use " << formatReadableSizeWithBinarySuffix(will_be)
            << " (attempt to allocate chunk of " << size << " bytes)"

@ -35,7 +35,7 @@ private:
    CurrentMetrics::Metric metric = CurrentMetrics::end();

    /// This description will be used as prefix into log messages (if isn't nullptr)
    const char * description = nullptr;
    std::atomic<const char *> description_ptr = nullptr;

    void updatePeak(Int64 will_be);
    void logMemoryUsage(Int64 current) const;

@ -114,9 +114,9 @@ public:
        metric = metric_;
    }

    void setDescription(const char * description_)
    void setDescription(const char * description)
    {
        description = description_;
        description_ptr.store(description, std::memory_order_relaxed);
    }

    /// Reset the accumulated data
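For context, this is how the "Memory limit exceeded" message built above typically surfaces to a user (a hypothetical session; the limit and query are purely illustrative):

``` sql
-- Cap the per-query memory budget low, then run something allocation-heavy;
-- the server should reject it with a "Memory limit ... exceeded" error
-- produced by the code path shown in this hunk.
SET max_memory_usage = 1000000;
SELECT groupArray(number) FROM numbers(10000000);
```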
@ -7,6 +7,7 @@
#include <IO/ReadHelpers.h>

#include <common/find_symbols.h>
#include <common/logger_useful.h>

#include <cassert>
#include <sys/types.h>

@ -22,6 +23,7 @@ namespace ErrorCodes
{
    extern const int FILE_DOESNT_EXIST;
    extern const int CANNOT_OPEN_FILE;
    extern const int CANNOT_CLOSE_FILE;
    extern const int CANNOT_READ_FROM_FILE_DESCRIPTOR;
}

@ -39,6 +41,20 @@ namespace
            errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE);
}

inline void emitErrorMsgWithFailedToCloseFile(const std::string & filename)
{
    try
    {
        throwFromErrno(
                "File descriptor for \"" + filename + "\" could not be closed. "
                "Something seems to have gone wrong. Inspect errno.", ErrorCodes::CANNOT_CLOSE_FILE);
    }
    catch (const ErrnoException &)
    {
        DB::tryLogCurrentException(__PRETTY_FUNCTION__);
    }
}

ssize_t readFromFD(const int fd, const char * filename, char * buf, size_t buf_size)
{
    ssize_t res = 0;

@ -100,11 +116,11 @@ ProcfsMetricsProvider::ProcfsMetricsProvider(const pid_t /*tid*/)
ProcfsMetricsProvider::~ProcfsMetricsProvider()
{
    if (stats_version >= 3 && 0 != ::close(thread_io_fd))
        tryLogCurrentException(__PRETTY_FUNCTION__);
        emitErrorMsgWithFailedToCloseFile(thread_io);
    if (0 != ::close(thread_stat_fd))
        tryLogCurrentException(__PRETTY_FUNCTION__);
        emitErrorMsgWithFailedToCloseFile(thread_stat);
    if (0 != ::close(thread_schedstat_fd))
        tryLogCurrentException(__PRETTY_FUNCTION__);
        emitErrorMsgWithFailedToCloseFile(thread_schedstat);
}
19
src/Common/tests/gtest_log.cpp
Normal file
@ -0,0 +1,19 @@
#include <string>
#include <vector>
#include <common/logger_useful.h>
#include <gtest/gtest.h>

#include <Poco/Logger.h>
#include <Poco/AutoPtr.h>
#include <Poco/NullChannel.h>


TEST(Logger, Log)
{
    Poco::Logger::root().setLevel("none");
    Poco::Logger::root().setChannel(Poco::AutoPtr<Poco::NullChannel>(new Poco::NullChannel()));
    Logger * log = &Logger::get("Log");

    /// This test checks that we don't pass this string to fmtlib, because it is the only argument.
    EXPECT_NO_THROW(LOG_INFO(log, "Hello {} World"));
}
@ -164,7 +164,7 @@ void ExternalTablesHandler::handlePart(const Poco::Net::MessageHeader & header,

    /// Create table
    NamesAndTypesList columns = sample_block.getNamesAndTypesList();
    auto temporary_table = TemporaryTableHolder(context, ColumnsDescription{columns});
    auto temporary_table = TemporaryTableHolder(context, ColumnsDescription{columns}, {});
    auto storage = temporary_table.getTable();
    context.addExternalTable(data->table_name, std::move(temporary_table));
    BlockOutputStreamPtr output = storage->write(ASTPtr(), context);
@ -126,7 +126,7 @@ struct Settings : public SettingsCollection<Settings>
    M(SettingBool, force_optimize_skip_unused_shards_no_nested, false, "Do not apply force_optimize_skip_unused_shards for nested Distributed tables.", 0) \
    \
    M(SettingBool, input_format_parallel_parsing, true, "Enable parallel parsing for some data formats.", 0) \
    M(SettingUInt64, min_chunk_bytes_for_parallel_parsing, (1024 * 1024), "The minimum chunk size in bytes, which each thread will parse in parallel.", 0) \
    M(SettingUInt64, min_chunk_bytes_for_parallel_parsing, (10 * 1024 * 1024), "The minimum chunk size in bytes, which each thread will parse in parallel.", 0) \
    \
    M(SettingUInt64, merge_tree_min_rows_for_concurrent_read, (20 * 8192), "If at least as many lines are read from one file, the reading can be parallelized.", 0) \
    M(SettingUInt64, merge_tree_min_bytes_for_concurrent_read, (24 * 10 * 1024 * 1024), "If at least as many bytes are read from one file, the reading can be parallelized.", 0) \

@ -437,6 +437,7 @@ struct Settings : public SettingsCollection<Settings>
    M(SettingUInt64, mark_cache_min_lifetime, 0, "Obsolete setting, does nothing. Will be removed after 2020-05-31", 0) \
    M(SettingBool, partial_merge_join, false, "Obsolete. Use join_algorithm='prefer_partial_merge' instead.", 0) \
    M(SettingUInt64, max_memory_usage_for_all_queries, 0, "Obsolete. Will be removed after 2020-10-20", 0) \
    M(SettingBool, experimental_use_processors, true, "Obsolete setting, does nothing. Will be removed after 2020-11-29.", 0) \

    DECLARE_SETTINGS_COLLECTION(LIST_OF_SETTINGS)
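For reference, the parallel-parsing chunk size changed here from 1 MiB to 10 MiB remains overridable per session (a hypothetical session; 10485760 bytes is the new default from this hunk):

``` sql
-- Override the parallel-parsing chunk size for this session only.
SET input_format_parallel_parsing = 1,
    min_chunk_bytes_for_parallel_parsing = 10485760;
```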
@ -13,6 +13,7 @@ limitations under the License. */

#include <DataStreams/IBlockInputStream.h>
#include <Processors/Sources/SourceWithProgress.h>
#include <Processors/Transforms/AggregatingTransform.h>


namespace DB

@ -38,7 +39,12 @@ protected:

        Block res = *it;
        ++it;
        return Chunk(res.getColumns(), res.rows());

        auto info = std::make_shared<AggregatedChunkInfo>();
        info->bucket_num = res.info.bucket_num;
        info->is_overflows = res.info.is_overflows;

        return Chunk(res.getColumns(), res.rows(), std::move(info));
    }

private:
@ -58,7 +58,7 @@ InputStreamFromASTInsertQuery::InputStreamFromASTInsertQuery(

    if (context.getSettingsRef().input_format_defaults_for_omitted_fields && ast_insert_query->table_id && !input_function)
    {
        StoragePtr storage = DatabaseCatalog::instance().getTable(ast_insert_query->table_id);
        StoragePtr storage = DatabaseCatalog::instance().getTable(ast_insert_query->table_id, context);
        auto column_defaults = storage->getColumns().getDefaults();
        if (!column_defaults.empty())
            res_stream = std::make_shared<AddingDefaultsBlockInputStream>(res_stream, column_defaults, context);
@ -59,7 +59,7 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream(

    for (const auto & database_table : dependencies)
    {
        auto dependent_table = DatabaseCatalog::instance().getTable(database_table);
        auto dependent_table = DatabaseCatalog::instance().getTable(database_table, context);

        ASTPtr query;
        BlockOutputStreamPtr out;

@ -274,7 +274,7 @@ void PushingToViewsBlockOutputStream::process(const Block & block, size_t view_n
            StorageValues::create(
                storage->getStorageID(), storage->getColumns(), block, storage->getVirtuals()));
        select.emplace(view.query, local_context, SelectQueryOptions());
        in = std::make_shared<MaterializingBlockInputStream>(select->execute().in);
        in = std::make_shared<MaterializingBlockInputStream>(select->execute().getInputStream());

        /// Squashing is needed here because the materialized view query can generate a lot of blocks
        /// even when only one block is inserted into the parent table (e.g. if the query is a GROUP BY
@ -5,7 +5,7 @@
#include <Interpreters/ExpressionAnalyzer.h>
#include <Columns/ColumnConst.h>
#include <Interpreters/addTypeConversionToAST.h>
#include <Storages/MergeTree/TTLMode.h>
#include <Storages/TTLMode.h>
#include <Interpreters/Context.h>

namespace DB

@ -38,7 +38,7 @@ TTLBlockInputStream::TTLBlockInputStream(
    const auto & column_defaults = storage_columns.getDefaults();

    ASTPtr default_expr_list = std::make_shared<ASTExpressionList>();
    for (const auto & [name, _] : storage.column_ttl_entries_by_name)
    for (const auto & [name, _] : storage.getColumnTTLs())
    {
        auto it = column_defaults.find(name);
        if (it != column_defaults.end())

@ -70,21 +70,21 @@ TTLBlockInputStream::TTLBlockInputStream(
        defaults_expression = ExpressionAnalyzer{default_expr_list, syntax_result, storage.global_context}.getActions(true);
    }

    if (storage.hasRowsTTL() && storage.rows_ttl_entry.mode == TTLMode::GROUP_BY)
    if (storage.hasRowsTTL() && storage.getRowsTTL().mode == TTLMode::GROUP_BY)
    {
        current_key_value.resize(storage.rows_ttl_entry.group_by_keys.size());
        current_key_value.resize(storage.getRowsTTL().group_by_keys.size());

        ColumnNumbers keys;
        for (const auto & key : storage.rows_ttl_entry.group_by_keys)
        for (const auto & key : storage.getRowsTTL().group_by_keys)
            keys.push_back(header.getPositionByName(key));
        agg_key_columns.resize(storage.rows_ttl_entry.group_by_keys.size());
        agg_key_columns.resize(storage.getRowsTTL().group_by_keys.size());

        AggregateDescriptions aggregates = storage.rows_ttl_entry.aggregate_descriptions;
        AggregateDescriptions aggregates = storage.getRowsTTL().aggregate_descriptions;
        for (auto & descr : aggregates)
            if (descr.arguments.empty())
                for (const auto & name : descr.argument_names)
                    descr.arguments.push_back(header.getPositionByName(name));
        agg_aggregate_columns.resize(storage.rows_ttl_entry.aggregate_descriptions.size());
        agg_aggregate_columns.resize(storage.getRowsTTL().aggregate_descriptions.size());

        const Settings & settings = storage.global_context.getSettingsRef();

@ -105,8 +105,8 @@ bool TTLBlockInputStream::isTTLExpired(time_t ttl) const
Block TTLBlockInputStream::readImpl()
{
    /// Skip all data if table ttl is expired for part
    if (storage.hasRowsTTL() && !storage.rows_ttl_entry.where_expression &&
        storage.rows_ttl_entry.mode != TTLMode::GROUP_BY && isTTLExpired(old_ttl_infos.table_ttl.max))
    if (storage.hasRowsTTL() && !storage.getRowsTTL().where_expression &&
        storage.getRowsTTL().mode != TTLMode::GROUP_BY && isTTLExpired(old_ttl_infos.table_ttl.max))
    {
        rows_removed = data_part->rows_count;
        return {};

@ -151,15 +151,17 @@ void TTLBlockInputStream::readSuffixImpl()

void TTLBlockInputStream::removeRowsWithExpiredTableTTL(Block & block)
{
    storage.rows_ttl_entry.expression->execute(block);
    if (storage.rows_ttl_entry.where_expression)
        storage.rows_ttl_entry.where_expression->execute(block);
    const auto & rows_ttl = storage.getRowsTTL();

    rows_ttl.expression->execute(block);
    if (rows_ttl.where_expression)
        rows_ttl.where_expression->execute(block);

    const IColumn * ttl_column =
        block.getByName(storage.rows_ttl_entry.result_column).column.get();
        block.getByName(rows_ttl.result_column).column.get();

    const IColumn * where_result_column = storage.rows_ttl_entry.where_expression ?
        block.getByName(storage.rows_ttl_entry.where_result_column).column.get() : nullptr;
    const IColumn * where_result_column = storage.getRowsTTL().where_expression ?
        block.getByName(storage.getRowsTTL().where_result_column).column.get() : nullptr;

    const auto & column_names = header.getNames();

@ -204,9 +206,9 @@ void TTLBlockInputStream::removeRowsWithExpiredTableTTL(Block & block)
            bool ttl_expired = isTTLExpired(cur_ttl) && where_filter_passed;

            bool same_as_current = true;
            for (size_t j = 0; j < storage.rows_ttl_entry.group_by_keys.size(); ++j)
            for (size_t j = 0; j < storage.getRowsTTL().group_by_keys.size(); ++j)
            {
                const String & key_column = storage.rows_ttl_entry.group_by_keys[j];
                const String & key_column = storage.getRowsTTL().group_by_keys[j];
                const IColumn * values_column = block.getByName(key_column).column.get();
                if (!same_as_current || (*values_column)[i] != current_key_value[j])
                {

@ -275,18 +277,18 @@ void TTLBlockInputStream::finalizeAggregates(MutableColumns & result_columns)
        auto aggregated_res = aggregator->convertToBlocks(agg_result, true, 1);
        for (auto & agg_block : aggregated_res)
        {
            for (const auto & it : storage.rows_ttl_entry.group_by_aggregations)
                std::get<2>(it)->execute(agg_block);
            for (const auto & name : storage.rows_ttl_entry.group_by_keys)
            for (const auto & it : storage.getRowsTTL().set_parts)
                it.expression->execute(agg_block);
            for (const auto & name : storage.getRowsTTL().group_by_keys)
            {
                const IColumn * values_column = agg_block.getByName(name).column.get();
                auto & result_column = result_columns[header.getPositionByName(name)];
                result_column->insertRangeFrom(*values_column, 0, agg_block.rows());
            }
            for (const auto & it : storage.rows_ttl_entry.group_by_aggregations)
            for (const auto & it : storage.getRowsTTL().set_parts)
            {
                const IColumn * values_column = agg_block.getByName(get<1>(it)).column.get();
                auto & result_column = result_columns[header.getPositionByName(std::get<0>(it))];
                const IColumn * values_column = agg_block.getByName(it.expression_result_column_name).column.get();
                auto & result_column = result_columns[header.getPositionByName(it.column_name)];
                result_column->insertRangeFrom(*values_column, 0, agg_block.rows());
            }
        }

@ -304,7 +306,7 @@ void TTLBlockInputStream::removeValuesWithExpiredColumnTTL(Block & block)
    }

    std::vector<String> columns_to_remove;
    for (const auto & [name, ttl_entry] : storage.column_ttl_entries_by_name)
    for (const auto & [name, ttl_entry] : storage.getColumnTTLs())
    {
        /// If we read not all table columns. E.g. while mutation.
        if (!block.has(name))

@ -365,7 +367,7 @@ void TTLBlockInputStream::removeValuesWithExpiredColumnTTL(Block & block)
void TTLBlockInputStream::updateMovesTTL(Block & block)
{
    std::vector<String> columns_to_remove;
    for (const auto & ttl_entry : storage.move_ttl_entries)
    for (const auto & ttl_entry : storage.getMoveTTLs())
    {
        auto & new_ttl_info = new_ttl_infos.moves_ttl[ttl_entry.result_column];
@ -35,7 +35,7 @@ try
    Names column_names;
    column_names.push_back("WatchID");

    StoragePtr table = DatabaseCatalog::instance().getTable({"default", "hits6"});
    StoragePtr table = DatabaseCatalog::instance().getTable({"default", "hits6"}, context);

    QueryProcessingStage::Enum stage = table->getQueryProcessingStage(context);
    auto pipes = table->read(column_names, {}, context, stage, settings.max_block_size, settings.max_threads);
@ -288,15 +288,15 @@ void DatabaseAtomic::assertCanBeDetached(bool cleenup)
                        "because some tables are still in use. Retry later.", ErrorCodes::DATABASE_NOT_EMPTY);
}

DatabaseTablesIteratorPtr DatabaseAtomic::getTablesIterator(const IDatabase::FilterByNameFunction & filter_by_table_name)
DatabaseTablesIteratorPtr DatabaseAtomic::getTablesIterator(const Context & context, const IDatabase::FilterByNameFunction & filter_by_table_name)
{
    auto base_iter = DatabaseWithOwnTablesBase::getTablesIterator(filter_by_table_name);
    auto base_iter = DatabaseWithOwnTablesBase::getTablesIterator(context, filter_by_table_name);
    return std::make_unique<AtomicDatabaseTablesSnapshotIterator>(std::move(typeid_cast<DatabaseTablesSnapshotIterator &>(*base_iter)));
}

UUID DatabaseAtomic::tryGetTableUUID(const String & table_name) const
{
    if (auto table = tryGetTable(table_name))
    if (auto table = tryGetTable(table_name, global_context))
        return table->getStorageID().uuid;
    return UUIDHelpers::Nil;
}
@ -42,7 +42,7 @@ public:

    void drop(const Context & /*context*/) override;

    DatabaseTablesIteratorPtr getTablesIterator(const FilterByNameFunction & filter_by_table_name) override;
    DatabaseTablesIteratorPtr getTablesIterator(const Context & context, const FilterByNameFunction & filter_by_table_name) override;

    void loadStoredObjects(Context & context, bool has_force_restore_data_flag) override;
@ -50,18 +50,18 @@ Tables DatabaseDictionary::listTables(const FilterByNameFunction & filter_by_nam
    return tables;
}

bool DatabaseDictionary::isTableExist(const String & table_name) const
bool DatabaseDictionary::isTableExist(const String & table_name, const Context &) const
{
    return global_context.getExternalDictionariesLoader().getCurrentStatus(table_name) != ExternalLoader::Status::NOT_EXIST;
}

StoragePtr DatabaseDictionary::tryGetTable(const String & table_name) const
StoragePtr DatabaseDictionary::tryGetTable(const String & table_name, const Context &) const
{
    auto load_result = global_context.getExternalDictionariesLoader().getLoadResult(table_name);
    return createStorageDictionary(getDatabaseName(), load_result);
}

DatabaseTablesIteratorPtr DatabaseDictionary::getTablesIterator(const FilterByNameFunction & filter_by_table_name)
DatabaseTablesIteratorPtr DatabaseDictionary::getTablesIterator(const Context &, const FilterByNameFunction & filter_by_table_name)
{
    return std::make_unique<DatabaseTablesSnapshotIterator>(listTables(filter_by_table_name));
}

@ -71,7 +71,7 @@ bool DatabaseDictionary::empty() const
    return !global_context.getExternalDictionariesLoader().hasObjects();
}

ASTPtr DatabaseDictionary::getCreateTableQueryImpl(const String & table_name, bool throw_on_error) const
ASTPtr DatabaseDictionary::getCreateTableQueryImpl(const String & table_name, const Context &, bool throw_on_error) const
{
    String query;
    {
@ -29,11 +29,11 @@ public:
        return "Dictionary";
    }

    bool isTableExist(const String & table_name) const override;
    bool isTableExist(const String & table_name, const Context & context) const override;

    StoragePtr tryGetTable(const String & table_name) const override;
    StoragePtr tryGetTable(const String & table_name, const Context & context) const override;

    DatabaseTablesIteratorPtr getTablesIterator(const FilterByNameFunction & filter_by_table_name) override;
    DatabaseTablesIteratorPtr getTablesIterator(const Context & context, const FilterByNameFunction & filter_by_table_name) override;

    bool empty() const override;

@ -44,7 +44,7 @@ public:
    void shutdown() override;

protected:
    ASTPtr getCreateTableQueryImpl(const String & table_name, bool throw_on_error) const override;
    ASTPtr getCreateTableQueryImpl(const String & table_name, const Context & context, bool throw_on_error) const override;

private:
    mutable std::mutex mutex;
@ -82,7 +82,7 @@ DatabasePtr DatabaseFactory::getImpl(
    else if (engine_name == "Atomic")
        return std::make_shared<DatabaseAtomic>(database_name, metadata_path, context);
    else if (engine_name == "Memory")
        return std::make_shared<DatabaseMemory>(database_name);
        return std::make_shared<DatabaseMemory>(database_name, context);
    else if (engine_name == "Dictionary")
        return std::make_shared<DatabaseDictionary>(database_name, context);
@ -132,7 +132,7 @@ StoragePtr DatabaseLazy::tryGetTable(const String & table_name) const
    return loadTable(table_name);
}

DatabaseTablesIteratorPtr DatabaseLazy::getTablesIterator(const FilterByNameFunction & filter_by_table_name)
DatabaseTablesIteratorPtr DatabaseLazy::getTablesIterator(const Context &, const FilterByNameFunction & filter_by_table_name)
{
    std::lock_guard lock(mutex);
    Strings filtered_tables;
@ -51,13 +51,15 @@ public:

    time_t getObjectMetadataModificationTime(const String & table_name) const override;

    bool isTableExist(const String & table_name) const override;
    bool isTableExist(const String & table_name, const Context &) const override { return isTableExist(table_name); }
    bool isTableExist(const String & table_name) const;

    StoragePtr tryGetTable(const String & table_name) const override;
    StoragePtr tryGetTable(const String & table_name, const Context &) const override { return tryGetTable(table_name); }
    StoragePtr tryGetTable(const String & table_name) const;

    bool empty() const override;

    DatabaseTablesIteratorPtr getTablesIterator(const FilterByNameFunction & filter_by_table_name) override;
    DatabaseTablesIteratorPtr getTablesIterator(const Context & context, const FilterByNameFunction & filter_by_table_name) override;

    void attachTable(const String & table_name, const StoragePtr & table, const String & relative_table_path) override;
@ -16,8 +16,8 @@ namespace ErrorCodes
    extern const int UNKNOWN_TABLE;
}

DatabaseMemory::DatabaseMemory(const String & name_)
    : DatabaseWithOwnTablesBase(name_, "DatabaseMemory(" + name_ + ")")
DatabaseMemory::DatabaseMemory(const String & name_, const Context & context)
    : DatabaseWithOwnTablesBase(name_, "DatabaseMemory(" + name_ + ")", context)
    , data_path("data/" + escapeForFileName(database_name) + "/")
{}

@ -64,7 +64,7 @@ ASTPtr DatabaseMemory::getCreateDatabaseQuery() const
    return create_query;
}

ASTPtr DatabaseMemory::getCreateTableQueryImpl(const String & table_name, bool throw_on_error) const
ASTPtr DatabaseMemory::getCreateTableQueryImpl(const String & table_name, const Context &, bool throw_on_error) const
{
    std::lock_guard lock{mutex};
    auto it = create_queries.find(table_name);

@ -80,7 +80,7 @@ ASTPtr DatabaseMemory::getCreateTableQueryImpl(const String & table_name, bool t

UUID DatabaseMemory::tryGetTableUUID(const String & table_name) const
{
    if (auto table = tryGetTable(table_name))
    if (auto table = tryGetTable(table_name, global_context))
        return table->getStorageID().uuid;
    return UUIDHelpers::Nil;
}
@ -19,7 +19,7 @@ namespace DB
class DatabaseMemory final : public DatabaseWithOwnTablesBase
{
public:
    DatabaseMemory(const String & name_);
    DatabaseMemory(const String & name_, const Context & context);

    String getEngineName() const override { return "Memory"; }

@ -34,7 +34,7 @@ public:
        const String & table_name,
        bool no_delay) override;

    ASTPtr getCreateTableQueryImpl(const String & name, bool throw_on_error) const override;
    ASTPtr getCreateTableQueryImpl(const String & name, const Context & context, bool throw_on_error) const override;
    ASTPtr getCreateDatabaseQuery() const override;

    /// DatabaseMemory allows to create tables, which store data on disk.
@ -89,7 +89,7 @@ bool DatabaseMySQL::empty() const
    return true;
}

DatabaseTablesIteratorPtr DatabaseMySQL::getTablesIterator(const FilterByNameFunction & filter_by_table_name)
DatabaseTablesIteratorPtr DatabaseMySQL::getTablesIterator(const Context &, const FilterByNameFunction & filter_by_table_name)
{
    Tables tables;
    std::lock_guard<std::mutex> lock(mutex);

@ -103,12 +103,12 @@ DatabaseTablesIteratorPtr DatabaseMySQL::getTablesIterator(const FilterByNameFun
    return std::make_unique<DatabaseTablesSnapshotIterator>(tables);
}

bool DatabaseMySQL::isTableExist(const String & name) const
bool DatabaseMySQL::isTableExist(const String & name, const Context &) const
{
    return bool(tryGetTable(name));
    return bool(tryGetTable(name, global_context));
}

StoragePtr DatabaseMySQL::tryGetTable(const String & mysql_table_name) const
StoragePtr DatabaseMySQL::tryGetTable(const String & mysql_table_name, const Context &) const
{
    std::lock_guard<std::mutex> lock(mutex);

@ -155,7 +155,7 @@ static ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr
    return create_table_query;
}

ASTPtr DatabaseMySQL::getCreateTableQueryImpl(const String & table_name, bool throw_on_error) const
ASTPtr DatabaseMySQL::getCreateTableQueryImpl(const String & table_name, const Context &, bool throw_on_error) const
{
    std::lock_guard<std::mutex> lock(mutex);

@ -501,7 +501,7 @@ void DatabaseMySQL::createTable(const Context &, const String & table_name, cons
    /// XXX: hack
    /// In order to prevent users from broken the table structure by executing attach table database_name.table_name (...)
    /// we should compare the old and new create_query to make them completely consistent
    const auto & origin_create_query = getCreateTableQuery(table_name);
    const auto & origin_create_query = getCreateTableQuery(table_name, global_context);
    origin_create_query->as<ASTCreateQuery>()->attach = true;

    if (queryToString(origin_create_query) != queryToString(create_query))
@ -32,13 +32,13 @@ public:

    bool empty() const override;

    DatabaseTablesIteratorPtr getTablesIterator(const FilterByNameFunction & filter_by_table_name) override;
    DatabaseTablesIteratorPtr getTablesIterator(const Context & context, const FilterByNameFunction & filter_by_table_name) override;

    ASTPtr getCreateDatabaseQuery() const override;

    bool isTableExist(const String & name) const override;
    bool isTableExist(const String & name, const Context & context) const override;

    StoragePtr tryGetTable(const String & name) const override;
    StoragePtr tryGetTable(const String & name, const Context & context) const override;

    time_t getObjectMetadataModificationTime(const String & name) const override;

@ -59,7 +59,7 @@ public:
    void attachTable(const String & table_name, const StoragePtr & storage, const String & relative_table_path) override;

protected:
    ASTPtr getCreateTableQueryImpl(const String & name, bool throw_on_error) const override;
    ASTPtr getCreateTableQueryImpl(const String & name, const Context & context, bool throw_on_error) const override;

private:
    const Context & global_context;
@ -123,10 +123,9 @@ String getObjectDefinitionFromCreateQuery(const ASTPtr & query)
}

DatabaseOnDisk::DatabaseOnDisk(const String & name, const String & metadata_path_, const String & data_path_, const String & logger, const Context & context)
    : DatabaseWithOwnTablesBase(name, logger)
    : DatabaseWithOwnTablesBase(name, logger, context)
    , metadata_path(metadata_path_)
    , data_path(data_path_)
    , global_context(context.getGlobalContext())
{
    Poco::File(context.getPath() + data_path).createDirectories();
    Poco::File(metadata_path).createDirectories();

@ -160,7 +159,7 @@ void DatabaseOnDisk::createTable(
        throw Exception("Dictionary " + backQuote(getDatabaseName()) + "." + backQuote(table_name) + " already exists.",
                        ErrorCodes::DICTIONARY_ALREADY_EXISTS);

    if (isTableExist(table_name))
    if (isTableExist(table_name, global_context))
        throw Exception("Table " + backQuote(getDatabaseName()) + "." + backQuote(table_name) + " already exists.", ErrorCodes::TABLE_ALREADY_EXISTS);

    if (create.attach_short_syntax)

@ -267,7 +266,7 @@ void DatabaseOnDisk::renameTable(
    String table_metadata_path;
    ASTPtr attach_query;
    /// DatabaseLazy::detachTable may return nullptr even if table exists, so we need tryGetTable for this case.
    StoragePtr table = tryGetTable(table_name);
    StoragePtr table = tryGetTable(table_name, global_context);
    detachTable(table_name);
    try
    {

@ -304,10 +303,10 @@ void DatabaseOnDisk::renameTable(
    Poco::File(table_metadata_path).remove();
}

ASTPtr DatabaseOnDisk::getCreateTableQueryImpl(const String & table_name, bool throw_on_error) const
ASTPtr DatabaseOnDisk::getCreateTableQueryImpl(const String & table_name, const Context &, bool throw_on_error) const
{
    ASTPtr ast;
    bool has_table = tryGetTable(table_name) != nullptr;
    bool has_table = tryGetTable(table_name, global_context) != nullptr;
    auto table_metadata_path = getObjectMetadataPath(table_name);
    try
    {
@ -76,6 +76,7 @@ protected:

    ASTPtr getCreateTableQueryImpl(
        const String & table_name,
        const Context & context,
        bool throw_on_error) const override;

    ASTPtr getCreateQueryFromMetadata(const String & metadata_path, bool throw_on_error) const;

@ -85,7 +86,6 @@ protected:

    const String metadata_path;
    const String data_path;
    const Context & global_context;
};

}
@ -127,7 +127,7 @@ void DatabaseWithDictionaries::createDictionary(const Context & context, const S
                        "Dictionary " + backQuote(getDatabaseName()) + "." + backQuote(dictionary_name) + " already exists.",
                        ErrorCodes::DICTIONARY_ALREADY_EXISTS);

    if (isTableExist(dictionary_name))
    if (isTableExist(dictionary_name, global_context))
        throw Exception("Table " + backQuote(getDatabaseName()) + "." + backQuote(dictionary_name) + " already exists.", ErrorCodes::TABLE_ALREADY_EXISTS);
@ -1,5 +1,6 @@
#include <Databases/DatabasesCommon.h>
#include <Interpreters/InterpreterCreateQuery.h>
#include <Interpreters/Context.h>
#include <Parsers/ParserCreateQuery.h>
#include <Parsers/formatAST.h>
#include <Storages/StorageDictionary.h>

@ -18,18 +19,18 @@ namespace ErrorCodes
    extern const int UNKNOWN_TABLE;
}

DatabaseWithOwnTablesBase::DatabaseWithOwnTablesBase(const String & name_, const String & logger)
    : IDatabase(name_), log(&Logger::get(logger))
DatabaseWithOwnTablesBase::DatabaseWithOwnTablesBase(const String & name_, const String & logger, const Context & context)
    : IDatabase(name_), log(&Logger::get(logger)), global_context(context.getGlobalContext())
{
}

bool DatabaseWithOwnTablesBase::isTableExist(const String & table_name) const
bool DatabaseWithOwnTablesBase::isTableExist(const String & table_name, const Context &) const
{
    std::lock_guard lock(mutex);
    return tables.find(table_name) != tables.end();
}

StoragePtr DatabaseWithOwnTablesBase::tryGetTable(const String & table_name) const
StoragePtr DatabaseWithOwnTablesBase::tryGetTable(const String & table_name, const Context &) const
{
    std::lock_guard lock(mutex);
    auto it = tables.find(table_name);

@ -38,7 +39,7 @@ StoragePtr DatabaseWithOwnTablesBase::tryGetTable(const String & table_name) con
    return {};
}

DatabaseTablesIteratorPtr DatabaseWithOwnTablesBase::getTablesIterator(const FilterByNameFunction & filter_by_table_name)
DatabaseTablesIteratorPtr DatabaseWithOwnTablesBase::getTablesIterator(const Context &, const FilterByNameFunction & filter_by_table_name)
{
    std::lock_guard lock(mutex);
    if (!filter_by_table_name)
@ -19,9 +19,9 @@ class Context;
class DatabaseWithOwnTablesBase : public IDatabase
{
public:
    bool isTableExist(const String & table_name) const override;
    bool isTableExist(const String & table_name, const Context & context) const override;

    StoragePtr tryGetTable(const String & table_name) const override;
    StoragePtr tryGetTable(const String & table_name, const Context & context) const override;

    bool empty() const override;

@ -29,18 +29,19 @@ public:

    StoragePtr detachTable(const String & table_name) override;

    DatabaseTablesIteratorPtr getTablesIterator(const FilterByNameFunction & filter_by_table_name) override;
    DatabaseTablesIteratorPtr getTablesIterator(const Context & context, const FilterByNameFunction & filter_by_table_name) override;

    void shutdown() override;

    virtual ~DatabaseWithOwnTablesBase() override;
    ~DatabaseWithOwnTablesBase() override;

protected:
    mutable std::mutex mutex;
    Tables tables;
    Poco::Logger * log;
    const Context & global_context;

    DatabaseWithOwnTablesBase(const String & name_, const String & logger);
    DatabaseWithOwnTablesBase(const String & name_, const String & logger, const Context & context);

    void attachTableUnlocked(const String & table_name, const StoragePtr & table, std::unique_lock<std::mutex> & lock);
    StoragePtr detachTableUnlocked(const String & table_name, std::unique_lock<std::mutex> & lock);
@ -130,7 +130,7 @@ public:
    virtual void loadStoredObjects(Context & /*context*/, bool /*has_force_restore_data_flag*/) {}

    /// Check the existence of the table.
    virtual bool isTableExist(const String & name) const = 0;
    virtual bool isTableExist(const String & name, const Context & context) const = 0;

    /// Check the existence of the dictionary
    virtual bool isDictionaryExist(const String & /*name*/) const

@ -139,7 +139,7 @@ public:
    }

    /// Get the table for work. Return nullptr if there is no table.
    virtual StoragePtr tryGetTable(const String & name) const = 0;
    virtual StoragePtr tryGetTable(const String & name, const Context & context) const = 0;

    virtual UUID tryGetTableUUID(const String & /*table_name*/) const { return UUIDHelpers::Nil; }

@ -147,7 +147,7 @@ public:

    /// Get an iterator that allows you to pass through all the tables.
    /// It is possible to have "hidden" tables that are not visible when passing through, but are visible if you get them by name using the functions above.
    virtual DatabaseTablesIteratorPtr getTablesIterator(const FilterByNameFunction & filter_by_table_name = {}) = 0;
    virtual DatabaseTablesIteratorPtr getTablesIterator(const Context & context, const FilterByNameFunction & filter_by_table_name = {}) = 0;

    /// Get an iterator to pass through all the dictionaries.
    virtual DatabaseDictionariesIteratorPtr getDictionariesIterator([[maybe_unused]] const FilterByNameFunction & filter_by_dictionary_name = {})

@ -249,14 +249,14 @@ public:
    }

    /// Get the CREATE TABLE query for the table. It can also provide information for detached tables for which there is metadata.
    ASTPtr tryGetCreateTableQuery(const String & name) const noexcept
    ASTPtr tryGetCreateTableQuery(const String & name, const Context & context) const noexcept
    {
        return getCreateTableQueryImpl(name, false);
        return getCreateTableQueryImpl(name, context, false);
    }

    ASTPtr getCreateTableQuery(const String & name) const
    ASTPtr getCreateTableQuery(const String & name, const Context & context) const
    {
        return getCreateTableQueryImpl(name, true);
        return getCreateTableQueryImpl(name, context, true);
    }

    /// Get the CREATE DICTIONARY query for the dictionary. Returns nullptr if dictionary doesn't exists.

@ -304,7 +304,7 @@ public:
    virtual ~IDatabase() {}

protected:
    virtual ASTPtr getCreateTableQueryImpl(const String & /*name*/, bool throw_on_error) const
    virtual ASTPtr getCreateTableQueryImpl(const String & /*name*/, const Context & /*context*/, bool throw_on_error) const
    {
        if (throw_on_error)
            throw Exception("There is no SHOW CREATE TABLE query for Database" + getEngineName(), ErrorCodes::CANNOT_GET_CREATE_TABLE_QUERY);
@ -302,37 +302,33 @@ void CacheDictionary::getItemsString(

    /// Request new values sync.
    /// We have request both cache_not_found_ids and cache_expired_ids.
    if (!cache_not_found_ids.empty())
    std::vector<Key> required_ids;
    required_ids.reserve(cache_not_found_ids.size() + cache_expired_ids.size());
    std::transform(
        std::begin(cache_not_found_ids), std::end(cache_not_found_ids),
        std::back_inserter(required_ids), [](auto & pair) { return pair.first; });
    std::transform(
        std::begin(cache_expired_ids), std::end(cache_expired_ids),
        std::back_inserter(required_ids), [](auto & pair) { return pair.first; });

    auto on_cell_updated = [&] (const auto id, const auto cell_idx)
    {
        std::vector<Key> required_ids;
        required_ids.reserve(cache_not_found_ids.size() + cache_expired_ids.size());
        std::transform(
            std::begin(cache_not_found_ids), std::end(cache_not_found_ids),
            std::back_inserter(required_ids), [](auto & pair) { return pair.first; });
        std::transform(
            std::begin(cache_expired_ids), std::end(cache_expired_ids),
            std::back_inserter(required_ids), [](auto & pair) { return pair.first; });
        const auto attribute_value = attribute_array[cell_idx];

        auto on_cell_updated = [&] (const auto id, const auto cell_idx)
        {
            const auto attribute_value = attribute_array[cell_idx];
            map[id] = String{attribute_value};
            total_length += (attribute_value.size + 1) * cache_not_found_ids[id].size();
        };

        map[id] = String{attribute_value};
        total_length += (attribute_value.size + 1) * cache_not_found_ids[id].size();
    };
    auto on_id_not_found = [&] (const auto id, const auto)
    {
        for (const auto row : cache_not_found_ids[id])
            total_length += get_default(row).size + 1;
    };

        auto on_id_not_found = [&] (const auto id, const auto)
        {
            for (const auto row : cache_not_found_ids[id])
                total_length += get_default(row).size + 1;
        };

        auto update_unit_ptr = std::make_shared<UpdateUnit>(required_ids, on_cell_updated, on_id_not_found);

        tryPushToUpdateQueueOrThrow(update_unit_ptr);
        waitForCurrentUpdateFinish(update_unit_ptr);
    }
    auto update_unit_ptr = std::make_shared<UpdateUnit>(required_ids, on_cell_updated, on_id_not_found);

    tryPushToUpdateQueueOrThrow(update_unit_ptr);
    waitForCurrentUpdateFinish(update_unit_ptr);
    out->getChars().reserve(total_length);

    for (const auto row : ext::range(0, ext::size(ids)))
@ -24,8 +24,8 @@ SRCS(
    ComplexKeyCacheDictionary_generate3.cpp
    ComplexKeyCacheDictionary_setAttributeValue.cpp
    ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp
    ComplexKeyHashedDictionary.cpp
    ComplexKeyDirectDictionary.cpp
    ComplexKeyHashedDictionary.cpp
    DictionaryBlockInputStreamBase.cpp
    DictionaryFactory.cpp
    DictionarySourceFactory.cpp
@ -43,7 +43,7 @@ static auto getJoin(const ColumnsWithTypeAndName & arguments, const Context & co
        ++dot;
    }
    String table_name = join_name.substr(dot);
    auto table = DatabaseCatalog::instance().getTable({database_name, table_name});
    auto table = DatabaseCatalog::instance().getTable({database_name, table_name}, context);
    auto storage_join = std::dynamic_pointer_cast<StorageJoin>(table);
    if (!storage_join)
        throw Exception{"Table " + join_name + " should have engine StorageJoin", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
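For reference, the function whose table lookup this hunk patches is used like this (hypothetical database and table names; assumes a Join-engine table created beforehand):

``` sql
-- Assumes something like:
-- CREATE TABLE db.id_val (id UInt32, val String) ENGINE = Join(ANY, LEFT, id);
SELECT joinGet('db.id_val', 'val', toUInt32(1));
```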
@ -113,7 +113,7 @@ void FunctionHasColumnInTable::executeImpl(Block & block, const ColumnNumbers &
    bool has_column;
    if (host_name.empty())
    {
        const StoragePtr & table = DatabaseCatalog::instance().getTable({database_name, table_name});
        const StoragePtr & table = DatabaseCatalog::instance().getTable({database_name, table_name}, global_context);
        has_column = table->getColumns().hasPhysical(column_name);
    }
    else
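A usage sketch for both branches of this function (hypothetical table; `hits6` is borrowed from a test elsewhere in this diff):

``` sql
-- Local form: resolved through the DatabaseCatalog lookup patched above.
SELECT hasColumnInTable('default', 'hits6', 'WatchID');
-- Remote form: takes a host name as the first argument instead.
SELECT hasColumnInTable('localhost', 'default', 'hits6', 'WatchID');
```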
@ -171,7 +171,6 @@ SRCS(
    FunctionsRound.cpp
    FunctionsStringArray.cpp
    FunctionsStringSimilarity.cpp
    FunctionUnixTimestamp64.h
    GatherUtils/concat.cpp
    GatherUtils/createArraySink.cpp
    GatherUtils/createArraySource.cpp

@ -285,10 +284,10 @@ SRCS(
    rand64.cpp
    randConstant.cpp
    rand.cpp
    randomFixedString.cpp
    randomPrintableASCII.cpp
    randomString.cpp
    randomStringUTF8.cpp
    randomFixedString.cpp
    regexpQuoteMeta.cpp
    registerFunctionsArithmetic.cpp
    registerFunctionsComparison.cpp

@ -308,8 +307,8 @@ SRCS(
    registerFunctionsStringRegexp.cpp
    registerFunctionsStringSearch.cpp
    registerFunctionsTuple.cpp
    registerFunctionsVisitParam.cpp
    registerFunctionsUnixTimestamp64.cpp
    registerFunctionsVisitParam.cpp
    reinterpretAsFixedString.cpp
    reinterpretAsString.cpp
    reinterpretStringAs.cpp

@ -390,10 +389,10 @@ SRCS(
    toTime.cpp
    toTimeZone.cpp
    toTypeName.cpp
    toValidUTF8.cpp
    toUnixTimestamp64Micro.cpp
    toUnixTimestamp64Milli.cpp
    toUnixTimestamp64Nano.cpp
    toValidUTF8.cpp
    toYear.cpp
    toYYYYMM.cpp
    toYYYYMMDD.cpp

@ -424,8 +423,8 @@ SRCS(
    URL/fragment.cpp
    URL/path.cpp
    URL/pathFull.cpp
    URL/protocol.cpp
    URL/port.cpp
    URL/protocol.cpp
    URL/queryStringAndFragment.cpp
    URL/queryString.cpp
    URL/registerFunctionsURL.cpp
@ -283,7 +283,9 @@ static void parseComplexEscapeSequence(Vector & s, ReadBuffer & buf)
|
||||
if (buf.eof())
|
||||
throw Exception("Cannot parse escape sequence", ErrorCodes::CANNOT_PARSE_ESCAPE_SEQUENCE);
|
||||
|
||||
if (*buf.position() == 'x')
|
||||
char char_after_backslash = *buf.position();
|
||||
|
||||
if (char_after_backslash == 'x')
|
||||
{
|
||||
++buf.position();
|
||||
/// escape sequence of the form \xAA
|
||||
@ -291,7 +293,7 @@ static void parseComplexEscapeSequence(Vector & s, ReadBuffer & buf)
|
||||
readPODBinary(hex_code, buf);
|
||||
s.push_back(unhex2(hex_code));
|
||||
}
|
||||
else if (*buf.position() == 'N')
|
||||
else if (char_after_backslash == 'N')
|
||||
{
|
||||
/// Support for NULLs: \N sequence must be parsed as empty string.
|
||||
++buf.position();
|
||||
@ -299,7 +301,22 @@ static void parseComplexEscapeSequence(Vector & s, ReadBuffer & buf)
|
||||
else
|
||||
{
|
||||
/// The usual escape sequence of a single character.
|
||||
s.push_back(parseEscapeSequence(*buf.position()));
|
||||
char decoded_char = parseEscapeSequence(char_after_backslash);
|
||||
|
||||
/// For convenience using LIKE and regular expressions,
|
||||
/// we leave backslash when user write something like 'Hello 100\%':
|
||||
/// it is parsed like Hello 100\% instead of Hello 100%
|
||||
if (decoded_char != '\\'
|
||||
&& decoded_char != '\''
|
||||
&& decoded_char != '"'
|
||||
&& decoded_char != '`' /// MySQL style identifiers
|
||||
&& decoded_char != '/' /// JavaScript in HTML
|
||||
&& !isControlASCII(decoded_char))
|
||||
{
|
||||
s.push_back('\\');
|
||||
}
|
||||
|
||||
s.push_back(decoded_char);
|
||||
++buf.position();
|
||||
}
|
||||
}
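
The rewritten branch above reads the character after the backslash once and, for characters with no special meaning, keeps the backslash in the output so LIKE patterns such as 'Hello 100\%' survive parsing. A minimal standalone sketch of that decision, with hypothetical stand-ins (parseEscapeSequenceChar, isControlASCIIChar) for the real helpers:

#include <string>

static char parseEscapeSequenceChar(char c)  /// assumption: maps 'n' -> '\n' and so on
{
    switch (c) { case 'n': return '\n'; case 't': return '\t'; case 'r': return '\r'; default: return c; }
}

static bool isControlASCIIChar(char c) { return static_cast<unsigned char>(c) < 32; }

static void appendDecodedEscape(std::string & s, char char_after_backslash)
{
    char decoded_char = parseEscapeSequenceChar(char_after_backslash);
    if (decoded_char != '\\' && decoded_char != '\'' && decoded_char != '"'
        && decoded_char != '`' && decoded_char != '/' && !isControlASCIIChar(decoded_char))
        s.push_back('\\');  /// '\%' is kept as "\%", so LIKE still sees the escaped wildcard
    s.push_back(decoded_char);
}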
@ -633,7 +650,6 @@ void readCSVStringInto(Vector & s, ReadBuffer & buf, const FormatSettings::CSV &
                ++next_pos;
        }();

        appendToStringOrVector(s, buf, next_pos);
        buf.position() = next_pos;
@ -37,9 +37,6 @@ target_link_libraries (parse_int_perf2 PRIVATE clickhouse_common_io)
add_executable (read_write_int read_write_int.cpp)
target_link_libraries (read_write_int PRIVATE clickhouse_common_io)

add_executable (mempbrk mempbrk.cpp)
target_link_libraries (mempbrk PRIVATE clickhouse_common_io)

add_executable (o_direct_and_dirty_pages o_direct_and_dirty_pages.cpp)
target_link_libraries (o_direct_and_dirty_pages PRIVATE clickhouse_common_io)
@ -1,90 +0,0 @@
#include <string>
#include <iostream>
#include <iomanip>

#include <Common/Stopwatch.h>

#include <Core/Types.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadBufferFromFileDescriptor.h>
#include <IO/WriteBufferFromFileDescriptor.h>

#include <common/find_symbols.h>

namespace DB
{
namespace ErrorCodes
{
    extern const int CANNOT_PARSE_ESCAPE_SEQUENCE;
}
}


namespace test
{
static void readEscapedString(DB::String & s, DB::ReadBuffer & buf)
{
    s = "";
    while (!buf.eof())
    {
        const char * next_pos = find_first_symbols<'\b', '\f', '\n', '\r', '\t', '\0', '\\'>(buf.position(), buf.buffer().end());

        s.append(buf.position(), next_pos - buf.position());
        buf.position() += next_pos - buf.position();

        if (!buf.hasPendingData())
            continue;

        if (*buf.position() == '\t' || *buf.position() == '\n')
            return;

        if (*buf.position() == '\\')
        {
            ++buf.position();
            if (buf.eof())
                throw DB::Exception("Cannot parse escape sequence", DB::ErrorCodes::CANNOT_PARSE_ESCAPE_SEQUENCE);
            s += DB::parseEscapeSequence(*buf.position());
            ++buf.position();
        }
    }
}
}


int main(int, char **)
{
    try
    {
        DB::ReadBufferFromFileDescriptor in(STDIN_FILENO);
        // DB::WriteBufferFromFileDescriptor out(STDOUT_FILENO);
        std::string s;
        size_t rows = 0;

        Stopwatch watch;

        while (!in.eof())
        {
            test::readEscapedString(s, in);
            in.ignore();

            ++rows;

            /* DB::writeEscapedString(s, out);
            DB::writeChar('\n', out);*/
        }

        watch.stop();
        std::cerr << std::fixed << std::setprecision(2)
            << "Read " << rows << " rows (" << in.count() / 1000000.0 << " MB) in " << watch.elapsedSeconds() << " sec., "
            << rows / watch.elapsedSeconds() << " rows/sec. (" << in.count() / watch.elapsedSeconds() / 1000000 << " MB/s.)"
            << std::endl;
    }
    catch (const DB::Exception & e)
    {
        std::cerr << e.what() << ", " << e.displayText() << std::endl;
        return 1;
    }

    return 0;
}
@ -19,23 +19,28 @@ namespace ActionLocks
}


ActionLocksManager::ActionLocksManager(const Context & context)
    : global_context(context.getGlobalContext())
{
}

template <typename F>
inline void forEachTable(F && f)
inline void forEachTable(F && f, const Context & context)
{
    for (auto & elem : DatabaseCatalog::instance().getDatabases())
        for (auto iterator = elem.second->getTablesIterator(); iterator->isValid(); iterator->next())
        for (auto iterator = elem.second->getTablesIterator(context); iterator->isValid(); iterator->next())
            f(iterator->table());
}

void ActionLocksManager::add(StorageActionBlockType action_type)
void ActionLocksManager::add(StorageActionBlockType action_type, const Context & context)
{
    forEachTable([&](const StoragePtr & table) { add(table, action_type); });
    forEachTable([&](const StoragePtr & table) { add(table, action_type); }, context);
}

void ActionLocksManager::add(const StorageID & table_id, StorageActionBlockType action_type)
{
    if (auto table = DatabaseCatalog::instance().tryGetTable(table_id))
    if (auto table = DatabaseCatalog::instance().tryGetTable(table_id, global_context))
        add(table, action_type);
}

@ -60,7 +65,7 @@ void ActionLocksManager::remove(StorageActionBlockType action_type)

void ActionLocksManager::remove(const StorageID & table_id, StorageActionBlockType action_type)
{
    if (auto table = DatabaseCatalog::instance().tryGetTable(table_id))
    if (auto table = DatabaseCatalog::instance().tryGetTable(table_id, global_context))
        remove(table, action_type);
}
@ -19,8 +19,10 @@ class Context;
class ActionLocksManager
{
public:
    ActionLocksManager(const Context & context);

    /// Adds new locks for each table
    void add(StorageActionBlockType action_type);
    void add(StorageActionBlockType action_type, const Context & context);
    /// Add new lock for a table if it has not been already added
    void add(const StorageID & table_id, StorageActionBlockType action_type);
    void add(const StoragePtr & table, StorageActionBlockType action_type);
@ -41,6 +43,7 @@ private:

    mutable std::mutex mutex;
    StorageLocks storage_locks;
    const Context & global_context;
};

}
@ -670,7 +670,7 @@ SetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no_su
    if (identifier)
    {
        auto table_id = data.context.resolveStorageID(right_in_operand);
        StoragePtr table = DatabaseCatalog::instance().tryGetTable(table_id);
        StoragePtr table = DatabaseCatalog::instance().tryGetTable(table_id, data.context);

        if (table)
        {
@ -706,7 +706,7 @@ SetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no_su
    {
        auto interpreter = interpretSubquery(right_in_operand, data.context, data.subquery_depth, {});
        subquery_for_set.source = std::make_shared<LazyBlockInputStream>(
            interpreter->getSampleBlock(), [interpreter]() mutable { return interpreter->execute().in; });
            interpreter->getSampleBlock(), [interpreter]() mutable { return interpreter->execute().getInputStream(); });

        /** Why is LazyBlockInputStream used?
          *
@ -181,7 +181,7 @@ void AsynchronousMetrics::update()
            /// Lazy database can not contain MergeTree tables
            if (db.second->getEngineName() == "Lazy")
                continue;
            for (auto iterator = db.second->getTablesIterator(); iterator->isValid(); iterator->next())
            for (auto iterator = db.second->getTablesIterator(context); iterator->isValid(); iterator->next())
            {
                ++total_number_of_tables;
                const auto & table = iterator->table();
@ -15,6 +15,7 @@
#include <Processors/Transforms/ConvertingTransform.h>
#include <Processors/Sources/SourceFromInputStream.h>
#include <Processors/Executors/TreeExecutorBlockInputStream.h>
#include <Processors/Executors/PipelineExecutingBlockInputStream.h>

namespace ProfileEvents
{
@ -70,35 +71,14 @@ SelectStreamFactory::SelectStreamFactory(
namespace
{

Pipe createLocalStream(
    const ASTPtr & query_ast, const Block & header, const Context & context, QueryProcessingStage::Enum processed_stage,
    bool add_totals_port, bool add_extremes_port, bool force_tree_shaped_pipeline)
QueryPipeline createLocalStream(
    const ASTPtr & query_ast, const Block & header, const Context & context, QueryProcessingStage::Enum processed_stage)
{
    checkStackSize();

    InterpreterSelectQuery interpreter{query_ast, context, SelectQueryOptions(processed_stage)};

    if (force_tree_shaped_pipeline)
    {
        /// This flag means that pipeline must be tree-shaped,
        /// so we can't enable processors for InterpreterSelectQuery here.
        auto stream = interpreter.execute().in;
        auto source = std::make_shared<SourceFromInputStream>(std::move(stream));

        if (add_totals_port)
            source->addTotalsPort();
        if (add_extremes_port)
            source->addExtremesPort();

        Pipe pipe(std::move(source));

        pipe.addSimpleTransform(std::make_shared<ConvertingTransform>(
            pipe.getHeader(), header, ConvertingTransform::MatchColumnsMode::Name));

        return pipe;
    }

    auto pipeline = interpreter.executeWithProcessors();
    auto pipeline = interpreter.execute().pipeline;

    pipeline.addSimpleTransform([&](const Block & source_header)
    {
@ -116,7 +96,8 @@ Pipe createLocalStream(
      */
    /// return std::make_shared<MaterializingBlockInputStream>(stream);

    return std::move(pipeline).getPipe();
    pipeline.setMaxThreads(1);
    return pipeline;
}

String formattedAST(const ASTPtr & ast)
@ -134,7 +115,7 @@ void SelectStreamFactory::createForShard(
    const Cluster::ShardInfo & shard_info,
    const String &, const ASTPtr & query_ast,
    const Context & context, const ThrottlerPtr & throttler,
    const SelectQueryInfo & query_info,
    const SelectQueryInfo &,
    Pipes & res)
{
    bool force_add_agg_info = processed_stage == QueryProcessingStage::WithMergeableState;
@ -152,8 +133,7 @@ void SelectStreamFactory::createForShard(

    auto emplace_local_stream = [&]()
    {
        res.emplace_back(createLocalStream(modified_query_ast, header, context, processed_stage,
            add_totals_port, add_extremes_port, query_info.force_tree_shaped_pipeline));
        res.emplace_back(createLocalStream(modified_query_ast, header, context, processed_stage).getPipe());
    };

    String modified_query = formattedAST(modified_query_ast);
@ -191,7 +171,7 @@ void SelectStreamFactory::createForShard(
    else
    {
        auto resolved_id = context.resolveStorageID(main_table);
        main_table_storage = DatabaseCatalog::instance().tryGetTable(resolved_id);
        main_table_storage = DatabaseCatalog::instance().tryGetTable(resolved_id, context);
    }

@ -266,7 +246,7 @@ void SelectStreamFactory::createForShard(
        auto lazily_create_stream = [
                pool = shard_info.pool, shard_num = shard_info.shard_num, modified_query, header = header, modified_query_ast, context, throttler,
                main_table = main_table, table_func_ptr = table_func_ptr, scalars = scalars, external_tables = external_tables,
                stage = processed_stage, local_delay, add_totals_port, add_extremes_port]()
                stage = processed_stage, local_delay]()
            -> BlockInputStreamPtr
        {
            auto current_settings = context.getSettingsRef();
@ -297,8 +277,8 @@ void SelectStreamFactory::createForShard(
            }

            if (try_results.empty() || local_delay < max_remote_delay)
                return std::make_shared<TreeExecutorBlockInputStream>(
                    createLocalStream(modified_query_ast, header, context, stage, add_totals_port, add_extremes_port, true));
                return std::make_shared<PipelineExecutingBlockInputStream>(
                    createLocalStream(modified_query_ast, header, context, stage));
            else
            {
                std::vector<IConnectionPool::Entry> connections;
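
The net effect of the hunks above: createLocalStream now always builds a QueryPipeline, and each caller picks the shape it needs. A hedged recap of the two consumption patterns, using the signatures shown in this diff:

// Processors path: extract a Pipe from the pipeline.
res.emplace_back(createLocalStream(modified_query_ast, header, context, processed_stage).getPipe());

// Legacy stream path: adapt the pipeline back to a BlockInputStream.
return std::make_shared<PipelineExecutingBlockInputStream>(
    createLocalStream(modified_query_ast, header, context, stage));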
@ -2017,7 +2017,7 @@ std::shared_ptr<ActionLocksManager> Context::getActionLocksManager()
    auto lock = getLock();

    if (!shared->action_locks_manager)
        shared->action_locks_manager = std::make_shared<ActionLocksManager>();
        shared->action_locks_manager = std::make_shared<ActionLocksManager>(*this);

    return shared->action_locks_manager;
}
@ -634,7 +634,7 @@ void DDLWorker::processTask(DDLTask & task, const ZooKeeperPtr & zookeeper)
    {
        /// It's not CREATE DATABASE
        auto table_id = context.tryResolveStorageID(*query_with_table, Context::ResolveOrdinary);
        storage = DatabaseCatalog::instance().tryGetTable(table_id);
        storage = DatabaseCatalog::instance().tryGetTable(table_id, context);
    }

    /// For some reason we check consistency of cluster definition only
@ -58,13 +58,17 @@ TemporaryTableHolder::TemporaryTableHolder(const Context & context_,
}


TemporaryTableHolder::TemporaryTableHolder(const Context & context_, const ColumnsDescription & columns, const ASTPtr & query)
TemporaryTableHolder::TemporaryTableHolder(
    const Context & context_,
    const ColumnsDescription & columns,
    const ConstraintsDescription & constraints,
    const ASTPtr & query)
    : TemporaryTableHolder
    (
        context_,
        [&](const StorageID & table_id)
        {
            return StorageMemory::create(table_id, ColumnsDescription{columns}, ConstraintsDescription{});
            return StorageMemory::create(table_id, ColumnsDescription{columns}, ConstraintsDescription{constraints});
        },
        query
    )
@ -97,7 +101,7 @@ StorageID TemporaryTableHolder::getGlobalTableID() const

StoragePtr TemporaryTableHolder::getTable() const
{
    auto table = temporary_tables->tryGetTable("_tmp_" + toString(id));
    auto table = temporary_tables->tryGetTable("_tmp_" + toString(id), *global_context);
    if (!table)
        throw Exception("Temporary table " + getGlobalTableID().getNameForLogs() + " not found", ErrorCodes::LOGICAL_ERROR);
    return table;
@ -108,7 +112,7 @@ void DatabaseCatalog::loadDatabases()
{
    drop_delay_sec = global_context->getConfigRef().getInt("database_atomic_delay_before_drop_table_sec", default_drop_delay_sec);

    auto db_for_temporary_and_external_tables = std::make_shared<DatabaseMemory>(TEMPORARY_DATABASE);
    auto db_for_temporary_and_external_tables = std::make_shared<DatabaseMemory>(TEMPORARY_DATABASE, *global_context);
    attachDatabase(TEMPORARY_DATABASE, db_for_temporary_and_external_tables);

    loadMarkedAsDroppedTables();
@ -159,6 +163,7 @@ DatabaseAndTable DatabaseCatalog::tryGetByUUID(const UUID & uuid) const

DatabaseAndTable DatabaseCatalog::getTableImpl(
    const StorageID & table_id,
    const Context & context,
    std::optional<Exception> * exception) const
{
    if (!table_id)
@ -206,7 +211,7 @@ DatabaseAndTable DatabaseCatalog::getTableImpl(
        database = it->second;
    }

    auto table = database->tryGetTable(table_id.table_name);
    auto table = database->tryGetTable(table_id.table_name, context);
    if (!table && exception)
        exception->emplace("Table " + table_id.getNameForLogs() + " doesn't exist.", ErrorCodes::UNKNOWN_TABLE);

@ -319,7 +324,7 @@ Databases DatabaseCatalog::getDatabases() const
    return databases;
}

bool DatabaseCatalog::isTableExist(const DB::StorageID & table_id) const
bool DatabaseCatalog::isTableExist(const DB::StorageID & table_id, const Context & context) const
{
    if (table_id.hasUUID())
        return tryGetByUUID(table_id.uuid).second != nullptr;
@ -331,12 +336,12 @@ bool DatabaseCatalog::isTableExist(const DB::StorageID & table_id) const
        if (iter != databases.end())
            db = iter->second;
    }
    return db && db->isTableExist(table_id.table_name);
    return db && db->isTableExist(table_id.table_name, context);
}

void DatabaseCatalog::assertTableDoesntExist(const StorageID & table_id) const
void DatabaseCatalog::assertTableDoesntExist(const StorageID & table_id, const Context & context) const
{
    if (isTableExist(table_id))
    if (isTableExist(table_id, context))
        throw Exception("Table " + table_id.getNameForLogs() + " already exists.", ErrorCodes::TABLE_ALREADY_EXISTS);
}

@ -468,32 +473,32 @@ bool DatabaseCatalog::isDictionaryExist(const StorageID & table_id) const
    return db && db->isDictionaryExist(table_id.getTableName());
}

StoragePtr DatabaseCatalog::getTable(const StorageID & table_id) const
StoragePtr DatabaseCatalog::getTable(const StorageID & table_id, const Context & context) const
{
    std::optional<Exception> exc;
    auto res = getTableImpl(table_id, &exc);
    auto res = getTableImpl(table_id, context, &exc);
    if (!res.second)
        throw Exception(*exc);
    return res.second;
}

StoragePtr DatabaseCatalog::tryGetTable(const StorageID & table_id) const
StoragePtr DatabaseCatalog::tryGetTable(const StorageID & table_id, const Context & context) const
{
    return getTableImpl(table_id, nullptr).second;
    return getTableImpl(table_id, context, nullptr).second;
}

DatabaseAndTable DatabaseCatalog::getDatabaseAndTable(const StorageID & table_id) const
DatabaseAndTable DatabaseCatalog::getDatabaseAndTable(const StorageID & table_id, const Context & context) const
{
    std::optional<Exception> exc;
    auto res = getTableImpl(table_id, &exc);
    auto res = getTableImpl(table_id, context, &exc);
    if (!res.second)
        throw Exception(*exc);
    return res;
}

DatabaseAndTable DatabaseCatalog::tryGetDatabaseAndTable(const StorageID & table_id) const
DatabaseAndTable DatabaseCatalog::tryGetDatabaseAndTable(const StorageID & table_id, const Context & context) const
{
    return getTableImpl(table_id, nullptr);
    return getTableImpl(table_id, context, nullptr);
}

void DatabaseCatalog::loadMarkedAsDroppedTables()
@ -21,6 +21,7 @@ class Context;
class IDatabase;
class Exception;
class ColumnsDescription;
struct ConstraintsDescription;

using DatabasePtr = std::shared_ptr<IDatabase>;
using DatabaseAndTable = std::pair<DatabasePtr, StoragePtr>;
@ -71,7 +72,11 @@ struct TemporaryTableHolder : boost::noncopyable
    TemporaryTableHolder(const Context & context, const Creator & creator, const ASTPtr & query = {});

    /// Creates temporary table with Engine=Memory
    TemporaryTableHolder(const Context & context, const ColumnsDescription & columns, const ASTPtr & query = {});
    TemporaryTableHolder(
        const Context & context,
        const ColumnsDescription & columns,
        const ConstraintsDescription & constraints,
        const ASTPtr & query = {});

    TemporaryTableHolder(TemporaryTableHolder && rhs);
    TemporaryTableHolder & operator = (TemporaryTableHolder && rhs);
@ -129,15 +134,17 @@ public:
    DatabasePtr getDatabase(const String & database_name, const Context & local_context) const;

    /// For all of the following methods database_name in table_id must be not empty (even for temporary tables).
    void assertTableDoesntExist(const StorageID & table_id) const;
    bool isTableExist(const StorageID & table_id) const;
    void assertTableDoesntExist(const StorageID & table_id, const Context & context) const;
    bool isTableExist(const StorageID & table_id, const Context & context) const;
    bool isDictionaryExist(const StorageID & table_id) const;

    StoragePtr getTable(const StorageID & table_id) const;
    StoragePtr tryGetTable(const StorageID & table_id) const;
    DatabaseAndTable getDatabaseAndTable(const StorageID & table_id) const;
    DatabaseAndTable tryGetDatabaseAndTable(const StorageID & table_id) const;
    DatabaseAndTable getTableImpl(const StorageID & table_id, std::optional<Exception> * exception = nullptr) const;
    StoragePtr getTable(const StorageID & table_id, const Context & context) const;
    StoragePtr tryGetTable(const StorageID & table_id, const Context & context) const;
    DatabaseAndTable getDatabaseAndTable(const StorageID & table_id, const Context & context) const;
    DatabaseAndTable tryGetDatabaseAndTable(const StorageID & table_id, const Context & context) const;
    DatabaseAndTable getTableImpl(const StorageID & table_id,
                                  const Context & context,
                                  std::optional<Exception> * exception = nullptr) const;

    void addDependency(const StorageID & from, const StorageID & where);
    void removeDependency(const StorageID & from, const StorageID & where);
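
Every catalog lookup now takes the calling Context explicitly instead of relying on ambient state. A minimal usage sketch under the new signatures (the identifiers are illustrative, not from this diff):

StorageID table_id{"default", "visits"};  /// hypothetical database and table names
auto & catalog = DatabaseCatalog::instance();

if (catalog.isTableExist(table_id, context))
{
    StoragePtr table = catalog.getTable(table_id, context);     /// throws UNKNOWN_TABLE if it vanished meanwhile
    StoragePtr maybe = catalog.tryGetTable(table_id, context);  /// returns nullptr instead of throwing
}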
@ -111,11 +111,11 @@ void ExecuteScalarSubqueriesMatcher::visit(const ASTSubquery & subquery, ASTPtr
    }
    else
    {
        BlockIO res = interpreter.execute();
        auto stream = interpreter.execute().getInputStream();

        try
        {
            block = res.in->read();
            block = stream->read();

            if (!block)
            {
@ -126,7 +126,7 @@ void ExecuteScalarSubqueriesMatcher::visit(const ASTSubquery & subquery, ASTPtr
                return;
            }

            if (block.rows() != 1 || res.in->read())
            if (block.rows() != 1 || stream->read())
                throw Exception("Scalar subquery returned more than one row", ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY);
        }
        catch (const Exception & e)
@ -297,13 +297,13 @@ void SelectQueryExpressionAnalyzer::tryMakeSetForIndexFromSubquery(const ASTPtr
    }

    auto interpreter_subquery = interpretSubquery(subquery_or_table_name, context, {}, query_options);
    BlockIO res = interpreter_subquery->execute();
    auto stream = interpreter_subquery->execute().getInputStream();

    SetPtr set = std::make_shared<Set>(settings.size_limits_for_set, true, context.getSettingsRef().transform_null_in);
    set->setHeader(res.in->getHeader());
    set->setHeader(stream->getHeader());

    res.in->readPrefix();
    while (Block block = res.in->read())
    stream->readPrefix();
    while (Block block = stream->read())
    {
        /// If the limits have been exceeded, give up and let the default subquery processing actions take place.
        if (!set->insertFromBlock(block))
@ -311,7 +311,7 @@ void SelectQueryExpressionAnalyzer::tryMakeSetForIndexFromSubquery(const ASTPtr
    }

    set->finishInsert();
    res.in->readSuffix();
    stream->readSuffix();

    prepared_sets[set_key] = std::move(set);
}
@ -322,7 +322,7 @@ SetPtr SelectQueryExpressionAnalyzer::isPlainStorageSetInSubquery(const ASTPtr &
    if (!table)
        return nullptr;
    auto table_id = context.resolveStorageID(subquery_or_table_name);
    const auto storage = DatabaseCatalog::instance().getTable(table_id);
    const auto storage = DatabaseCatalog::instance().getTable(table_id, context);
    if (storage->getName() != "Set")
        return nullptr;
    const auto storage_set = std::dynamic_pointer_cast<StorageSet>(storage);
@ -103,7 +103,7 @@ public:
        Block sample = interpreter->getSampleBlock();
        NamesAndTypesList columns = sample.getNamesAndTypesList();

        auto external_storage_holder = std::make_shared<TemporaryTableHolder>(context, ColumnsDescription{columns});
        auto external_storage_holder = std::make_shared<TemporaryTableHolder>(context, ColumnsDescription{columns}, ConstraintsDescription{});
        StoragePtr external_storage = external_storage_holder->getTable();

        /** We replace the subquery with the name of the temporary table.
@ -134,7 +134,7 @@ public:
            ast = database_and_table_name;

        external_tables[external_table_name] = external_storage_holder;
        subqueries_for_sets[external_table_name].source = interpreter->execute().in;
        subqueries_for_sets[external_table_name].source = interpreter->execute().getInputStream();
        subqueries_for_sets[external_table_name].table = external_storage;

        /** NOTE If it was written IN tmp_table - the existing temporary (but not external) table,
@ -2,14 +2,8 @@

#include <DataStreams/BlockIO.h>

#include <Processors/QueryPipeline.h>

namespace DB
{
namespace ErrorCodes
{
    extern const int NOT_IMPLEMENTED;
}

/** Interpreters interface for different queries.
  */
@ -22,14 +16,10 @@ public:
      */
    virtual BlockIO execute() = 0;

    virtual QueryPipeline executeWithProcessors() { throw Exception("executeWithProcessors not implemented", ErrorCodes::NOT_IMPLEMENTED); }

    virtual bool canExecuteWithProcessors() const { return false; }

    virtual bool ignoreQuota() const { return false; }
    virtual bool ignoreLimits() const { return false; }

    virtual ~IInterpreter() {}
    virtual ~IInterpreter() = default;
};

}
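
With executeWithProcessors() and canExecuteWithProcessors() removed, execute() is the single entry point and the returned BlockIO carries either a pipeline or a stream. A hedged consumer sketch (getInputStream() and pipeline.initialized() both appear at call sites elsewhere in this diff):

BlockIO res = interpreter->execute();
if (res.pipeline.initialized())
{
    /// Processors path: the interpreter produced a QueryPipeline.
    QueryPipeline & pipeline = res.pipeline;
    /// ... attach executors or output formats to the pipeline ...
}
else
{
    /// Stream path: fall back to the BlockInputStream interface.
    BlockInputStreamPtr in = res.getInputStream();
    in->readPrefix();
    while (Block block = in->read())
    {
        /// ... consume the block ...
    }
    in->readSuffix();
}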
@ -27,7 +27,7 @@ namespace
StoragePtr tryGetTable(const ASTPtr & database_and_table, const Context & context)
{
    auto table_id = context.resolveStorageID(database_and_table);
    return DatabaseCatalog::instance().tryGetTable(table_id);
    return DatabaseCatalog::instance().tryGetTable(table_id, context);
}

using CheckShardsAndTables = InJoinSubqueriesPreprocessor::CheckShardsAndTables;
@ -42,7 +42,7 @@ BlockIO InterpreterAlterQuery::execute()

    context.checkAccess(getRequiredAccess());
    auto table_id = context.resolveStorageID(alter, Context::ResolveOrdinary);
    StoragePtr table = DatabaseCatalog::instance().getTable(table_id);
    StoragePtr table = DatabaseCatalog::instance().getTable(table_id, context);

    /// Add default database to table identifiers that we can encounter in e.g. default expressions,
    /// mutation expression, etc.
@ -244,12 +244,12 @@ AccessRightsElements InterpreterAlterQuery::getRequiredAccessForCommand(const AS
        }
        case ASTAlterCommand::MOVE_PARTITION:
        {
            if ((command.move_destination_type == PartDestinationType::DISK)
                || (command.move_destination_type == PartDestinationType::VOLUME))
            if ((command.move_destination_type == DataDestinationType::DISK)
                || (command.move_destination_type == DataDestinationType::VOLUME))
            {
                required_access.emplace_back(AccessType::ALTER_MOVE_PARTITION, database, table);
            }
            else if (command.move_destination_type == PartDestinationType::TABLE)
            else if (command.move_destination_type == DataDestinationType::TABLE)
            {
                required_access.emplace_back(AccessType::SELECT | AccessType::ALTER_DELETE, database, table);
                required_access.emplace_back(AccessType::INSERT, command.to_database, command.to_table);
@ -41,7 +41,7 @@ BlockIO InterpreterCheckQuery::execute()
    auto table_id = context.resolveStorageID(check, Context::ResolveOrdinary);

    context.checkAccess(AccessType::SHOW_TABLES, table_id);
    StoragePtr table = DatabaseCatalog::instance().getTable(table_id);
    StoragePtr table = DatabaseCatalog::instance().getTable(table_id, context);
    auto check_results = table->checkData(query_ptr, context);

    Block block;
@ -406,7 +406,7 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::setProperties(AS
    else if (!create.as_table.empty())
    {
        String as_database_name = context.resolveDatabase(create.as_database);
        StoragePtr as_storage = DatabaseCatalog::instance().getTable({as_database_name, create.as_table});
        StoragePtr as_storage = DatabaseCatalog::instance().getTable({as_database_name, create.as_table}, context);

        /// as_storage->getColumns() and setEngine(...) must be called under structure lock of other_table for CREATE ... AS other_table.
        as_storage_lock = as_storage->lockStructureForShare(
@ -504,7 +504,7 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const
        String as_database_name = context.resolveDatabase(create.as_database);
        String as_table_name = create.as_table;

        ASTPtr as_create_ptr = DatabaseCatalog::instance().getDatabase(as_database_name)->getCreateTableQuery(as_table_name);
        ASTPtr as_create_ptr = DatabaseCatalog::instance().getDatabase(as_database_name)->getCreateTableQuery(as_table_name, context);
        const auto & as_create = as_create_ptr->as<ASTCreateQuery &>();

        const String qualified_name = backQuoteIfNeed(as_database_name) + "." + backQuoteIfNeed(as_table_name);
@ -546,7 +546,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
        bool if_not_exists = create.if_not_exists;

        // Table SQL definition is available even if the table is detached
        auto query = database->getCreateTableQuery(create.table);
        auto query = database->getCreateTableQuery(create.table, context);
        create = query->as<ASTCreateQuery &>(); // Copy the saved create query, but use ATTACH instead of CREATE
        create.attach = true;
        create.attach_short_syntax = true;
@ -608,7 +608,7 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
        guard = DatabaseCatalog::instance().getDDLGuard(create.database, table_name);

        /// Table can be created before or it can be created concurrently in another thread, while we were waiting in DDLGuard.
        if (database->isTableExist(table_name))
        if (database->isTableExist(table_name, context))
        {
            /// TODO Check structure of table
            if (create.if_not_exists)
@ -637,7 +637,7 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
        if (create.if_not_exists && context.tryResolveStorageID({"", table_name}, Context::ResolveExternal))
            return false;

        auto temporary_table = TemporaryTableHolder(context, properties.columns, query_ptr);
        auto temporary_table = TemporaryTableHolder(context, properties.columns, properties.constraints, query_ptr);
        context.getSessionContext().addExternalTable(table_name, std::move(temporary_table));
        return true;
    }
@ -86,7 +86,7 @@ BlockInputStreamPtr InterpreterDescribeQuery::executeImpl()
    {
        auto table_id = context.resolveStorageID(table_expression.database_and_table_name);
        context.checkAccess(AccessType::SHOW_COLUMNS, table_id);
        table = DatabaseCatalog::instance().getTable(table_id);
        table = DatabaseCatalog::instance().getTable(table_id, context);
    }

    auto table_lock = table->lockStructureForShare(
@ -81,8 +81,8 @@ BlockIO InterpreterDropQuery::executeToTable(
    auto ddl_guard = (!query.no_ddl_lock ? DatabaseCatalog::instance().getDDLGuard(table_id.database_name, table_id.table_name) : nullptr);

    /// If table was already dropped by anyone, an exception will be thrown
    auto [database, table] = query.if_exists ? DatabaseCatalog::instance().tryGetDatabaseAndTable(table_id)
                                             : DatabaseCatalog::instance().getDatabaseAndTable(table_id);
    auto [database, table] = query.if_exists ? DatabaseCatalog::instance().tryGetDatabaseAndTable(table_id, context)
                                             : DatabaseCatalog::instance().getDatabaseAndTable(table_id, context);

    if (database && table)
    {
@ -182,7 +182,7 @@ BlockIO InterpreterDropQuery::executeToTemporaryTable(const String & table_name,
    auto resolved_id = context_handle.tryResolveStorageID(StorageID("", table_name), Context::ResolveExternal);
    if (resolved_id)
    {
        StoragePtr table = DatabaseCatalog::instance().getTable(resolved_id);
        StoragePtr table = DatabaseCatalog::instance().getTable(resolved_id, context);
        if (kind == ASTDropQuery::Kind::Truncate)
        {
            auto table_lock = table->lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout);
@ -234,7 +234,7 @@ BlockIO InterpreterDropQuery::executeToDatabase(const String & database_name, AS
            ASTDropQuery query;
            query.kind = kind;
            query.database = database_name;
            for (auto iterator = database->getTablesIterator(); iterator->isValid(); iterator->next())
            for (auto iterator = database->getTablesIterator(context); iterator->isValid(); iterator->next())
            {
                query.table = iterator->name();
                executeToTable({query.database, query.table}, query);
@ -50,7 +50,7 @@ BlockInputStreamPtr InterpreterExistsQuery::executeImpl()
        {
            String database = context.resolveDatabase(exists_query->database);
            context.checkAccess(AccessType::SHOW_TABLES, database, exists_query->table);
            result = DatabaseCatalog::instance().isTableExist({database, exists_query->table});
            result = DatabaseCatalog::instance().isTableExist({database, exists_query->table}, context);
        }
    }
    else if ((exists_query = query_ptr->as<ASTExistsDictionaryQuery>()))
@ -76,7 +76,7 @@ namespace
            if (const auto * identifier = expression.database_and_table_name->as<ASTIdentifier>())
            {
                auto table_id = data.context.resolveStorageID(*identifier);
                const auto & storage = DatabaseCatalog::instance().getTable(table_id);
                const auto & storage = DatabaseCatalog::instance().getTable(table_id, data.context);

                if (auto * storage_view = dynamic_cast<StorageView *>(storage.get()))
                    storage_view->getRuntimeViewQuery(&select_query, data.context, true);
@ -70,7 +70,7 @@ StoragePtr InterpreterInsertQuery::getTable(ASTInsertQuery & query)
    }

    query.table_id = context.resolveStorageID(query.table_id);
    return DatabaseCatalog::instance().getTable(query.table_id);
    return DatabaseCatalog::instance().getTable(query.table_id, context);
}

Block InterpreterInsertQuery::getSampleBlock(const ASTInsertQuery & query, const StoragePtr & table) const
@ -204,7 +204,7 @@ BlockIO InterpreterInsertQuery::execute()
    {
        /// Passing 1 as subquery_depth will disable limiting size of intermediate result.
        InterpreterSelectWithUnionQuery interpreter_select{ query.select, context, SelectQueryOptions(QueryProcessingStage::Complete, 1)};
        res.pipeline = interpreter_select.executeWithProcessors();
        res = interpreter_select.execute();

        if (table->supportsParallelInsert() && settings.max_insert_threads > 1)
            out_streams_size = std::min(size_t(settings.max_insert_threads), res.pipeline.getNumStreams());
@ -233,6 +233,21 @@ BlockIO InterpreterInsertQuery::execute()
        else
            out = std::make_shared<PushingToViewsBlockOutputStream>(table, context, query_ptr, no_destination);

        /// Note that we wrap transforms one on top of another, so we write them in reverse of data processing order.

        /// Checking constraints. It must be done after calculation of all defaults, so we can check them on calculated columns.
        if (const auto & constraints = table->getConstraints(); !constraints.empty())
            out = std::make_shared<CheckConstraintsBlockOutputStream>(
                query.table_id, out, out->getHeader(), table->getConstraints(), context);

        /// Actually we don't know structure of input blocks from query/table,
        /// because some clients break insertion protocol (columns != header)
        out = std::make_shared<AddingDefaultBlockOutputStream>(
            out, query_sample_block, out->getHeader(), table->getColumns().getDefaults(), context);

        /// It's important to squash blocks as early as possible (before other transforms),
        /// because other transforms may work inefficiently if block size is small.

        /// Do not squash blocks if it is a sync INSERT into Distributed, since it leads to double bufferization on client and server side.
        /// Client-side bufferization might cause excessive timeouts (especially in case of big blocks).
        if (!(context.getSettingsRef().insert_distributed_sync && table->isRemote()) && !no_squash && !query.watch)
@ -244,15 +259,6 @@ BlockIO InterpreterInsertQuery::execute()
                context.getSettingsRef().min_insert_block_size_bytes);
        }

        /// Actually we don't know structure of input blocks from query/table,
        /// because some clients break insertion protocol (columns != header)
        out = std::make_shared<AddingDefaultBlockOutputStream>(
            out, query_sample_block, out->getHeader(), table->getColumns().getDefaults(), context);

        if (const auto & constraints = table->getConstraints(); !constraints.empty())
            out = std::make_shared<CheckConstraintsBlockOutputStream>(
                query.table_id, out, query_sample_block, table->getConstraints(), context);

        auto out_wrapper = std::make_shared<CountingBlockOutputStream>(out);
        out_wrapper->setProcessListElement(context.getProcessListElement());
        out = std::move(out_wrapper);
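
The comment added above is the key to reading this reordering: each output stream wraps the one constructed before it, so the last wrapper built is the first one a block enters, and construction order is the reverse of data processing order. A condensed sketch of the discipline (constructor arguments elided, the squashing wrapper named only informally):

BlockOutputStreamPtr out = /* PushingToViewsBlockOutputStream: the final destination */;
out = std::make_shared<CheckConstraintsBlockOutputStream>(/* ..., */ out /* , ... */);  /// sees blocks last, after defaults are filled in
out = std::make_shared<AddingDefaultBlockOutputStream>(out /* , ... */);                /// sees blocks second
/// the squashing wrapper is built last, so blocks enter it first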
@ -261,7 +261,7 @@ BlockIO InterpreterKillQueryQuery::execute()
        CancellationCode code = CancellationCode::Unknown;
        if (!query.test)
        {
            auto storage = DatabaseCatalog::instance().tryGetTable(table_id);
            auto storage = DatabaseCatalog::instance().tryGetTable(table_id, context);
            if (!storage)
                code = CancellationCode::NotFound;
            else
@ -25,7 +25,7 @@ BlockIO InterpreterOptimizeQuery::execute()
    context.checkAccess(getRequiredAccess());

    auto table_id = context.resolveStorageID(ast, Context::ResolveOrdinary);
    StoragePtr table = DatabaseCatalog::instance().getTable(table_id);
    StoragePtr table = DatabaseCatalog::instance().getTable(table_id, context);
    table->optimize(query_ptr, ast.partition, ast.final, ast.deduplicate, context);
    return {};
}
@ -78,7 +78,7 @@ BlockIO InterpreterRenameQuery::execute()
    for (auto & elem : descriptions)
    {
        if (!rename.exchange)
            database_catalog.assertTableDoesntExist(StorageID(elem.to_database_name, elem.to_table_name));
            database_catalog.assertTableDoesntExist(StorageID(elem.to_database_name, elem.to_table_name), context);

        database_catalog.getDatabase(elem.from_database_name)->renameTable(
            context,
File diff suppressed because it is too large
@ -77,12 +77,6 @@ public:
    /// Execute a query. Get the stream of blocks to read.
    BlockIO execute() override;

    /// Execute the query and return multiple streams for parallel processing.
    BlockInputStreams executeWithMultipleStreams(QueryPipeline & parent_pipeline);

    QueryPipeline executeWithProcessors() override;
    bool canExecuteWithProcessors() const override { return true; }

    bool ignoreLimits() const override { return options.ignore_limits; }
    bool ignoreQuota() const override { return options.ignore_quota; }

@ -108,89 +102,15 @@ private:

    Block getSampleBlockImpl();

    struct Pipeline
    {
        /** Streams of data.
          * The source data streams are produced in the executeFetchColumns function.
          * Then they are converted (wrapped in other streams) using the `execute*` functions,
          * to get the whole pipeline running the query.
          */
        BlockInputStreams streams;

        /** When executing FULL or RIGHT JOIN, there will be a data stream from which you can read "not joined" rows.
          * It has a special meaning, since reading from it should be done after reading from the main streams.
          * It is appended to the main streams in UnionBlockInputStream or ParallelAggregatingBlockInputStream.
          */
        BlockInputStreamPtr stream_with_non_joined_data;
        bool union_stream = false;

        /// Cache value of InterpreterSelectQuery::max_streams
        size_t max_threads = 1;

        BlockInputStreamPtr & firstStream() { return streams.at(0); }

        template <typename Transform>
        void transform(Transform && transformation)
        {
            for (auto & stream : streams)
                transformation(stream);

            if (stream_with_non_joined_data)
                transformation(stream_with_non_joined_data);
        }

        bool hasMoreThanOneStream() const
        {
            return streams.size() + (stream_with_non_joined_data ? 1 : 0) > 1;
        }

        /// Resulting stream is a mix of other streams' data. Distinct and/or order guarantees are broken.
        bool hasMixedStreams() const
        {
            return hasMoreThanOneStream() || union_stream;
        }

        bool hasDelayedStream() const { return stream_with_non_joined_data != nullptr; }
        bool initialized() const { return !streams.empty(); }

        /// Compatibility with QueryPipeline (Processors)
        void setMaxThreads(size_t max_threads_) { max_threads = max_threads_; }
        size_t getNumThreads() const { return max_threads; }
    };

    template <typename TPipeline>
    void executeImpl(TPipeline & pipeline, const BlockInputStreamPtr & prepared_input, std::optional<Pipe> prepared_pipe, QueryPipeline & save_context_and_storage);
    void executeImpl(QueryPipeline & pipeline, const BlockInputStreamPtr & prepared_input, std::optional<Pipe> prepared_pipe);

    /// Different stages of query execution.

    /// dry_run - don't read from table, use empty header block instead.
    void executeWithMultipleStreamsImpl(Pipeline & pipeline, const BlockInputStreamPtr & input, bool dry_run);

    template <typename TPipeline>
    void executeFetchColumns(QueryProcessingStage::Enum processing_stage, TPipeline & pipeline,
    void executeFetchColumns(
        QueryProcessingStage::Enum processing_stage,
        QueryPipeline & pipeline,
        const PrewhereInfoPtr & prewhere_info,
        const Names & columns_to_remove_after_prewhere,
        QueryPipeline & save_context_and_storage);

    void executeWhere(Pipeline & pipeline, const ExpressionActionsPtr & expression, bool remove_filter);
    void executeAggregation(Pipeline & pipeline, const ExpressionActionsPtr & expression, bool overflow_row, bool final);
    void executeMergeAggregated(Pipeline & pipeline, bool overflow_row, bool final);
    void executeTotalsAndHaving(Pipeline & pipeline, bool has_having, const ExpressionActionsPtr & expression, bool overflow_row, bool final);
    void executeHaving(Pipeline & pipeline, const ExpressionActionsPtr & expression);
    static void executeExpression(Pipeline & pipeline, const ExpressionActionsPtr & expression);
    void executeOrder(Pipeline & pipeline, InputSortingInfoPtr sorting_info);
    void executeWithFill(Pipeline & pipeline);
    void executeMergeSorted(Pipeline & pipeline);
    void executePreLimit(Pipeline & pipeline);
    void executeUnion(Pipeline & pipeline, Block header);
    void executeLimitBy(Pipeline & pipeline);
    void executeLimit(Pipeline & pipeline);
    void executeOffset(Pipeline & pipeline);
    static void executeProjection(Pipeline & pipeline, const ExpressionActionsPtr & expression);
    void executeDistinct(Pipeline & pipeline, bool before_order, Names columns);
    void executeExtremes(Pipeline & pipeline);
    void executeSubqueriesInSetsAndJoins(Pipeline & pipeline, const std::unordered_map<String, SubqueryForSet> & subqueries_for_sets);
    void executeMergeSorted(Pipeline & pipeline, const SortDescription & sort_description, UInt64 limit);
        const Names & columns_to_remove_after_prewhere);

    void executeWhere(QueryPipeline & pipeline, const ExpressionActionsPtr & expression, bool remove_filter);
    void executeAggregation(QueryPipeline & pipeline, const ExpressionActionsPtr & expression, bool overflow_row, bool final);
@ -213,17 +133,12 @@ private:

    String generateFilterActions(ExpressionActionsPtr & actions, const ASTPtr & row_policy_filter, const Names & prerequisite_columns = {}) const;

    /// Add ConvertingBlockInputStream to specified header.
    static void unifyStreams(Pipeline & pipeline, Block header);

    enum class Modificator
    {
        ROLLUP = 0,
        CUBE = 1
    };

    void executeRollupOrCube(Pipeline & pipeline, Modificator modificator);

    void executeRollupOrCube(QueryPipeline & pipeline, Modificator modificator);

    /** If there is a SETTINGS section in the SELECT query, then apply settings from it.
@ -3,15 +3,9 @@
#include <Interpreters/Context.h>
#include <Parsers/ASTSelectWithUnionQuery.h>
#include <Parsers/ASTSelectQuery.h>
#include <DataStreams/UnionBlockInputStream.h>
#include <DataStreams/NullBlockInputStream.h>
#include <DataStreams/ConcatBlockInputStream.h>
#include <DataStreams/ConvertingBlockInputStream.h>
#include <Columns/getLeastSuperColumn.h>
#include <Columns/ColumnConst.h>
#include <Common/typeid_cast.h>
#include <Parsers/queryToString.h>
#include <Parsers/ASTExpressionList.h>

#include <Processors/Sources/NullSource.h>
#include <Processors/QueryPipeline.h>
@ -180,69 +174,10 @@ Block InterpreterSelectWithUnionQuery::getSampleBlock(
}


BlockInputStreams InterpreterSelectWithUnionQuery::executeWithMultipleStreams(QueryPipeline & parent_pipeline)
{
    BlockInputStreams nested_streams;

    for (auto & interpreter : nested_interpreters)
    {
        BlockInputStreams streams = interpreter->executeWithMultipleStreams(parent_pipeline);
        nested_streams.insert(nested_streams.end(), streams.begin(), streams.end());
    }

    /// Unify data structure.
    if (nested_interpreters.size() > 1)
    {
        for (auto & stream : nested_streams)
            stream = std::make_shared<ConvertingBlockInputStream>(stream, result_header, ConvertingBlockInputStream::MatchColumnsMode::Position);
        parent_pipeline.addInterpreterContext(context);
    }

    /// Update max_streams due to:
    /// - max_distributed_connections for Distributed() engine
    /// - max_streams_to_max_threads_ratio
    ///
    /// XXX: res.pipeline.getMaxThreads() cannot be used since it is capped to
    /// number of streams, which is empty for non-Processors case.
    max_streams = (*std::min_element(nested_interpreters.begin(), nested_interpreters.end(), [](const auto & a, const auto & b)
    {
        return a->getMaxStreams() < b->getMaxStreams();
    }))->getMaxStreams();

    return nested_streams;
}


BlockIO InterpreterSelectWithUnionQuery::execute()
{
    BlockIO res;
    BlockInputStreams nested_streams = executeWithMultipleStreams(res.pipeline);
    BlockInputStreamPtr result_stream;

    if (nested_streams.empty())
    {
        result_stream = std::make_shared<NullBlockInputStream>(getSampleBlock());
    }
    else if (nested_streams.size() == 1)
    {
        result_stream = nested_streams.front();
        nested_streams.clear();
    }
    else
    {
        result_stream = std::make_shared<UnionBlockInputStream>(nested_streams, nullptr, max_streams);
        nested_streams.clear();
    }

    res.in = result_stream;
    res.pipeline.addInterpreterContext(context);
    return res;
}


QueryPipeline InterpreterSelectWithUnionQuery::executeWithProcessors()
{
    QueryPipeline main_pipeline;
    QueryPipeline & main_pipeline = res.pipeline;
    std::vector<QueryPipeline> pipelines;
    bool has_main_pipeline = false;

@ -254,12 +189,12 @@ QueryPipeline InterpreterSelectWithUnionQuery::executeWithProcessors()
        if (!has_main_pipeline)
        {
            has_main_pipeline = true;
            main_pipeline = interpreter->executeWithProcessors();
            main_pipeline = interpreter->execute().pipeline;
            headers.emplace_back(main_pipeline.getHeader());
        }
        else
        {
            pipelines.emplace_back(interpreter->executeWithProcessors());
            pipelines.emplace_back(interpreter->execute().pipeline);
            headers.emplace_back(pipelines.back().getHeader());
        }
    }
@ -280,7 +215,7 @@ QueryPipeline InterpreterSelectWithUnionQuery::executeWithProcessors()

    main_pipeline.addInterpreterContext(context);

    return main_pipeline;
    return res;
}
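
After this rewrite, InterpreterSelectWithUnionQuery::execute() produces a single QueryPipeline in res.pipeline by folding the children's pipelines together, instead of joining BlockInputStreams with UnionBlockInputStream. A condensed sketch of the control flow (the header-unification and merge step between collecting and returning is elided by this hunk, so it is only summarised in a comment):

BlockIO res;
QueryPipeline & main_pipeline = res.pipeline;
std::vector<QueryPipeline> pipelines;
bool has_main_pipeline = false;

for (auto & interpreter : nested_interpreters)
{
    if (!has_main_pipeline)
    {
        has_main_pipeline = true;
        main_pipeline = interpreter->execute().pipeline;   /// first child becomes the main pipeline
    }
    else
        pipelines.emplace_back(interpreter->execute().pipeline);
}
/// ... align headers and fold `pipelines` into main_pipeline (not shown in this hunk) ...
main_pipeline.addInterpreterContext(context);
return res;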
@ -29,12 +29,6 @@ public:

    BlockIO execute() override;

    /// Execute the query without union of streams.
    BlockInputStreams executeWithMultipleStreams(QueryPipeline & parent_pipeline);

    QueryPipeline executeWithProcessors() override;
    bool canExecuteWithProcessors() const override { return true; }

    bool ignoreLimits() const override { return options.ignore_limits; }
    bool ignoreQuota() const override { return options.ignore_quota; }
@ -62,7 +62,7 @@ void InterpreterSetRoleQuery::setRole(const ASTSetRoleQuery & query)

void InterpreterSetRoleQuery::setDefaultRole(const ASTSetRoleQuery & query)
{
    context.checkAccess(AccessType::CREATE_USER | AccessType::DROP_USER);
    context.checkAccess(AccessType::ALTER_USER);

    auto & access_control = context.getAccessControlManager();
    std::vector<UUID> to_users = ExtendedRoleSet{*query.to_users, access_control, context.getUserID()}.getMatchingIDs(access_control);
@ -50,7 +50,7 @@ BlockInputStreamPtr InterpreterShowCreateQuery::executeImpl()
        auto resolve_table_type = show_query->temporary ? Context::ResolveExternal : Context::ResolveOrdinary;
        auto table_id = context.resolveStorageID(*show_query, resolve_table_type);
        context.checkAccess(AccessType::SHOW_COLUMNS, table_id);
        create_query = DatabaseCatalog::instance().getDatabase(table_id.database_name)->getCreateTableQuery(table_id.table_name);
        create_query = DatabaseCatalog::instance().getDatabase(table_id.database_name)->getCreateTableQuery(table_id.table_name, context);
    }
    else if ((show_query = query_ptr->as<ASTShowCreateDatabaseQuery>()))
    {
@ -144,7 +144,7 @@ void InterpreterSystemQuery::startStopAction(StorageActionBlockType action_type,
        auto access = context.getAccess();
        for (auto & elem : DatabaseCatalog::instance().getDatabases())
        {
            for (auto iterator = elem.second->getTablesIterator(); iterator->isValid(); iterator->next())
            for (auto iterator = elem.second->getTablesIterator(context); iterator->isValid(); iterator->next())
            {
                if (!access->isGranted(log, getRequiredAccessType(action_type), elem.first, iterator->name()))
                    continue;
@ -321,7 +321,7 @@ StoragePtr InterpreterSystemQuery::tryRestartReplica(const StorageID & replica,
    context.checkAccess(AccessType::SYSTEM_RESTART_REPLICA, replica);

    auto table_ddl_guard = need_ddl_guard ? DatabaseCatalog::instance().getDDLGuard(replica.getDatabaseName(), replica.getTableName()) : nullptr;
    auto [database, table] = DatabaseCatalog::instance().tryGetDatabaseAndTable(replica);
    auto [database, table] = DatabaseCatalog::instance().tryGetDatabaseAndTable(replica, context);
    ASTPtr create_ast;

    /// Detach actions
@ -332,7 +332,7 @@ StoragePtr InterpreterSystemQuery::tryRestartReplica(const StorageID & replica,
    {
        /// If table was already dropped by anyone, an exception will be thrown
        auto table_lock = table->lockExclusively(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout);
        create_ast = database->getCreateTableQuery(replica.table_name);
        create_ast = database->getCreateTableQuery(replica.table_name, context);

        database->detachTable(replica.table_name);
    }
@ -369,7 +369,7 @@ void InterpreterSystemQuery::restartReplicas(Context & system_context)
    for (auto & elem : catalog.getDatabases())
    {
        DatabasePtr & database = elem.second;
        for (auto iterator = database->getTablesIterator(); iterator->isValid(); iterator->next())
        for (auto iterator = database->getTablesIterator(context); iterator->isValid(); iterator->next())
        {
            if (dynamic_cast<const StorageReplicatedMergeTree *>(iterator->table().get()))
                replica_names.emplace_back(StorageID{database->getDatabaseName(), iterator->name()});
@ -394,7 +394,7 @@ void InterpreterSystemQuery::restartReplicas(Context & system_context)
void InterpreterSystemQuery::syncReplica(ASTSystemQuery &)
{
    context.checkAccess(AccessType::SYSTEM_SYNC_REPLICA, table_id);
    StoragePtr table = DatabaseCatalog::instance().getTable(table_id);
    StoragePtr table = DatabaseCatalog::instance().getTable(table_id, context);

    if (auto * storage_replicated = dynamic_cast<StorageReplicatedMergeTree *>(table.get()))
    {
@ -416,7 +416,7 @@ void InterpreterSystemQuery::flushDistributed(ASTSystemQuery &)
{
    context.checkAccess(AccessType::SYSTEM_FLUSH_DISTRIBUTED, table_id);

    if (auto * storage_distributed = dynamic_cast<StorageDistributed *>(DatabaseCatalog::instance().getTable(table_id).get()))
    if (auto * storage_distributed = dynamic_cast<StorageDistributed *>(DatabaseCatalog::instance().getTable(table_id, context).get()))
        storage_distributed->flushClusterNodesAllData();
    else
        throw Exception("Table " + table_id.getNameForLogs() + " is not distributed", ErrorCodes::BAD_ARGUMENTS);
@ -40,7 +40,7 @@ BlockIO InterpreterWatchQuery::execute()
    auto table_id = context.resolveStorageID(query, Context::ResolveOrdinary);

    /// Get storage
    storage = DatabaseCatalog::instance().tryGetTable(table_id);
    storage = DatabaseCatalog::instance().tryGetTable(table_id, context);

    if (!storage)
        throw Exception("Table " + table_id.getNameForLogs() + " doesn't exist.",
@ -181,7 +181,7 @@ StoragePtr JoinedTables::getLeftTableStorage()
    }

    /// Read from table. Even without table expression (implicit SELECT ... FROM system.one).
    return DatabaseCatalog::instance().getTable(table_id);
    return DatabaseCatalog::instance().getTable(table_id, context);
}

bool JoinedTables::resolveTables()
@ -261,7 +261,7 @@ std::shared_ptr<TableJoin> JoinedTables::makeTableJoin(const ASTSelectQuery & se
    if (table_to_join.database_and_table_name)
    {
        auto joined_table_id = context.resolveStorageID(table_to_join.database_and_table_name);
        StoragePtr table = DatabaseCatalog::instance().tryGetTable(joined_table_id);
        StoragePtr table = DatabaseCatalog::instance().tryGetTable(joined_table_id, context);
        if (table)
        {
            if (dynamic_cast<StorageJoin *>(table.get()) ||
@@ -184,7 +184,7 @@ bool isStorageTouchedByMutations(
     /// For some reason it may copy context and and give it into ExpressionBlockInputStream
     /// after that we will use context from destroyed stack frame in our stream.
     InterpreterSelectQuery interpreter(select_query, context_copy, storage, SelectQueryOptions().ignoreLimits());
-    BlockInputStreamPtr in = interpreter.execute().in;
+    BlockInputStreamPtr in = interpreter.execute().getInputStream();

     Block block = in->read();
     if (!block.rows())

@@ -687,7 +687,7 @@ void MutationsInterpreter::validate(TableStructureReadLockHolder &)
     }

     /// Do not use getSampleBlock in order to check the whole pipeline.
-    Block first_stage_header = select_interpreter->execute().in->getHeader();
+    Block first_stage_header = select_interpreter->execute().getInputStream()->getHeader();
     BlockInputStreamPtr in = std::make_shared<NullBlockInputStream>(first_stage_header);
     addStreamsForLaterStages(stages, in)->getHeader();
 }

@@ -697,7 +697,7 @@ BlockInputStreamPtr MutationsInterpreter::execute(TableStructureReadLockHolder &
     if (!can_execute)
         throw Exception("Cannot execute mutations interpreter because can_execute flag set to false", ErrorCodes::LOGICAL_ERROR);

-    BlockInputStreamPtr in = select_interpreter->execute().in;
+    BlockInputStreamPtr in = select_interpreter->execute().getInputStream();

     auto result_stream = addStreamsForLaterStages(stages, in);
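These hunks replace direct reads of the BlockIO::in member with a getInputStream() accessor. Judging only by what this diff shows (the old .in member, res.pipeline, and pipeline.initialized() in the executeQuery hunks below), the accessor plausibly hides whether the interpreter produced a plain stream or a processors pipeline. A sketch of that idea, assuming a hypothetical adapter wrapPipelineAsStream():

    // Sketch only; the real BlockIO definition is not part of this diff.
    struct BlockIO
    {
        BlockInputStreamPtr in;   /// stream-based result, possibly empty
        QueryPipeline pipeline;   /// processors-based result, possibly uninitialized

        BlockInputStreamPtr getInputStream()
        {
            if (in)
                return in;
            /// Hypothetical adapter that exposes an initialized pipeline as a stream.
            return wrapPipelineAsStream(std::move(pipeline));
        }
    };

Callers such as MutationsInterpreter above then no longer care which kind of result the interpreter produced.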
@@ -13,7 +13,7 @@ void SubqueryForSet::makeSource(std::shared_ptr<InterpreterSelectWithUnionQuery>
 {
     joined_block_aliases = std::move(joined_block_aliases_);
     source = std::make_shared<LazyBlockInputStream>(interpreter->getSampleBlock(),
-        [interpreter]() mutable { return interpreter->execute().in; });
+        [interpreter]() mutable { return interpreter->execute().getInputStream(); });

     sample_block = source->getHeader();
     renameColumns(sample_block);
@@ -431,7 +431,7 @@ void SystemLog<LogElement>::prepareTable()
 {
     String description = table_id.getNameForLogs();

-    table = DatabaseCatalog::instance().tryGetTable(table_id);
+    table = DatabaseCatalog::instance().tryGetTable(table_id, context);

     if (table)
     {

@@ -442,7 +442,7 @@ void SystemLog<LogElement>::prepareTable()
         {
             /// Rename the existing table.
             int suffix = 0;
-            while (DatabaseCatalog::instance().isTableExist({table_id.database_name, table_id.table_name + "_" + toString(suffix)}))
+            while (DatabaseCatalog::instance().isTableExist({table_id.database_name, table_id.table_name + "_" + toString(suffix)}, context))
                 ++suffix;

             auto rename = std::make_shared<ASTRenameQuery>();

@@ -483,7 +483,7 @@ void SystemLog<LogElement>::prepareTable()
         interpreter.setInternal(true);
         interpreter.execute();

-        table = DatabaseCatalog::instance().getTable(table_id);
+        table = DatabaseCatalog::instance().getTable(table_id, context);
     }

     is_prepared = true;
@@ -280,7 +280,6 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
     /// Copy query into string. It will be written to log and presented in processlist. If an INSERT query, string will not include data to insertion.
     String query(begin, query_end);
     BlockIO res;
-    QueryPipeline & pipeline = res.pipeline;

     String query_for_logging;

@@ -338,7 +337,6 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
         context.resetInputCallbacks();

         auto interpreter = InterpreterFactory::get(ast, context, stage);
-        bool use_processors = interpreter->canExecuteWithProcessors();

         std::shared_ptr<const EnabledQuota> quota;
         if (!interpreter->ignoreQuota())

@@ -358,10 +356,9 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
             limits.size_limits = SizeLimits(settings.max_result_rows, settings.max_result_bytes, settings.result_overflow_mode);
         }

-        if (use_processors)
-            pipeline = interpreter->executeWithProcessors();
-        else
-            res = interpreter->execute();
+        res = interpreter->execute();
+        QueryPipeline & pipeline = res.pipeline;
+        bool use_processors = pipeline.initialized();

-        if (res.pipeline.initialized())
-            use_processors = true;
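Taken together, the three executeQueryImpl hunks collapse the old two-way branch into one unconditional call; whether the processors path is in play is now read off the result instead of being decided up front:

    // The '+' lines of the hunks above, assembled for readability
    // (res is the BlockIO declared earlier in the function).
    res = interpreter->execute();
    QueryPipeline & pipeline = res.pipeline;
    bool use_processors = pipeline.initialized();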
@@ -96,7 +96,7 @@ static NamesAndTypesList getColumnsFromTableExpression(const ASTTableExpression
     else if (table_expression.database_and_table_name)
     {
         auto table_id = context.resolveStorageID(table_expression.database_and_table_name);
-        const auto & table = DatabaseCatalog::instance().getTable(table_id);
+        const auto & table = DatabaseCatalog::instance().getTable(table_id, context);
         const auto & columns = table->getColumns();
         names_and_type_list = columns.getOrdinary();
         materialized = columns.getMaterialized();

@@ -96,7 +96,7 @@ std::shared_ptr<InterpreterSelectWithUnionQuery> interpretSubquery(
     else
     {
         auto table_id = context.resolveStorageID(table_expression);
-        const auto & storage = DatabaseCatalog::instance().getTable(table_id);
+        const auto & storage = DatabaseCatalog::instance().getTable(table_id, context);
         columns = storage->getColumns().getOrdinary();
         select_query->replaceDatabaseAndTable(table_id);
     }
@@ -181,13 +181,13 @@ void ASTAlterCommand::formatImpl(
         settings.ostr << " TO ";
         switch (move_destination_type)
         {
-            case PartDestinationType::DISK:
+            case DataDestinationType::DISK:
                 settings.ostr << "DISK ";
                 break;
-            case PartDestinationType::VOLUME:
+            case DataDestinationType::VOLUME:
                 settings.ostr << "VOLUME ";
                 break;
-            case PartDestinationType::TABLE:
+            case DataDestinationType::TABLE:
                 settings.ostr << "TABLE ";
                 if (!to_database.empty())
                 {

@@ -201,7 +201,7 @@ void ASTAlterCommand::formatImpl(
             default:
                 break;
         }
-        if (move_destination_type != PartDestinationType::TABLE)
+        if (move_destination_type != DataDestinationType::TABLE)
         {
             settings.ostr << quoteString(move_destination_name);
         }
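From this point on, the commit renames PartDestinationType to DataDestinationType; the include hunk further down moves the header from Storages/MergeTree/PartDestinationType.h to Storages/DataDestinationType.h. Only four enumerators appear anywhere in this diff, suggesting roughly:

    // Inferred from the enumerators used in this diff; the real header may differ.
    enum class DataDestinationType
    {
        DISK,    /// TTL/MOVE target is a named disk
        VOLUME,  /// ... a named volume
        TABLE,   /// ... another table (MOVE ... TO TABLE)
        DELETE,  /// default for TTL elements, per ParserTTLElement below
    };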
@@ -136,7 +136,7 @@ public:

     bool if_exists = false;     /// option for DROP_COLUMN, MODIFY_COLUMN, COMMENT_COLUMN

-    PartDestinationType move_destination_type;  /// option for MOVE PART/PARTITION
+    DataDestinationType move_destination_type;  /// option for MOVE PART/PARTITION

     String move_destination_name;               /// option for MOVE PART/PARTITION
@@ -15,7 +15,10 @@ public:
     bool all = false;
     Strings except_names;
     bool except_current_user = false;
-    bool id_mode = false;           /// If true then `names` and `except_names` keeps UUIDs, not names.
+
+    bool id_mode = false;           /// true if `names` and `except_names` keep UUIDs, not names.
+    bool can_contain_roles = true;  /// true if this set can contain names of roles.
+    bool can_contain_users = true;  /// true if this set can contain names of users.

     bool empty() const { return names.empty() && !current_user && !all; }
     void replaceCurrentUserTagWithName(const String & current_user_name);
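The header hunk above adds two capability flags beside id_mode (the owning class name is not visible in this extract). Presumably a parsing or validation site can use them to restrict what the set may mention, along the lines of:

    // Hypothetical consumer; `set` stands for the unnamed AST node from the hunk above.
    set.can_contain_roles = true;   /// role names are legal in this clause
    set.can_contain_users = false;  /// user names are not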
@@ -28,11 +28,11 @@ ASTPtr ASTTTLElement::clone() const
 void ASTTTLElement::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
 {
     ttl()->formatImpl(settings, state, frame);
-    if (mode == TTLMode::MOVE && destination_type == PartDestinationType::DISK)
+    if (mode == TTLMode::MOVE && destination_type == DataDestinationType::DISK)
     {
         settings.ostr << " TO DISK " << quoteString(destination_name);
     }
-    else if (mode == TTLMode::MOVE && destination_type == PartDestinationType::VOLUME)
+    else if (mode == TTLMode::MOVE && destination_type == DataDestinationType::VOLUME)
     {
         settings.ostr << " TO VOLUME " << quoteString(destination_name);
     }
@@ -1,8 +1,8 @@
 #pragma once

 #include <Parsers/IAST.h>
-#include <Storages/MergeTree/PartDestinationType.h>
-#include <Storages/MergeTree/TTLMode.h>
+#include <Storages/DataDestinationType.h>
+#include <Storages/TTLMode.h>


 namespace DB

@@ -14,13 +14,13 @@ class ASTTTLElement : public IAST
 {
 public:
     TTLMode mode;
-    PartDestinationType destination_type;
+    DataDestinationType destination_type;
     String destination_name;

     ASTs group_by_key;
     std::vector<std::pair<String, ASTPtr>> group_by_aggregations;

-    ASTTTLElement(TTLMode mode_, PartDestinationType destination_type_, const String & destination_name_)
+    ASTTTLElement(TTLMode mode_, DataDestinationType destination_type_, const String & destination_name_)
         : mode(mode_)
         , destination_type(destination_type_)
         , destination_name(destination_name_)
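Given the constructor above, building the element for a clause like TTL ... TO DISK 'fast' (the case ParserTTLElement handles next) would look roughly like:

    // Hypothetical construction; "fast" is a made-up disk name.
    auto ttl_element = std::make_shared<ASTTTLElement>(
        TTLMode::MOVE, DataDestinationType::DISK, /* destination_name = */ "fast");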
@@ -1471,18 +1471,18 @@ bool ParserTTLElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
         return false;

     TTLMode mode;
-    PartDestinationType destination_type = PartDestinationType::DELETE;
+    DataDestinationType destination_type = DataDestinationType::DELETE;
     String destination_name;

     if (s_to_disk.ignore(pos))
     {
         mode = TTLMode::MOVE;
-        destination_type = PartDestinationType::DISK;
+        destination_type = DataDestinationType::DISK;
     }
     else if (s_to_volume.ignore(pos))
     {
         mode = TTLMode::MOVE;
-        destination_type = PartDestinationType::VOLUME;
+        destination_type = DataDestinationType::VOLUME;
     }
     else if (s_group_by.ignore(pos))
     {
@@ -260,19 +260,19 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
         command->part = true;

         if (s_to_disk.ignore(pos))
-            command->move_destination_type = PartDestinationType::DISK;
+            command->move_destination_type = DataDestinationType::DISK;
         else if (s_to_volume.ignore(pos))
-            command->move_destination_type = PartDestinationType::VOLUME;
+            command->move_destination_type = DataDestinationType::VOLUME;
         else if (s_to_table.ignore(pos))
         {
             if (!parseDatabaseAndTableName(pos, expected, command->to_database, command->to_table))
                 return false;
-            command->move_destination_type = PartDestinationType::TABLE;
+            command->move_destination_type = DataDestinationType::TABLE;
         }
         else
             return false;

-        if (command->move_destination_type != PartDestinationType::TABLE)
+        if (command->move_destination_type != DataDestinationType::TABLE)
         {
             ASTPtr ast_space_name;
             if (!parser_string_literal.parse(pos, ast_space_name, expected))

@@ -289,19 +289,19 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
         command->type = ASTAlterCommand::MOVE_PARTITION;

         if (s_to_disk.ignore(pos))
-            command->move_destination_type = PartDestinationType::DISK;
+            command->move_destination_type = DataDestinationType::DISK;
         else if (s_to_volume.ignore(pos))
-            command->move_destination_type = PartDestinationType::VOLUME;
+            command->move_destination_type = DataDestinationType::VOLUME;
         else if (s_to_table.ignore(pos))
         {
             if (!parseDatabaseAndTableName(pos, expected, command->to_database, command->to_table))
                 return false;
-            command->move_destination_type = PartDestinationType::TABLE;
+            command->move_destination_type = DataDestinationType::TABLE;
         }
         else
             return false;

-        if (command->move_destination_type != PartDestinationType::TABLE)
+        if (command->move_destination_type != DataDestinationType::TABLE)
         {
             ASTPtr ast_space_name;
             if (!parser_string_literal.parse(pos, ast_space_name, expected))
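Both ParserAlterCommand hunks handle the same TO DISK / TO VOLUME / TO TABLE alternatives, once for MOVE PART and once for MOVE PARTITION. For orientation, the statements this grammar accepts look like the following (illustrative identifiers, not taken from this commit):

    // Illustrative ALTER statements matched by the grammar above; names are made up.
    //   ALTER TABLE t MOVE PART 'all_1_1_0' TO VOLUME 'slow'
    //   ALTER TABLE t MOVE PARTITION '2020' TO TABLE backup_db.t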
Some files were not shown because too many files have changed in this diff.