Merge branch 'master' into add_performance_tests

alesapin 2021-11-18 10:12:46 +03:00
commit b74f9e7047
120 changed files with 1313 additions and 724 deletions

View File

@@ -25,6 +25,16 @@ void trim(String & s)
     s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end());
 }
+std::string getEditor()
+{
+    const char * editor = std::getenv("EDITOR");
+    if (!editor || !*editor)
+        editor = "vim";
+    return editor;
+}
 /// Copied from replxx::src/util.cxx::now_ms_str() under the terms of 3-clause BSD license of Replxx.
 /// Copyright (c) 2017-2018, Marcin Konarski (amok at codestation.org)
 /// Copyright (c) 2010, Salvatore Sanfilippo (antirez at gmail dot com)
@@ -123,6 +133,7 @@ ReplxxLineReader::ReplxxLineReader(
     Patterns delimiters_,
     replxx::Replxx::highlighter_callback_t highlighter_)
     : LineReader(history_file_path_, multiline_, std::move(extenders_), std::move(delimiters_)), highlighter(std::move(highlighter_))
+    , editor(getEditor())
 {
     using namespace std::placeholders;
     using Replxx = replxx::Replxx;
@@ -236,14 +247,13 @@ void ReplxxLineReader::addToHistory(const String & line)
         rx.print("Unlock of history file failed: %s\n", errnoToString(errno).c_str());
 }
-int ReplxxLineReader::execute(const std::string & command)
+/// See comments in ShellCommand::executeImpl()
+/// (for the vfork via dlsym())
+int ReplxxLineReader::executeEditor(const std::string & path)
 {
-    std::vector<char> argv0("sh", &("sh"[3]));
-    std::vector<char> argv1("-c", &("-c"[3]));
-    std::vector<char> argv2(command.data(), command.data() + command.size() + 1);
-    const char * filename = "/bin/sh";
-    char * const argv[] = {argv0.data(), argv1.data(), argv2.data(), nullptr};
+    std::vector<char> argv0(editor.data(), editor.data() + editor.size() + 1);
+    std::vector<char> argv1(path.data(), path.data() + path.size() + 1);
+    char * const argv[] = {argv0.data(), argv1.data(), nullptr};
     static void * real_vfork = dlsym(RTLD_DEFAULT, "vfork");
     if (!real_vfork)
@@ -260,6 +270,7 @@ int ReplxxLineReader::execute(const std::string & command)
         return -1;
     }
+    /// Child
     if (0 == pid)
     {
         sigset_t mask;
@@ -267,16 +278,26 @@ int ReplxxLineReader::execute(const std::string & command)
         sigprocmask(0, nullptr, &mask);
         sigprocmask(SIG_UNBLOCK, &mask, nullptr);
-        execv(filename, argv);
+        execvp(editor.c_str(), argv);
+        rx.print("Cannot execute %s: %s\n", editor.c_str(), errnoToString(errno).c_str());
         _exit(-1);
     }
     int status = 0;
-    if (-1 == waitpid(pid, &status, 0))
+    do
     {
-        rx.print("Cannot waitpid: %s\n", errnoToString(errno).c_str());
-        return -1;
-    }
+        int exited_pid = waitpid(pid, &status, 0);
+        if (exited_pid == -1)
+        {
+            if (errno == EINTR)
+                continue;
+            rx.print("Cannot waitpid: %s\n", errnoToString(errno).c_str());
+            return -1;
+        }
+        else
+            break;
+    } while (true);
     return status;
 }
@@ -290,10 +311,6 @@ void ReplxxLineReader::openEditor()
         return;
     }
-    const char * editor = std::getenv("EDITOR");
-    if (!editor || !*editor)
-        editor = "vim";
     replxx::Replxx::State state(rx.get_state());
     size_t bytes_written = 0;
@@ -316,7 +333,7 @@ void ReplxxLineReader::openEditor()
         return;
     }
-    if (0 == execute(fmt::format("{} {}", editor, filename)))
+    if (0 == executeEditor(filename))
     {
         try
         {
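The `do { ... } while (true)` loop above is the usual defence against `waitpid` interruption: while the parent waits for the editor to exit, a signal delivery makes `waitpid` fail with `EINTR` even though the child is still running. A standalone sketch of the same retry pattern (illustrative names, not part of this commit):

```cpp
#include <cerrno>
#include <sys/types.h>
#include <sys/wait.h>

/// Wait for `pid`, retrying when the call is merely interrupted by a signal.
/// Returns the wait status, or -1 on a real error such as ECHILD
/// (errno is left set by waitpid).
int waitChildRetryingEINTR(pid_t pid)
{
    int status = 0;
    while (waitpid(pid, &status, 0) == -1)
    {
        if (errno == EINTR)
            continue; /// interrupted before the child changed state: retry
        return -1;    /// genuine failure
    }
    return status;
}
```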

View File

@@ -22,7 +22,7 @@ public:
 private:
     InputStatus readOneLine(const String & prompt) override;
     void addToHistory(const String & line) override;
-    int execute(const std::string & command);
+    int executeEditor(const std::string & path);
     void openEditor();
     replxx::Replxx rx;
@@ -31,4 +31,6 @@ private:
     // used to call flock() to synchronize multiple clients using same history file
     int history_file_fd = -1;
     bool bracketed_paste_enabled = false;
+
+    std::string editor;
 };

View File

@@ -37,6 +37,12 @@ function configure()
     # install test configs
     /usr/share/clickhouse-test/config/install.sh
+    # avoid too slow startup
+    sudo cat /etc/clickhouse-server/config.d/keeper_port.xml | sed "s|<snapshot_distance>100000</snapshot_distance>|<snapshot_distance>10000</snapshot_distance>|" > /etc/clickhouse-server/config.d/keeper_port.xml.tmp
+    sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
+    sudo chown clickhouse /etc/clickhouse-server/config.d/keeper_port.xml
+    sudo chgrp clickhouse /etc/clickhouse-server/config.d/keeper_port.xml
+
     # for clickhouse-server (via service)
     echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment
     # for clickhouse-client
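For context: ClickHouse Keeper takes a snapshot every `snapshot_distance` log entries, so dropping the value from 100000 to 10000 makes snapshots much more frequent and leaves far less log to replay on restart, which is what the "avoid too slow startup" comment refers to. The tenfold reduction is the only thing the diff guarantees; the actual startup effect depends on the workload.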

View File

@@ -11,7 +11,8 @@ This engine provides integration with [Amazon S3](https://aws.amazon.com/s3/) ec
 ``` sql
 CREATE TABLE s3_engine_table (name String, value UInt32)
 ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression])
+[SETTINGS ...]
 ```
 **Engine parameters**
@@ -23,21 +24,13 @@ ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compressi
 **Example**
-1. Set up the `s3_engine_table` table:
 ``` sql
-CREATE TABLE s3_engine_table (name String, value UInt32) ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip');
-```
+CREATE TABLE s3_engine_table (name String, value UInt32)
+ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip')
+SETTINGS input_format_with_names_use_header = 0;
+
-2. Fill file:
-``` sql
 INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3);
-```
+
-3. Query the data:
-``` sql
 SELECT * FROM s3_engine_table LIMIT 2;
 ```
@@ -73,57 +66,54 @@ For more information about virtual columns see [here](../../../engines/table-eng
 Constructions with `{}` are similar to the [remote](../../../sql-reference/table-functions/remote.md) table function.
-**Example**
+!!! warning "Warning"
+    If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
+
-1. Suppose we have several files in CSV format with the following URIs on S3:
-- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv'
-- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv'
-- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv'
-- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv'
-- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv'
-- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv'
-There are several ways to make a table consisting of all six files:
-The first way:
-``` sql
-CREATE TABLE table_with_range (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV');
-```
-Another way:
-``` sql
-CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV');
-```
-Table consists of all the files in both directories (all files should satisfy format and schema described in query):
-``` sql
-CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV');
-```
-If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
-**Example**
+**Example with wildcards 1**
+
 Create table with files named `file-000.csv`, `file-001.csv`, … , `file-999.csv`:
 ``` sql
-CREATE TABLE big_table (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV');
+CREATE TABLE big_table (name String, value UInt32)
+ENGINE = S3('https://storage.yandexcloud.net/my-bucket/my_folder/file-{000..999}.csv', 'CSV');
 ```
-## Virtual Columns {#virtual-columns}
-- `_path` — Path to the file.
-- `_file` — Name of the file.
-**See Also**
-- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
+**Example with wildcards 2**
+
+Suppose we have several files in CSV format with the following URIs on S3:
+
+- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_1.csv'
+- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_2.csv'
+- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_3.csv'
+- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_1.csv'
+- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_2.csv'
+- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_3.csv'
+
+There are several ways to make a table consisting of all six files:
+
+1. Specify the range of file postfixes:
+``` sql
+CREATE TABLE table_with_range (name String, value UInt32)
+ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/some_file_{1..3}', 'CSV');
+```
+2. Take all files with `some_file_` prefix (there should be no extra files with such prefix in both folders):
+``` sql
+CREATE TABLE table_with_question_mark (name String, value UInt32)
+ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/some_file_?', 'CSV');
+```
+3. Take all the files in both folders (all files should satisfy format and schema described in query):
+``` sql
+CREATE TABLE table_with_asterisk (name String, value UInt32)
+ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/*', 'CSV');
+```
-## S3-related settings {#settings}
+## S3-related Settings {#settings}
 The following settings can be set before query execution or placed into configuration file.
@@ -165,49 +155,6 @@ The following settings can be specified in configuration file for given endpoint
 </s3>
 ```
-## Usage {#usage-examples}
-
-Suppose we have several files in CSV format with the following URIs on S3:
-
-- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv'
-- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv'
-- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv'
-- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv'
-- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv'
-- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv'
-
-1. There are several ways to make a table consisting of all six files:
-``` sql
-CREATE TABLE table_with_range (name String, value UInt32)
-ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV');
-```
-2. Another way:
-``` sql
-CREATE TABLE table_with_question_mark (name String, value UInt32)
-ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV');
-```
-3. Table consists of all the files in both directories (all files should satisfy format and schema described in query):
-``` sql
-CREATE TABLE table_with_asterisk (name String, value UInt32)
-ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV');
-```
-!!! warning "Warning"
-    If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
-4. Create table with files named `file-000.csv`, `file-001.csv`, … , `file-999.csv`:
-``` sql
-CREATE TABLE big_table (name String, value UInt32)
-ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV');
-```
 ## See also
 - [s3 table function](../../../sql-reference/table-functions/s3.md)

View File

@@ -108,6 +108,7 @@ toc_title: Adopters
 | <a href="https://panelbear.com/" class="favicon">Panelbear</a> | Analytics | Monitoring and Analytics | — | — | [Tech Stack, November 2020](https://panelbear.com/blog/tech-stack/) |
 | <a href="https://www.percent.cn/" class="favicon">Percent 百分点</a> | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) |
 | <a href="https://www.percona.com/" class="favicon">Percona</a> | Performance analysis | Percona Monitoring and Management | — | — | [Official website, Mar 2020](https://www.percona.com/blog/2020/03/30/advanced-query-analysis-in-percona-monitoring-and-management-with-direct-clickhouse-access/) |
+| <a href="https://piwik.pro/" class="favicon">Piwik PRO</a> | Web Analytics | Main Product | — | — | [Official website, Dec 2018](https://piwik.pro/blog/piwik-pro-clickhouse-faster-efficient-reports/) |
 | <a href="https://plausible.io/" class="favicon">Plausible</a> | Analytics | Main Product | — | — | [Blog post, June 2020](https://twitter.com/PlausibleHQ/status/1273889629087969280) |
 | <a href="https://posthog.com/" class="favicon">PostHog</a> | Product Analytics | Main Product | — | — | [Release Notes, Oct 2020](https://posthog.com/blog/the-posthog-array-1-15-0) |
 | <a href="https://postmates.com/" class="favicon">Postmates</a> | Delivery | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=188) |
@@ -175,5 +176,6 @@ toc_title: Adopters
 | <a href="https://domclick.ru/" class="favicon">ДомКлик</a> | Real Estate | — | — | — | [Article in Russian, October 2021](https://habr.com/ru/company/domclick/blog/585936/) |
 | <a href="https://www.deepl.com/" class="favicon">Deepl</a> | Machine Learning | — | — | — | [Video, October 2021](https://www.youtube.com/watch?v=WIYJiPwxXdM&t=1182s) |
 | <a href="https://vercel.com/" class="favicon">Vercel</a> | Traffic and Performance Analytics | — | — | — | Direct reference, October 2021 |
+| <a href="https://www.your-analytics.org/" class="favicon">YourAnalytics</a> | Web Analytics | — | — | — | [Tweet, Nov 2021](https://twitter.com/mikenikles/status/1460860140249235461) |
 [Original article](https://clickhouse.com/docs/en/introduction/adopters/) <!--hide-->

View File

@@ -11,7 +11,8 @@ toc_title: S3
 ``` sql
 CREATE TABLE s3_engine_table (name String, value UInt32)
 ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression])
+[SETTINGS ...]
 ```
 **Engine parameters**
@@ -24,9 +25,12 @@ ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compressi
 **Example**
 ``` sql
 CREATE TABLE s3_engine_table (name String, value UInt32)
-ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip');
+ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip')
+SETTINGS input_format_with_names_use_header = 0;
+
 INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3);
+
 SELECT * FROM s3_engine_table LIMIT 2;
 ```
@@ -54,7 +58,7 @@ SELECT * FROM s3_engine_table LIMIT 2;
 ## Wildcards {#wildcards-in-path}
-The `path` argument can point to multiple files, using wildcards («подстановочные знаки»). To be processed, a file must exist and match the whole path pattern. The list of files is determined during `SELECT` (not at `CREATE` time).
+The `path` argument can point to multiple files, using wildcards («символы подстановки»). To be processed, a file must exist and match the whole path pattern. The list of files is determined during `SELECT` (not at `CREATE` time).
 - `*` — substitutes any number of any characters except `/`, including the empty string.
 - `?` — substitutes any single character.
@@ -63,6 +67,52 @@ SELECT * FROM s3_engine_table LIMIT 2;
 Constructions with `{}` are similar to the [remote](../../../sql-reference/table-functions/remote.md) table function.
+!!! warning "Note"
+    If the list of files contains number ranges with leading zeros, use the construction with braces for each digit separately, or use `?`.
+
+**Wildcard example 1**
+
+A table with data from files named `file-000.csv`, `file-001.csv`, … , `file-999.csv`:
+
+``` sql
+CREATE TABLE big_table (name String, value UInt32)
+ENGINE = S3('https://storage.yandexcloud.net/my-bucket/my_folder/file-{000..999}.csv', 'CSV');
+```
+
+**Wildcard example 2**
+
+Suppose there are several files in CSV format with the following URIs on S3:
+
+- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_1.csv'
+- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_2.csv'
+- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_3.csv'
+- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_1.csv'
+- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_2.csv'
+- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_3.csv'
+
+There are several ways to create a table that includes all six files:
+
+1. Specify the range of file suffixes:
+``` sql
+CREATE TABLE table_with_range (name String, value UInt32)
+ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/some_file_{1..3}', 'CSV');
+```
+2. The table contains all files with the `some_file_` prefix (the folders must contain no other files with that prefix):
+``` sql
+CREATE TABLE table_with_question_mark (name String, value UInt32)
+ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/some_file_?', 'CSV');
+```
+3. The table contains all files in both folders (the folders must contain no other files matching the format and schema described in the query):
+``` sql
+CREATE TABLE table_with_asterisk (name String, value UInt32)
+ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/*', 'CSV');
+```
 ## S3 engine settings {#s3-settings}
 The following settings can be set before query execution or placed in the configuration file:
@@ -108,47 +158,6 @@ SELECT * FROM s3_engine_table LIMIT 2;
 </s3>
 ```
-## Usage examples {#usage-examples}
-
-Suppose we have several files in CSV format with the following URIs on S3:
-
-- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv'
-- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv'
-- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv'
-- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv'
-- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv'
-- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv'
-
-1. There are several ways to create a table that includes all six files:
-``` sql
-CREATE TABLE table_with_range (name String, value UInt32)
-ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV');
-```
-2. Another way:
-``` sql
-CREATE TABLE table_with_question_mark (name String, value UInt32)
-ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV');
-```
-3. The table contains all files in both folders (all files must match the format and schema described in the query):
-``` sql
-CREATE TABLE table_with_asterisk (name String, value UInt32)
-ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV');
-```
-If the list of files contains number ranges with leading zeros, use the construction with braces for each digit separately, or use `?`.
-4. Create a table from files named `file-000.csv`, `file-001.csv`, … , `file-999.csv`:
-``` sql
-CREATE TABLE big_table (name String, value UInt32)
-ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV');
-```
 **See also**
 - [s3 table function](../../../sql-reference/table-functions/s3.md)

View File

@@ -9,7 +9,7 @@ toc_title: AggregatingMergeTree
 `AggregatingMergeTree` tables can be used for incremental data aggregation, including for aggregating materialized views.
-The engine processes all columns of the [AggregateFunction](../../../engines/table-engines/mergetree-family/aggregatingmergetree.md) type.
+The engine processes all columns of the [AggregateFunction](../../../sql-reference/data-types/aggregatefunction.md) type.
 Using `AggregatingMergeTree` is justified only when it reduces the number of rows by orders of magnitude.

View File

@@ -132,7 +132,7 @@ ClickHouse may merge the data parts so that
 [(1, 100), (2, 150)] + [(1, -100)] -> [(2, 150)]
 ```
-When selecting data, use the [sumMap(key, value)](../../../engines/table-engines/mergetree-family/summingmergetree.md) function for aggregation of `Map`.
+When selecting data, use the [sumMap(key, value)](../../../sql-reference/aggregate-functions/reference/summap.md#agg_functions-summap) function for aggregation of `Map`.
 For a nested data structure, you do not need to specify its columns in the tuple of columns for summation.

View File

@@ -1,3 +1,3 @@
-# Settings {#set}
+# Set {#set}
 Can be used in the right-hand side of an IN expression.

View File

@@ -1003,7 +1003,6 @@ void Client::addOptions(OptionsDescription & options_description)
         ("password", po::value<std::string>()->implicit_value("\n", ""), "password")
         ("ask-password", "ask-password")
         ("quota_key", po::value<std::string>(), "A string to differentiate quotas when the user have keyed quotas configured on server")
-        ("pager", po::value<std::string>(), "pager")
         ("testmode,T", "enable test hints in comments")
         ("max_client_network_bandwidth", po::value<int>(), "the maximum speed of data exchange over the network for the client in bytes per second.")
@@ -1104,8 +1103,6 @@ void Client::processOptions(const OptionsDescription & options_description,
         config().setString("host", options["host"].as<std::string>());
     if (options.count("interleave-queries-file"))
         interleave_queries_files = options["interleave-queries-file"].as<std::vector<std::string>>();
-    if (options.count("pager"))
-        config().setString("pager", options["pager"].as<std::string>());
     if (options.count("port") && !options["port"].defaulted())
         config().setInt("port", options["port"].as<int>());
     if (options.count("secure"))

View File

@@ -744,8 +744,8 @@ std::shared_ptr<ASTCreateQuery> rewriteCreateQueryStorage(const ASTPtr & create_
     if (create.storage == nullptr || new_storage_ast == nullptr)
         throw Exception("Storage is not specified", ErrorCodes::LOGICAL_ERROR);
-    res->database = new_table.first;
-    res->table = new_table.second;
+    res->setDatabase(new_table.first);
+    res->setTable(new_table.second);
     res->children.clear();
     res->set(res->columns_list, create.columns_list->clone());
@@ -1659,7 +1659,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
 void ClusterCopier::dropAndCreateLocalTable(const ASTPtr & create_ast)
 {
     const auto & create = create_ast->as<ASTCreateQuery &>();
-    dropLocalTableIfExists({create.database, create.table});
+    dropLocalTableIfExists({create.getDatabase(), create.getTable()});
     auto create_context = Context::createCopy(getContext());
@@ -1671,8 +1671,8 @@ void ClusterCopier::dropLocalTableIfExists(const DatabaseAndTableName & table_na
 {
     auto drop_ast = std::make_shared<ASTDropQuery>();
     drop_ast->if_exists = true;
-    drop_ast->database = table_name.first;
-    drop_ast->table = table_name.second;
+    drop_ast->setDatabase(table_name.first);
+    drop_ast->setTable(table_name.second);
     auto drop_context = Context::createCopy(getContext());

View File

@@ -312,11 +312,11 @@ namespace
     String getDataPathInBackup(const IAST & create_query)
     {
         const auto & create = create_query.as<const ASTCreateQuery &>();
-        if (create.table.empty())
+        if (!create.table)
             return {};
         if (create.temporary)
-            return getDataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.table});
-        return getDataPathInBackup({create.database, create.table});
+            return getDataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.getTable()});
+        return getDataPathInBackup({create.getDatabase(), create.getTable()});
     }
     String getMetadataPathInBackup(const DatabaseAndTableName & table_name)
@@ -336,11 +336,11 @@ namespace
     String getMetadataPathInBackup(const IAST & create_query)
     {
         const auto & create = create_query.as<const ASTCreateQuery &>();
-        if (create.table.empty())
-            return getMetadataPathInBackup(create.database);
+        if (!create.table)
+            return getMetadataPathInBackup(create.getDatabase());
         if (create.temporary)
-            return getMetadataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.table});
-        return getMetadataPathInBackup({create.database, create.table});
+            return getMetadataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.getTable()});
+        return getMetadataPathInBackup({create.getDatabase(), create.getTable()});
     }
     void backupCreateQuery(const IAST & create_query, BackupEntries & backup_entries)
@@ -419,7 +419,7 @@ namespace
         /// We create and execute `create` query for the database name.
         auto create_query = std::make_shared<ASTCreateQuery>();
-        create_query->database = database_name;
+        create_query->setDatabase(database_name);
         create_query->if_not_exists = true;
         InterpreterCreateQuery create_interpreter{create_query, context};
         create_interpreter.execute();
@@ -460,7 +460,7 @@ namespace
         restore_tasks.emplace_back([table_name, new_create_query, partitions, context, backup]() -> RestoreDataTasks
         {
-            DatabaseAndTableName new_table_name{new_create_query->database, new_create_query->table};
+            DatabaseAndTableName new_table_name{new_create_query->getDatabase(), new_create_query->getTable()};
             if (new_create_query->temporary)
                 new_table_name.first = DatabaseCatalog::TEMPORARY_DATABASE;
@@ -536,7 +536,7 @@ namespace
         restore_tasks.emplace_back([database_name, new_create_query, except_list, context, backup, renaming_config]() -> RestoreDataTasks
         {
-            const String & new_database_name = new_create_query->database;
+            const String & new_database_name = new_create_query->getDatabase();
             context->checkAccess(AccessType::SHOW_TABLES, new_database_name);
             if (!DatabaseCatalog::instance().isDatabaseExist(new_database_name))

View File

@@ -48,21 +48,23 @@ namespace
     {
         if (create.temporary)
         {
-            if (create.table.empty())
+            if (!create.table)
                 throw Exception(ErrorCodes::LOGICAL_ERROR, "Table name specified in the CREATE TEMPORARY TABLE query must not be empty");
-            create.table = data.renaming_config->getNewTemporaryTableName(create.table);
+            create.setTable(data.renaming_config->getNewTemporaryTableName(create.getTable()));
         }
-        else if (create.table.empty())
+        else if (!create.table)
         {
-            if (create.database.empty())
+            if (!create.database)
                 throw Exception(ErrorCodes::LOGICAL_ERROR, "Database name specified in the CREATE DATABASE query must not be empty");
-            create.database = data.renaming_config->getNewDatabaseName(create.database);
+            create.setDatabase(data.renaming_config->getNewDatabaseName(create.getDatabase()));
         }
         else
         {
-            if (create.database.empty())
+            if (!create.database)
                 throw Exception(ErrorCodes::LOGICAL_ERROR, "Database name specified in the CREATE TABLE query must not be empty");
-            std::tie(create.database, create.table) = data.renaming_config->getNewTableName({create.database, create.table});
+            auto table_and_database_name = data.renaming_config->getNewTableName({create.getDatabase(), create.getTable()});
+            create.setDatabase(table_and_database_name.first);
+            create.setTable(table_and_database_name.second);
         }
         create.uuid = UUIDHelpers::Nil;
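The pattern repeated throughout this commit is that `ASTCreateQuery` and related ASTs stop exposing `database` and `table` as plain `String` members: presence is now tested as `if (!create.table)` instead of `create.table.empty()`, and reads and writes go through `getTable()`/`setTable()` style accessors. A minimal illustrative sketch of that interface shape (hypothetical `CreateQuerySketch`, not the real ClickHouse class, which stores child AST nodes rather than `std::optional`):

```cpp
#include <optional>
#include <string>
#include <utility>

/// Hypothetical stand-in for ASTCreateQuery after the refactoring:
/// emptiness checks become `if (!q.table)`, access goes through accessors.
struct CreateQuerySketch
{
    std::optional<std::string> database;
    std::optional<std::string> table;

    std::string getDatabase() const { return database.value_or(""); }
    std::string getTable() const { return table.value_or(""); }
    void setDatabase(std::string name) { database = std::move(name); }
    void setTable(std::string name) { table = std::move(name); }
};
```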

View File

@@ -490,7 +490,7 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa
         ReplaceQueryParameterVisitor visitor(query_parameters);
         visitor.visit(parsed_query);
-        /// Get new query after substitutions. Note that it cannot be done for INSERT query with embedded data.
+        /// Get new query after substitutions.
         query = serializeAST(*parsed_query);
     }
@@ -824,6 +824,17 @@ bool ClientBase::receiveSampleBlock(Block & out, ColumnsDescription & columns_de
 void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr parsed_query)
 {
+    auto query = query_to_execute;
+    if (!query_parameters.empty())
+    {
+        /// Replace ASTQueryParameter with ASTLiteral for prepared statements.
+        ReplaceQueryParameterVisitor visitor(query_parameters);
+        visitor.visit(parsed_query);
+
+        /// Get new query after substitutions.
+        query = serializeAST(*parsed_query);
+    }
+
     /// Process the query that requires transferring data blocks to the server.
     const auto parsed_insert_query = parsed_query->as<ASTInsertQuery &>();
     if ((!parsed_insert_query.data && !parsed_insert_query.infile) && (is_interactive || (!stdin_is_a_tty && std_in.eof())))
@@ -831,7 +842,7 @@ void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr pars
     connection->sendQuery(
         connection_parameters.timeouts,
-        query_to_execute,
+        query,
         global_context->getCurrentQueryId(),
         query_processing_stage,
         &global_context->getSettingsRef(),
@@ -884,8 +895,7 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des
         /// Get name of this file (path to file)
         const auto & in_file_node = parsed_insert_query->infile->as<ASTLiteral &>();
         const auto in_file = in_file_node.value.safeGet<std::string>();
-        /// Get name of table
-        const auto table_name = parsed_insert_query->table_id.getTableName();
+
         std::string compression_method;
         /// Compression method can be specified in query
         if (parsed_insert_query->compression)
@@ -1703,6 +1713,7 @@ void ClientBase::init(int argc, char ** argv)
         ("profile-events-delay-ms", po::value<UInt64>()->default_value(profile_events.delay_ms), "Delay between printing `ProfileEvents` packets (-1 - print only totals, 0 - print every single packet)")
         ("interactive", "Process queries-file or --query query and start interactive mode")
+        ("pager", po::value<std::string>(), "Pipe all output into this command (less or similar)")
         ;
     addOptions(options_description);
@@ -1774,6 +1785,8 @@ void ClientBase::init(int argc, char ** argv)
         config().setBool("verbose", true);
     if (options.count("interactive"))
         config().setBool("interactive", true);
+    if (options.count("pager"))
+        config().setString("pager", options["pager"].as<std::string>());
     if (options.count("log-level"))
         Poco::Logger::root().setLevel(options["log-level"].as<std::string>());
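Net effect of the `processInsertQuery` hunk: query parameters are now substituted for INSERT queries as well, and the serialized query with literals (`query`, not the original `query_to_execute`) is what gets sent to the server. An illustrative invocation, with made-up table and parameter names: `clickhouse-client --param_id=42 --query 'INSERT INTO t SELECT {id:UInt64}'`.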

View File

@@ -12,6 +12,8 @@
 #include <Common/Exception.h>
 #include <Common/setThreadName.h>
+#define THREAD_NAME_SIZE 16
+
 namespace DB
 {
@@ -23,13 +25,13 @@ namespace ErrorCodes
 /// Cache thread_name to avoid prctl(PR_GET_NAME) for query_log/text_log
-static thread_local std::string thread_name;
+static thread_local char thread_name[THREAD_NAME_SIZE]{};
 void setThreadName(const char * name)
 {
 #ifndef NDEBUG
-    if (strlen(name) > 15)
+    if (strlen(name) > THREAD_NAME_SIZE - 1)
         throw DB::Exception("Thread name cannot be longer than 15 bytes", DB::ErrorCodes::PTHREAD_ERROR);
 #endif
@@ -45,28 +47,25 @@ void setThreadName(const char * name)
 #endif
         DB::throwFromErrno("Cannot set thread name with prctl(PR_SET_NAME, ...)", DB::ErrorCodes::PTHREAD_ERROR);
-    thread_name = name;
+    memcpy(thread_name, name, 1 + strlen(name));
 }
-const std::string & getThreadName()
+const char * getThreadName()
 {
-    if (!thread_name.empty())
+    if (thread_name[0])
         return thread_name;
-    thread_name.resize(16);
 #if defined(__APPLE__) || defined(OS_SUNOS)
-    if (pthread_getname_np(pthread_self(), thread_name.data(), thread_name.size()))
+    if (pthread_getname_np(pthread_self(), thread_name, THREAD_NAME_SIZE))
         throw DB::Exception("Cannot get thread name with pthread_getname_np()", DB::ErrorCodes::PTHREAD_ERROR);
 #elif defined(__FreeBSD__)
     // TODO: make test. freebsd will have this function soon https://freshbsd.org/commit/freebsd/r337983
-    // if (pthread_get_name_np(pthread_self(), thread_name.data(), thread_name.size()))
+    // if (pthread_get_name_np(pthread_self(), thread_name, THREAD_NAME_SIZE))
     //     throw DB::Exception("Cannot get thread name with pthread_get_name_np()", DB::ErrorCodes::PTHREAD_ERROR);
 #else
-    if (0 != prctl(PR_GET_NAME, thread_name.data(), 0, 0, 0))
+    if (0 != prctl(PR_GET_NAME, thread_name, 0, 0, 0))
         DB::throwFromErrno("Cannot get thread name with prctl(PR_GET_NAME)", DB::ErrorCodes::PTHREAD_ERROR);
 #endif
-    thread_name.resize(std::strlen(thread_name.data()));
     return thread_name;
 }
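The fixed `THREAD_NAME_SIZE` of 16 matches the Linux limit: `prctl(PR_SET_NAME)` accepts at most 15 name bytes plus the terminating NUL, which is also why the length check uses `THREAD_NAME_SIZE - 1`. A standalone sketch of the same caching pattern (illustrative names, error handling reduced to a boolean):

```cpp
#include <cstddef>
#include <cstring>
#include <sys/prctl.h>

constexpr size_t kThreadNameSize = 16; /// 15 name bytes + NUL, the Linux limit

/// Thread-local cache so later reads don't need another prctl(PR_GET_NAME).
static thread_local char cached_thread_name[kThreadNameSize]{};

bool setThreadNameSketch(const char * name)
{
    if (std::strlen(name) > kThreadNameSize - 1)
        return false; /// too long for the kernel; the real code throws here
    if (0 != prctl(PR_SET_NAME, name, 0, 0, 0))
        return false; /// the real code throws from errno instead
    std::memcpy(cached_thread_name, name, std::strlen(name) + 1);
    return true;
}
```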

View File

@@ -7,4 +7,4 @@
  */
 void setThreadName(const char * name);
-const std::string & getThreadName();
+const char * getThreadName();

View File

@@ -44,7 +44,7 @@
 /// The boundary on which the blocks for asynchronous file operations should be aligned.
 #define DEFAULT_AIO_FILE_BLOCK_SIZE 4096
-#define DEFAULT_HTTP_READ_BUFFER_TIMEOUT 1800
+#define DEFAULT_HTTP_READ_BUFFER_TIMEOUT 180
 #define DEFAULT_HTTP_READ_BUFFER_CONNECTION_TIMEOUT 1
 /// Maximum number of http-connections between two endpoints
 /// the number is unmotivated
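For scale: assuming the value is in seconds (it is used to construct HTTP read timeouts), this lowers the default HTTP read buffer timeout from 30 minutes to 3 minutes.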

View File

@@ -295,9 +295,9 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora
     try
     {
         std::unique_lock lock{mutex};
-        if (query.database != database_name)
+        if (query.getDatabase() != database_name)
             throw Exception(ErrorCodes::UNKNOWN_DATABASE, "Database was renamed to `{}`, cannot create table in `{}`",
-                            database_name, query.database);
+                            database_name, query.getDatabase());
         /// Do some checks before renaming file from .tmp to .sql
         not_in_use = cleanupDetachedTables();
         assertDetachedTableNotInUse(query.uuid);
@@ -314,8 +314,8 @@
         /// It throws if `table_metadata_path` already exists (it's possible if table was detached)
         renameNoReplace(table_metadata_tmp_path, table_metadata_path); /// Commit point (a sort of)
-        attachTableUnlocked(query.table, table, lock); /// Should never throw
-        table_name_to_path.emplace(query.table, table_data_path);
+        attachTableUnlocked(query.getTable(), table, lock); /// Should never throw
+        table_name_to_path.emplace(query.getTable(), table_data_path);
     }
     catch (...)
     {
@@ -325,7 +325,7 @@
         throw;
     }
     if (table->storesDataOnDisk())
-        tryCreateSymlink(query.table, table_data_path);
+        tryCreateSymlink(query.getTable(), table_data_path);
 }
 void DatabaseAtomic::commitAlterTable(const StorageID & table_id, const String & table_metadata_tmp_path, const String & table_metadata_path,

View File

@@ -103,7 +103,7 @@ static inline ValueType safeGetLiteralValue(const ASTPtr &ast, const String &eng
 DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String & metadata_path, ContextPtr context)
 {
     auto * engine_define = create.storage;
-    const String & database_name = create.database;
+    const String & database_name = create.getDatabase();
     const String & engine_name = engine_define->engine->name;
     const UUID & uuid = create.uuid;

View File

@@ -75,7 +75,7 @@ void DatabaseMemory::dropTable(
 ASTPtr DatabaseMemory::getCreateDatabaseQuery() const
 {
     auto create_query = std::make_shared<ASTCreateQuery>();
-    create_query->database = getDatabaseName();
+    create_query->setDatabase(getDatabaseName());
     create_query->set(create_query->storage, std::make_shared<ASTStorage>());
     create_query->storage->set(create_query->storage->engine, makeASTFunction(getEngineName()));

View File

@@ -42,7 +42,7 @@ public:
     /// TODO May be it's better to use DiskMemory for such tables.
     /// To save data on disk it's possible to explicitly CREATE DATABASE db ENGINE=Ordinary in clickhouse-local.
     String getTableDataPath(const String & table_name) const override { return data_path + escapeForFileName(table_name) + "/"; }
-    String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.table); }
+    String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.getTable()); }
     UUID tryGetTableUUID(const String & table_name) const override;

View File

@@ -51,7 +51,7 @@ std::pair<String, StoragePtr> createTableFromAST(
     bool force_restore)
 {
     ast_create_query.attach = true;
-    ast_create_query.database = database_name;
+    ast_create_query.setDatabase(database_name);
     if (ast_create_query.as_table_function)
     {
@@ -60,9 +60,9 @@
         ColumnsDescription columns;
         if (ast_create_query.columns_list && ast_create_query.columns_list->columns)
             columns = InterpreterCreateQuery::getColumnsDescription(*ast_create_query.columns_list->columns, context, true);
-        StoragePtr storage = table_function->execute(ast_create_query.as_table_function, context, ast_create_query.table, std::move(columns));
+        StoragePtr storage = table_function->execute(ast_create_query.as_table_function, context, ast_create_query.getTable(), std::move(columns));
         storage->renameInMemory(ast_create_query);
-        return {ast_create_query.table, storage};
+        return {ast_create_query.getTable(), storage};
     }
     ColumnsDescription columns;
@@ -82,7 +82,7 @@
     return
     {
-        ast_create_query.table,
+        ast_create_query.getTable(),
         StorageFactory::instance().get(
             ast_create_query,
             table_data_path_relative,
@@ -112,7 +112,7 @@ String getObjectDefinitionFromCreateQuery(const ASTPtr & query)
     /// We remove everything that is not needed for ATTACH from the query.
     assert(!create->temporary);
-    create->database.clear();
+    create->database.reset();
     create->as_database.clear();
     create->as_table.clear();
     create->if_not_exists = false;
@@ -129,7 +129,7 @@ String getObjectDefinitionFromCreateQuery(const ASTPtr & query)
     create->out_file = nullptr;
     if (create->uuid != UUIDHelpers::Nil)
-        create->table = TABLE_WITH_UUID_NAME_PLACEHOLDER;
+        create->setTable(TABLE_WITH_UUID_NAME_PLACEHOLDER);
     WriteBufferFromOwnString statement_buf;
     formatAST(*create, statement_buf, false);
@@ -161,7 +161,7 @@ void DatabaseOnDisk::createTable(
 {
     const auto & settings = local_context->getSettingsRef();
     const auto & create = query->as<ASTCreateQuery &>();
-    assert(table_name == create.table);
+    assert(table_name == create.getTable());
     /// Create a file with metadata if necessary - if the query is not ATTACH.
     /// Write the query of `ATTACH table` to it.
@@ -251,7 +251,7 @@ void DatabaseOnDisk::commitCreateTable(const ASTCreateQuery & query, const Stora
     try
     {
         /// Add a table to the map of known tables.
-        attachTable(query.table, table, getTableDataPath(query));
+        attachTable(query.getTable(), table, getTableDataPath(query));
         /// If it was ATTACH query and file with table metadata already exist
         /// (so, ATTACH is done after DETACH), then rename atomically replaces old file with new one.
@@ -382,8 +382,8 @@ void DatabaseOnDisk::renameTable(
     table_metadata_path = getObjectMetadataPath(table_name);
     attach_query = parseQueryFromMetadata(log, local_context, table_metadata_path);
     auto & create = attach_query->as<ASTCreateQuery &>();
-    create.database = to_database.getDatabaseName();
-    create.table = to_table_name;
+    create.setDatabase(to_database.getDatabaseName());
+    create.setTable(to_table_name);
     if (from_ordinary_to_atomic)
         create.uuid = UUIDHelpers::generateV4();
     if (from_atomic_to_ordinary)
@@ -458,7 +458,7 @@ ASTPtr DatabaseOnDisk::getCreateDatabaseQuery() const
         ast = parseQueryFromMetadata(log, getContext(), database_metadata_path, true);
         auto & ast_create_query = ast->as<ASTCreateQuery &>();
         ast_create_query.attach = false;
-        ast_create_query.database = database_name;
+        ast_create_query.setDatabase(database_name);
     }
     if (!ast)
     {
@@ -642,18 +642,18 @@ ASTPtr DatabaseOnDisk::parseQueryFromMetadata(
         return nullptr;
     auto & create = ast->as<ASTCreateQuery &>();
-    if (!create.table.empty() && create.uuid != UUIDHelpers::Nil)
+    if (create.table && create.uuid != UUIDHelpers::Nil)
     {
         String table_name = unescapeForFileName(fs::path(metadata_file_path).stem());
-        if (create.table != TABLE_WITH_UUID_NAME_PLACEHOLDER && logger)
+        if (create.getTable() != TABLE_WITH_UUID_NAME_PLACEHOLDER && logger)
             LOG_WARNING(
                 logger,
                 "File {} contains both UUID and table name. Will use name `{}` instead of `{}`",
                 metadata_file_path,
                 table_name,
-                create.table);
-        create.table = table_name;
+                create.getTable());
+        create.setTable(table_name);
     }
     return ast;
@@ -667,7 +667,7 @@ ASTPtr DatabaseOnDisk::getCreateQueryFromMetadata(const String & database_metada
     {
         auto & ast_create_query = ast->as<ASTCreateQuery &>();
         ast_create_query.attach = false;
-        ast_create_query.database = getDatabaseName();
+        ast_create_query.setDatabase(getDatabaseName());
     }
     return ast;

View File

@@ -63,7 +63,7 @@ public:
     String getDataPath() const override { return data_path; }
     String getTableDataPath(const String & table_name) const override { return data_path + escapeForFileName(table_name) + "/"; }
-    String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.table); }
+    String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.getTable()); }
     String getMetadataPath() const override { return metadata_path; }
     static ASTPtr parseQueryFromMetadata(Poco::Logger * log, ContextPtr context, const String & metadata_file_path, bool throw_on_error = true, bool remove_empty = false);

View File

@@ -55,7 +55,7 @@ namespace
     catch (Exception & e)
     {
         e.addMessage(
-            "Cannot attach table " + backQuote(database_name) + "." + backQuote(query.table) + " from metadata file " + metadata_path
+            "Cannot attach table " + backQuote(database_name) + "." + backQuote(query.getTable()) + " from metadata file " + metadata_path
             + " from query " + serializeAST(query));
         throw;
     }
@@ -168,7 +168,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables
     if (ast)
     {
         auto * create_query = ast->as<ASTCreateQuery>();
-        create_query->database = database_name;
+        create_query->setDatabase(database_name);
         if (fs::exists(full_path.string() + detached_suffix))
         {
@@ -182,7 +182,7 @@
         }
         TableNamesSet loading_dependencies = getDependenciesSetFromCreateQuery(getContext(), ast);
-        QualifiedTableName qualified_name{database_name, create_query->table};
+        QualifiedTableName qualified_name{database_name, create_query->getTable()};
         std::lock_guard lock{metadata.mutex};
         metadata.parsed_tables[qualified_name] = ParsedTableMetadata{full_path.string(), ast};

View File

@@ -349,9 +349,9 @@ void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_
     /// Replicas will set correct name of current database in query context (database name can be different on replicas)
     if (auto * ddl_query = dynamic_cast<ASTQueryWithTableAndOutput *>(query.get()))
     {
-        if (ddl_query->database != getDatabaseName())
+        if (ddl_query->getDatabase() != getDatabaseName())
             throw Exception(ErrorCodes::UNKNOWN_DATABASE, "Database was renamed");
-        ddl_query->database.clear();
+        ddl_query->database.reset();
     }

     if (auto * create = query->as<ASTCreateQuery>())
     {
@@ -391,7 +391,7 @@ void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_
                 /// NOTE: we cannot check here that substituted values will be actually different on shards and replicas.
                 Macros::MacroExpansionInfo info;
-                info.table_id = {getDatabaseName(), create->table, create->uuid};
+                info.table_id = {getDatabaseName(), create->getTable(), create->uuid};
                 query_context->getMacros()->expand(maybe_path, info);
                 bool maybe_shard_macros = info.expanded_other;
                 info.expanded_other = false;
@@ -715,13 +715,13 @@ ASTPtr DatabaseReplicated::parseQueryFromMetadataInZooKeeper(const String & node
     auto ast = parseQuery(parser, query, description, 0, getContext()->getSettingsRef().max_parser_depth);

     auto & create = ast->as<ASTCreateQuery &>();
-    if (create.uuid == UUIDHelpers::Nil || create.table != TABLE_WITH_UUID_NAME_PLACEHOLDER || !create.database.empty())
+    if (create.uuid == UUIDHelpers::Nil || create.getTable() != TABLE_WITH_UUID_NAME_PLACEHOLDER || create.database)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Got unexpected query from {}: {}", node_name, query);

     bool is_materialized_view_with_inner_table = create.is_materialized_view && create.to_table_id.empty();

-    create.database = getDatabaseName();
-    create.table = unescapeForFileName(node_name);
+    create.setDatabase(getDatabaseName());
+    create.setTable(unescapeForFileName(node_name));
     create.attach = is_materialized_view_with_inner_table;

     return ast;
@@ -811,7 +811,7 @@ void DatabaseReplicated::commitCreateTable(const ASTCreateQuery & query, const S
     assert(!ddl_worker->isCurrentlyActive() || txn);
     if (txn && txn->isInitialQuery())
     {
-        String metadata_zk_path = zookeeper_path + "/metadata/" + escapeForFileName(query.table);
+        String metadata_zk_path = zookeeper_path + "/metadata/" + escapeForFileName(query.getTable());
         String statement = getObjectDefinitionFromCreateQuery(query.clone());
         /// zk::multi(...) will throw if `metadata_zk_path` exists
         txn->addOp(zkutil::makeCreateRequest(metadata_zk_path, statement, zkutil::CreateMode::Persistent));

View File

@@ -29,7 +29,7 @@ void applyMetadataChangesToCreateQuery(const ASTPtr & query, const StorageInMemo
     bool has_structure = ast_create_query.columns_list && ast_create_query.columns_list->columns;
     if (ast_create_query.as_table_function && !has_structure)
         throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot alter table {} because it was created AS table function"
-                                                     " and doesn't have structure in metadata", backQuote(ast_create_query.table));
+                                                     " and doesn't have structure in metadata", backQuote(ast_create_query.getTable()));

     assert(has_structure);
     ASTPtr new_columns = InterpreterCreateQuery::formatColumns(metadata.columns);

View File

@@ -129,8 +129,8 @@ static ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr
     {
         /// init create query.
         auto table_id = storage->getStorageID();
-        create_table_query->table = table_id.table_name;
-        create_table_query->database = table_id.database_name;
+        create_table_query->setTable(table_id.table_name);
+        create_table_query->setDatabase(table_id.database_name);

         auto metadata_snapshot = storage->getInMemoryMetadataPtr();
         for (const auto & column_type_and_name : metadata_snapshot->getColumns().getOrdinary())
@@ -192,7 +192,7 @@ time_t DatabaseMySQL::getObjectMetadataModificationTime(const String & table_nam
 ASTPtr DatabaseMySQL::getCreateDatabaseQuery() const
 {
     const auto & create_query = std::make_shared<ASTCreateQuery>();
-    create_query->database = getDatabaseName();
+    create_query->setDatabase(getDatabaseName());
     create_query->set(create_query->storage, database_engine_define);

     if (const auto comment_value = getDatabaseComment(); !comment_value.empty())

View File

@@ -3,26 +3,27 @@
 #if USE_MYSQL

 #include <Databases/MySQL/MaterializedMySQLSyncThread.h>

-# include <cstdlib>
-# include <random>
-# include <Columns/ColumnTuple.h>
-# include <Columns/ColumnDecimal.h>
-# include <QueryPipeline/QueryPipelineBuilder.h>
-# include <Processors/Executors/PullingPipelineExecutor.h>
-# include <Processors/Executors/CompletedPipelineExecutor.h>
-# include <Processors/Sources/SourceFromSingleChunk.h>
-# include <Processors/Transforms/CountingTransform.h>
-# include <Databases/MySQL/DatabaseMaterializedMySQL.h>
-# include <Databases/MySQL/MaterializeMetadata.h>
-# include <Processors/Sources/MySQLSource.h>
-# include <IO/ReadBufferFromString.h>
-# include <Interpreters/Context.h>
-# include <Interpreters/executeQuery.h>
-# include <Storages/StorageMergeTree.h>
-# include <Common/quoteString.h>
-# include <Common/setThreadName.h>
-# include <base/sleep.h>
-# include <base/bit_cast.h>
+#include <cstdlib>
+#include <random>
+#include <string_view>
+#include <Columns/ColumnTuple.h>
+#include <Columns/ColumnDecimal.h>
+#include <QueryPipeline/QueryPipelineBuilder.h>
+#include <Processors/Executors/PullingPipelineExecutor.h>
+#include <Processors/Executors/CompletedPipelineExecutor.h>
+#include <Processors/Sources/SourceFromSingleChunk.h>
+#include <Processors/Transforms/CountingTransform.h>
+#include <Databases/MySQL/DatabaseMaterializedMySQL.h>
+#include <Databases/MySQL/MaterializeMetadata.h>
+#include <Processors/Sources/MySQLSource.h>
+#include <IO/ReadBufferFromString.h>
+#include <Interpreters/Context.h>
+#include <Interpreters/executeQuery.h>
+#include <Storages/StorageMergeTree.h>
+#include <Common/quoteString.h>
+#include <Common/setThreadName.h>
+#include <base/sleep.h>
+#include <base/bit_cast.h>

 namespace DB
 {
@@ -765,7 +766,7 @@ void MaterializedMySQLSyncThread::executeDDLAtomic(const QueryEvent & query_even
 bool MaterializedMySQLSyncThread::isMySQLSyncThread()
 {
-    return getThreadName() == MYSQL_BACKGROUND_THREAD_NAME;
+    return getThreadName() == std::string_view(MYSQL_BACKGROUND_THREAD_NAME);
 }

 void MaterializedMySQLSyncThread::setSynchronizationThreadException(const std::exception_ptr & exception)
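
The isMySQLSyncThread() change is a correctness fix rather than a rename: if getThreadName() yields a raw const char *, comparing it to the MYSQL_BACKGROUND_THREAD_NAME literal with == compares pointers, not characters; wrapping one operand in std::string_view forces a content comparison. A standalone illustration of the pitfall, with made-up strings rather than the real thread names:

#include <cassert>
#include <cstring>
#include <string_view>

int main()
{
    const char * a = "mysql_sync";
    char b[] = "mysql_sync";              /// same characters, different address

    assert(!(a == b));                    /// pointer comparison: false
    assert(a == std::string_view(b));     /// content comparison: true
    assert(std::strcmp(a, b) == 0);       /// the classic C equivalent
    return 0;
}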

View File

@@ -238,7 +238,7 @@ ASTPtr DatabaseMaterializedPostgreSQL::createAlterSettingsQuery(const SettingCha
     auto * alter = query->as<ASTAlterQuery>();
     alter->alter_object = ASTAlterQuery::AlterObjectType::DATABASE;
-    alter->database = database_name;
+    alter->setDatabase(database_name);
     alter->set(alter->command_list, command_list);

     return query;

View File

@@ -355,7 +355,7 @@ void DatabasePostgreSQL::shutdown()
 ASTPtr DatabasePostgreSQL::getCreateDatabaseQuery() const
 {
     const auto & create_query = std::make_shared<ASTCreateQuery>();
-    create_query->database = getDatabaseName();
+    create_query->setDatabase(getDatabaseName());
     create_query->set(create_query->storage, database_engine_define);

     if (const auto comment_value = getDatabaseComment(); !comment_value.empty())
@@ -388,8 +388,8 @@ ASTPtr DatabasePostgreSQL::getCreateTableQueryImpl(const String & table_name, Co
     /// init create query.
     auto table_id = storage->getStorageID();
-    create_table_query->table = table_id.table_name;
-    create_table_query->database = table_id.database_name;
+    create_table_query->setTable(table_id.table_name);
+    create_table_query->setDatabase(table_id.database_name);

     auto metadata_snapshot = storage->getInMemoryMetadataPtr();
     for (const auto & column_type_and_name : metadata_snapshot->getColumns().getOrdinary())

View File

@@ -160,7 +160,7 @@ StoragePtr DatabaseSQLite::fetchTable(const String & table_name, ContextPtr loca
 ASTPtr DatabaseSQLite::getCreateDatabaseQuery() const
 {
     const auto & create_query = std::make_shared<ASTCreateQuery>();
-    create_query->database = getDatabaseName();
+    create_query->setDatabase(getDatabaseName());
     create_query->set(create_query->storage, database_engine_define);

     if (const auto comment_value = getDatabaseComment(); !comment_value.empty())
@@ -193,8 +193,8 @@ ASTPtr DatabaseSQLite::getCreateTableQueryImpl(const String & table_name, Contex
     /// init create query.
     auto table_id = storage->getStorageID();
-    create_table_query->table = table_id.table_name;
-    create_table_query->database = table_id.database_name;
+    create_table_query->setTable(table_id.table_name);
+    create_table_query->setDatabase(table_id.database_name);

     auto metadata_snapshot = storage->getInMemoryMetadataPtr();
     for (const auto & column_type_and_name : metadata_snapshot->getColumns().getOrdinary())

View File

@@ -536,12 +536,12 @@ getDictionaryConfigurationFromAST(const ASTCreateQuery & query, ContextPtr conte
     AutoPtr<Poco::XML::Element> name_element(xml_document->createElement("name"));
     current_dictionary->appendChild(name_element);
-    AutoPtr<Text> name(xml_document->createTextNode(query.table));
+    AutoPtr<Text> name(xml_document->createTextNode(query.getTable()));
     name_element->appendChild(name);

     AutoPtr<Poco::XML::Element> database_element(xml_document->createElement("database"));
     current_dictionary->appendChild(database_element);
-    AutoPtr<Text> database(xml_document->createTextNode(!database_.empty() ? database_ : query.database));
+    AutoPtr<Text> database(xml_document->createTextNode(!database_.empty() ? database_ : query.getDatabase()));
     database_element->appendChild(database);

     if (query.uuid != UUIDHelpers::Nil)
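
For readers unfamiliar with this function: it serializes a CREATE DICTIONARY AST into the XML layout used by dictionary configuration files, via Poco's DOM API. A trimmed, self-contained sketch of the same three-step DOM idiom (create element, create text node, append), assuming only vanilla Poco; the element names and values here are made up:

#include <Poco/AutoPtr.h>
#include <Poco/DOM/DOMWriter.h>
#include <Poco/DOM/Document.h>
#include <Poco/DOM/Element.h>
#include <Poco/DOM/Text.h>
#include <iostream>

int main()
{
    /// Builds <dictionary><name>t</name><database>db</database></dictionary>.
    Poco::AutoPtr<Poco::XML::Document> doc(new Poco::XML::Document);
    Poco::AutoPtr<Poco::XML::Element> dict(doc->createElement("dictionary"));
    doc->appendChild(dict);

    Poco::AutoPtr<Poco::XML::Element> name_element(doc->createElement("name"));
    dict->appendChild(name_element);
    Poco::AutoPtr<Poco::XML::Text> name(doc->createTextNode("t"));
    name_element->appendChild(name);

    Poco::AutoPtr<Poco::XML::Element> database_element(doc->createElement("database"));
    dict->appendChild(database_element);
    Poco::AutoPtr<Poco::XML::Text> database(doc->createTextNode("db"));
    database_element->appendChild(database);

    Poco::XML::DOMWriter().writeNode(std::cout, doc);  /// print the result
}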

View File

@@ -0,0 +1,23 @@
+#pragma once
+
+#include <IO/ReadBufferFromFileBase.h>
+
+namespace DB
+{
+
+/// In case of empty file it does not make any sense to read it.
+///
+/// Plus regular readers from file have an assert that the buffer is not empty, which would fail:
+/// - ReadBufferFromFileDescriptor
+/// - SynchronousReader
+/// - ThreadPoolReader
+class ReadBufferFromEmptyFile : public ReadBufferFromFileBase
+{
+private:
+    bool nextImpl() override { return false; }
+    std::string getFileName() const override { return "<empty>"; }
+    off_t seek(off_t /*off*/, int /*whence*/) override { return 0; }
+    off_t getPosition() override { return 0; }
+};
+
+}

View File

@@ -51,6 +51,9 @@ std::string ReadBufferFromFileDescriptor::getFileName() const
 bool ReadBufferFromFileDescriptor::nextImpl()
 {
+    /// If internal_buffer size is empty, then read() cannot be distinguished from EOF
+    assert(!internal_buffer.empty());
+
     size_t bytes_read = 0;
     while (!bytes_read)
     {
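
The new assert is worth a second look: POSIX read() returns 0 both for a zero-length buffer and at end of file, so calling nextImpl() with an empty internal buffer could misreport EOF. A minimal demonstration of the ambiguity; any readable file will do, /etc/hostname is only an example:

#include <cassert>
#include <fcntl.h>
#include <unistd.h>

int main()
{
    int fd = ::open("/etc/hostname", O_RDONLY);   /// any readable file
    if (fd < 0)
        return 1;

    char byte;
    assert(::read(fd, &byte, 0) == 0);   /// zero-length read: returns 0, no EOF implied
    while (::read(fd, &byte, 1) > 0)     /// consume the whole file
        ;
    assert(::read(fd, &byte, 1) == 0);   /// genuine EOF: also returns 0

    ::close(fd);
    return 0;
}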

View File

@@ -36,6 +36,9 @@ namespace ErrorCodes
 std::future<IAsynchronousReader::Result> SynchronousReader::submit(Request request)
 {
+    /// If size is zero, then read() cannot be distinguished from EOF
+    assert(request.size);
+
     int fd = assert_cast<const LocalFileDescriptor &>(*request.descriptor).fd;

 #if defined(POSIX_FADV_WILLNEED)

View File

@@ -76,6 +76,9 @@ ThreadPoolReader::ThreadPoolReader(size_t pool_size, size_t queue_size_)
 std::future<IAsynchronousReader::Result> ThreadPoolReader::submit(Request request)
 {
+    /// If size is zero, then read() cannot be distinguished from EOF
+    assert(request.size);
+
     int fd = assert_cast<const LocalFileDescriptor &>(*request.descriptor).fd;

 #if defined(__linux__)

View File

@@ -1,4 +1,5 @@
 #include <IO/createReadBufferFromFileBase.h>
+#include <IO/ReadBufferFromEmptyFile.h>
 #include <IO/ReadBufferFromFile.h>
 #include <IO/MMapReadBufferFromFileWithCache.h>
 #include <IO/AsynchronousReadBufferFromFile.h>
@@ -33,6 +34,8 @@ std::unique_ptr<ReadBufferFromFileBase> createReadBufferFromFileBase(
     char * existing_memory,
     size_t alignment)
 {
+    if (size.has_value() && !*size)
+        return std::make_unique<ReadBufferFromEmptyFile>();
     size_t estimated_size = size.has_value() ? *size : 0;

     if (!existing_memory
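
Taken together with the new assertions, the control flow is: if the caller already knows the file is empty, hand back a reader that is immediately at EOF and never issues read() at all. A simplified model of that dispatch, with stand-in types rather than the real ClickHouse classes:

#include <cassert>
#include <cstddef>
#include <memory>
#include <optional>

struct Reader
{
    virtual bool next() = 0;   /// false means EOF
    virtual ~Reader() = default;
};

struct EmptyReader : Reader
{
    bool next() override { return false; }   /// immediately at EOF; read() is never called
};

struct FileReader : Reader
{
    bool next() override { return true; }    /// stands in for the real read() loop
};

std::unique_ptr<Reader> makeReader(std::optional<std::size_t> size)
{
    if (size.has_value() && !*size)
        return std::make_unique<EmptyReader>();
    return std::make_unique<FileReader>();
}

int main()
{
    assert(!makeReader(std::size_t{0})->next());  /// known-empty file: immediate EOF
    assert(makeReader(std::nullopt)->next());     /// unknown size: real reader
}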

View File

@@ -239,8 +239,8 @@ private:
         if (only_replace_current_database_function)
             return;

-        if (node.database.empty())
-            node.database = database_name;
+        if (!node.database)
+            node.setDatabase(database_name);
     }

     void visitDDL(ASTRenameQuery & node, ASTPtr &) const
@@ -262,8 +262,8 @@ private:
         if (only_replace_current_database_function)
             return;

-        if (node.database.empty())
-            node.database = database_name;
+        if (!node.database)
+            node.setDatabase(database_name);

         for (const auto & child : node.command_list->children)
         {
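
Both visitDDL overloads above apply the same rule; spelled out, it is simply "qualify an unqualified reference with the session's current database". A compact model of the rule under the new optional representation, with simplified types and made-up names:

#include <cassert>
#include <optional>
#include <string>

struct DDLNode
{
    std::optional<std::string> database;   /// stand-in for the AST identifier
    std::string table;
};

void addDefaultDatabase(DDLNode & node, const std::string & current_database)
{
    if (!node.database)                    /// was: node.database.empty()
        node.database = current_database;  /// was: node.database = database_name
}

int main()
{
    DDLNode unqualified{std::nullopt, "hits"};
    addDefaultDatabase(unqualified, "default");
    assert(*unqualified.database == "default");

    DDLNode qualified{std::string("system"), "tables"};
    addDefaultDatabase(qualified, "default");
    assert(*qualified.database == "system");   /// explicit names are preserved
}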

View File

@@ -257,12 +257,12 @@ bool DDLTask::tryFindHostInCluster()
                   * */
                 is_circular_replicated = true;
                 auto * query_with_table = dynamic_cast<ASTQueryWithTableAndOutput *>(query.get());
-                if (!query_with_table || query_with_table->database.empty())
+                if (!query_with_table || !query_with_table->database)
                 {
                     throw Exception(ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION,
                                     "For a distributed DDL on circular replicated cluster its table name must be qualified by database name.");
                 }
-                if (default_database == query_with_table->database)
+                if (default_database == query_with_table->getDatabase())
                     return true;
             }
         }
@@ -351,8 +351,8 @@ void DatabaseReplicatedTask::parseQueryFromEntry(ContextPtr context)
     if (auto * ddl_query = dynamic_cast<ASTQueryWithTableAndOutput *>(query.get()))
     {
         /// Update database name with actual name of local database
-        assert(ddl_query->database.empty());
-        ddl_query->database = database->getDatabaseName();
+        assert(!ddl_query->database);
+        ddl_query->setDatabase(database->getDatabaseName());
     }
 }

View File

@@ -662,7 +662,7 @@ void DDLWorker::processTask(DDLTaskBase & task, const ZooKeeperPtr & zookeeper)
     StoragePtr storage;
     if (auto * query_with_table = dynamic_cast<ASTQueryWithTableAndOutput *>(task.query.get()); query_with_table)
     {
-        if (!query_with_table->table.empty())
+        if (query_with_table->table)
         {
             /// It's not CREATE DATABASE
             auto table_id = context->tryResolveStorageID(*query_with_table, Context::ResolveOrdinary);

View File

@@ -66,9 +66,9 @@ TemporaryTableHolder::TemporaryTableHolder(ContextPtr context_, const TemporaryT
         if (create->uuid == UUIDHelpers::Nil)
             create->uuid = UUIDHelpers::generateV4();
         id = create->uuid;
-        create->table = "_tmp_" + toString(id);
-        global_name = create->table;
-        create->database = DatabaseCatalog::TEMPORARY_DATABASE;
+        create->setTable("_tmp_" + toString(id));
+        global_name = create->getTable();
+        create->setDatabase(DatabaseCatalog::TEMPORARY_DATABASE);
     }
     else
     {
@@ -786,8 +786,8 @@ void DatabaseCatalog::enqueueDroppedTableCleanup(StorageID table_id, StoragePtr
         if (create)
        {
             String data_path = "store/" + getPathForUUID(table_id.uuid);
-            create->database = table_id.database_name;
-            create->table = table_id.table_name;
+            create->setDatabase(table_id.database_name);
+            create->setTable(table_id.table_name);
             try
             {
                 table = createTableFromAST(*create, table_id.getDatabaseName(), data_path, getContext(), false).second;

View File

@@ -62,7 +62,7 @@ BlockIO InterpreterAlterQuery::executeToTable(const ASTAlterQuery & alter)
     getContext()->checkAccess(getRequiredAccess());
     auto table_id = getContext()->resolveStorageID(alter, Context::ResolveOrdinary);
-    query_ptr->as<ASTAlterQuery &>().database = table_id.database_name;
+    query_ptr->as<ASTAlterQuery &>().setDatabase(table_id.database_name);

     DatabasePtr database = DatabaseCatalog::instance().getDatabase(table_id.database_name);
     if (typeid_cast<DatabaseReplicated *>(database.get())
@@ -175,7 +175,7 @@ BlockIO InterpreterAlterQuery::executeToDatabase(const ASTAlterQuery & alter)
 {
     BlockIO res;
     getContext()->checkAccess(getRequiredAccess());
-    DatabasePtr database = DatabaseCatalog::instance().getDatabase(alter.database);
+    DatabasePtr database = DatabaseCatalog::instance().getDatabase(alter.getDatabase());
     AlterCommands alter_commands;

     for (const auto & child : alter.command_list->children)
@@ -215,7 +215,7 @@ AccessRightsElements InterpreterAlterQuery::getRequiredAccess() const
     AccessRightsElements required_access;
     const auto & alter = query_ptr->as<ASTAlterQuery &>();
     for (const auto & child : alter.command_list->children)
-        boost::range::push_back(required_access, getRequiredAccessForCommand(child->as<ASTAlterCommand&>(), alter.database, alter.table));
+        boost::range::push_back(required_access, getRequiredAccessForCommand(child->as<ASTAlterCommand&>(), alter.getDatabase(), alter.getTable()));
     return required_access;
 }

View File

@@ -101,7 +101,7 @@ InterpreterCreateQuery::InterpreterCreateQuery(const ASTPtr & query_ptr_, Contex
 BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
 {
-    String database_name = create.database;
+    String database_name = create.getDatabase();

     auto guard = DatabaseCatalog::instance().getDDLGuard(database_name, "");
@@ -127,11 +127,11 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
         /// Short syntax: try read database definition from file
         auto ast = DatabaseOnDisk::parseQueryFromMetadata(nullptr, getContext(), metadata_file_path);
         create = ast->as<ASTCreateQuery &>();
-        if (!create.table.empty() || !create.storage)
+        if (create.table || !create.storage)
             throw Exception(ErrorCodes::INCORRECT_QUERY, "Metadata file {} contains incorrect CREATE DATABASE query", metadata_file_path.string());
         create.attach = true;
         create.attach_short_syntax = true;
-        create.database = database_name;
+        create.setDatabase(database_name);
     }
     else if (!create.storage)
     {
@@ -161,7 +161,7 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
     {
         if (create.attach && create.uuid == UUIDHelpers::Nil)
             throw Exception(ErrorCodes::INCORRECT_QUERY, "UUID must be specified for ATTACH. "
-                            "If you want to attach existing database, use just ATTACH DATABASE {};", create.database);
+                            "If you want to attach existing database, use just ATTACH DATABASE {};", create.getDatabase());
         else if (create.uuid == UUIDHelpers::Nil)
             create.uuid = UUIDHelpers::generateV4();
@@ -238,7 +238,7 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
     DatabasePtr database = DatabaseFactory::get(create, metadata_path / "", getContext());

     if (create.uuid != UUIDHelpers::Nil)
-        create.database = TABLE_WITH_UUID_NAME_PLACEHOLDER;
+        create.setDatabase(TABLE_WITH_UUID_NAME_PLACEHOLDER);

     bool need_write_metadata = !create.attach || !fs::exists(metadata_file_path);
@@ -823,7 +823,7 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data
     bool has_uuid = create.uuid != UUIDHelpers::Nil || create.to_inner_uuid != UUIDHelpers::Nil;
     if (has_uuid && !is_on_cluster)
         throw Exception(ErrorCodes::INCORRECT_QUERY,
-                        "{} UUID specified, but engine of database {} is not Atomic", kind, create.database);
+                        "{} UUID specified, but engine of database {} is not Atomic", kind, create.getDatabase());

     /// Ignore UUID if it's ON CLUSTER query
     create.uuid = UUIDHelpers::Nil;
@@ -835,12 +835,12 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data
 BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
 {
     /// Temporary tables are created out of databases.
-    if (create.temporary && !create.database.empty())
+    if (create.temporary && create.database)
         throw Exception("Temporary tables cannot be inside a database. You should not specify a database for a temporary table.",
                         ErrorCodes::BAD_DATABASE_FOR_TEMPORARY_TABLE);

     String current_database = getContext()->getCurrentDatabase();
-    auto database_name = create.database.empty() ? current_database : create.database;
+    auto database_name = create.database ? create.getDatabase() : current_database;

     // If this is a stub ATTACH query, read the query definition from the database
     if (create.attach && !create.storage && !create.columns_list)
@@ -849,12 +849,12 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
         if (database->getEngineName() == "Replicated")
         {
-            auto guard = DatabaseCatalog::instance().getDDLGuard(database_name, create.table);
+            auto guard = DatabaseCatalog::instance().getDDLGuard(database_name, create.getTable());

             if (auto* ptr = typeid_cast<DatabaseReplicated *>(database.get());
                 ptr && !getContext()->getClientInfo().is_replicated_database_internal)
             {
-                create.database = database_name;
+                create.setDatabase(database_name);
                 guard->releaseTableLock();
                 return ptr->tryEnqueueReplicatedDDL(query_ptr, getContext());
             }
@@ -863,18 +863,18 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
         bool if_not_exists = create.if_not_exists;

         // Table SQL definition is available even if the table is detached (even permanently)
-        auto query = database->getCreateTableQuery(create.table, getContext());
+        auto query = database->getCreateTableQuery(create.getTable(), getContext());
         auto create_query = query->as<ASTCreateQuery &>();

         if (!create.is_dictionary && create_query.is_dictionary)
             throw Exception(ErrorCodes::INCORRECT_QUERY,
                 "Cannot ATTACH TABLE {}.{}, it is a Dictionary",
-                backQuoteIfNeed(database_name), backQuoteIfNeed(create.table));
+                backQuoteIfNeed(database_name), backQuoteIfNeed(create.getTable()));

         if (create.is_dictionary && !create_query.is_dictionary)
             throw Exception(ErrorCodes::INCORRECT_QUERY,
                 "Cannot ATTACH DICTIONARY {}.{}, it is a Table",
-                backQuoteIfNeed(database_name), backQuoteIfNeed(create.table));
+                backQuoteIfNeed(database_name), backQuoteIfNeed(create.getTable()));

         create = create_query; // Copy the saved create query, but use ATTACH instead of CREATE
@@ -917,11 +917,11 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
             "use either ATTACH TABLE {}; to attach existing table "
             "or CREATE TABLE {} <table definition>; to create new table "
             "or ATTACH TABLE {} FROM '/path/to/data/' <table definition>; to create new table and attach data.",
-            create.table, create.table, create.table);
+            create.getTable(), create.getTable(), create.getTable());
     }

-    if (!create.temporary && create.database.empty())
-        create.database = current_database;
+    if (!create.temporary && !create.database)
+        create.setDatabase(current_database);
     if (create.to_table_id && create.to_table_id.database_name.empty())
         create.to_table_id.database_name = current_database;
@@ -949,7 +949,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
     if (need_add_to_database && database->getEngineName() == "Replicated")
     {
-        auto guard = DatabaseCatalog::instance().getDDLGuard(create.database, create.table);
+        auto guard = DatabaseCatalog::instance().getDDLGuard(create.getDatabase(), create.getTable());

         if (auto * ptr = typeid_cast<DatabaseReplicated *>(database.get());
             ptr && !getContext()->getClientInfo().is_replicated_database_internal)
@@ -972,7 +972,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
     /// If table has dependencies - add them to the graph
     TableNamesSet loading_dependencies = getDependenciesSetFromCreateQuery(getContext()->getGlobalContext(), query_ptr);
     if (!loading_dependencies.empty())
-        DatabaseCatalog::instance().addLoadingDependencies(QualifiedTableName{database_name, create.table}, std::move(loading_dependencies));
+        DatabaseCatalog::instance().addLoadingDependencies(QualifiedTableName{database_name, create.getTable()}, std::move(loading_dependencies));

     return fillTableIfNeeded(create);
 }
@@ -991,16 +991,16 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
         /** If the request specifies IF NOT EXISTS, we allow concurrent CREATE queries (which do nothing).
          * If table doesn't exist, one thread is creating table, while others wait in DDLGuard.
          */
-        guard = DatabaseCatalog::instance().getDDLGuard(create.database, create.table);
+        guard = DatabaseCatalog::instance().getDDLGuard(create.getDatabase(), create.getTable());

-        database = DatabaseCatalog::instance().getDatabase(create.database);
+        database = DatabaseCatalog::instance().getDatabase(create.getDatabase());
         assertOrSetUUID(create, database);

         String storage_name = create.is_dictionary ? "Dictionary" : "Table";
         auto storage_already_exists_error_code = create.is_dictionary ? ErrorCodes::DICTIONARY_ALREADY_EXISTS : ErrorCodes::TABLE_ALREADY_EXISTS;

         /// Table can be created before or it can be created concurrently in another thread, while we were waiting in DDLGuard.
-        if (database->isTableExist(create.table, getContext()))
+        if (database->isTableExist(create.getTable(), getContext()))
         {
             /// TODO Check structure of table
             if (create.if_not_exists)
@@ -1009,8 +1009,8 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
             {
                 /// when executing CREATE OR REPLACE VIEW, drop current existing view
                 auto drop_ast = std::make_shared<ASTDropQuery>();
-                drop_ast->database = create.database;
-                drop_ast->table = create.table;
+                drop_ast->setDatabase(create.getDatabase());
+                drop_ast->setTable(create.getTable());
                 drop_ast->no_ddl_lock = true;

                 auto drop_context = Context::createCopy(context);
@@ -1019,7 +1019,7 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
             }
             else
                 throw Exception(storage_already_exists_error_code,
-                    "{} {}.{} already exists", storage_name, backQuoteIfNeed(create.database), backQuoteIfNeed(create.table));
+                    "{} {}.{} already exists", storage_name, backQuoteIfNeed(create.getDatabase()), backQuoteIfNeed(create.getTable()));
         }

         data_path = database->getTableDataPath(create);
@@ -1030,10 +1030,10 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
     }
     else
     {
-        if (create.if_not_exists && getContext()->tryResolveStorageID({"", create.table}, Context::ResolveExternal))
+        if (create.if_not_exists && getContext()->tryResolveStorageID({"", create.getTable()}, Context::ResolveExternal))
             return false;

-        String temporary_table_name = create.table;
+        String temporary_table_name = create.getTable();
         auto temporary_table = TemporaryTableHolder(getContext(), properties.columns, properties.constraints, query_ptr);
         getContext()->getSessionContext()->addExternalTable(temporary_table_name, std::move(temporary_table));
         return true;
@@ -1070,8 +1070,8 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
     {
         const auto & factory = TableFunctionFactory::instance();
         auto table_func = factory.get(create.as_table_function, getContext());
-        res = table_func->execute(create.as_table_function, getContext(), create.table, properties.columns);
-        res->renameInMemory({create.database, create.table, create.uuid});
+        res = table_func->execute(create.as_table_function, getContext(), create.getTable(), properties.columns);
+        res->renameInMemory({create.getDatabase(), create.getTable(), create.uuid});
     }
     else
     {
@@ -1089,12 +1089,12 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
             "ATTACH ... FROM ... query is not supported for {} table engine, "
             "because such tables do not store any data on disk. Use CREATE instead.", res->getName());

-    database->createTable(getContext(), create.table, res, query_ptr);
+    database->createTable(getContext(), create.getTable(), res, query_ptr);

     /// Move table data to the proper place. We do not move data earlier to avoid situations
     /// when data directory moved, but table has not been created due to some error.
     if (from_path)
-        res->rename(actual_data_path, {create.database, create.table, create.uuid});
+        res->rename(actual_data_path, {create.getDatabase(), create.getTable(), create.uuid});

     /// We must call "startup" and "shutdown" while holding DDLGuard.
     /// Because otherwise method "shutdown" (from InterpreterDropQuery) can be called before startup
@@ -1142,30 +1142,30 @@ BlockIO InterpreterCreateQuery::doCreateOrReplaceTable(ASTCreateQuery & create,
     };

     auto ast_drop = std::make_shared<ASTDropQuery>();
-    String table_to_replace_name = create.table;
+    String table_to_replace_name = create.getTable();

     {
-        auto database = DatabaseCatalog::instance().getDatabase(create.database);
+        auto database = DatabaseCatalog::instance().getDatabase(create.getDatabase());
         if (database->getUUID() == UUIDHelpers::Nil)
             throw Exception(ErrorCodes::INCORRECT_QUERY,
                             "{} query is supported only for Atomic databases",
                             create.create_or_replace ? "CREATE OR REPLACE TABLE" : "REPLACE TABLE");

-        UInt64 name_hash = sipHash64(create.database + create.table);
+        UInt64 name_hash = sipHash64(create.getDatabase() + create.getTable());
         UInt16 random_suffix = thread_local_rng();
         if (auto txn = current_context->getZooKeeperMetadataTransaction())
         {
             /// Avoid different table name on database replicas
             random_suffix = sipHash64(txn->getTaskZooKeeperPath());
         }
-        create.table = fmt::format("_tmp_replace_{}_{}",
-                                   getHexUIntLowercase(name_hash),
-                                   getHexUIntLowercase(random_suffix));
+        create.setTable(fmt::format("_tmp_replace_{}_{}",
+                                    getHexUIntLowercase(name_hash),
+                                    getHexUIntLowercase(random_suffix)));

-        ast_drop->table = create.table;
+        ast_drop->setTable(create.getTable());
         ast_drop->is_dictionary = create.is_dictionary;
-        ast_drop->database = create.database;
+        ast_drop->setDatabase(create.getDatabase());
         ast_drop->kind = ASTDropQuery::Drop;
     }
@@ -1186,8 +1186,8 @@ BlockIO InterpreterCreateQuery::doCreateOrReplaceTable(ASTCreateQuery & create,
         auto ast_rename = std::make_shared<ASTRenameQuery>();
         ASTRenameQuery::Element elem
         {
-            ASTRenameQuery::Table{create.database, create.table},
-            ASTRenameQuery::Table{create.database, table_to_replace_name}
+            ASTRenameQuery::Table{create.getDatabase(), create.getTable()},
+            ASTRenameQuery::Table{create.getDatabase(), table_to_replace_name}
         };
         ast_rename->elements.push_back(std::move(elem));
@@ -1217,7 +1217,7 @@ BlockIO InterpreterCreateQuery::doCreateOrReplaceTable(ASTCreateQuery & create,
         InterpreterDropQuery(ast_drop, drop_context).execute();
     }

-    create.table = table_to_replace_name;
+    create.setTable(table_to_replace_name);
     return {};
 }
@@ -1240,7 +1240,7 @@ BlockIO InterpreterCreateQuery::fillTableIfNeeded(const ASTCreateQuery & create)
         && !create.is_ordinary_view && !create.is_live_view && (!create.is_materialized_view || create.is_populate))
     {
         auto insert = std::make_shared<ASTInsertQuery>();
-        insert->table_id = {create.database, create.table, create.uuid};
+        insert->table_id = {create.getDatabase(), create.getTable(), create.uuid};
         insert->select = create.select->clone();

         if (create.temporary && !getContext()->getSessionContext()->hasQueryContext())
@@ -1316,7 +1316,7 @@ BlockIO InterpreterCreateQuery::execute()
     ASTQueryWithOutput::resetOutputASTIfExist(create);

     /// CREATE|ATTACH DATABASE
-    if (!create.database.empty() && create.table.empty())
+    if (create.database && !create.table)
         return createDatabase(create);
     else
         return createTable(create);
@@ -1332,21 +1332,21 @@ AccessRightsElements InterpreterCreateQuery::getRequiredAccess() const
     AccessRightsElements required_access;
     const auto & create = query_ptr->as<const ASTCreateQuery &>();

-    if (create.table.empty())
+    if (!create.table)
     {
-        required_access.emplace_back(AccessType::CREATE_DATABASE, create.database);
+        required_access.emplace_back(AccessType::CREATE_DATABASE, create.getDatabase());
     }
     else if (create.is_dictionary)
     {
-        required_access.emplace_back(AccessType::CREATE_DICTIONARY, create.database, create.table);
+        required_access.emplace_back(AccessType::CREATE_DICTIONARY, create.getDatabase(), create.getTable());
    }
     else if (create.isView())
     {
         assert(!create.temporary);
         if (create.replace_view)
-            required_access.emplace_back(AccessType::DROP_VIEW | AccessType::CREATE_VIEW, create.database, create.table);
+            required_access.emplace_back(AccessType::DROP_VIEW | AccessType::CREATE_VIEW, create.getDatabase(), create.getTable());
         else
-            required_access.emplace_back(AccessType::CREATE_VIEW, create.database, create.table);
+            required_access.emplace_back(AccessType::CREATE_VIEW, create.getDatabase(), create.getTable());
     }
     else
     {
@@ -1355,8 +1355,8 @@ AccessRightsElements InterpreterCreateQuery::getRequiredAccess() const
         else
         {
             if (create.replace_table)
-                required_access.emplace_back(AccessType::DROP_TABLE, create.database, create.table);
-            required_access.emplace_back(AccessType::CREATE_TABLE, create.database, create.table);
+                required_access.emplace_back(AccessType::DROP_TABLE, create.getDatabase(), create.getTable());
+            required_access.emplace_back(AccessType::CREATE_TABLE, create.getDatabase(), create.getTable());
         }
     }
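
One detail in doCreateOrReplaceTable deserves emphasis: the replacement table is first created under a throwaway name and only then swapped in, and on a Replicated database that name must come out identical on every replica, which is why the random suffix is overridden with a hash of the shared ZooKeeper task path. A sketch of the naming logic, where std::hash stands in for sipHash64 and the format mirrors the hunk above:

#include <cstdint>
#include <cstdio>
#include <functional>
#include <optional>
#include <random>
#include <string>

std::string tmpReplaceName(const std::string & database, const std::string & table,
                           const std::optional<std::string> & zk_task_path)
{
    uint64_t name_hash = std::hash<std::string>{}(database + table);
    /// Plain CREATE OR REPLACE can take any suffix; replicated DDL derives it
    /// from the task path shared through ZooKeeper so all replicas agree.
    uint16_t suffix = zk_task_path
        ? static_cast<uint16_t>(std::hash<std::string>{}(*zk_task_path))
        : static_cast<uint16_t>(std::random_device{}());
    char buf[64];
    std::snprintf(buf, sizeof(buf), "_tmp_replace_%016llx_%04x",
                  static_cast<unsigned long long>(name_hash),
                  static_cast<unsigned>(suffix));
    return buf;
}

Two replicas executing the same replicated DDL entry call this with the same task path and therefore drop and rename the same temporary table.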

View File

@@ -56,9 +56,9 @@ BlockIO InterpreterDropQuery::execute()
     if (getContext()->getSettingsRef().database_atomic_wait_for_drop_and_detach_synchronously)
         drop.no_delay = true;

-    if (!drop.table.empty())
+    if (drop.table)
         return executeToTable(drop);
-    else if (!drop.database.empty())
+    else if (drop.database)
         return executeToDatabase(drop);
     else
         throw Exception("Nothing to drop, both names are empty", ErrorCodes::LOGICAL_ERROR);
@@ -95,7 +95,7 @@ BlockIO InterpreterDropQuery::executeToTableImpl(ASTDropQuery & query, DatabaseP
         if (getContext()->tryResolveStorageID(table_id, Context::ResolveExternal))
             return executeToTemporaryTable(table_id.getTableName(), query.kind);
         else
-            query.database = table_id.database_name = getContext()->getCurrentDatabase();
+            query.setDatabase(table_id.database_name = getContext()->getCurrentDatabase());
     }

     if (query.temporary)
@@ -130,7 +130,7 @@ BlockIO InterpreterDropQuery::executeToTableImpl(ASTDropQuery & query, DatabaseP
         table_id.uuid = database->tryGetTableUUID(table_id.table_name);

         /// Prevents recursive drop from drop database query. The original query must specify a table.
-        bool is_drop_or_detach_database = query_ptr->as<ASTDropQuery>()->table.empty();
+        bool is_drop_or_detach_database = !query_ptr->as<ASTDropQuery>()->table;
         bool is_replicated_ddl_query = typeid_cast<DatabaseReplicated *>(database.get()) &&
                                        !getContext()->getClientInfo().is_replicated_database_internal &&
                                        !is_drop_or_detach_database;
@@ -302,7 +302,7 @@ BlockIO InterpreterDropQuery::executeToDatabase(const ASTDropQuery & query)
 BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query, DatabasePtr & database, std::vector<UUID> & uuids_to_wait)
 {
-    const auto & database_name = query.database;
+    const auto & database_name = query.getDatabase();
     auto ddl_guard = DatabaseCatalog::instance().getDDLGuard(database_name, "");

     database = tryGetDatabase(database_name, query.if_exists);
@@ -336,7 +336,7 @@ BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query,
             ASTDropQuery query_for_table;
             query_for_table.kind = query.kind;
             query_for_table.if_exists = true;
-            query_for_table.database = database_name;
+            query_for_table.setDatabase(database_name);
             query_for_table.no_delay = query.no_delay;

             /// Flush should not be done if shouldBeEmptyOnDetach() == false,
@@ -351,7 +351,7 @@ BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query,
             {
                 DatabasePtr db;
                 UUID table_to_wait = UUIDHelpers::Nil;
-                query_for_table.table = iterator->name();
+                query_for_table.setTable(iterator->name());
                 query_for_table.is_dictionary = iterator->table()->isDictionary();
                 executeToTableImpl(query_for_table, db, table_to_wait);
                 uuids_to_wait.push_back(table_to_wait);
@@ -385,29 +385,29 @@ AccessRightsElements InterpreterDropQuery::getRequiredAccessForDDLOnCluster() co
     AccessRightsElements required_access;
     const auto & drop = query_ptr->as<const ASTDropQuery &>();

-    if (drop.table.empty())
+    if (!drop.table)
     {
         if (drop.kind == ASTDropQuery::Kind::Detach)
-            required_access.emplace_back(AccessType::DROP_DATABASE, drop.database);
+            required_access.emplace_back(AccessType::DROP_DATABASE, drop.getDatabase());
         else if (drop.kind == ASTDropQuery::Kind::Drop)
-            required_access.emplace_back(AccessType::DROP_DATABASE, drop.database);
+            required_access.emplace_back(AccessType::DROP_DATABASE, drop.getDatabase());
     }
     else if (drop.is_dictionary)
     {
         if (drop.kind == ASTDropQuery::Kind::Detach)
-            required_access.emplace_back(AccessType::DROP_DICTIONARY, drop.database, drop.table);
+            required_access.emplace_back(AccessType::DROP_DICTIONARY, drop.getDatabase(), drop.getTable());
         else if (drop.kind == ASTDropQuery::Kind::Drop)
-            required_access.emplace_back(AccessType::DROP_DICTIONARY, drop.database, drop.table);
+            required_access.emplace_back(AccessType::DROP_DICTIONARY, drop.getDatabase(), drop.getTable());
     }
     else if (!drop.temporary)
     {
         /// It can be view or table.
         if (drop.kind == ASTDropQuery::Kind::Drop)
-            required_access.emplace_back(AccessType::DROP_TABLE | AccessType::DROP_VIEW, drop.database, drop.table);
+            required_access.emplace_back(AccessType::DROP_TABLE | AccessType::DROP_VIEW, drop.getDatabase(), drop.getTable());
         else if (drop.kind == ASTDropQuery::Kind::Truncate)
-            required_access.emplace_back(AccessType::TRUNCATE, drop.database, drop.table);
+            required_access.emplace_back(AccessType::TRUNCATE, drop.getDatabase(), drop.getTable());
         else if (drop.kind == ASTDropQuery::Kind::Detach)
-            required_access.emplace_back(AccessType::DROP_TABLE | AccessType::DROP_VIEW, drop.database, drop.table);
+            required_access.emplace_back(AccessType::DROP_TABLE | AccessType::DROP_VIEW, drop.getDatabase(), drop.getTable());
     }

     return required_access;
@@ -424,8 +424,8 @@ void InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind kind, ContextPtr
     {
         /// We create and execute `drop` query for internal table.
         auto drop_query = std::make_shared<ASTDropQuery>();
-        drop_query->database = target_table_id.database_name;
-        drop_query->table = target_table_id.table_name;
+        drop_query->setDatabase(target_table_id.database_name);
+        drop_query->setTable(target_table_id.table_name);
         drop_query->kind = kind;
         drop_query->no_delay = no_delay;
         drop_query->if_exists = true;
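
executeDropQuery also shows the builder side of the new accessors: an internal DROP, for example for a materialized view's inner table, is assembled programmatically with setDatabase()/setTable() and marked if_exists because the target may already be gone. A self-contained sketch of that construction, with a simplified stand-in for ASTDropQuery:

#include <optional>
#include <string>

/// Simplified stand-in for ASTDropQuery; only the fields used below.
struct DropQuery
{
    enum class Kind { Drop, Truncate, Detach } kind = Kind::Drop;
    std::optional<std::string> database, table;
    bool no_delay = false;
    bool if_exists = false;

    void setDatabase(std::string name) { database = std::move(name); }
    void setTable(std::string name) { table = std::move(name); }
};

DropQuery makeInnerTableDrop(const std::string & db, const std::string & table,
                             DropQuery::Kind kind, bool no_delay)
{
    DropQuery drop;
    drop.setDatabase(db);       /// accessor form, as in the hunk above
    drop.setTable(table);
    drop.kind = kind;
    drop.no_delay = no_delay;
    drop.if_exists = true;      /// the inner table may have been dropped already
    return drop;
}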

View File

@@ -44,25 +44,25 @@ QueryPipeline InterpreterExistsQuery::executeImpl()
         if (exists_query->temporary)
         {
             result = static_cast<bool>(getContext()->tryResolveStorageID(
-                {"", exists_query->table}, Context::ResolveExternal));
+                {"", exists_query->getTable()}, Context::ResolveExternal));
         }
         else
         {
-            String database = getContext()->resolveDatabase(exists_query->database);
-            getContext()->checkAccess(AccessType::SHOW_TABLES, database, exists_query->table);
-            result = DatabaseCatalog::instance().isTableExist({database, exists_query->table}, getContext());
+            String database = getContext()->resolveDatabase(exists_query->getDatabase());
+            getContext()->checkAccess(AccessType::SHOW_TABLES, database, exists_query->getTable());
+            result = DatabaseCatalog::instance().isTableExist({database, exists_query->getTable()}, getContext());
         }
     }
     else if ((exists_query = query_ptr->as<ASTExistsViewQuery>()))
     {
-        String database = getContext()->resolveDatabase(exists_query->database);
-        getContext()->checkAccess(AccessType::SHOW_TABLES, database, exists_query->table);
-        auto table = DatabaseCatalog::instance().tryGetTable({database, exists_query->table}, getContext());
+        String database = getContext()->resolveDatabase(exists_query->getDatabase());
+        getContext()->checkAccess(AccessType::SHOW_TABLES, database, exists_query->getTable());
+        auto table = DatabaseCatalog::instance().tryGetTable({database, exists_query->getTable()}, getContext());
         result = table && table->isView();
     }
     else if ((exists_query = query_ptr->as<ASTExistsDatabaseQuery>()))
     {
-        String database = getContext()->resolveDatabase(exists_query->database);
+        String database = getContext()->resolveDatabase(exists_query->getDatabase());
         getContext()->checkAccess(AccessType::SHOW_DATABASES, database);
         result = DatabaseCatalog::instance().isDatabaseExist(database);
     }
@@ -70,9 +70,9 @@ QueryPipeline InterpreterExistsQuery::executeImpl()
     {
         if (exists_query->temporary)
             throw Exception("Temporary dictionaries are not possible.", ErrorCodes::SYNTAX_ERROR);
-        String database = getContext()->resolveDatabase(exists_query->database);
-        getContext()->checkAccess(AccessType::SHOW_DICTIONARIES, database, exists_query->table);
-        result = DatabaseCatalog::instance().isDictionaryExist({database, exists_query->table});
+        String database = getContext()->resolveDatabase(exists_query->getDatabase());
+        getContext()->checkAccess(AccessType::SHOW_DICTIONARIES, database, exists_query->getTable());
+        result = DatabaseCatalog::instance().isDictionaryExist({database, exists_query->getTable()});
     }

     return QueryPipeline(std::make_shared<SourceFromSingleChunk>(Block{{

View File

@@ -62,7 +62,18 @@ StoragePtr InterpreterInsertQuery::getTable(ASTInsertQuery & query)
         return table_function_ptr->execute(query.table_function, getContext(), table_function_ptr->getName());
     }

-    query.table_id = getContext()->resolveStorageID(query.table_id);
+    if (query.table_id)
+    {
+        query.table_id = getContext()->resolveStorageID(query.table_id);
+    }
+    else
+    {
+        /// Insert query parser does not fill table_id because table and
+        /// database can be parameters and be filled after parsing.
+        StorageID local_table_id(query.getDatabase(), query.getTable());
+        query.table_id = getContext()->resolveStorageID(local_table_id);
+    }
+
     return DatabaseCatalog::instance().getTable(query.table_id, getContext());
 }
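
The INSERT hunk is the one behavioral change in this group: the parser leaves table_id unset when the table or database name arrives as a query parameter, so resolution has to happen here, after parameter substitution. A condensed model of the fallback, with simplified types; resolve() stands in for Context::resolveStorageID:

#include <cassert>
#include <optional>
#include <string>

struct StorageID { std::string database, table; };

StorageID resolve(StorageID id, const std::string & current_database)
{
    if (id.database.empty())
        id.database = current_database;   /// stand-in for Context::resolveStorageID
    return id;
}

StorageID insertTarget(const std::optional<StorageID> & parsed_table_id,
                       const std::string & database, const std::string & table,
                       const std::string & current_database)
{
    if (parsed_table_id)                  /// parser saw a literal db.table
        return resolve(*parsed_table_id, current_database);
    /// Parameterized names are known only after substitution: build the id now.
    return resolve(StorageID{database, table}, current_database);
}

int main()
{
    assert(insertTarget(std::nullopt, "", "hits", "default").database == "default");
    assert(insertTarget(StorageID{"system", "one"}, "", "", "default").database == "system");
}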

View File

@@ -79,7 +79,7 @@ AccessRightsElements InterpreterOptimizeQuery::getRequiredAccess() const
 {
     const auto & optimize = query_ptr->as<const ASTOptimizeQuery &>();
     AccessRightsElements required_access;
-    required_access.emplace_back(AccessType::OPTIMIZE, optimize.database, optimize.table);
+    required_access.emplace_back(AccessType::OPTIMIZE, optimize.getDatabase(), optimize.getTable());
     return required_access;
 }

View File

@@ -64,26 +64,26 @@ QueryPipeline InterpreterShowCreateQuery::executeImpl()
         {
             if (!ast_create_query.isView())
                 throw Exception(ErrorCodes::BAD_ARGUMENTS, "{}.{} is not a VIEW",
-                                backQuote(ast_create_query.database), backQuote(ast_create_query.table));
+                                backQuote(ast_create_query.getDatabase()), backQuote(ast_create_query.getTable()));
         }
         else if (is_dictionary)
         {
             if (!ast_create_query.is_dictionary)
                 throw Exception(ErrorCodes::BAD_ARGUMENTS, "{}.{} is not a DICTIONARY",
-                                backQuote(ast_create_query.database), backQuote(ast_create_query.table));
+                                backQuote(ast_create_query.getDatabase()), backQuote(ast_create_query.getTable()));
         }
     }
     else if ((show_query = query_ptr->as<ASTShowCreateDatabaseQuery>()))
     {
         if (show_query->temporary)
             throw Exception("Temporary databases are not possible.", ErrorCodes::SYNTAX_ERROR);
-        show_query->database = getContext()->resolveDatabase(show_query->database);
-        getContext()->checkAccess(AccessType::SHOW_DATABASES, show_query->database);
-        create_query = DatabaseCatalog::instance().getDatabase(show_query->database)->getCreateDatabaseQuery();
+        show_query->setDatabase(getContext()->resolveDatabase(show_query->getDatabase()));
+        getContext()->checkAccess(AccessType::SHOW_DATABASES, show_query->getDatabase());
+        create_query = DatabaseCatalog::instance().getDatabase(show_query->getDatabase())->getCreateDatabaseQuery();
     }

     if (!create_query)
-        throw Exception("Unable to show the create query of " + show_query->table + ". Maybe it was created by the system.", ErrorCodes::THERE_IS_NO_QUERY);
+        throw Exception("Unable to show the create query of " + show_query->getTable() + ". Maybe it was created by the system.", ErrorCodes::THERE_IS_NO_QUERY);

     if (!getContext()->getSettingsRef().show_table_uuid_in_table_create_query_if_not_nil)
     {

View File

@ -217,12 +217,12 @@ BlockIO InterpreterSystemQuery::execute()
/// Make canonical query for simpler processing /// Make canonical query for simpler processing
if (query.type == Type::RELOAD_DICTIONARY) if (query.type == Type::RELOAD_DICTIONARY)
{ {
if (!query.database.empty()) if (query.database)
query.table = query.database + "." + query.table; query.setTable(query.getDatabase() + "." + query.getTable());
} }
else if (!query.table.empty()) else if (query.table)
{ {
table_id = getContext()->resolveStorageID(StorageID(query.database, query.table), Context::ResolveOrdinary); table_id = getContext()->resolveStorageID(StorageID(query.getDatabase(), query.getTable()), Context::ResolveOrdinary);
} }
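The canonicalization above folds an optional database into the single qualified-name string by which the dictionary loader addresses dictionaries; a sketch of the resulting form:

    // SYSTEM RELOAD DICTIONARY db1.dict  ->  getTable() == "db1.dict"
    // SYSTEM RELOAD DICTIONARY dict      ->  getTable() == "dict"
    if (query.database)
        query.setTable(query.getDatabase() + "." + query.getTable());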
@ -302,7 +302,7 @@ BlockIO InterpreterSystemQuery::execute()
getContext()->checkAccess(AccessType::SYSTEM_RELOAD_DICTIONARY); getContext()->checkAccess(AccessType::SYSTEM_RELOAD_DICTIONARY);
auto & external_dictionaries_loader = system_context->getExternalDictionariesLoader(); auto & external_dictionaries_loader = system_context->getExternalDictionariesLoader();
external_dictionaries_loader.reloadDictionary(query.table, getContext()); external_dictionaries_loader.reloadDictionary(query.getTable(), getContext());
ExternalDictionariesLoader::resetAll(); ExternalDictionariesLoader::resetAll();
break; break;
@ -594,10 +594,10 @@ void InterpreterSystemQuery::dropReplica(ASTSystemQuery & query)
if (!dropReplicaImpl(query, table)) if (!dropReplicaImpl(query, table))
throw Exception(ErrorCodes::BAD_ARGUMENTS, table_is_not_replicated.data(), table_id.getNameForLogs()); throw Exception(ErrorCodes::BAD_ARGUMENTS, table_is_not_replicated.data(), table_id.getNameForLogs());
} }
else if (!query.database.empty()) else if (query.database)
{ {
getContext()->checkAccess(AccessType::SYSTEM_DROP_REPLICA, query.database); getContext()->checkAccess(AccessType::SYSTEM_DROP_REPLICA, query.getDatabase());
DatabasePtr database = DatabaseCatalog::instance().getDatabase(query.database); DatabasePtr database = DatabaseCatalog::instance().getDatabase(query.getDatabase());
for (auto iterator = database->getTablesIterator(getContext()); iterator->isValid(); iterator->next()) for (auto iterator = database->getTablesIterator(getContext()); iterator->isValid(); iterator->next())
dropReplicaImpl(query, iterator->table()); dropReplicaImpl(query, iterator->table());
LOG_TRACE(log, "Dropped replica {} from database {}", query.replica, backQuoteIfNeed(database->getDatabaseName())); LOG_TRACE(log, "Dropped replica {} from database {}", query.replica, backQuoteIfNeed(database->getDatabaseName()));
@ -790,84 +790,84 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster()
case Type::STOP_MERGES: [[fallthrough]]; case Type::STOP_MERGES: [[fallthrough]];
case Type::START_MERGES: case Type::START_MERGES:
{ {
if (query.table.empty()) if (!query.table)
required_access.emplace_back(AccessType::SYSTEM_MERGES); required_access.emplace_back(AccessType::SYSTEM_MERGES);
else else
required_access.emplace_back(AccessType::SYSTEM_MERGES, query.database, query.table); required_access.emplace_back(AccessType::SYSTEM_MERGES, query.getDatabase(), query.getTable());
break; break;
} }
case Type::STOP_TTL_MERGES: [[fallthrough]]; case Type::STOP_TTL_MERGES: [[fallthrough]];
case Type::START_TTL_MERGES: case Type::START_TTL_MERGES:
{ {
if (query.table.empty()) if (!query.table)
required_access.emplace_back(AccessType::SYSTEM_TTL_MERGES); required_access.emplace_back(AccessType::SYSTEM_TTL_MERGES);
else else
required_access.emplace_back(AccessType::SYSTEM_TTL_MERGES, query.database, query.table); required_access.emplace_back(AccessType::SYSTEM_TTL_MERGES, query.getDatabase(), query.getTable());
break; break;
} }
case Type::STOP_MOVES: [[fallthrough]]; case Type::STOP_MOVES: [[fallthrough]];
case Type::START_MOVES: case Type::START_MOVES:
{ {
if (query.table.empty()) if (!query.table)
required_access.emplace_back(AccessType::SYSTEM_MOVES); required_access.emplace_back(AccessType::SYSTEM_MOVES);
else else
required_access.emplace_back(AccessType::SYSTEM_MOVES, query.database, query.table); required_access.emplace_back(AccessType::SYSTEM_MOVES, query.getDatabase(), query.getTable());
break; break;
} }
case Type::STOP_FETCHES: [[fallthrough]]; case Type::STOP_FETCHES: [[fallthrough]];
case Type::START_FETCHES: case Type::START_FETCHES:
{ {
if (query.table.empty()) if (!query.table)
required_access.emplace_back(AccessType::SYSTEM_FETCHES); required_access.emplace_back(AccessType::SYSTEM_FETCHES);
else else
required_access.emplace_back(AccessType::SYSTEM_FETCHES, query.database, query.table); required_access.emplace_back(AccessType::SYSTEM_FETCHES, query.getDatabase(), query.getTable());
break; break;
} }
case Type::STOP_DISTRIBUTED_SENDS: [[fallthrough]]; case Type::STOP_DISTRIBUTED_SENDS: [[fallthrough]];
case Type::START_DISTRIBUTED_SENDS: case Type::START_DISTRIBUTED_SENDS:
{ {
if (query.table.empty()) if (!query.table)
required_access.emplace_back(AccessType::SYSTEM_DISTRIBUTED_SENDS); required_access.emplace_back(AccessType::SYSTEM_DISTRIBUTED_SENDS);
else else
required_access.emplace_back(AccessType::SYSTEM_DISTRIBUTED_SENDS, query.database, query.table); required_access.emplace_back(AccessType::SYSTEM_DISTRIBUTED_SENDS, query.getDatabase(), query.getTable());
break; break;
} }
case Type::STOP_REPLICATED_SENDS: [[fallthrough]]; case Type::STOP_REPLICATED_SENDS: [[fallthrough]];
case Type::START_REPLICATED_SENDS: case Type::START_REPLICATED_SENDS:
{ {
if (query.table.empty()) if (!query.table)
required_access.emplace_back(AccessType::SYSTEM_REPLICATED_SENDS); required_access.emplace_back(AccessType::SYSTEM_REPLICATED_SENDS);
else else
required_access.emplace_back(AccessType::SYSTEM_REPLICATED_SENDS, query.database, query.table); required_access.emplace_back(AccessType::SYSTEM_REPLICATED_SENDS, query.getDatabase(), query.getTable());
break; break;
} }
case Type::STOP_REPLICATION_QUEUES: [[fallthrough]]; case Type::STOP_REPLICATION_QUEUES: [[fallthrough]];
case Type::START_REPLICATION_QUEUES: case Type::START_REPLICATION_QUEUES:
{ {
if (query.table.empty()) if (!query.table)
required_access.emplace_back(AccessType::SYSTEM_REPLICATION_QUEUES); required_access.emplace_back(AccessType::SYSTEM_REPLICATION_QUEUES);
else else
required_access.emplace_back(AccessType::SYSTEM_REPLICATION_QUEUES, query.database, query.table); required_access.emplace_back(AccessType::SYSTEM_REPLICATION_QUEUES, query.getDatabase(), query.getTable());
break; break;
} }
case Type::DROP_REPLICA: case Type::DROP_REPLICA:
{ {
required_access.emplace_back(AccessType::SYSTEM_DROP_REPLICA, query.database, query.table); required_access.emplace_back(AccessType::SYSTEM_DROP_REPLICA, query.getDatabase(), query.getTable());
break; break;
} }
case Type::RESTORE_REPLICA: case Type::RESTORE_REPLICA:
{ {
required_access.emplace_back(AccessType::SYSTEM_RESTORE_REPLICA, query.database, query.table); required_access.emplace_back(AccessType::SYSTEM_RESTORE_REPLICA, query.getDatabase(), query.getTable());
break; break;
} }
case Type::SYNC_REPLICA: case Type::SYNC_REPLICA:
{ {
required_access.emplace_back(AccessType::SYSTEM_SYNC_REPLICA, query.database, query.table); required_access.emplace_back(AccessType::SYSTEM_SYNC_REPLICA, query.getDatabase(), query.getTable());
break; break;
} }
case Type::RESTART_REPLICA: case Type::RESTART_REPLICA:
{ {
required_access.emplace_back(AccessType::SYSTEM_RESTART_REPLICA, query.database, query.table); required_access.emplace_back(AccessType::SYSTEM_RESTART_REPLICA, query.getDatabase(), query.getTable());
break; break;
} }
case Type::RESTART_REPLICAS: case Type::RESTART_REPLICAS:
@ -877,7 +877,7 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster()
} }
case Type::FLUSH_DISTRIBUTED: case Type::FLUSH_DISTRIBUTED:
{ {
required_access.emplace_back(AccessType::SYSTEM_FLUSH_DISTRIBUTED, query.database, query.table); required_access.emplace_back(AccessType::SYSTEM_FLUSH_DISTRIBUTED, query.getDatabase(), query.getTable());
break; break;
} }
case Type::FLUSH_LOGS: case Type::FLUSH_LOGS:

View File

@ -421,23 +421,42 @@ static ASTPtr getOrderByPolicy(
void InterpreterCreateImpl::validate(const InterpreterCreateImpl::TQuery & create_query, ContextPtr) void InterpreterCreateImpl::validate(const InterpreterCreateImpl::TQuery & create_query, ContextPtr)
{ {
/// This is dangerous, because the like table may not exist in ClickHouse if (!create_query.like_table)
if (create_query.like_table) {
throw Exception("Cannot convert create like statement to ClickHouse SQL", ErrorCodes::NOT_IMPLEMENTED); bool missing_columns_definition = true;
if (create_query.columns_list)
const auto & create_defines = create_query.columns_list->as<MySQLParser::ASTCreateDefines>(); {
const auto & create_defines = create_query.columns_list->as<MySQLParser::ASTCreateDefines>();
if (!create_defines || !create_defines->columns || create_defines->columns->children.empty()) if (create_defines && create_defines->columns && !create_defines->columns->children.empty())
throw Exception("Missing definition of columns.", ErrorCodes::EMPTY_LIST_OF_COLUMNS_PASSED); missing_columns_definition = false;
}
if (missing_columns_definition)
throw Exception("Missing definition of columns.", ErrorCodes::EMPTY_LIST_OF_COLUMNS_PASSED);
}
} }
ASTs InterpreterCreateImpl::getRewrittenQueries( ASTs InterpreterCreateImpl::getRewrittenQueries(
const TQuery & create_query, ContextPtr context, const String & mapped_to_database, const String & mysql_database) const TQuery & create_query, ContextPtr context, const String & mapped_to_database, const String & mysql_database)
{ {
auto rewritten_query = std::make_shared<ASTCreateQuery>();
if (resolveDatabase(create_query.database, mysql_database, mapped_to_database, context) != mapped_to_database) if (resolveDatabase(create_query.database, mysql_database, mapped_to_database, context) != mapped_to_database)
return {}; return {};
if (create_query.like_table)
{
auto * table_like = create_query.like_table->as<ASTTableIdentifier>();
if (table_like->compound() && table_like->getTableId().database_name != mysql_database)
return {};
String table_name = table_like->shortName();
ASTPtr rewritten_create_ast = DatabaseCatalog::instance().getDatabase(mapped_to_database)->getCreateTableQuery(table_name, context);
auto * create_ptr = rewritten_create_ast->as<ASTCreateQuery>();
create_ptr->setDatabase(mapped_to_database);
create_ptr->setTable(create_query.table);
create_ptr->uuid = UUIDHelpers::generateV4();
create_ptr->if_not_exists = create_query.if_not_exists;
return ASTs{rewritten_create_ast};
}
auto rewritten_query = std::make_shared<ASTCreateQuery>();
const auto & create_defines = create_query.columns_list->as<MySQLParser::ASTCreateDefines>(); const auto & create_defines = create_query.columns_list->as<MySQLParser::ASTCreateDefines>();
NamesAndTypesList columns_name_and_type = getColumnsList(create_defines->columns); NamesAndTypesList columns_name_and_type = getColumnsList(create_defines->columns);
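For intuition, a hedged example of what the new like_table branch above produces (table names are illustrative): a MySQL CREATE TABLE t2 LIKE t1 is rewritten by cloning the stored CREATE of the already-mapped t1 and retargeting it, rather than translating the MySQL statement column by column:

    ASTPtr like_create = DatabaseCatalog::instance()
        .getDatabase(mapped_to_database)
        ->getCreateTableQuery("t1", context);      // CREATE of the mapped table
    auto * create = like_create->as<ASTCreateQuery>();
    create->setDatabase(mapped_to_database);
    create->setTable("t2");                        // retarget to the new name
    create->uuid = UUIDHelpers::generateV4();      // the copy needs its own UUID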
@ -494,8 +513,8 @@ ASTs InterpreterCreateImpl::getRewrittenQueries(
storage->set(storage->engine, makeASTFunction("ReplacingMergeTree", std::make_shared<ASTIdentifier>(version_column_name))); storage->set(storage->engine, makeASTFunction("ReplacingMergeTree", std::make_shared<ASTIdentifier>(version_column_name)));
rewritten_query->database = mapped_to_database; rewritten_query->setDatabase(mapped_to_database);
rewritten_query->table = create_query.table; rewritten_query->setTable(create_query.table);
rewritten_query->if_not_exists = create_query.if_not_exists; rewritten_query->if_not_exists = create_query.if_not_exists;
rewritten_query->set(rewritten_query->storage, storage); rewritten_query->set(rewritten_query->storage, storage);
rewritten_query->set(rewritten_query->columns_list, columns); rewritten_query->set(rewritten_query->columns_list, columns);
@ -510,14 +529,14 @@ void InterpreterDropImpl::validate(const InterpreterDropImpl::TQuery & /*query*/
ASTs InterpreterDropImpl::getRewrittenQueries( ASTs InterpreterDropImpl::getRewrittenQueries(
const InterpreterDropImpl::TQuery & drop_query, ContextPtr context, const String & mapped_to_database, const String & mysql_database) const InterpreterDropImpl::TQuery & drop_query, ContextPtr context, const String & mapped_to_database, const String & mysql_database)
{ {
const auto & database_name = resolveDatabase(drop_query.database, mysql_database, mapped_to_database, context); const auto & database_name = resolveDatabase(drop_query.getDatabase(), mysql_database, mapped_to_database, context);
/// Skip drop database|view|dictionary /// Skip drop database|view|dictionary
if (database_name != mapped_to_database || drop_query.table.empty() || drop_query.is_view || drop_query.is_dictionary) if (database_name != mapped_to_database || !drop_query.table || drop_query.is_view || drop_query.is_dictionary)
return {}; return {};
ASTPtr rewritten_query = drop_query.clone(); ASTPtr rewritten_query = drop_query.clone();
rewritten_query->as<ASTDropQuery>()->database = mapped_to_database; rewritten_query->as<ASTDropQuery>()->setDatabase(mapped_to_database);
return ASTs{rewritten_query}; return ASTs{rewritten_query};
} }
@ -569,8 +588,8 @@ ASTs InterpreterAlterImpl::getRewrittenQueries(
auto rewritten_alter_query = std::make_shared<ASTAlterQuery>(); auto rewritten_alter_query = std::make_shared<ASTAlterQuery>();
auto rewritten_rename_query = std::make_shared<ASTRenameQuery>(); auto rewritten_rename_query = std::make_shared<ASTRenameQuery>();
rewritten_alter_query->database = mapped_to_database; rewritten_alter_query->setDatabase(mapped_to_database);
rewritten_alter_query->table = alter_query.table; rewritten_alter_query->setTable(alter_query.table);
rewritten_alter_query->alter_object = ASTAlterQuery::AlterObjectType::TABLE; rewritten_alter_query->alter_object = ASTAlterQuery::AlterObjectType::TABLE;
rewritten_alter_query->set(rewritten_alter_query->command_list, std::make_shared<ASTExpressionList>()); rewritten_alter_query->set(rewritten_alter_query->command_list, std::make_shared<ASTExpressionList>());

View File

@ -18,8 +18,8 @@ namespace ErrorCodes
StorageID::StorageID(const ASTQueryWithTableAndOutput & query) StorageID::StorageID(const ASTQueryWithTableAndOutput & query)
{ {
database_name = query.database; database_name = query.getDatabase();
table_name = query.table; table_name = query.getTable();
uuid = query.uuid; uuid = query.uuid;
assertNotEmpty(); assertNotEmpty();
} }

View File

@ -595,8 +595,8 @@ ASTPtr SystemLog<LogElement>::getCreateTableQuery()
{ {
auto create = std::make_shared<ASTCreateQuery>(); auto create = std::make_shared<ASTCreateQuery>();
create->database = table_id.database_name; create->setDatabase(table_id.database_name);
create->table = table_id.table_name; create->setTable(table_id.table_name);
auto ordinary_columns = LogElement::getNamesAndTypes(); auto ordinary_columns = LogElement::getNamesAndTypes();
auto alias_columns = LogElement::getNamesAndAliases(); auto alias_columns = LogElement::getNamesAndAliases();

View File

@ -431,12 +431,6 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
InterpreterSetQuery(query_with_output->settings_ast, context).executeForCurrentContext(); InterpreterSetQuery(query_with_output->settings_ast, context).executeForCurrentContext();
} }
if (const auto * query_with_table_output = dynamic_cast<const ASTQueryWithTableAndOutput *>(ast.get()))
{
query_database = query_with_table_output->database;
query_table = query_with_table_output->table;
}
if (auto * create_query = ast->as<ASTCreateQuery>()) if (auto * create_query = ast->as<ASTCreateQuery>())
{ {
if (create_query->select) if (create_query->select)
@ -510,6 +504,12 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
query = serializeAST(*ast); query = serializeAST(*ast);
} }
if (const auto * query_with_table_output = dynamic_cast<const ASTQueryWithTableAndOutput *>(ast.get()))
{
query_database = query_with_table_output->getDatabase();
query_table = query_with_table_output->getTable();
}
/// MUST go before any modification (except for prepared statements, /// MUST go before any modification (except for prepared statements,
/// since it substitute parameters and w/o them query does not contain /// since it substitute parameters and w/o them query does not contain
/// parameters), to keep query as-is in query_log and server log. /// parameters), to keep query as-is in query_log and server log.
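The extraction is re-added below its old position so that it runs after query parameters have been substituted; otherwise getDatabase()/getTable() could still see unbound {db:Identifier}/{table:Identifier} nodes. A hedged ordering sketch:

    ReplaceQueryParameterVisitor visitor(context->getQueryParameters());
    visitor.visit(ast);   // parameters become real identifiers
    if (const auto * q = dynamic_cast<const ASTQueryWithTableAndOutput *>(ast.get()))
        query_table = q->getTable();   // now yields the substituted name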

View File

@ -39,7 +39,7 @@ static void executeCreateQuery(
parser, query.data(), query.data() + query.size(), "in file " + file_name, 0, context->getSettingsRef().max_parser_depth); parser, query.data(), query.data() + query.size(), "in file " + file_name, 0, context->getSettingsRef().max_parser_depth);
auto & ast_create_query = ast->as<ASTCreateQuery &>(); auto & ast_create_query = ast->as<ASTCreateQuery &>();
ast_create_query.database = database; ast_create_query.setDatabase(database);
InterpreterCreateQuery interpreter(ast, context); InterpreterCreateQuery interpreter(ast, context);
interpreter.setInternal(true); interpreter.setInternal(true);

View File

@ -485,7 +485,7 @@ bool ASTAlterQuery::isDropPartitionAlter() const
/** Get the text that identifies this element. */ /** Get the text that identifies this element. */
String ASTAlterQuery::getID(char delim) const String ASTAlterQuery::getID(char delim) const
{ {
return "AlterQuery" + (delim + database) + delim + table; return "AlterQuery" + (delim + getDatabase()) + delim + getTable();
} }
ASTPtr ASTAlterQuery::clone() const ASTPtr ASTAlterQuery::clone() const
@ -523,18 +523,18 @@ void ASTAlterQuery::formatQueryImpl(const FormatSettings & settings, FormatState
settings.ostr << (settings.hilite ? hilite_none : ""); settings.ostr << (settings.hilite ? hilite_none : "");
if (!table.empty()) if (table)
{ {
if (!database.empty()) if (database)
{ {
settings.ostr << indent_str << backQuoteIfNeed(database); settings.ostr << indent_str << backQuoteIfNeed(getDatabase());
settings.ostr << "."; settings.ostr << ".";
} }
settings.ostr << indent_str << backQuoteIfNeed(table); settings.ostr << indent_str << backQuoteIfNeed(getTable());
} }
else if (alter_object == AlterObjectType::DATABASE && !database.empty()) else if (alter_object == AlterObjectType::DATABASE && database)
{ {
settings.ostr << indent_str << backQuoteIfNeed(database); settings.ostr << indent_str << backQuoteIfNeed(getDatabase());
} }
formatOnCluster(settings); formatOnCluster(settings);

View File

@ -12,13 +12,14 @@ struct ASTCheckQuery : public ASTQueryWithTableAndOutput
ASTPtr partition; ASTPtr partition;
/** Get the text that identifies this element. */ /** Get the text that identifies this element. */
String getID(char delim) const override { return "CheckQuery" + (delim + database) + delim + table; } String getID(char delim) const override { return "CheckQuery" + (delim + getDatabase()) + delim + getTable(); }
ASTPtr clone() const override ASTPtr clone() const override
{ {
auto res = std::make_shared<ASTCheckQuery>(*this); auto res = std::make_shared<ASTCheckQuery>(*this);
res->children.clear(); res->children.clear();
cloneOutputOptions(*res); cloneOutputOptions(*res);
cloneTableOptions(*res);
return res; return res;
} }
@ -32,14 +33,14 @@ protected:
settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "CHECK TABLE " << (settings.hilite ? hilite_none : ""); settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "CHECK TABLE " << (settings.hilite ? hilite_none : "");
if (!table.empty()) if (table)
{ {
if (!database.empty()) if (database)
{ {
settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << backQuoteIfNeed(database) << (settings.hilite ? hilite_none : ""); settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << backQuoteIfNeed(getDatabase()) << (settings.hilite ? hilite_none : "");
settings.ostr << "."; settings.ostr << ".";
} }
settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << backQuoteIfNeed(table) << (settings.hilite ? hilite_none : ""); settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << backQuoteIfNeed(getTable()) << (settings.hilite ? hilite_none : "");
} }
if (partition) if (partition)

View File

@ -212,6 +212,7 @@ ASTPtr ASTCreateQuery::clone() const
res->set(res->comment, comment->clone()); res->set(res->comment, comment->clone());
cloneOutputOptions(*res); cloneOutputOptions(*res);
cloneTableOptions(*res);
return res; return res;
} }
@ -220,13 +221,13 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat
{ {
frame.need_parens = false; frame.need_parens = false;
if (!database.empty() && table.empty()) if (database && !table)
{ {
settings.ostr << (settings.hilite ? hilite_keyword : "") settings.ostr << (settings.hilite ? hilite_keyword : "")
<< (attach ? "ATTACH DATABASE " : "CREATE DATABASE ") << (attach ? "ATTACH DATABASE " : "CREATE DATABASE ")
<< (if_not_exists ? "IF NOT EXISTS " : "") << (if_not_exists ? "IF NOT EXISTS " : "")
<< (settings.hilite ? hilite_none : "") << (settings.hilite ? hilite_none : "")
<< backQuoteIfNeed(database); << backQuoteIfNeed(getDatabase());
if (uuid != UUIDHelpers::Nil) if (uuid != UUIDHelpers::Nil)
{ {
@ -275,7 +276,7 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat
<< what << " " << what << " "
<< (if_not_exists ? "IF NOT EXISTS " : "") << (if_not_exists ? "IF NOT EXISTS " : "")
<< (settings.hilite ? hilite_none : "") << (settings.hilite ? hilite_none : "")
<< (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); << (database ? backQuoteIfNeed(getDatabase()) + "." : "") << backQuoteIfNeed(getTable());
if (uuid != UUIDHelpers::Nil) if (uuid != UUIDHelpers::Nil)
settings.ostr << (settings.hilite ? hilite_keyword : "") << " UUID " << (settings.hilite ? hilite_none : "") settings.ostr << (settings.hilite ? hilite_keyword : "") << " UUID " << (settings.hilite ? hilite_none : "")
@ -316,7 +317,7 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat
/// Always DICTIONARY /// Always DICTIONARY
settings.ostr << (settings.hilite ? hilite_keyword : "") << action << " DICTIONARY " settings.ostr << (settings.hilite ? hilite_keyword : "") << action << " DICTIONARY "
<< (if_not_exists ? "IF NOT EXISTS " : "") << (settings.hilite ? hilite_none : "") << (if_not_exists ? "IF NOT EXISTS " : "") << (settings.hilite ? hilite_none : "")
<< (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); << (database ? backQuoteIfNeed(getDatabase()) + "." : "") << backQuoteIfNeed(getTable());
if (uuid != UUIDHelpers::Nil) if (uuid != UUIDHelpers::Nil)
settings.ostr << (settings.hilite ? hilite_keyword : "") << " UUID " << (settings.hilite ? hilite_none : "") settings.ostr << (settings.hilite ? hilite_keyword : "") << " UUID " << (settings.hilite ? hilite_none : "")
<< quoteString(toString(uuid)); << quoteString(toString(uuid));

View File

@ -91,7 +91,7 @@ public:
bool create_or_replace{false}; bool create_or_replace{false};
/** Get the text that identifies this element. */ /** Get the text that identifies this element. */
String getID(char delim) const override { return (attach ? "AttachQuery" : "CreateQuery") + (delim + database) + delim + table; } String getID(char delim) const override { return (attach ? "AttachQuery" : "CreateQuery") + (delim + getDatabase()) + delim + getTable(); }
ASTPtr clone() const override; ASTPtr clone() const override;

View File

@ -15,11 +15,11 @@ namespace ErrorCodes
String ASTDropQuery::getID(char delim) const String ASTDropQuery::getID(char delim) const
{ {
if (kind == ASTDropQuery::Kind::Drop) if (kind == ASTDropQuery::Kind::Drop)
return "DropQuery" + (delim + database) + delim + table; return "DropQuery" + (delim + getDatabase()) + delim + getTable();
else if (kind == ASTDropQuery::Kind::Detach) else if (kind == ASTDropQuery::Kind::Detach)
return "DetachQuery" + (delim + database) + delim + table; return "DetachQuery" + (delim + getDatabase()) + delim + getTable();
else if (kind == ASTDropQuery::Kind::Truncate) else if (kind == ASTDropQuery::Kind::Truncate)
return "TruncateQuery" + (delim + database) + delim + table; return "TruncateQuery" + (delim + getDatabase()) + delim + getTable();
else else
throw Exception("Not supported kind of drop query.", ErrorCodes::SYNTAX_ERROR); throw Exception("Not supported kind of drop query.", ErrorCodes::SYNTAX_ERROR);
} }
@ -28,6 +28,7 @@ ASTPtr ASTDropQuery::clone() const
{ {
auto res = std::make_shared<ASTDropQuery>(*this); auto res = std::make_shared<ASTDropQuery>(*this);
cloneOutputOptions(*res); cloneOutputOptions(*res);
cloneTableOptions(*res);
return res; return res;
} }
@ -46,7 +47,8 @@ void ASTDropQuery::formatQueryImpl(const FormatSettings & settings, FormatState
if (temporary) if (temporary)
settings.ostr << "TEMPORARY "; settings.ostr << "TEMPORARY ";
if (table.empty() && !database.empty())
if (!table && database)
settings.ostr << "DATABASE "; settings.ostr << "DATABASE ";
else if (is_dictionary) else if (is_dictionary)
settings.ostr << "DICTIONARY "; settings.ostr << "DICTIONARY ";
@ -60,10 +62,10 @@ void ASTDropQuery::formatQueryImpl(const FormatSettings & settings, FormatState
settings.ostr << (settings.hilite ? hilite_none : ""); settings.ostr << (settings.hilite ? hilite_none : "");
if (table.empty() && !database.empty()) if (!table && database)
settings.ostr << backQuoteIfNeed(database); settings.ostr << backQuoteIfNeed(getDatabase());
else else
settings.ostr << (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); settings.ostr << (database ? backQuoteIfNeed(getDatabase()) + "." : "") << backQuoteIfNeed(getTable());
formatOnCluster(settings); formatOnCluster(settings);

View File

@ -201,6 +201,40 @@ String ASTTableIdentifier::getDatabaseName() const
else return {}; else return {};
} }
ASTPtr ASTTableIdentifier::getTable() const
{
if (name_parts.size() == 2)
{
if (!name_parts[1].empty())
return std::make_shared<ASTIdentifier>(name_parts[1]);
if (name_parts[0].empty())
return std::make_shared<ASTIdentifier>("", children[1]->clone());
else
return std::make_shared<ASTIdentifier>("", children[0]->clone());
}
else if (name_parts.size() == 1)
{
if (name_parts[0].empty())
return std::make_shared<ASTIdentifier>("", children[0]->clone());
else
return std::make_shared<ASTIdentifier>(name_parts[0]);
}
else return {};
}
ASTPtr ASTTableIdentifier::getDatabase() const
{
if (name_parts.size() == 2)
{
if (name_parts[0].empty())
return std::make_shared<ASTIdentifier>("", children[0]->clone());
else
return std::make_shared<ASTIdentifier>(name_parts[0]);
}
else return {};
}
void ASTTableIdentifier::resetTable(const String & database_name, const String & table_name) void ASTTableIdentifier::resetTable(const String & database_name, const String & table_name)
{ {
auto identifier = std::make_shared<ASTTableIdentifier>(database_name, table_name); auto identifier = std::make_shared<ASTTableIdentifier>(database_name, table_name);
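A short behavior sketch for the two accessors above, assuming a plain compound identifier parsed from db.tbl (plain parts live in name_parts; parameterized parts fall back to the child subtrees):

    ASTPtr ident = std::make_shared<ASTTableIdentifier>("db", "tbl");
    auto * table_ident = ident->as<ASTTableIdentifier>();
    ASTPtr db  = table_ident->getDatabase();   // ASTIdentifier holding "db"
    ASTPtr tbl = table_ident->getTable();      // ASTIdentifier holding "tbl"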

View File

@ -61,6 +61,7 @@ protected:
private: private:
using ASTWithAlias::children; /// ASTIdentifier is child free using ASTWithAlias::children; /// ASTIdentifier is child free
friend class ASTTableIdentifier;
friend class ReplaceQueryParameterVisitor; friend class ReplaceQueryParameterVisitor;
friend struct IdentifierSemantic; friend struct IdentifierSemantic;
friend void setIdentifierSpecial(ASTPtr & ast); friend void setIdentifierSpecial(ASTPtr & ast);
@ -83,6 +84,9 @@ public:
StorageID getTableId() const; StorageID getTableId() const;
String getDatabaseName() const; String getDatabaseName() const;
ASTPtr getTable() const;
ASTPtr getDatabase() const;
// FIXME: used only when it's needed to rewrite distributed table name to real remote table name. // FIXME: used only when it's needed to rewrite distributed table name to real remote table name.
void resetTable(const String & database_name, const String & table_name); // TODO(ilezhankin): get rid of this void resetTable(const String & database_name, const String & table_name); // TODO(ilezhankin): get rid of this

View File

@ -1,4 +1,5 @@
#include <iomanip> #include <iomanip>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTInsertQuery.h> #include <Parsers/ASTInsertQuery.h>
#include <Parsers/ASTFunction.h> #include <Parsers/ASTFunction.h>
#include <Parsers/ASTLiteral.h> #include <Parsers/ASTLiteral.h>
@ -15,6 +16,35 @@ namespace ErrorCodes
extern const int INVALID_USAGE_OF_INPUT; extern const int INVALID_USAGE_OF_INPUT;
} }
String ASTInsertQuery::getDatabase() const
{
String name;
tryGetIdentifierNameInto(database, name);
return name;
}
String ASTInsertQuery::getTable() const
{
String name;
tryGetIdentifierNameInto(table, name);
return name;
}
void ASTInsertQuery::setDatabase(const String & name)
{
if (name.empty())
database.reset();
else
database = std::make_shared<ASTIdentifier>(name);
}
void ASTInsertQuery::setTable(const String & name)
{
if (name.empty())
table.reset();
else
table = std::make_shared<ASTIdentifier>(name);
}
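A minimal round trip with the new accessors, a sketch only; the setters wrap plain strings in ASTIdentifier nodes and reset the pointer on empty input:

    ASTInsertQuery insert;
    insert.setDatabase("db1");
    insert.setTable("t1");
    // insert.getDatabase() == "db1", insert.getTable() == "t1"
    insert.setDatabase("");   // resets the identifier
    // insert.getDatabase() now returns an empty string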
void ASTInsertQuery::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const void ASTInsertQuery::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
{ {
@ -31,9 +61,16 @@ void ASTInsertQuery::formatImpl(const FormatSettings & settings, FormatState & s
partition_by->formatImpl(settings, state, frame); partition_by->formatImpl(settings, state, frame);
} }
} }
else else if (table_id)
{
settings.ostr << (settings.hilite ? hilite_none : "") settings.ostr << (settings.hilite ? hilite_none : "")
<< (!table_id.database_name.empty() ? backQuoteIfNeed(table_id.database_name) + "." : "") << backQuoteIfNeed(table_id.table_name); << (!table_id.database_name.empty() ? backQuoteIfNeed(table_id.database_name) + "." : "") << backQuoteIfNeed(table_id.table_name);
}
else
{
settings.ostr << (settings.hilite ? hilite_none : "")
<< (database ? backQuoteIfNeed(getDatabase()) + "." : "") << backQuoteIfNeed(getTable());
}
if (columns) if (columns)
{ {

View File

@ -13,6 +13,10 @@ class ASTInsertQuery : public IAST
{ {
public: public:
StorageID table_id = StorageID::createEmpty(); StorageID table_id = StorageID::createEmpty();
ASTPtr database;
ASTPtr table;
ASTPtr columns; ASTPtr columns;
String format; String format;
ASTPtr table_function; ASTPtr table_function;
@ -31,6 +35,12 @@ public:
/// Data from buffer to insert after inlined one - may be nullptr. /// Data from buffer to insert after inlined one - may be nullptr.
ReadBuffer * tail = nullptr; ReadBuffer * tail = nullptr;
String getDatabase() const;
String getTable() const;
void setDatabase(const String & name);
void setTable(const String & name);
bool hasInlinedData() const { return data || tail; } bool hasInlinedData() const { return data || tail; }
/// Try to find table function input() in SELECT part /// Try to find table function input() in SELECT part
@ -44,6 +54,8 @@ public:
auto res = std::make_shared<ASTInsertQuery>(*this); auto res = std::make_shared<ASTInsertQuery>(*this);
res->children.clear(); res->children.clear();
if (database) { res->database = database->clone(); res->children.push_back(res->database); }
if (table) { res->table = table->clone(); res->children.push_back(res->table); }
if (columns) { res->columns = columns->clone(); res->children.push_back(res->columns); } if (columns) { res->columns = columns->clone(); res->children.push_back(res->columns); }
if (select) { res->select = select->clone(); res->children.push_back(res->select); } if (select) { res->select = select->clone(); res->children.push_back(res->select); }
if (watch) { res->watch = watch->clone(); res->children.push_back(res->watch); } if (watch) { res->watch = watch->clone(); res->children.push_back(res->watch); }

View File

@ -8,7 +8,7 @@ namespace DB
void ASTOptimizeQuery::formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const void ASTOptimizeQuery::formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
{ {
settings.ostr << (settings.hilite ? hilite_keyword : "") << "OPTIMIZE TABLE " << (settings.hilite ? hilite_none : "") settings.ostr << (settings.hilite ? hilite_keyword : "") << "OPTIMIZE TABLE " << (settings.hilite ? hilite_none : "")
<< (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); << (database ? backQuoteIfNeed(getDatabase()) + "." : "") << backQuoteIfNeed(getTable());
formatOnCluster(settings); formatOnCluster(settings);

View File

@ -25,7 +25,7 @@ public:
/** Get the text that identifies this element. */ /** Get the text that identifies this element. */
String getID(char delim) const override String getID(char delim) const override
{ {
return "OptimizeQuery" + (delim + database) + delim + table + (final ? "_final" : "") + (deduplicate ? "_deduplicate" : ""); return "OptimizeQuery" + (delim + getDatabase()) + delim + getTable() + (final ? "_final" : "") + (deduplicate ? "_deduplicate" : "");
} }
ASTPtr clone() const override ASTPtr clone() const override

View File

@ -39,8 +39,8 @@ protected:
T & query = static_cast<T &>(*query_ptr); T & query = static_cast<T &>(*query_ptr);
query.cluster.clear(); query.cluster.clear();
if (query.database.empty()) if (!query.database)
query.database = new_database; query.setDatabase(new_database);
return query_ptr; return query_ptr;
} }

View File

@ -1,3 +1,4 @@
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTQueryWithTableAndOutput.h> #include <Parsers/ASTQueryWithTableAndOutput.h>
#include <Common/quoteString.h> #include <Common/quoteString.h>
#include <IO/Operators.h> #include <IO/Operators.h>
@ -6,10 +7,67 @@
namespace DB namespace DB
{ {
String ASTQueryWithTableAndOutput::getDatabase() const
{
String name;
tryGetIdentifierNameInto(database, name);
return name;
}
String ASTQueryWithTableAndOutput::getTable() const
{
String name;
tryGetIdentifierNameInto(table, name);
return name;
}
void ASTQueryWithTableAndOutput::setDatabase(const String & name)
{
if (database)
{
std::erase(children, database);
database.reset();
}
if (!name.empty())
{
database = std::make_shared<ASTIdentifier>(name);
children.push_back(database);
}
}
void ASTQueryWithTableAndOutput::setTable(const String & name)
{
if (table)
{
std::erase(children, table);
table.reset();
}
if (!name.empty())
{
table = std::make_shared<ASTIdentifier>(name);
children.push_back(table);
}
}
void ASTQueryWithTableAndOutput::cloneTableOptions(ASTQueryWithTableAndOutput & cloned) const
{
if (database)
{
cloned.database = database->clone();
cloned.children.push_back(cloned.database);
}
if (table)
{
cloned.table = table->clone();
cloned.children.push_back(cloned.table);
}
}
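Taken together, these helpers keep the identifier children consistent: the setters detach any previous node before attaching a new one, and cloneTableOptions() re-attaches cloned nodes because the clone() implementations start from children.clear(). A usage sketch (the concrete query type is illustrative):

    ASTDropQuery drop;
    drop.setDatabase("db1");      // attaches an ASTIdentifier child
    drop.setTable("t1");
    drop.setTable("t2");          // the old "t1" child is erased first
    ASTPtr copy = drop.clone();   // cloneTableOptions() re-registers both children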
void ASTQueryWithTableAndOutput::formatHelper(const FormatSettings & settings, const char * name) const void ASTQueryWithTableAndOutput::formatHelper(const FormatSettings & settings, const char * name) const
{ {
settings.ostr << (settings.hilite ? hilite_keyword : "") << name << " " << (settings.hilite ? hilite_none : ""); settings.ostr << (settings.hilite ? hilite_keyword : "") << name << " " << (settings.hilite ? hilite_none : "");
settings.ostr << (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); settings.ostr << (database ? backQuoteIfNeed(getDatabase()) + "." : "") << backQuoteIfNeed(getTable());
} }
} }

View File

@ -14,11 +14,21 @@ namespace DB
class ASTQueryWithTableAndOutput : public ASTQueryWithOutput class ASTQueryWithTableAndOutput : public ASTQueryWithOutput
{ {
public: public:
String database; ASTPtr database;
String table; ASTPtr table;
UUID uuid = UUIDHelpers::Nil; UUID uuid = UUIDHelpers::Nil;
bool temporary{false}; bool temporary{false};
String getDatabase() const;
String getTable() const;
// Once database or table is set, it cannot be assigned an empty value
void setDatabase(const String & name);
void setTable(const String & name);
void cloneTableOptions(ASTQueryWithTableAndOutput & cloned) const;
protected: protected:
void formatHelper(const FormatSettings & settings, const char * name) const; void formatHelper(const FormatSettings & settings, const char * name) const;
}; };
@ -28,13 +38,14 @@ template <typename AstIDAndQueryNames>
class ASTQueryWithTableAndOutputImpl : public ASTQueryWithTableAndOutput class ASTQueryWithTableAndOutputImpl : public ASTQueryWithTableAndOutput
{ {
public: public:
String getID(char delim) const override { return AstIDAndQueryNames::ID + (delim + database) + delim + table; } String getID(char delim) const override { return AstIDAndQueryNames::ID + (delim + getDatabase()) + delim + getTable(); }
ASTPtr clone() const override ASTPtr clone() const override
{ {
auto res = std::make_shared<ASTQueryWithTableAndOutputImpl<AstIDAndQueryNames>>(*this); auto res = std::make_shared<ASTQueryWithTableAndOutputImpl<AstIDAndQueryNames>>(*this);
res->children.clear(); res->children.clear();
cloneOutputOptions(*res); cloneOutputOptions(*res);
cloneTableOptions(*res);
return res; return res;
} }

View File

@ -1,3 +1,4 @@
#include <Parsers/ASTIdentifier.h>
#include <Parsers/IAST.h> #include <Parsers/IAST.h>
#include <Parsers/ASTSystemQuery.h> #include <Parsers/ASTSystemQuery.h>
#include <Common/quoteString.h> #include <Common/quoteString.h>
@ -39,6 +40,50 @@ const char * ASTSystemQuery::typeToString(Type type)
return type_name.data(); return type_name.data();
} }
String ASTSystemQuery::getDatabase() const
{
String name;
tryGetIdentifierNameInto(database, name);
return name;
}
String ASTSystemQuery::getTable() const
{
String name;
tryGetIdentifierNameInto(table, name);
return name;
}
void ASTSystemQuery::setDatabase(const String & name)
{
if (database)
{
std::erase(children, database);
database.reset();
}
if (!name.empty())
{
database = std::make_shared<ASTIdentifier>(name);
children.push_back(database);
}
}
void ASTSystemQuery::setTable(const String & name)
{
if (table)
{
std::erase(children, table);
table.reset();
}
if (!name.empty())
{
table = std::make_shared<ASTIdentifier>(name);
children.push_back(table);
}
}
void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const
{ {
settings.ostr << (settings.hilite ? hilite_keyword : "") << "SYSTEM "; settings.ostr << (settings.hilite ? hilite_keyword : "") << "SYSTEM ";
@ -47,19 +92,19 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &,
auto print_database_table = [&] auto print_database_table = [&]
{ {
settings.ostr << " "; settings.ostr << " ";
if (!database.empty()) if (database)
{ {
settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(database) settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(getDatabase())
<< (settings.hilite ? hilite_none : "") << "."; << (settings.hilite ? hilite_none : "") << ".";
} }
settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(table) settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(getTable())
<< (settings.hilite ? hilite_none : ""); << (settings.hilite ? hilite_none : "");
}; };
auto print_drop_replica = [&] auto print_drop_replica = [&]
{ {
settings.ostr << " " << quoteString(replica); settings.ostr << " " << quoteString(replica);
if (!table.empty()) if (table)
{ {
settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM TABLE" settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM TABLE"
<< (settings.hilite ? hilite_none : ""); << (settings.hilite ? hilite_none : "");
@ -70,11 +115,11 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &,
settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM ZKPATH " settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM ZKPATH "
<< (settings.hilite ? hilite_none : "") << quoteString(replica_zk_path); << (settings.hilite ? hilite_none : "") << quoteString(replica_zk_path);
} }
else if (!database.empty()) else if (database)
{ {
settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM DATABASE " settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM DATABASE "
<< (settings.hilite ? hilite_none : ""); << (settings.hilite ? hilite_none : "");
settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(database) settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(getDatabase())
<< (settings.hilite ? hilite_none : ""); << (settings.hilite ? hilite_none : "");
} }
}; };
@ -107,7 +152,7 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &,
|| type == Type::STOP_DISTRIBUTED_SENDS || type == Type::STOP_DISTRIBUTED_SENDS
|| type == Type::START_DISTRIBUTED_SENDS) || type == Type::START_DISTRIBUTED_SENDS)
{ {
if (!table.empty()) if (table)
print_database_table(); print_database_table();
else if (!volume.empty()) else if (!volume.empty())
print_on_volume(); print_on_volume();

View File

@ -70,10 +70,17 @@ public:
Type type = Type::UNKNOWN; Type type = Type::UNKNOWN;
ASTPtr database;
ASTPtr table;
String getDatabase() const;
String getTable() const;
void setDatabase(const String & name);
void setTable(const String & name);
String target_model; String target_model;
String target_function; String target_function;
String database;
String table;
String replica; String replica;
String replica_zk_path; String replica_zk_path;
bool is_drop_whole_replica{}; bool is_drop_whole_replica{};
@ -84,7 +91,16 @@ public:
String getID(char) const override { return "SYSTEM query"; } String getID(char) const override { return "SYSTEM query"; }
ASTPtr clone() const override { return std::make_shared<ASTSystemQuery>(*this); } ASTPtr clone() const override
{
auto res = std::make_shared<ASTSystemQuery>(*this);
res->children.clear();
if (database) { res->database = database->clone(); res->children.push_back(res->database); }
if (table) { res->table = table->clone(); res->children.push_back(res->table); }
return res;
}
ASTPtr getRewrittenASTWithoutOnCluster(const std::string & new_database) const override ASTPtr getRewrittenASTWithoutOnCluster(const std::string & new_database) const override
{ {

View File

@ -26,13 +26,14 @@ public:
bool is_watch_events; bool is_watch_events;
ASTWatchQuery() = default; ASTWatchQuery() = default;
String getID(char) const override { return "WatchQuery_" + database + "_" + table; } String getID(char) const override { return "WatchQuery_" + getDatabase() + "_" + getTable(); }
ASTPtr clone() const override ASTPtr clone() const override
{ {
std::shared_ptr<ASTWatchQuery> res = std::make_shared<ASTWatchQuery>(*this); std::shared_ptr<ASTWatchQuery> res = std::make_shared<ASTWatchQuery>(*this);
res->children.clear(); res->children.clear();
cloneOutputOptions(*res); cloneOutputOptions(*res);
cloneTableOptions(*res);
return res; return res;
} }
@ -42,7 +43,7 @@ protected:
std::string indent_str = s.one_line ? "" : std::string(4 * frame.indent, ' '); std::string indent_str = s.one_line ? "" : std::string(4 * frame.indent, ' ');
s.ostr << (s.hilite ? hilite_keyword : "") << "WATCH " << (s.hilite ? hilite_none : "") s.ostr << (s.hilite ? hilite_keyword : "") << "WATCH " << (s.hilite ? hilite_none : "")
<< (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); << (database ? backQuoteIfNeed(getDatabase()) + "." : "") << backQuoteIfNeed(getTable());
if (is_watch_events) if (is_watch_events)
{ {

View File

@ -861,12 +861,12 @@ bool ParserAlterQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
if (alter_object_type == ASTAlterQuery::AlterObjectType::DATABASE) if (alter_object_type == ASTAlterQuery::AlterObjectType::DATABASE)
{ {
if (!parseDatabase(pos, expected, query->database)) if (!parseDatabaseAsAST(pos, expected, query->database))
return false; return false;
} }
else else
{ {
if (!parseDatabaseAndTableName(pos, expected, query->database, query->table)) if (!parseDatabaseAndTableAsAST(pos, expected, query->database, query->table))
return false; return false;
String cluster_str; String cluster_str;
@ -886,6 +886,12 @@ bool ParserAlterQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
query->set(query->command_list, command_list); query->set(query->command_list, command_list);
query->alter_object = alter_object_type; query->alter_object = alter_object_type;
if (query->database)
query->children.push_back(query->database);
if (query->table)
query->children.push_back(query->table);
return true; return true;
} }
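The same child-registration pattern recurs in the parsers below (CHECK, CREATE, DROP, INSERT, OPTIMIZE): an identifier node left out of children would be invisible to AST-wide passes such as query-parameter substitution. A sketch of what the parser effectively builds:

    ASTAlterQuery alter;                        // fields normally set by the parser
    alter.database = std::make_shared<ASTIdentifier>("db1");
    alter.table = std::make_shared<ASTIdentifier>("t1");
    alter.children.push_back(alter.database);   // visible to visitors from here on
    alter.children.push_back(alter.table);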

View File

@ -4,6 +4,7 @@
#include <Parsers/ExpressionElementParsers.h> #include <Parsers/ExpressionElementParsers.h>
#include <Parsers/ASTCheckQuery.h> #include <Parsers/ASTCheckQuery.h>
#include <Parsers/ParserPartition.h> #include <Parsers/ParserPartition.h>
#include <Parsers/parseDatabaseAndTableName.h>
namespace DB namespace DB
@ -15,31 +16,15 @@ bool ParserCheckQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
ParserKeyword s_partition("PARTITION"); ParserKeyword s_partition("PARTITION");
ParserToken s_dot(TokenType::Dot); ParserToken s_dot(TokenType::Dot);
ParserIdentifier table_parser;
ParserPartition partition_parser; ParserPartition partition_parser;
ASTPtr table;
ASTPtr database;
if (!s_check_table.ignore(pos, expected)) if (!s_check_table.ignore(pos, expected))
return false; return false;
if (!table_parser.parse(pos, database, expected))
return false;
auto query = std::make_shared<ASTCheckQuery>(); auto query = std::make_shared<ASTCheckQuery>();
if (s_dot.ignore(pos))
{
if (!table_parser.parse(pos, table, expected))
return false;
tryGetIdentifierNameInto(database, query->database); if (!parseDatabaseAndTableAsAST(pos, expected, query->database, query->table))
tryGetIdentifierNameInto(table, query->table); return false;
}
else
{
table = database;
tryGetIdentifierNameInto(table, query->table);
}
if (s_partition.ignore(pos, expected)) if (s_partition.ignore(pos, expected))
{ {
@ -47,6 +32,12 @@ bool ParserCheckQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
return false; return false;
} }
if (query->database)
query->children.push_back(query->database);
if (query->table)
query->children.push_back(query->table);
node = query; node = query;
return true; return true;
} }

View File

@ -426,7 +426,7 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
ParserKeyword s_temporary("TEMPORARY"); ParserKeyword s_temporary("TEMPORARY");
ParserKeyword s_table("TABLE"); ParserKeyword s_table("TABLE");
ParserKeyword s_if_not_exists("IF NOT EXISTS"); ParserKeyword s_if_not_exists("IF NOT EXISTS");
ParserCompoundIdentifier table_name_p(true); ParserCompoundIdentifier table_name_p(true, true);
ParserKeyword s_from("FROM"); ParserKeyword s_from("FROM");
ParserKeyword s_on("ON"); ParserKeyword s_on("ON");
ParserKeyword s_as("AS"); ParserKeyword s_as("AS");
@ -495,7 +495,7 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
return false; return false;
} }
auto table_id = table->as<ASTTableIdentifier>()->getTableId(); auto * table_id = table->as<ASTTableIdentifier>();
// Shortcut for ATTACH a previously detached table // Shortcut for ATTACH a previously detached table
bool short_attach = attach && !from_path; bool short_attach = attach && !from_path;
@ -508,9 +508,14 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
query->if_not_exists = if_not_exists; query->if_not_exists = if_not_exists;
query->cluster = cluster_str; query->cluster = cluster_str;
query->database = table_id.database_name; query->database = table_id->getDatabase();
query->table = table_id.table_name; query->table = table_id->getTable();
query->uuid = table_id.uuid; query->uuid = table_id->uuid;
if (query->database)
query->children.push_back(query->database);
if (query->table)
query->children.push_back(query->table);
return true; return true;
} }
@ -585,11 +590,16 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
query->if_not_exists = if_not_exists; query->if_not_exists = if_not_exists;
query->temporary = is_temporary; query->temporary = is_temporary;
query->database = table_id.database_name; query->database = table_id->getDatabase();
query->table = table_id.table_name; query->table = table_id->getTable();
query->uuid = table_id.uuid; query->uuid = table_id->uuid;
query->cluster = cluster_str; query->cluster = cluster_str;
if (query->database)
query->children.push_back(query->database);
if (query->table)
query->children.push_back(query->table);
query->set(query->columns_list, columns_list); query->set(query->columns_list, columns_list);
query->set(query->storage, storage); query->set(query->storage, storage);
@ -620,7 +630,7 @@ bool ParserCreateLiveViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e
ParserKeyword s_create("CREATE"); ParserKeyword s_create("CREATE");
ParserKeyword s_attach("ATTACH"); ParserKeyword s_attach("ATTACH");
ParserKeyword s_if_not_exists("IF NOT EXISTS"); ParserKeyword s_if_not_exists("IF NOT EXISTS");
ParserCompoundIdentifier table_name_p(true); ParserCompoundIdentifier table_name_p(true, true);
ParserKeyword s_as("AS"); ParserKeyword s_as("AS");
ParserKeyword s_view("VIEW"); ParserKeyword s_view("VIEW");
ParserKeyword s_live("LIVE"); ParserKeyword s_live("LIVE");
@ -735,12 +745,17 @@ bool ParserCreateLiveViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e
query->if_not_exists = if_not_exists; query->if_not_exists = if_not_exists;
query->is_live_view = true; query->is_live_view = true;
auto table_id = table->as<ASTTableIdentifier>()->getTableId(); auto * table_id = table->as<ASTTableIdentifier>();
query->database = table_id.database_name; query->database = table_id->getDatabase();
query->table = table_id.table_name; query->table = table_id->getTable();
query->uuid = table_id.uuid; query->uuid = table_id->uuid;
query->cluster = cluster_str; query->cluster = cluster_str;
if (query->database)
query->children.push_back(query->database);
if (query->table)
query->children.push_back(query->table);
if (to_table) if (to_table)
query->to_table_id = to_table->as<ASTTableIdentifier>()->getTableId(); query->to_table_id = to_table->as<ASTTableIdentifier>()->getTableId();
@ -766,7 +781,7 @@ bool ParserCreateDatabaseQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e
ParserKeyword s_database("DATABASE"); ParserKeyword s_database("DATABASE");
ParserKeyword s_if_not_exists("IF NOT EXISTS"); ParserKeyword s_if_not_exists("IF NOT EXISTS");
ParserStorage storage_p; ParserStorage storage_p;
ParserIdentifier name_p; ParserIdentifier name_p(true);
ASTPtr database; ASTPtr database;
ASTPtr storage; ASTPtr storage;
@ -817,9 +832,12 @@ bool ParserCreateDatabaseQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e
query->attach = attach; query->attach = attach;
query->if_not_exists = if_not_exists; query->if_not_exists = if_not_exists;
tryGetIdentifierNameInto(database, query->database);
query->uuid = uuid; query->uuid = uuid;
query->cluster = cluster_str; query->cluster = cluster_str;
query->database = database;
if (database)
query->children.push_back(database);
query->set(query->storage, storage); query->set(query->storage, storage);
if (comment) if (comment)
@ -833,7 +851,7 @@ bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
ParserKeyword s_create("CREATE"); ParserKeyword s_create("CREATE");
ParserKeyword s_attach("ATTACH"); ParserKeyword s_attach("ATTACH");
ParserKeyword s_if_not_exists("IF NOT EXISTS"); ParserKeyword s_if_not_exists("IF NOT EXISTS");
ParserCompoundIdentifier table_name_p(true); ParserCompoundIdentifier table_name_p(true, true);
ParserKeyword s_as("AS"); ParserKeyword s_as("AS");
ParserKeyword s_view("VIEW"); ParserKeyword s_view("VIEW");
ParserKeyword s_materialized("MATERIALIZED"); ParserKeyword s_materialized("MATERIALIZED");
@ -954,12 +972,17 @@ bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
query->is_populate = is_populate; query->is_populate = is_populate;
query->replace_view = replace_view; query->replace_view = replace_view;
auto table_id = table->as<ASTTableIdentifier>()->getTableId(); auto * table_id = table->as<ASTTableIdentifier>();
query->database = table_id.database_name; query->database = table_id->getDatabase();
query->table = table_id.table_name; query->table = table_id->getTable();
query->uuid = table_id.uuid; query->uuid = table_id->uuid;
query->cluster = cluster_str; query->cluster = cluster_str;
if (query->database)
query->children.push_back(query->database);
if (query->table)
query->children.push_back(query->table);
if (to_table) if (to_table)
query->to_table_id = to_table->as<ASTTableIdentifier>()->getTableId(); query->to_table_id = to_table->as<ASTTableIdentifier>()->getTableId();
if (to_inner_uuid) if (to_inner_uuid)
@ -987,7 +1010,7 @@ bool ParserCreateDictionaryQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, E
ParserKeyword s_dictionary("DICTIONARY"); ParserKeyword s_dictionary("DICTIONARY");
ParserKeyword s_if_not_exists("IF NOT EXISTS"); ParserKeyword s_if_not_exists("IF NOT EXISTS");
ParserKeyword s_on("ON"); ParserKeyword s_on("ON");
ParserCompoundIdentifier dict_name_p(true); ParserCompoundIdentifier dict_name_p(true, true);
ParserToken s_left_paren(TokenType::OpeningRoundBracket); ParserToken s_left_paren(TokenType::OpeningRoundBracket);
ParserToken s_right_paren(TokenType::ClosingRoundBracket); ParserToken s_right_paren(TokenType::ClosingRoundBracket);
ParserToken s_dot(TokenType::Dot); ParserToken s_dot(TokenType::Dot);
@ -1059,10 +1082,15 @@ bool ParserCreateDictionaryQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, E
query->create_or_replace = or_replace; query->create_or_replace = or_replace;
query->replace_table = replace; query->replace_table = replace;
auto dict_id = name->as<ASTTableIdentifier>()->getTableId(); auto * dict_id = name->as<ASTTableIdentifier>();
query->database = dict_id.database_name; query->database = dict_id->getDatabase();
query->table = dict_id.table_name; query->table = dict_id->getTable();
query->uuid = dict_id.uuid; query->uuid = dict_id->uuid;
if (query->database)
query->children.push_back(query->database);
if (query->table)
query->children.push_back(query->table);
query->if_not_exists = if_not_exists; query->if_not_exists = if_not_exists;
query->set(query->dictionary_attributes_list, attributes); query->set(query->dictionary_attributes_list, attributes);

View File

@ -20,7 +20,7 @@ bool parseDropQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected, cons
ParserKeyword s_database("DATABASE"); ParserKeyword s_database("DATABASE");
ParserToken s_dot(TokenType::Dot); ParserToken s_dot(TokenType::Dot);
ParserKeyword s_if_exists("IF EXISTS"); ParserKeyword s_if_exists("IF EXISTS");
ParserIdentifier name_p; ParserIdentifier name_p(true);
ParserKeyword s_permanently("PERMANENTLY"); ParserKeyword s_permanently("PERMANENTLY");
ParserKeyword s_no_delay("NO DELAY"); ParserKeyword s_no_delay("NO DELAY");
ParserKeyword s_sync("SYNC"); ParserKeyword s_sync("SYNC");
@ -96,9 +96,14 @@ bool parseDropQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected, cons
query->is_view = is_view; query->is_view = is_view;
query->no_delay = no_delay; query->no_delay = no_delay;
query->permanently = permanently; query->permanently = permanently;
query->database = database;
query->table = table;
tryGetIdentifierNameInto(database, query->database); if (database)
tryGetIdentifierNameInto(table, query->table); query->children.push_back(database);
if (table)
query->children.push_back(table);
query->cluster = cluster_str; query->cluster = cluster_str;
View File
@ -41,7 +41,7 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
ParserKeyword s_with("WITH"); ParserKeyword s_with("WITH");
ParserToken s_lparen(TokenType::OpeningRoundBracket); ParserToken s_lparen(TokenType::OpeningRoundBracket);
ParserToken s_rparen(TokenType::ClosingRoundBracket); ParserToken s_rparen(TokenType::ClosingRoundBracket);
ParserIdentifier name_p; ParserIdentifier name_p(true);
ParserList columns_p(std::make_unique<ParserInsertElement>(), std::make_unique<ParserToken>(TokenType::Comma), false); ParserList columns_p(std::make_unique<ParserInsertElement>(), std::make_unique<ParserToken>(TokenType::Comma), false);
ParserFunction table_function_p{false}; ParserFunction table_function_p{false};
ParserStringLiteral infile_name_p; ParserStringLiteral infile_name_p;
@ -244,8 +244,13 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
} }
else else
{ {
tryGetIdentifierNameInto(database, query->table_id.database_name); query->database = database;
tryGetIdentifierNameInto(table, query->table_id.table_name); query->table = table;
if (database)
query->children.push_back(database);
if (table)
query->children.push_back(table);
} }
query->columns = columns; query->columns = columns;
View File
@ -31,7 +31,7 @@ bool ParserOptimizeQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte
ParserKeyword s_deduplicate("DEDUPLICATE"); ParserKeyword s_deduplicate("DEDUPLICATE");
ParserKeyword s_by("BY"); ParserKeyword s_by("BY");
ParserToken s_dot(TokenType::Dot); ParserToken s_dot(TokenType::Dot);
ParserIdentifier name_p; ParserIdentifier name_p(true);
ParserPartition partition_p; ParserPartition partition_p;
ASTPtr database; ASTPtr database;
@ -80,15 +80,20 @@ bool ParserOptimizeQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte
auto query = std::make_shared<ASTOptimizeQuery>(); auto query = std::make_shared<ASTOptimizeQuery>();
node = query; node = query;
tryGetIdentifierNameInto(database, query->database);
tryGetIdentifierNameInto(table, query->table);
query->cluster = cluster_str; query->cluster = cluster_str;
if ((query->partition = partition)) if ((query->partition = partition))
query->children.push_back(partition); query->children.push_back(partition);
query->final = final; query->final = final;
query->deduplicate = deduplicate; query->deduplicate = deduplicate;
query->deduplicate_by_columns = deduplicate_by_columns; query->deduplicate_by_columns = deduplicate_by_columns;
query->database = database;
query->table = table;
if (database)
query->children.push_back(database);
if (table)
query->children.push_back(table);
return true; return true;
} }
View File
@ -39,14 +39,13 @@ static bool parseQueryWithOnClusterAndMaybeTable(std::shared_ptr<ASTSystemQuery>
ASTPtr ast; ASTPtr ast;
if (ParserStringLiteral{}.parse(pos, ast, expected)) if (ParserStringLiteral{}.parse(pos, ast, expected))
{ {
res->database = {}; res->setTable(ast->as<ASTLiteral &>().value.safeGet<String>());
res->table = ast->as<ASTLiteral &>().value.safeGet<String>();
parsed_table = true; parsed_table = true;
} }
} }
if (!parsed_table) if (!parsed_table)
parsed_table = parseDatabaseAndTableName(pos, expected, res->database, res->table); parsed_table = parseDatabaseAndTableAsAST(pos, expected, res->database, res->table);
if (!parsed_table && require_table) if (!parsed_table && require_table)
return false; return false;
@ -56,6 +55,12 @@ static bool parseQueryWithOnClusterAndMaybeTable(std::shared_ptr<ASTSystemQuery>
return false; return false;
res->cluster = cluster; res->cluster = cluster;
if (res->database)
res->children.push_back(res->database);
if (res->table)
res->children.push_back(res->table);
return true; return true;
} }
@ -163,14 +168,12 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
if (ParserKeyword{"DATABASE"}.ignore(pos, expected)) if (ParserKeyword{"DATABASE"}.ignore(pos, expected))
{ {
ParserIdentifier database_parser; ParserIdentifier database_parser;
ASTPtr database; if (!database_parser.parse(pos, res->database, expected))
if (!database_parser.parse(pos, database, expected))
return false; return false;
tryGetIdentifierNameInto(database, res->database);
} }
else if (ParserKeyword{"TABLE"}.ignore(pos, expected)) else if (ParserKeyword{"TABLE"}.ignore(pos, expected))
{ {
parseDatabaseAndTableName(pos, expected, res->database, res->table); parseDatabaseAndTableAsAST(pos, expected, res->database, res->table);
} }
else if (ParserKeyword{"ZKPATH"}.ignore(pos, expected)) else if (ParserKeyword{"ZKPATH"}.ignore(pos, expected))
{ {
@ -193,7 +196,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
case Type::RESTART_REPLICA: case Type::RESTART_REPLICA:
case Type::SYNC_REPLICA: case Type::SYNC_REPLICA:
if (!parseDatabaseAndTableName(pos, expected, res->database, res->table)) if (!parseDatabaseAndTableAsAST(pos, expected, res->database, res->table))
return false; return false;
break; break;
@ -251,7 +254,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
res->storage_policy = storage_policy_str; res->storage_policy = storage_policy_str;
res->volume = volume_str; res->volume = volume_str;
if (res->volume.empty() && res->storage_policy.empty()) if (res->volume.empty() && res->storage_policy.empty())
parseDatabaseAndTableName(pos, expected, res->database, res->table); parseDatabaseAndTableAsAST(pos, expected, res->database, res->table);
break; break;
} }
@ -265,7 +268,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
case Type::START_REPLICATED_SENDS: case Type::START_REPLICATED_SENDS:
case Type::STOP_REPLICATION_QUEUES: case Type::STOP_REPLICATION_QUEUES:
case Type::START_REPLICATION_QUEUES: case Type::START_REPLICATION_QUEUES:
parseDatabaseAndTableName(pos, expected, res->database, res->table); parseDatabaseAndTableAsAST(pos, expected, res->database, res->table);
break; break;
case Type::SUSPEND: case Type::SUSPEND:
@ -287,6 +290,11 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
break; break;
} }
if (res->database)
res->children.push_back(res->database);
if (res->table)
res->children.push_back(res->table);
node = std::move(res); node = std::move(res);
return true; return true;
} }
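Note: the pattern repeated above — keeping the database and table as identifier ASTs and registering them in `children` — is what makes them visible to generic AST machinery such as cloning, formatting and query-parameter substitution (hence `ParserIdentifier name_p(true)`, which allows `{name:Identifier}` parameters). A minimal Python model of why the `children` registration matters; all names here are illustrative, not ClickHouse API:

# Simplified model (hypothetical names): visitors only walk `children`,
# so a member that is not registered there is never rewritten.
class Node:
    def __init__(self, text, children=None):
        self.text = text
        self.children = children or []

def substitute_parameters(node, params):
    """Replace {name} placeholder nodes throughout the subtree."""
    if node.text.startswith('{') and node.text.endswith('}'):
        node.text = params[node.text[1:-1]]
    for child in node.children:
        substitute_parameters(child, params)

db, table = Node('{db}'), Node('{tbl}')
query = Node('SYSTEM RESTART REPLICA', children=[db, table])
substitute_parameters(query, {'db': 'default', 'tbl': 'hits'})
assert (db.text, table.text) == ('default', 'hits')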
View File
@ -24,7 +24,7 @@ bool ParserTablePropertiesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected &
ParserKeyword s_view("VIEW"); ParserKeyword s_view("VIEW");
ParserKeyword s_dictionary("DICTIONARY"); ParserKeyword s_dictionary("DICTIONARY");
ParserToken s_dot(TokenType::Dot); ParserToken s_dot(TokenType::Dot);
ParserIdentifier name_p; ParserIdentifier name_p(true);
ASTPtr database; ASTPtr database;
ASTPtr table; ASTPtr table;
@ -110,8 +110,14 @@ bool ParserTablePropertiesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected &
} }
} }
tryGetIdentifierNameInto(database, query->database); query->database = database;
tryGetIdentifierNameInto(table, query->table); query->table = table;
if (database)
query->children.push_back(database);
if (table)
query->children.push_back(table);
node = query; node = query;
View File
@ -24,7 +24,7 @@ bool ParserWatchQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{ {
ParserKeyword s_watch("WATCH"); ParserKeyword s_watch("WATCH");
ParserToken s_dot(TokenType::Dot); ParserToken s_dot(TokenType::Dot);
ParserIdentifier name_p; ParserIdentifier name_p(true);
ParserKeyword s_events("EVENTS"); ParserKeyword s_events("EVENTS");
ParserKeyword s_limit("LIMIT"); ParserKeyword s_limit("LIMIT");
@ -62,11 +62,14 @@ bool ParserWatchQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
return false; return false;
} }
query->database = database;
query->table = table;
if (database) if (database)
query->database = getIdentifierName(database); query->children.push_back(database);
if (table) if (table)
query->table = getIdentifierName(table); query->children.push_back(table);
node = query; node = query;
View File
@ -89,7 +89,7 @@ protected:
void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override
{ {
settings.ostr << (settings.hilite ? hilite_keyword : "") << ASTExistsDatabaseQueryIDAndQueryNames::Query settings.ostr << (settings.hilite ? hilite_keyword : "") << ASTExistsDatabaseQueryIDAndQueryNames::Query
<< " " << (settings.hilite ? hilite_none : "") << backQuoteIfNeed(database); << " " << (settings.hilite ? hilite_none : "") << backQuoteIfNeed(getDatabase());
} }
}; };
@ -99,7 +99,7 @@ protected:
void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override
{ {
settings.ostr << (settings.hilite ? hilite_keyword : "") << ASTShowCreateDatabaseQueryIDAndQueryNames::Query settings.ostr << (settings.hilite ? hilite_keyword : "") << ASTShowCreateDatabaseQueryIDAndQueryNames::Query
<< " " << (settings.hilite ? hilite_none : "") << backQuoteIfNeed(database); << " " << (settings.hilite ? hilite_none : "") << backQuoteIfNeed(getDatabase());
} }
}; };
View File
@ -41,6 +41,24 @@ bool parseDatabaseAndTableName(IParser::Pos & pos, Expected & expected, String &
return true; return true;
} }
bool parseDatabaseAndTableAsAST(IParser::Pos & pos, Expected & expected, ASTPtr & database, ASTPtr & table)
{
ParserToken s_dot(TokenType::Dot);
ParserIdentifier table_parser(true);
if (!table_parser.parse(pos, table, expected))
return false;
if (s_dot.ignore(pos))
{
database = table;
if (!table_parser.parse(pos, table, expected))
return false;
}
return true;
}
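The control flow above is worth spelling out: the first identifier is always parsed into `table`, and only when a dot follows is that value reinterpreted as the database before the real table name is parsed. A rough, runnable Python model of the same logic (illustrative only — the real parser consumes a token stream and also accepts query parameters):

def parse_database_and_table(tokens):
    """Parse [db '.'] name from a token list; return (database, table) or None."""
    if not tokens or tokens[0] == '.':
        return None                          # must start with an identifier
    database, table = None, tokens[0]
    if tokens[1:2] == ['.']:                 # a dot: what we parsed was the database
        if len(tokens) < 3 or tokens[2] == '.':
            return None                      # the dot must be followed by a name
        database, table = table, tokens[2]
    return database, table

assert parse_database_and_table(['t']) == (None, 't')
assert parse_database_and_table(['db', '.', 't']) == ('db', 't')
assert parse_database_and_table(['db', '.']) is None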
bool parseDatabase(IParser::Pos & pos, Expected & expected, String & database_str) bool parseDatabase(IParser::Pos & pos, Expected & expected, String & database_str)
{ {
@ -57,6 +75,12 @@ bool parseDatabase(IParser::Pos & pos, Expected & expected, String & database_st
return true; return true;
} }
bool parseDatabaseAsAST(IParser::Pos & pos, Expected & expected, ASTPtr & database)
{
ParserIdentifier identifier_parser(/* allow_query_parameter */true);
return identifier_parser.parse(pos, database, expected);
}
bool parseDatabaseAndTableNameOrAsterisks(IParser::Pos & pos, Expected & expected, String & database, bool & any_database, String & table, bool & any_table) bool parseDatabaseAndTableNameOrAsterisks(IParser::Pos & pos, Expected & expected, String & database, bool & any_database, String & table, bool & any_table)
{ {
View File
@ -7,9 +7,13 @@ namespace DB
/// Parses [db.]name /// Parses [db.]name
bool parseDatabaseAndTableName(IParser::Pos & pos, Expected & expected, String & database_str, String & table_str); bool parseDatabaseAndTableName(IParser::Pos & pos, Expected & expected, String & database_str, String & table_str);
bool parseDatabaseAndTableAsAST(IParser::Pos & pos, Expected & expected, ASTPtr & database, ASTPtr & table);
/// Parses [db.]name or [db.]* or [*.]* /// Parses [db.]name or [db.]* or [*.]*
bool parseDatabaseAndTableNameOrAsterisks(IParser::Pos & pos, Expected & expected, String & database, bool & any_database, String & table, bool & any_table); bool parseDatabaseAndTableNameOrAsterisks(IParser::Pos & pos, Expected & expected, String & database, bool & any_database, String & table, bool & any_table);
bool parseDatabase(IParser::Pos & pos, Expected & expected, String & database_str); bool parseDatabase(IParser::Pos & pos, Expected & expected, String & database_str);
bool parseDatabaseAsAST(IParser::Pos & pos, Expected & expected, ASTPtr & database);
} }
View File
@ -42,8 +42,8 @@ TEST(ParserDictionaryDDL, SimpleDictionary)
ParserCreateDictionaryQuery parser; ParserCreateDictionaryQuery parser;
ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0);
ASTCreateQuery * create = ast->as<ASTCreateQuery>(); ASTCreateQuery * create = ast->as<ASTCreateQuery>();
EXPECT_EQ(create->table, "dict1"); EXPECT_EQ(create->getTable(), "dict1");
EXPECT_EQ(create->database, "test"); EXPECT_EQ(create->getDatabase(), "test");
EXPECT_EQ(create->is_dictionary, true); EXPECT_EQ(create->is_dictionary, true);
EXPECT_NE(create->dictionary, nullptr); EXPECT_NE(create->dictionary, nullptr);
EXPECT_NE(create->dictionary->lifetime, nullptr); EXPECT_NE(create->dictionary->lifetime, nullptr);
@ -138,8 +138,8 @@ TEST(ParserDictionaryDDL, AttributesWithMultipleProperties)
ParserCreateDictionaryQuery parser; ParserCreateDictionaryQuery parser;
ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0);
ASTCreateQuery * create = ast->as<ASTCreateQuery>(); ASTCreateQuery * create = ast->as<ASTCreateQuery>();
EXPECT_EQ(create->table, "dict2"); EXPECT_EQ(create->getTable(), "dict2");
EXPECT_EQ(create->database, ""); EXPECT_EQ(create->getDatabase(), "");
/// test attributes /// test attributes
EXPECT_NE(create->dictionary_attributes_list, nullptr); EXPECT_NE(create->dictionary_attributes_list, nullptr);
@ -240,8 +240,8 @@ TEST(ParserDictionaryDDL, NestedSource)
ParserCreateDictionaryQuery parser; ParserCreateDictionaryQuery parser;
ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0);
ASTCreateQuery * create = ast->as<ASTCreateQuery>(); ASTCreateQuery * create = ast->as<ASTCreateQuery>();
EXPECT_EQ(create->table, "dict4"); EXPECT_EQ(create->getTable(), "dict4");
EXPECT_EQ(create->database, ""); EXPECT_EQ(create->getDatabase(), "");
/// source test /// source test
EXPECT_EQ(create->dictionary->source->name, "mysql"); EXPECT_EQ(create->dictionary->source->name, "mysql");
@ -301,8 +301,8 @@ TEST(ParserDictionaryDDL, ParseDropQuery)
ASTDropQuery * drop1 = ast1->as<ASTDropQuery>(); ASTDropQuery * drop1 = ast1->as<ASTDropQuery>();
EXPECT_TRUE(drop1->is_dictionary); EXPECT_TRUE(drop1->is_dictionary);
EXPECT_EQ(drop1->database, "test"); EXPECT_EQ(drop1->getDatabase(), "test");
EXPECT_EQ(drop1->table, "dict1"); EXPECT_EQ(drop1->getTable(), "dict1");
auto str1 = serializeAST(*drop1, true); auto str1 = serializeAST(*drop1, true);
EXPECT_EQ(input1, str1); EXPECT_EQ(input1, str1);
@ -312,8 +312,8 @@ TEST(ParserDictionaryDDL, ParseDropQuery)
ASTDropQuery * drop2 = ast2->as<ASTDropQuery>(); ASTDropQuery * drop2 = ast2->as<ASTDropQuery>();
EXPECT_TRUE(drop2->is_dictionary); EXPECT_TRUE(drop2->is_dictionary);
EXPECT_EQ(drop2->database, ""); EXPECT_EQ(drop2->getDatabase(), "");
EXPECT_EQ(drop2->table, "dict2"); EXPECT_EQ(drop2->getTable(), "dict2");
auto str2 = serializeAST(*drop2, true); auto str2 = serializeAST(*drop2, true);
EXPECT_EQ(input2, str2); EXPECT_EQ(input2, str2);
} }
@ -326,8 +326,8 @@ TEST(ParserDictionaryDDL, ParsePropertiesQueries)
ASTPtr ast1 = parseQuery(parser, input1.data(), input1.data() + input1.size(), "", 0, 0); ASTPtr ast1 = parseQuery(parser, input1.data(), input1.data() + input1.size(), "", 0, 0);
ASTShowCreateDictionaryQuery * show1 = ast1->as<ASTShowCreateDictionaryQuery>(); ASTShowCreateDictionaryQuery * show1 = ast1->as<ASTShowCreateDictionaryQuery>();
EXPECT_EQ(show1->table, "dict1"); EXPECT_EQ(show1->getTable(), "dict1");
EXPECT_EQ(show1->database, "test"); EXPECT_EQ(show1->getDatabase(), "test");
EXPECT_EQ(serializeAST(*show1), input1); EXPECT_EQ(serializeAST(*show1), input1);
String input2 = "EXISTS DICTIONARY dict2"; String input2 = "EXISTS DICTIONARY dict2";
@ -335,7 +335,7 @@ TEST(ParserDictionaryDDL, ParsePropertiesQueries)
ASTPtr ast2 = parseQuery(parser, input2.data(), input2.data() + input2.size(), "", 0, 0); ASTPtr ast2 = parseQuery(parser, input2.data(), input2.data() + input2.size(), "", 0, 0);
ASTExistsDictionaryQuery * show2 = ast2->as<ASTExistsDictionaryQuery>(); ASTExistsDictionaryQuery * show2 = ast2->as<ASTExistsDictionaryQuery>();
EXPECT_EQ(show2->table, "dict2"); EXPECT_EQ(show2->getTable(), "dict2");
EXPECT_EQ(show2->database, ""); EXPECT_EQ(show2->getDatabase(), "");
EXPECT_EQ(serializeAST(*show2), input2); EXPECT_EQ(serializeAST(*show2), input2);
} }
View File
@ -958,7 +958,18 @@ namespace
{ {
if (insert_query) if (insert_query)
{ {
auto table_id = query_context->resolveStorageID(insert_query->table_id, Context::ResolveOrdinary); auto table_id = StorageID::createEmpty();
if (insert_query->table_id)
{
table_id = query_context->resolveStorageID(insert_query->table_id, Context::ResolveOrdinary);
}
else
{
StorageID local_table_id(insert_query->getDatabase(), insert_query->getTable());
table_id = query_context->resolveStorageID(local_table_id, Context::ResolveOrdinary);
}
if (query_context->getSettingsRef().input_format_defaults_for_omitted_fields && table_id) if (query_context->getSettingsRef().input_format_defaults_for_omitted_fields && table_id)
{ {
StoragePtr storage = DatabaseCatalog::instance().getTable(table_id, query_context); StoragePtr storage = DatabaseCatalog::instance().getTable(table_id, query_context);
View File
@ -24,8 +24,8 @@ namespace
{ {
/// We create and execute `drop` query for this table /// We create and execute `drop` query for this table
auto drop_query = std::make_shared<ASTDropQuery>(); auto drop_query = std::make_shared<ASTDropQuery>();
drop_query->database = storage_id.database_name; drop_query->setDatabase(storage_id.database_name);
drop_query->table = storage_id.table_name; drop_query->setTable(storage_id.table_name);
drop_query->kind = ASTDropQuery::Kind::Drop; drop_query->kind = ASTDropQuery::Kind::Drop;
ASTPtr ast_drop_query = drop_query; ASTPtr ast_drop_query = drop_query;
InterpreterDropQuery drop_interpreter(ast_drop_query, context); InterpreterDropQuery drop_interpreter(ast_drop_query, context);
View File
@ -71,6 +71,9 @@ MergeTreeReaderCompact::MergeTreeReaderCompact(
if (buffer_size) if (buffer_size)
settings.read_settings = settings.read_settings.adjustBufferSize(buffer_size); settings.read_settings = settings.read_settings.adjustBufferSize(buffer_size);
if (!settings.read_settings.local_fs_buffer_size || !settings.read_settings.remote_fs_buffer_size)
throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, "Cannot read to empty buffer.");
const String full_data_path = data_part->getFullRelativePath() + MergeTreeDataPartCompact::DATA_FILE_NAME_WITH_EXTENSION; const String full_data_path = data_part->getFullRelativePath() + MergeTreeDataPartCompact::DATA_FILE_NAME_WITH_EXTENSION;
if (uncompressed_cache) if (uncompressed_cache)
{ {
View File
@ -369,8 +369,8 @@ ASTPtr StorageMaterializedPostgreSQL::getCreateNestedTableQuery(PostgreSQLTableS
auto create_table_query = std::make_shared<ASTCreateQuery>(); auto create_table_query = std::make_shared<ASTCreateQuery>();
auto table_id = getStorageID(); auto table_id = getStorageID();
create_table_query->table = getNestedTableName(); create_table_query->setTable(getNestedTableName());
create_table_query->database = table_id.database_name; create_table_query->setDatabase(table_id.database_name);
if (is_materialized_postgresql_database) if (is_materialized_postgresql_database)
create_table_query->uuid = table_id.uuid; create_table_query->uuid = table_id.uuid;
View File
@ -201,7 +201,7 @@ StoragePtr StorageFactory::get(
.storage_def = storage_def, .storage_def = storage_def,
.query = query, .query = query,
.relative_data_path = relative_data_path, .relative_data_path = relative_data_path,
.table_id = StorageID(query.database, query.table, query.uuid), .table_id = StorageID(query.getDatabase(), query.getTable(), query.uuid),
.local_context = local_context, .local_context = local_context,
.context = context, .context = context,
.columns = columns, .columns = columns,
View File
@ -105,8 +105,8 @@ StorageMaterializedView::StorageMaterializedView(
/// We will create a query to create an internal table. /// We will create a query to create an internal table.
auto create_context = Context::createCopy(local_context); auto create_context = Context::createCopy(local_context);
auto manual_create_query = std::make_shared<ASTCreateQuery>(); auto manual_create_query = std::make_shared<ASTCreateQuery>();
manual_create_query->database = getStorageID().database_name; manual_create_query->setDatabase(getStorageID().database_name);
manual_create_query->table = generateInnerTableName(getStorageID()); manual_create_query->setTable(generateInnerTableName(getStorageID()));
manual_create_query->uuid = query.to_inner_uuid; manual_create_query->uuid = query.to_inner_uuid;
auto new_columns_list = std::make_shared<ASTColumns>(); auto new_columns_list = std::make_shared<ASTColumns>();
@ -119,7 +119,7 @@ StorageMaterializedView::StorageMaterializedView(
create_interpreter.setInternal(true); create_interpreter.setInternal(true);
create_interpreter.execute(); create_interpreter.execute();
target_table_id = DatabaseCatalog::instance().getTable({manual_create_query->database, manual_create_query->table}, getContext())->getStorageID(); target_table_id = DatabaseCatalog::instance().getTable({manual_create_query->getDatabase(), manual_create_query->getTable()}, getContext())->getStorageID();
} }
if (!select.select_table_id.empty()) if (!select.select_table_id.empty())
View File
@ -31,14 +31,14 @@ static void createInformationSchemaView(ContextMutablePtr context, IDatabase & d
DBMS_DEFAULT_MAX_QUERY_SIZE, DBMS_DEFAULT_MAX_PARSER_DEPTH); DBMS_DEFAULT_MAX_QUERY_SIZE, DBMS_DEFAULT_MAX_PARSER_DEPTH);
auto & ast_create = ast->as<ASTCreateQuery &>(); auto & ast_create = ast->as<ASTCreateQuery &>();
assert(view_name == ast_create.table); assert(view_name == ast_create.getTable());
if (is_uppercase) if (is_uppercase)
ast_create.table = Poco::toUpper(view_name); ast_create.setTable(Poco::toUpper(view_name));
StoragePtr view = createTableFromAST(ast_create, database.getDatabaseName(), StoragePtr view = createTableFromAST(ast_create, database.getDatabaseName(),
database.getTableDataPath(ast_create), context, true).second; database.getTableDataPath(ast_create), context, true).second;
database.createTable(context, ast_create.table, view, ast); database.createTable(context, ast_create.getTable(), view, ast);
} }
catch (...) catch (...)
{ {
View File
@ -10,7 +10,7 @@
<startup_timeout>240000</startup_timeout> <startup_timeout>240000</startup_timeout>
<!-- we want all logs for complex problems investigation --> <!-- we want all logs for complex problems investigation -->
<reserved_log_items>1000000000000000</reserved_log_items> <reserved_log_items>1000000000000000</reserved_log_items>
<snapshot_distance>10000</snapshot_distance> <snapshot_distance>100000</snapshot_distance>
<!-- For instant start in single node configuration --> <!-- For instant start in single node configuration -->
<heart_beat_interval_ms>0</heart_beat_interval_ms> <heart_beat_interval_ms>0</heart_beat_interval_ms>
View File
@ -16,5 +16,6 @@
<count>10</count> <count>10</count>
<stderr>/var/log/clickhouse-server/stderr.log</stderr> <stderr>/var/log/clickhouse-server/stderr.log</stderr>
<stdout>/var/log/clickhouse-server/stdout.log</stdout> <stdout>/var/log/clickhouse-server/stdout.log</stdout>
<rotateOnOpen>true</rotateOnOpen>
</logger> </logger>
</clickhouse> </clickhouse>
View File
@ -31,8 +31,9 @@ from kazoo.exceptions import KazooException
from minio import Minio from minio import Minio
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from helpers.test_tools import assert_eq_with_retry from helpers.test_tools import assert_eq_with_retry, exec_query_with_retry
from helpers import pytest_xdist_logging_to_separate_files from helpers import pytest_xdist_logging_to_separate_files
from helpers.client import QueryRuntimeException
import docker import docker
@ -225,6 +226,8 @@ class ClickHouseCluster:
self.docker_logs_path = p.join(self.instances_dir, 'docker.log') self.docker_logs_path = p.join(self.instances_dir, 'docker.log')
self.env_file = p.join(self.instances_dir, DEFAULT_ENV_NAME) self.env_file = p.join(self.instances_dir, DEFAULT_ENV_NAME)
self.env_variables = {} self.env_variables = {}
self.env_variables["TSAN_OPTIONS"] = "second_deadlock_stack=1"
self.env_variables["CLICKHOUSE_WATCHDOG_ENABLE"] = "0"
self.up_called = False self.up_called = False
custom_dockerd_host = custom_dockerd_host or os.environ.get('CLICKHOUSE_TESTS_DOCKERD_HOST') custom_dockerd_host = custom_dockerd_host or os.environ.get('CLICKHOUSE_TESTS_DOCKERD_HOST')
@ -413,6 +416,10 @@ class ClickHouseCluster:
logging.debug(f"CLUSTER INIT base_config_dir:{self.base_config_dir}") logging.debug(f"CLUSTER INIT base_config_dir:{self.base_config_dir}")
def cleanup(self): def cleanup(self):
if os.environ and 'DISABLE_CLEANUP' in os.environ and os.environ['DISABLE_CLEANUP'] == "1":
logging.warning("Cleanup is disabled")
return
# Just in case kill unstopped containers from previous launch # Just in case kill unstopped containers from previous launch
try: try:
# docker-compose names containers using the following formula: # docker-compose names containers using the following formula:
@ -422,13 +429,13 @@ class ClickHouseCluster:
filter_name = f'^/{self.project_name}_.*_1$' filter_name = f'^/{self.project_name}_.*_1$'
if int(run_and_check(f'docker container list --all --filter name={filter_name} | wc -l', shell=True)) > 1: if int(run_and_check(f'docker container list --all --filter name={filter_name} | wc -l', shell=True)) > 1:
logging.debug(f"Trying to kill unstopped containers for project {self.project_name}:") logging.debug(f"Trying to kill unstopped containers for project {self.project_name}:")
unstopped_containers = run_and_check(f'docker container list --all --filter name={filter_name}', shell=True) unstopped_containers = run_and_check(f'docker container list --all --filter name={filter_name}', shell=True).splitlines()
unstopped_containers_ids = [line.split()[0] for line in unstopped_containers.splitlines()[1:]] logging.debug(f"Unstopped containers {unstopped_containers}")
for id in unstopped_containers_ids: for id in unstopped_containers:
run_and_check(f'docker kill {id}', shell=True, nothrow=True) run_and_check(f'docker kill {id}', shell=True, nothrow=True)
run_and_check(f'docker rm {id}', shell=True, nothrow=True) run_and_check(f'docker rm {id}', shell=True, nothrow=True)
logging.debug("Unstopped containers killed") left_ids = run_and_check(f'docker container list --all --filter name={filter_name}', shell=True)
run_and_check(f'docker container list --all --filter name={filter_name}', shell=True) logging.debug(f"Unstopped containers killed. Left {left_ids}")
else: else:
logging.debug(f"No running containers for project: {self.project_name}") logging.debug(f"No running containers for project: {self.project_name}")
except: except:
@ -962,6 +969,9 @@ class ClickHouseCluster:
logging.info("Restart node with ip change") logging.info("Restart node with ip change")
# In builds with sanitizer the server can take a long time to start # In builds with sanitizer the server can take a long time to start
node.wait_for_start(start_timeout=180.0, connection_timeout=600.0) # seconds node.wait_for_start(start_timeout=180.0, connection_timeout=600.0) # seconds
res = node.client.query("SELECT 30")
logging.debug(f"Read '{res}'")
assert "30\n" == res
logging.info("Restarted") logging.info("Restarted")
return node return node
@ -1414,7 +1424,7 @@ class ClickHouseCluster:
# retry_exception(10, 5, subprocess_check_call, Exception, clickhouse_pull_cmd) # retry_exception(10, 5, subprocess_check_call, Exception, clickhouse_pull_cmd)
if destroy_dirs and p.exists(self.instances_dir): if destroy_dirs and p.exists(self.instances_dir):
logging.debug(("Removing instances dir %s", self.instances_dir)) logging.debug(f"Removing instances dir {self.instances_dir}")
shutil.rmtree(self.instances_dir) shutil.rmtree(self.instances_dir)
for instance in list(self.instances.values()): for instance in list(self.instances.values()):
@ -1424,7 +1434,7 @@ class ClickHouseCluster:
_create_env_file(os.path.join(self.env_file), self.env_variables) _create_env_file(os.path.join(self.env_file), self.env_variables)
self.docker_client = docker.DockerClient(base_url='unix:///var/run/docker.sock', version=self.docker_api_version, timeout=600) self.docker_client = docker.DockerClient(base_url='unix:///var/run/docker.sock', version=self.docker_api_version, timeout=600)
common_opts = ['up', '-d'] common_opts = ['--verbose', 'up', '-d']
if self.with_zookeeper_secure and self.base_zookeeper_cmd: if self.with_zookeeper_secure and self.base_zookeeper_cmd:
logging.debug('Setup ZooKeeper Secure') logging.debug('Setup ZooKeeper Secure')
@ -1644,7 +1654,7 @@ class ClickHouseCluster:
self.shutdown() self.shutdown()
raise raise
def shutdown(self, kill=True): def shutdown(self, kill=True, ignore_fatal=True):
sanitizer_assert_instance = None sanitizer_assert_instance = None
fatal_log = None fatal_log = None
@ -1672,10 +1682,10 @@ class ClickHouseCluster:
# NOTE: we cannot do this via docker since in case of Fatal message container may already die. # NOTE: we cannot do this via docker since in case of Fatal message container may already die.
for name, instance in self.instances.items(): for name, instance in self.instances.items():
if instance.contains_in_log(SANITIZER_SIGN, from_host=True): if instance.contains_in_log(SANITIZER_SIGN, from_host=True):
sanitizer_assert_instance = instance.grep_in_log(SANITIZER_SIGN, from_host=True) sanitizer_assert_instance = instance.grep_in_log(SANITIZER_SIGN, from_host=True, filename='stderr.log')
logging.error("Sanitizer in instance %s log %s", name, sanitizer_assert_instance) logging.error("Sanitizer in instance %s log %s", name, sanitizer_assert_instance)
if instance.contains_in_log("Fatal", from_host=True): if not ignore_fatal and instance.contains_in_log("Fatal", from_host=True):
fatal_log = instance.grep_in_log("Fatal", from_host=True) fatal_log = instance.grep_in_log("Fatal", from_host=True)
if 'Child process was terminated by signal 9 (KILL)' in fatal_log: if 'Child process was terminated by signal 9 (KILL)' in fatal_log:
fatal_log = None fatal_log = None
@ -1685,7 +1695,7 @@ class ClickHouseCluster:
try: try:
subprocess_check_call(self.base_cmd + ['down', '--volumes']) subprocess_check_call(self.base_cmd + ['down', '--volumes'])
except Exception as e: except Exception as e:
logging.debug("Down + remove orphans failed durung shutdown. {}".format(repr(e))) logging.debug("Down + remove orphans failed during shutdown. {}".format(repr(e)))
else: else:
logging.warning("docker-compose up was not called. Trying to export docker.log for running containers") logging.warning("docker-compose up was not called. Trying to export docker.log for running containers")
@ -1768,7 +1778,7 @@ CLICKHOUSE_START_COMMAND = "clickhouse server --config-file=/etc/clickhouse-serv
" --log-file=/var/log/clickhouse-server/clickhouse-server.log " \ " --log-file=/var/log/clickhouse-server/clickhouse-server.log " \
" --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log" " --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log"
CLICKHOUSE_STAY_ALIVE_COMMAND = 'bash -c "trap \'killall tail\' INT TERM; {} --daemon; coproc tail -f /dev/null; wait $$!"'.format(CLICKHOUSE_START_COMMAND) CLICKHOUSE_STAY_ALIVE_COMMAND = 'bash -c "trap \'pkill tail\' INT TERM; {} --daemon; coproc tail -f /dev/null; wait $$!"'.format(CLICKHOUSE_START_COMMAND)
# /run/xtables.lock passed inside for correct iptables --wait # /run/xtables.lock passed inside for correct iptables --wait
DOCKER_COMPOSE_TEMPLATE = ''' DOCKER_COMPOSE_TEMPLATE = '''
@ -2034,84 +2044,122 @@ class ClickHouseInstance:
if not self.stay_alive: if not self.stay_alive:
raise Exception("clickhouse can be stopped only with stay_alive=True instance") raise Exception("clickhouse can be stopped only with stay_alive=True instance")
try: try:
ps_clickhouse = self.exec_in_container(["bash", "-c", "ps -C clickhouse"], user='root') ps_clickhouse = self.exec_in_container(["bash", "-c", "ps -C clickhouse"], nothrow=True, user='root')
if ps_clickhouse == " PID TTY STAT TIME COMMAND" : if ps_clickhouse == " PID TTY STAT TIME COMMAND" :
logging.warning("ClickHouse process already stopped") logging.warning("ClickHouse process already stopped")
return return
self.exec_in_container(["bash", "-c", "pkill {} clickhouse".format("-9" if kill else "")], user='root') self.exec_in_container(["bash", "-c", "pkill {} clickhouse".format("-9" if kill else "")], user='root')
sleep_time = 0.1 start_time = time.time()
num_steps = int(stop_wait_sec / sleep_time)
stopped = False stopped = False
for step in range(num_steps): while time.time() <= start_time + stop_wait_sec:
time.sleep(sleep_time) pid = self.get_process_pid("clickhouse")
ps_clickhouse = self.exec_in_container(["bash", "-c", "ps -C clickhouse"], user='root') if pid is None:
if ps_clickhouse == " PID TTY STAT TIME COMMAND":
stopped = True stopped = True
break break
else:
time.sleep(1)
if not stopped: if not stopped:
logging.warning(f"Force kill clickhouse in stop_clickhouse. ps:{ps_clickhouse}") pid = self.get_process_pid("clickhouse")
self.stop_clickhouse(kill=True) if pid is not None:
logging.warning(f"Force kill clickhouse in stop_clickhouse. ps:{pid}")
self.exec_in_container(["bash", "-c", f"gdb -batch -ex 'thread apply all bt full' -p {pid} > {os.path.join(self.path, 'logs/stdout.log')}"], user='root')
self.stop_clickhouse(kill=True)
else:
ps_all = self.exec_in_container(["bash", "-c", "ps aux"], nothrow=True, user='root')
logging.warning(f"We want force stop clickhouse, but no clickhouse-server is running\n{ps_all}")
return
except Exception as e: except Exception as e:
logging.warning(f"Stop ClickHouse raised an error {e}") logging.warning(f"Stop ClickHouse raised an error {e}")
def start_clickhouse(self, start_wait_sec=30): def start_clickhouse(self, start_wait_sec=60):
if not self.stay_alive: if not self.stay_alive:
raise Exception("ClickHouse can be started again only with stay_alive=True instance") raise Exception("ClickHouse can be started again only with stay_alive=True instance")
start_time = time.time()
time_to_sleep = 0.5 time_to_sleep = 0.5
start_tries = 5
total_tries = int(start_wait_sec / time_to_sleep) while start_time + start_wait_sec >= time.time():
query_tries = int(total_tries / start_tries)
for i in range(start_tries):
# sometimes after SIGKILL (hard reset) server may refuse to start for some time # sometimes after SIGKILL (hard reset) server may refuse to start for some time
# for different reasons. # for different reasons.
self.exec_in_container(["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], user=str(os.getuid())) pid = self.get_process_pid("clickhouse")
started = False if pid is None:
for _ in range(query_tries): logging.debug("No clickhouse process running. Start new one.")
self.exec_in_container(["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], user=str(os.getuid()))
time.sleep(1)
continue
else:
logging.debug("Clickhouse process running.")
try: try:
self.query("select 1") self.wait_start(start_wait_sec + start_time - time.time())
started = True return
break except Exception as e:
except: logging.warning(f"Current start attempt failed. Will kill {pid} just in case.")
time.sleep(time_to_sleep) self.exec_in_container(["bash", "-c", f"kill -9 {pid}"], user='root', nothrow=True)
if started: time.sleep(time_to_sleep)
break
else: raise Exception("Cannot start ClickHouse, see additional info in logs")
raise Exception("Cannot start ClickHouse, see additional info in logs")
def restart_clickhouse(self, stop_start_wait_sec=30, kill=False): def wait_start(self, start_wait_sec):
start_time = time.time()
last_err = None
while time.time() <= start_time + start_wait_sec:
try:
pid = self.get_process_pid("clickhouse")
if pid is None:
raise Exception("ClickHouse server is not running. Check logs.")
exec_query_with_retry(self, 'select 20', retry_count = 10, silent=True)
return
except QueryRuntimeException as err:
last_err = err
pid = self.get_process_pid("clickhouse")
if pid is not None:
logging.warning(f"ERROR {err}")
else:
raise Exception("ClickHouse server is not running. Check logs.")
logging.error(f"No time left to start. But process is still running. Will dump threads.")
ps_clickhouse = self.exec_in_container(["bash", "-c", "ps -C clickhouse"], nothrow=True, user='root')
logging.info(f"PS RESULT:\n{ps_clickhouse}")
pid = self.get_process_pid("clickhouse")
if pid is not None:
self.exec_in_container(["bash", "-c", f"gdb -batch -ex 'thread apply all bt full' -p {pid}"], user='root')
if last_err is not None:
raise last_err
def restart_clickhouse(self, stop_start_wait_sec=60, kill=False):
self.stop_clickhouse(stop_start_wait_sec, kill) self.stop_clickhouse(stop_start_wait_sec, kill)
self.start_clickhouse(stop_start_wait_sec) self.start_clickhouse(stop_start_wait_sec)
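Taken together, the restart path now works against a single time budget: `stop_clickhouse` polls for the process instead of sleeping a fixed number of steps (and dumps all thread stacks via gdb before force-killing), `start_clickhouse` keeps relaunching the daemon while time remains, and `wait_start` spends whatever is left probing with `select 20`. A usage sketch — `node` stands for any `stay_alive=True` instance, and the parameter names are the ones used in the bodies above:

node.stop_clickhouse(stop_wait_sec=30, kill=False)   # graceful stop; force kill + gdb dump on timeout
node.start_clickhouse(start_wait_sec=60)             # relaunch until a probe query answers
node.restart_clickhouse(stop_start_wait_sec=60)      # both phases, same budget each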
def exec_in_container(self, cmd, detach=False, nothrow=False, **kwargs): def exec_in_container(self, cmd, detach=False, nothrow=False, **kwargs):
return self.cluster.exec_in_container(self.docker_id, cmd, detach, nothrow, **kwargs) return self.cluster.exec_in_container(self.docker_id, cmd, detach, nothrow, **kwargs)
def contains_in_log(self, substring, from_host=False): def rotate_logs(self):
self.exec_in_container(["bash", "-c", f"kill -HUP {self.get_process_pid('clickhouse server')}"], user='root')
def contains_in_log(self, substring, from_host=False, filename='clickhouse-server.log'):
if from_host: if from_host:
# We first check that the file exists, but search all rotated logs as well
result = subprocess_check_call(["bash", "-c", result = subprocess_check_call(["bash", "-c",
f'[ -f {self.logs_dir}/clickhouse-server.log ] && grep -a "{substring}" {self.logs_dir}/clickhouse-server.log || true' f'[ -f {self.logs_dir}/{filename} ] && zgrep -aH "{substring}" {self.logs_dir}/{filename}* || true'
]) ])
else: else:
result = self.exec_in_container(["bash", "-c", result = self.exec_in_container(["bash", "-c",
f'[ -f /var/log/clickhouse-server/clickhouse-server.log ] && grep -a "{substring}" /var/log/clickhouse-server/clickhouse-server.log || true' f'[ -f /var/log/clickhouse-server/{filename} ] && zgrep -aH "{substring}" /var/log/clickhouse-server/{filename} || true'
]) ])
return len(result) > 0 return len(result) > 0
def grep_in_log(self, substring, from_host=False): def grep_in_log(self, substring, from_host=False, filename='clickhouse-server.log'):
logging.debug(f"grep in log called %s", substring) logging.debug(f"grep in log called %s", substring)
if from_host: if from_host:
# We first check that the file exists, but search all rotated logs as well
result = subprocess_check_call(["bash", "-c", result = subprocess_check_call(["bash", "-c",
f'grep -a "{substring}" {self.logs_dir}/clickhouse-server.log || true' f'[ -f {self.logs_dir}/{filename} ] && zgrep -a "{substring}" {self.logs_dir}/{filename}* || true'
]) ])
else: else:
result = self.exec_in_container(["bash", "-c", result = self.exec_in_container(["bash", "-c",
f'grep -a "{substring}" /var/log/clickhouse-server/clickhouse-server.log || true' f'[ -f /var/log/clickhouse-server/{filename} ] && zgrep -a "{substring}" /var/log/clickhouse-server/{filename}* || true'
]) ])
logging.debug("grep result %s", result) logging.debug("grep result %s", result)
return result return result
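Since `zgrep` transparently reads gzip-compressed files, the `{filename}*` glob covers the live log plus the rotated `.gz` files that the `rotateOnOpen` logger setting above starts producing. A standalone sketch of the host-side search (paths illustrative):

import subprocess

def grep_logs(logs_dir, substring, filename='clickhouse-server.log'):
    # Matches clickhouse-server.log, clickhouse-server.log.0.gz, ... alike.
    cmd = f'[ -f {logs_dir}/{filename} ] && zgrep -a "{substring}" {logs_dir}/{filename}* || true'
    return subprocess.check_output(['bash', '-c', cmd]).decode()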
@ -2146,7 +2194,7 @@ class ClickHouseInstance:
def get_process_pid(self, process_name): def get_process_pid(self, process_name):
output = self.exec_in_container(["bash", "-c", output = self.exec_in_container(["bash", "-c",
"ps ax | grep '{}' | grep -v 'grep' | grep -v 'bash -c' | awk '{{print $1}}'".format( "ps ax | grep '{}' | grep -v 'grep' | grep -v 'coproc' | grep -v 'bash -c' | awk '{{print $1}}'".format(
process_name)]) process_name)])
if output: if output:
try: try:
@ -2157,6 +2205,7 @@ class ClickHouseInstance:
return None return None
def restart_with_original_version(self, stop_start_wait_sec=300, callback_onstop=None, signal=15): def restart_with_original_version(self, stop_start_wait_sec=300, callback_onstop=None, signal=15):
begin_time = time.time()
if not self.stay_alive: if not self.stay_alive:
raise Exception("Cannot restart not stay alive container") raise Exception("Cannot restart not stay alive container")
self.exec_in_container(["bash", "-c", "pkill -{} clickhouse".format(signal)], user='root') self.exec_in_container(["bash", "-c", "pkill -{} clickhouse".format(signal)], user='root')
@ -2176,6 +2225,7 @@ class ClickHouseInstance:
if callback_onstop: if callback_onstop:
callback_onstop(self) callback_onstop(self)
self.exec_in_container(["bash", "-c", "echo 'restart_with_original_version: From version' && /usr/bin/clickhouse server --version && echo 'To version' && /usr/share/clickhouse_original server --version"])
self.exec_in_container( self.exec_in_container(
["bash", "-c", "cp /usr/share/clickhouse_original /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse"], ["bash", "-c", "cp /usr/share/clickhouse_original /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse"],
user='root') user='root')
@ -2185,9 +2235,14 @@ class ClickHouseInstance:
self.exec_in_container(["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], user=str(os.getuid())) self.exec_in_container(["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], user=str(os.getuid()))
# wait start # wait start
assert_eq_with_retry(self, "select 1", "1", retry_count=retries) time_left = begin_time + stop_start_wait_sec - time.time()
if time_left <= 0:
raise Exception(f"No time left during restart")
else:
self.wait_start(time_left)
def restart_with_latest_version(self, stop_start_wait_sec=300, callback_onstop=None, signal=15): def restart_with_latest_version(self, stop_start_wait_sec=300, callback_onstop=None, signal=15):
begin_time = time.time()
if not self.stay_alive: if not self.stay_alive:
raise Exception("Cannot restart not stay alive container") raise Exception("Cannot restart not stay alive container")
self.exec_in_container(["bash", "-c", "pkill -{} clickhouse".format(signal)], user='root') self.exec_in_container(["bash", "-c", "pkill -{} clickhouse".format(signal)], user='root')
@ -2213,13 +2268,18 @@ class ClickHouseInstance:
self.exec_in_container( self.exec_in_container(
["bash", "-c", "cp /usr/share/clickhouse_fresh /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse"], ["bash", "-c", "cp /usr/share/clickhouse_fresh /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse"],
user='root') user='root')
self.exec_in_container(["bash", "-c", "echo 'restart_with_latest_version: From version' && /usr/share/clickhouse_original server --version && echo 'To version' /usr/share/clickhouse_fresh server --version"])
self.exec_in_container(["bash", "-c", self.exec_in_container(["bash", "-c",
"cp /usr/share/clickhouse-odbc-bridge_fresh /usr/bin/clickhouse-odbc-bridge && chmod 777 /usr/bin/clickhouse"], "cp /usr/share/clickhouse-odbc-bridge_fresh /usr/bin/clickhouse-odbc-bridge && chmod 777 /usr/bin/clickhouse"],
user='root') user='root')
self.exec_in_container(["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], user=str(os.getuid())) self.exec_in_container(["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], user=str(os.getuid()))
# wait start # wait start
assert_eq_with_retry(self, "select 1", "1", retry_count=retries) time_left = begin_time + stop_start_wait_sec - time.time()
if time_left <= 0:
raise Exception(f"No time left during restart")
else:
self.wait_start(time_left)
def get_docker_handle(self): def get_docker_handle(self):
return self.cluster.get_docker_handle(self.docker_id) return self.cluster.get_docker_handle(self.docker_id)
View File
@ -85,15 +85,18 @@ def assert_logs_contain_with_retry(instance, substring, retry_count=20, sleep_ti
else: else:
raise AssertionError("'{}' not found in logs".format(substring)) raise AssertionError("'{}' not found in logs".format(substring))
def exec_query_with_retry(instance, query, retry_count=40, sleep_time=0.5, settings={}): def exec_query_with_retry(instance, query, retry_count=40, sleep_time=0.5, silent=False, settings={}):
exception = None exception = None
for _ in range(retry_count): for cnt in range(retry_count):
try: try:
instance.query(query, timeout=30, settings=settings) res = instance.query(query, timeout=30, settings=settings)
if not silent:
logging.debug(f"Result of {query} on {cnt} try is {res}")
break break
except Exception as ex: except Exception as ex:
exception = ex exception = ex
logging.exception(f"Failed to execute query '{query}' on instance '{instance.name}' will retry") if not silent:
logging.exception(f"Failed to execute query '{query}' on {cnt} try on instance '{instance.name}' will retry")
time.sleep(sleep_time) time.sleep(sleep_time)
else: else:
raise exception raise exception
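The new `silent` flag exists because `wait_start` above calls this helper in a tight loop while the server is still coming up, where per-attempt logging is pure noise. Usage sketch:

exec_query_with_retry(node1, "SYSTEM SYNC REPLICA replicated_mt")       # logs each attempt
exec_query_with_retry(node1, "select 20", retry_count=10, silent=True)  # quiet boot probe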
View File
@ -15,12 +15,6 @@ node1 = cluster.add_instance('node1', with_zookeeper=True)
def started_cluster(): def started_cluster():
try: try:
cluster.start() cluster.start()
node1.query('''
CREATE TABLE replicated_mt(date Date, id UInt32, value Int32)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt', '{replica}') ORDER BY id;
'''.format(replica=node1.name))
yield cluster yield cluster
finally: finally:
@ -28,6 +22,12 @@ def started_cluster():
def test_merge_and_part_corruption(started_cluster): def test_merge_and_part_corruption(started_cluster):
node1.query('''
CREATE TABLE replicated_mt(date Date, id UInt32, value Int32)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt', '{replica}') ORDER BY id;
'''.format(replica=node1.name))
node1.query("SYSTEM STOP REPLICATION QUEUES replicated_mt") node1.query("SYSTEM STOP REPLICATION QUEUES replicated_mt")
for i in range(4): for i in range(4):
node1.query("INSERT INTO replicated_mt SELECT toDate('2019-10-01'), number, number * number FROM numbers ({f}, 100000)".format(f=i*100000)) node1.query("INSERT INTO replicated_mt SELECT toDate('2019-10-01'), number, number * number FROM numbers ({f}, 100000)".format(f=i*100000))
@ -53,3 +53,5 @@ def test_merge_and_part_corruption(started_cluster):
# will hang if the checked bug is not fixed # will hang if the checked bug is not fixed
node1.query("ALTER TABLE replicated_mt UPDATE value = 7 WHERE 1", settings={"mutations_sync": 2}, timeout=30) node1.query("ALTER TABLE replicated_mt UPDATE value = 7 WHERE 1", settings={"mutations_sync": 2}, timeout=30)
assert node1.query("SELECT sum(value) FROM replicated_mt") == "2100000\n" assert node1.query("SELECT sum(value) FROM replicated_mt") == "2100000\n"
node1.query('DROP TABLE replicated_mt SYNC')
View File
@ -48,7 +48,8 @@ def test_cleanup_dir_after_bad_zk_conn(start_cluster):
node1.query_with_retry(query_create) node1.query_with_retry(query_create)
node1.query_with_retry('''INSERT INTO replica.test VALUES (1, now())''') node1.query_with_retry('''INSERT INTO replica.test VALUES (1, now())''')
assert "1\n" in node1.query('''SELECT count() from replica.test FORMAT TSV''') assert "1\n" in node1.query('''SELECT count() from replica.test FORMAT TSV''')
node1.query("DROP TABLE replica.test SYNC")
node1.query("DROP DATABASE replica")
def test_cleanup_dir_after_wrong_replica_name(start_cluster): def test_cleanup_dir_after_wrong_replica_name(start_cluster):
node1.query_with_retry( node1.query_with_retry(
@ -68,7 +69,8 @@ def test_cleanup_dir_after_wrong_zk_path(start_cluster):
assert "Cannot create" in error assert "Cannot create" in error
node1.query( node1.query(
"CREATE TABLE test3_r2 (n UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test3/', 'r2') ORDER BY n") "CREATE TABLE test3_r2 (n UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test3/', 'r2') ORDER BY n")
node1.query("DROP TABLE test3_r1 SYNC")
node1.query("DROP TABLE test3_r2 SYNC")
def test_attach_without_zk(start_cluster): def test_attach_without_zk(start_cluster):
node1.query_with_retry( node1.query_with_retry(
@ -82,3 +84,4 @@ def test_attach_without_zk(start_cluster):
pass pass
node1.query("ATTACH TABLE IF NOT EXISTS test4_r1") node1.query("ATTACH TABLE IF NOT EXISTS test4_r1")
node1.query("SELECT * FROM test4_r1") node1.query("SELECT * FROM test4_r1")
node1.query("DROP TABLE test4_r1 SYNC")
View File
@ -1,4 +1,5 @@
import time import time
import logging
import pytest import pytest
from helpers.cluster import ClickHouseCluster from helpers.cluster import ClickHouseCluster
@ -13,7 +14,6 @@ node2 = cluster.add_instance('node2', main_configs=['configs/fast_background_poo
def started_cluster(): def started_cluster():
try: try:
cluster.start() cluster.start()
yield cluster yield cluster
finally: finally:
@ -22,7 +22,7 @@ def started_cluster():
def count_ttl_merges_in_queue(node, table): def count_ttl_merges_in_queue(node, table):
result = node.query( result = node.query(
"SELECT count() FROM system.replication_queue WHERE merge_type = 'TTL_DELETE' and table = '{}'".format(table)) f"SELECT count() FROM system.replication_queue WHERE merge_type = 'TTL_DELETE' and table = '{table}'")
if not result: if not result:
return 0 return 0
return int(result.strip()) return int(result.strip())
@ -30,22 +30,22 @@ def count_ttl_merges_in_queue(node, table):
def count_ttl_merges_in_background_pool(node, table, level): def count_ttl_merges_in_background_pool(node, table, level):
result = TSV(node.query( result = TSV(node.query(
"SELECT * FROM system.merges WHERE merge_type = 'TTL_DELETE' and table = '{}'".format(table))) f"SELECT * FROM system.merges WHERE merge_type = 'TTL_DELETE' and table = '{table}'"))
count = len(result) count = len(result)
if count >= level: if count >= level:
print("count_ttl_merges_in_background_pool: merges more than warn level:\n{}".format(result)) logging.debug(f"count_ttl_merges_in_background_pool: merges more than warn level:\n{result}")
return count return count
def count_regular_merges_in_background_pool(node, table): def count_regular_merges_in_background_pool(node, table):
result = node.query("SELECT count() FROM system.merges WHERE merge_type = 'REGULAR' and table = '{}'".format(table)) result = node.query(f"SELECT count() FROM system.merges WHERE merge_type = 'REGULAR' and table = '{table}'")
if not result: if not result:
return 0 return 0
return int(result.strip()) return int(result.strip())
def count_running_mutations(node, table): def count_running_mutations(node, table):
result = node.query("SELECT count() FROM system.merges WHERE table = '{}' and is_mutation=1".format(table)) result = node.query(f"SELECT count() FROM system.merges WHERE table = '{table}' and is_mutation=1")
if not result: if not result:
return 0 return 0
return int(result.strip()) return int(result.strip())
@ -55,7 +55,6 @@ def count_running_mutations(node, table):
# but it revealed a bug when we assign different merges to the same part # but it revealed a bug when we assign different merges to the same part
# on the borders of partitions. # on the borders of partitions.
def test_no_ttl_merges_in_busy_pool(started_cluster): def test_no_ttl_merges_in_busy_pool(started_cluster):
node1.query("DROP TABLE IF EXISTS test_ttl")
node1.query( node1.query(
"CREATE TABLE test_ttl (d DateTime, key UInt64, data UInt64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0, number_of_free_entries_in_pool_to_execute_mutation = 0") "CREATE TABLE test_ttl (d DateTime, key UInt64, data UInt64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0, number_of_free_entries_in_pool_to_execute_mutation = 0")
@ -63,12 +62,12 @@ def test_no_ttl_merges_in_busy_pool(started_cluster):
for i in range(1, 7): for i in range(1, 7):
node1.query( node1.query(
"INSERT INTO test_ttl SELECT now() - INTERVAL 1 MONTH + number - 1, {}, number FROM numbers(5)".format(i)) f"INSERT INTO test_ttl SELECT now() - INTERVAL 1 MONTH + number - 1, {i}, number FROM numbers(5)")
node1.query("ALTER TABLE test_ttl UPDATE data = data + 1 WHERE sleepEachRow(1) = 0") node1.query("ALTER TABLE test_ttl UPDATE data = data + 1 WHERE sleepEachRow(1) = 0")
while count_running_mutations(node1, "test_ttl") < 6: while count_running_mutations(node1, "test_ttl") < 6:
print("Mutations count", count_running_mutations(node1, "test_ttl")) logging.debug(f"Mutations count {count_running_mutations(node1, 'test_ttl')}")
assert count_ttl_merges_in_background_pool(node1, "test_ttl", 1) == 0 assert count_ttl_merges_in_background_pool(node1, "test_ttl", 1) == 0
time.sleep(0.5) time.sleep(0.5)
@ -76,7 +75,7 @@ def test_no_ttl_merges_in_busy_pool(started_cluster):
rows_count = [] rows_count = []
while count_running_mutations(node1, "test_ttl") == 6: while count_running_mutations(node1, "test_ttl") == 6:
print("Mutations count after start TTL", count_running_mutations(node1, "test_ttl")) logging.debug(f"Mutations count after start TTL{count_running_mutations(node1, 'test_ttl')}")
rows_count.append(int(node1.query("SELECT count() FROM test_ttl").strip())) rows_count.append(int(node1.query("SELECT count() FROM test_ttl").strip()))
time.sleep(0.5) time.sleep(0.5)
@ -85,17 +84,17 @@ def test_no_ttl_merges_in_busy_pool(started_cluster):
assert sum([1 for count in rows_count if count == 30]) > 4 assert sum([1 for count in rows_count if count == 30]) > 4
assert_eq_with_retry(node1, "SELECT COUNT() FROM test_ttl", "0") assert_eq_with_retry(node1, "SELECT COUNT() FROM test_ttl", "0")
node1.query("DROP TABLE test_ttl SYNC")
def test_limited_ttl_merges_in_empty_pool(started_cluster): def test_limited_ttl_merges_in_empty_pool(started_cluster):
node1.query("DROP TABLE IF EXISTS test_ttl_v2")
node1.query( node1.query(
"CREATE TABLE test_ttl_v2 (d DateTime, key UInt64, data UInt64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0") "CREATE TABLE test_ttl_v2 (d DateTime, key UInt64, data UInt64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0")
node1.query("SYSTEM STOP TTL MERGES") node1.query("SYSTEM STOP TTL MERGES")
for i in range(100): for i in range(100):
node1.query("INSERT INTO test_ttl_v2 SELECT now() - INTERVAL 1 MONTH, {}, number FROM numbers(1)".format(i)) node1.query(f"INSERT INTO test_ttl_v2 SELECT now() - INTERVAL 1 MONTH, {i}, number FROM numbers(1)")
assert node1.query("SELECT COUNT() FROM test_ttl_v2") == "100\n" assert node1.query("SELECT COUNT() FROM test_ttl_v2") == "100\n"
@ -109,17 +108,17 @@ def test_limited_ttl_merges_in_empty_pool(started_cluster):
break break
assert max(merges_with_ttl_count) <= 2 assert max(merges_with_ttl_count) <= 2
node1.query("DROP TABLE test_ttl_v2 SYNC")
def test_limited_ttl_merges_in_empty_pool_replicated(started_cluster):
+    node1.query("DROP TABLE IF EXISTS replicated_ttl")
    node1.query(
        "CREATE TABLE replicated_ttl (d DateTime, key UInt64, data UInt64) ENGINE = ReplicatedMergeTree('/test/t', '1') ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0")
    node1.query("SYSTEM STOP TTL MERGES")
    for i in range(100):
-        node1.query_with_retry("INSERT INTO replicated_ttl SELECT now() - INTERVAL 1 MONTH, {}, number FROM numbers(1)".format(i))
+        node1.query_with_retry(f"INSERT INTO replicated_ttl SELECT now() - INTERVAL 1 MONTH, {i}, number FROM numbers(1)")
    assert node1.query("SELECT COUNT() FROM replicated_ttl") == "100\n"
@@ -137,12 +136,11 @@ def test_limited_ttl_merges_in_empty_pool_replicated(started_cluster):
    assert max(merges_with_ttl_count) <= 2
    assert max(entries_with_ttl_count) <= 1
+    node1.query("DROP TABLE replicated_ttl SYNC")

def test_limited_ttl_merges_two_replicas(started_cluster):
    # Actually this test is quite fast and often we cannot catch any merges.
node1.query("DROP TABLE IF EXISTS replicated_ttl_2")
node2.query("DROP TABLE IF EXISTS replicated_ttl_2")
node1.query( node1.query(
"CREATE TABLE replicated_ttl_2 (d DateTime, key UInt64, data UInt64) ENGINE = ReplicatedMergeTree('/test/t2', '1') ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0") "CREATE TABLE replicated_ttl_2 (d DateTime, key UInt64, data UInt64) ENGINE = ReplicatedMergeTree('/test/t2', '1') ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0")
node2.query( node2.query(
@ -153,7 +151,7 @@ def test_limited_ttl_merges_two_replicas(started_cluster):
for i in range(100): for i in range(100):
node1.query_with_retry( node1.query_with_retry(
"INSERT INTO replicated_ttl_2 SELECT now() - INTERVAL 1 MONTH, {}, number FROM numbers(10000)".format(i)) f"INSERT INTO replicated_ttl_2 SELECT now() - INTERVAL 1 MONTH, {i}, number FROM numbers(10000)")
node2.query("SYSTEM SYNC REPLICA replicated_ttl_2", timeout=10) node2.query("SYSTEM SYNC REPLICA replicated_ttl_2", timeout=10)
assert node1.query("SELECT COUNT() FROM replicated_ttl_2") == "1000000\n" assert node1.query("SELECT COUNT() FROM replicated_ttl_2") == "1000000\n"
@@ -176,3 +174,6 @@ def test_limited_ttl_merges_two_replicas(started_cluster):
    # check them
    assert max(merges_with_ttl_count_node1) <= 2
    assert max(merges_with_ttl_count_node2) <= 2
+    node1.query("DROP TABLE replicated_ttl_2 SYNC")
+    node2.query("DROP TABLE replicated_ttl_2 SYNC")
View File
@@ -1,7 +1,8 @@
import random
import string
+import logging
import pytest
+import time
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
@@ -130,6 +131,9 @@ def test_default_codec_single(start_cluster):
    assert node1.query("SELECT COUNT() FROM compression_table") == "3\n"
    assert node2.query("SELECT COUNT() FROM compression_table") == "3\n"
+    node1.query("DROP TABLE compression_table SYNC")
+    node2.query("DROP TABLE compression_table SYNC")

def test_default_codec_multiple(start_cluster):
    for i, node in enumerate([node1, node2]):
@@ -199,6 +203,9 @@ def test_default_codec_multiple(start_cluster):
    assert node1.query("SELECT COUNT() FROM compression_table_multiple") == "3\n"
    assert node2.query("SELECT COUNT() FROM compression_table_multiple") == "3\n"
+    node1.query("DROP TABLE compression_table_multiple SYNC")
+    node2.query("DROP TABLE compression_table_multiple SYNC")

def test_default_codec_version_update(start_cluster):
    node3.query("""
@@ -212,8 +219,10 @@ def test_default_codec_version_update(start_cluster):
    node3.query("INSERT INTO compression_table VALUES (2, '{}')".format(get_random_string(2048)))
    node3.query("INSERT INTO compression_table VALUES (3, '{}')".format(get_random_string(22048)))
+    old_version = node3.query("SELECT version()")
    node3.restart_with_latest_version()
+    new_version = node3.query("SELECT version()")
+    logging.debug(f"Updated from {old_version} to {new_version}")
    assert node3.query(
        "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '1_1_1_0'") == "ZSTD(1)\n"
    assert node3.query(
@@ -230,6 +239,16 @@ def test_default_codec_version_update(start_cluster):
    assert node3.query(
        "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '3_3_3_1'") == "LZ4\n"
node3.query("DROP TABLE compression_table SYNC")
def callback(n):
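+        # Wipe the system database files before the downgrade: the newer server may have written metadata that the original (older) version cannot load.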
+        n.exec_in_container(['bash', '-c', 'rm -rf /var/lib/clickhouse/metadata/system /var/lib/clickhouse/data/system '], user='root')
+    node3.restart_with_original_version(callback_onstop=callback)
+    cur_version = node3.query("SELECT version()")
+    logging.debug(f"End with {cur_version}")
def test_default_codec_for_compact_parts(start_cluster):
    node4.query("""
        CREATE TABLE compact_parts_table (
@@ -254,3 +273,4 @@ def test_default_codec_for_compact_parts(start_cluster):
    node4.query("ATTACH TABLE compact_parts_table")
    assert node4.query("SELECT COUNT() FROM compact_parts_table") == "1\n"
+    node4.query("DROP TABLE compact_parts_table SYNC")
View File
@@ -0,0 +1,2 @@
+dictionaries/*
+!.gitignore
Some files were not shown because too many files have changed in this diff.