diff --git a/contrib/llvm-cmake/CMakeLists.txt b/contrib/llvm-cmake/CMakeLists.txt
index 6ff07f0e016..87c8a65510f 100644
--- a/contrib/llvm-cmake/CMakeLists.txt
+++ b/contrib/llvm-cmake/CMakeLists.txt
@@ -1,12 +1,9 @@
-# During cross-compilation in our CI we have to use llvm-tblgen and other building tools
-# tools to be build for host architecture and everything else for target architecture (e.g. AArch64)
-# Possible workaround is to use llvm-tblgen from some package...
-# But lets just enable LLVM for native builds
-if (CMAKE_CROSSCOMPILING OR SANITIZE STREQUAL "undefined")
- set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF)
+if (APPLE OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined")
+ set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF)
else()
- set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON)
+ set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON)
endif()
+
option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})
if (NOT ENABLE_EMBEDDED_COMPILER)
diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md
index a0acda5d5c6..b70cd225cdd 100644
--- a/docs/en/engines/table-engines/mergetree-family/mergetree.md
+++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md
@@ -688,7 +688,7 @@ Tags:
- `volume_name_N` — Volume name. Volume names must be unique.
- `disk` — a disk within a volume.
- `max_data_part_size_bytes` — the maximum size of a part that can be stored on any of the volume’s disks. If the size of a merged part is estimated to be bigger than `max_data_part_size_bytes`, then this part will be written to the next volume. Basically, this feature allows keeping new/small parts on a hot (SSD) volume and moving them to a cold (HDD) volume when they reach a large size. Do not use this setting if your policy has only one volume.
-- `move_factor` — when the amount of available space gets lower than this factor, data automatically start to move on the next volume if any (by default, 0.1).
+- `move_factor` — when the amount of available space gets lower than this factor, data automatically starts to move to the next volume, if there is one (by default, 0.1). ClickHouse sorts existing parts by size from largest to smallest (in descending order) and selects parts whose total size is sufficient to satisfy the `move_factor` condition; if the total size of all parts is insufficient, all parts will be moved (a selection sketch follows this hunk).
- `prefer_not_to_merge` — Disables merging of data parts on this volume. When this setting is enabled, merging data on this volume is not allowed. This allows controlling how ClickHouse works with slow disks.
Configuration examples:
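
The part-selection rule added to the `move_factor` description above can be summarized with a small standalone sketch. This is not ClickHouse code: the function name and the byte-count parameter are illustrative only, and the real implementation lives in the MergeTree background move logic.

```cpp
#include <algorithm>
#include <cstdint>
#include <functional>
#include <vector>

/// Illustrative only: order parts largest-first and take them until the freed
/// bytes satisfy the move_factor condition; if even all parts are not enough,
/// everything is selected ("all parts will be moved").
std::vector<uint64_t> selectPartsToMove(std::vector<uint64_t> part_sizes, uint64_t bytes_to_free)
{
    std::sort(part_sizes.begin(), part_sizes.end(), std::greater<>());

    std::vector<uint64_t> selected;
    uint64_t freed = 0;
    for (uint64_t size : part_sizes)
    {
        if (freed >= bytes_to_free)
            break;
        selected.push_back(size);
        freed += size;
    }
    return selected;
}
```

Here `bytes_to_free` stands for the amount needed to bring free space back above the `move_factor` threshold; the sketch returns part sizes rather than real part identifiers purely for brevity.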
diff --git a/docs/en/operations/named-collections.md b/docs/en/operations/named-collections.md
index dce7938f98b..ab972c72345 100644
--- a/docs/en/operations/named-collections.md
+++ b/docs/en/operations/named-collections.md
@@ -36,6 +36,7 @@ Example of configuration:
<access_key_id>AKIAIOSFODNN7EXAMPLE</access_key_id>
<secret_access_key>wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY</secret_access_key>
<format>CSV</format>
+ <url>https://s3.us-east-1.amazonaws.com/yourbucket/mydata/</url>
@@ -44,12 +45,12 @@ Example of configuration:
### Example of using named connections with the s3 function
```sql
-INSERT INTO FUNCTION s3(s3_mydata, url = 'https://s3.us-east-1.amazonaws.com/yourbucket/mydata/test_file.tsv.gz',
+INSERT INTO FUNCTION s3(s3_mydata, filename = 'test_file.tsv.gz',
format = 'TSV', structure = 'number UInt64', compression_method = 'gzip')
SELECT * FROM numbers(10000);
SELECT count()
-FROM s3(s3_mydata, url = 'https://s3.us-east-1.amazonaws.com/yourbucket/mydata/test_file.tsv.gz')
+FROM s3(s3_mydata, filename = 'test_file.tsv.gz')
┌─count()─┐
│ 10000 │
diff --git a/docs/ja/development/developer-instruction.md b/docs/ja/development/developer-instruction.md
index c95dc0e2ea4..48afc77237c 100644
--- a/docs/ja/development/developer-instruction.md
+++ b/docs/ja/development/developer-instruction.md
@@ -273,7 +273,7 @@ GitHubのUIでforkリポジトリに移動します。 ブランチで開発し
プル要求は、作業がまだ完了していない場合でも作成できます。 この場合、単語を入れてください “WIP” (進行中の作業)タイトルの先頭に、それは後で変更することができます。 これは、変更の協調的なレビューと議論、および利用可能なすべてのテストの実行に役立ちます。 変更の簡単な説明を提供することが重要です。
-Yandexの従業員がタグであなたのPRにラベルを付けるとすぐにテストが開始されます “can be tested”. The results of some first checks (e.g. code style) will come in within several minutes. Build check results will arrive within half an hour. And the main set of tests will report itself within an hour.
+ClickHouseの従業員があなたのPRに “can be tested” タグを付けるとすぐにテストが開始されます。The results of some first checks (e.g. code style) will come in within several minutes. Build check results will arrive within half an hour. And the main set of tests will report itself within an hour.
システムは、プル要求用にClickHouseバイナリビルドを個別に準備します。 これらのビルドを取得するには “Details” 次のリンク “ClickHouse build check” 小切手のリストのエントリ。 そこには、ビルドへの直接リンクがあります。ClickHouseのdebパッケージは、本番サーバーにも展開できます(恐れがない場合)。
diff --git a/docs/ru/development/developer-instruction.md b/docs/ru/development/developer-instruction.md
index 964d39163d8..5b6740e88bb 100644
--- a/docs/ru/development/developer-instruction.md
+++ b/docs/ru/development/developer-instruction.md
@@ -72,11 +72,11 @@ ClickHouse не работает и не собирается на 32-битны
Этот вариант не подходит для отправки изменений на сервер. Вы можете временно его использовать, а затем добавить ssh ключи и заменить адрес репозитория с помощью команды `git remote`.
-Вы можете также добавить для своего локального репозитория адрес оригинального репозитория Яндекса, чтобы притягивать оттуда обновления:
+Вы можете также добавить для своего локального репозитория адрес оригинального репозитория, чтобы притягивать оттуда обновления:
git remote add upstream git@github.com:ClickHouse/ClickHouse.git
-После этого, вы сможете добавлять в свой репозиторий обновления из репозитория Яндекса с помощью команды `git pull upstream master`.
+После этого, вы сможете добавлять в свой репозиторий обновления из репозитория ClickHouse с помощью команды `git pull upstream master`.
### Работа с сабмодулями Git {#rabota-s-sabmoduliami-git}
@@ -288,7 +288,7 @@ sudo ./llvm.sh 12
Pull request можно создать, даже если работа над задачей ещё не завершена. В этом случае, добавьте в его название слово «WIP» (work in progress). Название можно будет изменить позже. Это полезно для совместного просмотра и обсуждения изменений, а также для запуска всех имеющихся тестов. Введите краткое описание изменений - впоследствии, оно будет использовано для релизных changelog.
-Тесты будут запущены, как только сотрудники Яндекса поставят для pull request тег «Can be tested». Результаты первых проверок (стиль кода) появятся уже через несколько минут. Результаты сборки появятся примерно через пол часа. Результаты основного набора тестов будут доступны в пределах часа.
+Тесты будут запущены, как только сотрудники ClickHouse поставят для pull request тег «Can be tested». Результаты первых проверок (стиль кода) появятся уже через несколько минут. Результаты сборки появятся примерно через пол часа. Результаты основного набора тестов будут доступны в пределах часа.
Система подготовит сборки ClickHouse специально для вашего pull request. Для их получения, нажмите на ссылку «Details» у проверки «Clickhouse build check». Там вы сможете найти прямые ссылки на собранные .deb пакеты ClickHouse, которые, при желании, вы даже сможете установить на свои продакшен серверы (если не страшно).
diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md
index 3f140f85396..c75fa8e92ce 100644
--- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md
+++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md
@@ -678,7 +678,7 @@ TTL d + INTERVAL 1 MONTH GROUP BY k1, k2 SET x = max(x), y = min(y);
- `volume_name_N` — название тома. Названия томов должны быть уникальны.
- `disk` — диск, находящийся внутри тома.
- `max_data_part_size_bytes` — максимальный размер куска данных, который может находится на любом из дисков этого тома. Если в результате слияния размер куска ожидается больше, чем max_data_part_size_bytes, то этот кусок будет записан в следующий том. В основном эта функция позволяет хранить новые / мелкие куски на горячем (SSD) томе и перемещать их на холодный (HDD) том, когда они достигают большого размера. Не используйте этот параметр, если политика имеет только один том.
-- `move_factor` — доля доступного свободного места на томе, если места становится меньше, то данные начнут перемещение на следующий том, если он есть (по умолчанию 0.1).
+- `move_factor` — доля доступного свободного места на томе: если свободного места становится меньше этой доли, данные начнут перемещаться на следующий том, если он есть (по умолчанию 0.1). Для перемещения куски сортируются по размеру от большего к меньшему (по убыванию) и выбираются куски, совокупный размер которых достаточен для соблюдения условия `move_factor`. Если совокупный размер всех кусков недостаточен, будут перемещены все куски.
- `prefer_not_to_merge` — Отключает слияние кусков данных, хранящихся на данном томе. Если данная настройка включена, то слияние данных, хранящихся на данном томе, не допускается. Это позволяет контролировать работу ClickHouse с медленными дисками.
Примеры конфигураций:
diff --git a/docs/zh/development/developer-instruction.md b/docs/zh/development/developer-instruction.md
index bd7a197f926..7ade3ad57fb 100644
--- a/docs/zh/development/developer-instruction.md
+++ b/docs/zh/development/developer-instruction.md
@@ -259,7 +259,7 @@ ClickHouse的架构描述可以在此处查看:https://clickhouse.com/docs/en/
即使工作尚未完成,也可以创建拉取请求。在这种情况下,请在标题的开头加上«WIP»(正在进行中),以便后续更改。这对于协同审查和讨论更改以及运行所有可用测试用例很有用。提供有关变更的简短描述很重要,这将在后续用于生成重新发布变更日志。
-Yandex成员一旦在您的拉取请求上贴上«可以测试»标签,就会开始测试。一些初始检查项(例如,代码类型)的结果会在几分钟内反馈。构建的检查结果将在半小时内完成。而主要的测试用例集结果将在一小时内报告给您。
+ClickHouse成员一旦在您的拉取请求上贴上«可以测试»标签,就会开始测试。一些初始检查项(例如,代码风格)的结果会在几分钟内反馈。构建的检查结果将在半小时内完成。而主要的测试用例集结果将在一小时内报告给您。
系统将分别为您的拉取请求准备ClickHouse二进制版本。若要检索这些构建信息,请在检查列表中单击« ClickHouse构建检查»旁边的«详细信息»链接。在这里,您会找到指向ClickHouse的.deb软件包的直接链接,此外,甚至可以将其部署在生产服务器上(如果您不担心)。
diff --git a/src/Access/Common/AccessType.h b/src/Access/Common/AccessType.h
index f9fc5bb7ea2..accfa0ad33d 100644
--- a/src/Access/Common/AccessType.h
+++ b/src/Access/Common/AccessType.h
@@ -183,6 +183,7 @@ enum class AccessType
M(JDBC, "", GLOBAL, SOURCES) \
M(HDFS, "", GLOBAL, SOURCES) \
M(S3, "", GLOBAL, SOURCES) \
+ M(HIVE, "", GLOBAL, SOURCES) \
M(SOURCES, "", GROUP, ALL) \
\
M(ALL, "ALL PRIVILEGES", GROUP, NONE) /* full access */ \
diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp
index d2df0b37825..66cd2e75bfc 100644
--- a/src/Client/ClientBase.cpp
+++ b/src/Client/ClientBase.cpp
@@ -220,7 +220,7 @@ static void incrementProfileEventsBlock(Block & dst, const Block & src)
}
-std::atomic_flag exit_on_signal = ATOMIC_FLAG_INIT;
+std::atomic_flag exit_on_signal;
class QueryInterruptHandler : private boost::noncopyable
{
diff --git a/src/Common/FileCache.cpp b/src/Common/FileCache.cpp
index dffa4fac44d..d648267b95d 100644
--- a/src/Common/FileCache.cpp
+++ b/src/Common/FileCache.cpp
@@ -241,6 +241,10 @@ FileSegmentsHolder LRUFileCache::getOrSet(const Key & key, size_t offset, size_t
std::lock_guard cache_lock(mutex);
+#ifndef NDEBUG
+ assertCacheCorrectness(key, cache_lock);
+#endif
+
/// Get all segments which intersect with the given range.
auto file_segments = getImpl(key, range, cache_lock);
@@ -315,7 +319,7 @@ FileSegmentsHolder LRUFileCache::getOrSet(const Key & key, size_t offset, size_t
LRUFileCache::FileSegmentCell * LRUFileCache::addCell(
const Key & key, size_t offset, size_t size, FileSegment::State state,
- std::lock_guard & /* cache_lock */)
+ std::lock_guard & cache_lock)
{
/// Create a file segment cell and put it in `files` map by [key][offset].
@@ -323,8 +327,10 @@ LRUFileCache::FileSegmentCell * LRUFileCache::addCell(
return nullptr; /// Empty files are not cached.
if (files[key].contains(offset))
- throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
- "Cache already exists for key: `{}`, offset: {}, size: {}", keyToStr(key), offset, size);
+ throw Exception(
+ ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
+ "Cache already exists for key: `{}`, offset: {}, size: {}.\nCurrent cache structure: {}",
+ keyToStr(key), offset, size, dumpStructureImpl(key, cache_lock));
auto file_segment = std::make_shared(offset, size, key, this, state);
FileSegmentCell cell(std::move(file_segment), queue);
@@ -340,8 +346,10 @@ LRUFileCache::FileSegmentCell * LRUFileCache::addCell(
auto [it, inserted] = offsets.insert({offset, std::move(cell)});
if (!inserted)
- throw Exception(ErrorCodes::LOGICAL_ERROR,
- "Failed to insert into cache key: `{}`, offset: {}, size: {}", keyToStr(key), offset, size);
+ throw Exception(
+ ErrorCodes::LOGICAL_ERROR,
+ "Failed to insert into cache key: `{}`, offset: {}, size: {}",
+ keyToStr(key), offset, size);
return &(it->second);
}
@@ -523,8 +531,8 @@ void LRUFileCache::loadCacheInfoIntoMemory()
std::lock_guard cache_lock(mutex);
Key key;
- UInt64 offset;
- size_t size;
+ UInt64 offset = 0;
+ size_t size = 0;
std::vector cells;
/// cache_base_path / key_prefix / key / offset
@@ -687,22 +695,32 @@ LRUFileCache::FileSegmentCell::FileSegmentCell(FileSegmentPtr file_segment_, LRU
}
}
-String LRUFileCache::dumpStructure(const Key & key_)
+String LRUFileCache::dumpStructure(const Key & key)
{
std::lock_guard cache_lock(mutex);
+ return dumpStructureImpl(key, cache_lock);
+}
+String LRUFileCache::dumpStructureImpl(const Key & key, std::lock_guard & /* cache_lock */)
+{
WriteBufferFromOwnString result;
- for (auto it = queue.begin(); it != queue.end(); ++it)
- {
- auto [key, offset] = *it;
- if (key == key_)
- {
- auto * cell = getCell(key, offset, cache_lock);
- result << (it != queue.begin() ? ", " : "") << cell->file_segment->range().toString();
- result << "(state: " << cell->file_segment->download_state << ")";
- }
- }
+ const auto & cells_by_offset = files[key];
+
+ for (const auto & [offset, cell] : cells_by_offset)
+ result << cell.file_segment->getInfoForLog() << "\n";
+
return result.str();
}
+void LRUFileCache::assertCacheCorrectness(const Key & key, std::lock_guard & /* cache_lock */)
+{
+ const auto & cells_by_offset = files[key];
+
+ for (const auto & [_, cell] : cells_by_offset)
+ {
+ const auto & file_segment = cell.file_segment;
+ file_segment->assertCorrectness();
+ }
+}
+
}
diff --git a/src/Common/FileCache.h b/src/Common/FileCache.h
index d51dfe7a9ff..d58711cef0a 100644
--- a/src/Common/FileCache.h
+++ b/src/Common/FileCache.h
@@ -25,6 +25,7 @@ namespace DB
class IFileCache : private boost::noncopyable
{
friend class FileSegment;
+friend struct FileSegmentsHolder;
public:
using Key = UInt128;
@@ -196,6 +197,8 @@ private:
FileSegments splitRangeIntoEmptyCells(
const Key & key, size_t offset, size_t size, std::lock_guard & cache_lock);
+ String dumpStructureImpl(const Key & key_, std::lock_guard & cache_lock);
+
public:
struct Stat
{
@@ -208,6 +211,7 @@ public:
Stat getStat();
String dumpStructure(const Key & key_) override;
+ void assertCacheCorrectness(const Key & key, std::lock_guard & cache_lock);
};
}
diff --git a/src/Common/FileSegment.cpp b/src/Common/FileSegment.cpp
index ff4d114c71b..748074c75c2 100644
--- a/src/Common/FileSegment.cpp
+++ b/src/Common/FileSegment.cpp
@@ -159,7 +159,18 @@ void FileSegment::setRemoteFileReader(RemoteFileReaderPtr remote_file_reader_)
remote_file_reader = remote_file_reader_;
}
-void FileSegment::write(const char * from, size_t size)
+void FileSegment::resetRemoteFileReader()
+{
+ if (!isDownloader())
+ throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Only downloader can use remote filesystem file reader");
+
+ if (!remote_file_reader)
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Remote file reader does not exist");
+
+ remote_file_reader.reset();
+}
+
+void FileSegment::write(const char * from, size_t size, size_t offset_)
{
if (!size)
throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Writing zero size is not allowed");
@@ -174,8 +185,24 @@ void FileSegment::write(const char * from, size_t size)
"Only downloader can do the downloading. (CallerId: {}, DownloaderId: {})",
getCallerId(), downloader_id);
+ if (downloaded_size == range().size())
+ throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
+ "Attempt to write {} bytes to offset: {}, but current file segment is already fully downloaded",
+ size, offset_);
+
+ auto download_offset = range().left + downloaded_size;
+ if (offset_ != download_offset)
+ throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
+ "Attempt to write {} bytes to offset: {}, but current download offset is {}",
+ size, offset_, download_offset);
+
if (!cache_writer)
{
+ if (downloaded_size > 0)
+ throw Exception(ErrorCodes::LOGICAL_ERROR,
+ "Cache writer was finalized (downloaded size: {}, state: {})",
+ downloaded_size, stateToString(download_state));
+
auto download_path = cache->getPathInLocalCache(key(), offset());
cache_writer = std::make_unique(download_path);
}
@@ -190,19 +217,26 @@ void FileSegment::write(const char * from, size_t size)
downloaded_size += size;
}
- catch (...)
+ catch (Exception & e)
{
std::lock_guard segment_lock(mutex);
- LOG_ERROR(log, "Failed to write to cache. File segment info: {}", getInfoForLogImpl(segment_lock));
+ auto info = getInfoForLogImpl(segment_lock);
+ e.addMessage("while writing into cache, info: " + info);
+
+ LOG_ERROR(log, "Failed to write to cache. File segment info: {}", info);
download_state = State::PARTIALLY_DOWNLOADED_NO_CONTINUATION;
cache_writer->finalize();
cache_writer.reset();
+ cv.notify_all();
+
throw;
}
+
+ assert(getDownloadOffset() == offset_ + size);
}
FileSegment::State FileSegment::wait()
@@ -270,7 +304,6 @@ void FileSegment::setDownloaded(std::lock_guard & /* segment_lock */
download_state = State::DOWNLOADED;
is_downloaded = true;
- assert(cache_writer);
if (cache_writer)
{
cache_writer->finalize();
@@ -299,107 +332,125 @@ void FileSegment::completeBatchAndResetDownloader()
void FileSegment::complete(State state)
{
- {
- std::lock_guard segment_lock(mutex);
-
- bool is_downloader = downloader_id == getCallerId();
- if (!is_downloader)
- {
- cv.notify_all();
- throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
- "File segment can be completed only by downloader or downloader's FileSegmentsHodler");
- }
-
- if (state != State::DOWNLOADED
- && state != State::PARTIALLY_DOWNLOADED
- && state != State::PARTIALLY_DOWNLOADED_NO_CONTINUATION)
- {
- cv.notify_all();
- throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
- "Cannot complete file segment with state: {}", stateToString(state));
- }
-
- download_state = state;
- }
-
- completeImpl();
- cv.notify_all();
-}
-
-void FileSegment::complete()
-{
- {
- std::lock_guard segment_lock(mutex);
-
- if (download_state == State::SKIP_CACHE || detached)
- return;
-
- if (download_state != State::DOWNLOADED && getDownloadedSize(segment_lock) == range().size())
- setDownloaded(segment_lock);
-
- if (download_state == State::DOWNLOADING || download_state == State::EMPTY)
- download_state = State::PARTIALLY_DOWNLOADED;
- }
-
- completeImpl(true);
- cv.notify_all();
-}
-
-void FileSegment::completeImpl(bool allow_non_strict_checking)
-{
- /// cache lock is always taken before segment lock.
std::lock_guard cache_lock(cache->mutex);
std::lock_guard segment_lock(mutex);
- bool download_can_continue = false;
-
- if (download_state == State::PARTIALLY_DOWNLOADED
- || download_state == State::PARTIALLY_DOWNLOADED_NO_CONTINUATION)
+ bool is_downloader = downloader_id == getCallerId();
+ if (!is_downloader)
{
- bool is_last_holder = cache->isLastFileSegmentHolder(key(), offset(), cache_lock, segment_lock);
- download_can_continue = !is_last_holder && download_state == State::PARTIALLY_DOWNLOADED;
+ cv.notify_all();
+ throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
+ "File segment can be completed only by downloader or downloader's FileSegmentsHodler");
+ }
- if (!download_can_continue)
+ if (state != State::DOWNLOADED
+ && state != State::PARTIALLY_DOWNLOADED
+ && state != State::PARTIALLY_DOWNLOADED_NO_CONTINUATION)
+ {
+ cv.notify_all();
+ throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
+ "Cannot complete file segment with state: {}", stateToString(state));
+ }
+
+ download_state = state;
+
+ try
+ {
+ completeImpl(cache_lock, segment_lock);
+ }
+ catch (...)
+ {
+ if (!downloader_id.empty() && downloader_id == getCallerIdImpl(true))
+ downloader_id.clear();
+
+ cv.notify_all();
+ throw;
+ }
+
+ cv.notify_all();
+}
+
+void FileSegment::complete(std::lock_guard & cache_lock)
+{
+ std::lock_guard segment_lock(mutex);
+
+ if (download_state == State::SKIP_CACHE || detached)
+ return;
+
+ if (download_state != State::DOWNLOADED && getDownloadedSize(segment_lock) == range().size())
+ setDownloaded(segment_lock);
+
+ if (download_state == State::DOWNLOADING || download_state == State::EMPTY)
+ {
+ /// Segment state can be changed from DOWNLOADING or EMPTY only if the caller is the
+ /// downloader or the only owner of the segment.
+
+ bool can_update_segment_state = downloader_id == getCallerIdImpl(true)
+ || cache->isLastFileSegmentHolder(key(), offset(), cache_lock, segment_lock);
+
+ if (can_update_segment_state)
+ download_state = State::PARTIALLY_DOWNLOADED;
+ }
+
+ try
+ {
+ completeImpl(cache_lock, segment_lock, /* allow_non_strict_checking */true);
+ }
+ catch (...)
+ {
+ if (!downloader_id.empty() && downloader_id == getCallerIdImpl(true))
+ downloader_id.clear();
+
+ cv.notify_all();
+ throw;
+ }
+
+ cv.notify_all();
+}
+
+void FileSegment::completeImpl(std::lock_guard & cache_lock, std::lock_guard & segment_lock, bool allow_non_strict_checking)
+{
+ bool is_last_holder = cache->isLastFileSegmentHolder(key(), offset(), cache_lock, segment_lock);
+
+ if (is_last_holder
+ && (download_state == State::PARTIALLY_DOWNLOADED || download_state == State::PARTIALLY_DOWNLOADED_NO_CONTINUATION))
+ {
+ size_t current_downloaded_size = getDownloadedSize(segment_lock);
+ if (current_downloaded_size == 0)
{
- size_t current_downloaded_size = getDownloadedSize(segment_lock);
- if (current_downloaded_size == 0)
- {
- download_state = State::SKIP_CACHE;
- LOG_TEST(log, "Remove cell {} (nothing downloaded)", range().toString());
- cache->remove(key(), offset(), cache_lock, segment_lock);
+ download_state = State::SKIP_CACHE;
+ LOG_TEST(log, "Remove cell {} (nothing downloaded)", range().toString());
+ cache->remove(key(), offset(), cache_lock, segment_lock);
+ }
+ else
+ {
+ /**
+ * Only last holder of current file segment can resize the cell,
+ * because there is an invariant that file segments returned to users
+ * in FileSegmentsHolder represent a contiguous range, so we can resize
+ * it only when nobody needs it.
+ */
+ LOG_TEST(log, "Resize cell {} to downloaded: {}", range().toString(), current_downloaded_size);
+ cache->reduceSizeToDownloaded(key(), offset(), cache_lock, segment_lock);
+ }
- detached = true;
- }
- else if (is_last_holder)
- {
- /**
- * Only last holder of current file segment can resize the cell,
- * because there is an invariant that file segments returned to users
- * in FileSegmentsHolder represent a contiguous range, so we can resize
- * it only when nobody needs it.
- */
- LOG_TEST(log, "Resize cell {} to downloaded: {}", range().toString(), current_downloaded_size);
- cache->reduceSizeToDownloaded(key(), offset(), cache_lock, segment_lock);
+ detached = true;
- detached = true;
- }
+ if (cache_writer)
+ {
+ cache_writer->finalize();
+ cache_writer.reset();
+ remote_file_reader.reset();
}
}
- if (!downloader_id.empty() && downloader_id == getCallerIdImpl(allow_non_strict_checking))
+ if (!downloader_id.empty() && (downloader_id == getCallerIdImpl(allow_non_strict_checking) || is_last_holder))
{
LOG_TEST(log, "Clearing downloader id: {}, current state: {}", downloader_id, stateToString(download_state));
downloader_id.clear();
}
- if (!download_can_continue && cache_writer)
- {
- cache_writer->finalize();
- cache_writer.reset();
- remote_file_reader.reset();
- }
-
- assert(download_state != FileSegment::State::DOWNLOADED || std::filesystem::file_size(cache->getPathInLocalCache(key(), offset())) > 0);
+ assertCorrectnessImpl(segment_lock);
}
String FileSegment::getInfoForLog() const
@@ -440,6 +491,56 @@ String FileSegment::stateToString(FileSegment::State state)
__builtin_unreachable();
}
+void FileSegment::assertCorrectness() const
+{
+ std::lock_guard segment_lock(mutex);
+ assertCorrectnessImpl(segment_lock);
+}
+
+void FileSegment::assertCorrectnessImpl(std::lock_guard & /* segment_lock */) const
+{
+ assert(downloader_id.empty() == (download_state != FileSegment::State::DOWNLOADING));
+ assert(!downloader_id.empty() == (download_state == FileSegment::State::DOWNLOADING));
+ assert(download_state != FileSegment::State::DOWNLOADED || std::filesystem::file_size(cache->getPathInLocalCache(key(), offset())) > 0);
+}
+
+FileSegmentsHolder::~FileSegmentsHolder()
+{
+ /// In CachedReadBufferFromRemoteFS the file segment's downloader removes file segments from
+ /// FileSegmentsHolder right after calling file_segment->complete(), so by the time this
+ /// destructor runs, only uncompleted file segments remain here.
+
+ IFileCache * cache = nullptr;
+
+ for (auto file_segment_it = file_segments.begin(); file_segment_it != file_segments.end();)
+ {
+ auto current_file_segment_it = file_segment_it;
+ auto & file_segment = *current_file_segment_it;
+
+ if (!cache)
+ cache = file_segment->cache;
+
+ try
+ {
+ /// The file segment pointer must be reset right after calling complete() and
+ /// under the same cache mutex, because complete() may check whether the caller
+ /// is the last alive holder of the segment.
+ std::lock_guard cache_lock(cache->mutex);
+
+ file_segment->complete(cache_lock);
+
+ file_segment_it = file_segments.erase(current_file_segment_it);
+ }
+ catch (...)
+ {
+#ifdef NDEBUG
+ tryLogCurrentException(__PRETTY_FUNCTION__);
+#else
+ throw;
+#endif
+ }
+ }
+}
+
String FileSegmentsHolder::toString()
{
String ranges;
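
The new `FileSegment::write(from, size, offset_)` signature enforces that every chunk is appended exactly at the segment's current download offset, and rejects writes to an already fully downloaded segment. Below is a minimal caller-side sketch of that contract; the helper itself is hypothetical, but it uses only methods visible in this diff (`reserve`, `getDownloadOffset`, `write`).

```cpp
#include <cassert>
#include <string>

#include <Common/FileSegment.h>   /// assumed include path within the ClickHouse tree

/// Hypothetical helper: append one chunk to a file segment under the new contract.
/// The caller must be the segment's downloader, exactly as before this change.
void appendToSegment(DB::FileSegmentPtr file_segment, const std::string & data)
{
    if (!file_segment->reserve(data.size()))
        return;                                            /// cache has no room left

    size_t offset = file_segment->getDownloadOffset();     /// the only accepted write position
    file_segment->write(data.data(), data.size(), offset); /// throws on any other offset

    /// After a successful write the download offset advances by exactly data.size().
    assert(file_segment->getDownloadOffset() == offset + data.size());
}
```

The updated unit test in `gtest_lru_file_cache.cpp` uses exactly this pattern: `file_segment->write(data.data(), size, file_segment->getDownloadOffset())`.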
diff --git a/src/Common/FileSegment.h b/src/Common/FileSegment.h
index f0c6c5de9b6..b7501640913 100644
--- a/src/Common/FileSegment.h
+++ b/src/Common/FileSegment.h
@@ -95,12 +95,14 @@ public:
bool reserve(size_t size);
- void write(const char * from, size_t size);
+ void write(const char * from, size_t size, size_t offset_);
RemoteFileReaderPtr getRemoteFileReader();
void setRemoteFileReader(RemoteFileReaderPtr remote_file_reader_);
+ void resetRemoteFileReader();
+
String getOrSetDownloader();
String getDownloader() const;
@@ -121,16 +123,32 @@ public:
String getInfoForLog() const;
+ void assertCorrectness() const;
+
private:
size_t availableSize() const { return reserved_size - downloaded_size; }
- bool lastFileSegmentHolder() const;
- void complete();
- void completeImpl(bool allow_non_strict_checking = false);
- void setDownloaded(std::lock_guard & segment_lock);
- static String getCallerIdImpl(bool allow_non_strict_checking = false);
- void resetDownloaderImpl(std::lock_guard & segment_lock);
+
size_t getDownloadedSize(std::lock_guard & segment_lock) const;
String getInfoForLogImpl(std::lock_guard & segment_lock) const;
+ void assertCorrectnessImpl(std::lock_guard & segment_lock) const;
+
+ void setDownloaded(std::lock_guard & segment_lock);
+
+ bool lastFileSegmentHolder() const;
+
+ /// complete() without any completion state is called from the destructor of
+ /// FileSegmentsHolder. complete() might check whether the caller of the method
+ /// is the last alive holder of the segment. Therefore, complete() and the destruction
+ /// of the file segment pointer must be done under the same cache mutex.
+ void complete(std::lock_guard & cache_lock);
+
+ void completeImpl(
+ std::lock_guard & cache_lock,
+ std::lock_guard & segment_lock, bool allow_non_strict_checking = false);
+
+ static String getCallerIdImpl(bool allow_non_strict_checking = false);
+
+ void resetDownloaderImpl(std::lock_guard & segment_lock);
const Range segment_range;
@@ -169,28 +187,7 @@ struct FileSegmentsHolder : private boost::noncopyable
explicit FileSegmentsHolder(FileSegments && file_segments_) : file_segments(std::move(file_segments_)) {}
FileSegmentsHolder(FileSegmentsHolder && other) : file_segments(std::move(other.file_segments)) {}
- ~FileSegmentsHolder()
- {
- /// In CacheableReadBufferFromRemoteFS file segment's downloader removes file segments from
- /// FileSegmentsHolder right after calling file_segment->complete(), so on destruction here
- /// remain only uncompleted file segments.
-
- for (auto & segment : file_segments)
- {
- try
- {
- segment->complete();
- }
- catch (...)
- {
-#ifndef NDEBUG
- throw;
-#else
- tryLogCurrentException(__PRETTY_FUNCTION__);
-#endif
- }
- }
- }
+ ~FileSegmentsHolder();
FileSegments file_segments{};
diff --git a/src/Common/getNumberOfPhysicalCPUCores.cpp b/src/Common/getNumberOfPhysicalCPUCores.cpp
index 2fc1dbf9669..1d26b522e64 100644
--- a/src/Common/getNumberOfPhysicalCPUCores.cpp
+++ b/src/Common/getNumberOfPhysicalCPUCores.cpp
@@ -38,21 +38,7 @@ unsigned getCGroupLimitedCPUCores(unsigned default_cpu_count)
quota_count = ceil(static_cast(cgroup_quota) / static_cast(cgroup_period));
}
- // Share number (typically a number relative to 1024) (2048 typically expresses 2 CPUs worth of processing)
- // -1 for no share setup
- int cgroup_share = read_from("/sys/fs/cgroup/cpu/cpu.shares", -1);
- // Convert 1024 to no shares setup
- if (cgroup_share == 1024)
- cgroup_share = -1;
-
-# define PER_CPU_SHARES 1024
- unsigned share_count = default_cpu_count;
- if (cgroup_share > -1)
- {
- share_count = ceil(static_cast(cgroup_share) / static_cast(PER_CPU_SHARES));
- }
-
- return std::min(default_cpu_count, std::min(share_count, quota_count));
+ return std::min(default_cpu_count, quota_count);
}
#endif // OS_LINUX
@@ -91,6 +77,7 @@ unsigned getNumberOfPhysicalCPUCores()
cpu_count = std::thread::hardware_concurrency();
#if defined(OS_LINUX)
+ /// TODO: add a setting to disable this behaviour, similar to the JVM's UseContainerSupport flag
cpu_count = getCGroupLimitedCPUCores(cpu_count);
#endif // OS_LINUX
return cpu_count;
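
With the `cpu.shares` heuristic removed, the surviving behaviour is: the reported core count is the hardware count capped by ceil(quota / period) when a CFS quota is set (typically read from `cpu.cfs_quota_us` / `cpu.cfs_period_us`). A minimal sketch of that arithmetic, with example values instead of reads from `/sys/fs/cgroup`, assuming the same -1 convention for "no quota":

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>

/// Sketch of the remaining cgroup logic: only the CFS quota limits the core count.
unsigned effectiveCPUCores(unsigned hardware_cores, long cgroup_quota, long cgroup_period)
{
    unsigned quota_count = hardware_cores; /// -1 means "no quota configured"
    if (cgroup_quota > -1 && cgroup_period > 0)
        quota_count = static_cast<unsigned>(
            std::ceil(static_cast<double>(cgroup_quota) / static_cast<double>(cgroup_period)));

    return std::min(hardware_cores, quota_count);
}

int main()
{
    /// A 150ms quota over a 100ms period rounds up to 2 usable cores.
    std::printf("%u\n", effectiveCPUCores(16, 150000, 100000)); /// prints 2
}
```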
diff --git a/src/Common/tests/gtest_lru_file_cache.cpp b/src/Common/tests/gtest_lru_file_cache.cpp
index d5a76f9daad..c08b12857a1 100644
--- a/src/Common/tests/gtest_lru_file_cache.cpp
+++ b/src/Common/tests/gtest_lru_file_cache.cpp
@@ -67,7 +67,7 @@ void download(DB::FileSegmentPtr file_segment)
fs::create_directories(subdir);
std::string data(size, '0');
- file_segment->write(data.data(), size);
+ file_segment->write(data.data(), size, file_segment->getDownloadOffset());
}
void prepareAndDownload(DB::FileSegmentPtr file_segment)
diff --git a/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp b/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp
index 4b614cd10e0..4766b838fda 100644
--- a/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp
+++ b/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp
@@ -63,7 +63,11 @@ void CachedReadBufferFromRemoteFS::initialize(size_t offset, size_t size)
SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getCacheReadBuffer(size_t offset) const
{
- return std::make_shared(cache->getPathInLocalCache(cache_key, offset), settings.local_fs_buffer_size);
+ auto path = cache->getPathInLocalCache(cache_key, offset);
+ auto buf = std::make_shared(path, settings.local_fs_buffer_size);
+ if (buf->size() == 0)
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to read from an empty cache file: {}", path);
+ return buf;
}
SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getRemoteFSReadBuffer(FileSegmentPtr & file_segment, ReadType read_type_)
@@ -96,7 +100,6 @@ SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getRemoteFSReadBuffer(FileSe
remote_fs_segment_reader = remote_file_reader_creator();
file_segment->setRemoteFileReader(remote_fs_segment_reader);
- ///TODO: add check for pending data
return remote_fs_segment_reader;
}
case ReadType::REMOTE_FS_READ_BYPASS_CACHE:
@@ -119,7 +122,6 @@ SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getReadBufferForFileSegment(
{
auto range = file_segment->range();
- /// Each wait() call has a timeout of 1 second.
size_t wait_download_max_tries = settings.remote_fs_cache_max_wait_sec;
size_t wait_download_tries = 0;
@@ -296,17 +298,21 @@ SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getImplementationBuffer(File
{
case ReadType::CACHED:
{
+#ifndef NDEBUG
+ auto * file_reader = assert_cast(read_buffer_for_file_segment.get());
+ size_t file_size = file_reader->size();
+
+ if (file_size == 0 || range.left + file_size <= file_offset_of_buffer_end)
+ throw Exception(
+ ErrorCodes::LOGICAL_ERROR,
+ "Unexpected state of cache file. Cache file size: {}, cache file offset: {}, "
+ "expected file size to be non-zero and file downloaded size to exceed current file read offset (expected: {} > {})",
+ file_size, range.left, range.left + file_size, file_offset_of_buffer_end);
+#endif
+
size_t seek_offset = file_offset_of_buffer_end - range.left;
read_buffer_for_file_segment->seek(seek_offset, SEEK_SET);
- auto * file_reader = assert_cast(read_buffer_for_file_segment.get());
- size_t file_size = file_reader->size();
- auto state = file_segment->state();
-
- LOG_TEST(log, "Cache file: {}. Cached seek to: {}, file size: {}, file segment state: {}, download offset: {}",
- file_reader->getFileName(), seek_offset, file_size, state, file_segment->getDownloadOffset());
-
- assert(file_size > 0);
break;
}
case ReadType::REMOTE_FS_READ_BYPASS_CACHE:
@@ -384,6 +390,7 @@ void CachedReadBufferFromRemoteFS::predownload(FileSegmentPtr & file_segment)
LOG_TEST(log, "Bytes to predownload: {}, caller_id: {}", bytes_to_predownload, FileSegment::getCallerId());
assert(implementation_buffer->getFileOffsetOfBufferEnd() == file_segment->getDownloadOffset());
+ size_t current_offset = file_segment->getDownloadOffset();
while (true)
{
@@ -423,7 +430,11 @@ void CachedReadBufferFromRemoteFS::predownload(FileSegmentPtr & file_segment)
{
LOG_TEST(log, "Left to predownload: {}, buffer size: {}", bytes_to_predownload, implementation_buffer->buffer().size());
- file_segment->write(implementation_buffer->buffer().begin(), current_predownload_size);
+ assert(file_segment->getDownloadOffset() == static_cast(implementation_buffer->getPosition()));
+
+ file_segment->write(implementation_buffer->buffer().begin(), current_predownload_size, current_offset);
+
+ current_offset += current_predownload_size;
bytes_to_predownload -= current_predownload_size;
implementation_buffer->position() += current_predownload_size;
@@ -537,13 +548,15 @@ bool CachedReadBufferFromRemoteFS::nextImpl()
}
catch (Exception & e)
{
- e.addMessage("Cache info: {}", getInfoForLog());
+ e.addMessage("Cache info: {}", nextimpl_step_log_info);
throw;
}
}
bool CachedReadBufferFromRemoteFS::nextImplStep()
{
+ last_caller_id = FileSegment::getCallerId();
+
if (IFileCache::shouldBypassCache())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Using cache when not allowed");
@@ -554,6 +567,9 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
return false;
SCOPE_EXIT({
+ /// Save state of current file segment before it is completed.
+ nextimpl_step_log_info = getInfoForLog();
+
if (current_file_segment_it == file_segments_holder->file_segments.end())
return;
@@ -623,6 +639,18 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
if (!result)
{
+#ifndef NDEBUG
+ if (auto * cache_file_reader = typeid_cast(implementation_buffer.get()))
+ {
+ auto cache_file_size = cache_file_reader->size();
+ if (cache_file_size == 0)
+ throw Exception(
+ ErrorCodes::LOGICAL_ERROR,
+ "Attempt to read from an empty cache file: {} (just before actual read)",
+ cache_file_size);
+ }
+#endif
+
result = implementation_buffer->next();
size = implementation_buffer->buffer().size();
}
@@ -635,7 +663,12 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
if (file_segment->reserve(size))
{
- file_segment->write(needed_to_predownload ? implementation_buffer->position() : implementation_buffer->buffer().begin(), size);
+ assert(file_segment->getDownloadOffset() == static_cast(implementation_buffer->getPosition()));
+
+ file_segment->write(needed_to_predownload ? implementation_buffer->position() : implementation_buffer->buffer().begin(), size, file_offset_of_buffer_end);
+
+ assert(file_segment->getDownloadOffset() <= file_segment->range().right + 1);
+ assert(std::next(current_file_segment_it) == file_segments_holder->file_segments.end() || file_segment->getDownloadOffset() == implementation_buffer->getFileOffsetOfBufferEnd());
}
else
{
@@ -665,10 +698,15 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
}
}
+ /// - If the last file segment was read from the remote fs, then we read up to segment->range().right,
+ /// but the requested right boundary could be segment->range().left < requested_right_boundary < segment->range().right.
+ /// Therefore we need to resize to a smaller size, and the resize must be done after the write into the cache.
+ /// - If the last file segment was read from the local fs, then we could read more than file_segment->range().right, so a resize is also needed.
if (std::next(current_file_segment_it) == file_segments_holder->file_segments.end())
{
size_t remaining_size_to_read = std::min(current_read_range.right, read_until_position - 1) - file_offset_of_buffer_end + 1;
size = std::min(size, remaining_size_to_read);
+ assert(implementation_buffer->buffer().size() >= nextimpl_working_buffer_offset + size);
implementation_buffer->buffer().resize(nextimpl_working_buffer_offset + size);
}
@@ -692,9 +730,16 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
read_until_position, first_offset, file_segments_holder->toString());
if (size == 0 && file_offset_of_buffer_end < read_until_position)
- throw Exception(ErrorCodes::LOGICAL_ERROR,
- "Having zero bytes, but range is not finished: file offset: {}, reading until: {}",
- file_offset_of_buffer_end, read_until_position);
+ {
+ std::optional cache_file_size;
+ if (auto * cache_file_reader = typeid_cast(implementation_buffer.get()))
+ cache_file_size = cache_file_reader->size();
+
+ throw Exception(ErrorCodes::LOGICAL_ERROR,
+ "Having zero bytes, but range is not finished: file offset: {}, reading until: {}, read type: {}, cache file size: {}",
+ file_offset_of_buffer_end, read_until_position, toString(read_type), cache_file_size ? std::to_string(*cache_file_size) : "None");
+ }
+
return result;
}
@@ -757,12 +802,24 @@ std::optional CachedReadBufferFromRemoteFS::getLastNonDownloadedOffset()
String CachedReadBufferFromRemoteFS::getInfoForLog()
{
- return fmt::format("Buffer path: {}, hash key: {}, file_offset_of_buffer_end: {}, internal buffer remaining read range: {}, file segment info: {}",
- remote_fs_object_path, getHexUIntLowercase(cache_key), file_offset_of_buffer_end,
- (implementation_buffer ?
- std::to_string(implementation_buffer->getRemainingReadRange().left) + '-' + (implementation_buffer->getRemainingReadRange().right ? std::to_string(*implementation_buffer->getRemainingReadRange().right) : "None")
- : "None"),
- (current_file_segment_it == file_segments_holder->file_segments.end() ? "None" : (*current_file_segment_it)->getInfoForLog()));
+ auto implementation_buffer_read_range_str =
+ implementation_buffer ?
+ std::to_string(implementation_buffer->getRemainingReadRange().left)
+ + '-'
+ + (implementation_buffer->getRemainingReadRange().right ? std::to_string(*implementation_buffer->getRemainingReadRange().right) : "None")
+ : "None";
+
+ auto current_file_segment_info = current_file_segment_it == file_segments_holder->file_segments.end() ? "None" : (*current_file_segment_it)->getInfoForLog();
+
+ return fmt::format("Buffer path: {}, hash key: {}, file_offset_of_buffer_end: {}, internal buffer remaining read range: {}, "
+ "read_type: {}, last caller: {}, file segment info: {}",
+ remote_fs_object_path,
+ getHexUIntLowercase(cache_key),
+ file_offset_of_buffer_end,
+ implementation_buffer_read_range_str,
+ toString(read_type),
+ last_caller_id,
+ current_file_segment_info);
}
}
diff --git a/src/Disks/IO/CachedReadBufferFromRemoteFS.h b/src/Disks/IO/CachedReadBufferFromRemoteFS.h
index 0e9dcc1033f..5fc9ec39246 100644
--- a/src/Disks/IO/CachedReadBufferFromRemoteFS.h
+++ b/src/Disks/IO/CachedReadBufferFromRemoteFS.h
@@ -98,7 +98,10 @@ private:
}
__builtin_unreachable();
}
+
size_t first_offset = 0;
+ String nextimpl_step_log_info;
+ String last_caller_id;
};
}
diff --git a/src/Disks/IO/ThreadPoolRemoteFSReader.cpp b/src/Disks/IO/ThreadPoolRemoteFSReader.cpp
index bdb012a6376..af545d15c0e 100644
--- a/src/Disks/IO/ThreadPoolRemoteFSReader.cpp
+++ b/src/Disks/IO/ThreadPoolRemoteFSReader.cpp
@@ -68,16 +68,28 @@ std::future ThreadPoolRemoteFSReader::submit(Reques
auto * remote_fs_fd = assert_cast(request.descriptor.get());
Stopwatch watch(CLOCK_MONOTONIC);
- auto [bytes_read, offset] = remote_fs_fd->readInto(request.buf, request.size, request.offset, request.ignore);
+
+ ReadBufferFromRemoteFSGather::ReadResult result;
+ try
+ {
+ result = remote_fs_fd->readInto(request.buf, request.size, request.offset, request.ignore);
+ }
+ catch (...)
+ {
+ if (running_group)
+ CurrentThread::detachQuery();
+ throw;
+ }
+
watch.stop();
- ProfileEvents::increment(ProfileEvents::RemoteFSReadMicroseconds, watch.elapsedMicroseconds());
- ProfileEvents::increment(ProfileEvents::RemoteFSReadBytes, bytes_read);
-
if (running_group)
- thread_status.detachQuery();
+ CurrentThread::detachQuery();
- return Result{ .size = bytes_read, .offset = offset };
+ ProfileEvents::increment(ProfileEvents::RemoteFSReadMicroseconds, watch.elapsedMicroseconds());
+ ProfileEvents::increment(ProfileEvents::RemoteFSReadBytes, result.offset ? result.size - result.offset : result.size);
+
+ return Result{ .size = result.size, .offset = result.offset };
});
auto future = task->get_future();
diff --git a/src/Interpreters/ClientInfo.h b/src/Interpreters/ClientInfo.h
index 0b40c78becc..42c56d470af 100644
--- a/src/Interpreters/ClientInfo.h
+++ b/src/Interpreters/ClientInfo.h
@@ -69,6 +69,7 @@ public:
/// All below are parameters related to initial query.
Interface interface = Interface::TCP;
+ bool is_secure = false;
/// For tcp
String os_user;
diff --git a/src/Interpreters/QueryLog.cpp b/src/Interpreters/QueryLog.cpp
index d74caf3e820..03eeb81f14d 100644
--- a/src/Interpreters/QueryLog.cpp
+++ b/src/Interpreters/QueryLog.cpp
@@ -86,6 +86,7 @@ NamesAndTypesList QueryLogElement::getNamesAndTypes()
{"initial_query_start_time", std::make_shared()},
{"initial_query_start_time_microseconds", std::make_shared(6)},
{"interface", std::make_shared()},
+ {"is_secure", std::make_shared()},
{"os_user", std::make_shared()},
{"client_hostname", std::make_shared()},
{"client_name", std::make_shared()},
@@ -279,6 +280,7 @@ void QueryLogElement::appendClientInfo(const ClientInfo & client_info, MutableCo
columns[i++]->insert(client_info.initial_query_start_time_microseconds);
columns[i++]->insert(UInt64(client_info.interface));
+ columns[i++]->insert(static_cast(client_info.is_secure));
columns[i++]->insert(client_info.os_user);
columns[i++]->insert(client_info.client_hostname);
diff --git a/src/Interpreters/QueryThreadLog.cpp b/src/Interpreters/QueryThreadLog.cpp
index d9feaf0a0c3..3b28df17999 100644
--- a/src/Interpreters/QueryThreadLog.cpp
+++ b/src/Interpreters/QueryThreadLog.cpp
@@ -56,6 +56,7 @@ NamesAndTypesList QueryThreadLogElement::getNamesAndTypes()
{"initial_query_start_time", std::make_shared()},
{"initial_query_start_time_microseconds", std::make_shared(6)},
{"interface", std::make_shared()},
+ {"is_secure", std::make_shared()},
{"os_user", std::make_shared()},
{"client_hostname", std::make_shared()},
{"client_name", std::make_shared()},
diff --git a/src/Interpreters/Session.cpp b/src/Interpreters/Session.cpp
index 2af9a2b6bbc..42ccd45f4de 100644
--- a/src/Interpreters/Session.cpp
+++ b/src/Interpreters/Session.cpp
@@ -243,7 +243,7 @@ void Session::shutdownNamedSessions()
NamedSessionsStorage::instance().shutdown();
}
-Session::Session(const ContextPtr & global_context_, ClientInfo::Interface interface_)
+Session::Session(const ContextPtr & global_context_, ClientInfo::Interface interface_, bool is_secure)
: auth_id(UUIDHelpers::generateV4()),
global_context(global_context_),
interface(interface_),
@@ -251,6 +251,7 @@ Session::Session(const ContextPtr & global_context_, ClientInfo::Interface inter
{
prepared_client_info.emplace();
prepared_client_info->interface = interface_;
+ prepared_client_info->is_secure = is_secure;
}
Session::~Session()
diff --git a/src/Interpreters/Session.h b/src/Interpreters/Session.h
index f937c73d1a8..d210f38fb88 100644
--- a/src/Interpreters/Session.h
+++ b/src/Interpreters/Session.h
@@ -32,7 +32,7 @@ public:
/// Stops using named sessions. The method must be called at the server shutdown.
static void shutdownNamedSessions();
- Session(const ContextPtr & global_context_, ClientInfo::Interface interface_);
+ Session(const ContextPtr & global_context_, ClientInfo::Interface interface_, bool is_secure = false);
~Session();
Session(const Session &&) = delete;
diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp
index c51201750c5..ef236388a04 100644
--- a/src/Parsers/ExpressionElementParsers.cpp
+++ b/src/Parsers/ExpressionElementParsers.cpp
@@ -505,32 +505,34 @@ namespace
bool parseExtract(IParser::Pos & pos, ASTPtr & node, Expected & expected)
{
- ASTPtr expr;
-
+ IParser::Pos begin = pos;
IntervalKind interval_kind;
- if (!parseIntervalKind(pos, expected, interval_kind))
- {
- ASTPtr expr_list;
- if (!ParserExpressionList(false, false).parse(pos, expr_list, expected))
- return false;
- auto res = std::make_shared();
- res->name = "extract";
- res->arguments = expr_list;
- res->children.push_back(res->arguments);
- node = std::move(res);
- return true;
+ if (parseIntervalKind(pos, expected, interval_kind))
+ {
+ ASTPtr expr;
+
+ ParserKeyword s_from("FROM");
+ ParserExpression elem_parser;
+
+ if (s_from.ignore(pos, expected) && elem_parser.parse(pos, expr, expected))
+ {
+ node = makeASTFunction(interval_kind.toNameOfFunctionExtractTimePart(), expr);
+ return true;
+ }
}
- ParserKeyword s_from("FROM");
- if (!s_from.ignore(pos, expected))
+ pos = begin;
+
+ ASTPtr expr_list;
+ if (!ParserExpressionList(false, false).parse(pos, expr_list, expected))
return false;
- ParserExpression elem_parser;
- if (!elem_parser.parse(pos, expr, expected))
- return false;
-
- node = makeASTFunction(interval_kind.toNameOfFunctionExtractTimePart(), expr);
+ auto res = std::make_shared();
+ res->name = "extract";
+ res->arguments = expr_list;
+ res->children.push_back(res->arguments);
+ node = std::move(res);
return true;
}
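
The restructured `parseExtract` follows a save-position/try/rollback pattern: it first speculates on the `EXTRACT(unit FROM expr)` form and, if that branch does not match, rewinds `pos` to `begin` and parses a plain `extract(...)` function call. Below is a toy, self-contained illustration of the same backtracking technique; the digit/any-character grammar is made up, and only the rollback of the position is the point.

```cpp
#include <cctype>
#include <iostream>
#include <optional>
#include <string_view>

/// Branch 1: succeeds only if the next character is a digit, consuming it.
std::optional<char> parseDigit(std::string_view input, size_t & pos)
{
    if (pos < input.size() && std::isdigit(static_cast<unsigned char>(input[pos])))
        return input[pos++];
    return std::nullopt;
}

/// Try branch 1; on failure restore `pos` and fall back to branch 2 (any character),
/// mirroring how parseExtract() rewinds to `begin` before trying the function-call form.
std::optional<char> parseDigitOrAny(std::string_view input, size_t & pos)
{
    size_t begin = pos;          /// save the position before speculating
    if (auto c = parseDigit(input, pos))
        return c;

    pos = begin;                 /// roll back whatever the failed branch consumed
    if (pos < input.size())
        return input[pos++];
    return std::nullopt;
}

int main()
{
    size_t pos = 0;
    std::cout << *parseDigitOrAny("x1", pos) << '\n'; /// prints 'x' via the fallback branch
}
```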
diff --git a/src/Processors/Formats/IInputFormat.h b/src/Processors/Formats/IInputFormat.h
index 99d25d87b73..9774f630f15 100644
--- a/src/Processors/Formats/IInputFormat.h
+++ b/src/Processors/Formats/IInputFormat.h
@@ -17,7 +17,7 @@ struct ColumnMapping
OptionalIndexes column_indexes_for_input_fields;
/// The list of column indexes that are not presented in input data.
- std::vector not_presented_columns;
+ std::vector not_presented_columns;
/// The list of column names in input data. Needed for better exception messages.
std::vector names_of_columns;
diff --git a/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp b/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp
index 7720b01dc74..0735f927c6a 100644
--- a/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp
+++ b/src/Processors/Formats/RowInputFormatWithNamesAndTypes.cpp
@@ -98,8 +98,6 @@ void RowInputFormatWithNamesAndTypes::readPrefix()
/// Skip prefix before names and types.
format_reader->skipPrefixBeforeHeader();
- /// This is a bit of abstraction leakage, but we need it in parallel parsing:
- /// we check if this InputFormat is working with the "real" beginning of the data.
if (with_names)
{
if (format_settings.with_names_use_header)
diff --git a/src/QueryPipeline/Pipe.cpp b/src/QueryPipeline/Pipe.cpp
index 6cef7cc28bd..551841524b3 100644
--- a/src/QueryPipeline/Pipe.cpp
+++ b/src/QueryPipeline/Pipe.cpp
@@ -759,44 +759,6 @@ void Pipe::setSinks(const Pipe::ProcessorGetterWithStreamKind & getter)
header.clear();
}
-void Pipe::setOutputFormat(ProcessorPtr output)
-{
- if (output_ports.empty())
- throw Exception("Cannot set output format to empty Pipe.", ErrorCodes::LOGICAL_ERROR);
-
- if (output_ports.size() != 1)
- throw Exception("Cannot set output format to Pipe because single output port is expected, "
- "but it has " + std::to_string(output_ports.size()) + " ports", ErrorCodes::LOGICAL_ERROR);
-
- auto * format = dynamic_cast(output.get());
-
- if (!format)
- throw Exception("IOutputFormat processor expected for QueryPipelineBuilder::setOutputFormat.",
- ErrorCodes::LOGICAL_ERROR);
-
- auto & main = format->getPort(IOutputFormat::PortKind::Main);
- auto & totals = format->getPort(IOutputFormat::PortKind::Totals);
- auto & extremes = format->getPort(IOutputFormat::PortKind::Extremes);
-
- if (!totals_port)
- addTotalsSource(std::make_shared(totals.getHeader()));
-
- if (!extremes_port)
- addExtremesSource(std::make_shared(extremes.getHeader()));
-
- if (collected_processors)
- collected_processors->emplace_back(output);
-
- processors.emplace_back(std::move(output));
-
- connect(*output_ports.front(), main);
- connect(*totals_port, totals);
- connect(*extremes_port, extremes);
-
- output_ports.clear();
- header.clear();
-}
-
void Pipe::transform(const Transformer & transformer)
{
if (output_ports.empty())
diff --git a/src/QueryPipeline/Pipe.h b/src/QueryPipeline/Pipe.h
index 613e92a782d..bc19b8389b3 100644
--- a/src/QueryPipeline/Pipe.h
+++ b/src/QueryPipeline/Pipe.h
@@ -141,7 +141,6 @@ private:
bool isCompleted() const { return !empty() && output_ports.empty(); }
static Pipe unitePipes(Pipes pipes, Processors * collected_processors, bool allow_empty_header);
void setSinks(const Pipe::ProcessorGetterWithStreamKind & getter);
- void setOutputFormat(ProcessorPtr output);
friend class QueryPipelineBuilder;
friend class QueryPipeline;
diff --git a/src/QueryPipeline/QueryPipelineBuilder.cpp b/src/QueryPipeline/QueryPipelineBuilder.cpp
index dba7c7cb8f7..fcd3105a422 100644
--- a/src/QueryPipeline/QueryPipelineBuilder.cpp
+++ b/src/QueryPipeline/QueryPipelineBuilder.cpp
@@ -8,7 +8,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -247,21 +246,6 @@ void QueryPipelineBuilder::addExtremesTransform()
pipe.addTransform(std::move(transform), nullptr, port);
}
-void QueryPipelineBuilder::setOutputFormat(ProcessorPtr output)
-{
- checkInitializedAndNotCompleted();
-
- if (output_format)
- throw Exception("QueryPipeline already has output.", ErrorCodes::LOGICAL_ERROR);
-
- resize(1);
-
- output_format = dynamic_cast(output.get());
- pipe.setOutputFormat(std::move(output));
-
- initRowsBeforeLimit();
-}
-
QueryPipelineBuilder QueryPipelineBuilder::unitePipelines(
std::vector> pipelines,
size_t max_threads_limit,
@@ -461,93 +445,6 @@ void QueryPipelineBuilder::setProcessListElement(QueryStatus * elem)
}
}
-void QueryPipelineBuilder::initRowsBeforeLimit()
-{
- RowsBeforeLimitCounterPtr rows_before_limit_at_least;
-
- /// TODO: add setRowsBeforeLimitCounter as virtual method to IProcessor.
- std::vector limits;
- std::vector remote_sources;
-
- std::unordered_set visited;
-
- struct QueuedEntry
- {
- IProcessor * processor;
- bool visited_limit;
- };
-
- std::queue queue;
-
- queue.push({ output_format, false });
- visited.emplace(output_format);
-
- while (!queue.empty())
- {
- auto * processor = queue.front().processor;
- auto visited_limit = queue.front().visited_limit;
- queue.pop();
-
- if (!visited_limit)
- {
- if (auto * limit = typeid_cast(processor))
- {
- visited_limit = true;
- limits.emplace_back(limit);
- }
-
- if (auto * source = typeid_cast(processor))
- remote_sources.emplace_back(source);
- }
- else if (auto * sorting = typeid_cast(processor))
- {
- if (!rows_before_limit_at_least)
- rows_before_limit_at_least = std::make_shared();
-
- sorting->setRowsBeforeLimitCounter(rows_before_limit_at_least);
-
- /// Don't go to children. Take rows_before_limit from last PartialSortingTransform.
- continue;
- }
-
- /// Skip totals and extremes port for output format.
- if (auto * format = dynamic_cast(processor))
- {
- auto * child_processor = &format->getPort(IOutputFormat::PortKind::Main).getOutputPort().getProcessor();
- if (visited.emplace(child_processor).second)
- queue.push({ child_processor, visited_limit });
-
- continue;
- }
-
- for (auto & child_port : processor->getInputs())
- {
- auto * child_processor = &child_port.getOutputPort().getProcessor();
- if (visited.emplace(child_processor).second)
- queue.push({ child_processor, visited_limit });
- }
- }
-
- if (!rows_before_limit_at_least && (!limits.empty() || !remote_sources.empty()))
- {
- rows_before_limit_at_least = std::make_shared();
-
- for (auto & limit : limits)
- limit->setRowsBeforeLimitCounter(rows_before_limit_at_least);
-
- for (auto & source : remote_sources)
- source->setRowsBeforeLimitCounter(rows_before_limit_at_least);
- }
-
- /// If there is a limit, then enable rows_before_limit_at_least
- /// It is needed when zero rows is read, but we still want rows_before_limit_at_least in result.
- if (!limits.empty())
- rows_before_limit_at_least->add(0);
-
- if (rows_before_limit_at_least)
- output_format->setRowsBeforeLimitCounter(rows_before_limit_at_least);
-}
-
PipelineExecutorPtr QueryPipelineBuilder::execute()
{
if (!isCompleted())
diff --git a/src/QueryPipeline/QueryPipelineBuilder.h b/src/QueryPipeline/QueryPipelineBuilder.h
index 9e198f45e98..ac84191cf34 100644
--- a/src/QueryPipeline/QueryPipelineBuilder.h
+++ b/src/QueryPipeline/QueryPipelineBuilder.h
@@ -10,8 +10,6 @@
namespace DB
{
-class IOutputFormat;
-
class QueryPipelineProcessorsCollector;
struct AggregatingTransformParams;
@@ -71,10 +69,6 @@ public:
void addTotalsHavingTransform(ProcessorPtr transform);
/// Add transform which calculates extremes. This transform adds extremes port and doesn't change inputs number.
void addExtremesTransform();
- /// Resize pipeline to single output and add IOutputFormat. Pipeline will be completed after this transformation.
- void setOutputFormat(ProcessorPtr output);
- /// Get current OutputFormat.
- IOutputFormat * getOutputFormat() const { return output_format; }
/// Sink is a processor with single input port and no output ports. Creates sink for each output port.
/// Pipeline will be completed after this transformation.
void setSinks(const Pipe::ProcessorGetterWithStreamKind & getter);
@@ -163,7 +157,6 @@ public:
private:
Pipe pipe;
- IOutputFormat * output_format = nullptr;
/// Limit on the number of threads. Zero means no limit.
/// Sometimes, more streams are created than the number of threads for more optimal execution.
@@ -174,8 +167,6 @@ private:
void checkInitialized();
void checkInitializedAndNotCompleted();
- void initRowsBeforeLimit();
-
void setCollectedProcessors(Processors * processors);
friend class QueryPipelineProcessorsCollector;
diff --git a/src/Server/HTTPHandler.cpp b/src/Server/HTTPHandler.cpp
index 9218c75c390..8e9ce395f83 100644
--- a/src/Server/HTTPHandler.cpp
+++ b/src/Server/HTTPHandler.cpp
@@ -922,7 +922,7 @@ void HTTPHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse
setThreadName("HTTPHandler");
ThreadStatus thread_status;
- session = std::make_unique(server.context(), ClientInfo::Interface::HTTP);
+ session = std::make_unique(server.context(), ClientInfo::Interface::HTTP, request.isSecure());
SCOPE_EXIT({ session.reset(); });
std::optional query_scope;
diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp
index f4592a8b2c9..119f601563a 100644
--- a/src/Server/TCPHandler.cpp
+++ b/src/Server/TCPHandler.cpp
@@ -110,7 +110,7 @@ void TCPHandler::runImpl()
setThreadName("TCPHandler");
ThreadStatus thread_status;
- session = std::make_unique(server.context(), ClientInfo::Interface::TCP);
+ session = std::make_unique(server.context(), ClientInfo::Interface::TCP, socket().secure());
extractConnectionSettingsFromContext(server.context());
socket().setReceiveTimeout(receive_timeout);
diff --git a/src/Storages/HDFS/StorageHDFS.cpp b/src/Storages/HDFS/StorageHDFS.cpp
index 74f6937dbae..51c887b7a17 100644
--- a/src/Storages/HDFS/StorageHDFS.cpp
+++ b/src/Storages/HDFS/StorageHDFS.cpp
@@ -26,6 +26,7 @@
#include
#include
#include
+#include
#include
#include
@@ -164,6 +165,13 @@ StorageHDFS::StorageHDFS(
storage_metadata.setConstraints(constraints_);
storage_metadata.setComment(comment);
setInMemoryMetadata(storage_metadata);
+
+ auto default_virtuals = NamesAndTypesList{
+ {"_path", std::make_shared(std::make_shared())},
+ {"_file", std::make_shared(std::make_shared())}};
+
+ auto columns = storage_metadata.getSampleBlock().getNamesAndTypesList();
+ virtual_columns = getVirtualsForStorage(columns, default_virtuals);
}
ColumnsDescription StorageHDFS::getTableStructureFromData(
@@ -273,36 +281,6 @@ private:
Strings::iterator uris_iter;
};
-Block HDFSSource::getHeader(const StorageMetadataPtr & metadata_snapshot, bool need_path_column, bool need_file_column)
-{
- auto header = metadata_snapshot->getSampleBlock();
- /// Note: AddingDefaultsBlockInputStream doesn't change header.
- if (need_path_column)
- header.insert(
- {DataTypeLowCardinality{std::make_shared<DataTypeString>()}.createColumn(),
- std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()),
- "_path"});
- if (need_file_column)
- header.insert(
- {DataTypeLowCardinality{std::make_shared<DataTypeString>()}.createColumn(),
- std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()),
- "_file"});
- return header;
-}
-
-Block HDFSSource::getBlockForSource(
- const StorageHDFSPtr & storage,
- const StorageSnapshotPtr & storage_snapshot,
- const ColumnsDescription & columns_description,
- bool need_path_column,
- bool need_file_column)
-{
- if (storage->isColumnOriented())
- return storage_snapshot->getSampleBlockForColumns(columns_description.getNamesOfPhysical());
- else
- return getHeader(storage_snapshot->metadata, need_path_column, need_file_column);
-}
-
HDFSSource::DisclosedGlobIterator::DisclosedGlobIterator(ContextPtr context_, const String & uri)
: pimpl(std::make_shared(context_, uri)) {}
@@ -321,22 +299,28 @@ String HDFSSource::URISIterator::next()
return pimpl->next();
}
+Block HDFSSource::getHeader(Block sample_block, const std::vector<NameAndTypePair> & requested_virtual_columns)
+{
+ for (const auto & virtual_column : requested_virtual_columns)
+ sample_block.insert({virtual_column.type->createColumn(), virtual_column.type, virtual_column.name});
+
+ return sample_block;
+}
+
HDFSSource::HDFSSource(
StorageHDFSPtr storage_,
- const StorageSnapshotPtr & storage_snapshot_,
+ const Block & block_for_format_,
+ const std::vector<NameAndTypePair> & requested_virtual_columns_,
ContextPtr context_,
UInt64 max_block_size_,
- bool need_path_column_,
- bool need_file_column_,
std::shared_ptr<IteratorWrapper> file_iterator_,
ColumnsDescription columns_description_)
- : SourceWithProgress(getBlockForSource(storage_, storage_snapshot_, columns_description_, need_path_column_, need_file_column_))
+ : SourceWithProgress(getHeader(block_for_format_, requested_virtual_columns_))
, WithContext(context_)
, storage(std::move(storage_))
- , storage_snapshot(storage_snapshot_)
+ , block_for_format(block_for_format_)
+ , requested_virtual_columns(requested_virtual_columns_)
, max_block_size(max_block_size_)
- , need_path_column(need_path_column_)
- , need_file_column(need_file_column_)
, file_iterator(file_iterator_)
, columns_description(std::move(columns_description_))
{
@@ -361,14 +345,7 @@ bool HDFSSource::initialize()
auto compression = chooseCompressionMethod(path_from_uri, storage->compression_method);
read_buf = wrapReadBufferWithCompressionMethod(std::make_unique<ReadBufferFromHDFS>(uri_without_path, path_from_uri, getContext()->getGlobalContext()->getConfigRef()), compression);
- auto get_block_for_format = [&]() -> Block
- {
- if (storage->isColumnOriented())
- return storage_snapshot->getSampleBlockForColumns(columns_description.getNamesOfPhysical());
- return storage_snapshot->metadata->getSampleBlock();
- };
-
- auto input_format = getContext()->getInputFormat(storage->format_name, *read_buf, get_block_for_format(), max_block_size);
+ auto input_format = getContext()->getInputFormat(storage->format_name, *read_buf, block_for_format, max_block_size);
QueryPipelineBuilder builder;
builder.init(Pipe(input_format));
@@ -402,20 +379,21 @@ Chunk HDFSSource::generate()
Columns columns = chunk.getColumns();
UInt64 num_rows = chunk.getNumRows();
- /// Enrich with virtual columns.
- if (need_path_column)
+ for (const auto & virtual_column : requested_virtual_columns)
{
- auto column = DataTypeLowCardinality{std::make_shared<DataTypeString>()}.createColumnConst(num_rows, current_path);
- columns.push_back(column->convertToFullColumnIfConst());
- }
+ if (virtual_column.name == "_path")
+ {
+ auto column = DataTypeLowCardinality{std::make_shared<DataTypeString>()}.createColumnConst(num_rows, current_path);
+ columns.push_back(column->convertToFullColumnIfConst());
+ }
+ else if (virtual_column.name == "_file")
+ {
+ size_t last_slash_pos = current_path.find_last_of('/');
+ auto file_name = current_path.substr(last_slash_pos + 1);
- if (need_file_column)
- {
- size_t last_slash_pos = current_path.find_last_of('/');
- auto file_name = current_path.substr(last_slash_pos + 1);
-
- auto column = DataTypeLowCardinality{std::make_shared<DataTypeString>()}.createColumnConst(num_rows, std::move(file_name));
- columns.push_back(column->convertToFullColumnIfConst());
+ auto column = DataTypeLowCardinality{std::make_shared<DataTypeString>()}.createColumnConst(num_rows, std::move(file_name));
+ columns.push_back(column->convertToFullColumnIfConst());
+ }
}
return Chunk(std::move(columns), num_rows);
@@ -526,17 +504,6 @@ Pipe StorageHDFS::read(
size_t max_block_size,
unsigned num_streams)
{
- bool need_path_column = false;
- bool need_file_column = false;
-
- for (const auto & column : column_names)
- {
- if (column == "_path")
- need_path_column = true;
- if (column == "_file")
- need_file_column = true;
- }
-
std::shared_ptr<HDFSSource::IteratorWrapper> iterator_wrapper{nullptr};
if (distributed_processing)
{
@@ -563,27 +530,51 @@ Pipe StorageHDFS::read(
});
}
+ std::unordered_set<String> column_names_set(column_names.begin(), column_names.end());
+ std::vector<NameAndTypePair> requested_virtual_columns;
+
+ for (const auto & virtual_column : getVirtuals())
+ {
+ if (column_names_set.contains(virtual_column.name))
+ requested_virtual_columns.push_back(virtual_column);
+ }
+
+ ColumnsDescription columns_description;
+ Block block_for_format;
+ if (isColumnOriented())
+ {
+ auto fetch_columns = column_names;
+ const auto & virtuals = getVirtuals();
+ std::erase_if(
+ fetch_columns,
+ [&](const String & col)
+ { return std::any_of(virtuals.begin(), virtuals.end(), [&](const NameAndTypePair & virtual_col){ return col == virtual_col.name; }); });
+
+ if (fetch_columns.empty())
+ fetch_columns.push_back(ExpressionActions::getSmallestColumn(storage_snapshot->metadata->getColumns().getAllPhysical()));
+
+ columns_description = ColumnsDescription{
+ storage_snapshot->getSampleBlockForColumns(fetch_columns).getNamesAndTypesList()};
+ block_for_format = storage_snapshot->getSampleBlockForColumns(columns_description.getNamesOfPhysical());
+ }
+ else
+ {
+ columns_description = storage_snapshot->metadata->getColumns();
+ block_for_format = storage_snapshot->metadata->getSampleBlock();
+ }
+
Pipes pipes;
auto this_ptr = std::static_pointer_cast<StorageHDFS>(shared_from_this());
for (size_t i = 0; i < num_streams; ++i)
{
- const auto get_columns_for_format = [&]() -> ColumnsDescription
- {
- if (isColumnOriented())
- return ColumnsDescription{storage_snapshot->getSampleBlockForColumns(column_names).getNamesAndTypesList()};
- else
- return storage_snapshot->metadata->getColumns();
- };
-
pipes.emplace_back(std::make_shared<HDFSSource>(
this_ptr,
- storage_snapshot,
+ block_for_format,
+ requested_virtual_columns,
context_,
max_block_size,
- need_path_column,
- need_file_column,
iterator_wrapper,
- get_columns_for_format()));
+ columns_description));
}
return Pipe::unitePipes(std::move(pipes));
}
@@ -715,9 +706,7 @@ void registerStorageHDFS(StorageFactory & factory)
NamesAndTypesList StorageHDFS::getVirtuals() const
{
- return NamesAndTypesList{
- {"_path", std::make_shared(std::make_shared())},
- {"_file", std::make_shared(std::make_shared())}};
+ return virtual_columns;
}
}
diff --git a/src/Storages/HDFS/StorageHDFS.h b/src/Storages/HDFS/StorageHDFS.h
index e87564aef32..d2cf77b3098 100644
--- a/src/Storages/HDFS/StorageHDFS.h
+++ b/src/Storages/HDFS/StorageHDFS.h
@@ -76,6 +76,7 @@ private:
const bool distributed_processing;
ASTPtr partition_by;
bool is_path_with_globs;
+ NamesAndTypesList virtual_columns;
Poco::Logger * log = &Poco::Logger::get("StorageHDFS");
};
@@ -110,25 +111,14 @@ public:
using IteratorWrapper = std::function<String()>;
using StorageHDFSPtr = std::shared_ptr<StorageHDFS>;
- static Block getHeader(
- const StorageMetadataPtr & metadata_snapshot,
- bool need_path_column,
- bool need_file_column);
-
- static Block getBlockForSource(
- const StorageHDFSPtr & storage,
- const StorageSnapshotPtr & storage_snapshot_,
- const ColumnsDescription & columns_description,
- bool need_path_column,
- bool need_file_column);
+ static Block getHeader(Block sample_block, const std::vector<NameAndTypePair> & requested_virtual_columns);
HDFSSource(
StorageHDFSPtr storage_,
- const StorageSnapshotPtr & storage_snapshot_,
+ const Block & block_for_format_,
+ const std::vector<NameAndTypePair> & requested_virtual_columns_,
ContextPtr context_,
UInt64 max_block_size_,
- bool need_path_column_,
- bool need_file_column_,
std::shared_ptr<IteratorWrapper> file_iterator_,
ColumnsDescription columns_description_);
@@ -140,7 +130,8 @@ public:
private:
StorageHDFSPtr storage;
- StorageSnapshotPtr storage_snapshot;
+ Block block_for_format;
+ std::vector<NameAndTypePair> requested_virtual_columns;
UInt64 max_block_size;
bool need_path_column;
bool need_file_column;
diff --git a/src/Storages/Hive/StorageHive.cpp b/src/Storages/Hive/StorageHive.cpp
index c62ba491040..61bbc3ce4f6 100644
--- a/src/Storages/Hive/StorageHive.cpp
+++ b/src/Storages/Hive/StorageHive.cpp
@@ -742,6 +742,7 @@ void registerStorageHive(StorageFactory & factory)
StorageFactory::StorageFeatures{
.supports_settings = true,
.supports_sort_order = true,
+ .source_access_type = AccessType::HIVE,
});
}
diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp
index c19b579bd42..6711c5d8e5f 100644
--- a/src/Storages/MergeTree/MergeTreeData.cpp
+++ b/src/Storages/MergeTree/MergeTreeData.cpp
@@ -1314,9 +1314,6 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
if (!parts_from_wal.empty())
loadDataPartsFromWAL(broken_parts_to_detach, duplicate_parts_to_remove, parts_from_wal, part_lock);
- for (auto & part : duplicate_parts_to_remove)
- part->remove();
-
for (auto & part : broken_parts_to_detach)
part->renameToDetached("broken-on-start"); /// detached parts must not have '_' in prefixes
diff --git a/src/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp
index 1cf701492a9..82be9f1708a 100644
--- a/src/Storages/MergeTree/registerStorageMergeTree.cpp
+++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp
@@ -256,20 +256,19 @@ static StoragePtr create(const StorageFactory::Arguments & args)
{
String msg;
if (is_extended_storage_def)
- msg += "With extended storage definition syntax storage " + args.engine_name + " requires ";
+ msg += fmt::format("With extended storage definition syntax storage {} requires ", args.engine_name);
else
- msg += "Storage " + args.engine_name + " requires ";
+ msg += fmt::format("ORDER BY or PRIMARY KEY clause is missing. "
+ "Consider using extended storage definition syntax with ORDER BY or PRIMARY KEY clause. "
+ "With deprecated old syntax (highly not recommended) storage {} requires ", args.engine_name);
- if (max_num_params)
- {
- if (min_num_params == max_num_params)
- msg += toString(min_num_params) + " parameters: ";
- else
- msg += toString(min_num_params) + " to " + toString(max_num_params) + " parameters: ";
- msg += needed_params;
- }
- else
+ if (max_num_params == 0)
msg += "no parameters";
+ if (min_num_params == max_num_params)
+ msg += fmt::format("{} parameters: {}", min_num_params, needed_params);
+ else
+ msg += fmt::format("{} to {} parameters: {}", min_num_params, max_num_params, needed_params);
+
msg += getMergeTreeVerboseHelp(is_extended_storage_def);
diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp
index 396adb801f5..adc21cc8c60 100644
--- a/src/Storages/StorageS3.cpp
+++ b/src/Storages/StorageS3.cpp
@@ -25,6 +25,7 @@
#include
#include
#include
+#include
#include
#include
@@ -210,25 +211,16 @@ String StorageS3Source::KeysIterator::next()
return pimpl->next();
}
-Block StorageS3Source::getHeader(Block sample_block, bool with_path_column, bool with_file_column)
+Block StorageS3Source::getHeader(Block sample_block, const std::vector<NameAndTypePair> & requested_virtual_columns)
{
- if (with_path_column)
- sample_block.insert(
- {DataTypeLowCardinality{std::make_shared<DataTypeString>()}.createColumn(),
- std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()),
- "_path"});
- if (with_file_column)
- sample_block.insert(
- {DataTypeLowCardinality{std::make_shared<DataTypeString>()}.createColumn(),
- std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()),
- "_file"});
+ for (const auto & virtual_column : requested_virtual_columns)
+ sample_block.insert({virtual_column.type->createColumn(), virtual_column.type, virtual_column.name});
return sample_block;
}
StorageS3Source::StorageS3Source(
- bool need_path,
- bool need_file,
+ const std::vector<NameAndTypePair> & requested_virtual_columns_,
const String & format_,
String name_,
const Block & sample_block_,
@@ -242,7 +234,7 @@ StorageS3Source::StorageS3Source(
const String & bucket_,
std::shared_ptr<IteratorWrapper> file_iterator_,
const size_t download_thread_num_)
- : SourceWithProgress(getHeader(sample_block_, need_path, need_file))
+ : SourceWithProgress(getHeader(sample_block_, requested_virtual_columns_))
, WithContext(context_)
, name(std::move(name_))
, bucket(bucket_)
@@ -254,8 +246,7 @@ StorageS3Source::StorageS3Source(
, client(client_)
, sample_block(sample_block_)
, format_settings(format_settings_)
- , with_file_column(need_file)
- , with_path_column(need_path)
+ , requested_virtual_columns(requested_virtual_columns_)
, file_iterator(file_iterator_)
, download_thread_num(download_thread_num_)
{
@@ -344,16 +335,18 @@ Chunk StorageS3Source::generate()
{
UInt64 num_rows = chunk.getNumRows();
- if (with_path_column)
- chunk.addColumn(DataTypeLowCardinality{std::make_shared<DataTypeString>()}
- .createColumnConst(num_rows, file_path)
- ->convertToFullColumnIfConst());
- if (with_file_column)
+ for (const auto & virtual_column : requested_virtual_columns)
{
- size_t last_slash_pos = file_path.find_last_of('/');
- chunk.addColumn(DataTypeLowCardinality{std::make_shared<DataTypeString>()}
- .createColumnConst(num_rows, file_path.substr(last_slash_pos + 1))
- ->convertToFullColumnIfConst());
+ if (virtual_column.name == "_path")
+ {
+ chunk.addColumn(virtual_column.type->createColumnConst(num_rows, file_path)->convertToFullColumnIfConst());
+ }
+ else if (virtual_column.name == "_file")
+ {
+ size_t last_slash_pos = file_path.find_last_of('/');
+ auto column = virtual_column.type->createColumnConst(num_rows, file_path.substr(last_slash_pos + 1));
+ chunk.addColumn(column->convertToFullColumnIfConst());
+ }
}
return chunk;
@@ -627,6 +620,13 @@ StorageS3::StorageS3(
storage_metadata.setConstraints(constraints_);
storage_metadata.setComment(comment);
setInMemoryMetadata(storage_metadata);
+
+ auto default_virtuals = NamesAndTypesList{
+ {"_path", std::make_shared(std::make_shared())},
+ {"_file", std::make_shared(std::make_shared())}};
+
+ auto columns = storage_metadata.getSampleBlock().getNamesAndTypesList();
+ virtual_columns = getVirtualsForStorage(columns, default_virtuals);
}
std::shared_ptr<StorageS3Source::IteratorWrapper> StorageS3::createFileIterator(const ClientAuthentication & client_auth, const std::vector<String> & keys, bool is_key_with_globs, bool distributed_processing, ContextPtr local_context)
@@ -674,14 +674,14 @@ Pipe StorageS3::read(
updateClientAndAuthSettings(local_context, client_auth);
Pipes pipes;
- bool need_path_column = false;
- bool need_file_column = false;
- for (const auto & column : column_names)
+
+ std::unordered_set<String> column_names_set(column_names.begin(), column_names.end());
+ std::vector<NameAndTypePair> requested_virtual_columns;
+
+ for (const auto & virtual_column : getVirtuals())
{
- if (column == "_path")
- need_path_column = true;
- if (column == "_file")
- need_file_column = true;
+ if (column_names_set.contains(virtual_column.name))
+ requested_virtual_columns.push_back(virtual_column);
}
std::shared_ptr<StorageS3Source::IteratorWrapper> iterator_wrapper = createFileIterator(client_auth, keys, is_key_with_globs, distributed_processing, local_context);
@@ -690,8 +690,18 @@ Pipe StorageS3::read(
Block block_for_format;
if (isColumnOriented())
{
+ auto fetch_columns = column_names;
+ const auto & virtuals = getVirtuals();
+ std::erase_if(
+ fetch_columns,
+ [&](const String & col)
+ { return std::any_of(virtuals.begin(), virtuals.end(), [&](const NameAndTypePair & virtual_col){ return col == virtual_col.name; }); });
+
+ if (fetch_columns.empty())
+ fetch_columns.push_back(ExpressionActions::getSmallestColumn(storage_snapshot->metadata->getColumns().getAllPhysical()));
+
columns_description = ColumnsDescription{
- storage_snapshot->getSampleBlockForColumns(column_names).getNamesAndTypesList()};
+ storage_snapshot->getSampleBlockForColumns(fetch_columns).getNamesAndTypesList()};
block_for_format = storage_snapshot->getSampleBlockForColumns(columns_description.getNamesOfPhysical());
}
else
@@ -704,8 +714,7 @@ Pipe StorageS3::read(
for (size_t i = 0; i < num_streams; ++i)
{
pipes.emplace_back(std::make_shared<StorageS3Source>(
- need_path_column,
- need_file_column,
+ requested_virtual_columns,
format_name,
getName(),
block_for_format,
@@ -882,6 +891,8 @@ StorageS3Configuration StorageS3::getConfiguration(ASTs & engine_args, ContextPt
configuration.access_key_id = arg_value->as<ASTLiteral>()->value.safeGet<String>();
else if (arg_name == "secret_access_key")
configuration.secret_access_key = arg_value->as<ASTLiteral>()->value.safeGet<String>();
+ else if (arg_name == "filename")
+ configuration.url = std::filesystem::path(configuration.url) / arg_value->as<ASTLiteral>()->value.safeGet<String>();
else
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Unknown key-value argument `{}` for StorageS3, expected: url, [access_key_id, secret_access_key], name of used format and [compression_method].",
@@ -1081,9 +1092,7 @@ void registerStorageCOS(StorageFactory & factory)
NamesAndTypesList StorageS3::getVirtuals() const
{
- return NamesAndTypesList{
- {"_path", std::make_shared(std::make_shared())},
- {"_file", std::make_shared(std::make_shared())}};
+ return virtual_columns;
}
bool StorageS3::supportsPartitionBy() const
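Note on the new `filename` key-value argument handled in `StorageS3::getConfiguration` above: it is resolved against the base `url` taken from the named collection with `std::filesystem::path::operator/`. Below is a minimal, standalone sketch of that joining behaviour; the URL and file name are made-up illustrative values, not taken from any real configuration.

#include <filesystem>
#include <iostream>
#include <string>

int main()
{
    // Base URL as it might come from a named collection (hypothetical value).
    std::string url = "https://s3.example.com/bucket/data/";
    // Value passed as filename = '...' in the query (hypothetical value).
    std::string filename = "part-0001.tsv.gz";

    // operator/ appends a directory separator only when the base does not already
    // end with one, so the collection URL and the filename are joined into a single key.
    std::string resolved = (std::filesystem::path(url) / filename).string();

    std::cout << resolved << '\n';  // https://s3.example.com/bucket/data/part-0001.tsv.gz
    return 0;
}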
diff --git a/src/Storages/StorageS3.h b/src/Storages/StorageS3.h
index e67223190ba..cbf18e240f6 100644
--- a/src/Storages/StorageS3.h
+++ b/src/Storages/StorageS3.h
@@ -58,11 +58,10 @@ public:
using IteratorWrapper = std::function<String()>;
- static Block getHeader(Block sample_block, bool with_path_column, bool with_file_column);
+ static Block getHeader(Block sample_block, const std::vector<NameAndTypePair> & requested_virtual_columns);
StorageS3Source(
- bool need_path,
- bool need_file,
+ const std::vector<NameAndTypePair> & requested_virtual_columns_,
const String & format,
String name_,
const Block & sample_block,
@@ -102,8 +101,7 @@ private:
std::unique_ptr reader;
/// onCancel and generate can be called concurrently
std::mutex reader_mutex;
- bool with_file_column = false;
- bool with_path_column = false;
+ std::vector<NameAndTypePair> requested_virtual_columns;
std::shared_ptr<IteratorWrapper> file_iterator;
size_t download_thread_num = 1;
@@ -196,6 +194,7 @@ private:
ClientAuthentication client_auth;
std::vector keys;
+ NamesAndTypesList virtual_columns;
String format_name;
UInt64 max_single_read_retries;
diff --git a/src/Storages/WindowView/StorageWindowView.h b/src/Storages/WindowView/StorageWindowView.h
index ef552262378..863eac275a2 100644
--- a/src/Storages/WindowView/StorageWindowView.h
+++ b/src/Storages/WindowView/StorageWindowView.h
@@ -182,7 +182,6 @@ private:
/// Mutex for the blocks and ready condition
std::mutex mutex;
- std::mutex flush_table_mutex;
std::shared_mutex fire_signal_mutex;
mutable std::mutex sample_block_lock; /// Mutex to protect access to sample block and inner_blocks_query
diff --git a/src/Storages/getVirtualsForStorage.cpp b/src/Storages/getVirtualsForStorage.cpp
new file mode 100644
index 00000000000..93b2aa97856
--- /dev/null
+++ b/src/Storages/getVirtualsForStorage.cpp
@@ -0,0 +1,22 @@
+#include "getVirtualsForStorage.h"
+
+namespace DB
+{
+
+NamesAndTypesList getVirtualsForStorage(const NamesAndTypesList & storage_columns_, const NamesAndTypesList & default_virtuals_)
+{
+ auto default_virtuals = default_virtuals_;
+ auto storage_columns = storage_columns_;
+ default_virtuals.sort();
+ storage_columns.sort();
+
+ NamesAndTypesList result_virtuals;
+ std::set_difference(
+ default_virtuals.begin(), default_virtuals.end(), storage_columns.begin(), storage_columns.end(),
+ std::back_inserter(result_virtuals),
+ [](const NameAndTypePair & lhs, const NameAndTypePair & rhs){ return lhs.name < rhs.name; });
+
+ return result_virtuals;
+}
+
+}
diff --git a/src/Storages/getVirtualsForStorage.h b/src/Storages/getVirtualsForStorage.h
new file mode 100644
index 00000000000..19e13425959
--- /dev/null
+++ b/src/Storages/getVirtualsForStorage.h
@@ -0,0 +1,9 @@
+#pragma once
+#include
+
+namespace DB
+{
+
+NamesAndTypesList getVirtualsForStorage(const NamesAndTypesList & storage_columns_, const NamesAndTypesList & default_virtuals_);
+
+}
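getVirtualsForStorage computes the effective virtual columns as a name-based set difference: a real storage column with the same name as a default virtual takes precedence and hides it. The following is a simplified, self-contained model of that logic, with plain std::pair standing in for NameAndTypePair; the column names and types are illustrative only.

#include <algorithm>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Simplified stand-in for NameAndTypePair: (column name, type name).
using NameAndType = std::pair<std::string, std::string>;

// Keep only those default virtual columns whose names do not clash with storage columns.
std::vector<NameAndType> virtualsForStorage(std::vector<NameAndType> storage_columns, std::vector<NameAndType> default_virtuals)
{
    auto by_name = [](const NameAndType & lhs, const NameAndType & rhs) { return lhs.first < rhs.first; };
    std::sort(storage_columns.begin(), storage_columns.end(), by_name);
    std::sort(default_virtuals.begin(), default_virtuals.end(), by_name);

    std::vector<NameAndType> result;
    std::set_difference(
        default_virtuals.begin(), default_virtuals.end(),
        storage_columns.begin(), storage_columns.end(),
        std::back_inserter(result), by_name);
    return result;
}

int main()
{
    // A table that declares its own "_path" column hides the "_path" virtual,
    // while "_file" stays available.
    auto virtuals = virtualsForStorage(
        {{"a", "Int32"}, {"_path", "String"}},
        {{"_path", "LowCardinality(String)"}, {"_file", "LowCardinality(String)"}});

    for (const auto & [name, type] : virtuals)
        std::cout << name << ' ' << type << '\n';  // prints: _file LowCardinality(String)
    return 0;
}

This is also why the new HDFS test further below (test_virtual_columns_2) reads the physical `_path` column instead of the virtual one when the table structure declares its own `_path`.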
diff --git a/src/TableFunctions/Hive/TableFunctionHive.h b/src/TableFunctions/Hive/TableFunctionHive.h
index 0973bdda329..dba838389e5 100644
--- a/src/TableFunctions/Hive/TableFunctionHive.h
+++ b/src/TableFunctions/Hive/TableFunctionHive.h
@@ -10,7 +10,7 @@ class TableFunctionHive : public ITableFunction
{
public:
static constexpr auto name = "hive";
- static constexpr auto storage_type_name = "hive";
+ static constexpr auto storage_type_name = "Hive";
std::string getName() const override { return name; }
bool hasStaticStructure() const override { return true; }
diff --git a/src/TableFunctions/ITableFunction.cpp b/src/TableFunctions/ITableFunction.cpp
index 42b24abdbbe..639240fd105 100644
--- a/src/TableFunctions/ITableFunction.cpp
+++ b/src/TableFunctions/ITableFunction.cpp
@@ -14,11 +14,16 @@ namespace ProfileEvents
namespace DB
{
+AccessType ITableFunction::getSourceAccessType() const
+{
+ return StorageFactory::instance().getSourceAccessType(getStorageTypeName());
+}
+
StoragePtr ITableFunction::execute(const ASTPtr & ast_function, ContextPtr context, const std::string & table_name,
ColumnsDescription cached_columns, bool use_global_context) const
{
ProfileEvents::increment(ProfileEvents::TableFunctionExecute);
- context->checkAccess(AccessType::CREATE_TEMPORARY_TABLE | StorageFactory::instance().getSourceAccessType(getStorageTypeName()));
+ context->checkAccess(AccessType::CREATE_TEMPORARY_TABLE | getSourceAccessType());
auto context_to_use = use_global_context ? context->getGlobalContext() : context;
diff --git a/src/TableFunctions/ITableFunction.h b/src/TableFunctions/ITableFunction.h
index 9c8d694865b..b419c4cfeed 100644
--- a/src/TableFunctions/ITableFunction.h
+++ b/src/TableFunctions/ITableFunction.h
@@ -3,6 +3,7 @@
#include
#include
#include
+#include
#include
#include
@@ -71,7 +72,10 @@ public:
private:
virtual StoragePtr executeImpl(
const ASTPtr & ast_function, ContextPtr context, const std::string & table_name, ColumnsDescription cached_columns) const = 0;
+
virtual const char * getStorageTypeName() const = 0;
+
+ virtual AccessType getSourceAccessType() const;
};
using TableFunctionPtr = std::shared_ptr<ITableFunction>;
diff --git a/src/TableFunctions/TableFunctionHDFSCluster.h b/src/TableFunctions/TableFunctionHDFSCluster.h
index 58d1c3d9b05..b5464e2fd19 100644
--- a/src/TableFunctions/TableFunctionHDFSCluster.h
+++ b/src/TableFunctions/TableFunctionHDFSCluster.h
@@ -39,6 +39,8 @@ protected:
const char * getStorageTypeName() const override { return "HDFSCluster"; }
+ AccessType getSourceAccessType() const override { return AccessType::HDFS; }
+
ColumnsDescription getActualTableStructure(ContextPtr) const override;
void parseArguments(const ASTPtr &, ContextPtr) override;
diff --git a/src/TableFunctions/TableFunctionS3.cpp b/src/TableFunctions/TableFunctionS3.cpp
index abd16f138b4..a80eab219e8 100644
--- a/src/TableFunctions/TableFunctionS3.cpp
+++ b/src/TableFunctions/TableFunctionS3.cpp
@@ -12,6 +12,7 @@
#include
#include
#include "registerTableFunctions.h"
+#include
namespace DB
@@ -37,6 +38,8 @@ void TableFunctionS3::parseArgumentsImpl(const String & error_message, ASTs & ar
s3_configuration.access_key_id = arg_value->as<ASTLiteral>()->value.safeGet<String>();
else if (arg_name == "secret_access_key")
s3_configuration.secret_access_key = arg_value->as<ASTLiteral>()->value.safeGet<String>();
+ else if (arg_name == "filename")
+ s3_configuration.url = std::filesystem::path(s3_configuration.url) / arg_value->as<ASTLiteral>()->value.safeGet<String>();
else
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, error_message);
}
diff --git a/src/TableFunctions/TableFunctionS3Cluster.h b/src/TableFunctions/TableFunctionS3Cluster.h
index 81427b27aab..d4278980e9f 100644
--- a/src/TableFunctions/TableFunctionS3Cluster.h
+++ b/src/TableFunctions/TableFunctionS3Cluster.h
@@ -45,6 +45,8 @@ protected:
const char * getStorageTypeName() const override { return "S3Cluster"; }
+ AccessType getSourceAccessType() const override { return AccessType::S3; }
+
ColumnsDescription getActualTableStructure(ContextPtr) const override;
void parseArguments(const ASTPtr &, ContextPtr) override;
diff --git a/tests/ci/docker_pull_helper.py b/tests/ci/docker_pull_helper.py
index ee7f3337cd9..717327ded41 100644
--- a/tests/ci/docker_pull_helper.py
+++ b/tests/ci/docker_pull_helper.py
@@ -56,18 +56,20 @@ def get_images_with_versions(
for i in range(10):
try:
logging.info("Pulling image %s", docker_image)
- latest_error = subprocess.check_output(
+ subprocess.check_output(
f"docker pull {docker_image}",
stderr=subprocess.STDOUT,
shell=True,
)
break
except Exception as ex:
+ latest_error = ex
time.sleep(i * 3)
logging.info("Got execption pulling docker %s", ex)
else:
raise Exception(
- f"Cannot pull dockerhub for image docker pull {docker_image} because of {latest_error}"
+ "Cannot pull dockerhub for image docker pull "
+ f"{docker_image} because of {latest_error}"
)
return docker_images
diff --git a/tests/config/config.d/named_collection.xml b/tests/config/config.d/named_collection.xml
index bc75461d664..f7b731061a8 100644
--- a/tests/config/config.d/named_collection.xml
+++ b/tests/config/config.d/named_collection.xml
@@ -22,5 +22,11 @@
+
+ http://localhost:11111/test/
+ test
+ testtest
+ auto
+
diff --git a/tests/integration/test_dotnet_client/test.py b/tests/integration/test_dotnet_client/test.py
index b147688c099..2af9b80f720 100644
--- a/tests/integration/test_dotnet_client/test.py
+++ b/tests/integration/test_dotnet_client/test.py
@@ -44,7 +44,7 @@ def dotnet_container():
"-f",
docker_compose,
"up",
- "--no-recreate",
+ "--force-recreate",
"-d",
"--no-build",
]
diff --git a/tests/integration/test_mysql_protocol/test.py b/tests/integration/test_mysql_protocol/test.py
index 78049e0f123..6e61675563f 100644
--- a/tests/integration/test_mysql_protocol/test.py
+++ b/tests/integration/test_mysql_protocol/test.py
@@ -55,7 +55,7 @@ def golang_container():
"-f",
docker_compose,
"up",
- "--no-recreate",
+ "--force-recreate",
"-d",
"--no-build",
]
@@ -82,7 +82,7 @@ def php_container():
"-f",
docker_compose,
"up",
- "--no-recreate",
+ "--force-recreate",
"-d",
"--no-build",
]
@@ -109,7 +109,7 @@ def nodejs_container():
"-f",
docker_compose,
"up",
- "--no-recreate",
+ "--force-recreate",
"-d",
"--no-build",
]
@@ -136,7 +136,7 @@ def java_container():
"-f",
docker_compose,
"up",
- "--no-recreate",
+ "--force-recreate",
"-d",
"--no-build",
]
diff --git a/tests/integration/test_postgresql_protocol/test.py b/tests/integration/test_postgresql_protocol/test.py
index 5c270fd9ca7..43528c13c4d 100644
--- a/tests/integration/test_postgresql_protocol/test.py
+++ b/tests/integration/test_postgresql_protocol/test.py
@@ -56,7 +56,7 @@ def psql_client():
"-f",
docker_compose,
"up",
- "--no-recreate",
+ "--force-recreate",
"-d",
"--build",
]
@@ -99,7 +99,7 @@ def java_container():
"-f",
docker_compose,
"up",
- "--no-recreate",
+ "--force-recreate",
"-d",
"--build",
]
diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py
index 7f340424ccf..5e8a96d2c05 100644
--- a/tests/integration/test_storage_hdfs/test.py
+++ b/tests/integration/test_storage_hdfs/test.py
@@ -554,6 +554,26 @@ def test_insert_select_schema_inference(started_cluster):
assert int(result) == 1
+def test_virtual_columns_2(started_cluster):
+ hdfs_api = started_cluster.hdfs_api
+
+ table_function = (
+ f"hdfs('hdfs://hdfs1:9000/parquet_2', 'Parquet', 'a Int32, b String')"
+ )
+ node1.query(f"insert into table function {table_function} SELECT 1, 'kek'")
+
+ result = node1.query(f"SELECT _path FROM {table_function}")
+ assert result.strip() == "hdfs://hdfs1:9000/parquet_2"
+
+ table_function = (
+ f"hdfs('hdfs://hdfs1:9000/parquet_3', 'Parquet', 'a Int32, _path String')"
+ )
+ node1.query(f"insert into table function {table_function} SELECT 1, 'kek'")
+
+ result = node1.query(f"SELECT _path FROM {table_function}")
+ assert result.strip() == "kek"
+
+
if __name__ == "__main__":
cluster.start()
input("Cluster created, press any key to destroy...")
diff --git a/tests/queries/0_stateless/01271_show_privileges.reference b/tests/queries/0_stateless/01271_show_privileges.reference
index 831e65d021d..039e438dc0a 100644
--- a/tests/queries/0_stateless/01271_show_privileges.reference
+++ b/tests/queries/0_stateless/01271_show_privileges.reference
@@ -134,6 +134,7 @@ ODBC [] GLOBAL SOURCES
JDBC [] GLOBAL SOURCES
HDFS [] GLOBAL SOURCES
S3 [] GLOBAL SOURCES
+HIVE [] GLOBAL SOURCES
SOURCES [] \N ALL
ALL ['ALL PRIVILEGES'] \N \N
NONE ['USAGE','NO PRIVILEGES'] \N \N
diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference
index bae8d7d332a..9a7f1cc408a 100644
--- a/tests/queries/0_stateless/02117_show_create_table_system.reference
+++ b/tests/queries/0_stateless/02117_show_create_table_system.reference
@@ -20,7 +20,7 @@ CREATE TABLE system.errors\n(\n `name` String,\n `code` Int32,\n `value
CREATE TABLE system.events\n(\n `event` String,\n `value` UInt64,\n `description` String\n)\nENGINE = SystemEvents()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.formats\n(\n `name` String,\n `is_input` UInt8,\n `is_output` UInt8\n)\nENGINE = SystemFormats()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.functions\n(\n `name` String,\n `is_aggregate` UInt8,\n `case_insensitive` UInt8,\n `alias_to` String,\n `create_query` String,\n `origin` Enum8(\'System\' = 0, \'SQLUserDefined\' = 1, \'ExecutableUserDefined\' = 2)\n)\nENGINE = SystemFunctions()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
-CREATE TABLE system.grants\n(\n `user_name` Nullable(String),\n `role_name` Nullable(String),\n `access_type` Enum16(\'SHOW DATABASES\' = 0, \'SHOW TABLES\' = 1, \'SHOW COLUMNS\' = 2, \'SHOW DICTIONARIES\' = 3, \'SHOW\' = 4, \'SELECT\' = 5, \'INSERT\' = 6, \'ALTER UPDATE\' = 7, \'ALTER DELETE\' = 8, \'ALTER ADD COLUMN\' = 9, \'ALTER MODIFY COLUMN\' = 10, \'ALTER DROP COLUMN\' = 11, \'ALTER COMMENT COLUMN\' = 12, \'ALTER CLEAR COLUMN\' = 13, \'ALTER RENAME COLUMN\' = 14, \'ALTER MATERIALIZE COLUMN\' = 15, \'ALTER COLUMN\' = 16, \'ALTER MODIFY COMMENT\' = 17, \'ALTER ORDER BY\' = 18, \'ALTER SAMPLE BY\' = 19, \'ALTER ADD INDEX\' = 20, \'ALTER DROP INDEX\' = 21, \'ALTER MATERIALIZE INDEX\' = 22, \'ALTER CLEAR INDEX\' = 23, \'ALTER INDEX\' = 24, \'ALTER ADD PROJECTION\' = 25, \'ALTER DROP PROJECTION\' = 26, \'ALTER MATERIALIZE PROJECTION\' = 27, \'ALTER CLEAR PROJECTION\' = 28, \'ALTER PROJECTION\' = 29, \'ALTER ADD CONSTRAINT\' = 30, \'ALTER DROP CONSTRAINT\' = 31, \'ALTER CONSTRAINT\' = 32, \'ALTER TTL\' = 33, \'ALTER MATERIALIZE TTL\' = 34, \'ALTER SETTINGS\' = 35, \'ALTER MOVE PARTITION\' = 36, \'ALTER FETCH PARTITION\' = 37, \'ALTER FREEZE PARTITION\' = 38, \'ALTER DATABASE SETTINGS\' = 39, \'ALTER TABLE\' = 40, \'ALTER DATABASE\' = 41, \'ALTER VIEW REFRESH\' = 42, \'ALTER VIEW MODIFY QUERY\' = 43, \'ALTER VIEW\' = 44, \'ALTER\' = 45, \'CREATE DATABASE\' = 46, \'CREATE TABLE\' = 47, \'CREATE VIEW\' = 48, \'CREATE DICTIONARY\' = 49, \'CREATE TEMPORARY TABLE\' = 50, \'CREATE FUNCTION\' = 51, \'CREATE\' = 52, \'DROP DATABASE\' = 53, \'DROP TABLE\' = 54, \'DROP VIEW\' = 55, \'DROP DICTIONARY\' = 56, \'DROP FUNCTION\' = 57, \'DROP\' = 58, \'TRUNCATE\' = 59, \'OPTIMIZE\' = 60, \'KILL QUERY\' = 61, \'KILL TRANSACTION\' = 62, \'MOVE PARTITION BETWEEN SHARDS\' = 63, \'CREATE USER\' = 64, \'ALTER USER\' = 65, \'DROP USER\' = 66, \'CREATE ROLE\' = 67, \'ALTER ROLE\' = 68, \'DROP ROLE\' = 69, \'ROLE ADMIN\' = 70, \'CREATE ROW POLICY\' = 71, \'ALTER ROW POLICY\' = 72, \'DROP ROW POLICY\' = 73, \'CREATE QUOTA\' = 74, \'ALTER QUOTA\' = 75, \'DROP QUOTA\' = 76, \'CREATE SETTINGS PROFILE\' = 77, \'ALTER SETTINGS PROFILE\' = 78, \'DROP SETTINGS PROFILE\' = 79, \'SHOW USERS\' = 80, \'SHOW ROLES\' = 81, \'SHOW ROW POLICIES\' = 82, \'SHOW QUOTAS\' = 83, \'SHOW SETTINGS PROFILES\' = 84, \'SHOW ACCESS\' = 85, \'ACCESS MANAGEMENT\' = 86, \'SYSTEM SHUTDOWN\' = 87, \'SYSTEM DROP DNS CACHE\' = 88, \'SYSTEM DROP MARK CACHE\' = 89, \'SYSTEM DROP UNCOMPRESSED CACHE\' = 90, \'SYSTEM DROP MMAP CACHE\' = 91, \'SYSTEM DROP COMPILED EXPRESSION CACHE\' = 92, \'SYSTEM DROP CACHE\' = 93, \'SYSTEM RELOAD CONFIG\' = 94, \'SYSTEM RELOAD SYMBOLS\' = 95, \'SYSTEM RELOAD DICTIONARY\' = 96, \'SYSTEM RELOAD MODEL\' = 97, \'SYSTEM RELOAD FUNCTION\' = 98, \'SYSTEM RELOAD EMBEDDED DICTIONARIES\' = 99, \'SYSTEM RELOAD\' = 100, \'SYSTEM RESTART DISK\' = 101, \'SYSTEM MERGES\' = 102, \'SYSTEM TTL MERGES\' = 103, \'SYSTEM FETCHES\' = 104, \'SYSTEM MOVES\' = 105, \'SYSTEM DISTRIBUTED SENDS\' = 106, \'SYSTEM REPLICATED SENDS\' = 107, \'SYSTEM SENDS\' = 108, \'SYSTEM REPLICATION QUEUES\' = 109, \'SYSTEM DROP REPLICA\' = 110, \'SYSTEM SYNC REPLICA\' = 111, \'SYSTEM RESTART REPLICA\' = 112, \'SYSTEM RESTORE REPLICA\' = 113, \'SYSTEM FLUSH DISTRIBUTED\' = 114, \'SYSTEM FLUSH LOGS\' = 115, \'SYSTEM FLUSH\' = 116, \'SYSTEM THREAD FUZZER\' = 117, \'SYSTEM\' = 118, \'dictGet\' = 119, \'addressToLine\' = 120, \'addressToLineWithInlines\' = 121, \'addressToSymbol\' = 122, \'demangle\' = 123, \'INTROSPECTION\' = 124, \'FILE\' = 125, \'URL\' = 126, 
\'REMOTE\' = 127, \'MONGO\' = 128, \'MYSQL\' = 129, \'POSTGRES\' = 130, \'SQLITE\' = 131, \'ODBC\' = 132, \'JDBC\' = 133, \'HDFS\' = 134, \'S3\' = 135, \'SOURCES\' = 136, \'ALL\' = 137, \'NONE\' = 138),\n `database` Nullable(String),\n `table` Nullable(String),\n `column` Nullable(String),\n `is_partial_revoke` UInt8,\n `grant_option` UInt8\n)\nENGINE = SystemGrants()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
+CREATE TABLE system.grants\n(\n `user_name` Nullable(String),\n `role_name` Nullable(String),\n `access_type` Enum16(\'SHOW DATABASES\' = 0, \'SHOW TABLES\' = 1, \'SHOW COLUMNS\' = 2, \'SHOW DICTIONARIES\' = 3, \'SHOW\' = 4, \'SELECT\' = 5, \'INSERT\' = 6, \'ALTER UPDATE\' = 7, \'ALTER DELETE\' = 8, \'ALTER ADD COLUMN\' = 9, \'ALTER MODIFY COLUMN\' = 10, \'ALTER DROP COLUMN\' = 11, \'ALTER COMMENT COLUMN\' = 12, \'ALTER CLEAR COLUMN\' = 13, \'ALTER RENAME COLUMN\' = 14, \'ALTER MATERIALIZE COLUMN\' = 15, \'ALTER COLUMN\' = 16, \'ALTER MODIFY COMMENT\' = 17, \'ALTER ORDER BY\' = 18, \'ALTER SAMPLE BY\' = 19, \'ALTER ADD INDEX\' = 20, \'ALTER DROP INDEX\' = 21, \'ALTER MATERIALIZE INDEX\' = 22, \'ALTER CLEAR INDEX\' = 23, \'ALTER INDEX\' = 24, \'ALTER ADD PROJECTION\' = 25, \'ALTER DROP PROJECTION\' = 26, \'ALTER MATERIALIZE PROJECTION\' = 27, \'ALTER CLEAR PROJECTION\' = 28, \'ALTER PROJECTION\' = 29, \'ALTER ADD CONSTRAINT\' = 30, \'ALTER DROP CONSTRAINT\' = 31, \'ALTER CONSTRAINT\' = 32, \'ALTER TTL\' = 33, \'ALTER MATERIALIZE TTL\' = 34, \'ALTER SETTINGS\' = 35, \'ALTER MOVE PARTITION\' = 36, \'ALTER FETCH PARTITION\' = 37, \'ALTER FREEZE PARTITION\' = 38, \'ALTER DATABASE SETTINGS\' = 39, \'ALTER TABLE\' = 40, \'ALTER DATABASE\' = 41, \'ALTER VIEW REFRESH\' = 42, \'ALTER VIEW MODIFY QUERY\' = 43, \'ALTER VIEW\' = 44, \'ALTER\' = 45, \'CREATE DATABASE\' = 46, \'CREATE TABLE\' = 47, \'CREATE VIEW\' = 48, \'CREATE DICTIONARY\' = 49, \'CREATE TEMPORARY TABLE\' = 50, \'CREATE FUNCTION\' = 51, \'CREATE\' = 52, \'DROP DATABASE\' = 53, \'DROP TABLE\' = 54, \'DROP VIEW\' = 55, \'DROP DICTIONARY\' = 56, \'DROP FUNCTION\' = 57, \'DROP\' = 58, \'TRUNCATE\' = 59, \'OPTIMIZE\' = 60, \'KILL QUERY\' = 61, \'KILL TRANSACTION\' = 62, \'MOVE PARTITION BETWEEN SHARDS\' = 63, \'CREATE USER\' = 64, \'ALTER USER\' = 65, \'DROP USER\' = 66, \'CREATE ROLE\' = 67, \'ALTER ROLE\' = 68, \'DROP ROLE\' = 69, \'ROLE ADMIN\' = 70, \'CREATE ROW POLICY\' = 71, \'ALTER ROW POLICY\' = 72, \'DROP ROW POLICY\' = 73, \'CREATE QUOTA\' = 74, \'ALTER QUOTA\' = 75, \'DROP QUOTA\' = 76, \'CREATE SETTINGS PROFILE\' = 77, \'ALTER SETTINGS PROFILE\' = 78, \'DROP SETTINGS PROFILE\' = 79, \'SHOW USERS\' = 80, \'SHOW ROLES\' = 81, \'SHOW ROW POLICIES\' = 82, \'SHOW QUOTAS\' = 83, \'SHOW SETTINGS PROFILES\' = 84, \'SHOW ACCESS\' = 85, \'ACCESS MANAGEMENT\' = 86, \'SYSTEM SHUTDOWN\' = 87, \'SYSTEM DROP DNS CACHE\' = 88, \'SYSTEM DROP MARK CACHE\' = 89, \'SYSTEM DROP UNCOMPRESSED CACHE\' = 90, \'SYSTEM DROP MMAP CACHE\' = 91, \'SYSTEM DROP COMPILED EXPRESSION CACHE\' = 92, \'SYSTEM DROP CACHE\' = 93, \'SYSTEM RELOAD CONFIG\' = 94, \'SYSTEM RELOAD SYMBOLS\' = 95, \'SYSTEM RELOAD DICTIONARY\' = 96, \'SYSTEM RELOAD MODEL\' = 97, \'SYSTEM RELOAD FUNCTION\' = 98, \'SYSTEM RELOAD EMBEDDED DICTIONARIES\' = 99, \'SYSTEM RELOAD\' = 100, \'SYSTEM RESTART DISK\' = 101, \'SYSTEM MERGES\' = 102, \'SYSTEM TTL MERGES\' = 103, \'SYSTEM FETCHES\' = 104, \'SYSTEM MOVES\' = 105, \'SYSTEM DISTRIBUTED SENDS\' = 106, \'SYSTEM REPLICATED SENDS\' = 107, \'SYSTEM SENDS\' = 108, \'SYSTEM REPLICATION QUEUES\' = 109, \'SYSTEM DROP REPLICA\' = 110, \'SYSTEM SYNC REPLICA\' = 111, \'SYSTEM RESTART REPLICA\' = 112, \'SYSTEM RESTORE REPLICA\' = 113, \'SYSTEM FLUSH DISTRIBUTED\' = 114, \'SYSTEM FLUSH LOGS\' = 115, \'SYSTEM FLUSH\' = 116, \'SYSTEM THREAD FUZZER\' = 117, \'SYSTEM\' = 118, \'dictGet\' = 119, \'addressToLine\' = 120, \'addressToLineWithInlines\' = 121, \'addressToSymbol\' = 122, \'demangle\' = 123, \'INTROSPECTION\' = 124, \'FILE\' = 125, \'URL\' = 126, 
\'REMOTE\' = 127, \'MONGO\' = 128, \'MYSQL\' = 129, \'POSTGRES\' = 130, \'SQLITE\' = 131, \'ODBC\' = 132, \'JDBC\' = 133, \'HDFS\' = 134, \'S3\' = 135, \'HIVE\' = 136, \'SOURCES\' = 137, \'ALL\' = 138, \'NONE\' = 139),\n `database` Nullable(String),\n `table` Nullable(String),\n `column` Nullable(String),\n `is_partial_revoke` UInt8,\n `grant_option` UInt8\n)\nENGINE = SystemGrants()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.graphite_retentions\n(\n `config_name` String,\n `rule_type` String,\n `regexp` String,\n `function` String,\n `age` UInt64,\n `precision` UInt64,\n `priority` UInt16,\n `is_default` UInt8,\n `Tables.database` Array(String),\n `Tables.table` Array(String)\n)\nENGINE = SystemGraphite()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.licenses\n(\n `library_name` String,\n `license_type` String,\n `license_path` String,\n `license_text` String\n)\nENGINE = SystemLicenses()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.macros\n(\n `macro` String,\n `substitution` String\n)\nENGINE = SystemMacros()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
@@ -35,7 +35,7 @@ CREATE TABLE system.one\n(\n `dummy` UInt8\n)\nENGINE = SystemOne()\nCOMMENT
CREATE TABLE system.part_moves_between_shards\n(\n `database` String,\n `table` String,\n `task_name` String,\n `task_uuid` UUID,\n `create_time` DateTime,\n `part_name` String,\n `part_uuid` UUID,\n `to_shard` String,\n `dst_part_name` String,\n `update_time` DateTime,\n `state` String,\n `rollback` UInt8,\n `num_tries` UInt32,\n `last_exception` String\n)\nENGINE = SystemShardMoves()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.parts\n(\n `partition` String,\n `name` String,\n `uuid` UUID,\n `part_type` String,\n `active` UInt8,\n `marks` UInt64,\n `rows` UInt64,\n `bytes_on_disk` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n `secondary_indices_compressed_bytes` UInt64,\n `secondary_indices_uncompressed_bytes` UInt64,\n `secondary_indices_marks_bytes` UInt64,\n `modification_time` DateTime,\n `remove_time` DateTime,\n `refcount` UInt32,\n `min_date` Date,\n `max_date` Date,\n `min_time` DateTime,\n `max_time` DateTime,\n `partition_id` String,\n `min_block_number` Int64,\n `max_block_number` Int64,\n `level` UInt32,\n `data_version` UInt64,\n `primary_key_bytes_in_memory` UInt64,\n `primary_key_bytes_in_memory_allocated` UInt64,\n `is_frozen` UInt8,\n `database` String,\n `table` String,\n `engine` String,\n `disk_name` String,\n `path` String,\n `hash_of_all_files` String,\n `hash_of_uncompressed_files` String,\n `uncompressed_hash_of_compressed_files` String,\n `delete_ttl_info_min` DateTime,\n `delete_ttl_info_max` DateTime,\n `move_ttl_info.expression` Array(String),\n `move_ttl_info.min` Array(DateTime),\n `move_ttl_info.max` Array(DateTime),\n `default_compression_codec` String,\n `recompression_ttl_info.expression` Array(String),\n `recompression_ttl_info.min` Array(DateTime),\n `recompression_ttl_info.max` Array(DateTime),\n `group_by_ttl_info.expression` Array(String),\n `group_by_ttl_info.min` Array(DateTime),\n `group_by_ttl_info.max` Array(DateTime),\n `rows_where_ttl_info.expression` Array(String),\n `rows_where_ttl_info.min` Array(DateTime),\n `rows_where_ttl_info.max` Array(DateTime),\n `projections` Array(String),\n `visible` UInt8,\n `creation_tid` Tuple(UInt64, UInt64, UUID),\n `removal_tid` Tuple(UInt64, UInt64, UUID),\n `creation_csn` UInt64,\n `removal_csn` UInt64,\n `bytes` UInt64,\n `marks_size` UInt64\n)\nENGINE = SystemParts()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.parts_columns\n(\n `partition` String,\n `name` String,\n `uuid` UUID,\n `part_type` String,\n `active` UInt8,\n `marks` UInt64,\n `rows` UInt64,\n `bytes_on_disk` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n `modification_time` DateTime,\n `remove_time` DateTime,\n `refcount` UInt32,\n `min_date` Date,\n `max_date` Date,\n `min_time` DateTime,\n `max_time` DateTime,\n `partition_id` String,\n `min_block_number` Int64,\n `max_block_number` Int64,\n `level` UInt32,\n `data_version` UInt64,\n `primary_key_bytes_in_memory` UInt64,\n `primary_key_bytes_in_memory_allocated` UInt64,\n `database` String,\n `table` String,\n `engine` String,\n `disk_name` String,\n `path` String,\n `column` String,\n `type` String,\n `column_position` UInt64,\n `default_kind` String,\n `default_expression` String,\n `column_bytes_on_disk` UInt64,\n `column_data_compressed_bytes` UInt64,\n `column_data_uncompressed_bytes` UInt64,\n `column_marks_bytes` UInt64,\n `serialization_kind` String,\n `subcolumns.names` Array(String),\n `subcolumns.types` Array(String),\n `subcolumns.serializations` Array(String),\n `subcolumns.bytes_on_disk` Array(UInt64),\n `subcolumns.data_compressed_bytes` Array(UInt64),\n `subcolumns.data_uncompressed_bytes` Array(UInt64),\n `subcolumns.marks_bytes` Array(UInt64),\n `bytes` UInt64,\n `marks_size` UInt64\n)\nENGINE = SystemPartsColumns()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
-CREATE TABLE system.privileges\n(\n `privilege` Enum16(\'SHOW DATABASES\' = 0, \'SHOW TABLES\' = 1, \'SHOW COLUMNS\' = 2, \'SHOW DICTIONARIES\' = 3, \'SHOW\' = 4, \'SELECT\' = 5, \'INSERT\' = 6, \'ALTER UPDATE\' = 7, \'ALTER DELETE\' = 8, \'ALTER ADD COLUMN\' = 9, \'ALTER MODIFY COLUMN\' = 10, \'ALTER DROP COLUMN\' = 11, \'ALTER COMMENT COLUMN\' = 12, \'ALTER CLEAR COLUMN\' = 13, \'ALTER RENAME COLUMN\' = 14, \'ALTER MATERIALIZE COLUMN\' = 15, \'ALTER COLUMN\' = 16, \'ALTER MODIFY COMMENT\' = 17, \'ALTER ORDER BY\' = 18, \'ALTER SAMPLE BY\' = 19, \'ALTER ADD INDEX\' = 20, \'ALTER DROP INDEX\' = 21, \'ALTER MATERIALIZE INDEX\' = 22, \'ALTER CLEAR INDEX\' = 23, \'ALTER INDEX\' = 24, \'ALTER ADD PROJECTION\' = 25, \'ALTER DROP PROJECTION\' = 26, \'ALTER MATERIALIZE PROJECTION\' = 27, \'ALTER CLEAR PROJECTION\' = 28, \'ALTER PROJECTION\' = 29, \'ALTER ADD CONSTRAINT\' = 30, \'ALTER DROP CONSTRAINT\' = 31, \'ALTER CONSTRAINT\' = 32, \'ALTER TTL\' = 33, \'ALTER MATERIALIZE TTL\' = 34, \'ALTER SETTINGS\' = 35, \'ALTER MOVE PARTITION\' = 36, \'ALTER FETCH PARTITION\' = 37, \'ALTER FREEZE PARTITION\' = 38, \'ALTER DATABASE SETTINGS\' = 39, \'ALTER TABLE\' = 40, \'ALTER DATABASE\' = 41, \'ALTER VIEW REFRESH\' = 42, \'ALTER VIEW MODIFY QUERY\' = 43, \'ALTER VIEW\' = 44, \'ALTER\' = 45, \'CREATE DATABASE\' = 46, \'CREATE TABLE\' = 47, \'CREATE VIEW\' = 48, \'CREATE DICTIONARY\' = 49, \'CREATE TEMPORARY TABLE\' = 50, \'CREATE FUNCTION\' = 51, \'CREATE\' = 52, \'DROP DATABASE\' = 53, \'DROP TABLE\' = 54, \'DROP VIEW\' = 55, \'DROP DICTIONARY\' = 56, \'DROP FUNCTION\' = 57, \'DROP\' = 58, \'TRUNCATE\' = 59, \'OPTIMIZE\' = 60, \'KILL QUERY\' = 61, \'KILL TRANSACTION\' = 62, \'MOVE PARTITION BETWEEN SHARDS\' = 63, \'CREATE USER\' = 64, \'ALTER USER\' = 65, \'DROP USER\' = 66, \'CREATE ROLE\' = 67, \'ALTER ROLE\' = 68, \'DROP ROLE\' = 69, \'ROLE ADMIN\' = 70, \'CREATE ROW POLICY\' = 71, \'ALTER ROW POLICY\' = 72, \'DROP ROW POLICY\' = 73, \'CREATE QUOTA\' = 74, \'ALTER QUOTA\' = 75, \'DROP QUOTA\' = 76, \'CREATE SETTINGS PROFILE\' = 77, \'ALTER SETTINGS PROFILE\' = 78, \'DROP SETTINGS PROFILE\' = 79, \'SHOW USERS\' = 80, \'SHOW ROLES\' = 81, \'SHOW ROW POLICIES\' = 82, \'SHOW QUOTAS\' = 83, \'SHOW SETTINGS PROFILES\' = 84, \'SHOW ACCESS\' = 85, \'ACCESS MANAGEMENT\' = 86, \'SYSTEM SHUTDOWN\' = 87, \'SYSTEM DROP DNS CACHE\' = 88, \'SYSTEM DROP MARK CACHE\' = 89, \'SYSTEM DROP UNCOMPRESSED CACHE\' = 90, \'SYSTEM DROP MMAP CACHE\' = 91, \'SYSTEM DROP COMPILED EXPRESSION CACHE\' = 92, \'SYSTEM DROP CACHE\' = 93, \'SYSTEM RELOAD CONFIG\' = 94, \'SYSTEM RELOAD SYMBOLS\' = 95, \'SYSTEM RELOAD DICTIONARY\' = 96, \'SYSTEM RELOAD MODEL\' = 97, \'SYSTEM RELOAD FUNCTION\' = 98, \'SYSTEM RELOAD EMBEDDED DICTIONARIES\' = 99, \'SYSTEM RELOAD\' = 100, \'SYSTEM RESTART DISK\' = 101, \'SYSTEM MERGES\' = 102, \'SYSTEM TTL MERGES\' = 103, \'SYSTEM FETCHES\' = 104, \'SYSTEM MOVES\' = 105, \'SYSTEM DISTRIBUTED SENDS\' = 106, \'SYSTEM REPLICATED SENDS\' = 107, \'SYSTEM SENDS\' = 108, \'SYSTEM REPLICATION QUEUES\' = 109, \'SYSTEM DROP REPLICA\' = 110, \'SYSTEM SYNC REPLICA\' = 111, \'SYSTEM RESTART REPLICA\' = 112, \'SYSTEM RESTORE REPLICA\' = 113, \'SYSTEM FLUSH DISTRIBUTED\' = 114, \'SYSTEM FLUSH LOGS\' = 115, \'SYSTEM FLUSH\' = 116, \'SYSTEM THREAD FUZZER\' = 117, \'SYSTEM\' = 118, \'dictGet\' = 119, \'addressToLine\' = 120, \'addressToLineWithInlines\' = 121, \'addressToSymbol\' = 122, \'demangle\' = 123, \'INTROSPECTION\' = 124, \'FILE\' = 125, \'URL\' = 126, \'REMOTE\' = 127, \'MONGO\' = 128, \'MYSQL\' = 129, \'POSTGRES\' = 
130, \'SQLITE\' = 131, \'ODBC\' = 132, \'JDBC\' = 133, \'HDFS\' = 134, \'S3\' = 135, \'SOURCES\' = 136, \'ALL\' = 137, \'NONE\' = 138),\n `aliases` Array(String),\n `level` Nullable(Enum8(\'GLOBAL\' = 0, \'DATABASE\' = 1, \'TABLE\' = 2, \'DICTIONARY\' = 3, \'VIEW\' = 4, \'COLUMN\' = 5)),\n `parent_group` Nullable(Enum16(\'SHOW DATABASES\' = 0, \'SHOW TABLES\' = 1, \'SHOW COLUMNS\' = 2, \'SHOW DICTIONARIES\' = 3, \'SHOW\' = 4, \'SELECT\' = 5, \'INSERT\' = 6, \'ALTER UPDATE\' = 7, \'ALTER DELETE\' = 8, \'ALTER ADD COLUMN\' = 9, \'ALTER MODIFY COLUMN\' = 10, \'ALTER DROP COLUMN\' = 11, \'ALTER COMMENT COLUMN\' = 12, \'ALTER CLEAR COLUMN\' = 13, \'ALTER RENAME COLUMN\' = 14, \'ALTER MATERIALIZE COLUMN\' = 15, \'ALTER COLUMN\' = 16, \'ALTER MODIFY COMMENT\' = 17, \'ALTER ORDER BY\' = 18, \'ALTER SAMPLE BY\' = 19, \'ALTER ADD INDEX\' = 20, \'ALTER DROP INDEX\' = 21, \'ALTER MATERIALIZE INDEX\' = 22, \'ALTER CLEAR INDEX\' = 23, \'ALTER INDEX\' = 24, \'ALTER ADD PROJECTION\' = 25, \'ALTER DROP PROJECTION\' = 26, \'ALTER MATERIALIZE PROJECTION\' = 27, \'ALTER CLEAR PROJECTION\' = 28, \'ALTER PROJECTION\' = 29, \'ALTER ADD CONSTRAINT\' = 30, \'ALTER DROP CONSTRAINT\' = 31, \'ALTER CONSTRAINT\' = 32, \'ALTER TTL\' = 33, \'ALTER MATERIALIZE TTL\' = 34, \'ALTER SETTINGS\' = 35, \'ALTER MOVE PARTITION\' = 36, \'ALTER FETCH PARTITION\' = 37, \'ALTER FREEZE PARTITION\' = 38, \'ALTER DATABASE SETTINGS\' = 39, \'ALTER TABLE\' = 40, \'ALTER DATABASE\' = 41, \'ALTER VIEW REFRESH\' = 42, \'ALTER VIEW MODIFY QUERY\' = 43, \'ALTER VIEW\' = 44, \'ALTER\' = 45, \'CREATE DATABASE\' = 46, \'CREATE TABLE\' = 47, \'CREATE VIEW\' = 48, \'CREATE DICTIONARY\' = 49, \'CREATE TEMPORARY TABLE\' = 50, \'CREATE FUNCTION\' = 51, \'CREATE\' = 52, \'DROP DATABASE\' = 53, \'DROP TABLE\' = 54, \'DROP VIEW\' = 55, \'DROP DICTIONARY\' = 56, \'DROP FUNCTION\' = 57, \'DROP\' = 58, \'TRUNCATE\' = 59, \'OPTIMIZE\' = 60, \'KILL QUERY\' = 61, \'KILL TRANSACTION\' = 62, \'MOVE PARTITION BETWEEN SHARDS\' = 63, \'CREATE USER\' = 64, \'ALTER USER\' = 65, \'DROP USER\' = 66, \'CREATE ROLE\' = 67, \'ALTER ROLE\' = 68, \'DROP ROLE\' = 69, \'ROLE ADMIN\' = 70, \'CREATE ROW POLICY\' = 71, \'ALTER ROW POLICY\' = 72, \'DROP ROW POLICY\' = 73, \'CREATE QUOTA\' = 74, \'ALTER QUOTA\' = 75, \'DROP QUOTA\' = 76, \'CREATE SETTINGS PROFILE\' = 77, \'ALTER SETTINGS PROFILE\' = 78, \'DROP SETTINGS PROFILE\' = 79, \'SHOW USERS\' = 80, \'SHOW ROLES\' = 81, \'SHOW ROW POLICIES\' = 82, \'SHOW QUOTAS\' = 83, \'SHOW SETTINGS PROFILES\' = 84, \'SHOW ACCESS\' = 85, \'ACCESS MANAGEMENT\' = 86, \'SYSTEM SHUTDOWN\' = 87, \'SYSTEM DROP DNS CACHE\' = 88, \'SYSTEM DROP MARK CACHE\' = 89, \'SYSTEM DROP UNCOMPRESSED CACHE\' = 90, \'SYSTEM DROP MMAP CACHE\' = 91, \'SYSTEM DROP COMPILED EXPRESSION CACHE\' = 92, \'SYSTEM DROP CACHE\' = 93, \'SYSTEM RELOAD CONFIG\' = 94, \'SYSTEM RELOAD SYMBOLS\' = 95, \'SYSTEM RELOAD DICTIONARY\' = 96, \'SYSTEM RELOAD MODEL\' = 97, \'SYSTEM RELOAD FUNCTION\' = 98, \'SYSTEM RELOAD EMBEDDED DICTIONARIES\' = 99, \'SYSTEM RELOAD\' = 100, \'SYSTEM RESTART DISK\' = 101, \'SYSTEM MERGES\' = 102, \'SYSTEM TTL MERGES\' = 103, \'SYSTEM FETCHES\' = 104, \'SYSTEM MOVES\' = 105, \'SYSTEM DISTRIBUTED SENDS\' = 106, \'SYSTEM REPLICATED SENDS\' = 107, \'SYSTEM SENDS\' = 108, \'SYSTEM REPLICATION QUEUES\' = 109, \'SYSTEM DROP REPLICA\' = 110, \'SYSTEM SYNC REPLICA\' = 111, \'SYSTEM RESTART REPLICA\' = 112, \'SYSTEM RESTORE REPLICA\' = 113, \'SYSTEM FLUSH DISTRIBUTED\' = 114, \'SYSTEM FLUSH LOGS\' = 115, \'SYSTEM FLUSH\' = 116, \'SYSTEM THREAD FUZZER\' = 117, 
\'SYSTEM\' = 118, \'dictGet\' = 119, \'addressToLine\' = 120, \'addressToLineWithInlines\' = 121, \'addressToSymbol\' = 122, \'demangle\' = 123, \'INTROSPECTION\' = 124, \'FILE\' = 125, \'URL\' = 126, \'REMOTE\' = 127, \'MONGO\' = 128, \'MYSQL\' = 129, \'POSTGRES\' = 130, \'SQLITE\' = 131, \'ODBC\' = 132, \'JDBC\' = 133, \'HDFS\' = 134, \'S3\' = 135, \'SOURCES\' = 136, \'ALL\' = 137, \'NONE\' = 138))\n)\nENGINE = SystemPrivileges()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
+CREATE TABLE system.privileges\n(\n `privilege` Enum16(\'SHOW DATABASES\' = 0, \'SHOW TABLES\' = 1, \'SHOW COLUMNS\' = 2, \'SHOW DICTIONARIES\' = 3, \'SHOW\' = 4, \'SELECT\' = 5, \'INSERT\' = 6, \'ALTER UPDATE\' = 7, \'ALTER DELETE\' = 8, \'ALTER ADD COLUMN\' = 9, \'ALTER MODIFY COLUMN\' = 10, \'ALTER DROP COLUMN\' = 11, \'ALTER COMMENT COLUMN\' = 12, \'ALTER CLEAR COLUMN\' = 13, \'ALTER RENAME COLUMN\' = 14, \'ALTER MATERIALIZE COLUMN\' = 15, \'ALTER COLUMN\' = 16, \'ALTER MODIFY COMMENT\' = 17, \'ALTER ORDER BY\' = 18, \'ALTER SAMPLE BY\' = 19, \'ALTER ADD INDEX\' = 20, \'ALTER DROP INDEX\' = 21, \'ALTER MATERIALIZE INDEX\' = 22, \'ALTER CLEAR INDEX\' = 23, \'ALTER INDEX\' = 24, \'ALTER ADD PROJECTION\' = 25, \'ALTER DROP PROJECTION\' = 26, \'ALTER MATERIALIZE PROJECTION\' = 27, \'ALTER CLEAR PROJECTION\' = 28, \'ALTER PROJECTION\' = 29, \'ALTER ADD CONSTRAINT\' = 30, \'ALTER DROP CONSTRAINT\' = 31, \'ALTER CONSTRAINT\' = 32, \'ALTER TTL\' = 33, \'ALTER MATERIALIZE TTL\' = 34, \'ALTER SETTINGS\' = 35, \'ALTER MOVE PARTITION\' = 36, \'ALTER FETCH PARTITION\' = 37, \'ALTER FREEZE PARTITION\' = 38, \'ALTER DATABASE SETTINGS\' = 39, \'ALTER TABLE\' = 40, \'ALTER DATABASE\' = 41, \'ALTER VIEW REFRESH\' = 42, \'ALTER VIEW MODIFY QUERY\' = 43, \'ALTER VIEW\' = 44, \'ALTER\' = 45, \'CREATE DATABASE\' = 46, \'CREATE TABLE\' = 47, \'CREATE VIEW\' = 48, \'CREATE DICTIONARY\' = 49, \'CREATE TEMPORARY TABLE\' = 50, \'CREATE FUNCTION\' = 51, \'CREATE\' = 52, \'DROP DATABASE\' = 53, \'DROP TABLE\' = 54, \'DROP VIEW\' = 55, \'DROP DICTIONARY\' = 56, \'DROP FUNCTION\' = 57, \'DROP\' = 58, \'TRUNCATE\' = 59, \'OPTIMIZE\' = 60, \'KILL QUERY\' = 61, \'KILL TRANSACTION\' = 62, \'MOVE PARTITION BETWEEN SHARDS\' = 63, \'CREATE USER\' = 64, \'ALTER USER\' = 65, \'DROP USER\' = 66, \'CREATE ROLE\' = 67, \'ALTER ROLE\' = 68, \'DROP ROLE\' = 69, \'ROLE ADMIN\' = 70, \'CREATE ROW POLICY\' = 71, \'ALTER ROW POLICY\' = 72, \'DROP ROW POLICY\' = 73, \'CREATE QUOTA\' = 74, \'ALTER QUOTA\' = 75, \'DROP QUOTA\' = 76, \'CREATE SETTINGS PROFILE\' = 77, \'ALTER SETTINGS PROFILE\' = 78, \'DROP SETTINGS PROFILE\' = 79, \'SHOW USERS\' = 80, \'SHOW ROLES\' = 81, \'SHOW ROW POLICIES\' = 82, \'SHOW QUOTAS\' = 83, \'SHOW SETTINGS PROFILES\' = 84, \'SHOW ACCESS\' = 85, \'ACCESS MANAGEMENT\' = 86, \'SYSTEM SHUTDOWN\' = 87, \'SYSTEM DROP DNS CACHE\' = 88, \'SYSTEM DROP MARK CACHE\' = 89, \'SYSTEM DROP UNCOMPRESSED CACHE\' = 90, \'SYSTEM DROP MMAP CACHE\' = 91, \'SYSTEM DROP COMPILED EXPRESSION CACHE\' = 92, \'SYSTEM DROP CACHE\' = 93, \'SYSTEM RELOAD CONFIG\' = 94, \'SYSTEM RELOAD SYMBOLS\' = 95, \'SYSTEM RELOAD DICTIONARY\' = 96, \'SYSTEM RELOAD MODEL\' = 97, \'SYSTEM RELOAD FUNCTION\' = 98, \'SYSTEM RELOAD EMBEDDED DICTIONARIES\' = 99, \'SYSTEM RELOAD\' = 100, \'SYSTEM RESTART DISK\' = 101, \'SYSTEM MERGES\' = 102, \'SYSTEM TTL MERGES\' = 103, \'SYSTEM FETCHES\' = 104, \'SYSTEM MOVES\' = 105, \'SYSTEM DISTRIBUTED SENDS\' = 106, \'SYSTEM REPLICATED SENDS\' = 107, \'SYSTEM SENDS\' = 108, \'SYSTEM REPLICATION QUEUES\' = 109, \'SYSTEM DROP REPLICA\' = 110, \'SYSTEM SYNC REPLICA\' = 111, \'SYSTEM RESTART REPLICA\' = 112, \'SYSTEM RESTORE REPLICA\' = 113, \'SYSTEM FLUSH DISTRIBUTED\' = 114, \'SYSTEM FLUSH LOGS\' = 115, \'SYSTEM FLUSH\' = 116, \'SYSTEM THREAD FUZZER\' = 117, \'SYSTEM\' = 118, \'dictGet\' = 119, \'addressToLine\' = 120, \'addressToLineWithInlines\' = 121, \'addressToSymbol\' = 122, \'demangle\' = 123, \'INTROSPECTION\' = 124, \'FILE\' = 125, \'URL\' = 126, \'REMOTE\' = 127, \'MONGO\' = 128, \'MYSQL\' = 129, \'POSTGRES\' = 130, \'SQLITE\' = 131, \'ODBC\' = 132, \'JDBC\' = 133, \'HDFS\' = 134, \'S3\' = 135, \'HIVE\' = 136, \'SOURCES\' = 137, \'ALL\' = 138, \'NONE\' = 139),\n `aliases` Array(String),\n `level` Nullable(Enum8(\'GLOBAL\' = 0, \'DATABASE\' = 1, \'TABLE\' = 2, \'DICTIONARY\' = 3, \'VIEW\' = 4, \'COLUMN\' = 5)),\n `parent_group` Nullable(Enum16(\'SHOW DATABASES\' = 0, \'SHOW TABLES\' = 1, \'SHOW COLUMNS\' = 2, \'SHOW DICTIONARIES\' = 3, \'SHOW\' = 4, \'SELECT\' = 5, \'INSERT\' = 6, \'ALTER UPDATE\' = 7, \'ALTER DELETE\' = 8, \'ALTER ADD COLUMN\' = 9, \'ALTER MODIFY COLUMN\' = 10, \'ALTER DROP COLUMN\' = 11, \'ALTER COMMENT COLUMN\' = 12, \'ALTER CLEAR COLUMN\' = 13, \'ALTER RENAME COLUMN\' = 14, \'ALTER MATERIALIZE COLUMN\' = 15, \'ALTER COLUMN\' = 16, \'ALTER MODIFY COMMENT\' = 17, \'ALTER ORDER BY\' = 18, \'ALTER SAMPLE BY\' = 19, \'ALTER ADD INDEX\' = 20, \'ALTER DROP INDEX\' = 21, \'ALTER MATERIALIZE INDEX\' = 22, \'ALTER CLEAR INDEX\' = 23, \'ALTER INDEX\' = 24, \'ALTER ADD PROJECTION\' = 25, \'ALTER DROP PROJECTION\' = 26, \'ALTER MATERIALIZE PROJECTION\' = 27, \'ALTER CLEAR PROJECTION\' = 28, \'ALTER PROJECTION\' = 29, \'ALTER ADD CONSTRAINT\' = 30, \'ALTER DROP CONSTRAINT\' = 31, \'ALTER CONSTRAINT\' = 32, \'ALTER TTL\' = 33, \'ALTER MATERIALIZE TTL\' = 34, \'ALTER SETTINGS\' = 35, \'ALTER MOVE PARTITION\' = 36, \'ALTER FETCH PARTITION\' = 37, \'ALTER FREEZE PARTITION\' = 38, \'ALTER DATABASE SETTINGS\' = 39, \'ALTER TABLE\' = 40, \'ALTER DATABASE\' = 41, \'ALTER VIEW REFRESH\' = 42, \'ALTER VIEW MODIFY QUERY\' = 43, \'ALTER VIEW\' = 44, \'ALTER\' = 45, \'CREATE DATABASE\' = 46, \'CREATE TABLE\' = 47, \'CREATE VIEW\' = 48, \'CREATE DICTIONARY\' = 49, \'CREATE TEMPORARY TABLE\' = 50, \'CREATE FUNCTION\' = 51, \'CREATE\' = 52, \'DROP DATABASE\' = 53, \'DROP TABLE\' = 54, \'DROP VIEW\' = 55, \'DROP DICTIONARY\' = 56, \'DROP FUNCTION\' = 57, \'DROP\' = 58, \'TRUNCATE\' = 59, \'OPTIMIZE\' = 60, \'KILL QUERY\' = 61, \'KILL TRANSACTION\' = 62, \'MOVE PARTITION BETWEEN SHARDS\' = 63, \'CREATE USER\' = 64, \'ALTER USER\' = 65, \'DROP USER\' = 66, \'CREATE ROLE\' = 67, \'ALTER ROLE\' = 68, \'DROP ROLE\' = 69, \'ROLE ADMIN\' = 70, \'CREATE ROW POLICY\' = 71, \'ALTER ROW POLICY\' = 72, \'DROP ROW POLICY\' = 73, \'CREATE QUOTA\' = 74, \'ALTER QUOTA\' = 75, \'DROP QUOTA\' = 76, \'CREATE SETTINGS PROFILE\' = 77, \'ALTER SETTINGS PROFILE\' = 78, \'DROP SETTINGS PROFILE\' = 79, \'SHOW USERS\' = 80, \'SHOW ROLES\' = 81, \'SHOW ROW POLICIES\' = 82, \'SHOW QUOTAS\' = 83, \'SHOW SETTINGS PROFILES\' = 84, \'SHOW ACCESS\' = 85, \'ACCESS MANAGEMENT\' = 86, \'SYSTEM SHUTDOWN\' = 87, \'SYSTEM DROP DNS CACHE\' = 88, \'SYSTEM DROP MARK CACHE\' = 89, \'SYSTEM DROP UNCOMPRESSED CACHE\' = 90, \'SYSTEM DROP MMAP CACHE\' = 91, \'SYSTEM DROP COMPILED EXPRESSION CACHE\' = 92, \'SYSTEM DROP CACHE\' = 93, \'SYSTEM RELOAD CONFIG\' = 94, \'SYSTEM RELOAD SYMBOLS\' = 95, \'SYSTEM RELOAD DICTIONARY\' = 96, \'SYSTEM RELOAD MODEL\' = 97, \'SYSTEM RELOAD FUNCTION\' = 98, \'SYSTEM RELOAD EMBEDDED DICTIONARIES\' = 99, \'SYSTEM RELOAD\' = 100, \'SYSTEM RESTART DISK\' = 101, \'SYSTEM MERGES\' = 102, \'SYSTEM TTL MERGES\' = 103, \'SYSTEM FETCHES\' = 104, \'SYSTEM MOVES\' = 105, \'SYSTEM DISTRIBUTED SENDS\' = 106, \'SYSTEM REPLICATED SENDS\' = 107, \'SYSTEM SENDS\' = 108, \'SYSTEM REPLICATION QUEUES\' = 109, \'SYSTEM DROP REPLICA\' = 110, \'SYSTEM SYNC REPLICA\' = 111, \'SYSTEM RESTART REPLICA\' = 112, \'SYSTEM RESTORE REPLICA\' = 113, \'SYSTEM FLUSH DISTRIBUTED\' = 114, \'SYSTEM FLUSH LOGS\' = 115, \'SYSTEM FLUSH\' = 116, \'SYSTEM THREAD FUZZER\' = 117, \'SYSTEM\' = 118, \'dictGet\' = 119, \'addressToLine\' = 120, \'addressToLineWithInlines\' = 121, \'addressToSymbol\' = 122, \'demangle\' = 123, \'INTROSPECTION\' = 124, \'FILE\' = 125, \'URL\' = 126, \'REMOTE\' = 127, \'MONGO\' = 128, \'MYSQL\' = 129, \'POSTGRES\' = 130, \'SQLITE\' = 131, \'ODBC\' = 132, \'JDBC\' = 133, \'HDFS\' = 134, \'S3\' = 135, \'HIVE\' = 136, \'SOURCES\' = 137, \'ALL\' = 138, \'NONE\' = 139))\n)\nENGINE = SystemPrivileges()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.processes\n(\n `is_initial_query` UInt8,\n `user` String,\n `query_id` String,\n `address` IPv6,\n `port` UInt16,\n `initial_user` String,\n `initial_query_id` String,\n `initial_address` IPv6,\n `initial_port` UInt16,\n `interface` UInt8,\n `os_user` String,\n `client_hostname` String,\n `client_name` String,\n `client_revision` UInt64,\n `client_version_major` UInt64,\n `client_version_minor` UInt64,\n `client_version_patch` UInt64,\n `http_method` UInt8,\n `http_user_agent` String,\n `http_referer` String,\n `forwarded_for` String,\n `quota_key` String,\n `distributed_depth` UInt64,\n `elapsed` Float64,\n `is_cancelled` UInt8,\n `read_rows` UInt64,\n `read_bytes` UInt64,\n `total_rows_approx` UInt64,\n `written_rows` UInt64,\n `written_bytes` UInt64,\n `memory_usage` Int64,\n `peak_memory_usage` Int64,\n `query` String,\n `thread_ids` Array(UInt64),\n `ProfileEvents` Map(String, UInt64),\n `Settings` Map(String, String),\n `current_database` String,\n `ProfileEvents.Names` Array(String),\n `ProfileEvents.Values` Array(UInt64),\n `Settings.Names` Array(String),\n `Settings.Values` Array(String)\n)\nENGINE = SystemProcesses()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.projection_parts\n(\n `partition` String,\n `name` String,\n `part_type` String,\n `parent_name` String,\n `parent_uuid` UUID,\n `parent_part_type` String,\n `active` UInt8,\n `marks` UInt64,\n `rows` UInt64,\n `bytes_on_disk` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n `parent_marks` UInt64,\n `parent_rows` UInt64,\n `parent_bytes_on_disk` UInt64,\n `parent_data_compressed_bytes` UInt64,\n `parent_data_uncompressed_bytes` UInt64,\n `parent_marks_bytes` UInt64,\n `modification_time` DateTime,\n `remove_time` DateTime,\n `refcount` UInt32,\n `min_date` Date,\n `max_date` Date,\n `min_time` DateTime,\n `max_time` DateTime,\n `partition_id` String,\n `min_block_number` Int64,\n `max_block_number` Int64,\n `level` UInt32,\n `data_version` UInt64,\n `primary_key_bytes_in_memory` UInt64,\n `primary_key_bytes_in_memory_allocated` UInt64,\n `is_frozen` UInt8,\n `database` String,\n `table` String,\n `engine` String,\n `disk_name` String,\n `path` String,\n `hash_of_all_files` String,\n `hash_of_uncompressed_files` String,\n `uncompressed_hash_of_compressed_files` String,\n `delete_ttl_info_min` DateTime,\n `delete_ttl_info_max` DateTime,\n `move_ttl_info.expression` Array(String),\n `move_ttl_info.min` Array(DateTime),\n `move_ttl_info.max` Array(DateTime),\n `default_compression_codec` String,\n `recompression_ttl_info.expression` Array(String),\n `recompression_ttl_info.min` Array(DateTime),\n `recompression_ttl_info.max` Array(DateTime),\n `group_by_ttl_info.expression` Array(String),\n `group_by_ttl_info.min` Array(DateTime),\n `group_by_ttl_info.max` Array(DateTime),\n `rows_where_ttl_info.expression` Array(String),\n `rows_where_ttl_info.min` Array(DateTime),\n `rows_where_ttl_info.max` Array(DateTime),\n `bytes` UInt64,\n `marks_size` UInt64\n)\nENGINE = SystemProjectionParts()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
CREATE TABLE system.projection_parts_columns\n(\n `partition` String,\n `name` String,\n `part_type` String,\n `parent_name` String,\n `parent_uuid` UUID,\n `parent_part_type` String,\n `active` UInt8,\n `marks` UInt64,\n `rows` UInt64,\n `bytes_on_disk` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n `parent_marks` UInt64,\n `parent_rows` UInt64,\n `parent_bytes_on_disk` UInt64,\n `parent_data_compressed_bytes` UInt64,\n `parent_data_uncompressed_bytes` UInt64,\n `parent_marks_bytes` UInt64,\n `modification_time` DateTime,\n `remove_time` DateTime,\n `refcount` UInt32,\n `min_date` Date,\n `max_date` Date,\n `min_time` DateTime,\n `max_time` DateTime,\n `partition_id` String,\n `min_block_number` Int64,\n `max_block_number` Int64,\n `level` UInt32,\n `data_version` UInt64,\n `primary_key_bytes_in_memory` UInt64,\n `primary_key_bytes_in_memory_allocated` UInt64,\n `database` String,\n `table` String,\n `engine` String,\n `disk_name` String,\n `path` String,\n `column` String,\n `type` String,\n `column_position` UInt64,\n `default_kind` String,\n `default_expression` String,\n `column_bytes_on_disk` UInt64,\n `column_data_compressed_bytes` UInt64,\n `column_data_uncompressed_bytes` UInt64,\n `column_marks_bytes` UInt64,\n `bytes` UInt64,\n `marks_size` UInt64\n)\nENGINE = SystemProjectionPartsColumns()\nCOMMENT \'SYSTEM TABLE is built on the fly.\'
diff --git a/tests/queries/0_stateless/02235_remote_fs_cache_stress.reference b/tests/queries/0_stateless/02235_remote_fs_cache_stress.reference
new file mode 100644
index 00000000000..1fa6905307d
--- /dev/null
+++ b/tests/queries/0_stateless/02235_remote_fs_cache_stress.reference
@@ -0,0 +1,32 @@
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
+OK
diff --git a/tests/queries/0_stateless/02235_remote_fs_cache_stress.sh b/tests/queries/0_stateless/02235_remote_fs_cache_stress.sh
new file mode 100755
index 00000000000..a5c0ee6ecff
--- /dev/null
+++ b/tests/queries/0_stateless/02235_remote_fs_cache_stress.sh
@@ -0,0 +1,73 @@
+#!/usr/bin/env bash
+# Tags: no-fasttest, long, no-random-settings
+
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CUR_DIR"/../shell_config.sh
+
+
+${CLICKHOUSE_CLIENT} --multiquery --multiline --query="""
+
+DROP TABLE IF EXISTS t_01411;
+DROP TABLE IF EXISTS t_01411_num;
+drop table if exists lc_dict_reading;
+
+CREATE TABLE t_01411(
+ str LowCardinality(String),
+ arr Array(LowCardinality(String)) default [str]
+) ENGINE = MergeTree()
+ORDER BY tuple();
+
+INSERT INTO t_01411 (str) SELECT concat('asdf', toString(number % 10000)) FROM numbers(100000);
+
+CREATE TABLE t_01411_num(
+ num UInt8,
+ arr Array(LowCardinality(Int64)) default [num]
+) ENGINE = MergeTree()
+ORDER BY tuple();
+
+INSERT INTO t_01411_num (num) SELECT number % 1000 FROM numbers(100000);
+
+create table lc_dict_reading (val UInt64, str StringWithDictionary, pat String) engine = MergeTree order by val;
+insert into lc_dict_reading select number, if(number < 8192 * 4, number % 100, number) as s, s from system.numbers limit 100000;
+"""
+
+function go()
+{
+
+${CLICKHOUSE_CLIENT} --multiquery --multiline --query="""
+
+select sum(toUInt64(str)), sum(toUInt64(pat)) from lc_dict_reading where val < 8129 or val > 8192 * 4;
+
+SELECT count() FROM t_01411 WHERE str = 'asdf337';
+SELECT count() FROM t_01411 WHERE arr[1] = 'asdf337';
+SELECT count() FROM t_01411 WHERE has(arr, 'asdf337');
+SELECT count() FROM t_01411 WHERE indexOf(arr, 'asdf337') > 0;
+
+SELECT count() FROM t_01411 WHERE arr[1] = str;
+SELECT count() FROM t_01411 WHERE has(arr, str);
+SELECT count() FROM t_01411 WHERE indexOf(arr, str) > 0;
+
+SELECT count() FROM t_01411_num WHERE num = 42;
+SELECT count() FROM t_01411_num WHERE arr[1] = 42;
+SELECT count() FROM t_01411_num WHERE has(arr, 42);
+SELECT count() FROM t_01411_num WHERE indexOf(arr, 42) > 0;
+
+SELECT count() FROM t_01411_num WHERE arr[1] = num;
+SELECT count() FROM t_01411_num WHERE has(arr, num);
+SELECT count() FROM t_01411_num WHERE indexOf(arr, num) > 0;
+SELECT count() FROM t_01411_num WHERE indexOf(arr, num % 337) > 0;
+
+SELECT indexOf(['a', 'b', 'c'], toLowCardinality('a'));
+SELECT indexOf(['a', 'b', NULL], toLowCardinality('a'));
+"""
+}
+
+for i in `seq 1 32`; do go | grep -q "Exception" && echo 'FAIL' || echo 'OK' ||: & done
+
+wait
+
+${CLICKHOUSE_CLIENT} --multiquery --multiline --query="""
+DROP TABLE IF EXISTS t_01411;
+DROP TABLE IF EXISTS t_01411_num;
+"""
diff --git a/tests/queries/0_stateless/02245_s3_virtual_columns.reference b/tests/queries/0_stateless/02245_s3_virtual_columns.reference
new file mode 100644
index 00000000000..09383c51888
--- /dev/null
+++ b/tests/queries/0_stateless/02245_s3_virtual_columns.reference
@@ -0,0 +1,15 @@
+-- { echo }
+drop table if exists test_02245;
+create table test_02245 (a UInt64) engine = S3(s3_conn, filename='test_02245', format=Parquet);
+insert into test_02245 select 1 settings s3_truncate_on_insert=1;
+select * from test_02245;
+1
+select _path from test_02245;
+test/test_02245
+drop table if exists test_02245_2;
+create table test_02245_2 (a UInt64, _path Int32) engine = S3(s3_conn, filename='test_02245_2', format=Parquet);
+insert into test_02245_2 select 1, 2 settings s3_truncate_on_insert=1;
+select * from test_02245_2;
+1 2
+select _path from test_02245_2;
+2
diff --git a/tests/queries/0_stateless/02245_s3_virtual_columns.sql b/tests/queries/0_stateless/02245_s3_virtual_columns.sql
new file mode 100644
index 00000000000..e86344d2094
--- /dev/null
+++ b/tests/queries/0_stateless/02245_s3_virtual_columns.sql
@@ -0,0 +1,15 @@
+-- Tags: no-fasttest
+-- Tag no-fasttest: Depends on AWS
+
+-- { echo }
+drop table if exists test_02245;
+create table test_02245 (a UInt64) engine = S3(s3_conn, filename='test_02245', format=Parquet);
+insert into test_02245 select 1 settings s3_truncate_on_insert=1;
+select * from test_02245;
+select _path from test_02245;
+
+drop table if exists test_02245_2;
+create table test_02245_2 (a UInt64, _path Int32) engine = S3(s3_conn, filename='test_02245_2', format=Parquet);
+insert into test_02245_2 select 1, 2 settings s3_truncate_on_insert=1;
+select * from test_02245_2;
+select _path from test_02245_2;
diff --git a/tests/queries/0_stateless/02246_is_secure_query_log.reference b/tests/queries/0_stateless/02246_is_secure_query_log.reference
new file mode 100644
index 00000000000..1e8c0bbc9cf
--- /dev/null
+++ b/tests/queries/0_stateless/02246_is_secure_query_log.reference
@@ -0,0 +1,4 @@
+1 0
+1 1
+2 0
+2 1
diff --git a/tests/queries/0_stateless/02246_is_secure_query_log.sh b/tests/queries/0_stateless/02246_is_secure_query_log.sh
new file mode 100755
index 00000000000..6b064cedfdf
--- /dev/null
+++ b/tests/queries/0_stateless/02246_is_secure_query_log.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+# Tags: no-fasttest
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+${CLICKHOUSE_CLIENT} --log_queries=1 --query_id "2246_${CLICKHOUSE_DATABASE}_client_nonsecure" -q "select 1 Format Null"
+${CLICKHOUSE_CLIENT} -q "system flush logs"
+${CLICKHOUSE_CLIENT} -q "select interface, is_secure from system.query_log where query_id = '2246_${CLICKHOUSE_DATABASE}_client_nonsecure' and type = 'QueryFinish' and current_database = currentDatabase()"
+
+${CLICKHOUSE_CLIENT_SECURE} --log_queries=1 --query_id "2246_${CLICKHOUSE_DATABASE}_client_secure" -q "select 1 Format Null"
+${CLICKHOUSE_CLIENT} -q "system flush logs"
+${CLICKHOUSE_CLIENT} -q "select interface, is_secure from system.query_log where query_id = '2246_${CLICKHOUSE_DATABASE}_client_secure' and type = 'QueryFinish' and current_database = currentDatabase()"
+
+${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&log_queries=1&query_id=2246_${CLICKHOUSE_DATABASE}_http_nonsecure" -d "select 1 Format Null"
+${CLICKHOUSE_CLIENT} -q "system flush logs"
+${CLICKHOUSE_CLIENT} -q "select interface, is_secure from system.query_log where query_id = '2246_${CLICKHOUSE_DATABASE}_http_nonsecure' and type = 'QueryFinish' and current_database = currentDatabase()"
+
+${CLICKHOUSE_CURL} -sSk "${CLICKHOUSE_URL_HTTPS}&log_queries=1&query_id=2246_${CLICKHOUSE_DATABASE}_http_secure" -d "select 1 Format Null"
+${CLICKHOUSE_CLIENT} -q "system flush logs"
+${CLICKHOUSE_CLIENT} -q "select interface, is_secure from system.query_log where query_id = '2246_${CLICKHOUSE_DATABASE}_http_secure' and type = 'QueryFinish' and current_database = currentDatabase()"
diff --git a/tests/queries/0_stateless/02247_fix_extract_parser.reference b/tests/queries/0_stateless/02247_fix_extract_parser.reference
new file mode 100644
index 00000000000..01e79c32a8c
--- /dev/null
+++ b/tests/queries/0_stateless/02247_fix_extract_parser.reference
@@ -0,0 +1,3 @@
+1
+2
+3
diff --git a/tests/queries/0_stateless/02247_fix_extract_parser.sql b/tests/queries/0_stateless/02247_fix_extract_parser.sql
new file mode 100644
index 00000000000..9b721a6e830
--- /dev/null
+++ b/tests/queries/0_stateless/02247_fix_extract_parser.sql
@@ -0,0 +1,3 @@
+WITH 'number: 1' as year SELECT extract(year, '\\d+');
+WITH 'number: 2' as mm SELECT extract(mm, '\\d+');
+WITH 'number: 3' as s SELECT extract(s, '\\d+');
diff --git a/tests/queries/0_stateless/02250_lots_of_columns_in_csv_with_names.reference b/tests/queries/0_stateless/02250_lots_of_columns_in_csv_with_names.reference
new file mode 100644
index 00000000000..9cd40e0d748
--- /dev/null
+++ b/tests/queries/0_stateless/02250_lots_of_columns_in_csv_with_names.reference
@@ -0,0 +1 @@
+42 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
diff --git a/tests/queries/0_stateless/02250_lots_of_columns_in_csv_with_names.sh b/tests/queries/0_stateless/02250_lots_of_columns_in_csv_with_names.sh
new file mode 100755
index 00000000000..9366d41af9a
--- /dev/null
+++ b/tests/queries/0_stateless/02250_lots_of_columns_in_csv_with_names.sh
@@ -0,0 +1,274 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_02250"
+
+$CLICKHOUSE_CLIENT -q "CREATE TABLE test_02250
+(
+ field_1 Int32,
+ field_2 Int32,
+ field_3 Int32,
+ field_4 Int32,
+ field_5 Int32,
+ field_6 Int32,
+ field_7 Int32,
+ field_8 Int32,
+ field_9 Int32,
+ field_10 Int32,
+ field_11 Int32,
+ field_12 Int32,
+ field_13 Int32,
+ field_14 Int32,
+ field_15 Int32,
+ field_16 Int32,
+ field_17 Int32,
+ field_18 Int32,
+ field_19 Int32,
+ field_20 Int32,
+ field_21 Int32,
+ field_22 Int32,
+ field_23 Int32,
+ field_24 Int32,
+ field_25 Int32,
+ field_26 Int32,
+ field_27 Int32,
+ field_28 Int32,
+ field_29 Int32,
+ field_30 Int32,
+ field_31 Int32,
+ field_32 Int32,
+ field_33 Int32,
+ field_34 Int32,
+ field_35 Int32,
+ field_36 Int32,
+ field_37 Int32,
+ field_38 Int32,
+ field_39 Int32,
+ field_40 Int32,
+ field_41 Int32,
+ field_42 Int32,
+ field_43 Int32,
+ field_44 Int32,
+ field_45 Int32,
+ field_46 Int32,
+ field_47 Int32,
+ field_48 Int32,
+ field_49 Int32,
+ field_50 Int32,
+ field_51 Int32,
+ field_52 Int32,
+ field_53 Int32,
+ field_54 Int32,
+ field_55 Int32,
+ field_56 Int32,
+ field_57 Int32,
+ field_58 Int32,
+ field_59 Int32,
+ field_60 Int32,
+ field_61 Int32,
+ field_62 Int32,
+ field_63 Int32,
+ field_64 Int32,
+ field_65 Int32,
+ field_66 Int32,
+ field_67 Int32,
+ field_68 Int32,
+ field_69 Int32,
+ field_70 Int32,
+ field_71 Int32,
+ field_72 Int32,
+ field_73 Int32,
+ field_74 Int32,
+ field_75 Int32,
+ field_76 Int32,
+ field_77 Int32,
+ field_78 Int32,
+ field_79 Int32,
+ field_80 Int32,
+ field_81 Int32,
+ field_82 Int32,
+ field_83 Int32,
+ field_84 Int32,
+ field_85 Int32,
+ field_86 Int32,
+ field_87 Int32,
+ field_88 Int32,
+ field_89 Int32,
+ field_90 Int32,
+ field_91 Int32,
+ field_92 Int32,
+ field_93 Int32,
+ field_94 Int32,
+ field_95 Int32,
+ field_96 Int32,
+ field_97 Int32,
+ field_98 Int32,
+ field_99 Int32,
+ field_100 Int32,
+ field_101 Int32,
+ field_102 Int32,
+ field_103 Int32,
+ field_104 Int32,
+ field_105 Int32,
+ field_106 Int32,
+ field_107 Int32,
+ field_108 Int32,
+ field_109 Int32,
+ field_110 Int32,
+ field_111 Int32,
+ field_112 Int32,
+ field_113 Int32,
+ field_114 Int32,
+ field_115 Int32,
+ field_116 Int32,
+ field_117 Int32,
+ field_118 Int32,
+ field_119 Int32,
+ field_120 Int32,
+ field_121 Int32,
+ field_122 Int32,
+ field_123 Int32,
+ field_124 Int32,
+ field_125 Int32,
+ field_126 Int32,
+ field_127 Int32,
+ field_128 Int32,
+ field_129 Int32,
+ field_130 Int32,
+ field_131 Int32,
+ field_132 Int32,
+ field_133 Int32,
+ field_134 Int32,
+ field_135 Int32,
+ field_136 Int32,
+ field_137 Int32,
+ field_138 Int32,
+ field_139 Int32,
+ field_140 Int32,
+ field_141 Int32,
+ field_142 Int32,
+ field_143 Int32,
+ field_144 Int32,
+ field_145 Int32,
+ field_146 Int32,
+ field_147 Int32,
+ field_148 Int32,
+ field_149 Int32,
+ field_150 Int32,
+ field_151 Int32,
+ field_152 Int32,
+ field_153 Int32,
+ field_154 Int32,
+ field_155 Int32,
+ field_156 Int32,
+ field_157 Int32,
+ field_158 Int32,
+ field_159 Int32,
+ field_160 Int32,
+ field_161 Int32,
+ field_162 Int32,
+ field_163 Int32,
+ field_164 Int32,
+ field_165 Int32,
+ field_166 Int32,
+ field_167 Int32,
+ field_168 Int32,
+ field_169 Int32,
+ field_170 Int32,
+ field_171 Int32,
+ field_172 Int32,
+ field_173 Int32,
+ field_174 Int32,
+ field_175 Int32,
+ field_176 Int32,
+ field_177 Int32,
+ field_178 Int32,
+ field_179 Int32,
+ field_180 Int32,
+ field_181 Int32,
+ field_182 Int32,
+ field_183 Int32,
+ field_184 Int32,
+ field_185 Int32,
+ field_186 Int32,
+ field_187 Int32,
+ field_188 Int32,
+ field_189 Int32,
+ field_190 Int32,
+ field_191 Int32,
+ field_192 Int32,
+ field_193 Int32,
+ field_194 Int32,
+ field_195 Int32,
+ field_196 Int32,
+ field_197 Int32,
+ field_198 Int32,
+ field_199 Int32,
+ field_200 Int32,
+ field_201 Int32,
+ field_202 Int32,
+ field_203 Int32,
+ field_204 Int32,
+ field_205 Int32,
+ field_206 Int32,
+ field_207 Int32,
+ field_208 Int32,
+ field_209 Int32,
+ field_210 Int32,
+ field_211 Int32,
+ field_212 Int32,
+ field_213 Int32,
+ field_214 Int32,
+ field_215 Int32,
+ field_216 Int32,
+ field_217 Int32,
+ field_218 Int32,
+ field_219 Int32,
+ field_220 Int32,
+ field_221 Int32,
+ field_222 Int32,
+ field_223 Int32,
+ field_224 Int32,
+ field_225 Int32,
+ field_226 Int32,
+ field_227 Int32,
+ field_228 Int32,
+ field_229 Int32,
+ field_230 Int32,
+ field_231 Int32,
+ field_232 Int32,
+ field_233 Int32,
+ field_234 Int32,
+ field_235 Int32,
+ field_236 Int32,
+ field_237 Int32,
+ field_238 Int32,
+ field_239 Int32,
+ field_240 Int32,
+ field_241 Int32,
+ field_242 Int32,
+ field_243 Int32,
+ field_244 Int32,
+ field_245 Int32,
+ field_246 Int32,
+ field_247 Int32,
+ field_248 Int32,
+ field_249 Int32,
+ field_250 Int32,
+ field_251 Int32,
+ field_252 Int32,
+ field_253 Int32,
+ field_254 Int32,
+ field_255 Int32,
+ field_256 Int32,
+ field_257 Int32
+)
+ENGINE = MergeTree
+ORDER BY tuple()"
+
+echo -e "field_1\n42" | $CLICKHOUSE_CLIENT -q "INSERT INTO test_02250 FORMAT CSVWithNames" --input_format_with_names_use_header 1
+$CLICKHOUSE_CLIENT -q "SELECT * FROM test_02250"
+$CLICKHOUSE_CLIENT -q "DROP TABLE test_02250"