mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-22 15:42:02 +00:00

Merge branch 'master' into joins

This commit is contained in:
commit cc61fb9408
12 .github/PULL_REQUEST_TEMPLATE.md (vendored)
@@ -7,16 +7,20 @@ Changelog category (leave one):
 - Performance Improvement
 - Backward Incompatible Change
 - Build/Testing/Packaging Improvement
-- Documentation
+- Documentation (changelog entry is not required)
 - Other
-- Non-significant (changelog entry is not needed)
+- Non-significant (changelog entry is not required)
 
-Changelog entry (up to few sentences, required except for Non-significant/Documentation categories):
+Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md):
 
 ...
 
-Detailed description (optional):
+Detailed description / Documentation draft:
 
 ...
+
+By adding documentation, you'll allow users to try your new feature immediately, not when someone else will have time to document it later. Documentation is necessary for all features that affect user experience in any way. You can add brief documentation draft above, or add documentation right into your patch as Markdown files in [docs](https://github.com/ClickHouse/ClickHouse/tree/master/docs) folder.
+
+If you are doing this for the first time, it's recommended to read the lightweight [Contributing to ClickHouse Documentation](https://github.com/ClickHouse/ClickHouse/tree/master/docs/README.md) guide first.
@@ -1,10 +1,16 @@
 # Contributing to ClickHouse
 
-## Technical info
-Developer guide for writing code for ClickHouse is published on official website alongside the usage and operations documentation:
-https://clickhouse.yandex/docs/en/development/architecture/
+ClickHouse is an open project, and you can contribute to it in many ways. You can help with ideas, code, or documentation. We appreciate any efforts that help us to make the project better.
 
-## Legal info
 Thank you.
+
+## Technical Info
+
+We have a [developer's guide](https://clickhouse.yandex/docs/en/development/developer_instruction/) for writing code for ClickHouse. Besides this guide, you can find [Overview of ClickHouse Architecture](https://clickhouse.yandex/docs/en/development/architecture/) and instructions on how to build ClickHouse in different environments.
+
+If you want to contribute to documentation, read the [Contributing to ClickHouse Documentation](docs/README.md) guide.
+
+## Legal Info
+
+In order for us (YANDEX LLC) to accept patches and other contributions from you, you will have to adopt our Yandex Contributor License Agreement (the "CLA"). The current version of the CLA you may find here:
+1) https://yandex.ru/legal/cla/?lang=en (in English) and
@@ -177,7 +177,7 @@ elseif (COMPILER_GCC)
     # Warn for suspicious length parameters to certain string and memory built-in functions if the argument uses sizeof
     add_cxx_compile_options(-Wsizeof-pointer-memaccess)
     # Warn about overriding virtual functions that are not marked with the override keyword
-    # add_cxx_compile_options(-Wsuggest-override)
+    add_cxx_compile_options(-Wsuggest-override)
     # Warn whenever a switch statement has an index of boolean type and the case values are outside the range of a boolean type
     add_cxx_compile_options(-Wswitch-bool)
     # Warn if a self-comparison always evaluates to true or false
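Enabling -Wsuggest-override is what drives most of the `override` additions in the rest of this commit. As a minimal illustration (not part of the commit), GCC flags an overriding function that omits the keyword; compile with `g++ -Wsuggest-override -c example.cpp`:

    struct Base
    {
        virtual void run() {}
        virtual ~Base() = default;
    };

    struct Derived : Base
    {
        void run() {}               // warning: 'virtual void Derived::run()' can be marked override
        // void run() override {}   // the form this commit converts the codebase to
    };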
@@ -101,7 +101,7 @@ public:
     }
 
-    void initialize(Poco::Util::Application & self [[maybe_unused]])
+    void initialize(Poco::Util::Application & self [[maybe_unused]]) override
     {
         std::string home_path;
         const char * home_path_cstr = getenv("HOME");
@@ -111,7 +111,7 @@ public:
         configReadClient(config(), home_path);
     }
 
-    int main(const std::vector<std::string> &)
+    int main(const std::vector<std::string> &) override
     {
         if (!json_path.empty() && Poco::File(json_path).exists()) /// Clear file with previous results
             Poco::File(json_path).remove();
@@ -492,7 +492,7 @@ private:
 
 public:
 
-    ~Benchmark()
+    ~Benchmark() override
     {
         shutdown = true;
     }
@@ -205,7 +205,7 @@ private:
 
     ConnectionParameters connection_parameters;
 
-    void initialize(Poco::Util::Application & self)
+    void initialize(Poco::Util::Application & self) override
     {
         Poco::Util::Application::initialize(self);
 
@@ -233,7 +233,7 @@ private:
     }
 
-    int main(const std::vector<std::string> & /*args*/)
+    int main(const std::vector<std::string> & /*args*/) override
     {
         try
         {
@@ -606,7 +606,7 @@ void TCPHandler::processOrdinaryQueryWithProcessors(size_t num_threads)
             /// If exception was thrown during pipeline execution, skip it while processing other exception.
         }
 
-        pipeline = QueryPipeline()
+        /// pipeline = QueryPipeline()
         );
 
     while (true)
@@ -111,7 +111,7 @@ public:
         server_display_name = server.config().getString("display_name", getFQDNOrHostName());
     }
 
-    void run();
+    void run() override;
 
     /// This method is called right before the query execution.
     virtual void customizeContext(DB::Context & /*context*/) {}
@@ -112,7 +112,7 @@ void FileChecker::save() const
         out->next();
     }
 
-    disk->moveFile(tmp_files_info_path, files_info_path);
+    disk->replaceFile(tmp_files_info_path, files_info_path);
 }
 
 void FileChecker::load(Map & local_map, const String & path) const
@@ -99,7 +99,7 @@ class ExternalTablesHandler : public Poco::Net::PartHandler, BaseExternalTable
 public:
     ExternalTablesHandler(Context & context_, const Poco::Net::NameValueCollection & params_) : context(context_), params(params_) {}
 
-    void handlePart(const Poco::Net::MessageHeader & header, std::istream & stream);
+    void handlePart(const Poco::Net::MessageHeader & header, std::istream & stream) override;
 
 private:
     Context & context;
@@ -60,7 +60,7 @@ std::ostream & operator<<(std::ostream & ostr, const TypesTestCase & test_case)
 class TypeTest : public ::testing::TestWithParam<TypesTestCase>
 {
 public:
-    void SetUp()
+    void SetUp() override
     {
         const auto & p = GetParam();
         from_types = typesFromString(p.from_types);
@@ -9,6 +9,7 @@ namespace DB
 
 DatabaseMemory::DatabaseMemory(const String & name_)
     : DatabaseWithOwnTablesBase(name_, "DatabaseMemory(" + name_ + ")")
+    , data_path("data/" + escapeForFileName(database_name) + "/")
 {}
 
 void DatabaseMemory::createTable(
@@ -1,6 +1,8 @@
 #pragma once
 
 #include <Databases/DatabasesCommon.h>
+#include <Common/escapeForFileName.h>
+#include <Parsers/ASTCreateQuery.h>
 
 
 namespace Poco { class Logger; }
@@ -32,6 +34,16 @@ public:
         const String & table_name) override;
 
     ASTPtr getCreateDatabaseQuery(const Context & /*context*/) const override;
 
+    /// DatabaseMemory allows to create tables, which store data on disk.
+    /// It's needed to create such tables in the default database of clickhouse-local.
+    /// TODO Maybe it's better to use DiskMemory for such tables.
+    /// To save data on disk it's possible to explicitly CREATE DATABASE db ENGINE=Ordinary in clickhouse-local.
+    String getTableDataPath(const String & table_name) const override { return data_path + escapeForFileName(table_name) + "/"; }
+    String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.table); }
+
+private:
+    String data_path;
 };
 
 }
@@ -56,13 +56,13 @@ public:
 
     DatabaseTablesSnapshotIterator(Tables && tables_) : tables(tables_), it(tables.begin()) {}
 
-    void next() { ++it; }
+    void next() override { ++it; }
 
-    bool isValid() const { return it != tables.end(); }
+    bool isValid() const override { return it != tables.end(); }
 
-    const String & name() const { return it->first; }
+    const String & name() const override { return it->first; }
 
-    const StoragePtr & table() const { return it->second; }
+    const StoragePtr & table() const override { return it->second; }
 };
 
 /// Copies list of dictionaries and iterates through such snapshot.
@@ -15,7 +15,57 @@ namespace ErrorCodes
     extern const int PATH_ACCESS_DENIED;
 }
 
-std::mutex DiskLocal::mutex;
+std::mutex DiskLocal::reservation_mutex;
+
+
+using DiskLocalPtr = std::shared_ptr<DiskLocal>;
+
+class DiskLocalReservation : public IReservation
+{
+public:
+    DiskLocalReservation(const DiskLocalPtr & disk_, UInt64 size_)
+        : disk(disk_), size(size_), metric_increment(CurrentMetrics::DiskSpaceReservedForMerge, size_)
+    {
+    }
+
+    UInt64 getSize() const override { return size; }
+
+    DiskPtr getDisk() const override { return disk; }
+
+    void update(UInt64 new_size) override;
+
+    ~DiskLocalReservation() override;
+
+private:
+    DiskLocalPtr disk;
+    UInt64 size;
+    CurrentMetrics::Increment metric_increment;
+};
+
+
+class DiskLocalDirectoryIterator : public IDiskDirectoryIterator
+{
+public:
+    explicit DiskLocalDirectoryIterator(const String & disk_path_, const String & dir_path_) :
+        dir_path(dir_path_), iter(disk_path_ + dir_path_) {}
+
+    void next() override { ++iter; }
+
+    bool isValid() const override { return iter != Poco::DirectoryIterator(); }
+
+    String path() const override
+    {
+        if (iter->isDirectory())
+            return dir_path + iter.name() + '/';
+        else
+            return dir_path + iter.name();
+    }
+
+private:
+    String dir_path;
+    Poco::DirectoryIterator iter;
+};
+
+
 ReservationPtr DiskLocal::reserve(UInt64 bytes)
 {
@@ -26,7 +76,7 @@ ReservationPtr DiskLocal::reserve(UInt64 bytes)
 
 bool DiskLocal::tryReserve(UInt64 bytes)
 {
-    std::lock_guard lock(mutex);
+    std::lock_guard lock(DiskLocal::reservation_mutex);
     if (bytes == 0)
     {
         LOG_DEBUG(&Logger::get("DiskLocal"), "Reserving 0 bytes on disk " << backQuote(name));
@@ -71,7 +121,7 @@ UInt64 DiskLocal::getAvailableSpace() const
 
 UInt64 DiskLocal::getUnreservedSpace() const
 {
-    std::lock_guard lock(mutex);
+    std::lock_guard lock(DiskLocal::reservation_mutex);
     auto available_space = getAvailableSpace();
     available_space -= std::min(available_space, reserved_bytes);
     return available_space;
@@ -161,20 +211,31 @@ std::unique_ptr<WriteBuffer> DiskLocal::writeFile(const String & path, size_t bu
     return std::make_unique<WriteBufferFromFile>(disk_path + path, buf_size, flags);
 }
 
+void DiskLocal::remove(const String & path)
+{
+    Poco::File(disk_path + path).remove(false);
+}
+
+void DiskLocal::removeRecursive(const String & path)
+{
+    Poco::File(disk_path + path).remove(true);
+}
+
+
 void DiskLocalReservation::update(UInt64 new_size)
 {
-    std::lock_guard lock(DiskLocal::mutex);
+    std::lock_guard lock(DiskLocal::reservation_mutex);
     disk->reserved_bytes -= size;
     size = new_size;
     disk->reserved_bytes += size;
 }
 
 
 DiskLocalReservation::~DiskLocalReservation()
 {
     try
     {
-        std::lock_guard lock(DiskLocal::mutex);
+        std::lock_guard lock(DiskLocal::reservation_mutex);
         if (disk->reserved_bytes < size)
         {
             disk->reserved_bytes = 0;
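The reservation objects above are RAII handles: the destructor returns the reserved bytes to the disk under `reservation_mutex`. A hypothetical usage sketch (the `disk` variable and the write step are assumptions, not taken from the commit):

    #include <stdexcept>

    void writeWithReservation(const DB::DiskPtr & disk, UInt64 bytes_needed)
    {
        auto reservation = disk->reserve(bytes_needed);     // returns an empty pointer on failure
        if (!reservation)
            throw std::runtime_error("not enough space on disk " + disk->getName());

        // ... write at most reservation->getSize() bytes under disk->getPath() ...

        reservation->update(bytes_needed / 2);  // a reservation can also be resized in place
    }   // ~IReservation releases the remainder and decrements reservation_count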
@@ -4,7 +4,6 @@
 #include <IO/ReadBufferFromFile.h>
 #include <IO/WriteBufferFromFile.h>
 
 #include <mutex>
 #include <Poco/DirectoryIterator.h>
 #include <Poco/File.h>
 
@@ -71,6 +70,10 @@ public:
 
     std::unique_ptr<WriteBuffer> writeFile(const String & path, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, WriteMode mode = WriteMode::Rewrite) override;
 
+    void remove(const String & path) override;
+
+    void removeRecursive(const String & path) override;
+
 private:
     bool tryReserve(UInt64 bytes);
 
@@ -79,61 +82,10 @@ private:
     const String disk_path;
     const UInt64 keep_free_space_bytes;
 
-    /// Used for reservation counters modification
-    static std::mutex mutex;
     UInt64 reserved_bytes = 0;
     UInt64 reservation_count = 0;
+
+    static std::mutex reservation_mutex;
 };
 
-using DiskLocalPtr = std::shared_ptr<DiskLocal>;
-
-
-class DiskLocalDirectoryIterator : public IDiskDirectoryIterator
-{
-public:
-    explicit DiskLocalDirectoryIterator(const String & disk_path_, const String & dir_path_) :
-        dir_path(dir_path_), iter(disk_path_ + dir_path_) {}
-
-    void next() override { ++iter; }
-
-    bool isValid() const override { return iter != Poco::DirectoryIterator(); }
-
-    String path() const override
-    {
-        if (iter->isDirectory())
-            return dir_path + iter.name() + '/';
-        else
-            return dir_path + iter.name();
-    }
-
-private:
-    String dir_path;
-    Poco::DirectoryIterator iter;
-};
-
-class DiskLocalReservation : public IReservation
-{
-public:
-    DiskLocalReservation(const DiskLocalPtr & disk_, UInt64 size_)
-        : disk(disk_), size(size_), metric_increment(CurrentMetrics::DiskSpaceReservedForMerge, size_)
-    {
-    }
-
-    UInt64 getSize() const override { return size; }
-
-    DiskPtr getDisk() const override { return disk; }
-
-    void update(UInt64 new_size) override;
-
-    ~DiskLocalReservation() override;
-
-private:
-    DiskLocalPtr disk;
-    UInt64 size;
-    CurrentMetrics::Increment metric_increment;
-};
-
 class DiskFactory;
 void registerDiskLocal(DiskFactory & factory);
 
 }
@@ -16,6 +16,27 @@ namespace ErrorCodes
     extern const int CANNOT_DELETE_DIRECTORY;
 }
 
+
+class DiskMemoryDirectoryIterator : public IDiskDirectoryIterator
+{
+public:
+    explicit DiskMemoryDirectoryIterator(std::vector<String> && dir_file_paths_)
+        : dir_file_paths(std::move(dir_file_paths_)), iter(dir_file_paths.begin())
+    {
+    }
+
+    void next() override { ++iter; }
+
+    bool isValid() const override { return iter != dir_file_paths.end(); }
+
+    String path() const override { return *iter; }
+
+private:
+    std::vector<String> dir_file_paths;
+    std::vector<String>::iterator iter;
+};
+
+
 ReservationPtr DiskMemory::reserve(UInt64 /*bytes*/)
 {
     throw Exception("Method reserve is not implemented for memory disks", ErrorCodes::NOT_IMPLEMENTED);
@@ -71,7 +92,7 @@ size_t DiskMemory::getFileSize(const String & path) const
 
     auto iter = files.find(path);
     if (iter == files.end())
-        throw Exception("File " + path + " does not exist", ErrorCodes::FILE_DOESNT_EXIST);
+        throw Exception("File '" + path + "' does not exist", ErrorCodes::FILE_DOESNT_EXIST);
 
     return iter->second.data.size();
 }
@@ -86,7 +107,7 @@ void DiskMemory::createDirectory(const String & path)
     String parent_path = parentPath(path);
     if (!parent_path.empty() && files.find(parent_path) == files.end())
         throw Exception(
-            "Failed to create directory " + path + ". Parent directory " + parent_path + " does not exist",
+            "Failed to create directory '" + path + "'. Parent directory " + parent_path + " does not exist",
             ErrorCodes::DIRECTORY_DOESNT_EXIST);
 
     files.emplace(path, FileData{FileType::Directory});
@@ -116,7 +137,7 @@ void DiskMemory::clearDirectory(const String & path)
     std::lock_guard lock(mutex);
 
     if (files.find(path) == files.end())
-        throw Exception("Directory " + path + " does not exist", ErrorCodes::DIRECTORY_DOESNT_EXIST);
+        throw Exception("Directory '" + path + "' does not exist", ErrorCodes::DIRECTORY_DOESNT_EXIST);
 
     for (auto iter = files.begin(); iter != files.end();)
     {
@@ -128,7 +149,7 @@ void DiskMemory::clearDirectory(const String & path)
 
         if (iter->second.type == FileType::Directory)
             throw Exception(
-                "Failed to clear directory " + path + ". " + iter->first + " is a directory", ErrorCodes::CANNOT_DELETE_DIRECTORY);
+                "Failed to clear directory '" + path + "'. " + iter->first + " is a directory", ErrorCodes::CANNOT_DELETE_DIRECTORY);
 
         files.erase(iter++);
     }
@@ -144,7 +165,7 @@ DiskDirectoryIteratorPtr DiskMemory::iterateDirectory(const String & path)
     std::lock_guard lock(mutex);
 
     if (!path.empty() && files.find(path) == files.end())
-        throw Exception("Directory " + path + " does not exist", ErrorCodes::DIRECTORY_DOESNT_EXIST);
+        throw Exception("Directory '" + path + "' does not exist", ErrorCodes::DIRECTORY_DOESNT_EXIST);
 
     std::vector<String> dir_file_paths;
     for (const auto & file : files)
@@ -203,7 +224,7 @@ std::unique_ptr<ReadBuffer> DiskMemory::readFile(const String & path, size_t /*b
 
     auto iter = files.find(path);
     if (iter == files.end())
-        throw Exception("File " + path + " does not exist", ErrorCodes::FILE_DOESNT_EXIST);
+        throw Exception("File '" + path + "' does not exist", ErrorCodes::FILE_DOESNT_EXIST);
 
     return std::make_unique<ReadBufferFromString>(iter->second.data);
 }
@@ -218,7 +239,7 @@ std::unique_ptr<WriteBuffer> DiskMemory::writeFile(const String & path, size_t /
         String parent_path = parentPath(path);
         if (!parent_path.empty() && files.find(parent_path) == files.end())
             throw Exception(
-                "Failed to create file " + path + ". Directory " + parent_path + " does not exist", ErrorCodes::DIRECTORY_DOESNT_EXIST);
+                "Failed to create file '" + path + "'. Directory " + parent_path + " does not exist", ErrorCodes::DIRECTORY_DOESNT_EXIST);
 
         iter = files.emplace(path, FileData{FileType::File}).first;
     }
@@ -229,6 +250,46 @@ std::unique_ptr<WriteBuffer> DiskMemory::writeFile(const String & path, size_t /
     return std::make_unique<WriteBufferFromString>(iter->second.data);
 }
 
+void DiskMemory::remove(const String & path)
+{
+    std::lock_guard lock(mutex);
+
+    auto file_it = files.find(path);
+    if (file_it == files.end())
+        throw Exception("File '" + path + "' doesn't exist", ErrorCodes::FILE_DOESNT_EXIST);
+
+    if (file_it->second.type == FileType::Directory)
+    {
+        files.erase(file_it);
+        if (std::any_of(files.begin(), files.end(), [path](const auto & file) { return parentPath(file.first) == path; }))
+            throw Exception("Directory '" + path + "' is not empty", ErrorCodes::CANNOT_DELETE_DIRECTORY);
+    }
+    else
+    {
+        files.erase(file_it);
+    }
+}
+
+void DiskMemory::removeRecursive(const String & path)
+{
+    std::lock_guard lock(mutex);
+
+    auto file_it = files.find(path);
+    if (file_it == files.end())
+        throw Exception("File '" + path + "' doesn't exist", ErrorCodes::FILE_DOESNT_EXIST);
+
+    for (auto iter = files.begin(); iter != files.end();)
+    {
+        if (iter->first.size() >= path.size() && std::string_view(iter->first.data(), path.size()) == path)
+            iter = files.erase(iter);
+        else
+            ++iter;
+    }
+}
+
 
 using DiskMemoryPtr = std::shared_ptr<DiskMemory>;
 
 
 void registerDiskMemory(DiskFactory & factory)
 {
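DiskMemory keeps every file and directory as an entry in one flat `files` map keyed by full path, which is why `removeRecursive` reduces to erasing keys by prefix. A standalone sketch of that idea (a plain `std::map`, not the real `FileData` type):

    #include <map>
    #include <string>
    #include <string_view>

    void removeRecursive(std::map<std::string, int> & files, const std::string & path)
    {
        for (auto it = files.begin(); it != files.end();)
        {
            // erase every entry whose path starts with the given prefix, the entry itself included
            if (it->first.size() >= path.size() && std::string_view(it->first.data(), path.size()) == path)
                it = files.erase(it);
            else
                ++it;
        }
    }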
@@ -67,6 +67,10 @@ public:
         size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE,
         WriteMode mode = WriteMode::Rewrite) override;
 
+    void remove(const String & path) override;
+
+    void removeRecursive(const String & path) override;
+
 private:
     void createDirectoriesImpl(const String & path);
     void replaceFileImpl(const String & from_path, const String & to_path);
@@ -93,30 +97,4 @@ private:
     mutable std::mutex mutex;
 };
 
-using DiskMemoryPtr = std::shared_ptr<DiskMemory>;
-
-
-class DiskMemoryDirectoryIterator : public IDiskDirectoryIterator
-{
-public:
-    explicit DiskMemoryDirectoryIterator(std::vector<String> && dir_file_paths_)
-        : dir_file_paths(std::move(dir_file_paths_)), iter(dir_file_paths.begin())
-    {
-    }
-
-    void next() override { ++iter; }
-
-    bool isValid() const override { return iter != dir_file_paths.end(); }
-
-    String path() const override { return *iter; }
-
-private:
-    std::vector<String> dir_file_paths;
-    std::vector<String>::iterator iter;
-};
-
-
 class DiskFactory;
 void registerDiskMemory(DiskFactory & factory);
 
 }
439 dbms/src/Disks/DiskS3.cpp (new file)
@@ -0,0 +1,439 @@
#include "DiskS3.h"

#if USE_AWS_S3
#    include "DiskFactory.h"

#    include <random>
#    include <IO/S3Common.h>
#    include <IO/ReadBufferFromS3.h>
#    include <IO/WriteBufferFromS3.h>
#    include <IO/ReadBufferFromFile.h>
#    include <IO/WriteBufferFromFile.h>
#    include <IO/ReadHelpers.h>
#    include <IO/WriteHelpers.h>
#    include <Poco/File.h>
#    include <Common/checkStackSize.h>
#    include <Common/quoteString.h>
#    include <Common/thread_local_rng.h>

#    include <aws/s3/model/CopyObjectRequest.h>
#    include <aws/s3/model/DeleteObjectRequest.h>
#    include <aws/s3/model/GetObjectRequest.h>

namespace DB
{
namespace ErrorCodes
{
    extern const int FILE_ALREADY_EXISTS;
    extern const int FILE_DOESNT_EXIST;
    extern const int PATH_ACCESS_DENIED;
}

namespace
{
    template <typename Result, typename Error>
    void throwIfError(Aws::Utils::Outcome<Result, Error> && response)
    {
        if (!response.IsSuccess())
        {
            const auto & err = response.GetError();
            throw Exception(err.GetMessage(), static_cast<int>(err.GetErrorType()));
        }
    }

    String readKeyFromFile(const String & path)
    {
        String key;
        ReadBufferFromFile buf(path, 1024); /* reasonable buffer size for small file */
        readStringUntilEOF(key, buf);
        return key;
    }

    void writeKeyToFile(const String & key, const String & path)
    {
        WriteBufferFromFile buf(path, 1024);
        writeString(key, buf);
        buf.next();
    }

    /// Stores data in S3 and the object key in file in local filesystem.
    class WriteIndirectBufferFromS3 : public WriteBufferFromS3
    {
    public:
        WriteIndirectBufferFromS3(
            std::shared_ptr<Aws::S3::S3Client> & client_ptr_,
            const String & bucket_,
            const String & metadata_path_,
            const String & s3_path_,
            size_t buf_size_)
            : WriteBufferFromS3(client_ptr_, bucket_, s3_path_, DEFAULT_BLOCK_SIZE, buf_size_)
            , metadata_path(metadata_path_)
            , s3_path(s3_path_)
        {
        }

        void finalize() override
        {
            WriteBufferFromS3::finalize();
            writeKeyToFile(s3_path, metadata_path);
            finalized = true;
        }

        ~WriteIndirectBufferFromS3() override
        {
            if (finalized)
                return;

            try
            {
                finalize();
            }
            catch (...)
            {
                tryLogCurrentException(__PRETTY_FUNCTION__);
            }
        }

    private:
        bool finalized = false;
        const String metadata_path;
        const String s3_path;
    };
}


class DiskS3DirectoryIterator : public IDiskDirectoryIterator
{
public:
    DiskS3DirectoryIterator(const String & full_path, const String & folder_path_) : iter(full_path), folder_path(folder_path_) {}

    void next() override { ++iter; }

    bool isValid() const override { return iter != Poco::DirectoryIterator(); }

    String path() const override
    {
        if (iter->isDirectory())
            return folder_path + iter.name() + '/';
        else
            return folder_path + iter.name();
    }

private:
    Poco::DirectoryIterator iter;
    String folder_path;
};


using DiskS3Ptr = std::shared_ptr<DiskS3>;

class DiskS3Reservation : public IReservation
{
public:
    DiskS3Reservation(const DiskS3Ptr & disk_, UInt64 size_)
        : disk(disk_), size(size_), metric_increment(CurrentMetrics::DiskSpaceReservedForMerge, size_)
    {
    }

    UInt64 getSize() const override { return size; }

    DiskPtr getDisk() const override { return disk; }

    void update(UInt64 new_size) override
    {
        std::lock_guard lock(disk->reservation_mutex);
        disk->reserved_bytes -= size;
        size = new_size;
        disk->reserved_bytes += size;
    }

    ~DiskS3Reservation() override;

private:
    DiskS3Ptr disk;
    UInt64 size;
    CurrentMetrics::Increment metric_increment;
};


DiskS3::DiskS3(String name_, std::shared_ptr<Aws::S3::S3Client> client_, String bucket_, String s3_root_path_, String metadata_path_)
    : name(std::move(name_))
    , client(std::move(client_))
    , bucket(std::move(bucket_))
    , s3_root_path(std::move(s3_root_path_))
    , metadata_path(std::move(metadata_path_))
{
}

ReservationPtr DiskS3::reserve(UInt64 bytes)
{
    if (!tryReserve(bytes))
        return {};
    return std::make_unique<DiskS3Reservation>(std::static_pointer_cast<DiskS3>(shared_from_this()), bytes);
}

bool DiskS3::exists(const String & path) const
{
    return Poco::File(metadata_path + path).exists();
}

bool DiskS3::isFile(const String & path) const
{
    return Poco::File(metadata_path + path).isFile();
}

bool DiskS3::isDirectory(const String & path) const
{
    return Poco::File(metadata_path + path).isDirectory();
}

size_t DiskS3::getFileSize(const String & path) const
{
    Aws::S3::Model::GetObjectRequest request;
    request.SetBucket(bucket);
    request.SetKey(getS3Path(path));
    auto outcome = client->GetObject(request);
    if (!outcome.IsSuccess())
    {
        auto & err = outcome.GetError();
        throw Exception(err.GetMessage(), static_cast<int>(err.GetErrorType()));
    }
    else
    {
        return outcome.GetResult().GetContentLength();
    }
}

void DiskS3::createDirectory(const String & path)
{
    Poco::File(metadata_path + path).createDirectory();
}

void DiskS3::createDirectories(const String & path)
{
    Poco::File(metadata_path + path).createDirectories();
}

DiskDirectoryIteratorPtr DiskS3::iterateDirectory(const String & path)
{
    return std::make_unique<DiskS3DirectoryIterator>(metadata_path + path, path);
}

void DiskS3::clearDirectory(const String & path)
{
    for (auto it{iterateDirectory(path)}; it->isValid(); it->next())
        if (isFile(it->path()))
            remove(it->path());
}

void DiskS3::moveFile(const String & from_path, const String & to_path)
{
    if (exists(to_path))
        throw Exception("File already exists " + to_path, ErrorCodes::FILE_ALREADY_EXISTS);
    Poco::File(metadata_path + from_path).renameTo(metadata_path + to_path);
}

void DiskS3::replaceFile(const String & from_path, const String & to_path)
{
    Poco::File from_file(metadata_path + from_path);
    Poco::File to_file(metadata_path + to_path);
    if (to_file.exists())
    {
        Poco::File tmp_file(metadata_path + to_path + ".old");
        to_file.renameTo(tmp_file.path());
        from_file.renameTo(metadata_path + to_path);
        remove(to_path + ".old");
    }
    else
        from_file.renameTo(to_file.path());
}

void DiskS3::copyFile(const String & from_path, const String & to_path)
{
    if (exists(to_path))
        remove(to_path);

    String s3_from_path = readKeyFromFile(metadata_path + from_path);
    String s3_to_path = s3_root_path + getRandomName();

    Aws::S3::Model::CopyObjectRequest req;
    req.SetBucket(bucket);
    req.SetCopySource(s3_from_path);
    req.SetKey(s3_to_path);
    throwIfError(client->CopyObject(req));
    writeKeyToFile(s3_to_path, metadata_path + to_path);
}

std::unique_ptr<ReadBuffer> DiskS3::readFile(const String & path, size_t buf_size) const
{
    return std::make_unique<ReadBufferFromS3>(client, bucket, getS3Path(path), buf_size);
}

std::unique_ptr<WriteBuffer> DiskS3::writeFile(const String & path, size_t buf_size, WriteMode mode)
{
    if (!exists(path) || mode == WriteMode::Rewrite)
    {
        String new_s3_path = s3_root_path + getRandomName();
        return std::make_unique<WriteIndirectBufferFromS3>(client, bucket, metadata_path + path, new_s3_path, buf_size);
    }
    else
    {
        auto old_s3_path = getS3Path(path);
        ReadBufferFromS3 read_buffer(client, bucket, old_s3_path, buf_size);
        auto writeBuffer = std::make_unique<WriteIndirectBufferFromS3>(client, bucket, metadata_path + path, old_s3_path, buf_size);
        std::vector<char> buffer(buf_size);
        while (!read_buffer.eof())
            writeBuffer->write(buffer.data(), read_buffer.read(buffer.data(), buf_size));
        return writeBuffer;
    }
}

void DiskS3::remove(const String & path)
{
    Poco::File file(metadata_path + path);
    if (file.isFile())
    {
        Aws::S3::Model::DeleteObjectRequest request;
        request.SetBucket(bucket);
        request.SetKey(getS3Path(path));
        throwIfError(client->DeleteObject(request));
    }
    file.remove();
}

void DiskS3::removeRecursive(const String & path)
{
    checkStackSize(); /// This is needed to prevent stack overflow in case of cyclic symlinks.

    Poco::File file(metadata_path + path);
    if (file.isFile())
    {
        Aws::S3::Model::DeleteObjectRequest request;
        request.SetBucket(bucket);
        request.SetKey(getS3Path(path));
        throwIfError(client->DeleteObject(request));
    }
    else
    {
        for (auto it{iterateDirectory(path)}; it->isValid(); it->next())
            removeRecursive(it->path());
    }
    file.remove();
}

String DiskS3::getS3Path(const String & path) const
{
    if (!exists(path))
        throw Exception("File not found: " + path, ErrorCodes::FILE_DOESNT_EXIST);

    return readKeyFromFile(metadata_path + path);
}

String DiskS3::getRandomName() const
{
    std::uniform_int_distribution<int> distribution('a', 'z');
    String res(32, ' '); /// The number of bits of entropy should be not less than 128.
    for (auto & c : res)
        c = distribution(thread_local_rng);
    return res;
}

bool DiskS3::tryReserve(UInt64 bytes)
{
    std::lock_guard lock(reservation_mutex);
    if (bytes == 0)
    {
        LOG_DEBUG(&Logger::get("DiskS3"), "Reserving 0 bytes on s3 disk " << backQuote(name));
        ++reservation_count;
        return true;
    }

    auto available_space = getAvailableSpace();
    UInt64 unreserved_space = available_space - std::min(available_space, reserved_bytes);
    if (unreserved_space >= bytes)
    {
        LOG_DEBUG(
            &Logger::get("DiskS3"),
            "Reserving " << formatReadableSizeWithBinarySuffix(bytes) << " on disk " << backQuote(name) << ", having unreserved "
                << formatReadableSizeWithBinarySuffix(unreserved_space) << ".");
        ++reservation_count;
        reserved_bytes += bytes;
        return true;
    }
    return false;
}


DiskS3Reservation::~DiskS3Reservation()
{
    try
    {
        std::lock_guard lock(disk->reservation_mutex);
        if (disk->reserved_bytes < size)
        {
            disk->reserved_bytes = 0;
            LOG_ERROR(&Logger::get("DiskLocal"), "Unbalanced reservations size for disk '" + disk->getName() + "'.");
        }
        else
        {
            disk->reserved_bytes -= size;
        }

        if (disk->reservation_count == 0)
            LOG_ERROR(&Logger::get("DiskLocal"), "Unbalanced reservation count for disk '" + disk->getName() + "'.");
        else
            --disk->reservation_count;
    }
    catch (...)
    {
        tryLogCurrentException(__PRETTY_FUNCTION__);
    }
}

void registerDiskS3(DiskFactory & factory)
{
    auto creator = [](const String & name,
                      const Poco::Util::AbstractConfiguration & config,
                      const String & config_prefix,
                      const Context & context) -> DiskPtr {
        Poco::File disk{context.getPath() + "disks/" + name};
        disk.createDirectories();

        S3::URI uri(Poco::URI(config.getString(config_prefix + ".endpoint")));
        auto client = S3::ClientFactory::instance().create(
            uri.endpoint,
            config.getString(config_prefix + ".access_key_id", ""),
            config.getString(config_prefix + ".secret_access_key", ""));

        if (uri.key.back() != '/')
            throw Exception("S3 path must end with '/', but '" + uri.key + "' doesn't.", ErrorCodes::LOGICAL_ERROR);

        String metadata_path = context.getPath() + "disks/" + name + "/";

        auto s3disk = std::make_shared<DiskS3>(name, client, uri.bucket, uri.key, metadata_path);

        /// This code is used only to check access to the corresponding disk.

        {
            auto file = s3disk->writeFile("test_acl", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite);
            file->write("test", 4);
        }
        {
            auto file = s3disk->readFile("test_acl", DBMS_DEFAULT_BUFFER_SIZE);
            String buf(4, '0');
            file->readStrict(buf.data(), 4);
            if (buf != "test")
                throw Exception("No read access to S3 bucket in disk " + name, ErrorCodes::PATH_ACCESS_DENIED);
        }
        {
            s3disk->remove("test_acl");
        }

        return s3disk;
    };
    factory.registerDiskType("s3", creator);
}

}

#endif
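The core trick in DiskS3 is the metadata indirection: each logical path maps to a small local file whose only content is the S3 object key, and keys are 32 random lowercase letters (32 x log2(26) is roughly 150 bits of entropy, above the 128-bit target the comment mentions). A standalone sketch of the scheme, with the local filesystem standing in for the key store:

    #include <fstream>
    #include <random>
    #include <string>

    std::string randomKey()
    {
        static thread_local std::mt19937_64 rng{std::random_device{}()};
        std::uniform_int_distribution<int> distribution('a', 'z');
        std::string res(32, ' ');
        for (auto & c : res)
            c = static_cast<char>(distribution(rng));
        return res;
    }

    // The metadata file holds only the key; the data itself lives in the bucket under that key.
    void writeKeyToFile(const std::string & key, const std::string & metadata_path)
    {
        std::ofstream(metadata_path) << key;
    }

    std::string readKeyFromFile(const std::string & metadata_path)
    {
        std::string key;
        std::ifstream(metadata_path) >> key;
        return key;
    }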
93 dbms/src/Disks/DiskS3.h (new file)
@@ -0,0 +1,93 @@
#pragma once

#include <Common/config.h>

#if USE_AWS_S3
#    include "DiskFactory.h"

#    include <aws/s3/S3Client.h>
#    include <Poco/DirectoryIterator.h>


namespace DB
{
/**
 * Storage for persisting data in S3 and metadata on the local disk.
 * Files are represented by file in local filesystem (clickhouse_root/disks/disk_name/path/to/file)
 * that contains S3 object key with actual data.
 */
class DiskS3 : public IDisk
{
public:
    friend class DiskS3Reservation;

    DiskS3(String name_, std::shared_ptr<Aws::S3::S3Client> client_, String bucket_, String s3_root_path_, String metadata_path_);

    const String & getName() const override { return name; }

    const String & getPath() const override { return s3_root_path; }

    ReservationPtr reserve(UInt64 bytes) override;

    UInt64 getTotalSpace() const override { return std::numeric_limits<UInt64>::max(); }

    UInt64 getAvailableSpace() const override { return std::numeric_limits<UInt64>::max(); }

    UInt64 getUnreservedSpace() const override { return std::numeric_limits<UInt64>::max(); }

    UInt64 getKeepingFreeSpace() const override { return 0; }

    bool exists(const String & path) const override;

    bool isFile(const String & path) const override;

    bool isDirectory(const String & path) const override;

    size_t getFileSize(const String & path) const override;

    void createDirectory(const String & path) override;

    void createDirectories(const String & path) override;

    void clearDirectory(const String & path) override;

    void moveDirectory(const String & from_path, const String & to_path) override { moveFile(from_path, to_path); }

    DiskDirectoryIteratorPtr iterateDirectory(const String & path) override;

    void moveFile(const String & from_path, const String & to_path) override;

    void replaceFile(const String & from_path, const String & to_path) override;

    void copyFile(const String & from_path, const String & to_path) override;

    std::unique_ptr<ReadBuffer> readFile(const String & path, size_t buf_size) const override;

    std::unique_ptr<WriteBuffer> writeFile(const String & path, size_t buf_size, WriteMode mode) override;

    void remove(const String & path) override;

    void removeRecursive(const String & path) override;

private:
    String getS3Path(const String & path) const;

    String getRandomName() const;

    bool tryReserve(UInt64 bytes);

private:
    const String name;
    std::shared_ptr<Aws::S3::S3Client> client;
    const String bucket;
    const String s3_root_path;
    const String metadata_path;

    UInt64 reserved_bytes = 0;
    UInt64 reservation_count = 0;
    std::mutex reservation_mutex;
};

}

#endif
@@ -2,6 +2,7 @@
 
 namespace DB
 {
 
 bool IDisk::isDirectoryEmpty(const String & path)
 {
     return !iterateDirectory(path)->isValid();
@@ -6,6 +6,7 @@
 #include <Common/Exception.h>
 
 #include <memory>
 #include <mutex>
 #include <utility>
 #include <boost/noncopyable.hpp>
 #include <Poco/Path.h>
@@ -97,7 +98,7 @@ public:
     /// Create directory and all parent directories if necessary.
     virtual void createDirectories(const String & path) = 0;
 
-    /// Remove all files from the directory.
+    /// Remove all files from the directory. Directories are not removed.
     virtual void clearDirectory(const String & path) = 0;
 
     /// Move directory from `from_path` to `to_path`.
@@ -125,6 +126,12 @@ public:
 
     /// Open the file for write and return WriteBuffer object.
     virtual std::unique_ptr<WriteBuffer> writeFile(const String & path, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, WriteMode mode = WriteMode::Rewrite) = 0;
 
+    /// Remove file or directory. Throws an exception if the file doesn't exist or if the directory is not empty.
+    virtual void remove(const String & path) = 0;
+
+    /// Remove file or directory with all children. Use with extra caution. Throws an exception if the file doesn't exist.
+    virtual void removeRecursive(const String & path) = 0;
 };
 
 using DiskPtr = std::shared_ptr<IDisk>;
@@ -151,7 +158,7 @@ public:
 /**
  * Information about reserved size on particular disk.
  */
-class IReservation
+class IReservation : boost::noncopyable
 {
 public:
     /// Get reservation size.
@@ -1,10 +1,16 @@
-#include "DiskFactory.h"
 #include "registerDisks.h"
+
+#include "DiskFactory.h"
+
+#include <Common/config.h>
 
 namespace DB
 {
 void registerDiskLocal(DiskFactory & factory);
 void registerDiskMemory(DiskFactory & factory);
+#if USE_AWS_S3
+void registerDiskS3(DiskFactory & factory);
+#endif
 
 void registerDisks()
 {
@@ -12,6 +18,9 @@ void registerDisks()
 
     registerDiskLocal(factory);
     registerDiskMemory(factory);
+#if USE_AWS_S3
+    registerDiskS3(factory);
+#endif
 }
 
 }
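registerDisks ties the per-backend registration functions to a single string-keyed factory; `registerDiskType` stores a creator, which presumably is looked up later by the disk type named in the server configuration. A simplified sketch of that registry pattern (names and the creator signature are reduced; the real creator also receives the configuration, prefix, and context):

    #include <functional>
    #include <map>
    #include <memory>
    #include <stdexcept>
    #include <string>

    struct IDisk;                                   // stand-in for DB::IDisk
    using DiskPtr = std::shared_ptr<IDisk>;
    using Creator = std::function<DiskPtr(const std::string & name)>;

    class DiskFactorySketch
    {
    public:
        void registerDiskType(const std::string & type, Creator creator)
        {
            if (!registry.emplace(type, std::move(creator)).second)
                throw std::runtime_error("disk type '" + type + "' is registered twice");
        }

        DiskPtr create(const std::string & type, const std::string & name) const
        {
            auto it = registry.find(type);
            if (it == registry.end())
                throw std::runtime_error("unknown disk type '" + type + "'");
            return it->second(name);
        }

    private:
        std::map<std::string, Creator> registry;
    };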
@@ -3,5 +3,4 @@
 namespace DB
 {
 void registerDisks();
 
 }
@@ -5,6 +5,10 @@
 #include <IO/ReadHelpers.h>
 #include <IO/WriteHelpers.h>
 
+#if !__clang__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wsuggest-override"
+#endif
 
 template <typename T>
 DB::DiskPtr createDisk();
@@ -121,6 +121,11 @@ struct NumericArraySource : public ArraySourceImpl<NumericArraySource<T>>
     }
 };
 
+#if !__clang__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wsuggest-override"
+#endif
+
 template <typename Base>
 struct ConstSource : public Base
 {
@@ -199,6 +204,10 @@ struct ConstSource : public Base
     }
 };
 
+#if !__clang__
+#pragma GCC diagnostic pop
+#endif
+
 struct StringSource
 {
     using Slice = NumericArraySlice<UInt8>;
@@ -20,6 +20,10 @@ struct BitCountImpl
         return __builtin_popcountll(a);
     if constexpr (std::is_same_v<A, UInt32> || std::is_same_v<A, Int32> || std::is_unsigned_v<A>)
         return __builtin_popcount(a);
+    if constexpr (std::is_same_v<A, Int16>)
+        return __builtin_popcount(static_cast<UInt16>(a));
+    if constexpr (std::is_same_v<A, Int8>)
+        return __builtin_popcount(static_cast<UInt8>(a));
     else
         return __builtin_popcountll(ext::bit_cast<unsigned long long>(a));
 }
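The casts matter because of integer promotion: a negative Int8/Int16 is sign-extended to `int` before `__builtin_popcount` sees it, so without the cast `bitCount(-1)` on an 8-bit value would count 32 set bits instead of 8. A small standalone check (GCC/Clang builtin, as in the code above):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        int8_t a = -1;                                             // bit pattern 0xFF
        assert(__builtin_popcount(a) == 32);                       // promoted to 0xFFFFFFFF
        assert(__builtin_popcount(static_cast<uint8_t>(a)) == 8);  // what bitCount should return
    }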
@@ -93,11 +93,10 @@ namespace S3
         if (!endpoint.empty())
             cfg.endpointOverride = endpoint;
 
-        auto cred_provider = std::make_shared<Aws::Auth::SimpleAWSCredentialsProvider>(access_key_id,
-            secret_access_key);
+        Aws::Auth::AWSCredentials credentials(access_key_id, secret_access_key);
 
         return std::make_shared<Aws::S3::S3Client>(
-            std::move(cred_provider), // Credentials provider.
+            credentials, // Aws credentials.
             std::move(cfg), // Client configuration.
             Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, // Sign policy.
             endpoint.empty() // Use virtual addressing only if endpoint is not specified.
@@ -105,7 +104,7 @@ namespace S3
 }
 
-URI::URI(Poco::URI & uri_)
+URI::URI(const Poco::URI & uri_)
 {
     static const std::regex BUCKET_KEY_PATTERN("([^/]+)/(.*)");
 
@@ -49,7 +49,7 @@ struct URI
     String bucket;
     String key;
 
-    explicit URI(Poco::URI & uri_);
+    explicit URI(const Poco::URI & uri_);
 };
 
 }
@@ -29,7 +29,7 @@ public:
         size_t buffer_size_ = DBMS_DEFAULT_BUFFER_SIZE);
 
     /// Receives response from the server after sending all data.
-    void finalize();
+    void finalize() override;
 };
 
 }
@@ -69,6 +69,7 @@ void WriteBufferFromS3::nextImpl()
 
 void WriteBufferFromS3::finalize()
 {
     next();
+    temporary_buffer->finalize();
     if (!buffer_string.empty())
     {
@@ -781,7 +781,8 @@ Block Aggregator::mergeAndConvertOneBucketToBlock(
     ManyAggregatedDataVariants & variants,
     Arena * arena,
     bool final,
-    size_t bucket) const
+    size_t bucket,
+    std::atomic<bool> * is_cancelled) const
 {
     auto & merged_data = *variants[0];
     auto method = merged_data.type;
@@ -792,6 +793,8 @@ Block Aggregator::mergeAndConvertOneBucketToBlock(
     else if (method == AggregatedDataVariants::Type::NAME) \
     { \
         mergeBucketImpl<decltype(merged_data.NAME)::element_type>(variants, bucket, arena); \
+        if (is_cancelled && is_cancelled->load(std::memory_order_seq_cst)) \
+            return {}; \
         block = convertOneBucketToBlock(merged_data, *merged_data.NAME, final, bucket); \
     }
 
@@ -1482,12 +1485,15 @@ void NO_INLINE Aggregator::mergeSingleLevelDataImpl(
 
 template <typename Method>
 void NO_INLINE Aggregator::mergeBucketImpl(
-    ManyAggregatedDataVariants & data, Int32 bucket, Arena * arena) const
+    ManyAggregatedDataVariants & data, Int32 bucket, Arena * arena, std::atomic<bool> * is_cancelled) const
 {
     /// We merge all aggregation results to the first.
     AggregatedDataVariantsPtr & res = data[0];
     for (size_t result_num = 1, size = data.size(); result_num < size; ++result_num)
     {
+        if (is_cancelled && is_cancelled->load(std::memory_order_seq_cst))
+            return;
+
         AggregatedDataVariants & current = *data[result_num];
 
         mergeDataImpl<Method>(
@@ -1170,7 +1170,8 @@ protected:
         ManyAggregatedDataVariants & variants,
         Arena * arena,
         bool final,
-        size_t bucket) const;
+        size_t bucket,
+        std::atomic<bool> * is_cancelled = nullptr) const;
 
     Block prepareBlockAndFillWithoutKey(AggregatedDataVariants & data_variants, bool final, bool is_overflows) const;
     Block prepareBlockAndFillSingleLevel(AggregatedDataVariants & data_variants, bool final) const;
@@ -1206,7 +1207,7 @@ protected:
 
     template <typename Method>
     void mergeBucketImpl(
-        ManyAggregatedDataVariants & data, Int32 bucket, Arena * arena) const;
+        ManyAggregatedDataVariants & data, Int32 bucket, Arena * arena, std::atomic<bool> * is_cancelled = nullptr) const;
 
     template <typename Method>
     void convertBlockToTwoLevelImpl(
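The pattern added here is cooperative cancellation: a nullable `std::atomic<bool> *` defaults to `nullptr`, so existing callers are unaffected, while callers that can cancel poll the flag between buckets. A minimal sketch of the convention (the merge body is elided):

    #include <atomic>
    #include <vector>

    void mergeBuckets(const std::vector<int> & buckets, std::atomic<bool> * is_cancelled = nullptr)
    {
        for (int bucket : buckets)
        {
            if (is_cancelled && is_cancelled->load(std::memory_order_seq_cst))
                return;     // stop early; the caller discards the partial result
            (void)bucket;   // ... merge one bucket here ...
        }
    }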
@@ -1955,7 +1955,8 @@ void InterpreterSelectQuery::executeAggregation(QueryPipeline & pipeline, const
     if (pipeline.getNumStreams() > 1)
     {
         /// Add resize transform to uniformly distribute data between aggregating streams.
-        pipeline.resize(pipeline.getNumStreams(), true);
+        if (!(storage && storage->hasEvenlyDistributedRead()))
+            pipeline.resize(pipeline.getNumStreams(), true, true);
 
         auto many_data = std::make_shared<ManyAggregatedData>(pipeline.getNumStreams());
         auto merge_threads = settings.aggregation_memory_efficient_merge_threads
@@ -10,8 +10,8 @@ namespace DB
 class ParserArray : public IParserBase
 {
 protected:
-    const char * getName() const { return "array"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "array"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 
@@ -22,8 +22,8 @@ protected:
 class ParserParenthesisExpression : public IParserBase
 {
 protected:
-    const char * getName() const { return "parenthesized expression"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "parenthesized expression"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 
@@ -32,8 +32,8 @@ protected:
 class ParserSubquery : public IParserBase
 {
 protected:
-    const char * getName() const { return "SELECT subquery"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "SELECT subquery"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 
@@ -42,8 +42,8 @@ protected:
 class ParserIdentifier : public IParserBase
 {
 protected:
-    const char * getName() const { return "identifier"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "identifier"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 
@@ -52,16 +52,16 @@ protected:
 class ParserCompoundIdentifier : public IParserBase
 {
 protected:
-    const char * getName() const { return "compound identifier"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "compound identifier"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 /// Just *
 class ParserAsterisk : public IParserBase
 {
 protected:
-    const char * getName() const { return "asterisk"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "asterisk"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 /** Something like t.* or db.table.*
@@ -69,8 +69,8 @@ protected:
 class ParserQualifiedAsterisk : public IParserBase
 {
 protected:
-    const char * getName() const { return "qualified asterisk"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "qualified asterisk"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 /** COLUMNS('<regular expression>')
@@ -78,8 +78,8 @@ protected:
 class ParserColumnsMatcher : public IParserBase
 {
 protected:
-    const char * getName() const { return "COLUMNS matcher"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "COLUMNS matcher"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 /** A function, for example, f(x, y + 1, g(z)).
@@ -93,16 +93,16 @@ class ParserFunction : public IParserBase
 public:
     ParserFunction(bool allow_function_parameters_ = true) : allow_function_parameters(allow_function_parameters_) {}
 protected:
-    const char * getName() const { return "function"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "function"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
     bool allow_function_parameters;
 };
 
 class ParserCodecDeclarationList : public IParserBase
 {
 protected:
-    const char * getName() const { return "codec declaration list"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "codec declaration list"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 /** Parse compression codec
@@ -111,8 +111,8 @@ protected:
 class ParserCodec : public IParserBase
 {
 protected:
-    const char * getName() const { return "codec"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "codec"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 class ParserCastExpression : public IParserBase
@@ -176,8 +176,8 @@ protected:
 class ParserNull : public IParserBase
 {
 protected:
-    const char * getName() const { return "NULL"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "NULL"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 
@@ -186,8 +186,8 @@ protected:
 class ParserNumber : public IParserBase
 {
 protected:
-    const char * getName() const { return "number"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "number"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 /** Unsigned integer, used in right hand side of tuple access operator (x.1).
@@ -195,8 +195,8 @@ protected:
 class ParserUnsignedInteger : public IParserBase
 {
 protected:
-    const char * getName() const { return "unsigned integer"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "unsigned integer"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 
@@ -205,8 +205,8 @@ protected:
 class ParserStringLiteral : public IParserBase
 {
 protected:
-    const char * getName() const { return "string literal"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "string literal"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 
@@ -219,8 +219,8 @@ protected:
 class ParserArrayOfLiterals : public IParserBase
 {
 protected:
-    const char * getName() const { return "array"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "array"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 
@@ -229,8 +229,8 @@ protected:
 class ParserLiteral : public IParserBase
 {
 protected:
-    const char * getName() const { return "literal"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "literal"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 
@@ -246,8 +246,8 @@ private:
 
     bool allow_alias_without_as_keyword;
 
-    const char * getName() const { return "alias"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "alias"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 
@@ -257,8 +257,8 @@ private:
 class ParserSubstitution : public IParserBase
 {
 protected:
-    const char * getName() const { return "substitution"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "substitution"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 
@@ -267,8 +267,8 @@ protected:
 class ParserExpressionElement : public IParserBase
 {
 protected:
-    const char * getName() const { return "element of expression"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "element of expression"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 
@@ -283,8 +283,8 @@ protected:
     ParserPtr elem_parser;
     bool allow_alias_without_as_keyword;
 
-    const char * getName() const { return "element of expression with optional alias"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "element of expression with optional alias"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 
@@ -296,8 +296,8 @@ protected:
 class ParserOrderByElement : public IParserBase
 {
 protected:
-    const char * getName() const { return "element of ORDER BY expression"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "element of ORDER BY expression"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 /** Parser for function with arguments like KEY VALUE (space separated)
@@ -316,8 +316,8 @@ protected:
 class ParserIdentifierWithOptionalParameters : public IParserBase
 {
 protected:
-    const char * getName() const { return "identifier with optional parameters"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "identifier with optional parameters"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 /** Element of TTL expression - same as expression element, but in addition,
@@ -326,8 +326,8 @@ protected:
 class ParserTTLElement : public IParserBase
 {
 protected:
-    const char * getName() const { return "element of TTL expression"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "element of TTL expression"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
 }
@@ -27,8 +27,8 @@ public:
     {
     }
 protected:
-    const char * getName() const { return "list of elements"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "list of elements"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 private:
     ParserPtr elem_parser;
     ParserPtr separator_parser;
@@ -63,9 +63,9 @@ public:
     }

 protected:
-    const char * getName() const { return "list, delimited by binary operators"; }
+    const char * getName() const override { return "list, delimited by binary operators"; }

-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };


@@ -86,9 +86,9 @@ public:
     }

 protected:
-    const char * getName() const { return "list, delimited by operator of variable arity"; }
+    const char * getName() const override { return "list, delimited by operator of variable arity"; }

-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };


@@ -110,8 +110,8 @@ public:
     }

 protected:
-    const char * getName() const { return "expression with prefix unary operator"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "expression with prefix unary operator"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };


@@ -121,9 +121,9 @@ private:
     static const char * operators[];

 protected:
-    const char * getName() const { return "array element expression"; }
+    const char * getName() const override{ return "array element expression"; }

-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };


@@ -133,9 +133,9 @@ private:
     static const char * operators[];

 protected:
-    const char * getName() const { return "tuple element expression"; }
+    const char * getName() const override { return "tuple element expression"; }

-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };


@@ -146,9 +146,9 @@ private:
     ParserPrefixUnaryOperatorExpression operator_parser {operators, std::make_unique<ParserTupleElementExpression>()};

 protected:
-    const char * getName() const { return "unary minus expression"; }
+    const char * getName() const override { return "unary minus expression"; }

-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };


@@ -159,9 +159,9 @@ private:
    ParserLeftAssociativeBinaryOperatorList operator_parser {operators, std::make_unique<ParserUnaryMinusExpression>()};

 protected:
-    const char * getName() const { return "multiplicative expression"; }
+    const char * getName() const override{ return "multiplicative expression"; }

-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override
     {
         return operator_parser.parse(pos, node, expected);
     }
@@ -174,8 +174,8 @@ class ParserIntervalOperatorExpression : public IParserBase
 protected:
     ParserMultiplicativeExpression next_parser;

-    const char * getName() const { return "INTERVAL operator expression"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override{ return "INTERVAL operator expression"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };


@@ -186,9 +186,9 @@ private:
     ParserLeftAssociativeBinaryOperatorList operator_parser {operators, std::make_unique<ParserIntervalOperatorExpression>()};

 protected:
-    const char * getName() const { return "additive expression"; }
+    const char * getName() const override{ return "additive expression"; }

-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override
     {
         return operator_parser.parse(pos, node, expected);
     }
@@ -200,9 +200,9 @@ class ParserConcatExpression : public IParserBase
     ParserVariableArityOperatorList operator_parser {"||", "concat", std::make_unique<ParserAdditiveExpression>()};

 protected:
-    const char * getName() const { return "string concatenation expression"; }
+    const char * getName() const override { return "string concatenation expression"; }

-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override
     {
         return operator_parser.parse(pos, node, expected);
     }
@@ -215,9 +215,9 @@ private:
     ParserConcatExpression elem_parser;

 protected:
-    const char * getName() const { return "BETWEEN expression"; }
+    const char * getName() const override { return "BETWEEN expression"; }

-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };


@@ -228,9 +228,9 @@ private:
     ParserLeftAssociativeBinaryOperatorList operator_parser {operators, std::make_unique<ParserBetweenExpression>()};

 protected:
-    const char * getName() const { return "comparison expression"; }
+    const char * getName() const override{ return "comparison expression"; }

-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override
     {
         return operator_parser.parse(pos, node, expected);
     }
@@ -257,9 +257,9 @@ private:
     ParserPrefixUnaryOperatorExpression operator_parser {operators, std::make_unique<ParserNullityChecking>()};

 protected:
-    const char * getName() const { return "logical-NOT expression"; }
+    const char * getName() const override{ return "logical-NOT expression"; }

-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override
     {
         return operator_parser.parse(pos, node, expected);
     }
@@ -272,9 +272,9 @@ private:
     ParserVariableArityOperatorList operator_parser {"AND", "and", std::make_unique<ParserLogicalNotExpression>()};

 protected:
-    const char * getName() const { return "logical-AND expression"; }
+    const char * getName() const override { return "logical-AND expression"; }

-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override
     {
         return operator_parser.parse(pos, node, expected);
     }
@@ -287,9 +287,9 @@ private:
     ParserVariableArityOperatorList operator_parser {"OR", "or", std::make_unique<ParserLogicalAndExpression>()};

 protected:
-    const char * getName() const { return "logical-OR expression"; }
+    const char * getName() const override { return "logical-OR expression"; }

-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override
     {
         return operator_parser.parse(pos, node, expected);
     }
@@ -305,9 +305,9 @@ private:
     ParserLogicalOrExpression elem_parser;

 protected:
-    const char * getName() const { return "expression with ternary operator"; }
+    const char * getName() const override { return "expression with ternary operator"; }

-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };


@@ -317,9 +317,9 @@ private:
     ParserTernaryOperatorExpression elem_parser;

 protected:
-    const char * getName() const { return "lambda expression"; }
+    const char * getName() const override { return "lambda expression"; }

-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };


@@ -333,9 +333,9 @@ public:
 protected:
     ParserPtr impl;

-    const char * getName() const { return "expression with optional alias"; }
+    const char * getName() const override { return "expression with optional alias"; }

-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override
     {
         return impl->parse(pos, node, expected);
     }
@@ -352,8 +352,8 @@ public:
 protected:
     bool allow_alias_without_as_keyword;

-    const char * getName() const { return "list of expressions"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "list of expressions"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };


@@ -365,16 +365,16 @@ public:
 private:
     ParserExpressionList nested_parser;
 protected:
-    const char * getName() const { return "not empty list of expressions"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "not empty list of expressions"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };


 class ParserOrderByExpressionList : public IParserBase
 {
 protected:
-    const char * getName() const { return "order by expression"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "order by expression"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };


@@ -399,8 +399,8 @@ protected:
 class ParserTTLExpressionList : public IParserBase
 {
 protected:
-    const char * getName() const { return "ttl expression"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "ttl expression"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 }
@@ -35,7 +35,7 @@ public:
         return res;
     }

-    bool parse(Pos & pos, ASTPtr & node, Expected & expected);
+    bool parse(Pos & pos, ASTPtr & node, Expected & expected) override;

 protected:
     virtual bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) = 0;
@@ -27,16 +27,16 @@ namespace DB
 class ParserAlterQuery : public IParserBase
 {
 protected:
-    const char * getName() const { return "ALTER query"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override{ return "ALTER query"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };


 class ParserAlterCommandList : public IParserBase
 {
 protected:
-    const char * getName() const { return "a list of ALTER commands"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override{ return "a list of ALTER commands"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;

 public:
     bool is_live_view;
@@ -48,8 +48,8 @@ public:
 class ParserAlterCommand : public IParserBase
 {
 protected:
-    const char * getName() const { return "ALTER command"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override{ return "ALTER command"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;

 public:
     bool is_live_view;
@@ -62,8 +62,8 @@ public:
 class ParserAssignment : public IParserBase
 {
 protected:
-    const char * getName() const { return "column assignment"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override{ return "column assignment"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 }
@@ -10,8 +10,8 @@ namespace DB
 class ParserCheckQuery : public IParserBase
 {
 protected:
-    const char * getName() const { return "ALTER query"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override{ return "ALTER query"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 }
@@ -19,8 +19,8 @@ namespace DB
 class ParserNestedTable : public IParserBase
 {
 protected:
-    const char * getName() const { return "nested table"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "nested table"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };


@@ -33,16 +33,16 @@ protected:
 class ParserIdentifierWithParameters : public IParserBase
 {
 protected:
-    const char * getName() const { return "identifier with parameters"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "identifier with parameters"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 template <typename NameParser>
 class IParserNameTypePair : public IParserBase
 {
 protected:
-    const char * getName() const { return "name and type pair"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override{ return "name and type pair"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 /** The name and type are separated by a space. For example, URL String. */
@@ -75,16 +75,16 @@ bool IParserNameTypePair<NameParser>::parseImpl(Pos & pos, ASTPtr & node, Expect
 class ParserNameTypePairList : public IParserBase
 {
 protected:
-    const char * getName() const { return "name and type pair list"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "name and type pair list"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 /** List of table names. */
 class ParserNameList : public IParserBase
 {
 protected:
-    const char * getName() const { return "name list"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "name list"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };


@@ -99,9 +99,9 @@ public:
 protected:
     using ASTDeclarePtr = std::shared_ptr<ASTColumnDeclaration>;

-    const char * getName() const { return "column declaration"; }
+    const char * getName() const override{ return "column declaration"; }

-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;

     bool require_type = true;
 };
@@ -224,8 +224,8 @@ bool IParserColumnDeclaration<NameParser>::parseImpl(Pos & pos, ASTPtr & node, E
 class ParserColumnDeclarationList : public IParserBase
 {
 protected:
-    const char * getName() const { return "column declaration list"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "column declaration list"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };


@@ -284,8 +284,8 @@ protected:
 class ParserStorage : public IParserBase
 {
 protected:
-    const char * getName() const { return "storage definition"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "storage definition"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 /** Query like this:
@@ -308,32 +308,32 @@ protected:
 class ParserCreateTableQuery : public IParserBase
 {
 protected:
-    const char * getName() const { return "CREATE TABLE or ATTACH TABLE query"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "CREATE TABLE or ATTACH TABLE query"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 /// CREATE|ATTACH LIVE VIEW [IF NOT EXISTS] [db.]name [TO [db.]name] AS SELECT ...
 class ParserCreateLiveViewQuery : public IParserBase
 {
 protected:
-    const char * getName() const { return "CREATE LIVE VIEW query"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "CREATE LIVE VIEW query"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 /// CREATE|ATTACH DATABASE db [ENGINE = engine]
 class ParserCreateDatabaseQuery : public IParserBase
 {
 protected:
-    const char * getName() const { return "CREATE DATABASE query"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "CREATE DATABASE query"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 /// CREATE[OR REPLACE]|ATTACH [[MATERIALIZED] VIEW] | [VIEW]] [IF NOT EXISTS] [db.]name [TO [db.]name] [ENGINE = engine] [POPULATE] AS SELECT ...
 class ParserCreateViewQuery : public IParserBase
 {
 protected:
-    const char * getName() const { return "CREATE VIEW query"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "CREATE VIEW query"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 /// Parses complete dictionary create query. Uses ParserDictionary and
@@ -372,8 +372,8 @@ protected:
 class ParserCreateQuery : public IParserBase
 {
 protected:
-    const char * getName() const { return "CREATE TABLE or ATTACH TABLE query"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "CREATE TABLE or ATTACH TABLE query"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 }
@@ -13,8 +13,8 @@ namespace DB
 class ParserDescribeTableQuery : public IParserBase
 {
 protected:
-    const char * getName() const { return "DESCRIBE query"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "DESCRIBE query"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 }
@@ -25,8 +25,8 @@ protected:
 class ParserDictionaryAttributeDeclarationList : public IParserBase
 {
 protected:
-    const char * getName() const { return "attribute declaration list"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override{ return "attribute declaration list"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 }
@@ -19,8 +19,8 @@ namespace DB
 class ParserDropQuery : public IParserBase
 {
 protected:
-    const char * getName() const { return "DROP query"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override{ return "DROP query"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;

     bool parseDropQuery(Pos & pos, ASTPtr & node, Expected & expected);
     bool parseDetachQuery(Pos & pos, ASTPtr & node, Expected & expected);
@@ -12,8 +12,8 @@ namespace DB
 class ParserOptimizeQuery : public IParserBase
 {
 protected:
-    const char * getName() const { return "OPTIMIZE query"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "OPTIMIZE query"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 }
@@ -14,8 +14,8 @@ namespace DB
 class ParserRenameQuery : public IParserBase
 {
 protected:
-    const char * getName() const { return "RENAME query"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override{ return "RENAME query"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 }
@@ -11,8 +11,8 @@ namespace DB
 class ParserRoleList : public IParserBase
 {
 protected:
-    const char * getName() const { return "RoleList"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "RoleList"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 }
@@ -12,8 +12,8 @@ namespace DB
 class ParserSampleRatio : public IParserBase
 {
 protected:
-    const char * getName() const { return "Sample ratio or offset"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "Sample ratio or offset"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 }
@@ -14,9 +14,9 @@ namespace DB
 class ParserShowProcesslistQuery : public IParserBase
 {
 protected:
-    const char * getName() const { return "SHOW PROCESSLIST query"; }
+    const char * getName() const override { return "SHOW PROCESSLIST query"; }

-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override
     {
         auto query = std::make_shared<ASTShowProcesslistQuery>();

@@ -14,8 +14,8 @@ namespace DB
 class ParserShowTablesQuery : public IParserBase
 {
 protected:
-    const char * getName() const { return "SHOW [TEMPORARY] TABLES|DATABASES [[NOT] LIKE 'str'] [LIMIT expr]"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "SHOW [TEMPORARY] TABLES|DATABASES [[NOT] LIKE 'str'] [LIMIT expr]"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 }
@@ -11,8 +11,8 @@ namespace DB
 class ParserTablesInSelectQuery : public IParserBase
 {
 protected:
-    const char * getName() const { return "table, table function, subquery or list of joined tables"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "table, table function, subquery or list of joined tables"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };


@@ -22,8 +22,8 @@ public:
     ParserTablesInSelectQueryElement(bool is_first_) : is_first(is_first_) {}

 protected:
-    const char * getName() const { return "table, table function, subquery or list of joined tables"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "table, table function, subquery or list of joined tables"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;

 private:
     bool is_first;
@@ -33,16 +33,16 @@ private:
 class ParserTableExpression : public IParserBase
 {
 protected:
-    const char * getName() const { return "table or subquery or table function"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "table or subquery or table function"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };


 class ParserArrayJoin : public IParserBase
 {
 protected:
-    const char * getName() const { return "array join"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "array join"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

@@ -11,8 +11,8 @@ namespace DB
 class ParserUseQuery : public IParserBase
 {
 protected:
-    const char * getName() const { return "USE query"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override{ return "USE query"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 }
@@ -23,8 +23,8 @@ namespace DB
 class ParserWatchQuery : public IParserBase
 {
 protected:
-    const char * getName() const { return "WATCH query"; }
-    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected);
+    const char * getName() const override { return "WATCH query"; }
+    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };

 }
@@ -259,7 +259,6 @@ bool PipelineExecutor::prepareProcessor(UInt64 pid, size_t thread_number, Queue
     /// In this method we have ownership on node.
     auto & node = graph[pid];

-    bool need_traverse = false;
     bool need_expand_pipeline = false;

     std::vector<Edge *> updated_back_edges;
@@ -290,13 +289,11 @@ bool PipelineExecutor::prepareProcessor(UInt64 pid, size_t thread_number, Queue
         case IProcessor::Status::NeedData:
         case IProcessor::Status::PortFull:
         {
-            need_traverse = true;
             node.status = ExecStatus::Idle;
             break;
         }
         case IProcessor::Status::Finished:
         {
-            need_traverse = true;
             node.status = ExecStatus::Finished;
             break;
         }
@@ -325,7 +322,6 @@ bool PipelineExecutor::prepareProcessor(UInt64 pid, size_t thread_number, Queue
         }
     }

-    if (need_traverse)
     {
         for (auto & edge_id : node.post_updated_input_ports)
         {
@@ -346,7 +342,6 @@ bool PipelineExecutor::prepareProcessor(UInt64 pid, size_t thread_number, Queue
         }
     }

-    if (need_traverse)
     {
         for (auto & edge : updated_direct_edges)
         {
@@ -543,7 +538,13 @@ void PipelineExecutor::executeSingleThread(size_t thread_num, size_t num_threads

                 if (!task_queue.empty() && !threads_queue.empty() /*&& task_queue.quota() > threads_queue.size()*/)
                 {
-                    auto thread_to_wake = threads_queue.pop_any();
+                    auto thread_to_wake = task_queue.getAnyThreadWithTasks(thread_num + 1 == num_threads ? 0 : (thread_num + 1));
+
+                    if (threads_queue.has(thread_to_wake))
+                        threads_queue.pop(thread_to_wake);
+                    else
+                        thread_to_wake = threads_queue.pop_any();
+
                     lock.unlock();
                     wake_up_executor(thread_to_wake);
                 }
@@ -627,9 +628,15 @@ void PipelineExecutor::executeSingleThread(size_t thread_num, size_t num_threads
                     queue.pop();
                 }

-                if (!threads_queue.empty() /* && task_queue.quota() > threads_queue.size()*/)
+                if (!threads_queue.empty() && !finished /* && task_queue.quota() > threads_queue.size()*/)
                 {
-                    auto thread_to_wake = threads_queue.pop_any();
+                    auto thread_to_wake = task_queue.getAnyThreadWithTasks(thread_num + 1 == num_threads ? 0 : (thread_num + 1));
+
+                    if (threads_queue.has(thread_to_wake))
+                        threads_queue.pop(thread_to_wake);
+                    else
+                        thread_to_wake = threads_queue.pop_any();
+
                     lock.unlock();
+
                     wake_up_executor(thread_to_wake);
@@ -149,32 +149,37 @@ private:
             ++quota_;
     }

-    ExecutionState * pop(size_t thread_num)
+    size_t getAnyThreadWithTasks(size_t from_thread = 0)
     {
         if (size_ == 0)
-            throw Exception("TaskQueue is not empty.", ErrorCodes::LOGICAL_ERROR);
+            throw Exception("TaskQueue is empty.", ErrorCodes::LOGICAL_ERROR);

         for (size_t i = 0; i < queues.size(); ++i)
         {
-            if (!queues[thread_num].empty())
-            {
-                ExecutionState * state = queues[thread_num].front();
-                queues[thread_num].pop();
+            if (!queues[from_thread].empty())
+                return from_thread;

-                --size_;
-
-                if (state->has_quota)
-                    ++quota_;
-
-                return state;
-            }
-
-            ++thread_num;
-            if (thread_num >= queues.size())
-                thread_num = 0;
+            ++from_thread;
+            if (from_thread >= queues.size())
+                from_thread = 0;
         }

-        throw Exception("TaskQueue is not empty.", ErrorCodes::LOGICAL_ERROR);
+        throw Exception("TaskQueue is empty.", ErrorCodes::LOGICAL_ERROR);
+    }
+
+    ExecutionState * pop(size_t thread_num)
+    {
+        auto thread_with_tasks = getAnyThreadWithTasks(thread_num);
+
+        ExecutionState * state = queues[thread_with_tasks].front();
+        queues[thread_with_tasks].pop();
+
+        --size_;
+
+        if (state->has_quota)
+            ++quota_;
+
+        return state;
     }

     size_t size() const { return size_; }
@@ -33,6 +33,10 @@ ISimpleTransform::Status ISimpleTransform::prepare()
         {
             output.pushData(std::move(current_data));
             transformed = false;
+
+            if (!no_more_data_needed)
+                return Status::PortFull;
+
         }

         /// Stop if don't need more data.
@@ -52,12 +56,13 @@ ISimpleTransform::Status ISimpleTransform::prepare()
             return Status::Finished;
         }

-        input.setNeeded();
-
         if (!input.hasData())
+        {
+            input.setNeeded();
             return Status::NeedData;
+        }

-        current_data = input.pullData();
+        current_data = input.pullData(true);
         has_input = true;

         if (current_data.exception)
@@ -161,12 +161,17 @@ protected:
             throw Exception("Cannot push block to port which already has data.", ErrorCodes::LOGICAL_ERROR);
     }

-    void ALWAYS_INLINE pull(DataPtr & data_, std::uintptr_t & flags)
+    void ALWAYS_INLINE pull(DataPtr & data_, std::uintptr_t & flags, bool set_not_needed = false)
     {
-        flags = data_.swap(data, 0, HAS_DATA);
+        uintptr_t mask = HAS_DATA;
+
+        if (set_not_needed)
+            mask |= IS_NEEDED;
+
+        flags = data_.swap(data, 0, mask);

         /// It's ok to check because this flag can be changed only by pulling thread.
-        if (unlikely((flags & IS_NEEDED) == 0))
+        if (unlikely((flags & IS_NEEDED) == 0) && !set_not_needed)
             throw Exception("Cannot pull block from port which is not needed.", ErrorCodes::LOGICAL_ERROR);

         if (unlikely((flags & HAS_DATA) == 0))
@@ -266,14 +271,15 @@ private:
 public:
     using Port::Port;

-    Data ALWAYS_INLINE pullData()
+    Data ALWAYS_INLINE pullData(bool set_not_needed = false)
     {
-        updateVersion();
+        if (!set_not_needed)
+            updateVersion();

         assumeConnected();

         std::uintptr_t flags = 0;
-        state->pull(data, flags);
+        state->pull(data, flags, set_not_needed);

         is_finished = flags & State::IS_FINISHED;

@@ -293,9 +299,9 @@ public:
         return std::move(*data);
     }

-    Chunk ALWAYS_INLINE pull()
+    Chunk ALWAYS_INLINE pull(bool set_not_needed = false)
    {
-        auto data_ = pullData();
+        auto data_ = pullData(set_not_needed);

         if (data_.exception)
             std::rethrow_exception(data_.exception);
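The new set_not_needed flag on pull()/pullData() lets the pulling processor drop the IS_NEEDED bit in the same atomic swap that takes the data, instead of issuing a separate setNotNeeded() afterwards, and it also skips updateVersion() in that case. A rough model of the flag handling, assuming a simple atomic bitmask where the real Port uses a tagged pointer swap (flag values and the helper name are illustrative):

#include <atomic>
#include <cstdint>

// Illustrative flag bits; the real Port packs them into a tagged pointer.
constexpr std::uintptr_t HAS_DATA  = 1u;
constexpr std::uintptr_t IS_NEEDED = 2u;

// One atomic operation both takes the data bit and, optionally, drops
// IS_NEEDED, mirroring pull(..., set_not_needed) above.
std::uintptr_t pullFlags(std::atomic<std::uintptr_t> & state, bool set_not_needed)
{
    std::uintptr_t mask = HAS_DATA;
    if (set_not_needed)
        mask |= IS_NEEDED;

    // fetch_and clears the masked bits and returns the previous flags,
    // so the caller can still check what was set before the pull.
    return state.fetch_and(~mask);
}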
@@ -234,7 +234,7 @@ void QueryPipeline::addDelayedStream(ProcessorPtr source)
     addPipe({ std::move(processor) });
 }

-void QueryPipeline::resize(size_t num_streams, bool force)
+void QueryPipeline::resize(size_t num_streams, bool force, bool strict)
 {
     checkInitialized();

@@ -243,7 +243,13 @@ void QueryPipeline::resize(size_t num_streams, bool force, bool strict)

     has_resize = true;

-    auto resize = std::make_shared<ResizeProcessor>(current_header, getNumStreams(), num_streams);
+    ProcessorPtr resize;
+
+    if (strict)
+        resize = std::make_shared<StrictResizeProcessor>(current_header, getNumStreams(), num_streams);
+    else
+        resize = std::make_shared<ResizeProcessor>(current_header, getNumStreams(), num_streams);

     auto stream = streams.begin();
     for (auto & input : resize->getInputs())
         connect(**(stream++), input);
@@ -61,7 +61,7 @@ public:
     /// Check if resize transform was used. (In that case another distinct transform will be added).
     bool hasMixedStreams() const { return has_resize || hasMoreThanOneStream(); }

-    void resize(size_t num_streams, bool force = false);
+    void resize(size_t num_streams, bool force = false, bool strict = false);

     void enableQuotaForCurrentStreams();

@@ -1,5 +1,5 @@
 #include <Processors/ResizeProcessor.h>
-
+#include <iostream>

 namespace DB
 {
@@ -257,5 +257,143 @@ IProcessor::Status ResizeProcessor::prepare(const PortNumbers & updated_inputs,
     return Status::PortFull;
 }

+IProcessor::Status StrictResizeProcessor::prepare(const PortNumbers & updated_inputs, const PortNumbers & updated_outputs)
+{
+    if (!initialized)
+    {
+        initialized = true;
+
+        for (auto & input : inputs)
+            input_ports.push_back({.port = &input, .status = InputStatus::NotActive, .waiting_output = -1});
+
+        for (UInt64 i = 0; i < input_ports.size(); ++i)
+            disabled_input_ports.push(i);
+
+        for (auto & output : outputs)
+            output_ports.push_back({.port = &output, .status = OutputStatus::NotActive});
+    }
+
+    for (auto & output_number : updated_outputs)
+    {
+        auto & output = output_ports[output_number];
+        if (output.port->isFinished())
+        {
+            if (output.status != OutputStatus::Finished)
+            {
+                ++num_finished_outputs;
+                output.status = OutputStatus::Finished;
+            }
+
+            continue;
+        }
+
+        if (output.port->canPush())
+        {
+            if (output.status != OutputStatus::NeedData)
+            {
+                output.status = OutputStatus::NeedData;
+                waiting_outputs.push(output_number);
+            }
+        }
+    }
+
+    if (num_finished_outputs == outputs.size())
+    {
+        for (auto & input : inputs)
+            input.close();
+
+        return Status::Finished;
+    }
+
+    std::queue<UInt64> inputs_with_data;
+
+    for (auto & input_number : updated_inputs)
+    {
+        auto & input = input_ports[input_number];
+        if (input.port->isFinished())
+        {
+            if (input.status != InputStatus::Finished)
+            {
+                input.status = InputStatus::Finished;
+                ++num_finished_inputs;
+
+                waiting_outputs.push(input.waiting_output);
+            }
+            continue;
+        }
+
+        if (input.port->hasData())
+        {
+            if (input.status != InputStatus::NotActive)
+            {
+                input.status = InputStatus::NotActive;
+                inputs_with_data.push(input_number);
+            }
+        }
+    }
+
+    while (!inputs_with_data.empty())
+    {
+        auto input_number = inputs_with_data.front();
+        auto & input_with_data = input_ports[input_number];
+        inputs_with_data.pop();
+
+        if (input_with_data.waiting_output == -1)
+            throw Exception("No associated output for input with data.", ErrorCodes::LOGICAL_ERROR);
+
+        auto & waiting_output = output_ports[input_with_data.waiting_output];
+
+        if (waiting_output.status != OutputStatus::NeedData)
+            throw Exception("Invalid status for associated output.", ErrorCodes::LOGICAL_ERROR);
+
+        waiting_output.port->pushData(input_with_data.port->pullData(/* set_not_needed = */ true));
+        waiting_output.status = OutputStatus::NotActive;
+
+        if (input_with_data.port->isFinished())
+        {
+            input_with_data.status = InputStatus::Finished;
+            ++num_finished_inputs;
+        }
+        else
+            disabled_input_ports.push(input_number);
+    }
+
+    if (num_finished_inputs == inputs.size())
+    {
+        for (auto & output : outputs)
+            output.finish();
+
+        return Status::Finished;
+    }
+
+    /// Enable more inputs if needed.
+    while (!disabled_input_ports.empty() && !waiting_outputs.empty())
+    {
+        auto & input = input_ports[disabled_input_ports.front()];
+        disabled_input_ports.pop();
+
+        input.port->setNeeded();
+        input.status = InputStatus::NeedData;
+        input.waiting_output = waiting_outputs.front();
+
+        waiting_outputs.pop();
+    }
+
+    while (!waiting_outputs.empty())
+    {
+        auto & output = output_ports[waiting_outputs.front()];
+        waiting_outputs.pop();
+
+        output.status = OutputStatus::Finished;
+        output.port->finish();
+        ++num_finished_outputs;
+    }
+
+    if (disabled_input_ports.empty())
+        return Status::NeedData;
+
+    return Status::PortFull;
+}
+
 }
|
@ -74,4 +74,60 @@ private:
|
||||
std::vector<OutputPortWithStatus> output_ports;
|
||||
};
|
||||
|
||||
class StrictResizeProcessor : public IProcessor
|
||||
{
|
||||
public:
|
||||
/// TODO Check that there is non zero number of inputs and outputs.
|
||||
StrictResizeProcessor(const Block & header, size_t num_inputs, size_t num_outputs)
|
||||
: IProcessor(InputPorts(num_inputs, header), OutputPorts(num_outputs, header))
|
||||
, current_input(inputs.begin())
|
||||
, current_output(outputs.begin())
|
||||
{
|
||||
}
|
||||
|
||||
String getName() const override { return "StrictResize"; }
|
||||
|
||||
Status prepare(const PortNumbers &, const PortNumbers &) override;
|
||||
|
||||
private:
|
||||
InputPorts::iterator current_input;
|
||||
OutputPorts::iterator current_output;
|
||||
|
||||
size_t num_finished_inputs = 0;
|
||||
size_t num_finished_outputs = 0;
|
||||
std::queue<UInt64> disabled_input_ports;
|
||||
std::queue<UInt64> waiting_outputs;
|
||||
bool initialized = false;
|
||||
|
||||
enum class OutputStatus
|
||||
{
|
||||
NotActive,
|
||||
NeedData,
|
||||
Finished,
|
||||
};
|
||||
|
||||
enum class InputStatus
|
||||
{
|
||||
NotActive,
|
||||
NeedData,
|
||||
Finished,
|
||||
};
|
||||
|
||||
struct InputPortWithStatus
|
||||
{
|
||||
InputPort * port;
|
||||
InputStatus status;
|
||||
ssize_t waiting_output;
|
||||
};
|
||||
|
||||
struct OutputPortWithStatus
|
||||
{
|
||||
OutputPort * port;
|
||||
OutputStatus status;
|
||||
};
|
||||
|
||||
std::vector<InputPortWithStatus> input_ports;
|
||||
std::vector<OutputPortWithStatus> output_ports;
|
||||
};
|
||||
|
||||
}
|
||||
|
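Unlike the plain ResizeProcessor, which lets any input feed any free output, the StrictResizeProcessor pins each enabled input to the single output that asked for data (the waiting_output field above), so each output sees a stable subset of the input streams. A compact sketch of just that pairing step, with simplified bookkeeping and a hypothetical helper name rather than the class interface above:

#include <cstdint>
#include <queue>
#include <vector>

// Pair each idle (disabled) input with exactly one output that asked for
// data, FIFO on both sides; -1 marks "no output assigned yet".
void pairInputsWithOutputs(std::queue<uint64_t> & disabled_inputs,
                           std::queue<uint64_t> & waiting_outputs,
                           std::vector<int64_t> & waiting_output_of_input)
{
    while (!disabled_inputs.empty() && !waiting_outputs.empty())
    {
        uint64_t input = disabled_inputs.front();
        disabled_inputs.pop();

        // Remember which output this input must feed once its data arrives.
        waiting_output_of_input[input] = static_cast<int64_t>(waiting_outputs.front());
        waiting_outputs.pop();
    }
}

When data later arrives on a paired input, prepare() pushes it straight to the remembered output and returns the input to the disabled queue, keeping the one-to-one correspondence for the whole lifetime of the stream.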
@@ -78,6 +78,7 @@ public:
     {
         std::atomic<UInt32> next_bucket_to_merge = 0;
         std::array<std::atomic<Int32>, NUM_BUCKETS> source_for_bucket;
+        std::atomic<bool> is_cancelled = false;

         SharedData()
         {
@@ -112,7 +113,7 @@ protected:
         if (bucket_num >= NUM_BUCKETS)
             return {};

-        Block block = params->aggregator.mergeAndConvertOneBucketToBlock(*data, arena, params->final, bucket_num);
+        Block block = params->aggregator.mergeAndConvertOneBucketToBlock(*data, arena, params->final, bucket_num, &shared_data->is_cancelled);
         Chunk chunk = convertToChunk(block);

         shared_data->source_for_bucket[bucket_num] = source_number;
@@ -201,6 +202,9 @@ public:
             for (auto & input : inputs)
                 input.close();

+            if (shared_data)
+                shared_data->is_cancelled.store(true);
+
             return Status::Finished;
         }

@@ -429,11 +433,16 @@ IProcessor::Status AggregatingTransform::prepare()
             }
         }

-        input.setNeeded();
         if (!input.hasData())
+        {
+            input.setNeeded();
             return Status::NeedData;
+        }

-        current_chunk = input.pull();
+        if (is_consume_finished)
+            input.setNeeded();
+
+        current_chunk = input.pull(/*set_not_needed = */ !is_consume_finished);
         read_current_chunk = true;

         if (is_consume_finished)
@@ -74,16 +74,8 @@ IProcessor::Status MergingSortedTransform::prepare()
         return Status::Finished;
     }

-    if (!output.isNeeded())
-    {
-        for (auto & in : inputs)
-            in.setNotNeeded();
-
-        return Status::PortFull;
-    }
-
-    if (output.hasData())
-        return Status::PortFull;
+    /// Do not disable inputs, so it will work in the same way as with AsynchronousBlockInputStream, like before.
+    bool is_port_full = !output.canPush();

     /// Special case for single input.
     if (inputs.size() == 1)
@@ -96,14 +88,20 @@ IProcessor::Status MergingSortedTransform::prepare()
         }

         input.setNeeded();

         if (input.hasData())
-            output.push(input.pull());
+        {
+            if (!is_port_full)
+                output.push(input.pull());
+
+            return Status::PortFull;
+        }

         return Status::NeedData;
     }

     /// Push if has data.
-    if (merged_data.mergedRows())
+    if (merged_data.mergedRows() && !is_port_full)
         output.push(merged_data.pull());

     if (!is_initialized)
@@ -119,7 +117,7 @@ IProcessor::Status MergingSortedTransform::prepare()

             if (!cursors[i].empty())
             {
-                input.setNotNeeded();
+                // input.setNotNeeded();
                 continue;
             }

@@ -159,6 +157,10 @@ IProcessor::Status MergingSortedTransform::prepare()
     {
         if (is_finished)
         {
+
+            if (is_port_full)
+                return Status::PortFull;
+
             for (auto & input : inputs)
                 input.close();

@@ -192,6 +194,9 @@ IProcessor::Status MergingSortedTransform::prepare()
             need_data = false;
         }

+        if (is_port_full)
+            return Status::PortFull;
+
         return Status::Ready;
     }
 }
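The MergingSortedTransform change computes is_port_full once up front instead of returning PortFull immediately, so cursor initialization and input prefetching keep running while the output is blocked and only the pushes are deferred. A condensed model of that control flow, as a hypothetical standalone function rather than the real prepare():

// Condensed model: keep preparing inputs even while the output is blocked.
enum class Status { NeedData, PortFull, Ready };

Status prepareModel(bool output_can_push, bool has_merged_rows, bool inputs_need_data)
{
    const bool is_port_full = !output_can_push;

    if (has_merged_rows && !is_port_full)
    {
        // output.push(merged_data.pull()) would happen here.
    }

    if (inputs_need_data)
        return Status::NeedData;   // still request data, regardless of the output

    if (is_port_full)
        return Status::PortFull;   // everything is prepared; wait for the consumer

    return Status::Ready;
}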
@@ -241,12 +241,13 @@ IProcessor::Status SortingTransform::prepareConsume()
             if (input.isFinished())
                 return Status::Finished;

-            input.setNeeded();
-
             if (!input.hasData())
+            {
+                input.setNeeded();
                 return Status::NeedData;
+            }

-            current_chunk = input.pull();
+            current_chunk = input.pull(true);
         }

         /// Now consume.
@@ -114,6 +114,8 @@ public:
     /// Returns true if the blocks shouldn't be pushed to associated views on insert.
     virtual bool noPushingToViews() const { return false; }

+    virtual bool hasEvenlyDistributedRead() const { return false; }
+
     /// Optional size information of each physical column.
     /// Currently it's only used by the MergeTree family for query optimizations.
     using ColumnSizeByName = std::unordered_map<std::string, ColumnSize>;
@@ -138,6 +138,9 @@ MergeTreeData::MergeTreeData(
     , data_parts_by_state_and_info(data_parts_indexes.get<TagByStateAndInfo>())
     , parts_mover(this)
 {
+    if (relative_data_path.empty())
+        throw Exception("MergeTree storages require data path", ErrorCodes::INCORRECT_FILE_NAME);
+
     const auto settings = getSettings();
     setProperties(metadata);

@@ -394,7 +394,6 @@ void MergeTreeDataPart::remove() const
     String to = full_path + "delete_tmp_" + name;
     // TODO directory delete_tmp_<name> is never removed if server crashes before returning from this function

-
     Poco::File from_dir{from};
     Poco::File to_dir{to};

@@ -425,6 +425,12 @@ void StorageTinyLog::truncate(const ASTPtr &, const Context &, TableStructureWri
         addFiles(column.name, *column.type);
 }

+void StorageTinyLog::drop(TableStructureWriteLockHolder &)
+{
+    std::unique_lock<std::shared_mutex> lock(rwlock);
+    disk->removeRecursive(table_path);
+    files.clear();
+}

 void registerStorageTinyLog(StorageFactory & factory)
 {
@@ -46,6 +46,8 @@ public:

     void truncate(const ASTPtr &, const Context &, TableStructureWriteLockHolder &) override;

+    void drop(TableStructureWriteLockHolder &) override;
+
 protected:
     StorageTinyLog(
         DiskPtr disk_,
@@ -37,6 +37,8 @@ public:
         size_t max_block_size,
         unsigned num_streams) override;

+    bool hasEvenlyDistributedRead() const override { return true; }
+
 private:
     bool multithreaded;
     bool even_distribution;
0 dbms/tests/integration/test_tinylog_s3/__init__.py (new file)
@@ -0,0 +1,12 @@
+<yandex>
+    <shutdown_wait_unfinished>3</shutdown_wait_unfinished>
+    <logger>
+        <level>trace</level>
+        <log>/var/log/clickhouse-server/log.log</log>
+        <errorlog>/var/log/clickhouse-server/log.err.log</errorlog>
+        <size>1000M</size>
+        <count>10</count>
+        <stderr>/var/log/clickhouse-server/stderr.log</stderr>
+        <stdout>/var/log/clickhouse-server/stdout.log</stdout>
+    </logger>
+</yandex>
40 dbms/tests/integration/test_tinylog_s3/configs/config.xml (new file)
@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<yandex>
+    <logger>
+        <level>trace</level>
+        <log>/var/log/clickhouse-server/clickhouse-server.log</log>
+        <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
+        <size>1000M</size>
+        <count>10</count>
+    </logger>
+
+    <storage_configuration>
+        <disks>
+            <default>
+                <type>s3</type>
+                <endpoint>http://minio1:9001/root/data/</endpoint>
+                <access_key_id>minio</access_key_id>
+                <secret_access_key>minio123</secret_access_key>
+            </default>
+        </disks>
+    </storage_configuration>
+
+    <tcp_port>9000</tcp_port>
+    <listen_host>127.0.0.1</listen_host>
+
+    <openSSL>
+        <client>
+            <cacheSessions>true</cacheSessions>
+            <verificationMode>none</verificationMode>
+            <invalidCertificateHandler>
+                <name>AcceptCertificateHandler</name>
+            </invalidCertificateHandler>
+        </client>
+    </openSSL>
+
+    <max_concurrent_queries>500</max_concurrent_queries>
+    <mark_cache_size>5368709120</mark_cache_size>
+    <path>./clickhouse/</path>
+    <users_config>users.xml</users_config>
+</yandex>
23 dbms/tests/integration/test_tinylog_s3/configs/users.xml (new file)
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<yandex>
+    <profiles>
+        <default>
+        </default>
+    </profiles>
+
+    <users>
+        <default>
+            <password></password>
+            <networks incl="networks" replace="replace">
+                <ip>::/0</ip>
+            </networks>
+            <profile>default</profile>
+            <quota>default</quota>
+        </default>
+    </users>
+
+    <quotas>
+        <default>
+        </default>
+    </quotas>
+</yandex>
49 dbms/tests/integration/test_tinylog_s3/test.py (new file)
@@ -0,0 +1,49 @@
+import logging
+
+import pytest
+from helpers.cluster import ClickHouseCluster
+
+logging.getLogger().setLevel(logging.INFO)
+logging.getLogger().addHandler(logging.StreamHandler())
+
+
+# Creates S3 bucket for tests and allows anonymous read-write access to it.
+def prepare_s3_bucket(cluster):
+    minio_client = cluster.minio_client
+
+    if minio_client.bucket_exists(cluster.minio_bucket):
+        minio_client.remove_bucket(cluster.minio_bucket)
+
+    minio_client.make_bucket(cluster.minio_bucket)
+
+
+@pytest.fixture(scope="module")
+def cluster():
+    try:
+        cluster = ClickHouseCluster(__file__)
+        cluster.add_instance("node", config_dir="configs", with_minio=True)
+        logging.info("Starting cluster...")
+        cluster.start()
+        logging.info("Cluster started")
+
+        prepare_s3_bucket(cluster)
+        logging.info("S3 bucket created")
+
+        yield cluster
+    finally:
+        cluster.shutdown()
+
+
+def test_tinylog_s3(cluster):
+    node = cluster.instances["node"]
+    minio = cluster.minio_client
+
+    node.query("CREATE TABLE s3_test (id UInt64) Engine=TinyLog")
+    node.query("INSERT INTO s3_test SELECT number FROM numbers(3)")
+    assert node.query("SELECT * FROM s3_test") == "0\n1\n2\n"
+    assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == 2
+    node.query("INSERT INTO s3_test SELECT number + 3 FROM numbers(3)")
+    assert node.query("SELECT * FROM s3_test") == "0\n1\n2\n3\n4\n5\n"
+    assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == 2
+    node.query("DROP TABLE s3_test")
+    assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == 0
@@ -0,0 +1,16 @@
+-- unmerged state
+1 0000-00-00 00:00:00 0000-00-00 00:00:00 18446744073709551615 0
+1 0000-00-00 00:00:00 2000-01-01 10:00:00 7200 0
+1 0000-00-00 00:00:00 2000-01-01 11:10:00 600 0
+1 2000-01-01 08:00:00 0000-00-00 00:00:00 18446744073709551615 0
+1 2000-01-01 11:00:00 0000-00-00 00:00:00 18446744073709551615 3600
+2 0000-00-00 00:00:00 0000-00-00 00:00:00 18446744073709551615 0
+2 0000-00-00 00:00:00 2000-01-01 10:00:00 7200 0
+2 0000-00-00 00:00:00 2000-01-01 11:10:00 600 0
+2 0000-00-00 00:00:00 2001-01-01 11:10:02 1 0
+2 2000-01-01 08:00:00 0000-00-00 00:00:00 18446744073709551615 0
+2 2000-01-01 11:00:00 0000-00-00 00:00:00 18446744073709551615 3600
+2 2001-01-01 11:10:01 0000-00-00 00:00:00 18446744073709551615 31622401
+-- merged state
+1 2000-01-01 11:00:00 2000-01-01 11:10:00 600 3600
+2 2001-01-01 11:10:01 2001-01-01 11:10:02 1 31622401
@ -0,0 +1,127 @@
SYSTEM STOP MERGES;

-- incremental streaming use case
-- it makes sense only if the data filling order is guaranteed to be chronological

DROP TABLE IF EXISTS target_table;
DROP TABLE IF EXISTS logins;
DROP TABLE IF EXISTS mv_logins2target;
DROP TABLE IF EXISTS checkouts;
DROP TABLE IF EXISTS mv_checkouts2target;

-- this is the final table, which is filled incrementally from 2 different sources

CREATE TABLE target_table Engine=SummingMergeTree() ORDER BY id
AS
SELECT
number as id,
maxState( toDateTime(0) ) as latest_login_time,
maxState( toDateTime(0) ) as latest_checkout_time,
minState( toUInt64(-1) ) as fastest_session,
maxState( toUInt64(0) ) as biggest_inactivity_period
FROM numbers(50000)
GROUP BY id;

-- source table #1

CREATE TABLE logins (
id UInt64,
ts DateTime
) Engine=MergeTree ORDER BY id;


-- and an MV with something like feedback from the target table

CREATE MATERIALIZED VIEW mv_logins2target TO target_table
AS
SELECT
id,
maxState( ts ) as latest_login_time,
maxState( toDateTime(0) ) as latest_checkout_time,
minState( toUInt64(-1) ) as fastest_session,
if(max(current_latest_checkout_time) > 0, maxState(toUInt64(ts - current_latest_checkout_time)), maxState( toUInt64(0) ) ) as biggest_inactivity_period
FROM logins
LEFT JOIN (
SELECT
id,
maxMerge(latest_checkout_time) as current_latest_checkout_time

-- a normal MV sees only the incoming block, but we need something like feedback here,
-- so we join with the target table; the most important thing here is that
-- we extract from the target table only the rows affected by this MV, referencing the src table
-- a second time
FROM target_table
WHERE id IN (SELECT id FROM logins)
GROUP BY id
) USING (id)
GROUP BY id;


-- the same for the second pipeline
CREATE TABLE checkouts (
id UInt64,
ts DateTime
) Engine=MergeTree ORDER BY id;

CREATE MATERIALIZED VIEW mv_checkouts2target TO target_table
AS
SELECT
id,
maxState( toDateTime(0) ) as latest_login_time,
maxState( ts ) as latest_checkout_time,
if(max(current_latest_login_time) > 0, minState( toUInt64(ts - current_latest_login_time)), minState( toUInt64(-1) ) ) as fastest_session,
maxState( toUInt64(0) ) as biggest_inactivity_period
FROM checkouts
LEFT JOIN (SELECT id, maxMerge(latest_login_time) as current_latest_login_time FROM target_table WHERE id IN (SELECT id FROM checkouts) GROUP BY id) USING (id)
GROUP BY id;


-- feed with some initial values
INSERT INTO logins SELECT number as id, '2000-01-01 08:00:00' from numbers(50000);
INSERT INTO checkouts SELECT number as id, '2000-01-01 10:00:00' from numbers(50000);

-- ensure that we don't read the whole target table during the join
set max_rows_to_read = 2000;

INSERT INTO logins SELECT number as id, '2000-01-01 11:00:00' from numbers(1000);
INSERT INTO checkouts SELECT number as id, '2000-01-01 11:10:00' from numbers(1000);

set max_rows_to_read = 10;

INSERT INTO logins SELECT number+2 as id, '2001-01-01 11:10:01' from numbers(1);
INSERT INTO checkouts SELECT number+2 as id, '2001-01-01 11:10:02' from numbers(1);


set max_rows_to_read = 0;

select '-- unmerged state';

select
id,
finalizeAggregation(latest_login_time) as current_latest_login_time,
finalizeAggregation(latest_checkout_time) as current_latest_checkout_time,
finalizeAggregation(fastest_session) as current_fastest_session,
finalizeAggregation(biggest_inactivity_period) as current_biggest_inactivity_period
from target_table
where id in (1,2)
ORDER BY id, current_latest_login_time, current_latest_checkout_time;

select '-- merged state';

SELECT
id,
maxMerge(latest_login_time) as current_latest_login_time,
maxMerge(latest_checkout_time) as current_latest_checkout_time,
minMerge(fastest_session) as current_fastest_session,
maxMerge(biggest_inactivity_period) as current_biggest_inactivity_period
FROM target_table
where id in (1,2)
GROUP BY id
ORDER BY id;

DROP TABLE IF EXISTS logins;
DROP TABLE IF EXISTS mv_logins2target;
DROP TABLE IF EXISTS checkouts;
DROP TABLE IF EXISTS mv_checkouts2target;

SYSTEM START MERGES;
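The key trick in the test above is the feedback join: a materialized view normally sees only the block of rows being inserted, so to compute values that depend on previously accumulated state (for example, the gap between the last checkout and a new login), each view joins the incoming block back to the target table, restricted to the affected ids. A minimal sketch of the pattern, with hypothetical table and column names, could look like this:

```sql
-- Minimal sketch of the MV feedback pattern (hypothetical names).
-- `events` is a source table; `state` is a SummingMergeTree target
-- holding one aggregate-function state per id.
CREATE MATERIALIZED VIEW mv_events2state TO state
AS
SELECT
    id,
    maxState(ts) AS latest_event_time,
    -- Use the previously accumulated value (if any) to derive a new state.
    if(max(prev_latest_event_time) > 0,
       maxState(toUInt64(ts - prev_latest_event_time)),
       maxState(toUInt64(0))) AS biggest_gap
FROM events
LEFT JOIN
(
    -- Feedback: read back only the `state` rows touched by this insert.
    -- Inside the view, `events` refers to the freshly inserted block.
    SELECT id, maxMerge(latest_event_time) AS prev_latest_event_time
    FROM state
    WHERE id IN (SELECT id FROM events)
    GROUP BY id
) USING (id)
GROUP BY id;
```

The `WHERE id IN (...)` filter is what keeps the join cheap; the test asserts this by setting `max_rows_to_read` before the incremental inserts, so reading the whole target table would fail the query.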
@ -0,0 +1,7 @@
CREATE DATABASE memory_01069 ENGINE = Memory()
1
2
3
4
3
4
18
dbms/tests/queries/0_stateless/01069_database_memory.sql
Normal file
@ -0,0 +1,18 @@
DROP DATABASE IF EXISTS memory_01069;
CREATE DATABASE memory_01069 ENGINE = Memory;
SHOW CREATE DATABASE memory_01069;

CREATE TABLE memory_01069.mt (n UInt8) ENGINE = MergeTree() ORDER BY n;
CREATE TABLE memory_01069.file (n UInt8) ENGINE = File(CSV);

INSERT INTO memory_01069.mt VALUES (1), (2);
INSERT INTO memory_01069.file VALUES (3), (4);

SELECT * FROM memory_01069.mt ORDER BY n;
SELECT * FROM memory_01069.file ORDER BY n;

DROP TABLE memory_01069.mt;
SELECT * FROM memory_01069.mt ORDER BY n; -- { serverError 60 }
SELECT * FROM memory_01069.file ORDER BY n;

DROP DATABASE memory_01069;
@ -0,0 +1,2 @@
2
2
@ -0,0 +1,7 @@
create temporary table t1 (a Nullable(UInt8));
insert into t1 values (2.4);
select * from t1;

create temporary table t2 (a UInt8);
insert into t2 values (2.4);
select * from t2;
@ -6,7 +6,7 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

RUN apt-get update \
    && DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \
        p7zip-full bash git ncdu wget psmisc python3 python3-pip tzdata python3-dev g++ \
        p7zip-full bash git moreutils ncdu wget psmisc python3 python3-pip tzdata tree python3-dev g++ \
    && pip3 --no-cache-dir install clickhouse_driver \
    && apt-get purge --yes python3-dev g++ \
    && apt-get autoremove --yes \
@ -23,8 +23,16 @@ function download

la="$left_pr-$left_sha.tgz"
ra="$right_pr-$right_sha.tgz"
wget -q -nd -c "https://clickhouse-builds.s3.yandex.net/$left_pr/$left_sha/performance/performance.tgz" -O "$la" && tar -C left --strip-components=1 -zxvf "$la" &
wget -q -nd -c "https://clickhouse-builds.s3.yandex.net/$right_pr/$right_sha/performance/performance.tgz" -O "$ra" && tar -C right --strip-components=1 -zxvf "$ra" &

# might have the same version on left and right
if ! [ "$la" = "$ra" ]
then
    wget -q -nd -c "https://clickhouse-builds.s3.yandex.net/$left_pr/$left_sha/performance/performance.tgz" -O "$la" && tar -C left --strip-components=1 -zxvf "$la" &
    wget -q -nd -c "https://clickhouse-builds.s3.yandex.net/$right_pr/$right_sha/performance/performance.tgz" -O "$ra" && tar -C right --strip-components=1 -zxvf "$ra" &
else
    wget -q -nd -c "https://clickhouse-builds.s3.yandex.net/$left_pr/$left_sha/performance/performance.tgz" -O "$la" && { tar -C left --strip-components=1 -zxvf "$la" & tar -C right --strip-components=1 -zxvf "$ra" & } &
fi

cd db0 && wget -q -nd -c "https://s3.mds.yandex.net/clickhouse-private-datasets/hits_10m_single/partitions/hits_10m_single.tar" && tar -xvf hits_10m_single.tar &
cd db0 && wget -q -nd -c "https://s3.mds.yandex.net/clickhouse-private-datasets/hits_100m_single/partitions/hits_100m_single.tar" && tar -xvf hits_100m_single.tar &
cd db0 && wget -q -nd -c "https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_v1.tar" && tar -xvf hits_v1.tar &
@ -119,7 +127,7 @@ function run_tests
"$script_dir/perf.py" --help > /dev/null

# FIXME remove some broken long tests
rm left/performance/IPv* ||:
rm left/performance/{IPv4,IPv6,modulo,parse_engine_file,number_formatting_formats,select_format}.xml ||:

# Run the tests
for test in left/performance/*.xml
@ -1,7 +1,11 @@
#!/bin/bash
set -ex

cd /workspace
chown nobody workspace output
chgrp nogroup workspace output
chmod 777 workspace output

cd workspace

# We will compare to the most recent testing tag in master branch, let's find it.
rm -rf ch ||:
@ -22,7 +26,7 @@ set +e
# It's probably at fault for using `kill 0` as an error handling mechanism,
# but I can't be bothered to change this now.
set -m
../compare.sh 0 $ref_sha $PR_TO_TEST $SHA_TO_TEST 2>&1 | tee compare.log
time ../compare.sh 0 $ref_sha $PR_TO_TEST $SHA_TO_TEST 2>&1 | ts | tee compare.log
set +m

7z a /output/output.7z *.log *.tsv
@ -1,5 +1,6 @@
#!/usr/bin/python3

import os
import sys
import itertools
import clickhouse_driver
@ -12,6 +13,9 @@ import traceback
parser = argparse.ArgumentParser(description='Run performance test.')
# Explicitly decode files as UTF-8 because sometimes we have Russian characters in queries, and LANG=C is set.
parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, help='test description file')
parser.add_argument('--host', nargs='*', default=['127.0.0.1', '127.0.0.1'])
parser.add_argument('--port', nargs='*', default=[9001, 9002])
parser.add_argument('--runs', type=int, default=int(os.environ.get('CHPC_RUNS', 7)))
args = parser.parse_args()

tree = et.parse(args.file[0])
@ -28,7 +32,7 @@ if infinite_sign is not None:
    raise Exception('Looks like the test is infinite (sign 1)')

# Open connections
servers = [{'host': 'localhost', 'port': 9001, 'client_name': 'left'}, {'host': 'localhost', 'port': 9002, 'client_name': 'right'}]
servers = [{'host': host, 'port': port} for (host, port) in zip(args.host, args.port)]
connections = [clickhouse_driver.Client(**server) for server in servers]

# Check tables that should exist
@ -95,7 +99,7 @@ for q in test_queries:
    # excessive data.
    start_seconds = time.perf_counter()
    server_seconds = 0
    for run in range(0, 13):
    for run in range(0, args.runs):
        for conn_index, c in enumerate(connections):
            res = c.execute(q)
            print('query\t' + tsv_escape(q) + '\t' + str(run) + '\t' + str(conn_index) + '\t' + str(c.last_query.elapsed))
233
docs/README.md
@ -1,62 +1,209 @@
# How to Contribute to ClickHouse Documentation
# Contributing to ClickHouse Documentation

ClickHouse uses the "documentation as code" approach, so you can edit Markdown files in this folder from the GitHub web interface. Alternatively, fork the ClickHouse repository, edit, commit, push, and open a pull request.
## Why You Need to Document ClickHouse

At the moment documentation is bilingual in English and Russian. Try to keep all languages in sync if you can, but this is not strictly required. There are people who are responsible for monitoring language versions and syncing them. If you add a new article, you should also add it to `toc_{en,ru,zh,ja,fa}.yaml` files with the pages index.
The main reason is that ClickHouse is an open source project, and if you don't write the docs, nobody does. "Incomplete or Confusing Documentation" is the top complaint about open source software according to the results of the 2017 [GitHub Open Source Survey](http://opensourcesurvey.org/2017/). Documentation is highly valued but often overlooked. One of the most important contributions someone can make to an open source repository is a documentation update.

The master branch is then asynchronously published to the ClickHouse official website:
Many developers will say that the code is the best documentation by itself, and they are right. But ClickHouse is not a project for C++ developers only. Most of its users don't know C++, and they can't understand the code quickly. ClickHouse is large enough to absorb almost any change without a noticeable trace. Nobody will find your very useful function, an important setting, or a very informative new column in a system table if it is not referenced in the documentation.

* In English: https://clickhouse.yandex/docs/en/
* In Russian: https://clickhouse.yandex/docs/ru/
* In Chinese: https://clickhouse.yandex/docs/zh/
* In Japanese: https://clickhouse.yandex/docs/ja/
* In Farsi: https://clickhouse.yandex/docs/fa/
If you want to help with the ClickHouse documentation, you may face, for example, the following questions:

The infrastructure to build Markdown for publishing on the documentation website resides in the [tools](tools) folder. It has its own [README.md](tools/README.md) file with more details.
- "I don't know how to write."

    We have prepared some [recommendations](#what-to-write) for you.

# How to Write Content for ClickHouse Documentation
- "I know what I want to write, but I don't know how to contribute to docs."

## Target Audience
    Here are some [tips](#how-to-contribute).

When you write pretty much any text, the first thing you should think about is who will read it and which terms you should use for communicating with them.
Writing the docs is extremely useful for the project's users and developers, and grows your karma.

ClickHouse can be directly used by all sorts of analysts and engineers. For generic parts of documentation (like the query language, tutorials or overviews), assume that the reader only has a basic technical background. For more technical sections (like articles that describe ClickHouse internals, guides for operating ClickHouse clusters, or rules for contributing to C++ code), you can use technical language and concepts.
**Contents**

## Specific Recommendations
- [What is the ClickHouse Documentation](#clickhouse-docs)
- [How to Contribute to ClickHouse Documentation](#how-to-contribute)
    - [Markdown Dialect Cheatsheet](#markdown-cheatsheet)
    - [Adding a New File](#adding-a-new-file)
    - [Adding a New Language](#adding-a-new-language)
- [How to Write Content for ClickHouse Documentation](#what-to-write)
    - [Documentation for Different Audiences](#target-audience)
    - [Common Recommendations](#common-recommendations)
    - [Description Templates](#templates)
- [How to Build Documentation](#how-to-build-docs)

* Documentation should make sense when you read it through from beginning to end. If you add new content, try to place it where the necessary concepts have already been explained.
* If a documentation section consists of many similar items, like functions or operators, try to order them from more generic (usable by a wide audience) to more specific (for specific use cases or application types). If several items are intended to be mostly used together, group them together in the documentation.
* Try to avoid slang. Use the most common and specific terms possible for everything. If some terms are used as synonyms, state this explicitly.
* All descriptions of functionality should be accompanied by examples. Basic examples are acceptable, but real world examples are welcome, too.
* Sensitive topics like politics, religion, race, and so on are strictly prohibited in documentation, examples, comments, and code.
* Proofread your text before publishing. Look for typos, missing punctuation, or repetitions that could be avoided.
* Try to avoid addressing the reader directly, although this is not strictly prohibited.

# How to Add a New Language
<a name="clickhouse-docs"/>

## What is the ClickHouse Documentation

The documentation contains information about all aspects of the ClickHouse lifecycle: developing, testing, installing, operating, and using. The base language of the documentation is English. The English version is the most up-to-date. All other languages are supported as much as possible by contributors from different countries.

At the moment, [documentation](https://clickhouse.yandex/docs) exists in English, Russian, Chinese, Japanese, and Farsi. We store the documentation alongside the ClickHouse source code in the [GitHub repository](https://github.com/ClickHouse/ClickHouse/tree/master/docs).

Each language lives in its corresponding folder. Files that are not translated from English are symbolic links to the English ones.

<a name="how-to-contribute"/>

## How to Contribute to ClickHouse Documentation

You can contribute to the documentation in many ways, for example:

- Fork the ClickHouse repository, edit, commit, push, and open a pull request.

    Add the `documentation` label to this pull request so that the proper automatic checks are applied. If you don't have permission to add labels, the reviewer of your PR will add it.

- Open the required file in the ClickHouse repository and edit it from the GitHub web interface.

    You can do it on GitHub, or on the [ClickHouse Documentation](https://clickhouse.yandex/docs/en/) site. Each page of the ClickHouse Documentation site has an "Edit this page" (🖋) element in the upper right corner. Clicking this symbol opens the corresponding docs file for editing.

    When you save a file, GitHub opens a pull request for your contribution. Add the `documentation` label to this pull request so that the proper automatic checks are applied. If you don't have permission to add labels, the reviewer of your PR will add it.

Contribute all new information in English. Other languages are translations from English.

<a name="markdown-cheatsheet"/>

### Markdown Dialect Cheatsheet

- Headings: Place them on a separate line and start with `# `, `## ` or `### `. Use [Title Case](https://titlecase.com/) for them. Example:

    ```text
    # The First Obligatory Title on a Page.
    ```

- Bold text: `**asterisks**` or `__underlines__`.
- Links: `[link text](uri)`. Examples:

    - External link: `[ClickHouse repo](https://github.com/ClickHouse/ClickHouse)`
    - Cross link: `[How to build docs](tools/README.md)`

- Images: `![Exclamation sign](uri)`. You can refer to local images as well as remote images on the internet.
- Lists: Lists can be of two types:

    - `- unordered`: Each item starts with `-`.
    - `1. ordered`: Each item starts with a number.

    A list must be separated from the text by an empty line. Nested lists must be indented with 4 spaces.

- Inline code: `` `in backticks` ``.
- Multiline code blocks:
    <pre lang="no-highlight"><code>```lang_name
    code
    lines
    ```</code></pre>
- Note:

    ```text
    !!! info "Header"
        4 spaces indented text.
    ```

- Warning:

    ```text
    !!! warning "Header"
        4 spaces indented text.
    ```

- Text hidden behind a cut (a single line that opens on click):

    ```text
    <details markdown="1"> <summary>Visible text</summary>
    Hidden content.
    </details>
    ```
- Colored text: `<span style="color: red;">text</span>`.
- Heading anchor to be linked to: `# Title {#anchor-name}`.
- Table:
    ```
    | Header 1 | Header 2 | Header 3 |
    | ----------- | ----------- | ----------- |
    | Cell A1 | Cell A2 | Cell A3 |
    | Cell B1 | Cell B2 | Cell B3 |
    | Cell C1 | Cell C2 | Cell C3 |
    ```

<a name="adding-a-new-file"/>

### Adding a New File

When adding a new file:

- Make symbolic links for all other languages. You can use the following commands:

    ```bash
    $ cd /ClickHouse/clone/directory/docs
    $ ln -sr en/new/file.md lang/new/file.md
    ```

- Reference the file from `toc_{en,ru,zh,ja,fa}.yaml` files with the pages index.


<a name="adding-a-new-language"/>

### Adding a New Language

1. Create a new docs subfolder named using the [ISO-639-1 language code](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes).
2. Add Markdown files with the translation, mirroring the folder structure of other languages.
3. Commit and open a pull request with the new content.

Some additional configuration has to be done to actually make a new language live on the official website, but it's not automated or documented yet, so we'll do it on our own after the pull request with the content is merged.
When everything is ready, we will add the new language to the website.

# Markdown Dialect Cheatsheet
<a name="what-to-write"/>

* Headings are on a separate line starting with `# `, `## ` or `### `.
* Bold is in `**asterisks**` or `__underlines__`.
* Links `[anchor](http://...)`, images `![with exclamation sign](http://...jpeg)`.
* Lists are on lines starting with `* unordered` or `1. ordered`, but there should be an empty line before the first list item. Sub-lists must be indented with 4 spaces.
* Inline code fragments are <code>`in backticks`</code>.
* Multiline code blocks are <code>```in triple backtick quotes ```</code>.
* Brightly highlighted text starts with `!!! info "Header"`, followed by 4 spaces on the next line and content. For a warning, replace `info` with `warning`.
* Hidden block that opens on click: `<details markdown="1"> <summary>Header</summary> hidden content</details>`.
* Colored text: `<span style="color: red;">text</span>`.
* Heading anchor to be linked to: `Title {#anchor-name}`.
* Table:
```
| Header 1 | Header 2 | Header 3 |
| ----------- | ----------- | ----------- |
| Cell A1 | Cell A2 | Cell A3 |
| Cell B1 | Cell B2 | Cell B3 |
| Cell C1 | Cell C2 | Cell C3 |
```
## How to Write Content for ClickHouse Documentation


<a name="target-audience"/>

### Documentation for Different Audiences

When writing documentation, think about the people who will read it. Each audience has specific requirements for the terms used in communication.

The ClickHouse documentation can be divided by audience into the following parts:

- Conceptual topics in [Introduction](https://clickhouse.yandex/docs/en/), tutorials and overviews, changelog.

    These topics are for the broadest audience. When editing text in them, use the most common terms, comfortable for readers with basic technical skills.

- Query language reference and related topics.

    These parts of the documentation are dedicated to those who use ClickHouse for data analysis. Carefully describe the syntax, input, and output data for expressions. Don't forget the examples.

- Description of table engines and operation details.

    Operations engineers who help data analysts solve their tasks should know how to install and update a ClickHouse server, how to maintain a ClickHouse cluster, how to integrate it with other tools and systems, and how to get the maximum performance out of the entire environment.

- Developer's guides.

    The documentation provides code writers with information about how to write code for ClickHouse and how to build it in different environments.

<a name="common-recommendations"/>

### Common Recommendations

- When choosing a place for your text, try to put it where the reader would most expect to find it.
- Group entities. For example, if several functions solve similar tasks or belong to a specific group by use case or an application type, place them together.
- Try to avoid slang. Use the most common and specific terms possible. If some terms are used as synonyms, state this explicitly.
- Add examples for all the functionality. Add basic examples to show how the function works by itself. Add use case examples to show how the function participates in solving specific tasks.
- Any text concerning politics, religion, or other socially related themes is strictly prohibited in all ClickHouse texts.
- Proofread your text before publishing. Look for typos, missing punctuation, or repetitions that could be avoided.

<a name="templates"/>

### Description Templates

When writing docs, you can use the prepared templates. Copy the code of a template and use it in your contribution. Sometimes you just need to change the level of the headers.

Templates:

- [Function](dscr-templates/template-function.md)
- [Setting](dscr-templates/template-setting.md)
- [Table engine](dscr-templates/template-table-engine.md)
- [System table](dscr-templates/template-system-table.md)


<a name="how-to-build-docs"/>

## How to Build Documentation

You can build the documentation manually by following the instructions in [docs/tools/README.md](docs/tools/README.md). Also, our CI runs the documentation build after the `documentation` label is added to the PR. You can see the results of the build in the GitHub interface. If you don't have permission to add labels, a reviewer of your PR will add it.
48
docs/dscr-templates/template-function.md
Normal file
@ -0,0 +1,48 @@
## function-name {#function-name-in-lower-case}

Short description.

**Syntax** (without SELECT)

```sql
<function syntax>
```

Alias: `<alias name>`. (Optional)

More text (Optional).

**Parameters** (Optional)

- `x` — Description. [Type name](relative/path/to/type/dscr.md#type).
- `y` — Description. [Type name](relative/path/to/type/dscr.md#type).

**Returned value(s)**

- Returned values list.

Type: [Type](relative/path/to/type/dscr.md#type).

**Example**

The example must show usage and/or use cases. The following text contains the recommended parts of an example.

Input table (Optional):

```text
```

Query:

```sql
```

Result:

```text
```

**See Also** (Optional)

- [link](#)
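For illustration, a sketch of this template filled in for the built-in `length` function might look as follows (the descriptions are paraphrased for the example):

## length {#length}

Returns the length of a string in bytes.

**Syntax**

```sql
length(s)
```

**Parameters**

- `s` — Input string. String.

**Returned value(s)**

- The length of the string in bytes.

Type: UInt64.

**Example**

Query:

```sql
SELECT length('ClickHouse')
```

Result:

```text
┌─length('ClickHouse')─┐
│                   10 │
└──────────────────────┘
```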
27
docs/dscr-templates/template-setting.md
Normal file
@ -0,0 +1,27 @@
## setting-name {#setting-name-in-lower-case}

Description.

For a switcher setting, use the typical phrase: "Enables or disables something ...".

Possible values:

*For a switcher setting:*

- 0 — Disabled.
- 1 — Enabled.

*For another setting (typical phrases):*

- Positive integer.
- 0 — Disabled, unlimited, or something else.

Default value: `value`.

**Additional Info** (Optional)

The additional section can have any name, for example **Usage**.

**See Also** (Optional)

- [link](#)
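For illustration, a sketch of this template filled in for the `max_rows_to_read` setting used in the tests above might look as follows (the wording is invented for the example):

## max_rows_to_read {#max_rows_to_read}

Limits the maximum number of rows that can be read from a table when running a query.

Possible values:

- Positive integer.
- 0 — Unlimited.

Default value: `0`.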
21
docs/dscr-templates/template-system-table.md
Normal file
@ -0,0 +1,21 @@
## system.table_name {#system_tables-table_name}

Description.

Columns:

- `column_name` ([data_type_name](data_type_uri)) — Description.

**Example**

```sql
SELECT * FROM system.table_name
```

```text
Some output. It shouldn't be long.
```

**See Also**

- [Article name](uri) — Some words about referenced information.
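For illustration, a sketch of this template filled in for the `system.one` table might look as follows (the description is paraphrased for the example):

## system.one {#system_tables-one}

Contains a single row with a single `dummy` column holding the value 0. Used when a query does not specify a `FROM` clause.

Columns:

- `dummy` (UInt8) — The value 0.

**Example**

```sql
SELECT * FROM system.one
```

```text
┌─dummy─┐
│     0 │
└───────┘
```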
51
docs/dscr-templates/template-table-engine.md
Normal file
@ -0,0 +1,51 @@
# EngineName

- What the engine does.
- Relations with other engines if they exist.

## Creating a Table

```
Syntax codeblock
```

**Engine Parameters**

**Query Clauses**


## Virtual columns

If they exist.

## Specifics and recommendations
- Algorithms
- Specifics of read/write processes
- Examples of tasks
- Recommendations for usage
- Specifics of data storage

## Usage Example

The example must show usage and/or use cases. The following text contains the recommended parts of an example.

Input table:

```text
```

Query:

```sql
```

Result:

```text
```

Any text clarifying the example.

## See Also

- [link](#)
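For illustration, a heavily abbreviated sketch of this template filled in for the Memory engine might look as follows (the wording is invented for the example):

# Memory

- Stores data in RAM, in uncompressed form.
- Data is lost when the server restarts.

## Creating a Table

```sql
CREATE TABLE t (x UInt8) ENGINE = Memory
```

## Usage Example

Query:

```sql
INSERT INTO t VALUES (1);
SELECT * FROM t;
```

Result:

```text
┌─x─┐
│ 1 │
└───┘
```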
@ -66,7 +66,7 @@ def build_for_lang(lang, args):
        'logo': 'images/logo.svg',
        'favicon': 'assets/images/favicon.ico',
        'include_search_page': False,
        'search_index_only': True,
        'search_index_only': False,
        'static_templates': ['404.html'],
        'extra': {
            'now': int(time.mktime(datetime.datetime.now().timetuple()))  # TODO better way to avoid caching
@ -113,15 +113,8 @@ def build_for_lang(lang, args):
            }
        }
    ],
    plugins=[{
        'search': {
            'lang': ['en', 'ru'] if lang == 'ru' else ['en']
        }
    }],
    plugins=[],
    extra={
        'search': {
            'language': 'en,ru' if lang == 'ru' else 'en'
        },
        'stable_releases': args.stable_releases,
        'version_prefix': args.version_prefix
    }
@ -209,3 +209,23 @@ h1, h2, h3, .md-logo {
        content: "" !important;
    }
}

.algolia-autocomplete .algolia-docsearch-suggestion--subcategory-column {
    display: none !important;
}

.algolia-autocomplete .algolia-docsearch-suggestion--content {
    float: none !important;
    width: 100% !important;
}

.algolia-autocomplete .algolia-docsearch-suggestion--content:before {
    content: none !important;
}

.algolia-autocomplete .ds-dropdown-menu {
    width: 100% !important;
    max-height: 512px;
    overflow-x: hidden;
    overflow-y: auto;
}
@ -3,7 +3,7 @@
{% set palette = config.theme.palette %}
{% set font = config.theme.font %}
<!DOCTYPE html>
<html lang="{{ lang.t('language') }}" class="no-js">
<html lang="{{ lang.t('language') }}" class="no-js" data-version="{{ config.extra.version_prefix }}">
<head>
{% block site_meta %}
<meta charset="utf-8">
@ -49,6 +49,7 @@
{% endif %}
{% endblock %}
{% block styles %}
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.css" />
<link rel="stylesheet" href="{{ base_url }}/assets/stylesheets/application.2a88008a.css">
{% if palette.primary or palette.accent %}
<link rel="stylesheet" href="{{ base_url }}/assets/stylesheets/application-palette.792431c1.css">
@ -269,6 +270,19 @@
    )
});
</script>
<script type="text/javascript" src="https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.js"></script>
<script type="text/javascript"> docsearch({
    apiKey: 'e239649803024433599de47a53b2d416',
    indexName: 'yandex_clickhouse',
    inputSelector: '#md-search__input',
    algoliaOptions: {
        hitsPerPage: 25,
        'facetFilters': ["lang:{{ lang.t('language') }}"]
    },
    debug: false
});
</script>

<!-- Yandex.Metrika counter -->
<script type="text/javascript">
(function (d, w, c) {
@ -34,10 +34,8 @@
</div>
<div class="md-flex__cell md-flex__cell--shrink">
    {% block search_box %}
    {% if "search" in config["plugins"] %}
        <label class="md-icon md-icon--search md-header-nav__button" for="search"></label>
        {% include "partials/search.html" %}
    {% endif %}
    <label class="md-icon md-icon--search md-header-nav__button" for="search"></label>
    {% include "partials/search.html" %}
    {% endblock %}
</div>
{% if page %}
@ -3,7 +3,7 @@
<label class="md-search__overlay" for="search"></label>
<div class="md-search__inner">
    <form class="md-search__form" name="search">
        <input type="text" class="md-search__input" name="query" required placeholder="{{ lang.t('search.placeholder') }}" autocapitalize="off" autocorrect="off" autocomplete="off" spellcheck="false" data-md-component="query">
        <input type="text" id="md-search__input" class="md-search__input" name="query" required placeholder="{{ lang.t('search.placeholder') }}" autocapitalize="off" autocorrect="off" autocomplete="off" spellcheck="false">
        <label class="md-icon md-search__icon" for="search"></label>
        <button type="reset" class="md-icon md-search__icon" data-md-component="reset"></button>
    </form>
@ -1,5 +1,6 @@
#!/usr/bin/env bash
set -ex

BASE_DIR=$(dirname $(readlink -f $0))
BUILD_DIR="${BASE_DIR}/../build"
IMAGE="clickhouse/website"
@ -12,9 +12,9 @@
class KillingErrorHandler : public Poco::ErrorHandler
{
public:
    void exception(const Poco::Exception &) { std::terminate(); }
    void exception(const std::exception &) { std::terminate(); }
    void exception() { std::terminate(); }
    void exception(const Poco::Exception &) override { std::terminate(); }
    void exception(const std::exception &) override { std::terminate(); }
    void exception() override { std::terminate(); }
};


@ -23,9 +23,9 @@ public:
class ServerErrorHandler : public Poco::ErrorHandler
{
public:
    void exception(const Poco::Exception &) { logException(); }
    void exception(const std::exception &) { logException(); }
    void exception() { logException(); }
    void exception(const Poco::Exception &) override { logException(); }
    void exception(const std::exception &) override { logException(); }
    void exception() override { logException(); }

private:
    Logger * log = &Logger::get("ServerErrorHandler");
@ -2,7 +2,17 @@
#include <common/LineReader.h>

#if USE_REPLXX
# include <replxx.hxx>
#include <replxx.hxx>
#else

/// We can detect if the code is linked with one of the readline variants, or open the library dynamically.
#include <dlfcn.h>
extern "C"
{
    char * readline(const char *) __attribute__((__weak__));
    char * (*readline_ptr)(const char *) = readline;
}

#endif

#include <iostream>
@ -146,10 +156,38 @@ LineReader::InputStatus LineReader::readOneLine(const String & prompt)
        return (errno != EAGAIN) ? ABORT : RESET_LINE;
    input = cinput;
#else
    std::cout << prompt;
    std::getline(std::cin, input);
    if (!std::cin.good())
        return ABORT;

    if (!readline_ptr)
    {
        for (auto name : {"libreadline.so", "libreadline.so.0", "libeditline.so", "libeditline.so.0"})
        {
            void * dl_handle = dlopen(name, RTLD_LAZY);
            if (dl_handle)
            {
                readline_ptr = reinterpret_cast<char * (*)(const char *)>(dlsym(dl_handle, "readline"));
                if (readline_ptr)
                {
                    break;
                }
            }
        }
    }

    /// Minimal support for readline
    if (readline_ptr)
    {
        char * line_read = (*readline_ptr)(prompt.c_str());
        if (!line_read)
            return ABORT;
        input = line_read;
    }
    else
    {
        std::cout << prompt;
        std::getline(std::cin, input);
        if (!std::cin.good())
            return ABORT;
    }
#endif

    trim(input);
@ -1,49 +1,46 @@
#pragma once

#include <sstream>
#include <mysqlxx/Types.h>
#include <Poco/Exception.h>
#include <mysqlxx/Types.h>


namespace mysqlxx
{

/** Common class for exceptions that can be thrown by functions from the library.
 * The code() and errnum() functions return the MySQL error number (see mysqld_error.h).
 */
/// Common exception class for MySQL library. Functions code() and errnum() return error numbers from MySQL, for details see mysqld_error.h
struct Exception : public Poco::Exception
{
    Exception(const std::string & msg, int code = 0) : Poco::Exception(msg, code) {}
    int errnum() const { return code(); }
    const char * name() const throw() { return "mysqlxx::Exception"; }
    const char * className() const throw() { return "mysqlxx::Exception"; }
    const char * name() const throw() override { return "mysqlxx::Exception"; }
    const char * className() const throw() override { return "mysqlxx::Exception"; }
};


/// Could not connect to the server.
/// Cannot connect to MySQL server
struct ConnectionFailed : public Exception
{
    ConnectionFailed(const std::string & msg, int code = 0) : Exception(msg, code) {}
    const char * name() const throw() { return "mysqlxx::ConnectionFailed"; }
    const char * className() const throw() { return "mysqlxx::ConnectionFailed"; }
    const char * name() const throw() override { return "mysqlxx::ConnectionFailed"; }
    const char * className() const throw() override { return "mysqlxx::ConnectionFailed"; }
};


/// The query contains an error.
/// Erroneous query.
struct BadQuery : public Exception
{
    BadQuery(const std::string & msg, int code = 0) : Exception(msg, code) {}
    const char * name() const throw() { return "mysqlxx::BadQuery"; }
    const char * className() const throw() { return "mysqlxx::BadQuery"; }
    const char * name() const throw() override { return "mysqlxx::BadQuery"; }
    const char * className() const throw() override { return "mysqlxx::BadQuery"; }
};


/// Cannot parse the value.
/// Value parsing failure
struct CannotParseValue : public Exception
{
    CannotParseValue(const std::string & msg, int code = 0) : Exception(msg, code) {}
    const char * name() const throw() { return "mysqlxx::CannotParseValue"; }
    const char * className() const throw() { return "mysqlxx::CannotParseValue"; }
    const char * name() const throw() override { return "mysqlxx::CannotParseValue"; }
    const char * className() const throw() override { return "mysqlxx::CannotParseValue"; }
};