Merge branch 'master' into add-separate-access-for-use-named-collections

commit e97e107bcc
@@ -47,11 +47,13 @@ ENV TZ=Etc/UTC
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

ENV DOCKER_CHANNEL stable
+# Unpin the docker version after the release 24.0.3 is released
+# https://github.com/moby/moby/issues/45770#issuecomment-1618255130
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
    && add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -c -s) ${DOCKER_CHANNEL}" \
    && apt-get update \
    && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
-        docker-ce \
+        docker-ce='5:23.*' \
    && rm -rf \
        /var/lib/apt/lists/* \
        /var/cache/debconf \
@@ -76,7 +76,8 @@ sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-serv
# But we still need default disk because some tables loaded only into it
sudo cat /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml \
    | sed "s|<main><disk>s3</disk></main>|<main><disk>s3</disk></main><default><disk>default</disk></default>|" \
-    > /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
+    > /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp
+mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
@@ -54,7 +54,7 @@ $ sudo mysql

``` sql
mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse';
-mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION;
+mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'localhost' WITH GRANT OPTION;
```

Then configure the connection in `/etc/odbc.ini`.

@@ -66,7 +66,7 @@ DRIVER = /usr/local/lib/libmyodbc5w.so
SERVER = 127.0.0.1
PORT = 3306
DATABASE = test
-USERNAME = clickhouse
+USER = clickhouse
PASSWORD = clickhouse
```

@@ -83,6 +83,9 @@ $ isql -v mysqlconn
Table in MySQL:

``` text
+mysql> CREATE DATABASE test;
+Query OK, 1 row affected (0,01 sec)
+
mysql> CREATE TABLE `test`.`test` (
    ->   `int_id` INT NOT NULL AUTO_INCREMENT,
    ->   `int_nullable` INT NULL DEFAULT NULL,
@@ -91,10 +94,10 @@ mysql> CREATE TABLE `test`.`test` (
    ->   PRIMARY KEY (`int_id`));
Query OK, 0 rows affected (0,09 sec)

-mysql> insert into test (`int_id`, `float`) VALUES (1,2);
+mysql> insert into test.test (`int_id`, `float`) VALUES (1,2);
Query OK, 1 row affected (0,00 sec)

-mysql> select * from test;
+mysql> select * from test.test;
+--------+--------------+-------+----------------+
| int_id | int_nullable | float | float_nullable |
+--------+--------------+-------+----------------+
@@ -192,7 +192,7 @@ SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook
**Returned values**

- If `x` is not `NULL`, returns the original value of the non-`Nullable` type.
-- If `x` is `NULL`, returns the default value of the corresponding non-`Nullable` type.
+- If `x` is `NULL`, returns an arbitrary value.

**Example**

@@ -8,7 +8,9 @@
#include <Poco/Logger.h>
#include <Poco/NullChannel.h>
#include <Poco/SimpleFileChannel.h>
+#include <Databases/DatabaseFilesystem.h>
#include <Databases/DatabaseMemory.h>
+#include <Databases/DatabasesOverlay.h>
#include <Storages/System/attachSystemTables.h>
#include <Storages/System/attachInformationSchemaTables.h>
#include <Interpreters/DatabaseCatalog.h>
@@ -50,6 +52,8 @@
#include <base/argsToConfig.h>
#include <filesystem>

#include "config.h"

#if defined(FUZZING_MODE)
#include <Functions/getFuzzerData.h>
#endif
@@ -170,6 +174,13 @@ static DatabasePtr createMemoryDatabaseIfNotExists(ContextPtr context, const Str
    return system_database;
}

+static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_)
+{
+    auto databaseCombiner = std::make_shared<DatabasesOverlay>(name_, context_);
+    databaseCombiner->registerNextDatabase(std::make_shared<DatabaseFilesystem>(name_, "", context_));
+    databaseCombiner->registerNextDatabase(std::make_shared<DatabaseMemory>(name_, context_));
+    return databaseCombiner;
+}

/// If path is specified and not empty, will try to setup server environment and load existing metadata
void LocalServer::tryInitPath()
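The overlay above is what lets clickhouse-local resolve a path-like table name as a file while ordinary tables keep living in memory: lookups walk the registered databases in order and the first hit wins. A minimal, self-contained sketch of that resolution order, with illustrative class names rather than the real ClickHouse types:

``` cpp
#include <iostream>
#include <map>
#include <memory>
#include <string>

/// Stand-in for StoragePtr: just a description of the resolved table.
using TablePtr = std::shared_ptr<std::string>;

struct Database
{
    virtual ~Database() = default;
    virtual TablePtr tryGetTable(const std::string & name) const = 0;
};

/// Resolves names that look like paths, the way DatabaseFilesystem turns
/// SELECT * FROM "data.csv" into an implicit file table.
struct FilesystemDatabase : Database
{
    TablePtr tryGetTable(const std::string & name) const override
    {
        if (name.find('.') != std::string::npos)
            return std::make_shared<std::string>("file table over " + name);
        return nullptr;
    }
};

/// Ordinary in-memory tables created by the user.
struct MemoryDatabase : Database
{
    std::map<std::string, TablePtr> tables;
    TablePtr tryGetTable(const std::string & name) const override
    {
        auto it = tables.find(name);
        return it == tables.end() ? nullptr : it->second;
    }
};

/// The overlay asks each registered database in order; the first hit wins.
struct OverlaySketch : Database
{
    std::vector<std::shared_ptr<Database>> databases;
    void registerNextDatabase(std::shared_ptr<Database> db) { databases.push_back(std::move(db)); }
    TablePtr tryGetTable(const std::string & name) const override
    {
        for (const auto & db : databases)
            if (auto table = db->tryGetTable(name))
                return table;
        return nullptr;
    }
};

int main()
{
    auto memory = std::make_shared<MemoryDatabase>();
    memory->tables["t"] = std::make_shared<std::string>("memory table t");

    OverlaySketch overlay;
    overlay.registerNextDatabase(std::make_shared<FilesystemDatabase>());
    overlay.registerNextDatabase(memory);

    std::cout << *overlay.tryGetTable("data.csv") << '\n'; // resolved by FilesystemDatabase
    std::cout << *overlay.tryGetTable("t") << '\n';        // falls through to MemoryDatabase
}
```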
@@ -669,7 +680,7 @@ void LocalServer::processConfig()
     * if such tables will not be dropped, clickhouse-server will not be able to load them due to security reasons.
     */
    std::string default_database = config().getString("default_database", "_local");
-    DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared<DatabaseMemory>(default_database, global_context));
+    DatabaseCatalog::instance().attachDatabase(default_database, createClickHouseLocalDatabaseOverlay(default_database, global_context));
    global_context->setCurrentDatabase(default_database);
    applyCmdOptions(global_context);
@@ -12,7 +12,8 @@
    --chart-background: white;
    --shadow-color: rgba(0, 0, 0, 0.25);
    --input-shadow-color: rgba(0, 255, 0, 1);
-    --error-color: white;
+    --error-color: red;
+    --auth-error-color: white;
    --legend-background: rgba(255, 255, 255, 0.75);
    --title-color: #666;
    --text-color: black;
@@ -258,7 +259,7 @@
    width: 60%;
    padding: .5rem;

-    color: var(--error-color);
+    color: var(--auth-error-color);

    display: flex;
    flex-flow: row nowrap;
@@ -906,9 +907,9 @@ async function draw(idx, chart, url_params, query) {

    if (error) {
        const errorMatch = errorMessages.find(({ regex }) => error.match(regex))
        if (errorMatch) {
-        const match = error.match(errorMatch.regex)
-        const message = errorMatch.messageFunc(match)
+            const match = error.match(errorMatch.regex)
+            const message = errorMatch.messageFunc(match)
            if (message) {
                const authError = new Error(message)
                throw authError
            }
@@ -930,7 +931,7 @@ async function draw(idx, chart, url_params, query) {
    let title_div = chart.querySelector('.title');
    if (error) {
        error_div.firstChild.data = error;
-    title_div.style.display = 'none';
+        title_div.style.display = 'none';
        error_div.style.display = 'block';
        return false;
    } else {
@@ -1019,13 +1020,15 @@ async function drawAll() {
        firstLoad = false;
    } else {
        enableReloadButton();
+        enableRunButton();
    }
-    if (!results.includes(false)) {
+    if (results.includes(true)) {
        const element = document.querySelector('.inputs');
        element.classList.remove('unconnected');
        const add = document.querySelector('#add');
        add.style.display = 'block';
-    } else {
+    }
+    else {
        const charts = document.querySelector('#charts')
        charts.style.height = '0px';
    }
@@ -1050,6 +1053,13 @@ function disableReloadButton() {
    reloadButton.classList.add('disabled')
}

+function disableRunButton() {
+    const runButton = document.getElementById('run')
+    runButton.value = 'Reloading...'
+    runButton.disabled = true
+    runButton.classList.add('disabled')
+}
+
function enableReloadButton() {
    const reloadButton = document.getElementById('reload')
    reloadButton.value = 'Reload'
@@ -1057,11 +1067,19 @@ function enableReloadButton() {
    reloadButton.classList.remove('disabled')
}

+function enableRunButton() {
+    const runButton = document.getElementById('run')
+    runButton.value = 'Ok'
+    runButton.disabled = false
+    runButton.classList.remove('disabled')
+}
+
function reloadAll() {
    updateParams();
    drawAll();
    saveState();
-    disableReloadButton()
+    disableReloadButton();
+    disableRunButton();
}

document.getElementById('params').onsubmit = function(event) {
@@ -2297,7 +2297,9 @@ void ClientBase::runInteractive()
        catch (const ErrnoException & e)
        {
            if (e.getErrno() != EEXIST)
-                throw;
+            {
+                std::cerr << getCurrentExceptionMessage(false) << '\n';
+            }
        }
    }
}
@@ -60,7 +60,15 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati
    quota_key = config.getString("quota_key", "");

-    /// By default compression is disabled if address looks like localhost.
-    compression = config.getBool("compression", !isLocalAddress(DNSResolver::instance().resolveHost(host)))
+    /// Avoid DNS request if the host is "localhost".
+    /// If ClickHouse is run under QEMU-user with a binary for a different architecture,
+    /// and there are all listed startup dependency shared libraries available, but not the runtime dependencies of glibc,
+    /// the glibc cannot open "plugins" for DNS resolving, and the DNS resolution does not work.
+    /// At the same time, I want clickhouse-local to always work, regardless.
+    /// TODO: get rid of glibc, or replace getaddrinfo to c-ares.
+
+    compression = config.getBool("compression", host != "localhost" && !isLocalAddress(DNSResolver::instance().resolveHost(host)))
        ? Protocol::Compression::Enable : Protocol::Compression::Disable;

    timeouts = ConnectionTimeouts(
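The reordered condition matters: the new `host != "localhost"` test short-circuits before `isLocalAddress` ever calls the resolver, so the "localhost" case never touches DNS. The same idiom, standalone (`resolveHost` here is a stub standing in for `DNSResolver`):

``` cpp
#include <iostream>
#include <string>

static int resolutions = 0;

/// Stub for DNSResolver::instance().resolveHost(): counts how often DNS runs.
static std::string resolveHost(const std::string & host)
{
    ++resolutions;
    return host == "localhost" ? "127.0.0.1" : "203.0.113.10";
}

static bool isLocalAddress(const std::string & address)
{
    return address.rfind("127.", 0) == 0;
}

int main()
{
    std::string host = "localhost";

    /// Old default: always resolves, even for "localhost".
    /// New default: the left operand short-circuits, so no DNS request is made.
    bool compression_default = host != "localhost" && !isLocalAddress(resolveHost(host));

    std::cout << "compression_default=" << compression_default
              << " dns_requests=" << resolutions << '\n'; // both 0
}
```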
@@ -101,9 +101,8 @@ static String getLoadSuggestionQuery(Int32 suggestion_limit, bool basic_suggesti
        add_column("name", "columns", true, suggestion_limit);
    }

-    /// FIXME: Forbid this query using new analyzer because of bug https://github.com/ClickHouse/ClickHouse/issues/50669
-    /// We should remove this restriction after resolving this bug.
-    query = "SELECT DISTINCT arrayJoin(extractAll(name, '[\\\\w_]{2,}')) AS res FROM (" + query + ") WHERE notEmpty(res) SETTINGS allow_experimental_analyzer=0";
+    /// FIXME: This query does not work with the new analyzer because of bug https://github.com/ClickHouse/ClickHouse/issues/50669
+    query = "SELECT DISTINCT arrayJoin(extractAll(name, '[\\\\w_]{2,}')) AS res FROM (" + query + ") WHERE notEmpty(res)";
    return query;
}
@@ -540,7 +540,7 @@ bool OptimizedRegularExpressionImpl<thread_safe>::match(const char * subject, si
        }
    }

-    return re2->Match(StringPieceType(subject, subject_size), 0, subject_size, RegexType::UNANCHORED, nullptr, 0);
+    return re2->Match({subject, subject_size}, 0, subject_size, RegexType::UNANCHORED, nullptr, 0);
}
}

@@ -585,9 +585,9 @@ bool OptimizedRegularExpressionImpl<thread_safe>::match(const char * subject, si
        return false;
    }

-    StringPieceType piece;
+    std::string_view piece;

-    if (!RegexType::PartialMatch(StringPieceType(subject, subject_size), *re2, &piece))
+    if (!RegexType::PartialMatch({subject, subject_size}, *re2, &piece))
        return false;
    else
    {
@@ -652,10 +652,10 @@ unsigned OptimizedRegularExpressionImpl<thread_safe>::match(const char * subject
        return 0;
    }

-    DB::PODArrayWithStackMemory<StringPieceType, 128> pieces(limit);
+    DB::PODArrayWithStackMemory<std::string_view, 128> pieces(limit);

    if (!re2->Match(
-        StringPieceType(subject, subject_size),
+        {subject, subject_size},
        0,
        subject_size,
        RegexType::UNANCHORED,
@@ -52,7 +52,6 @@ public:
    using MatchVec = std::vector<Match>;

    using RegexType = std::conditional_t<thread_safe, re2::RE2, re2_st::RE2>;
-    using StringPieceType = std::conditional_t<thread_safe, re2::StringPiece, re2_st::StringPiece>;

    OptimizedRegularExpressionImpl(const std::string & regexp_, int options = 0); /// NOLINT
    /// StringSearcher store pointers to required_substring, it must be updated on move.

@@ -5,7 +5,6 @@
#include <atomic>

#include <re2/re2.h>
-#include <re2/stringpiece.h>

#include <Poco/Util/AbstractConfiguration.h>

@@ -44,7 +43,7 @@ private:
    const std::string regexp_string;

    const RE2 regexp;
-    const re2::StringPiece replacement;
+    const std::string_view replacement;

#ifndef NDEBUG
    mutable std::atomic<std::uint64_t> matches_count = 0;
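These StringPiece-to-string_view hunks track re2's own migration: newer re2 releases drop the bundled stringpiece.h and take string-view-like arguments, so a braced {pointer, size} pair can be passed in place directly. A small sketch of the idiom, assuming nothing beyond the standard library:

``` cpp
#include <cstddef>
#include <iostream>
#include <string_view>

/// Any function taking std::string_view now accepts what used to require
/// an explicit StringPieceType(subject, subject_size) wrapper.
static bool containsDigit(std::string_view piece)
{
    for (char c : piece)
        if (c >= '0' && c <= '9')
            return true;
    return false;
}

int main()
{
    const char * subject = "abc123";
    size_t subject_size = 6;

    /// The braced pair replaces StringPieceType(subject, subject_size).
    std::cout << containsDigit({subject, subject_size}) << '\n'; // 1
}
```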
@@ -27,7 +27,7 @@ static thread_local size_t max_stack_size = 0;
 * @param out_address - if not nullptr, here the address of the stack will be written.
 * @return stack size
 */
-size_t getStackSize(void ** out_address)
+static size_t getStackSize(void ** out_address)
{
    using namespace DB;

@@ -54,7 +54,15 @@ size_t getStackSize(void ** out_address)
        throwFromErrno("Cannot pthread_attr_get_np", ErrorCodes::CANNOT_PTHREAD_ATTR);
# else
    if (0 != pthread_getattr_np(pthread_self(), &attr))
-        throwFromErrno("Cannot pthread_getattr_np", ErrorCodes::CANNOT_PTHREAD_ATTR);
+    {
+        if (errno == ENOENT)
+        {
+            /// Most likely procfs is not mounted.
+            return 0;
+        }
+        else
+            throwFromErrno("Cannot pthread_getattr_np", ErrorCodes::CANNOT_PTHREAD_ATTR);
+    }
# endif

    SCOPE_EXIT({ pthread_attr_destroy(&attr); });

@@ -83,6 +91,10 @@ __attribute__((__weak__)) void checkStackSize()
    if (!stack_address)
        max_stack_size = getStackSize(&stack_address);

+    /// The check is impossible.
+    if (!max_stack_size)
+        return;
+
    const void * frame_address = __builtin_frame_address(0);
    uintptr_t int_frame_address = reinterpret_cast<uintptr_t>(frame_address);
    uintptr_t int_stack_address = reinterpret_cast<uintptr_t>(stack_address);
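On glibc, pthread_getattr_np consults /proc/self/maps to find the stack bounds, so in a minimal environment without procfs it fails; the hunk above turns that failure into "size unknown" (zero), which checkStackSize then treats as "the check is impossible". A hedged, standalone sketch of the same fallback, with error handling simplified relative to throwFromErrno:

``` cpp
#include <pthread.h>

#include <cerrno>
#include <cstddef>
#include <cstdio>

/// Query the current thread's stack bounds the way the patch does: a failed
/// pthread_getattr_np with ENOENT (most likely procfs is not mounted) means
/// "unknown" rather than a fatal error.
static size_t getStackSizeOrZero(void ** out_address)
{
    pthread_attr_t attr;
    if (0 != pthread_getattr_np(pthread_self(), &attr))
    {
        if (errno == ENOENT)
            return 0; /// Most likely procfs is not mounted.
        std::perror("pthread_getattr_np");
        return 0; /// The real code throws here (throwFromErrno).
    }

    void * address = nullptr;
    size_t size = 0;
    pthread_attr_getstack(&attr, &address, &size);
    pthread_attr_destroy(&attr);

    if (out_address)
        *out_address = address;
    return size;
}

int main()
{
    void * address = nullptr;
    size_t size = getStackSizeOrZero(&address);
    std::printf("stack at %p, %zu bytes\n", address, size);
}
```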
@@ -3,7 +3,6 @@
#include <IO/ReadBufferFromString.h>
#include <IO/Operators.h>
#include <re2/re2.h>
-#include <re2/stringpiece.h>
#include <algorithm>
#include <sstream>
#include <iomanip>
@@ -33,14 +32,14 @@ std::string makeRegexpPatternFromGlobs(const std::string & initial_str_with_glob
    std::string escaped_with_globs = buf_for_escaping.str();

    static const re2::RE2 enum_or_range(R"({([\d]+\.\.[\d]+|[^{}*,]+,[^{}*]*[^{}*,])})"); /// regexp for {expr1,expr2,expr3} or {M..N}, where M and N - non-negative integers, expr's should be without "{", "}", "*" and ","
-    re2::StringPiece input(escaped_with_globs);
-    re2::StringPiece matched;
+    std::string_view input(escaped_with_globs);
+    std::string_view matched;
    std::ostringstream oss_for_replacing; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
    oss_for_replacing.exceptions(std::ios::failbit);
    size_t current_index = 0;
    while (RE2::FindAndConsume(&input, enum_or_range, &matched))
    {
-        std::string buffer{matched};
+        std::string buffer(matched);
        oss_for_replacing << escaped_with_globs.substr(current_index, matched.data() - escaped_with_globs.data() - current_index - 1) << '(';

        if (buffer.find(',') == std::string::npos)
@@ -42,7 +42,6 @@ private:
    UInt32 getMaxCompressedDataSize(UInt32 uncompressed_size) const override;

    mutable LZ4::PerformanceStatistics lz4_stat;
-    ASTPtr codec_desc;
};
@@ -3,6 +3,7 @@
#include <filesystem>
#include <Databases/DatabaseAtomic.h>
#include <Databases/DatabaseDictionary.h>
+#include <Databases/DatabaseFilesystem.h>
#include <Databases/DatabaseLazy.h>
#include <Databases/DatabaseMemory.h>
#include <Databases/DatabaseOrdinary.h>
@@ -47,6 +48,14 @@
#include <Databases/SQLite/DatabaseSQLite.h>
#endif

+#if USE_AWS_S3
+#include <Databases/DatabaseS3.h>
+#endif
+
+#if USE_HDFS
+#include <Databases/DatabaseHDFS.h>
+#endif
+
namespace fs = std::filesystem;

namespace DB
@@ -131,13 +140,13 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String

    static const std::unordered_set<std::string_view> database_engines{"Ordinary", "Atomic", "Memory",
        "Dictionary", "Lazy", "Replicated", "MySQL", "MaterializeMySQL", "MaterializedMySQL",
-        "PostgreSQL", "MaterializedPostgreSQL", "SQLite"};
+        "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3", "HDFS"};

    if (!database_engines.contains(engine_name))
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Database engine name `{}` does not exist", engine_name);

    static const std::unordered_set<std::string_view> engines_with_arguments{"MySQL", "MaterializeMySQL", "MaterializedMySQL",
-        "Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite"};
+        "Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3", "HDFS"};

    static const std::unordered_set<std::string_view> engines_with_table_overrides{"MaterializeMySQL", "MaterializedMySQL", "MaterializedPostgreSQL"};
    bool engine_may_have_arguments = engines_with_arguments.contains(engine_name);
@@ -432,6 +441,63 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String
    }
#endif

+    else if (engine_name == "Filesystem")
+    {
+        const ASTFunction * engine = engine_define->engine;
+
+        /// If init_path is empty, then the current path will be used
+        std::string init_path;
+
+        if (engine->arguments && !engine->arguments->children.empty())
+        {
+            if (engine->arguments->children.size() != 1)
+                throw Exception(ErrorCodes::BAD_ARGUMENTS, "Filesystem database requires at most 1 argument: filesystem_path");
+
+            const auto & arguments = engine->arguments->children;
+            init_path = safeGetLiteralValue<String>(arguments[0], engine_name);
+        }
+
+        return std::make_shared<DatabaseFilesystem>(database_name, init_path, context);
+    }
+
+#if USE_AWS_S3
+    else if (engine_name == "S3")
+    {
+        const ASTFunction * engine = engine_define->engine;
+
+        DatabaseS3::Configuration config;
+
+        if (engine->arguments && !engine->arguments->children.empty())
+        {
+            ASTs & engine_args = engine->arguments->children;
+            config = DatabaseS3::parseArguments(engine_args, context);
+        }
+
+        return std::make_shared<DatabaseS3>(database_name, config, context);
+    }
+#endif
+
+#if USE_HDFS
+    else if (engine_name == "HDFS")
+    {
+        const ASTFunction * engine = engine_define->engine;
+
+        /// If source_url is empty, then table name must contain full url
+        std::string source_url;
+
+        if (engine->arguments && !engine->arguments->children.empty())
+        {
+            if (engine->arguments->children.size() != 1)
+                throw Exception(ErrorCodes::BAD_ARGUMENTS, "HDFS database requires at most 1 argument: source_url");
+
+            const auto & arguments = engine->arguments->children;
+            source_url = safeGetLiteralValue<String>(arguments[0], engine_name);
+        }
+
+        return std::make_shared<DatabaseHDFS>(database_name, source_url, context);
+    }
+#endif
+
    throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, "Unknown database engine: {}", engine_name);
}
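Registration is two-step: the engine name must be present in database_engines to pass validation at all, and in engines_with_arguments for a CREATE DATABASE ... ENGINE = Filesystem('/some/path') style argument list to be accepted. A self-contained sketch of that validation, mirroring the sets in the hunk above:

``` cpp
#include <cstddef>
#include <stdexcept>
#include <string>
#include <unordered_set>

static void validateEngine(const std::string & engine_name, size_t argument_count)
{
    static const std::unordered_set<std::string> database_engines{
        "Ordinary", "Atomic", "Memory", "Dictionary", "Lazy", "Replicated",
        "MySQL", "MaterializeMySQL", "MaterializedMySQL", "PostgreSQL",
        "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3", "HDFS"};

    static const std::unordered_set<std::string> engines_with_arguments{
        "MySQL", "MaterializeMySQL", "MaterializedMySQL", "Lazy", "Replicated",
        "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3", "HDFS"};

    if (!database_engines.contains(engine_name))
        throw std::invalid_argument("Database engine name `" + engine_name + "` does not exist");

    if (argument_count > 0 && !engines_with_arguments.contains(engine_name))
        throw std::invalid_argument("Database engine `" + engine_name + "` cannot have arguments");
}

int main()
{
    validateEngine("Filesystem", 1); // ok: one optional path argument
    validateEngine("Memory", 0);     // ok: no arguments
}
```

A database declared this way is then queried like any other; for Filesystem the path argument is optional, so both `ENGINE = Filesystem` and `ENGINE = Filesystem('path')` are accepted by the code above.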
src/Databases/DatabaseFilesystem.cpp (new file, 245 lines)
@@ -0,0 +1,245 @@
#include <Databases/DatabaseFilesystem.h>

#include <IO/Operators.h>
#include <IO/WriteBufferFromString.h>
#include <Interpreters/Context.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/ParserCreateQuery.h>
#include <Parsers/parseQuery.h>
#include <Storages/IStorage.h>
#include <TableFunctions/TableFunctionFactory.h>
#include <Common/filesystemHelpers.h>

#include <filesystem>

namespace fs = std::filesystem;

namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int UNKNOWN_TABLE;
    extern const int PATH_ACCESS_DENIED;
    extern const int BAD_ARGUMENTS;
    extern const int FILE_DOESNT_EXIST;
}

DatabaseFilesystem::DatabaseFilesystem(const String & name_, const String & path_, ContextPtr context_)
    : IDatabase(name_), WithContext(context_->getGlobalContext()), path(path_), log(&Poco::Logger::get("DatabaseFileSystem(" + name_ + ")"))
{
    bool is_local = context_->getApplicationType() == Context::ApplicationType::LOCAL;
    fs::path user_files_path = is_local ? "" : fs::canonical(getContext()->getUserFilesPath());

    if (fs::path(path).is_relative())
    {
        path = user_files_path / path;
    }
    else if (!is_local && !pathStartsWith(fs::path(path), user_files_path))
    {
        throw Exception(ErrorCodes::BAD_ARGUMENTS,
                        "Path must be inside user-files path: {}", user_files_path.string());
    }

    path = fs::absolute(path).lexically_normal();
    if (!fs::exists(path))
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path does not exist: {}", path);
}
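In server mode the constructor pins the database under user_files: relative paths are joined to it, absolute paths must stay inside it, and the result is normalized before the existence check. pathStartsWith is a ClickHouse helper; a standalone approximation of the containment test it performs:

``` cpp
#include <algorithm>
#include <filesystem>
#include <iostream>

namespace fs = std::filesystem;

/// Approximation of pathStartsWith(): normalize both paths, then require
/// every component of the base to be a prefix of the child's components.
static bool pathStartsWithSketch(fs::path child, fs::path base)
{
    child = child.lexically_normal();
    base = base.lexically_normal();
    auto [base_mismatch, child_mismatch] = std::mismatch(base.begin(), base.end(), child.begin(), child.end());
    (void)child_mismatch; /// Only the base side matters.
    return base_mismatch == base.end();
}

int main()
{
    const fs::path user_files = "/var/lib/clickhouse/user_files";
    std::cout << pathStartsWithSketch(user_files / "data/a.csv", user_files) << '\n';           // 1
    std::cout << pathStartsWithSketch(user_files / "../../../etc/passwd", user_files) << '\n'; // 0: ".." escapes
}
```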
std::string DatabaseFilesystem::getTablePath(const std::string & table_name) const
{
    fs::path table_path = fs::path(path) / table_name;
    return table_path.lexically_normal().string();
}

void DatabaseFilesystem::addTable(const std::string & table_name, StoragePtr table_storage) const
{
    std::lock_guard lock(mutex);
    auto [_, inserted] = loaded_tables.emplace(table_name, table_storage);
    if (!inserted)
        throw Exception(
            ErrorCodes::LOGICAL_ERROR,
            "Table with name `{}` already exists in database `{}` (engine {})",
            table_name, getDatabaseName(), getEngineName());
}

bool DatabaseFilesystem::checkTableFilePath(const std::string & table_path, ContextPtr context_, bool throw_on_error) const
{
    /// If run in Local mode, no need for path checking.
    bool check_path = context_->getApplicationType() != Context::ApplicationType::LOCAL;
    const auto & user_files_path = context_->getUserFilesPath();

    /// Check access for file before checking its existence.
    if (check_path && !fileOrSymlinkPathStartsWith(table_path, user_files_path))
    {
        if (throw_on_error)
            throw Exception(ErrorCodes::PATH_ACCESS_DENIED, "File is not inside {}", user_files_path);
        else
            return false;
    }

    /// Check if the corresponding file exists.
    if (!fs::exists(table_path))
    {
        if (throw_on_error)
            throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "File does not exist: {}", table_path);
        else
            return false;
    }

    if (!fs::is_regular_file(table_path))
    {
        if (throw_on_error)
            throw Exception(ErrorCodes::FILE_DOESNT_EXIST,
                            "File is directory, but expected a file: {}", table_path);
        else
            return false;
    }

    return true;
}

StoragePtr DatabaseFilesystem::tryGetTableFromCache(const std::string & name) const
{
    StoragePtr table = nullptr;
    {
        std::lock_guard lock(mutex);
        auto it = loaded_tables.find(name);
        if (it != loaded_tables.end())
            table = it->second;
    }

    /// Invalidate cache if file no longer exists.
    if (table && !fs::exists(getTablePath(name)))
    {
        std::lock_guard lock(mutex);
        loaded_tables.erase(name);
        return nullptr;
    }

    return table;
}

bool DatabaseFilesystem::isTableExist(const String & name, ContextPtr context_) const
{
    if (tryGetTableFromCache(name))
        return true;

    return checkTableFilePath(getTablePath(name), context_, /* throw_on_error */ false);
}

StoragePtr DatabaseFilesystem::getTableImpl(const String & name, ContextPtr context_) const
{
    /// Check if table exists in loaded tables map.
    if (auto table = tryGetTableFromCache(name))
        return table;

    auto table_path = getTablePath(name);
    checkTableFilePath(table_path, context_, /* throw_on_error */ true);

    /// If the file exists, create a new table using TableFunctionFile and return it.
    auto args = makeASTFunction("file", std::make_shared<ASTLiteral>(table_path));

    auto table_function = TableFunctionFactory::instance().get(args, context_);
    if (!table_function)
        return nullptr;

    /// TableFunctionFile throws exceptions, if table cannot be created.
    auto table_storage = table_function->execute(args, context_, name);
    if (table_storage)
        addTable(name, table_storage);

    return table_storage;
}

StoragePtr DatabaseFilesystem::getTable(const String & name, ContextPtr context_) const
{
    /// getTableImpl can throw exceptions, do not catch them to show correct error to user.
    if (auto storage = getTableImpl(name, context_))
        return storage;

    throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist",
                    backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(name));
}

StoragePtr DatabaseFilesystem::tryGetTable(const String & name, ContextPtr context_) const
{
    try
    {
        return getTableImpl(name, context_);
    }
    catch (const Exception & e)
    {
        /// Ignore exceptions thrown by TableFunctionFile, which indicate that there is no table;
        /// see tests/02722_database_filesystem.sh for more details.
        if (e.code() == ErrorCodes::FILE_DOESNT_EXIST)
        {
            return nullptr;
        }
        throw;
    }
}
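getTable and tryGetTable share one lookup path but differ in error policy: getTable propagates everything and throws UNKNOWN_TABLE when the lookup returns nothing, while tryGetTable converts only the "expected" failure codes into nullptr and rethrows the rest. A standalone model of that split (the exception types here are stand-ins):

``` cpp
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>

/// Stand-in for the FILE_DOESNT_EXIST error code.
struct NoSuchFile : std::runtime_error { using std::runtime_error::runtime_error; };

using TablePtr = std::shared_ptr<std::string>;

static TablePtr getTableImpl(const std::string & name)
{
    if (name == "data.csv")
        return std::make_shared<std::string>("table over " + name);
    throw NoSuchFile("File does not exist: " + name);
}

/// The caller named the table explicitly: propagate every error.
static TablePtr getTable(const std::string & name)
{
    if (auto table = getTableImpl(name))
        return table;
    throw std::runtime_error("Table " + name + " doesn't exist");
}

/// Probing lookup: only "there is no such table" becomes nullptr;
/// anything else (access denied, parse error, ...) keeps propagating.
static TablePtr tryGetTable(const std::string & name)
{
    try
    {
        return getTableImpl(name);
    }
    catch (const NoSuchFile &)
    {
        return nullptr;
    }
}

int main()
{
    std::cout << (getTable("data.csv") != nullptr) << '\n';       // 1
    std::cout << (tryGetTable("missing.csv") == nullptr) << '\n'; // 1
}
```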
bool DatabaseFilesystem::empty() const
{
    std::lock_guard lock(mutex);
    return loaded_tables.empty();
}

ASTPtr DatabaseFilesystem::getCreateDatabaseQuery() const
{
    const auto & settings = getContext()->getSettingsRef();
    const String query = fmt::format("CREATE DATABASE {} ENGINE = Filesystem('{}')", backQuoteIfNeed(getDatabaseName()), path);

    ParserCreateQuery parser;
    ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth);

    if (const auto database_comment = getDatabaseComment(); !database_comment.empty())
    {
        auto & ast_create_query = ast->as<ASTCreateQuery &>();
        ast_create_query.set(ast_create_query.comment, std::make_shared<ASTLiteral>(database_comment));
    }

    return ast;
}

void DatabaseFilesystem::shutdown()
{
    Tables tables_snapshot;
    {
        std::lock_guard lock(mutex);
        tables_snapshot = loaded_tables;
    }

    for (const auto & kv : tables_snapshot)
    {
        auto table_id = kv.second->getStorageID();
        kv.second->flushAndShutdown();
    }

    std::lock_guard lock(mutex);
    loaded_tables.clear();
}

/**
 * Returns an empty vector because the database is read-only and no tables can be backed up.
 */
std::vector<std::pair<ASTPtr, StoragePtr>> DatabaseFilesystem::getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const
{
    return {};
}

/**
 * Returns an empty iterator because the database does not have its own tables,
 * but only caches them for quick access.
 */
DatabaseTablesIteratorPtr DatabaseFilesystem::getTablesIterator(ContextPtr, const FilterByNameFunction &) const
{
    return std::make_unique<DatabaseTablesSnapshotIterator>(Tables{}, getDatabaseName());
}

}
src/Databases/DatabaseFilesystem.h (new file, 67 lines)
@@ -0,0 +1,67 @@
#pragma once

#include <mutex>
#include <Databases/IDatabase.h>
#include <Parsers/IAST.h>
#include <Storages/IStorage_fwd.h>
#include <base/types.h>

namespace DB
{

class Context;

/**
 * DatabaseFilesystem allows interaction with files stored on the local filesystem.
 * It uses TableFunctionFile to implicitly load a file when a user requests the table,
 * and provides read-only access to the data in the file.
 * Tables are cached inside the database for quick access.
 *
 * Used in clickhouse-local to access local files.
 * For clickhouse-server, access is allowed only to files inside the user_files directory.
 */
class DatabaseFilesystem : public IDatabase, protected WithContext
{
public:
    DatabaseFilesystem(const String & name, const String & path, ContextPtr context);

    String getEngineName() const override { return "Filesystem"; }

    bool isTableExist(const String & name, ContextPtr context) const override;

    StoragePtr getTable(const String & name, ContextPtr context) const override;

    StoragePtr tryGetTable(const String & name, ContextPtr context) const override;

    bool shouldBeEmptyOnDetach() const override { return false; } /// Contains only temporary tables.

    bool empty() const override;

    bool isReadOnly() const override { return true; }

    ASTPtr getCreateDatabaseQuery() const override;

    void shutdown() override;

    std::vector<std::pair<ASTPtr, StoragePtr>> getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const override;

    DatabaseTablesIteratorPtr getTablesIterator(ContextPtr, const FilterByNameFunction &) const override;

protected:
    StoragePtr getTableImpl(const String & name, ContextPtr context) const;

    StoragePtr tryGetTableFromCache(const std::string & name) const;

    std::string getTablePath(const std::string & table_name) const;

    void addTable(const std::string & table_name, StoragePtr table_storage) const;

    bool checkTableFilePath(const std::string & table_path, ContextPtr context_, bool throw_on_error) const;

private:
    String path;
    mutable Tables loaded_tables TSA_GUARDED_BY(mutex);
    Poco::Logger * log;
};

}
src/Databases/DatabaseHDFS.cpp (new file, 234 lines)
@@ -0,0 +1,234 @@
#include "config.h"

#if USE_HDFS

#include <Databases/DatabaseHDFS.h>

#include <Interpreters/Context.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/parseQuery.h>
#include <Parsers/ParserCreateQuery.h>
#include <Storages/HDFS/HDFSCommon.h>
#include <Storages/IStorage.h>
#include <TableFunctions/TableFunctionFactory.h>

#include <Poco/URI.h>
#include <re2/re2.h>

#include <filesystem>

namespace fs = std::filesystem;

namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int UNKNOWN_TABLE;
    extern const int BAD_ARGUMENTS;
    extern const int FILE_DOESNT_EXIST;
    extern const int UNACCEPTABLE_URL;
    extern const int ACCESS_DENIED;
    extern const int DATABASE_ACCESS_DENIED;
    extern const int HDFS_ERROR;
    extern const int CANNOT_EXTRACT_TABLE_STRUCTURE;
}

static constexpr std::string_view HDFS_HOST_REGEXP = "^hdfs://[^/]*";

DatabaseHDFS::DatabaseHDFS(const String & name_, const String & source_url, ContextPtr context_)
    : IDatabase(name_)
    , WithContext(context_->getGlobalContext())
    , source(source_url)
    , log(&Poco::Logger::get("DatabaseHDFS(" + name_ + ")"))
{
    if (!source.empty())
    {
        if (!re2::RE2::FullMatch(source, std::string(HDFS_HOST_REGEXP)))
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad hdfs host: {}. "
                            "It should have structure 'hdfs://<host_name>:<port>'", source);

        context_->getGlobalContext()->getRemoteHostFilter().checkURL(Poco::URI(source));
    }
}
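An empty source means every table name must itself be a full hdfs:// URL; a non-empty one must match the host-only pattern before it is checked against the remote-host filter. A standalone sketch of the same shape check using std::regex (the real code uses re2::RE2::FullMatch):

``` cpp
#include <iostream>
#include <regex>
#include <string>

/// Full match against the scheme plus authority, with nothing after the
/// host:port part; mirrors the pattern "^hdfs://[^/]*".
static bool looksLikeHdfsHost(const std::string & source)
{
    static const std::regex hdfs_host("hdfs://[^/]*");
    return std::regex_match(source, hdfs_host);
}

int main()
{
    std::cout << looksLikeHdfsHost("hdfs://namenode:9000") << '\n';      // 1
    std::cout << looksLikeHdfsHost("hdfs://namenode:9000/path") << '\n'; // 0: path not allowed here
    std::cout << looksLikeHdfsHost("http://example.com") << '\n';        // 0: wrong scheme
}
```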
void DatabaseHDFS::addTable(const std::string & table_name, StoragePtr table_storage) const
{
    std::lock_guard lock(mutex);
    auto [_, inserted] = loaded_tables.emplace(table_name, table_storage);
    if (!inserted)
        throw Exception(
            ErrorCodes::LOGICAL_ERROR,
            "Table with name `{}` already exists in database `{}` (engine {})",
            table_name, getDatabaseName(), getEngineName());
}

std::string DatabaseHDFS::getTablePath(const std::string & table_name) const
{
    if (table_name.starts_with("hdfs://"))
        return table_name;

    if (source.empty())
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad hdfs url: {}. "
                        "It should have structure 'hdfs://<host_name>:<port>/path'", table_name);

    return fs::path(source) / table_name;
}

bool DatabaseHDFS::checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const
{
    try
    {
        checkHDFSURL(url);
        context_->getGlobalContext()->getRemoteHostFilter().checkURL(Poco::URI(url));
    }
    catch (...)
    {
        if (throw_on_error)
            throw;
        return false;
    }

    return true;
}

bool DatabaseHDFS::isTableExist(const String & name, ContextPtr context_) const
{
    std::lock_guard lock(mutex);
    if (loaded_tables.find(name) != loaded_tables.end())
        return true;

    return checkUrl(name, context_, false);
}

StoragePtr DatabaseHDFS::getTableImpl(const String & name, ContextPtr context_) const
{
    /// Check if the table exists in the loaded tables map.
    {
        std::lock_guard lock(mutex);
        auto it = loaded_tables.find(name);
        if (it != loaded_tables.end())
            return it->second;
    }

    auto url = getTablePath(name);

    checkUrl(url, context_, true);

    auto args = makeASTFunction("hdfs", std::make_shared<ASTLiteral>(url));

    auto table_function = TableFunctionFactory::instance().get(args, context_);
    if (!table_function)
        return nullptr;

    /// TableFunctionHDFS throws exceptions, if table cannot be created.
    auto table_storage = table_function->execute(args, context_, name);
    if (table_storage)
        addTable(name, table_storage);

    return table_storage;
}

StoragePtr DatabaseHDFS::getTable(const String & name, ContextPtr context_) const
{
    /// Rethrow all exceptions from TableFunctionHDFS to show correct error to user.
    if (auto storage = getTableImpl(name, context_))
        return storage;

    throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist",
                    backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(name));
}

StoragePtr DatabaseHDFS::tryGetTable(const String & name, ContextPtr context_) const
{
    try
    {
        return getTableImpl(name, context_);
    }
    catch (const Exception & e)
    {
        /// Ignore exceptions thrown by TableFunctionHDFS, which indicate that there is no table.
        if (e.code() == ErrorCodes::BAD_ARGUMENTS
            || e.code() == ErrorCodes::ACCESS_DENIED
            || e.code() == ErrorCodes::DATABASE_ACCESS_DENIED
            || e.code() == ErrorCodes::FILE_DOESNT_EXIST
            || e.code() == ErrorCodes::UNACCEPTABLE_URL
            || e.code() == ErrorCodes::HDFS_ERROR
            || e.code() == ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE)
        {
            return nullptr;
        }
        throw;
    }
    catch (const Poco::URISyntaxException &)
    {
        return nullptr;
    }
}

bool DatabaseHDFS::empty() const
{
    std::lock_guard lock(mutex);
    return loaded_tables.empty();
}

ASTPtr DatabaseHDFS::getCreateDatabaseQuery() const
{
    const auto & settings = getContext()->getSettingsRef();
    ParserCreateQuery parser;

    const String query = fmt::format("CREATE DATABASE {} ENGINE = HDFS('{}')", backQuoteIfNeed(getDatabaseName()), source);
    ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth);

    if (const auto database_comment = getDatabaseComment(); !database_comment.empty())
    {
        auto & ast_create_query = ast->as<ASTCreateQuery &>();
        ast_create_query.set(ast_create_query.comment, std::make_shared<ASTLiteral>(database_comment));
    }

    return ast;
}

void DatabaseHDFS::shutdown()
{
    Tables tables_snapshot;
    {
        std::lock_guard lock(mutex);
        tables_snapshot = loaded_tables;
    }

    for (const auto & kv : tables_snapshot)
    {
        auto table_id = kv.second->getStorageID();
        kv.second->flushAndShutdown();
    }

    std::lock_guard lock(mutex);
    loaded_tables.clear();
}

/**
 * Returns an empty vector because the database is read-only and no tables can be backed up.
 */
std::vector<std::pair<ASTPtr, StoragePtr>> DatabaseHDFS::getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const
{
    return {};
}

/**
 * Returns an empty iterator because the database does not have its own tables,
 * but only caches them for quick access.
 */
DatabaseTablesIteratorPtr DatabaseHDFS::getTablesIterator(ContextPtr, const FilterByNameFunction &) const
{
    return std::make_unique<DatabaseTablesSnapshotIterator>(Tables{}, getDatabaseName());
}

} // namespace DB

#endif
src/Databases/DatabaseHDFS.h (new file, 68 lines)
@@ -0,0 +1,68 @@
#pragma once

#include "config.h"

#if USE_HDFS

#include <mutex>
#include <Databases/IDatabase.h>
#include <Parsers/IAST.h>
#include <Storages/IStorage_fwd.h>
#include <base/types.h>

namespace DB
{

class Context;

/**
 * DatabaseHDFS allows interaction with files stored in HDFS.
 * It uses TableFunctionHDFS to implicitly load a file when a user requests the table,
 * and provides read-only access to the data in the file.
 * Tables are cached inside the database for quick access.
 */
class DatabaseHDFS : public IDatabase, protected WithContext
{
public:
    DatabaseHDFS(const String & name, const String & source_url, ContextPtr context);

    String getEngineName() const override { return "HDFS"; }

    bool isTableExist(const String & name, ContextPtr context) const override;

    StoragePtr getTable(const String & name, ContextPtr context) const override;

    StoragePtr tryGetTable(const String & name, ContextPtr context) const override;

    bool shouldBeEmptyOnDetach() const override { return false; } /// Contains only temporary tables.

    bool empty() const override;

    bool isReadOnly() const override { return true; }

    ASTPtr getCreateDatabaseQuery() const override;

    void shutdown() override;

    std::vector<std::pair<ASTPtr, StoragePtr>> getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const override;
    DatabaseTablesIteratorPtr getTablesIterator(ContextPtr, const FilterByNameFunction &) const override;

protected:
    StoragePtr getTableImpl(const String & name, ContextPtr context) const;

    void addTable(const std::string & table_name, StoragePtr table_storage) const;

    bool checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const;

    std::string getTablePath(const std::string & table_name) const;

private:
    const String source;

    mutable Tables loaded_tables TSA_GUARDED_BY(mutex);
    Poco::Logger * log;
};

}

#endif
src/Databases/DatabaseS3.cpp (new file, 312 lines)
@@ -0,0 +1,312 @@
#include "config.h"

#if USE_AWS_S3

#include <Databases/DatabaseS3.h>

#include <Interpreters/Context.h>
#include <Interpreters/evaluateConstantExpression.h>
#include <IO/S3/URI.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/parseQuery.h>
#include <Parsers/ParserCreateQuery.h>
#include <Storages/checkAndGetLiteralArgument.h>
#include <Storages/IStorage.h>
#include <Storages/NamedCollectionsHelpers.h>
#include <TableFunctions/TableFunctionFactory.h>

#include <boost/algorithm/string.hpp>
#include <filesystem>

namespace fs = std::filesystem;

namespace DB
{

static const std::unordered_set<std::string_view> optional_configuration_keys = {
    "url",
    "access_key_id",
    "secret_access_key",
    "no_sign_request"
};

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int UNKNOWN_TABLE;
    extern const int BAD_ARGUMENTS;
    extern const int FILE_DOESNT_EXIST;
    extern const int UNACCEPTABLE_URL;
    extern const int S3_ERROR;

    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}

DatabaseS3::DatabaseS3(const String & name_, const Configuration & config_, ContextPtr context_)
    : IDatabase(name_)
    , WithContext(context_->getGlobalContext())
    , config(config_)
    , log(&Poco::Logger::get("DatabaseS3(" + name_ + ")"))
{
}

void DatabaseS3::addTable(const std::string & table_name, StoragePtr table_storage) const
{
    std::lock_guard lock(mutex);
    auto [_, inserted] = loaded_tables.emplace(table_name, table_storage);
    if (!inserted)
        throw Exception(
            ErrorCodes::LOGICAL_ERROR,
            "Table with name `{}` already exists in database `{}` (engine {})",
            table_name, getDatabaseName(), getEngineName());
}

std::string DatabaseS3::getFullUrl(const std::string & name) const
{
    if (!config.url_prefix.empty())
        return fs::path(config.url_prefix) / name;

    return name;
}

bool DatabaseS3::checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const
{
    try
    {
        S3::URI uri(url);
        context_->getGlobalContext()->getRemoteHostFilter().checkURL(uri.uri);
    }
    catch (...)
    {
        if (throw_on_error)
            throw;
        return false;
    }
    return true;
}

bool DatabaseS3::isTableExist(const String & name, ContextPtr context_) const
{
    std::lock_guard lock(mutex);
    if (loaded_tables.find(name) != loaded_tables.end())
        return true;

    return checkUrl(getFullUrl(name), context_, false);
}

StoragePtr DatabaseS3::getTableImpl(const String & name, ContextPtr context_) const
{
    /// Check if the table exists in the loaded tables map.
    {
        std::lock_guard lock(mutex);
        auto it = loaded_tables.find(name);
        if (it != loaded_tables.end())
            return it->second;
    }

    auto url = getFullUrl(name);
    checkUrl(url, context_, /* throw_on_error */ true);

    auto function = std::make_shared<ASTFunction>();
    function->name = "s3";
    function->arguments = std::make_shared<ASTExpressionList>();
    function->children.push_back(function->arguments);

    function->arguments->children.push_back(std::make_shared<ASTLiteral>(url));
    if (config.no_sign_request)
    {
        function->arguments->children.push_back(std::make_shared<ASTLiteral>("NOSIGN"));
    }
    else if (config.access_key_id.has_value() && config.secret_access_key.has_value())
    {
        function->arguments->children.push_back(std::make_shared<ASTLiteral>(config.access_key_id.value()));
        function->arguments->children.push_back(std::make_shared<ASTLiteral>(config.secret_access_key.value()));
    }

    auto table_function = TableFunctionFactory::instance().get(function, context_);
    if (!table_function)
        return nullptr;

    /// TableFunctionS3 throws exceptions, if table cannot be created.
    auto table_storage = table_function->execute(function, context_, name);
    if (table_storage)
        addTable(name, table_storage);

    return table_storage;
}

StoragePtr DatabaseS3::getTable(const String & name, ContextPtr context_) const
{
    /// Rethrow all exceptions from TableFunctionS3 to show correct error to user.
    if (auto storage = getTableImpl(name, context_))
        return storage;

    throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist",
                    backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(name));
}

StoragePtr DatabaseS3::tryGetTable(const String & name, ContextPtr context_) const
{
    try
    {
        return getTableImpl(name, context_);
    }
    catch (const Exception & e)
    {
        /// Ignore exceptions thrown by TableFunctionS3, which indicate that there is no table.
        if (e.code() == ErrorCodes::BAD_ARGUMENTS
            || e.code() == ErrorCodes::S3_ERROR
            || e.code() == ErrorCodes::FILE_DOESNT_EXIST
            || e.code() == ErrorCodes::UNACCEPTABLE_URL)
        {
            return nullptr;
        }
        throw;
    }
    catch (const Poco::URISyntaxException &)
    {
        return nullptr;
    }
}

bool DatabaseS3::empty() const
{
    std::lock_guard lock(mutex);
    return loaded_tables.empty();
}

ASTPtr DatabaseS3::getCreateDatabaseQuery() const
{
    const auto & settings = getContext()->getSettingsRef();
    ParserCreateQuery parser;

    std::string creation_args;
    creation_args += fmt::format("'{}'", config.url_prefix);
    if (config.no_sign_request)
        creation_args += ", 'NOSIGN'";
    else if (config.access_key_id.has_value() && config.secret_access_key.has_value())
        creation_args += fmt::format(", '{}', '{}'", config.access_key_id.value(), config.secret_access_key.value());

    const String query = fmt::format("CREATE DATABASE {} ENGINE = S3({})", backQuoteIfNeed(getDatabaseName()), creation_args);
    ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth);

    if (const auto database_comment = getDatabaseComment(); !database_comment.empty())
    {
        auto & ast_create_query = ast->as<ASTCreateQuery &>();
        ast_create_query.set(ast_create_query.comment, std::make_shared<ASTLiteral>(database_comment));
    }

    return ast;
}

void DatabaseS3::shutdown()
{
    Tables tables_snapshot;
    {
        std::lock_guard lock(mutex);
        tables_snapshot = loaded_tables;
    }

    for (const auto & kv : tables_snapshot)
    {
        auto table_id = kv.second->getStorageID();
        kv.second->flushAndShutdown();
    }

    std::lock_guard lock(mutex);
    loaded_tables.clear();
}

DatabaseS3::Configuration DatabaseS3::parseArguments(ASTs engine_args, ContextPtr context_)
{
    Configuration result;

    if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, context_))
    {
        auto & collection = *named_collection;

        validateNamedCollection(collection, {}, optional_configuration_keys);

        result.url_prefix = collection.getOrDefault<String>("url", "");
        result.no_sign_request = collection.getOrDefault<bool>("no_sign_request", false);

        auto key_id = collection.getOrDefault<String>("access_key_id", "");
        auto secret_key = collection.getOrDefault<String>("secret_access_key", "");

        if (!key_id.empty())
            result.access_key_id = key_id;

        if (!secret_key.empty())
            result.secret_access_key = secret_key;
    }
    else
    {
        const std::string supported_signature =
            " - S3()\n"
            " - S3('url')\n"
            " - S3('url', 'NOSIGN')\n"
            " - S3('url', 'access_key_id', 'secret_access_key')\n";
        const auto error_message =
            fmt::format("Engine DatabaseS3 must have the following arguments signature\n{}", supported_signature);

        for (auto & arg : engine_args)
            arg = evaluateConstantExpressionOrIdentifierAsLiteral(arg, context_);

        if (engine_args.size() > 3)
            throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, error_message.c_str());

        if (engine_args.empty())
            return result;

        result.url_prefix = checkAndGetLiteralArgument<String>(engine_args[0], "url");

        // url, NOSIGN
        if (engine_args.size() == 2)
        {
            auto second_arg = checkAndGetLiteralArgument<String>(engine_args[1], "NOSIGN");
            if (boost::iequals(second_arg, "NOSIGN"))
                result.no_sign_request = true;
            else
                throw Exception(ErrorCodes::BAD_ARGUMENTS, error_message.c_str());
        }

        // url, access_key_id, secret_access_key
        if (engine_args.size() == 3)
        {
            auto key_id = checkAndGetLiteralArgument<String>(engine_args[1], "access_key_id");
            auto secret_key = checkAndGetLiteralArgument<String>(engine_args[2], "secret_access_key");

            if (key_id.empty() || secret_key.empty() || boost::iequals(key_id, "NOSIGN"))
                throw Exception(ErrorCodes::BAD_ARGUMENTS, error_message.c_str());

            result.access_key_id = key_id;
            result.secret_access_key = secret_key;
        }
    }

    return result;
}
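parseArguments accepts either a named collection carrying the keys url / access_key_id / secret_access_key / no_sign_request, or the positional forms S3(), S3('url'), S3('url', 'NOSIGN'), and S3('url', 'access_key_id', 'secret_access_key'). A standalone sketch of the positional dispatch, with plain strings standing in for AST literals:

``` cpp
#include <cctype>
#include <optional>
#include <stdexcept>
#include <string>
#include <vector>

/// Mirrors DatabaseS3::Configuration.
struct Config
{
    std::string url_prefix;
    bool no_sign_request = false;
    std::optional<std::string> access_key_id;
    std::optional<std::string> secret_access_key;
};

static bool iequalsNosign(std::string s)
{
    for (char & c : s)
        c = static_cast<char>(std::tolower(static_cast<unsigned char>(c)));
    return s == "nosign";
}

static Config parsePositional(const std::vector<std::string> & args)
{
    Config result;
    if (args.size() > 3)
        throw std::invalid_argument("S3 database takes at most 3 arguments");
    if (args.empty())
        return result;                   // S3()
    result.url_prefix = args[0];         // S3('url')
    if (args.size() == 2)
    {
        if (!iequalsNosign(args[1]))     // S3('url', 'NOSIGN')
            throw std::invalid_argument("second argument must be NOSIGN");
        result.no_sign_request = true;
    }
    if (args.size() == 3)                // S3('url', key, secret)
    {
        result.access_key_id = args[1];
        result.secret_access_key = args[2];
    }
    return result;
}

int main()
{
    Config c = parsePositional({"https://bucket.s3.amazonaws.com/", "NOSIGN"});
    return c.no_sign_request ? 0 : 1;
}
```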
/**
 * Returns an empty vector because the database is read-only and no tables can be backed up.
 */
std::vector<std::pair<ASTPtr, StoragePtr>> DatabaseS3::getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const
{
    return {};
}

/**
 * Returns an empty iterator because the database does not have its own tables,
 * but only caches them for quick access.
 */
DatabaseTablesIteratorPtr DatabaseS3::getTablesIterator(ContextPtr, const FilterByNameFunction &) const
{
    return std::make_unique<DatabaseTablesSnapshotIterator>(Tables{}, getDatabaseName());
}

}

#endif
src/Databases/DatabaseS3.h (new file, 81 lines)
@@ -0,0 +1,81 @@
#pragma once

#include "config.h"

#if USE_AWS_S3

#include <mutex>
#include <Databases/IDatabase.h>
#include <Parsers/IAST.h>
#include <Storages/IStorage_fwd.h>
#include <base/types.h>

namespace DB
{

class Context;

/**
 * DatabaseS3 provides access to data stored in S3.
 * It uses TableFunctionS3 to implicitly load a file when a user requests the table,
 * and provides read-only access to the data in the file.
 * Tables are cached inside the database for quick access.
 */
class DatabaseS3 : public IDatabase, protected WithContext
{
public:
    struct Configuration
    {
        std::string url_prefix;

        bool no_sign_request = false;

        std::optional<std::string> access_key_id;
        std::optional<std::string> secret_access_key;
    };

    DatabaseS3(const String & name, const Configuration & config, ContextPtr context);

    String getEngineName() const override { return "S3"; }

    bool isTableExist(const String & name, ContextPtr context) const override;

    StoragePtr getTable(const String & name, ContextPtr context) const override;

    StoragePtr tryGetTable(const String & name, ContextPtr context) const override;

    bool shouldBeEmptyOnDetach() const override { return false; } /// Contains only temporary tables.

    bool empty() const override;

    bool isReadOnly() const override { return true; }

    ASTPtr getCreateDatabaseQuery() const override;

    void shutdown() override;

    std::vector<std::pair<ASTPtr, StoragePtr>> getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const override;
    DatabaseTablesIteratorPtr getTablesIterator(ContextPtr, const FilterByNameFunction &) const override;

    static Configuration parseArguments(ASTs engine_args, ContextPtr context);

protected:
    StoragePtr getTableImpl(const String & name, ContextPtr context) const;

    void addTable(const std::string & table_name, StoragePtr table_storage) const;

    bool checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const;

    std::string getFullUrl(const std::string & name) const;

private:
    const Configuration config;

    mutable Tables loaded_tables TSA_GUARDED_BY(mutex);
    Poco::Logger * log;
};

}

#endif
src/Databases/DatabasesOverlay.cpp (new file, 266 lines)
@@ -0,0 +1,266 @@
#include <Databases/DatabasesOverlay.h>

#include <Common/typeid_cast.h>
#include <Interpreters/Context.h>
#include <Interpreters/InterpreterCreateQuery.h>
#include <Parsers/ASTCreateQuery.h>

#include <Storages/IStorage_fwd.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int CANNOT_GET_CREATE_TABLE_QUERY;
}

DatabasesOverlay::DatabasesOverlay(const String & name_, ContextPtr context_)
    : IDatabase(name_), WithContext(context_->getGlobalContext()), log(&Poco::Logger::get("DatabaseOverlay(" + name_ + ")"))
{
}

DatabasesOverlay & DatabasesOverlay::registerNextDatabase(DatabasePtr database)
{
    databases.push_back(std::move(database));
    return *this;
}

bool DatabasesOverlay::isTableExist(const String & table_name, ContextPtr context_) const
{
    for (const auto & db : databases)
    {
        if (db->isTableExist(table_name, context_))
            return true;
    }
    return false;
}

StoragePtr DatabasesOverlay::tryGetTable(const String & table_name, ContextPtr context_) const
{
    StoragePtr result = nullptr;
    for (const auto & db : databases)
    {
        result = db->tryGetTable(table_name, context_);
        if (result)
            break;
    }
    return result;
}

void DatabasesOverlay::createTable(ContextPtr context_, const String & table_name, const StoragePtr & table, const ASTPtr & query)
{
    for (auto & db : databases)
    {
        if (!db->isReadOnly())
        {
            db->createTable(context_, table_name, table, query);
            return;
        }
    }
    throw Exception(
        ErrorCodes::LOGICAL_ERROR,
        "There are no databases for CREATE TABLE `{}` query in database `{}` (engine {})",
        table_name,
        getDatabaseName(),
        getEngineName());
}

void DatabasesOverlay::dropTable(ContextPtr context_, const String & table_name, bool sync)
{
    for (auto & db : databases)
    {
        if (db->isTableExist(table_name, context_))
        {
            db->dropTable(context_, table_name, sync);
            return;
        }
    }
    throw Exception(
        ErrorCodes::LOGICAL_ERROR,
        "There are no databases for DROP TABLE `{}` query in database `{}` (engine {})",
        table_name,
        getDatabaseName(),
        getEngineName());
}

void DatabasesOverlay::attachTable(
    ContextPtr context_, const String & table_name, const StoragePtr & table, const String & relative_table_path)
{
    for (auto & db : databases)
    {
        try
        {
            db->attachTable(context_, table_name, table, relative_table_path);
            return;
        }
        catch (...)
        {
            continue;
        }
    }
    throw Exception(
        ErrorCodes::LOGICAL_ERROR,
        "There are no databases for ATTACH TABLE `{}` query in database `{}` (engine {})",
        table_name,
        getDatabaseName(),
        getEngineName());
}

StoragePtr DatabasesOverlay::detachTable(ContextPtr context_, const String & table_name)
{
    StoragePtr result = nullptr;
    for (auto & db : databases)
    {
        if (db->isTableExist(table_name, context_))
            return db->detachTable(context_, table_name);
    }
    throw Exception(
        ErrorCodes::LOGICAL_ERROR,
        "There are no databases for DETACH TABLE `{}` query in database `{}` (engine {})",
        table_name,
        getDatabaseName(),
        getEngineName());
}

ASTPtr DatabasesOverlay::getCreateTableQueryImpl(const String & name, ContextPtr context_, bool throw_on_error) const
{
    ASTPtr result = nullptr;
    for (const auto & db : databases)
    {
        result = db->tryGetCreateTableQuery(name, context_);
        if (result)
            break;
    }
    if (!result && throw_on_error)
        throw Exception(
            ErrorCodes::CANNOT_GET_CREATE_TABLE_QUERY,
            "There is no metadata of table `{}` in database `{}` (engine {})",
            name,
            getDatabaseName(),
            getEngineName());
    return result;
}

/*
 * DatabaseOverlay cannot be constructed by a "CREATE DATABASE" query, as it is not a traditional ClickHouse database.
 * To use DatabaseOverlay, it must be constructed programmatically in code.
 */
ASTPtr DatabasesOverlay::getCreateDatabaseQuery() const
{
    return std::make_shared<ASTCreateQuery>();
}

String DatabasesOverlay::getTableDataPath(const String & table_name) const
{
    String result;
    for (const auto & db : databases)
    {
        result = db->getTableDataPath(table_name);
        if (!result.empty())
            break;
    }
    return result;
}

String DatabasesOverlay::getTableDataPath(const ASTCreateQuery & query) const
{
    String result;
    for (const auto & db : databases)
    {
        result = db->getTableDataPath(query);
        if (!result.empty())
            break;
    }
    return result;
}

UUID DatabasesOverlay::tryGetTableUUID(const String & table_name) const
{
    UUID result = UUIDHelpers::Nil;
    for (const auto & db : databases)
    {
        result = db->tryGetTableUUID(table_name);
        if (result != UUIDHelpers::Nil)
            break;
    }
    return result;
}

void DatabasesOverlay::drop(ContextPtr context_)
{
    for (auto & db : databases)
        db->drop(context_);
}

void DatabasesOverlay::alterTable(ContextPtr local_context, const StorageID & table_id, const StorageInMemoryMetadata & metadata)
{
    for (auto & db : databases)
    {
        if (!db->isReadOnly() && db->isTableExist(table_id.table_name, local_context))
        {
            db->alterTable(local_context, table_id, metadata);
            return;
        }
    }
    throw Exception(
        ErrorCodes::LOGICAL_ERROR,
        "There are no databases for ALTER TABLE `{}` query in database `{}` (engine {})",
        table_id.table_name,
        getDatabaseName(),
        getEngineName());
}

std::vector<std::pair<ASTPtr, StoragePtr>>
DatabasesOverlay::getTablesForBackup(const FilterByNameFunction & filter, const ContextPtr & local_context) const
{
    std::vector<std::pair<ASTPtr, StoragePtr>> result;
    for (const auto & db : databases)
    {
        auto db_backup = db->getTablesForBackup(filter, local_context);
        result.insert(result.end(), std::make_move_iterator(db_backup.begin()), std::make_move_iterator(db_backup.end()));
    }
    return result;
}

void DatabasesOverlay::createTableRestoredFromBackup(
    const ASTPtr & create_table_query,
    ContextMutablePtr local_context,
    std::shared_ptr<IRestoreCoordination> /*restore_coordination*/,
    UInt64 /*timeout_ms*/)
{
    /// Creates tables by executing a "CREATE TABLE" query.
    InterpreterCreateQuery interpreter{create_table_query, local_context};
    interpreter.setInternal(true);
    interpreter.execute();
}

bool DatabasesOverlay::empty() const
{
    for (const auto & db : databases)
    {
        if (!db->empty())
            return false;
    }
    return true;
}

void DatabasesOverlay::shutdown()
{
    for (auto & db : databases)
        db->shutdown();
}

DatabaseTablesIteratorPtr DatabasesOverlay::getTablesIterator(ContextPtr context_, const FilterByNameFunction & filter_by_table_name) const
{
    Tables tables;
    for (const auto & db : databases)
    {
        for (auto table_it = db->getTablesIterator(context_, filter_by_table_name); table_it->isValid(); table_it->next())
            tables.insert({table_it->name(), table_it->table()});
    }
    return std::make_unique<DatabaseTablesSnapshotIterator>(std::move(tables), getDatabaseName());
}

}
66 src/Databases/DatabasesOverlay.h Normal file
@ -0,0 +1,66 @@
#pragma once

#include <Storages/IStorage_fwd.h>
#include <Databases/IDatabase.h>

namespace DB
{

/**
 * Implements the IDatabase interface and combines multiple other databases.
 * Searches for tables in each database in order until found, and delegates operations to the appropriate database.
 * Useful for combining databases.
 *
 * Used in clickhouse-local to combine DatabaseFileSystem and DatabaseMemory.
 */
class DatabasesOverlay : public IDatabase, protected WithContext
{
public:
    DatabasesOverlay(const String & name_, ContextPtr context_);

    /// Not thread-safe. Use only as a factory to initialize the database.
    DatabasesOverlay & registerNextDatabase(DatabasePtr database);

    String getEngineName() const override { return "Overlay"; }

public:
    bool isTableExist(const String & table_name, ContextPtr context) const override;

    StoragePtr tryGetTable(const String & table_name, ContextPtr context) const override;

    void createTable(ContextPtr context, const String & table_name, const StoragePtr & table, const ASTPtr & query) override;

    void dropTable(ContextPtr context, const String & table_name, bool sync) override;

    void attachTable(ContextPtr context, const String & table_name, const StoragePtr & table, const String & relative_table_path) override;

    StoragePtr detachTable(ContextPtr context, const String & table_name) override;

    ASTPtr getCreateTableQueryImpl(const String & name, ContextPtr context, bool throw_on_error) const override;
    ASTPtr getCreateDatabaseQuery() const override;

    String getTableDataPath(const String & table_name) const override;
    String getTableDataPath(const ASTCreateQuery & query) const override;

    UUID tryGetTableUUID(const String & table_name) const override;

    void drop(ContextPtr context) override;

    void alterTable(ContextPtr local_context, const StorageID & table_id, const StorageInMemoryMetadata & metadata) override;

    std::vector<std::pair<ASTPtr, StoragePtr>> getTablesForBackup(const FilterByNameFunction & filter, const ContextPtr & local_context) const override;

    void createTableRestoredFromBackup(const ASTPtr & create_table_query, ContextMutablePtr local_context, std::shared_ptr<IRestoreCoordination> restore_coordination, UInt64 timeout_ms) override;

    DatabaseTablesIteratorPtr getTablesIterator(ContextPtr context, const FilterByNameFunction & filter_by_table_name) const override;

    bool empty() const override;

    void shutdown() override;

protected:
    std::vector<DatabasePtr> databases;
    Poco::Logger * log;
};

}
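As a reading aid, a minimal sketch of how such an overlay might be assembled, using the constructor and registerNextDatabase() declared above; the DatabaseFilesystem and DatabaseMemory constructor arguments and the `global_context` variable are assumptions standing in for the clickhouse-local wiring mentioned in the header comment:

``` cpp
/// Sketch only: mirrors the clickhouse-local use case from the header comment.
auto overlay = std::make_shared<DatabasesOverlay>("default", global_context);
overlay->registerNextDatabase(std::make_shared<DatabaseFilesystem>("default", "/data", global_context))
        .registerNextDatabase(std::make_shared<DatabaseMemory>("default", global_context));

/// Lookups probe the registered databases in order; writes go to the first
/// database that is not read-only (see DatabasesOverlay::createTable).
auto table = overlay->tryGetTable("tmp.csv", global_context);
```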
@ -170,7 +170,7 @@ public:
    /// Get the table for work. Return nullptr if there is no table.
    virtual StoragePtr tryGetTable(const String & name, ContextPtr context) const = 0;

    StoragePtr getTable(const String & name, ContextPtr context) const;
    virtual StoragePtr getTable(const String & name, ContextPtr context) const;

    virtual UUID tryGetTableUUID(const String & /*table_name*/) const { return UUIDHelpers::Nil; }
@ -183,6 +183,8 @@ public:
    /// Is the database empty.
    virtual bool empty() const = 0;

    virtual bool isReadOnly() const { return false; }

    /// Add the table to the database. Record its presence in the metadata.
    virtual void createTable(
        ContextPtr /*context*/,
@ -30,8 +30,6 @@
#include <Dictionaries/RegExpTreeDictionary.h>
#include <Dictionaries/YAMLRegExpTreeDictionarySource.h>

#include <re2_st/stringpiece.h>

#include "config.h"

#if USE_VECTORSCAN
@ -469,10 +467,9 @@ public:

std::pair<String, bool> processBackRefs(const String & data, const re2_st::RE2 & searcher, const std::vector<StringPiece> & pieces)
{
    re2_st::StringPiece haystack(data.data(), data.size());
    re2_st::StringPiece matches[10];
    std::string_view matches[10];
    String result;
    searcher.Match(haystack, 0, data.size(), re2_st::RE2::Anchor::UNANCHORED, matches, 10);
    searcher.Match({data.data(), data.size()}, 0, data.size(), re2_st::RE2::Anchor::UNANCHORED, matches, 10);
    /// if the pattern is a single '$1' but fails to match, we would use the default value.
    if (pieces.size() == 1 && pieces[0].ref_num >= 0 && pieces[0].ref_num < 10 && matches[pieces[0].ref_num].empty())
        return std::make_pair(result, true);
@ -99,8 +99,8 @@ struct ReplaceRegexpImpl
    int num_captures,
    const Instructions & instructions)
{
    re2_st::StringPiece haystack(haystack_data, haystack_length);
    re2_st::StringPiece matches[max_captures];
    std::string_view haystack(haystack_data, haystack_length);
    std::string_view matches[max_captures];

    size_t copy_pos = 0;
    size_t match_pos = 0;
@ -45,8 +45,8 @@ bool isLargerThanFifty(std::string_view str)
/// Check if sub-patterns of the form x{n} or x{n,} can be expensive. Ignore spaces before/after n and m.
bool SlowWithHyperscanChecker::isSlowOneRepeat(std::string_view regexp)
{
    re2_st::StringPiece haystack(regexp.data(), regexp.size());
    re2_st::StringPiece matches[2];
    std::string_view haystack(regexp.data(), regexp.size());
    std::string_view matches[2];
    size_t start_pos = 0;
    while (start_pos < haystack.size())
    {
@ -67,8 +67,8 @@ bool SlowWithHyperscanChecker::isSlowOneRepeat(std::string_view regexp)
/// Check if sub-patterns of the form x{n,m} can be expensive. Ignore spaces before/after n and m.
bool SlowWithHyperscanChecker::isSlowTwoRepeats(std::string_view regexp)
{
    re2_st::StringPiece haystack(regexp.data(), regexp.size());
    re2_st::StringPiece matches[3];
    std::string_view haystack(regexp.data(), regexp.size());
    std::string_view matches[3];
    size_t start_pos = 0;
    while (start_pos < haystack.size())
    {
@ -94,7 +94,6 @@ public:
    if (needle.empty())
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Length of 'needle' argument must be greater than 0.");

    using StringPiece = typename Regexps::Regexp::StringPieceType;
    const Regexps::Regexp holder = Regexps::createRegexp<false, false, false>(needle);
    const auto & regexp = holder.getRE2();

@ -111,7 +110,7 @@ public:
    groups_count, std::to_string(MAX_GROUPS_COUNT - 1));

    // Including 0-group, which is the whole regexp.
    PODArrayWithStackMemory<StringPiece, MAX_GROUPS_COUNT> matched_groups(groups_count + 1);
    PODArrayWithStackMemory<std::string_view, MAX_GROUPS_COUNT> matched_groups(groups_count + 1);

    ColumnArray::ColumnOffsets::MutablePtr root_offsets_col = ColumnArray::ColumnOffsets::create();
    ColumnArray::ColumnOffsets::MutablePtr nested_offsets_col = ColumnArray::ColumnOffsets::create();
@ -160,7 +159,7 @@ public:
    /// Additional limit to fail fast on supposedly incorrect usage.
    const auto max_matches_per_row = context->getSettingsRef().regexp_max_matches_per_row;

    PODArray<StringPiece, 0> all_matches;
    PODArray<std::string_view, 0> all_matches;
    /// Number of times RE matched on each row of haystack column.
    PODArray<size_t, 0> number_of_matches_per_row;
@ -75,7 +75,7 @@ public:
    throw Exception(ErrorCodes::BAD_ARGUMENTS, "There are no groups in regexp: {}", needle);

    // Including 0-group, which is the whole regexp.
    PODArrayWithStackMemory<re2_st::StringPiece, 128> matched_groups(groups_count + 1);
    PODArrayWithStackMemory<std::string_view, 128> matched_groups(groups_count + 1);

    ColumnArray::ColumnOffsets::MutablePtr offsets_col = ColumnArray::ColumnOffsets::create();
    ColumnString::MutablePtr data_col = ColumnString::create();
@ -89,7 +89,7 @@ public:
    {
        std::string_view current_row = column_haystack->getDataAt(i).toView();

        if (re2->Match(re2_st::StringPiece(current_row.data(), current_row.size()),
        if (re2->Match({current_row.data(), current_row.size()},
            0, current_row.size(), re2_st::RE2::UNANCHORED, matched_groups.data(),
            static_cast<int>(matched_groups.size())))
        {
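The hunks above all make the same substitution: the vendored re2_st now accepts string views directly in Match(), so the dedicated re2_st::StringPiece buffers can become std::string_view. A minimal standalone sketch of the new calling convention, assuming re2_st::StringPiece is now an alias of std::string_view (which is what these hunks rely on):

``` cpp
#include <re2_st/re2.h>
#include <string_view>

/// Returns the first capture group of `re` found anywhere in `haystack`,
/// or an empty view if there is no match. Sketch only, not ClickHouse API.
std::string_view firstGroup(std::string_view haystack, const re2_st::RE2 & re)
{
    std::string_view matches[2]; /// matches[0] is the whole match, matches[1] the first group
    if (re.Match(haystack, 0, haystack.size(), re2_st::RE2::Anchor::UNANCHORED, matches, 2))
        return matches[1];
    return {};
}
```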
@ -356,7 +356,8 @@ DatabaseAndTable DatabaseCatalog::getTableImpl(

    auto table = database->tryGetTable(table_id.table_name, context_);
    if (!table && exception)
        exception->emplace(Exception(ErrorCodes::UNKNOWN_TABLE, "Table {} doesn't exist", table_id.getNameForLogs()));
        exception->emplace(Exception(ErrorCodes::UNKNOWN_TABLE, "Table {} doesn't exist", table_id.getNameForLogs()));

    if (!table)
        database = nullptr;
@ -11,16 +11,7 @@

namespace DB
{
///
/// -------- Column --------- Type ------
/// |  event_date       |  DateTime  |
/// |  event_time       |  UInt64    |
/// |  query_id         |  String    |
/// |  remote_file_path |  String    |
/// |  segment_range    |  Tuple     |
/// |  read_type        |  String    |
/// -------------------------------------
///

struct FilesystemCacheLogElement
{
    enum class CacheType
@ -370,15 +370,15 @@ BlockIO InterpreterSystemQuery::execute()
    else
    {
        auto cache = FileCacheFactory::instance().getByName(query.filesystem_cache_name).cache;
        if (query.delete_key.empty())
        if (query.key_to_drop.empty())
        {
            cache->removeAllReleasable();
        }
        else
        {
            auto key = FileCacheKey::fromKeyString(query.delete_key);
            if (query.delete_offset.has_value())
                cache->removeFileSegment(key, query.delete_offset.value());
            auto key = FileCacheKey::fromKeyString(query.key_to_drop);
            if (query.offset_to_drop.has_value())
                cache->removeFileSegment(key, query.offset_to_drop.value());
            else
                cache->removeKey(key);
        }
@ -212,11 +212,11 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &,
    if (!filesystem_cache_name.empty())
    {
        settings.ostr << (settings.hilite ? hilite_none : "") << " " << filesystem_cache_name;
        if (!delete_key.empty())
        if (!key_to_drop.empty())
        {
            settings.ostr << (settings.hilite ? hilite_none : "") << " KEY " << delete_key;
            if (delete_offset.has_value())
                settings.ostr << (settings.hilite ? hilite_none : "") << " OFFSET " << delete_offset.value();
            settings.ostr << (settings.hilite ? hilite_none : "") << " KEY " << key_to_drop;
            if (offset_to_drop.has_value())
                settings.ostr << (settings.hilite ? hilite_none : "") << " OFFSET " << offset_to_drop.value();
        }
    }
}
@ -107,8 +107,8 @@ public:
    UInt64 seconds{};

    String filesystem_cache_name;
    std::string delete_key;
    std::optional<size_t> delete_offset;
    std::string key_to_drop;
    std::optional<size_t> offset_to_drop;

    String backup_name;
@ -409,9 +409,9 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
    res->filesystem_cache_name = ast->as<ASTLiteral>()->value.safeGet<String>();
    if (ParserKeyword{"KEY"}.ignore(pos, expected) && ParserIdentifier().parse(pos, ast, expected))
    {
        res->delete_key = ast->as<ASTIdentifier>()->name();
        res->key_to_drop = ast->as<ASTIdentifier>()->name();
        if (ParserKeyword{"OFFSET"}.ignore(pos, expected) && ParserLiteral().parse(pos, ast, expected))
            res->delete_offset = ast->as<ASTLiteral>()->value.safeGet<UInt64>();
            res->offset_to_drop = ast->as<ASTLiteral>()->value.safeGet<UInt64>();
    }
}
if (!parseQueryWithOnCluster(res, pos, expected))
@ -1,7 +1,6 @@
#pragma once

#include <re2_st/re2.h>
#include <re2_st/stringpiece.h>
#include <string>
#include <vector>
#include <Core/Block.h>
@ -28,14 +27,14 @@ public:
    /// Return true if row was successfully parsed and row fields were extracted.
    bool parseRow(PeekableReadBuffer & buf);

    re2_st::StringPiece getField(size_t index) { return matched_fields[index]; }
    std::string_view getField(size_t index) { return matched_fields[index]; }
    size_t getMatchedFieldsSize() const { return matched_fields.size(); }
    size_t getNumberOfGroups() const { return regexp.NumberOfCapturingGroups(); }

private:
    const re2_st::RE2 regexp;
    // The vector of fields extracted from a line using the regexp.
    std::vector<re2_st::StringPiece> matched_fields;
    std::vector<std::string_view> matched_fields;
    // These two vectors are needed to use RE2::FullMatchN (function for extracting fields).
    std::vector<re2_st::RE2::Arg> re2_arguments;
    std::vector<re2_st::RE2::Arg *> re2_arguments_ptrs;
@ -44,6 +44,8 @@
#include <Poco/String.h>
#include <Poco/Net/SocketAddress.h>

#include <re2/re2.h>

#include <chrono>
#include <sstream>

@ -1163,8 +1165,8 @@ void PredefinedQueryHandler::customizeContext(HTTPServerRequest & request, Conte
{
    int num_captures = compiled_regex->NumberOfCapturingGroups() + 1;

    re2::StringPiece matches[num_captures];
    re2::StringPiece input(begin, end - begin);
    std::string_view matches[num_captures];
    std::string_view input(begin, end - begin);
    if (compiled_regex->Match(input, 0, end - begin, re2::RE2::Anchor::ANCHOR_BOTH, matches, num_captures))
    {
        for (const auto & [capturing_name, capturing_index] : compiled_regex->NamedCapturingGroups())
@ -6,7 +6,6 @@
#include <base/find_symbols.h>

#include <re2/re2.h>
#include <re2/stringpiece.h>
#include <Poco/StringTokenizer.h>
#include <Poco/Util/LayeredConfiguration.h>

@ -26,9 +25,8 @@ static inline bool checkRegexExpression(std::string_view match_str, const Compil
{
    int num_captures = compiled_regex->NumberOfCapturingGroups() + 1;

    re2::StringPiece matches[num_captures];
    re2::StringPiece match_input(match_str.data(), match_str.size());
    return compiled_regex->Match(match_input, 0, match_str.size(), re2::RE2::Anchor::ANCHOR_BOTH, matches, num_captures);
    std::string_view matches[num_captures];
    return compiled_regex->Match({match_str.data(), match_str.size()}, 0, match_str.size(), re2::RE2::Anchor::ANCHOR_BOTH, matches, num_captures);
}

static inline bool checkExpression(std::string_view match_str, const std::pair<String, CompiledRegexPtr> & expression)
@ -7196,7 +7196,10 @@ QueryProcessingStage::Enum MergeTreeData::getQueryProcessingStage(
    if (query_context->canUseParallelReplicasOnInitiator() && to_stage >= QueryProcessingStage::WithMergeableState)
    {
        if (!canUseParallelReplicasBasedOnPKAnalysis(query_context, storage_snapshot, query_info))
        {
            query_info.parallel_replicas_disabled = true;
            return QueryProcessingStage::Enum::FetchColumns;
        }

        /// ReplicatedMergeTree
        if (supportsReplication())
@ -255,6 +255,8 @@ struct SelectQueryInfo
    Block minmax_count_projection_block;
    MergeTreeDataSelectAnalysisResultPtr merge_tree_select_result_ptr;

    bool parallel_replicas_disabled = false;

    bool is_parameterized_view = false;
    NameToNameMap parameterized_view_values;
@ -209,7 +209,9 @@ void StorageMergeTree::read(
    size_t max_block_size,
    size_t num_streams)
{
    if (local_context->canUseParallelReplicasOnInitiator() && local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree)
    if (!query_info.parallel_replicas_disabled &&
        local_context->canUseParallelReplicasOnInitiator() &&
        local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree)
    {
        auto table_id = getStorageID();

@ -240,7 +242,10 @@ void StorageMergeTree::read(
    }
    else
    {
        const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower() && local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree;
        const bool enable_parallel_reading =
            !query_info.parallel_replicas_disabled &&
            local_context->canUseParallelReplicasOnFollower() &&
            local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree;

        if (auto plan = reader.read(
            column_names, storage_snapshot, query_info,
@ -929,44 +934,70 @@ MergeMutateSelectedEntryPtr StorageMergeTree::selectPartsToMerge(

    SelectPartsDecision select_decision = SelectPartsDecision::CANNOT_SELECT;

    if (!canEnqueueBackgroundTask())
    auto is_background_memory_usage_ok = [](String * disable_reason) -> bool
    {
        if (out_disable_reason)
            *out_disable_reason = fmt::format("Current background tasks memory usage ({}) is more than the limit ({})",
        if (canEnqueueBackgroundTask())
            return true;
        if (disable_reason)
            *disable_reason = fmt::format("Current background tasks memory usage ({}) is more than the limit ({})",
                formatReadableSizeWithBinarySuffix(background_memory_tracker.get()),
                formatReadableSizeWithBinarySuffix(background_memory_tracker.getSoftLimit()));
    }
    else if (partition_id.empty())
    {
        UInt64 max_source_parts_size = merger_mutator.getMaxSourcePartsSizeForMerge();
        bool merge_with_ttl_allowed = getTotalMergesWithTTLInMergeList() < data_settings->max_number_of_merges_with_ttl_in_pool;
        return false;
    };

    /// TTL requirements are much stricter than for a regular merge, so
    /// if a regular merge is not possible, then a merge with TTL is not
    /// possible either.
    if (max_source_parts_size > 0)
    if (partition_id.empty())
    {
        if (is_background_memory_usage_ok(out_disable_reason))
        {
            select_decision = merger_mutator.selectPartsToMerge(
                future_part,
                aggressive,
                max_source_parts_size,
                can_merge,
                merge_with_ttl_allowed,
                txn,
                out_disable_reason);
            UInt64 max_source_parts_size = merger_mutator.getMaxSourcePartsSizeForMerge();
            bool merge_with_ttl_allowed = getTotalMergesWithTTLInMergeList() < data_settings->max_number_of_merges_with_ttl_in_pool;

            /// TTL requirements are much stricter than for a regular merge, so
            /// if a regular merge is not possible, then a merge with TTL is not
            /// possible either.
            if (max_source_parts_size > 0)
            {
                select_decision = merger_mutator.selectPartsToMerge(
                    future_part,
                    aggressive,
                    max_source_parts_size,
                    can_merge,
                    merge_with_ttl_allowed,
                    txn,
                    out_disable_reason);
            }
            else if (out_disable_reason)
                *out_disable_reason = "Current value of max_source_parts_size is zero";
        }
        else if (out_disable_reason)
            *out_disable_reason = "Current value of max_source_parts_size is zero";
    }
    else
    {
        while (true)
        {
            select_decision = merger_mutator.selectAllPartsToMergeWithinPartition(
                future_part, can_merge, partition_id, final, metadata_snapshot, txn, out_disable_reason, optimize_skip_merged_partitions);
            auto timeout_ms = getSettings()->lock_acquire_timeout_for_background_operations.totalMilliseconds();
            auto timeout = std::chrono::milliseconds(timeout_ms);

            if (!is_background_memory_usage_ok(out_disable_reason))
            {
                constexpr auto poll_interval = std::chrono::seconds(1);
                Int64 attempts = timeout / poll_interval;
                bool ok = false;
                for (Int64 i = 0; i < attempts; ++i)
                {
                    std::this_thread::sleep_for(poll_interval);
                    if (is_background_memory_usage_ok(out_disable_reason))
                    {
                        ok = true;
                        break;
                    }
                }
                if (!ok)
                    break;
            }

            select_decision = merger_mutator.selectAllPartsToMergeWithinPartition(
                future_part, can_merge, partition_id, final, metadata_snapshot, txn, out_disable_reason, optimize_skip_merged_partitions);

            /// If final - we will wait for currently processing merges to finish and continue.
            if (final
                && select_decision != SelectPartsDecision::SELECTED
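The per-partition branch above no longer fails immediately when background memory is over the limit: it now polls the check once per second until lock_acquire_timeout_for_background_operations expires. A stripped-down sketch of that wait pattern; the function name and std::function signature are illustrative, not ClickHouse API:

``` cpp
#include <chrono>
#include <functional>
#include <thread>

/// Polls `ok` once per second until it returns true or `timeout` elapses;
/// returns the final verdict. Sketch of the retry loop used in the hunk above.
bool waitUntilOk(const std::function<bool()> & ok, std::chrono::milliseconds timeout)
{
    constexpr auto poll_interval = std::chrono::seconds(1);
    const auto attempts = timeout / poll_interval;
    for (long long i = 0; i < attempts; ++i)
    {
        if (ok())
            return true;
        std::this_thread::sleep_for(poll_interval);
    }
    return ok();
}
```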
@ -36,6 +36,7 @@
01455_shard_leaf_max_rows_bytes_to_read
01495_subqueries_in_with_statement
01504_rocksdb
01526_client_start_and_exit
01527_dist_sharding_key_dictGet_reload
01528_allow_nondeterministic_optimize_skip_unused_shards
01540_verbatim_partition_pruning
@ -50,6 +51,7 @@
01624_soft_constraints
01651_bugs_from_15889
01656_test_query_log_factories_info
01676_clickhouse_client_autocomplete
01681_bloom_filter_nullable_column
01700_system_zookeeper_path_in
01710_projection_additional_filters
@ -243,7 +243,7 @@ function check_logs_for_critical_errors()
    # Remove file fatal_messages.txt if it's empty
    [ -s /test_output/fatal_messages.txt ] || rm /test_output/fatal_messages.txt

    rg -Fa "########################################" /test_output/* > /dev/null \
    rg -Faz "########################################" /test_output/* > /dev/null \
        && echo -e "Killed by signal (output files)$FAIL" >> /test_output/test_results.tsv

function get_gdb_log_context()
@ -32,5 +32,10 @@
        <secret_access_key>testtest</secret_access_key>
        <structure>auto</structure>
    </s3_conn>
    <s3_conn_db>
        <url>http://localhost:11111/test/</url>
        <access_key_id>test</access_key_id>
        <secret_access_key>testtest</secret_access_key>
    </s3_conn_db>
</named_collections>
</clickhouse>
@ -111,6 +111,23 @@ cat > /usr/local/hadoop/etc/hadoop/hdfs-site.xml << EOF
        <name>dfs.datanode.http.address</name>
        <value>0.0.0.0:1006</value>
    </property>
    <!-- If the port is 0 then the server will start on a free port. -->
    <property>
        <name>dfs.datanode.ipc.address</name>
        <value>0.0.0.0:0</value>
    </property>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>0.0.0.0:0</value>
    </property>
    <property>
        <name>dfs.namenode.backup.address</name>
        <value>0.0.0.0:0</value>
    </property>
    <property>
        <name>dfs.namenode.backup.http-address</name>
        <value>0.0.0.0:0</value>
    </property>
    <!--
    <property>
        <name>dfs.http.policy</name>
@ -1,6 +1,5 @@
#!/usr/bin/env bash

unset CLICKHOUSE_LOG_COMMENT
# Tags: no-parallel

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
@ -12,12 +11,8 @@ $CLICKHOUSE_CLIENT --query="select toUInt64(pow(2, 62)) as value format JSON" --
$CLICKHOUSE_CLIENT --readonly=1 --multiquery --query="set output_format_json_quote_64bit_integers=1 ; select toUInt64(pow(2, 63)) as value format JSON" --server_logs_file=/dev/null 2>&1 | grep -o -q 'value\|Cannot modify .* setting in readonly mode' && echo "OK" || echo "FAIL"
$CLICKHOUSE_CLIENT --readonly=1 --multiquery --query="set output_format_json_quote_64bit_integers=0 ; select toUInt64(pow(2, 63)) as value format JSON" --server_logs_file=/dev/null 2>&1 | grep -o -q 'value\|Cannot modify .* setting in readonly mode' && echo "OK" || echo "FAIL"


${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=SELECT+toUInt64(pow(2,+63))+as+value+format+JSON&output_format_json_quote_64bit_integers=1" | grep value
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=SELECT+toUInt64(pow(2,+63))+as+value+format+JSON&output_format_json_quote_64bit_integers=0" | grep value

#${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=readonly&session_timeout=3600" -d 'SET readonly = 1'

${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=readonly&query=SELECT+toUInt64(pow(2,+63))+as+value+format+JSON&output_format_json_quote_64bit_integers=1" 2>&1 | grep -o -q 'value\|Cannot modify .* setting in readonly mode.' && echo "OK" || echo "FAIL"
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=readonly&query=SELECT+toUInt64(pow(2,+63))+as+value+format+JSON&output_format_json_quote_64bit_integers=1" 2>&1 | grep -o -q 'value\|Cannot modify .* setting in readonly mode' && echo "OK" || echo "FAIL"
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=readonly&query=SELECT+toUInt64(pow(2,+63))+as+value+format+JSON&output_format_json_quote_64bit_integers=0" 2>&1 | grep -o -q 'value\|Cannot modify .* setting in readonly mode' && echo "OK" || echo "FAIL"
@ -3,8 +3,6 @@

set -ue

unset CLICKHOUSE_LOG_COMMENT

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

@ -1,8 +1,6 @@
#!/usr/bin/env bash
set -ue

unset CLICKHOUSE_LOG_COMMENT

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

@ -1,8 +1,6 @@
#!/usr/bin/env bash
# Tags: race

unset CLICKHOUSE_LOG_COMMENT

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

@ -3,8 +3,6 @@ set -ue

# this test doesn't need 'current_database = currentDatabase()',

unset CLICKHOUSE_LOG_COMMENT

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

@ -1,8 +1,6 @@
#!/usr/bin/env bash
# Tags: race

unset CLICKHOUSE_LOG_COMMENT

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

@ -3,8 +3,6 @@

# shellcheck disable=SC2154

unset CLICKHOUSE_LOG_COMMENT

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

@ -5,8 +5,6 @@

# shellcheck disable=SC2154

unset CLICKHOUSE_LOG_COMMENT

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

@ -2,8 +2,6 @@

# shellcheck disable=SC2154

unset CLICKHOUSE_LOG_COMMENT

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

@ -1,8 +1,6 @@
#!/usr/bin/env bash
set -ue

unset CLICKHOUSE_LOG_COMMENT

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

@ -1,7 +1,5 @@
#!/usr/bin/env bash

unset CLICKHOUSE_LOG_COMMENT

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
@ -11,4 +9,4 @@ $CLICKHOUSE_CLIENT --query="SELECT * from format('TSV', '123')"

$CLICKHOUSE_CLIENT --readonly=1 --query="SELECT * from numbers(1)"
$CLICKHOUSE_CLIENT --readonly=1 --query="SELECT * from format('TSV', '123')" 2>&1 | grep -Fq "Cannot execute query in readonly mode. (READONLY)" && echo 'ERROR' || echo 'OK'
$CLICKHOUSE_CLIENT --readonly=1 --query="INSERT INTO FUNCTION null('x String') (x) FORMAT TSV '123'" 2>&1 | grep -Fq "Cannot execute query in readonly mode. (READONLY)" && echo 'ERROR' || echo 'OK'
$CLICKHOUSE_CLIENT --readonly=1 --query="INSERT INTO FUNCTION null('x String') (x) FORMAT TSV '123'" 2>&1 | grep -Fq "Cannot execute query in readonly mode. (READONLY)" && echo 'ERROR' || echo 'OK'

@ -3,8 +3,6 @@

# shellcheck disable=SC2154

unset CLICKHOUSE_LOG_COMMENT

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

@ -1,4 +1,6 @@
-- Tags: long, no-debug, no-tsan, no-asan, no-ubsan, no-msan
-- Tags: long, no-debug, no-tsan, no-asan, no-ubsan, no-msan, no-parallel

-- no-parallel because the sets use a lot of memory, which may interfere with other tests

DROP TABLE IF EXISTS 02581_trips;

@ -1,4 +1,6 @@
-- Tags: long, no-debug, no-tsan, no-asan, no-ubsan, no-msan
-- Tags: long, no-debug, no-tsan, no-asan, no-ubsan, no-msan, no-parallel

-- no-parallel because the sets use a lot of memory, which may interfere with other tests

DROP TABLE IF EXISTS 02581_trips;

@ -3,8 +3,6 @@

# shellcheck disable=SC2154

unset CLICKHOUSE_LOG_COMMENT

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
@ -0,0 +1,9 @@
Test 1: check explicit and implicit call of the file table function
explicit:
4
implicit:
4
Test 2: check Filesystem database
4
Test 3: check show database with Filesystem
test02707
@ -0,0 +1,45 @@
#!/usr/bin/env bash

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

dir=${CLICKHOUSE_TEST_UNIQUE_NAME}
[[ -d $dir ]] && rm -rd $dir
mkdir $dir

# Create temporary csv file for tests
echo '"id","str","int","text"' > $dir/tmp.csv
echo '1,"abc",123,"abacaba"' >> $dir/tmp.csv
echo '2,"def",456,"bacabaa"' >> $dir/tmp.csv
echo '3,"story",78912,"acabaab"' >> $dir/tmp.csv
echo '4,"history",21321321,"cabaaba"' >> $dir/tmp.csv

#################
echo "Test 1: check explicit and implicit call of the file table function"

echo "explicit:"
$CLICKHOUSE_LOCAL -q "SELECT COUNT(*) FROM file('${dir}/tmp.csv')"
echo "implicit:"
$CLICKHOUSE_LOCAL -q "SELECT COUNT(*) FROM \"${dir}/tmp.csv\""

#################
echo "Test 2: check Filesystem database"
$CLICKHOUSE_LOCAL --multiline --multiquery -q """
DROP DATABASE IF EXISTS test;
CREATE DATABASE test ENGINE = Filesystem('${dir}');
SELECT COUNT(*) FROM test.\`tmp.csv\`;
DROP DATABASE test;
"""

#################
echo "Test 3: check show database with Filesystem"
$CLICKHOUSE_LOCAL --multiline --multiquery -q """
DROP DATABASE IF EXISTS test02707;
CREATE DATABASE test02707 ENGINE = Filesystem('${dir}');
SHOW DATABASES;
DROP DATABASE test02707;
""" | grep "test02707"

# Remove temporary dir with files
rm -rd $dir
@ -0,0 +1,15 @@
Test 1: create filesystem database and check implicit calls
0
test1
4
4
4
Test 2: check DatabaseFilesystem access rights and errors handling on server
OK
OK
OK
OK
OK
OK
OK
OK
72 tests/queries/0_stateless/02722_database_filesystem.sh Executable file
@ -0,0 +1,72 @@
#!/usr/bin/env bash
# Tags: no-parallel

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

# see 01658_read_file_to_stringcolumn.sh
CLICKHOUSE_USER_FILES_PATH=$(clickhouse-client --query "select _path, _file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}')

# Prepare data
unique_name=${CLICKHOUSE_TEST_UNIQUE_NAME}
user_files_tmp_dir=${CLICKHOUSE_USER_FILES_PATH}/${unique_name}
mkdir -p ${user_files_tmp_dir}/tmp/
echo '"id","str","int","text"' > ${user_files_tmp_dir}/tmp.csv
echo '1,"abc",123,"abacaba"' >> ${user_files_tmp_dir}/tmp.csv
echo '2,"def",456,"bacabaa"' >> ${user_files_tmp_dir}/tmp.csv
echo '3,"story",78912,"acabaab"' >> ${user_files_tmp_dir}/tmp.csv
echo '4,"history",21321321,"cabaaba"' >> ${user_files_tmp_dir}/tmp.csv

tmp_dir=${CLICKHOUSE_TEST_UNIQUE_NAME}
[[ -d $tmp_dir ]] && rm -rd $tmp_dir
mkdir $tmp_dir
cp ${user_files_tmp_dir}/tmp.csv ${tmp_dir}/tmp.csv
cp ${user_files_tmp_dir}/tmp.csv ${user_files_tmp_dir}/tmp/tmp.csv
cp ${user_files_tmp_dir}/tmp.csv ${user_files_tmp_dir}/tmp.myext

#################
echo "Test 1: create filesystem database and check implicit calls"
${CLICKHOUSE_CLIENT} --multiline --multiquery -q """
DROP DATABASE IF EXISTS test1;
CREATE DATABASE test1 ENGINE = Filesystem;
"""
echo $?
${CLICKHOUSE_CLIENT} --query "SHOW DATABASES" | grep "test1"
${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`${unique_name}/tmp.csv\`;"
${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`${unique_name}/tmp/tmp.csv\`;"
${CLICKHOUSE_LOCAL} -q "SELECT COUNT(*) FROM \"${tmp_dir}/tmp.csv\""

#################
echo "Test 2: check DatabaseFilesystem access rights and errors handling on server"
# DATABASE_ACCESS_DENIED: listing files is allowed only inside user_files
${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`../tmp.csv\`;" 2>&1| grep -F "Code: 481" > /dev/null && echo "OK" || echo 'FAIL' ||:
${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`/tmp/tmp.csv\`;" 2>&1| grep -F "Code: 481" > /dev/null && echo "OK" || echo 'FAIL' ||:
${CLICKHOUSE_CLIENT} --multiline --multiquery --query """
USE test1;
SELECT COUNT(*) FROM \"../${tmp_dir}/tmp.csv\";
""" 2>&1| grep -F "Code: 481" > /dev/null && echo "OK" || echo 'FAIL' ||:
${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`../../../../../../tmp.csv\`;" 2>&1| grep -F "Code: 481" > /dev/null && echo "OK" || echo 'FAIL' ||:

# BAD_ARGUMENTS: path should be inside user_files
${CLICKHOUSE_CLIENT} --multiline --multiquery -q """
DROP DATABASE IF EXISTS test2;
CREATE DATABASE test2 ENGINE = Filesystem('/tmp');
""" 2>&1| grep -F "Code: 36" > /dev/null && echo "OK" || echo 'FAIL' ||:

# BAD_ARGUMENTS: .../user_files/relative_unknown_dir does not exist
${CLICKHOUSE_CLIENT} --multiline --multiquery -q """
DROP DATABASE IF EXISTS test2;
CREATE DATABASE test2 ENGINE = Filesystem('relative_unknown_dir');
""" 2>&1| grep -F "Code: 36" > /dev/null && echo "OK" || echo 'FAIL' ||:

# FILE_DOESNT_EXIST: unknown file
${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`tmp2.csv\`;" 2>&1| grep -F "Code: 60" > /dev/null && echo "OK" || echo 'FAIL' ||:

# BAD_ARGUMENTS: cannot determine the file format by its extension
${CLICKHOUSE_CLIENT} --query "SELECT COUNT(*) FROM test1.\`${unique_name}/tmp.myext\`;" 2>&1| grep -F "Code: 36" > /dev/null && echo "OK" || echo 'FAIL' ||:

# Clean
${CLICKHOUSE_CLIENT} --query "DROP DATABASE test1;"
rm -rd $tmp_dir
rm -rd $user_files_tmp_dir
21 tests/queries/0_stateless/02724_database_s3.reference Normal file
@ -0,0 +1,21 @@
Test 1: select from s3
1 2 3
4 5 6
7 8 9
0 0 0
test1
10 11 12
13 14 15
16 17 18
0 0 0
10 11 12
13 14 15
16 17 18
0 0 0
10 11 12
13 14 15
16 17 18
0 0 0
Test 2: check exceptions
OK
OK
63 tests/queries/0_stateless/02724_database_s3.sh Executable file
@ -0,0 +1,63 @@
#!/usr/bin/env bash
# Tags: no-fasttest, no-parallel
# Tag no-fasttest: Depends on AWS

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

#################
echo "Test 1: select from s3"
${CLICKHOUSE_CLIENT} --multiline --multiquery -q """
DROP DATABASE IF EXISTS test1;
CREATE DATABASE test1 ENGINE = S3;
USE test1;
SELECT * FROM \"http://localhost:11111/test/a.tsv\"
"""
${CLICKHOUSE_CLIENT} -q "SHOW DATABASES;" | grep test1

# check credentials with absolute path
${CLICKHOUSE_CLIENT} --multiline --multiquery -q """
DROP DATABASE IF EXISTS test2;
CREATE DATABASE test2 ENGINE = S3('', 'test', 'testtest');
USE test2;
SELECT * FROM \"http://localhost:11111/test/b.tsv\"
"""

# check credentials with relative path
${CLICKHOUSE_CLIENT} --multiline --multiquery -q """
DROP DATABASE IF EXISTS test4;
CREATE DATABASE test4 ENGINE = S3('http://localhost:11111/test', 'test', 'testtest');
USE test4;
SELECT * FROM \"b.tsv\"
"""

# Check named collection loading
${CLICKHOUSE_CLIENT} --multiline --multiquery -q """
DROP DATABASE IF EXISTS test5;
CREATE DATABASE test5 ENGINE = S3(s3_conn_db);
SELECT * FROM test5.\`b.tsv\`
"""

#################
echo "Test 2: check exceptions"
${CLICKHOUSE_CLIENT} --multiline --multiquery -q """
DROP DATABASE IF EXISTS test3;
CREATE DATABASE test3 ENGINE = S3;
USE test3;
SELECT * FROM \"http://localhost:11111/test/a.myext\"
""" 2>&1| grep -F "UNKNOWN_TABLE" > /dev/null && echo "OK"

${CLICKHOUSE_CLIENT} --multiline --multiquery -q """
USE test3;
SELECT * FROM \"abacaba\"
""" 2>&1| grep -F "UNKNOWN_TABLE" > /dev/null && echo "OK"

# Cleanup
${CLICKHOUSE_CLIENT} --multiline --multiquery -q """
DROP DATABASE IF EXISTS test1;
DROP DATABASE IF EXISTS test2;
DROP DATABASE IF EXISTS test3;
DROP DATABASE IF EXISTS test4;
DROP DATABASE IF EXISTS test5;
"""
12 tests/queries/0_stateless/02725_database_hdfs.reference Normal file
@ -0,0 +1,12 @@
Test 1: select from hdfs database
1 2 3
test1
1 2 3
test2
Test 2: check exceptions
OK0
OK1
OK2
OK3
OK4
OK5
60 tests/queries/0_stateless/02725_database_hdfs.sh Executable file
@ -0,0 +1,60 @@
#!/usr/bin/env bash
# Tags: no-fasttest, use-hdfs, no-parallel

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

# Prepare data
${CLICKHOUSE_CLIENT} -q "insert into table function hdfs('hdfs://localhost:12222/test_02725_1.tsv', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32') select 1, 2, 3 settings hdfs_truncate_on_insert=1;"
${CLICKHOUSE_CLIENT} -q "insert into table function hdfs('hdfs://localhost:12222/test_02725_2.tsv', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32') select 4, 5, 6 settings hdfs_truncate_on_insert=1;"

#################
echo "Test 1: select from hdfs database"

# Database without specific host
${CLICKHOUSE_CLIENT} --multiline --multiquery -q """
DROP DATABASE IF EXISTS test1;
CREATE DATABASE test1 ENGINE = HDFS;
USE test1;
SELECT * FROM \"hdfs://localhost:12222/test_02725_1.tsv\"
"""
${CLICKHOUSE_CLIENT} -q "SHOW DATABASES;" | grep test1

# Database with host
${CLICKHOUSE_CLIENT} --multiline --multiquery -q """
DROP DATABASE IF EXISTS test2;
CREATE DATABASE test2 ENGINE = HDFS('hdfs://localhost:12222');
USE test2;
SELECT * FROM \"test_02725_1.tsv\"
"""
${CLICKHOUSE_CLIENT} -q "SHOW DATABASES;" | grep test2

#################
echo "Test 2: check exceptions"

${CLICKHOUSE_CLIENT} --multiline --multiquery -q """
DROP DATABASE IF EXISTS test3;
CREATE DATABASE test3 ENGINE = HDFS('abacaba');
""" 2>&1| grep -F "BAD_ARGUMENTS" > /dev/null && echo "OK0"

${CLICKHOUSE_CLIENT} --multiline --multiquery -q """
DROP DATABASE IF EXISTS test4;
CREATE DATABASE test4 ENGINE = HDFS;
USE test4;
SELECT * FROM \"abacaba/file.tsv\"
""" 2>&1| grep -F "UNKNOWN_TABLE" > /dev/null && echo "OK1"

${CLICKHOUSE_CLIENT} -q "SELECT * FROM test4.\`http://localhost:11111/test/a.tsv\`" 2>&1| grep -F "UNKNOWN_TABLE" > /dev/null && echo "OK2"
${CLICKHOUSE_CLIENT} --query "SELECT * FROM test4.\`hdfs://localhost:12222/file.myext\`" 2>&1| grep -F "UNKNOWN_TABLE" > /dev/null && echo "OK3"
${CLICKHOUSE_CLIENT} --query "SELECT * FROM test4.\`hdfs://localhost:12222/test_02725_3.tsv\`" 2>&1| grep -F "UNKNOWN_TABLE" > /dev/null && echo "OK4"
${CLICKHOUSE_CLIENT} --query "SELECT * FROM test4.\`hdfs://localhost:12222\`" 2>&1| grep -F "UNKNOWN_TABLE" > /dev/null && echo "OK5"

# Cleanup
${CLICKHOUSE_CLIENT} --multiline --multiquery -q """
DROP DATABASE IF EXISTS test1;
DROP DATABASE IF EXISTS test2;
DROP DATABASE IF EXISTS test3;
DROP DATABASE IF EXISTS test4;
"""
@ -0,0 +1,4 @@
-- count() ------------------------------
2
-- count() with parallel replicas -------
2
@ -0,0 +1,24 @@
DROP TABLE IF EXISTS users;
CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=MergeTree() ORDER BY uid;

INSERT INTO users VALUES (111, 'JFK', 33);
INSERT INTO users VALUES (6666, 'KLM', 48);
INSERT INTO users VALUES (88888, 'AMS', 50);

SELECT '-- count() ------------------------------';
SELECT count() FROM users PREWHERE uid > 2000;

-- enable parallel replicas but with high granules threshold
SET
    skip_unavailable_shards=1,
    allow_experimental_parallel_reading_from_replicas=1,
    max_parallel_replicas=3,
    use_hedged_requests=0,
    cluster_for_parallel_replicas='parallel_replicas',
    parallel_replicas_for_non_replicated_merge_tree=1,
    parallel_replicas_min_number_of_granules_to_enable=1000;

SELECT '-- count() with parallel replicas -------';
SELECT count() FROM users PREWHERE uid > 2000;

DROP TABLE users;
@ -2,8 +2,6 @@

# shellcheck disable=SC2154

unset CLICKHOUSE_LOG_COMMENT

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh