diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml
index 48e8fbbba05..64c3d2f8342 100644
--- a/.github/workflows/backport_branches.yml
+++ b/.github/workflows/backport_branches.yml
@@ -167,10 +167,16 @@ jobs:
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
+ - name: Download reports
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(needs.RunConfig.outputs.data) }} --pre --job-name Builds
- name: Builds report
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 ./build_report_check.py --reports package_release package_aarch64 package_asan package_tsan package_debug binary_darwin binary_darwin_aarch64
+ - name: Set status
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(needs.RunConfig.outputs.data) }} --post --job-name Builds
############################################################################################
#################################### INSTALL PACKAGES ######################################
############################################################################################
diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml
index 3e898c69ade..6bf846d7535 100644
--- a/.github/workflows/release_branches.yml
+++ b/.github/workflows/release_branches.yml
@@ -184,10 +184,16 @@ jobs:
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
+ - name: Download reports
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(needs.RunConfig.outputs.data) }} --pre --job-name Builds
- name: Builds report
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 ./build_report_check.py --reports package_release package_aarch64 package_asan package_msan package_ubsan package_tsan package_debug binary_darwin binary_darwin_aarch64
+ - name: Set status
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(needs.RunConfig.outputs.data) }} --post --job-name Builds
MarkReleaseReady:
if: ${{ !failure() && !cancelled() }}
needs:
diff --git a/base/base/getFQDNOrHostName.cpp b/base/base/getFQDNOrHostName.cpp
index 2a4ba8e2e11..6b3da9699b9 100644
--- a/base/base/getFQDNOrHostName.cpp
+++ b/base/base/getFQDNOrHostName.cpp
@@ -6,6 +6,9 @@ namespace
{
std::string getFQDNOrHostNameImpl()
{
+#if defined(OS_DARWIN)
+ return Poco::Net::DNS::hostName();
+#else
try
{
return Poco::Net::DNS::thisHost().name();
@@ -14,6 +17,7 @@ namespace
{
return Poco::Net::DNS::hostName();
}
+#endif
}
}
diff --git a/contrib/jemalloc-cmake/CMakeLists.txt b/contrib/jemalloc-cmake/CMakeLists.txt
index b633f0fda50..6c874221a94 100644
--- a/contrib/jemalloc-cmake/CMakeLists.txt
+++ b/contrib/jemalloc-cmake/CMakeLists.txt
@@ -34,9 +34,9 @@ if (OS_LINUX)
# avoid spurious latencies and additional work associated with
# MADV_DONTNEED. See
# https://github.com/ClickHouse/ClickHouse/issues/11121 for motivation.
- set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000")
+ set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000,prof:true,prof_active:false,background_thread:true")
else()
- set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000")
+ set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000,prof:true,prof_active:false,background_thread:true")
endif()
# CACHE variable is empty to allow changing defaults without the necessity
# to purge cache
diff --git a/docs/en/operations/named-collections.md b/docs/en/operations/named-collections.md
index 91438cfb675..59ee05d1f9e 100644
--- a/docs/en/operations/named-collections.md
+++ b/docs/en/operations/named-collections.md
@@ -5,6 +5,10 @@ sidebar_label: "Named collections"
title: "Named collections"
---
+import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge';
+
+
+
Named collections provide a way to store collections of key-value pairs to be
used to configure integrations with external sources. You can use named collections with
dictionaries, tables, table functions, and object storage.
diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md
index db8157592db..8278f8c8699 100644
--- a/docs/en/operations/server-configuration-parameters/settings.md
+++ b/docs/en/operations/server-configuration-parameters/settings.md
@@ -498,6 +498,8 @@ Default: 0.9
Interval in seconds during which the server's maximum allowed memory consumption is adjusted by the corresponding threshold in cgroups. (see
settings `cgroup_memory_watcher_hard_limit_ratio` and `cgroup_memory_watcher_soft_limit_ratio`).
+To disable the cgroup observer, set this value to `0`.
+
Type: UInt64
Default: 15
diff --git a/docs/en/operations/utilities/clickhouse-local.md b/docs/en/operations/utilities/clickhouse-local.md
index 93a3fecf3c6..f19643a3fa5 100644
--- a/docs/en/operations/utilities/clickhouse-local.md
+++ b/docs/en/operations/utilities/clickhouse-local.md
@@ -236,10 +236,10 @@ Read 2 rows, 32.00 B in 0.000 sec., 5182 rows/sec., 80.97 KiB/sec.
Previous example is the same as:
``` bash
-$ echo -e "1,2\n3,4" | clickhouse-local --query "
+$ echo -e "1,2\n3,4" | clickhouse-local -n --query "
CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin);
SELECT a, b FROM table;
- DROP TABLE table"
+ DROP TABLE table;"
Read 2 rows, 32.00 B in 0.000 sec., 4987 rows/sec., 77.93 KiB/sec.
1 2
3 4
diff --git a/docs/en/sql-reference/statements/alter/named-collection.md b/docs/en/sql-reference/statements/alter/named-collection.md
index 71d4bfadd9c..ab772fe4dcf 100644
--- a/docs/en/sql-reference/statements/alter/named-collection.md
+++ b/docs/en/sql-reference/statements/alter/named-collection.md
@@ -3,6 +3,10 @@ slug: /en/sql-reference/statements/alter/named-collection
sidebar_label: NAMED COLLECTION
---
+import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge';
+
+
+
# ALTER NAMED COLLECTION
This query intends to modify already existing named collections.
diff --git a/docs/en/sql-reference/statements/alter/view.md b/docs/en/sql-reference/statements/alter/view.md
index fb7a5bd7c03..5f3dae0a9c0 100644
--- a/docs/en/sql-reference/statements/alter/view.md
+++ b/docs/en/sql-reference/statements/alter/view.md
@@ -134,8 +134,8 @@ PRIMARY KEY (event_type, ts)
ORDER BY (event_type, ts, browser)
SETTINGS index_granularity = 8192
--- !!! The columns' definition is unchanged but it does not matter, we are not quering
--- MATERIALIZED VIEW, we are quering TO (storage) table.
+-- !!! The columns' definition is unchanged but it does not matter, we are not querying
+-- MATERIALIZED VIEW, we are querying TO (storage) table.
-- SELECT section is updated.
SHOW CREATE TABLE mv FORMAT TSVRaw;
diff --git a/docs/en/sql-reference/statements/create/named-collection.md b/docs/en/sql-reference/statements/create/named-collection.md
index f69fa2e3678..a4e146c814c 100644
--- a/docs/en/sql-reference/statements/create/named-collection.md
+++ b/docs/en/sql-reference/statements/create/named-collection.md
@@ -3,6 +3,10 @@ slug: /en/sql-reference/statements/create/named-collection
sidebar_label: NAMED COLLECTION
---
+import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge';
+
+
+
# CREATE NAMED COLLECTION
Creates a new named collection.
diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp
index c4878b18f00..6343dc85d00 100644
--- a/programs/client/Client.cpp
+++ b/programs/client/Client.cpp
@@ -248,6 +248,10 @@ std::vector Client::loadWarningMessages()
}
}
+Poco::Util::LayeredConfiguration & Client::getClientConfiguration()
+{
+ return config();
+}
void Client::initialize(Poco::Util::Application & self)
{
@@ -697,9 +701,7 @@ bool Client::processWithFuzzing(const String & full_query)
const char * begin = full_query.data();
orig_ast = parseQuery(begin, begin + full_query.size(),
global_context->getSettingsRef(),
- /*allow_multi_statements=*/ true,
- /*is_interactive=*/ is_interactive,
- /*ignore_error=*/ ignore_error);
+ /*allow_multi_statements=*/ true);
}
catch (const Exception & e)
{
diff --git a/programs/client/Client.h b/programs/client/Client.h
index bef948b3c1e..229608f787d 100644
--- a/programs/client/Client.h
+++ b/programs/client/Client.h
@@ -16,6 +16,9 @@ public:
int main(const std::vector & /*args*/) override;
protected:
+
+ Poco::Util::LayeredConfiguration & getClientConfiguration() override;
+
bool processWithFuzzing(const String & full_query) override;
std::optional processFuzzingStep(const String & query_to_execute, const ASTPtr & parsed_query);
diff --git a/programs/library-bridge/CMakeLists.txt b/programs/library-bridge/CMakeLists.txt
index 2fca10ce4d7..86410d712ec 100644
--- a/programs/library-bridge/CMakeLists.txt
+++ b/programs/library-bridge/CMakeLists.txt
@@ -11,7 +11,6 @@ set (CLICKHOUSE_LIBRARY_BRIDGE_SOURCES
LibraryBridgeHandlers.cpp
SharedLibrary.cpp
library-bridge.cpp
- createFunctionBaseCast.cpp
)
clickhouse_add_executable(clickhouse-library-bridge ${CLICKHOUSE_LIBRARY_BRIDGE_SOURCES})
@@ -20,6 +19,7 @@ target_link_libraries(clickhouse-library-bridge PRIVATE
daemon
dbms
bridge
+ clickhouse_functions
)
set_target_properties(clickhouse-library-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..)
diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp
index cb1c35743b2..503cb0fb97d 100644
--- a/programs/local/LocalServer.cpp
+++ b/programs/local/LocalServer.cpp
@@ -82,6 +82,11 @@ void applySettingsOverridesForLocal(ContextMutablePtr context)
context->setSettings(settings);
}
+Poco::Util::LayeredConfiguration & LocalServer::getClientConfiguration()
+{
+ return config();
+}
+
void LocalServer::processError(const String &) const
{
if (ignore_error)
@@ -117,19 +122,19 @@ void LocalServer::initialize(Poco::Util::Application & self)
Poco::Util::Application::initialize(self);
/// Load config files if exists
- if (config().has("config-file") || fs::exists("config.xml"))
+ if (getClientConfiguration().has("config-file") || fs::exists("config.xml"))
{
- const auto config_path = config().getString("config-file", "config.xml");
+ const auto config_path = getClientConfiguration().getString("config-file", "config.xml");
ConfigProcessor config_processor(config_path, false, true);
ConfigProcessor::setConfigPath(fs::path(config_path).parent_path());
auto loaded_config = config_processor.loadConfig();
- config().add(loaded_config.configuration.duplicate(), PRIO_DEFAULT, false);
+ getClientConfiguration().add(loaded_config.configuration.duplicate(), PRIO_DEFAULT, false);
}
GlobalThreadPool::initialize(
- config().getUInt("max_thread_pool_size", 10000),
- config().getUInt("max_thread_pool_free_size", 1000),
- config().getUInt("thread_pool_queue_size", 10000)
+ getClientConfiguration().getUInt("max_thread_pool_size", 10000),
+ getClientConfiguration().getUInt("max_thread_pool_free_size", 1000),
+ getClientConfiguration().getUInt("thread_pool_queue_size", 10000)
);
#if USE_AZURE_BLOB_STORAGE
@@ -141,18 +146,18 @@ void LocalServer::initialize(Poco::Util::Application & self)
#endif
getIOThreadPool().initialize(
- config().getUInt("max_io_thread_pool_size", 100),
- config().getUInt("max_io_thread_pool_free_size", 0),
- config().getUInt("io_thread_pool_queue_size", 10000));
+ getClientConfiguration().getUInt("max_io_thread_pool_size", 100),
+ getClientConfiguration().getUInt("max_io_thread_pool_free_size", 0),
+ getClientConfiguration().getUInt("io_thread_pool_queue_size", 10000));
- const size_t active_parts_loading_threads = config().getUInt("max_active_parts_loading_thread_pool_size", 64);
+ const size_t active_parts_loading_threads = getClientConfiguration().getUInt("max_active_parts_loading_thread_pool_size", 64);
getActivePartsLoadingThreadPool().initialize(
active_parts_loading_threads,
0, // We don't need any threads one all the parts will be loaded
active_parts_loading_threads);
- const size_t outdated_parts_loading_threads = config().getUInt("max_outdated_parts_loading_thread_pool_size", 32);
+ const size_t outdated_parts_loading_threads = getClientConfiguration().getUInt("max_outdated_parts_loading_thread_pool_size", 32);
getOutdatedPartsLoadingThreadPool().initialize(
outdated_parts_loading_threads,
0, // We don't need any threads one all the parts will be loaded
@@ -160,7 +165,7 @@ void LocalServer::initialize(Poco::Util::Application & self)
getOutdatedPartsLoadingThreadPool().setMaxTurboThreads(active_parts_loading_threads);
- const size_t unexpected_parts_loading_threads = config().getUInt("max_unexpected_parts_loading_thread_pool_size", 32);
+ const size_t unexpected_parts_loading_threads = getClientConfiguration().getUInt("max_unexpected_parts_loading_thread_pool_size", 32);
getUnexpectedPartsLoadingThreadPool().initialize(
unexpected_parts_loading_threads,
0, // We don't need any threads one all the parts will be loaded
@@ -168,7 +173,7 @@ void LocalServer::initialize(Poco::Util::Application & self)
getUnexpectedPartsLoadingThreadPool().setMaxTurboThreads(active_parts_loading_threads);
- const size_t cleanup_threads = config().getUInt("max_parts_cleaning_thread_pool_size", 128);
+ const size_t cleanup_threads = getClientConfiguration().getUInt("max_parts_cleaning_thread_pool_size", 128);
getPartsCleaningThreadPool().initialize(
cleanup_threads,
0, // We don't need any threads one all the parts will be deleted
@@ -201,10 +206,10 @@ void LocalServer::tryInitPath()
{
std::string path;
- if (config().has("path"))
+ if (getClientConfiguration().has("path"))
{
// User-supplied path.
- path = config().getString("path");
+ path = getClientConfiguration().getString("path");
Poco::trimInPlace(path);
if (path.empty())
@@ -263,13 +268,13 @@ void LocalServer::tryInitPath()
global_context->setUserFilesPath(""); /// user's files are everywhere
- std::string user_scripts_path = config().getString("user_scripts_path", fs::path(path) / "user_scripts/");
+ std::string user_scripts_path = getClientConfiguration().getString("user_scripts_path", fs::path(path) / "user_scripts/");
global_context->setUserScriptsPath(user_scripts_path);
/// top_level_domains_lists
- const std::string & top_level_domains_path = config().getString("top_level_domains_path", fs::path(path) / "top_level_domains/");
+ const std::string & top_level_domains_path = getClientConfiguration().getString("top_level_domains_path", fs::path(path) / "top_level_domains/");
if (!top_level_domains_path.empty())
- TLDListsHolder::getInstance().parseConfig(fs::path(top_level_domains_path) / "", config());
+ TLDListsHolder::getInstance().parseConfig(fs::path(top_level_domains_path) / "", getClientConfiguration());
}
@@ -311,14 +316,14 @@ void LocalServer::cleanup()
std::string LocalServer::getInitialCreateTableQuery()
{
- if (!config().has("table-structure") && !config().has("table-file") && !config().has("table-data-format") && (!isRegularFile(STDIN_FILENO) || queries.empty()))
+ if (!getClientConfiguration().has("table-structure") && !getClientConfiguration().has("table-file") && !getClientConfiguration().has("table-data-format") && (!isRegularFile(STDIN_FILENO) || queries.empty()))
return {};
- auto table_name = backQuoteIfNeed(config().getString("table-name", "table"));
- auto table_structure = config().getString("table-structure", "auto");
+ auto table_name = backQuoteIfNeed(getClientConfiguration().getString("table-name", "table"));
+ auto table_structure = getClientConfiguration().getString("table-structure", "auto");
String table_file;
- if (!config().has("table-file") || config().getString("table-file") == "-")
+ if (!getClientConfiguration().has("table-file") || getClientConfiguration().getString("table-file") == "-")
{
/// Use Unix tools stdin naming convention
table_file = "stdin";
@@ -326,7 +331,7 @@ std::string LocalServer::getInitialCreateTableQuery()
else
{
/// Use regular file
- auto file_name = config().getString("table-file");
+ auto file_name = getClientConfiguration().getString("table-file");
table_file = quoteString(file_name);
}
@@ -374,18 +379,18 @@ void LocalServer::setupUsers()
ConfigurationPtr users_config;
auto & access_control = global_context->getAccessControl();
- access_control.setNoPasswordAllowed(config().getBool("allow_no_password", true));
- access_control.setPlaintextPasswordAllowed(config().getBool("allow_plaintext_password", true));
- if (config().has("config-file") || fs::exists("config.xml"))
+ access_control.setNoPasswordAllowed(getClientConfiguration().getBool("allow_no_password", true));
+ access_control.setPlaintextPasswordAllowed(getClientConfiguration().getBool("allow_plaintext_password", true));
+ if (getClientConfiguration().has("config-file") || fs::exists("config.xml"))
{
- String config_path = config().getString("config-file", "");
- bool has_user_directories = config().has("user_directories");
+ String config_path = getClientConfiguration().getString("config-file", "");
+ bool has_user_directories = getClientConfiguration().has("user_directories");
const auto config_dir = fs::path{config_path}.remove_filename().string();
- String users_config_path = config().getString("users_config", "");
+ String users_config_path = getClientConfiguration().getString("users_config", "");
if (users_config_path.empty() && has_user_directories)
{
- users_config_path = config().getString("user_directories.users_xml.path");
+ users_config_path = getClientConfiguration().getString("user_directories.users_xml.path");
if (fs::path(users_config_path).is_relative() && fs::exists(fs::path(config_dir) / users_config_path))
users_config_path = fs::path(config_dir) / users_config_path;
}
@@ -409,10 +414,10 @@ void LocalServer::setupUsers()
void LocalServer::connect()
{
- connection_parameters = ConnectionParameters(config(), "localhost");
+ connection_parameters = ConnectionParameters(getClientConfiguration(), "localhost");
ReadBuffer * in;
- auto table_file = config().getString("table-file", "-");
+ auto table_file = getClientConfiguration().getString("table-file", "-");
if (table_file == "-" || table_file == "stdin")
{
in = &std_in;
@@ -433,7 +438,7 @@ try
UseSSL use_ssl;
thread_status.emplace();
- StackTrace::setShowAddresses(config().getBool("show_addresses_in_stack_traces", true));
+ StackTrace::setShowAddresses(getClientConfiguration().getBool("show_addresses_in_stack_traces", true));
setupSignalHandler();
@@ -448,7 +453,7 @@ try
if (rlim.rlim_cur < rlim.rlim_max)
{
- rlim.rlim_cur = config().getUInt("max_open_files", static_cast(rlim.rlim_max));
+ rlim.rlim_cur = getClientConfiguration().getUInt("max_open_files", static_cast(rlim.rlim_max));
int rc = setrlimit(RLIMIT_NOFILE, &rlim);
if (rc != 0)
std::cerr << fmt::format("Cannot set max number of file descriptors to {}. Try to specify max_open_files according to your system limits. error: {}", rlim.rlim_cur, errnoToString()) << '\n';
@@ -456,8 +461,8 @@ try
}
is_interactive = stdin_is_a_tty
- && (config().hasOption("interactive")
- || (queries.empty() && !config().has("table-structure") && queries_files.empty() && !config().has("table-file")));
+ && (getClientConfiguration().hasOption("interactive")
+ || (queries.empty() && !getClientConfiguration().has("table-structure") && queries_files.empty() && !getClientConfiguration().has("table-file")));
if (!is_interactive)
{
@@ -481,7 +486,7 @@ try
SCOPE_EXIT({ cleanup(); });
- initTTYBuffer(toProgressOption(config().getString("progress", "default")));
+ initTTYBuffer(toProgressOption(getClientConfiguration().getString("progress", "default")));
ASTAlterCommand::setFormatAlterCommandsWithParentheses(true);
applyCmdSettings(global_context);
@@ -489,7 +494,7 @@ try
/// try to load user defined executable functions, throw on error and die
try
{
- global_context->loadOrReloadUserDefinedExecutableFunctions(config());
+ global_context->loadOrReloadUserDefinedExecutableFunctions(getClientConfiguration());
}
catch (...)
{
@@ -530,7 +535,7 @@ try
}
catch (const DB::Exception & e)
{
- bool need_print_stack_trace = config().getBool("stacktrace", false);
+ bool need_print_stack_trace = getClientConfiguration().getBool("stacktrace", false);
std::cerr << getExceptionMessage(e, need_print_stack_trace, true) << std::endl;
return e.code() ? e.code() : -1;
}
@@ -542,42 +547,42 @@ catch (...)
void LocalServer::updateLoggerLevel(const String & logs_level)
{
- config().setString("logger.level", logs_level);
- updateLevels(config(), logger());
+ getClientConfiguration().setString("logger.level", logs_level);
+ updateLevels(getClientConfiguration(), logger());
}
void LocalServer::processConfig()
{
- if (!queries.empty() && config().has("queries-file"))
+ if (!queries.empty() && getClientConfiguration().has("queries-file"))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Options '--query' and '--queries-file' cannot be specified at the same time");
- if (config().has("multiquery"))
+ if (getClientConfiguration().has("multiquery"))
is_multiquery = true;
- pager = config().getString("pager", "");
+ pager = getClientConfiguration().getString("pager", "");
- delayed_interactive = config().has("interactive") && (!queries.empty() || config().has("queries-file"));
+ delayed_interactive = getClientConfiguration().has("interactive") && (!queries.empty() || getClientConfiguration().has("queries-file"));
if (!is_interactive || delayed_interactive)
{
- echo_queries = config().hasOption("echo") || config().hasOption("verbose");
- ignore_error = config().getBool("ignore-error", false);
+ echo_queries = getClientConfiguration().hasOption("echo") || getClientConfiguration().hasOption("verbose");
+ ignore_error = getClientConfiguration().getBool("ignore-error", false);
}
- print_stack_trace = config().getBool("stacktrace", false);
+ print_stack_trace = getClientConfiguration().getBool("stacktrace", false);
const std::string clickhouse_dialect{"clickhouse"};
- load_suggestions = (is_interactive || delayed_interactive) && !config().getBool("disable_suggestion", false)
- && config().getString("dialect", clickhouse_dialect) == clickhouse_dialect;
- wait_for_suggestions_to_load = config().getBool("wait_for_suggestions_to_load", false);
+ load_suggestions = (is_interactive || delayed_interactive) && !getClientConfiguration().getBool("disable_suggestion", false)
+ && getClientConfiguration().getString("dialect", clickhouse_dialect) == clickhouse_dialect;
+ wait_for_suggestions_to_load = getClientConfiguration().getBool("wait_for_suggestions_to_load", false);
- auto logging = (config().has("logger.console")
- || config().has("logger.level")
- || config().has("log-level")
- || config().has("send_logs_level")
- || config().has("logger.log"));
+ auto logging = (getClientConfiguration().has("logger.console")
+ || getClientConfiguration().has("logger.level")
+ || getClientConfiguration().has("log-level")
+ || getClientConfiguration().has("send_logs_level")
+ || getClientConfiguration().has("logger.log"));
- auto level = config().getString("log-level", "trace");
+ auto level = getClientConfiguration().getString("log-level", "trace");
- if (config().has("server_logs_file"))
+ if (getClientConfiguration().has("server_logs_file"))
{
auto poco_logs_level = Poco::Logger::parseLevel(level);
Poco::Logger::root().setLevel(poco_logs_level);
@@ -587,10 +592,10 @@ void LocalServer::processConfig()
}
else
{
- config().setString("logger", "logger");
+ getClientConfiguration().setString("logger", "logger");
auto log_level_default = logging ? level : "fatal";
- config().setString("logger.level", config().getString("log-level", config().getString("send_logs_level", log_level_default)));
- buildLoggers(config(), logger(), "clickhouse-local");
+ getClientConfiguration().setString("logger.level", getClientConfiguration().getString("log-level", getClientConfiguration().getString("send_logs_level", log_level_default)));
+ buildLoggers(getClientConfiguration(), logger(), "clickhouse-local");
}
shared_context = Context::createShared();
@@ -604,13 +609,13 @@ void LocalServer::processConfig()
LoggerRawPtr log = &logger();
/// Maybe useless
- if (config().has("macros"))
- global_context->setMacros(std::make_unique(config(), "macros", log));
+ if (getClientConfiguration().has("macros"))
+ global_context->setMacros(std::make_unique(getClientConfiguration(), "macros", log));
setDefaultFormatsAndCompressionFromConfiguration();
/// Sets external authenticators config (LDAP, Kerberos).
- global_context->setExternalAuthenticatorsConfig(config());
+ global_context->setExternalAuthenticatorsConfig(getClientConfiguration());
setupUsers();
@@ -619,12 +624,12 @@ void LocalServer::processConfig()
global_context->getProcessList().setMaxSize(0);
const size_t physical_server_memory = getMemoryAmount();
- const double cache_size_to_ram_max_ratio = config().getDouble("cache_size_to_ram_max_ratio", 0.5);
+ const double cache_size_to_ram_max_ratio = getClientConfiguration().getDouble("cache_size_to_ram_max_ratio", 0.5);
const size_t max_cache_size = static_cast(physical_server_memory * cache_size_to_ram_max_ratio);
- String uncompressed_cache_policy = config().getString("uncompressed_cache_policy", DEFAULT_UNCOMPRESSED_CACHE_POLICY);
- size_t uncompressed_cache_size = config().getUInt64("uncompressed_cache_size", DEFAULT_UNCOMPRESSED_CACHE_MAX_SIZE);
- double uncompressed_cache_size_ratio = config().getDouble("uncompressed_cache_size_ratio", DEFAULT_UNCOMPRESSED_CACHE_SIZE_RATIO);
+ String uncompressed_cache_policy = getClientConfiguration().getString("uncompressed_cache_policy", DEFAULT_UNCOMPRESSED_CACHE_POLICY);
+ size_t uncompressed_cache_size = getClientConfiguration().getUInt64("uncompressed_cache_size", DEFAULT_UNCOMPRESSED_CACHE_MAX_SIZE);
+ double uncompressed_cache_size_ratio = getClientConfiguration().getDouble("uncompressed_cache_size_ratio", DEFAULT_UNCOMPRESSED_CACHE_SIZE_RATIO);
if (uncompressed_cache_size > max_cache_size)
{
uncompressed_cache_size = max_cache_size;
@@ -632,9 +637,9 @@ void LocalServer::processConfig()
}
global_context->setUncompressedCache(uncompressed_cache_policy, uncompressed_cache_size, uncompressed_cache_size_ratio);
- String mark_cache_policy = config().getString("mark_cache_policy", DEFAULT_MARK_CACHE_POLICY);
- size_t mark_cache_size = config().getUInt64("mark_cache_size", DEFAULT_MARK_CACHE_MAX_SIZE);
- double mark_cache_size_ratio = config().getDouble("mark_cache_size_ratio", DEFAULT_MARK_CACHE_SIZE_RATIO);
+ String mark_cache_policy = getClientConfiguration().getString("mark_cache_policy", DEFAULT_MARK_CACHE_POLICY);
+ size_t mark_cache_size = getClientConfiguration().getUInt64("mark_cache_size", DEFAULT_MARK_CACHE_MAX_SIZE);
+ double mark_cache_size_ratio = getClientConfiguration().getDouble("mark_cache_size_ratio", DEFAULT_MARK_CACHE_SIZE_RATIO);
if (!mark_cache_size)
LOG_ERROR(log, "Too low mark cache size will lead to severe performance degradation.");
if (mark_cache_size > max_cache_size)
@@ -644,9 +649,9 @@ void LocalServer::processConfig()
}
global_context->setMarkCache(mark_cache_policy, mark_cache_size, mark_cache_size_ratio);
- String index_uncompressed_cache_policy = config().getString("index_uncompressed_cache_policy", DEFAULT_INDEX_UNCOMPRESSED_CACHE_POLICY);
- size_t index_uncompressed_cache_size = config().getUInt64("index_uncompressed_cache_size", DEFAULT_INDEX_UNCOMPRESSED_CACHE_MAX_SIZE);
- double index_uncompressed_cache_size_ratio = config().getDouble("index_uncompressed_cache_size_ratio", DEFAULT_INDEX_UNCOMPRESSED_CACHE_SIZE_RATIO);
+ String index_uncompressed_cache_policy = getClientConfiguration().getString("index_uncompressed_cache_policy", DEFAULT_INDEX_UNCOMPRESSED_CACHE_POLICY);
+ size_t index_uncompressed_cache_size = getClientConfiguration().getUInt64("index_uncompressed_cache_size", DEFAULT_INDEX_UNCOMPRESSED_CACHE_MAX_SIZE);
+ double index_uncompressed_cache_size_ratio = getClientConfiguration().getDouble("index_uncompressed_cache_size_ratio", DEFAULT_INDEX_UNCOMPRESSED_CACHE_SIZE_RATIO);
if (index_uncompressed_cache_size > max_cache_size)
{
index_uncompressed_cache_size = max_cache_size;
@@ -654,9 +659,9 @@ void LocalServer::processConfig()
}
global_context->setIndexUncompressedCache(index_uncompressed_cache_policy, index_uncompressed_cache_size, index_uncompressed_cache_size_ratio);
- String index_mark_cache_policy = config().getString("index_mark_cache_policy", DEFAULT_INDEX_MARK_CACHE_POLICY);
- size_t index_mark_cache_size = config().getUInt64("index_mark_cache_size", DEFAULT_INDEX_MARK_CACHE_MAX_SIZE);
- double index_mark_cache_size_ratio = config().getDouble("index_mark_cache_size_ratio", DEFAULT_INDEX_MARK_CACHE_SIZE_RATIO);
+ String index_mark_cache_policy = getClientConfiguration().getString("index_mark_cache_policy", DEFAULT_INDEX_MARK_CACHE_POLICY);
+ size_t index_mark_cache_size = getClientConfiguration().getUInt64("index_mark_cache_size", DEFAULT_INDEX_MARK_CACHE_MAX_SIZE);
+ double index_mark_cache_size_ratio = getClientConfiguration().getDouble("index_mark_cache_size_ratio", DEFAULT_INDEX_MARK_CACHE_SIZE_RATIO);
if (index_mark_cache_size > max_cache_size)
{
index_mark_cache_size = max_cache_size;
@@ -664,7 +669,7 @@ void LocalServer::processConfig()
}
global_context->setIndexMarkCache(index_mark_cache_policy, index_mark_cache_size, index_mark_cache_size_ratio);
- size_t mmap_cache_size = config().getUInt64("mmap_cache_size", DEFAULT_MMAP_CACHE_MAX_SIZE);
+ size_t mmap_cache_size = getClientConfiguration().getUInt64("mmap_cache_size", DEFAULT_MMAP_CACHE_MAX_SIZE);
if (mmap_cache_size > max_cache_size)
{
mmap_cache_size = max_cache_size;
@@ -676,8 +681,8 @@ void LocalServer::processConfig()
global_context->setQueryCache(0, 0, 0, 0);
#if USE_EMBEDDED_COMPILER
- size_t compiled_expression_cache_max_size_in_bytes = config().getUInt64("compiled_expression_cache_size", DEFAULT_COMPILED_EXPRESSION_CACHE_MAX_SIZE);
- size_t compiled_expression_cache_max_elements = config().getUInt64("compiled_expression_cache_elements_size", DEFAULT_COMPILED_EXPRESSION_CACHE_MAX_ENTRIES);
+ size_t compiled_expression_cache_max_size_in_bytes = getClientConfiguration().getUInt64("compiled_expression_cache_size", DEFAULT_COMPILED_EXPRESSION_CACHE_MAX_SIZE);
+ size_t compiled_expression_cache_max_elements = getClientConfiguration().getUInt64("compiled_expression_cache_elements_size", DEFAULT_COMPILED_EXPRESSION_CACHE_MAX_ENTRIES);
CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_max_size_in_bytes, compiled_expression_cache_max_elements);
#endif
@@ -689,16 +694,16 @@ void LocalServer::processConfig()
applyCmdOptions(global_context);
/// Load global settings from default_profile and system_profile.
- global_context->setDefaultProfiles(config());
+ global_context->setDefaultProfiles(getClientConfiguration());
/// We load temporary database first, because projections need it.
DatabaseCatalog::instance().initializeAndLoadTemporaryDatabase();
- std::string default_database = config().getString("default_database", "default");
+ std::string default_database = getClientConfiguration().getString("default_database", "default");
DatabaseCatalog::instance().attachDatabase(default_database, createClickHouseLocalDatabaseOverlay(default_database, global_context));
global_context->setCurrentDatabase(default_database);
- if (config().has("path"))
+ if (getClientConfiguration().has("path"))
{
String path = global_context->getPath();
fs::create_directories(fs::path(path));
@@ -713,7 +718,7 @@ void LocalServer::processConfig()
attachInformationSchema(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::INFORMATION_SCHEMA_UPPERCASE));
waitLoad(TablesLoaderForegroundPoolId, startup_system_tasks);
- if (!config().has("only-system-tables"))
+ if (!getClientConfiguration().has("only-system-tables"))
{
DatabaseCatalog::instance().createBackgroundTasks();
waitLoad(loadMetadata(global_context));
@@ -725,15 +730,15 @@ void LocalServer::processConfig()
LOG_DEBUG(log, "Loaded metadata.");
}
- else if (!config().has("no-system-tables"))
+ else if (!getClientConfiguration().has("no-system-tables"))
{
attachSystemTablesServer(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::SYSTEM_DATABASE), false);
attachInformationSchema(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::INFORMATION_SCHEMA));
attachInformationSchema(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::INFORMATION_SCHEMA_UPPERCASE));
}
- server_display_name = config().getString("display_name", "");
- prompt_by_server_display_name = config().getRawString("prompt_by_server_display_name.default", ":) ");
+ server_display_name = getClientConfiguration().getString("display_name", "");
+ prompt_by_server_display_name = getClientConfiguration().getRawString("prompt_by_server_display_name.default", ":) ");
global_context->setQueryKindInitial();
global_context->setQueryKind(query_kind);
@@ -811,7 +816,7 @@ void LocalServer::applyCmdSettings(ContextMutablePtr context)
void LocalServer::applyCmdOptions(ContextMutablePtr context)
{
- context->setDefaultFormat(config().getString("output-format", config().getString("format", is_interactive ? "PrettyCompact" : "TSV")));
+ context->setDefaultFormat(getClientConfiguration().getString("output-format", getClientConfiguration().getString("format", is_interactive ? "PrettyCompact" : "TSV")));
applyCmdSettings(context);
}
@@ -819,33 +824,33 @@ void LocalServer::applyCmdOptions(ContextMutablePtr context)
void LocalServer::processOptions(const OptionsDescription &, const CommandLineOptions & options, const std::vector<Arguments> &, const std::vector<Arguments> &)
{
if (options.count("table"))
- config().setString("table-name", options["table"].as<std::string>());
+ getClientConfiguration().setString("table-name", options["table"].as<std::string>());
if (options.count("file"))
- config().setString("table-file", options["file"].as<std::string>());
+ getClientConfiguration().setString("table-file", options["file"].as<std::string>());
if (options.count("structure"))
- config().setString("table-structure", options["structure"].as<std::string>());
+ getClientConfiguration().setString("table-structure", options["structure"].as<std::string>());
if (options.count("no-system-tables"))
- config().setBool("no-system-tables", true);
+ getClientConfiguration().setBool("no-system-tables", true);
if (options.count("only-system-tables"))
- config().setBool("only-system-tables", true);
+ getClientConfiguration().setBool("only-system-tables", true);
if (options.count("database"))
- config().setString("default_database", options["database"].as<std::string>());
+ getClientConfiguration().setString("default_database", options["database"].as<std::string>());
if (options.count("input-format"))
- config().setString("table-data-format", options["input-format"].as<std::string>());
+ getClientConfiguration().setString("table-data-format", options["input-format"].as<std::string>());
if (options.count("output-format"))
- config().setString("output-format", options["output-format"].as<std::string>());
+ getClientConfiguration().setString("output-format", options["output-format"].as<std::string>());
if (options.count("logger.console"))
- config().setBool("logger.console", options["logger.console"].as<bool>());
+ getClientConfiguration().setBool("logger.console", options["logger.console"].as<bool>());
if (options.count("logger.log"))
- config().setString("logger.log", options["logger.log"].as<std::string>());
+ getClientConfiguration().setString("logger.log", options["logger.log"].as<std::string>());
if (options.count("logger.level"))
- config().setString("logger.level", options["logger.level"].as<std::string>());
+ getClientConfiguration().setString("logger.level", options["logger.level"].as<std::string>());
if (options.count("send_logs_level"))
- config().setString("send_logs_level", options["send_logs_level"].as<std::string>());
+ getClientConfiguration().setString("send_logs_level", options["send_logs_level"].as<std::string>());
if (options.count("wait_for_suggestions_to_load"))
- config().setBool("wait_for_suggestions_to_load", true);
+ getClientConfiguration().setBool("wait_for_suggestions_to_load", true);
}
void LocalServer::readArguments(int argc, char ** argv, Arguments & common_arguments, std::vector<Arguments> &, std::vector<Arguments> &)
diff --git a/programs/local/LocalServer.h b/programs/local/LocalServer.h
index 4856e68ff9b..4ab09ffc353 100644
--- a/programs/local/LocalServer.h
+++ b/programs/local/LocalServer.h
@@ -30,6 +30,9 @@ public:
int main(const std::vector<std::string> & /*args*/) override;
protected:
+
+ Poco::Util::LayeredConfiguration & getClientConfiguration() override;
+
void connect() override;
void processError(const String & query) const override;
diff --git a/programs/odbc-bridge/CMakeLists.txt b/programs/odbc-bridge/CMakeLists.txt
index 83839cc21ac..14af330f788 100644
--- a/programs/odbc-bridge/CMakeLists.txt
+++ b/programs/odbc-bridge/CMakeLists.txt
@@ -13,7 +13,6 @@ set (CLICKHOUSE_ODBC_BRIDGE_SOURCES
getIdentifierQuote.cpp
odbc-bridge.cpp
validateODBCConnectionString.cpp
- createFunctionBaseCast.cpp
)
clickhouse_add_executable(clickhouse-odbc-bridge ${CLICKHOUSE_ODBC_BRIDGE_SOURCES})
@@ -25,6 +24,7 @@ target_link_libraries(clickhouse-odbc-bridge PRIVATE
clickhouse_parsers
ch_contrib::nanodbc
ch_contrib::unixodbc
+ clickhouse_functions
)
set_target_properties(clickhouse-odbc-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..)
diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp
index c97837b685d..2abf5f4e508 100644
--- a/src/Client/ClientBase.cpp
+++ b/src/Client/ClientBase.cpp
@@ -302,8 +302,29 @@ public:
ClientBase::~ClientBase() = default;
-ClientBase::ClientBase() = default;
-
+ClientBase::ClientBase(
+ int in_fd_,
+ int out_fd_,
+ int err_fd_,
+ std::istream & input_stream_,
+ std::ostream & output_stream_,
+ std::ostream & error_stream_
+)
+ : std_in(in_fd_)
+ , std_out(out_fd_)
+ , progress_indication(output_stream_, in_fd_, err_fd_)
+ , in_fd(in_fd_)
+ , out_fd(out_fd_)
+ , err_fd(err_fd_)
+ , input_stream(input_stream_)
+ , output_stream(output_stream_)
+ , error_stream(error_stream_)
+{
+ stdin_is_a_tty = isatty(in_fd);
+ stdout_is_a_tty = isatty(out_fd);
+ stderr_is_a_tty = isatty(err_fd);
+ terminal_width = getTerminalWidth(in_fd, err_fd);
+}
void ClientBase::setupSignalHandler()
{
@@ -330,7 +351,7 @@ void ClientBase::setupSignalHandler()
}
-ASTPtr ClientBase::parseQuery(const char *& pos, const char * end, const Settings & settings, bool allow_multi_statements, bool is_interactive, bool ignore_error)
+ASTPtr ClientBase::parseQuery(const char *& pos, const char * end, const Settings & settings, bool allow_multi_statements)
{
std::unique_ptr<IParserBase> parser;
ASTPtr res;
@@ -359,7 +380,7 @@ ASTPtr ClientBase::parseQuery(const char *& pos, const char * end, const Setting
if (!res)
{
- std::cerr << std::endl << message << std::endl << std::endl;
+ error_stream << std::endl << message << std::endl << std::endl;
return nullptr;
}
}
@@ -373,11 +394,11 @@ ASTPtr ClientBase::parseQuery(const char *& pos, const char * end, const Setting
if (is_interactive)
{
- std::cout << std::endl;
- WriteBufferFromOStream res_buf(std::cout, 4096);
+ output_stream << std::endl;
+ WriteBufferFromOStream res_buf(output_stream, 4096);
formatAST(*res, res_buf);
res_buf.finalize();
- std::cout << std::endl << std::endl;
+ output_stream << std::endl << std::endl;
}
return res;
@@ -481,7 +502,7 @@ void ClientBase::onData(Block & block, ASTPtr parsed_query)
if (need_render_progress && tty_buf)
{
if (select_into_file && !select_into_file_and_stdout)
- std::cerr << "\r";
+ error_stream << "\r";
progress_indication.writeProgress(*tty_buf);
}
}
@@ -741,17 +762,17 @@ bool ClientBase::isRegularFile(int fd)
void ClientBase::setDefaultFormatsAndCompressionFromConfiguration()
{
- if (config().has("output-format"))
+ if (getClientConfiguration().has("output-format"))
{
- default_output_format = config().getString("output-format");
+ default_output_format = getClientConfiguration().getString("output-format");
is_default_format = false;
}
- else if (config().has("format"))
+ else if (getClientConfiguration().has("format"))
{
- default_output_format = config().getString("format");
+ default_output_format = getClientConfiguration().getString("format");
is_default_format = false;
}
- else if (config().has("vertical"))
+ else if (getClientConfiguration().has("vertical"))
{
default_output_format = "Vertical";
is_default_format = false;
@@ -777,17 +798,17 @@ void ClientBase::setDefaultFormatsAndCompressionFromConfiguration()
default_output_format = "TSV";
}
- if (config().has("input-format"))
+ if (getClientConfiguration().has("input-format"))
{
- default_input_format = config().getString("input-format");
+ default_input_format = getClientConfiguration().getString("input-format");
}
- else if (config().has("format"))
+ else if (getClientConfiguration().has("format"))
{
- default_input_format = config().getString("format");
+ default_input_format = getClientConfiguration().getString("format");
}
- else if (config().getString("table-file", "-") != "-")
+ else if (getClientConfiguration().getString("table-file", "-") != "-")
{
- auto file_name = config().getString("table-file");
+ auto file_name = getClientConfiguration().getString("table-file");
std::optional<String> format_from_file_name = FormatFactory::instance().tryGetFormatFromFileName(file_name);
if (format_from_file_name)
default_input_format = *format_from_file_name;
@@ -803,7 +824,7 @@ void ClientBase::setDefaultFormatsAndCompressionFromConfiguration()
default_input_format = "TSV";
}
- format_max_block_size = config().getUInt64("format_max_block_size",
+ format_max_block_size = getClientConfiguration().getUInt64("format_max_block_size",
global_context->getSettingsRef().max_block_size);
/// Setting value from cmd arg overrides one from config
@@ -813,7 +834,7 @@ void ClientBase::setDefaultFormatsAndCompressionFromConfiguration()
}
else
{
- insert_format_max_block_size = config().getUInt64("insert_format_max_block_size",
+ insert_format_max_block_size = getClientConfiguration().getUInt64("insert_format_max_block_size",
global_context->getSettingsRef().max_insert_block_size);
}
}
@@ -924,9 +945,7 @@ void ClientBase::processTextAsSingleQuery(const String & full_query)
const char * begin = full_query.data();
auto parsed_query = parseQuery(begin, begin + full_query.size(),
global_context->getSettingsRef(),
- /*allow_multi_statements=*/ false,
- is_interactive,
- ignore_error);
+ /*allow_multi_statements=*/ false);
if (!parsed_query)
return;
@@ -1100,7 +1119,7 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa
/// has been received yet.
if (processed_rows == 0 && e.code() == ErrorCodes::DEADLOCK_AVOIDED && --retries_left)
{
- std::cerr << "Got a transient error from the server, will"
+ error_stream << "Got a transient error from the server, will"
<< " retry (" << retries_left << " retries left)";
}
else
@@ -1154,7 +1173,7 @@ void ClientBase::receiveResult(ASTPtr parsed_query, Int32 signals_before_stop, b
double elapsed = receive_watch.elapsedSeconds();
if (break_on_timeout && elapsed > receive_timeout.totalSeconds())
{
- std::cout << "Timeout exceeded while receiving data from server."
+ output_stream << "Timeout exceeded while receiving data from server."
<< " Waited for " << static_cast<size_t>(elapsed) << " seconds,"
<< " timeout is " << receive_timeout.totalSeconds() << " seconds." << std::endl;
@@ -1189,7 +1208,7 @@ void ClientBase::receiveResult(ASTPtr parsed_query, Int32 signals_before_stop, b
if (cancelled && is_interactive)
{
- std::cout << "Query was cancelled." << std::endl;
+ output_stream << "Query was cancelled." << std::endl;
cancelled_printed = true;
}
}
@@ -1308,9 +1327,9 @@ void ClientBase::onEndOfStream()
if (is_interactive)
{
if (cancelled && !cancelled_printed)
- std::cout << "Query was cancelled." << std::endl;
+ output_stream << "Query was cancelled." << std::endl;
else if (!written_first_block)
- std::cout << "Ok." << std::endl;
+ output_stream << "Ok." << std::endl;
}
}
@@ -1863,7 +1882,7 @@ void ClientBase::cancelQuery()
progress_indication.clearProgressOutput(*tty_buf);
if (is_interactive)
- std::cout << "Cancelling query." << std::endl;
+ output_stream << "Cancelling query." << std::endl;
cancelled = true;
}
@@ -2026,7 +2045,7 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
{
const String & new_database = use_query->getDatabase();
/// If the client initiates the reconnection, it takes the settings from the config.
- config().setString("database", new_database);
+ getClientConfiguration().setString("database", new_database);
/// If the connection initiates the reconnection, it uses its variable.
connection->setDefaultDatabase(new_database);
}
@@ -2046,21 +2065,21 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
if (is_interactive)
{
- std::cout << std::endl;
+ output_stream << std::endl;
if (!server_exception || processed_rows != 0)
- std::cout << processed_rows << " row" << (processed_rows == 1 ? "" : "s") << " in set. ";
- std::cout << "Elapsed: " << progress_indication.elapsedSeconds() << " sec. ";
+ output_stream << processed_rows << " row" << (processed_rows == 1 ? "" : "s") << " in set. ";
+ output_stream << "Elapsed: " << progress_indication.elapsedSeconds() << " sec. ";
progress_indication.writeFinalProgress();
- std::cout << std::endl << std::endl;
+ output_stream << std::endl << std::endl;
}
- else if (print_time_to_stderr)
+ else if (getClientConfiguration().getBool("print-time-to-stderr", false))
{
- std::cerr << progress_indication.elapsedSeconds() << "\n";
+ error_stream << progress_indication.elapsedSeconds() << "\n";
}
- if (!is_interactive && print_num_processed_rows)
+ if (!is_interactive && getClientConfiguration().getBool("print-num-processed-rows", false))
{
- std::cout << "Processed rows: " << processed_rows << "\n";
+ output_stream << "Processed rows: " << processed_rows << "\n";
}
if (have_error && report_error)
@@ -2110,9 +2129,7 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText(
{
parsed_query = parseQuery(this_query_end, all_queries_end,
global_context->getSettingsRef(),
- /*allow_multi_statements=*/ true,
- is_interactive,
- ignore_error);
+ /*allow_multi_statements=*/ true);
}
catch (const Exception & e)
{
@@ -2428,12 +2445,12 @@ void ClientBase::initQueryIdFormats()
return;
/// Initialize query_id_formats if any
- if (config().has("query_id_formats"))
+ if (getClientConfiguration().has("query_id_formats"))
{
Poco::Util::AbstractConfiguration::Keys keys;
- config().keys("query_id_formats", keys);
+ getClientConfiguration().keys("query_id_formats", keys);
for (const auto & name : keys)
- query_id_formats.emplace_back(name + ":", config().getString("query_id_formats." + name));
+ query_id_formats.emplace_back(name + ":", getClientConfiguration().getString("query_id_formats." + name));
}
if (query_id_formats.empty())
@@ -2478,9 +2495,9 @@ bool ClientBase::addMergeTreeSettings(ASTCreateQuery & ast_create)
void ClientBase::runInteractive()
{
- if (config().has("query_id"))
+ if (getClientConfiguration().has("query_id"))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "query_id could be specified only in non-interactive mode");
- if (print_time_to_stderr)
+ if (getClientConfiguration().getBool("print-time-to-stderr", false))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "time option could be specified only in non-interactive mode");
initQueryIdFormats();
@@ -2493,9 +2510,9 @@ void ClientBase::runInteractive()
{
/// Load suggestion data from the server.
if (global_context->getApplicationType() == Context::ApplicationType::CLIENT)
- suggest->load(global_context, connection_parameters, config().getInt("suggestion_limit"), wait_for_suggestions_to_load);
+ suggest->load(global_context, connection_parameters, getClientConfiguration().getInt("suggestion_limit"), wait_for_suggestions_to_load);
else if (global_context->getApplicationType() == Context::ApplicationType::LOCAL)
- suggest->load(global_context, connection_parameters, config().getInt("suggestion_limit"), wait_for_suggestions_to_load);
+ suggest->load(global_context, connection_parameters, getClientConfiguration().getInt("suggestion_limit"), wait_for_suggestions_to_load);
}
if (home_path.empty())
@@ -2506,8 +2523,8 @@ void ClientBase::runInteractive()
}
/// Load command history if present.
- if (config().has("history_file"))
- history_file = config().getString("history_file");
+ if (getClientConfiguration().has("history_file"))
+ history_file = getClientConfiguration().getString("history_file");
else
{
auto * history_file_from_env = getenv("CLICKHOUSE_HISTORY_FILE"); // NOLINT(concurrency-mt-unsafe)
@@ -2528,7 +2545,7 @@ void ClientBase::runInteractive()
{
if (e.getErrno() != EEXIST)
{
- std::cerr << getCurrentExceptionMessage(false) << '\n';
+ error_stream << getCurrentExceptionMessage(false) << '\n';
}
}
}
@@ -2539,13 +2556,13 @@ void ClientBase::runInteractive()
#if USE_REPLXX
replxx::Replxx::highlighter_callback_t highlight_callback{};
- if (config().getBool("highlight", true))
+ if (getClientConfiguration().getBool("highlight", true))
highlight_callback = highlight;
ReplxxLineReader lr(
*suggest,
history_file,
- config().has("multiline"),
+ getClientConfiguration().has("multiline"),
query_extenders,
query_delimiters,
word_break_characters,
@@ -2553,7 +2570,7 @@ void ClientBase::runInteractive()
#else
LineReader lr(
history_file,
- config().has("multiline"),
+ getClientConfiguration().has("multiline"),
query_extenders,
query_delimiters,
word_break_characters);
@@ -2633,7 +2650,7 @@ void ClientBase::runInteractive()
{
// If a separate connection loading suggestions failed to open a new session,
// use the main session to receive them.
- suggest->load(*connection, connection_parameters.timeouts, config().getInt("suggestion_limit"), global_context->getClientInfo());
+ suggest->load(*connection, connection_parameters.timeouts, getClientConfiguration().getInt("suggestion_limit"), global_context->getClientInfo());
}
try
@@ -2648,7 +2665,7 @@ void ClientBase::runInteractive()
break;
/// We don't need to handle the test hints in the interactive mode.
- std::cerr << "Exception on client:" << std::endl << getExceptionMessage(e, print_stack_trace, true) << std::endl << std::endl;
+ error_stream << "Exception on client:" << std::endl << getExceptionMessage(e, print_stack_trace, true) << std::endl << std::endl;
client_exception.reset(e.clone());
}
@@ -2665,11 +2682,11 @@ void ClientBase::runInteractive()
while (true);
if (isNewYearMode())
- std::cout << "Happy new year." << std::endl;
+ output_stream << "Happy new year." << std::endl;
else if (isChineseNewYearMode(local_tz))
- std::cout << "Happy Chinese new year. 春节快乐!" << std::endl;
+ output_stream << "Happy Chinese new year. 春节快乐!" << std::endl;
else
- std::cout << "Bye." << std::endl;
+ output_stream << "Bye." << std::endl;
}
@@ -2680,7 +2697,7 @@ bool ClientBase::processMultiQueryFromFile(const String & file_name)
ReadBufferFromFile in(file_name);
readStringUntilEOF(queries_from_file, in);
- if (!has_log_comment)
+ if (!getClientConfiguration().has("log_comment"))
{
Settings settings = global_context->getSettings();
/// NOTE: cannot use even weakly_canonical() since it fails for /dev/stdin due to resolving of "pipe:[X]"
@@ -2789,13 +2806,13 @@ void ClientBase::clearTerminal()
/// It is needed if garbage is left in terminal.
/// Show cursor. It can be left hidden by invocation of previous programs.
/// A test for this feature: perl -e 'print "x"x100000'; echo -ne '\033[0;0H\033[?25l'; clickhouse-client
- std::cout << "\033[0J" "\033[?25h";
+ output_stream << "\033[0J" "\033[?25h";
}
void ClientBase::showClientVersion()
{
- std::cout << VERSION_NAME << " " + getName() + " version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl;
+ output_stream << VERSION_NAME << " " + getName() + " version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl;
}
namespace
@@ -3080,18 +3097,18 @@ void ClientBase::init(int argc, char ** argv)
if (options.count("version-clean"))
{
- std::cout << VERSION_STRING;
+ output_stream << VERSION_STRING;
exit(0); // NOLINT(concurrency-mt-unsafe)
}
if (options.count("verbose"))
- config().setBool("verbose", true);
+ getClientConfiguration().setBool("verbose", true);
/// Output of help message.
if (options.count("help")
|| (options.count("host") && options["host"].as<std::string>() == "elp")) /// If user writes -help instead of --help.
{
- if (config().getBool("verbose", false))
+ if (getClientConfiguration().getBool("verbose", false))
printHelpMessage(options_description, true);
else
printHelpMessage(options_description_non_verbose, false);
@@ -3099,72 +3116,75 @@ void ClientBase::init(int argc, char ** argv)
}
/// Common options for clickhouse-client and clickhouse-local.
+
+ /// Output execution time to stderr in batch mode.
if (options.count("time"))
- print_time_to_stderr = true;
+ getClientConfiguration().setBool("print-time-to-stderr", true);
if (options.count("query"))
queries = options["query"].as<std::vector<std::string>>();
if (options.count("query_id"))
- config().setString("query_id", options["query_id"].as<std::string>());
+ getClientConfiguration().setString("query_id", options["query_id"].as<std::string>());
if (options.count("database"))
- config().setString("database", options["database"].as<std::string>());
+ getClientConfiguration().setString("database", options["database"].as<std::string>());
if (options.count("config-file"))
- config().setString("config-file", options["config-file"].as<std::string>());
+ getClientConfiguration().setString("config-file", options["config-file"].as<std::string>());
if (options.count("queries-file"))
queries_files = options["queries-file"].as<std::vector<std::string>>();
if (options.count("multiline"))
- config().setBool("multiline", true);
+ getClientConfiguration().setBool("multiline", true);
if (options.count("multiquery"))
- config().setBool("multiquery", true);
+ getClientConfiguration().setBool("multiquery", true);
if (options.count("ignore-error"))
- config().setBool("ignore-error", true);
+ getClientConfiguration().setBool("ignore-error", true);
if (options.count("format"))
- config().setString("format", options["format"].as<std::string>());
+ getClientConfiguration().setString("format", options["format"].as<std::string>());
if (options.count("output-format"))
- config().setString("output-format", options["output-format"].as<std::string>());
+ getClientConfiguration().setString("output-format", options["output-format"].as<std::string>());
if (options.count("vertical"))
- config().setBool("vertical", true);
+ getClientConfiguration().setBool("vertical", true);
if (options.count("stacktrace"))
- config().setBool("stacktrace", true);
+ getClientConfiguration().setBool("stacktrace", true);
if (options.count("print-profile-events"))
- config().setBool("print-profile-events", true);
+ getClientConfiguration().setBool("print-profile-events", true);
if (options.count("profile-events-delay-ms"))
- config().setUInt64("profile-events-delay-ms", options["profile-events-delay-ms"].as<UInt64>());
+ getClientConfiguration().setUInt64("profile-events-delay-ms", options["profile-events-delay-ms"].as<UInt64>());
+ /// Whether to print the number of processed rows at
if (options.count("processed-rows"))
- print_num_processed_rows = true;
+ getClientConfiguration().setBool("print-num-processed-rows", true);
if (options.count("progress"))
{
switch (options["progress"].as<ProgressOption>())
{
case DEFAULT:
- config().setString("progress", "default");
+ getClientConfiguration().setString("progress", "default");
break;
case OFF:
- config().setString("progress", "off");
+ getClientConfiguration().setString("progress", "off");
break;
case TTY:
- config().setString("progress", "tty");
+ getClientConfiguration().setString("progress", "tty");
break;
case ERR:
- config().setString("progress", "err");
+ getClientConfiguration().setString("progress", "err");
break;
}
}
if (options.count("echo"))
- config().setBool("echo", true);
+ getClientConfiguration().setBool("echo", true);
if (options.count("disable_suggestion"))
- config().setBool("disable_suggestion", true);
+ getClientConfiguration().setBool("disable_suggestion", true);
if (options.count("wait_for_suggestions_to_load"))
- config().setBool("wait_for_suggestions_to_load", true);
+ getClientConfiguration().setBool("wait_for_suggestions_to_load", true);
if (options.count("suggestion_limit"))
- config().setInt("suggestion_limit", options["suggestion_limit"].as<int>());
+ getClientConfiguration().setInt("suggestion_limit", options["suggestion_limit"].as<int>());
if (options.count("highlight"))
- config().setBool("highlight", options["highlight"].as<bool>());
+ getClientConfiguration().setBool("highlight", options["highlight"].as<bool>());
if (options.count("history_file"))
- config().setString("history_file", options["history_file"].as<std::string>());
+ getClientConfiguration().setString("history_file", options["history_file"].as<std::string>());
if (options.count("interactive"))
- config().setBool("interactive", true);
+ getClientConfiguration().setBool("interactive", true);
if (options.count("pager"))
- config().setString("pager", options["pager"].as<std::string>());
+ getClientConfiguration().setString("pager", options["pager"].as<std::string>());
if (options.count("log-level"))
Poco::Logger::root().setLevel(options["log-level"].as<std::string>());
@@ -3182,13 +3202,13 @@ void ClientBase::init(int argc, char ** argv)
alias_names.reserve(options_description.main_description->options().size());
for (const auto& option : options_description.main_description->options())
alias_names.insert(option->long_name());
- argsToConfig(common_arguments, config(), 100, &alias_names);
+ argsToConfig(common_arguments, getClientConfiguration(), 100, &alias_names);
}
clearPasswordFromCommandLine(argc, argv);
/// Limit on total memory usage
- std::string max_client_memory_usage = config().getString("max_memory_usage_in_client", "0" /*default value*/);
+ std::string max_client_memory_usage = getClientConfiguration().getString("max_memory_usage_in_client", "0" /*default value*/);
if (max_client_memory_usage != "0")
{
UInt64 max_client_memory_usage_int = parseWithSizeSuffix<UInt64>(max_client_memory_usage.c_str(), max_client_memory_usage.length());
@@ -3197,8 +3217,6 @@ void ClientBase::init(int argc, char ** argv)
total_memory_tracker.setDescription("(total)");
total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking);
}
-
- has_log_comment = config().has("log_comment");
}
}
diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h
index 5202b57040f..756400137ad 100644
--- a/src/Client/ClientBase.h
+++ b/src/Client/ClientBase.h
@@ -18,7 +18,6 @@
#include
#include
-
namespace po = boost::program_options;
@@ -67,13 +66,22 @@ class ClientBase : public Poco::Util::Application, public IHints<2>
public:
using Arguments = std::vector<String>;
- ClientBase();
+ explicit ClientBase
+ (
+ int in_fd_ = STDIN_FILENO,
+ int out_fd_ = STDOUT_FILENO,
+ int err_fd_ = STDERR_FILENO,
+ std::istream & input_stream_ = std::cin,
+ std::ostream & output_stream_ = std::cout,
+ std::ostream & error_stream_ = std::cerr
+ );
+
~ClientBase() override;
void init(int argc, char ** argv);
std::vector getAllRegisteredNames() const override { return cmd_options; }
- static ASTPtr parseQuery(const char *& pos, const char * end, const Settings & settings, bool allow_multi_statements, bool is_interactive, bool ignore_error);
+ ASTPtr parseQuery(const char *& pos, const char * end, const Settings & settings, bool allow_multi_statements);
protected:
void runInteractive();
@@ -82,6 +90,9 @@ protected:
char * argv0 = nullptr;
void runLibFuzzer();
+ /// This is the analogue of Poco::Application::config()
+ virtual Poco::Util::LayeredConfiguration & getClientConfiguration() = 0;
+
virtual bool processWithFuzzing(const String &)
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Query processing with fuzzing is not implemented");
@@ -107,7 +118,7 @@ protected:
String & query_to_execute, ASTPtr & parsed_query, const String & all_queries_text,
std::unique_ptr<Exception> & current_exception);
- static void clearTerminal();
+ void clearTerminal();
void showClientVersion();
using ProgramOptionsDescription = boost::program_options::options_description;
@@ -206,7 +217,6 @@ protected:
bool echo_queries = false; /// Print queries before execution in batch mode.
bool ignore_error = false; /// In case of errors, don't print error message, continue to next query. Only applicable for non-interactive mode.
- bool print_time_to_stderr = false; /// Output execution time to stderr in batch mode.
std::optional<Suggest> suggest;
bool load_suggestions = false;
@@ -251,9 +261,9 @@ protected:
ConnectionParameters connection_parameters;
/// Buffer that reads from stdin in batch mode.
- ReadBufferFromFileDescriptor std_in{STDIN_FILENO};
+ ReadBufferFromFileDescriptor std_in;
/// Console output.
- WriteBufferFromFileDescriptor std_out{STDOUT_FILENO};
+ WriteBufferFromFileDescriptor std_out;
std::unique_ptr<ShellCommand> pager_cmd;
/// The user can specify to redirect query output to a file.
@@ -284,7 +294,6 @@ protected:
bool need_render_profile_events = true;
bool written_first_block = false;
size_t processed_rows = 0; /// How many rows have been read or written.
- bool print_num_processed_rows = false; /// Whether to print the number of processed rows at
bool print_stack_trace = false;
/// The last exception that was received from the server. Is used for the
@@ -332,8 +341,14 @@ protected:
bool cancelled = false;
bool cancelled_printed = false;
- /// Does log_comment has specified by user?
- bool has_log_comment = false;
+ /// Unpacked descriptors and streams for the ease of use.
+ int in_fd = STDIN_FILENO;
+ int out_fd = STDOUT_FILENO;
+ int err_fd = STDERR_FILENO;
+ std::istream & input_stream;
+ std::ostream & output_stream;
+ std::ostream & error_stream;
+
};
}
diff --git a/src/Client/LineReader.cpp b/src/Client/LineReader.cpp
index b3559657ced..487ef232fdc 100644
--- a/src/Client/LineReader.cpp
+++ b/src/Client/LineReader.cpp
@@ -23,14 +23,6 @@ void trim(String & s)
s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end());
}
-/// Check if multi-line query is inserted from the paste buffer.
-/// Allows delaying the start of query execution until the entirety of query is inserted.
-bool hasInputData()
-{
- pollfd fd{STDIN_FILENO, POLLIN, 0};
- return poll(&fd, 1, 0) == 1;
-}
-
struct NoCaseCompare
{
bool operator()(const std::string & str1, const std::string & str2)
@@ -63,6 +55,14 @@ void addNewWords(Words & to, const Words & from, Compare comp)
namespace DB
{
+/// Check if multi-line query is inserted from the paste buffer.
+/// Allows delaying the start of query execution until the entirety of query is inserted.
+bool LineReader::hasInputData() const
+{
+ pollfd fd{in_fd, POLLIN, 0};
+ return poll(&fd, 1, 0) == 1;
+}
+
replxx::Replxx::completions_t LineReader::Suggest::getCompletions(const String & prefix, size_t prefix_length, const char * word_break_characters)
{
std::string_view last_word;
@@ -131,11 +131,22 @@ void LineReader::Suggest::addWords(Words && new_words) // NOLINT(cppcoreguidelin
}
}
-LineReader::LineReader(const String & history_file_path_, bool multiline_, Patterns extenders_, Patterns delimiters_)
+LineReader::LineReader(
+ const String & history_file_path_,
+ bool multiline_,
+ Patterns extenders_,
+ Patterns delimiters_,
+ std::istream & input_stream_,
+ std::ostream & output_stream_,
+ int in_fd_
+)
: history_file_path(history_file_path_)
, multiline(multiline_)
, extenders(std::move(extenders_))
, delimiters(std::move(delimiters_))
+ , input_stream(input_stream_)
+ , output_stream(output_stream_)
+ , in_fd(in_fd_)
{
/// FIXME: check extender != delimiter
}
@@ -212,9 +223,9 @@ LineReader::InputStatus LineReader::readOneLine(const String & prompt)
input.clear();
{
- std::cout << prompt;
- std::getline(std::cin, input);
- if (!std::cin.good())
+ output_stream << prompt;
+ std::getline(input_stream, input);
+ if (!input_stream.good())
return ABORT;
}
diff --git a/src/Client/LineReader.h b/src/Client/LineReader.h
index fc19eaa5667..0172bd7ec22 100644
--- a/src/Client/LineReader.h
+++ b/src/Client/LineReader.h
@@ -1,5 +1,7 @@
#pragma once
+#include <iostream>
+#include <unistd.h>
#include
#include
#include
@@ -37,7 +39,16 @@ public:
using Patterns = std::vector;
- LineReader(const String & history_file_path, bool multiline, Patterns extenders, Patterns delimiters);
+ LineReader(
+ const String & history_file_path,
+ bool multiline,
+ Patterns extenders,
+ Patterns delimiters,
+ std::istream & input_stream_ = std::cin,
+ std::ostream & output_stream_ = std::cout,
+ int in_fd_ = STDIN_FILENO
+ );
+
virtual ~LineReader() = default;
/// Reads the whole line until delimiter (in multiline mode) or until the last line without extender.
@@ -56,6 +67,8 @@ public:
virtual void enableBracketedPaste() {}
virtual void disableBracketedPaste() {}
+ bool hasInputData() const;
+
protected:
enum InputStatus
{
@@ -77,6 +90,10 @@ protected:
virtual InputStatus readOneLine(const String & prompt);
virtual void addToHistory(const String &) {}
+
+ std::istream & input_stream;
+ std::ostream & output_stream;
+ int in_fd;
};
}
diff --git a/src/Client/LocalConnection.cpp b/src/Client/LocalConnection.cpp
index c7494e31605..3b2c14ee4f9 100644
--- a/src/Client/LocalConnection.cpp
+++ b/src/Client/LocalConnection.cpp
@@ -16,7 +16,10 @@
#include
#include
#include
-
+#include
+#include
+#include
+#include
namespace DB
{
@@ -151,12 +154,26 @@ void LocalConnection::sendQuery(
state->block = sample;
String current_format = "Values";
+
+ const auto & settings = context->getSettingsRef();
const char * begin = state->query.data();
- auto parsed_query = ClientBase::parseQuery(begin, begin + state->query.size(),
- context->getSettingsRef(),
- /*allow_multi_statements=*/ false,
- /*is_interactive=*/ false,
- /*ignore_error=*/ false);
+ const char * end = begin + state->query.size();
+ const Dialect & dialect = settings.dialect;
+
+ std::unique_ptr parser;
+ if (dialect == Dialect::kusto)
+ parser = std::make_unique(end, settings.allow_settings_after_format_in_insert);
+ else if (dialect == Dialect::prql)
+ parser = std::make_unique(settings.max_query_size, settings.max_parser_depth, settings.max_parser_backtracks);
+ else
+ parser = std::make_unique(end, settings.allow_settings_after_format_in_insert);
+
+ ASTPtr parsed_query;
+ if (dialect == Dialect::kusto)
+ parsed_query = parseKQLQueryAndMovePosition(*parser, begin, end, "", /*allow_multi_statements*/false, settings.max_query_size, settings.max_parser_depth, settings.max_parser_backtracks);
+ else
+ parsed_query = parseQueryAndMovePosition(*parser, begin, end, "", /*allow_multi_statements*/false, settings.max_query_size, settings.max_parser_depth, settings.max_parser_backtracks);
+
if (const auto * insert = parsed_query->as())
{
if (!insert->format.empty())
diff --git a/src/Client/ReplxxLineReader.cpp b/src/Client/ReplxxLineReader.cpp
index 9e0f5946205..46600168695 100644
--- a/src/Client/ReplxxLineReader.cpp
+++ b/src/Client/ReplxxLineReader.cpp
@@ -297,8 +297,15 @@ ReplxxLineReader::ReplxxLineReader(
Patterns extenders_,
Patterns delimiters_,
const char word_break_characters_[],
- replxx::Replxx::highlighter_callback_t highlighter_)
- : LineReader(history_file_path_, multiline_, std::move(extenders_), std::move(delimiters_)), highlighter(std::move(highlighter_))
+ replxx::Replxx::highlighter_callback_t highlighter_,
+ [[ maybe_unused ]] std::istream & input_stream_,
+ [[ maybe_unused ]] std::ostream & output_stream_,
+ [[ maybe_unused ]] int in_fd_,
+ [[ maybe_unused ]] int out_fd_,
+ [[ maybe_unused ]] int err_fd_
+)
+ : LineReader(history_file_path_, multiline_, std::move(extenders_), std::move(delimiters_), input_stream_, output_stream_, in_fd_)
+ , highlighter(std::move(highlighter_))
, word_break_characters(word_break_characters_)
, editor(getEditor())
{
@@ -471,7 +478,7 @@ ReplxxLineReader::ReplxxLineReader(
ReplxxLineReader::~ReplxxLineReader()
{
- if (close(history_file_fd))
+ if (history_file_fd >= 0 && close(history_file_fd))
rx.print("Close of history file failed: %s\n", errnoToString().c_str());
}
@@ -496,7 +503,7 @@ void ReplxxLineReader::addToHistory(const String & line)
// but replxx::Replxx::history_load() does not
// and that is why flock() is added here.
bool locked = false;
- if (flock(history_file_fd, LOCK_EX))
+ if (history_file_fd >= 0 && flock(history_file_fd, LOCK_EX))
rx.print("Lock of history file failed: %s\n", errnoToString().c_str());
else
locked = true;
@@ -507,7 +514,7 @@ void ReplxxLineReader::addToHistory(const String & line)
if (!rx.history_save(history_file_path))
rx.print("Saving history failed: %s\n", errnoToString().c_str());
- if (locked && 0 != flock(history_file_fd, LOCK_UN))
+ if (history_file_fd >= 0 && locked && 0 != flock(history_file_fd, LOCK_UN))
rx.print("Unlock of history file failed: %s\n", errnoToString().c_str());
}
diff --git a/src/Client/ReplxxLineReader.h b/src/Client/ReplxxLineReader.h
index 6ad149e38f2..c46080420ef 100644
--- a/src/Client/ReplxxLineReader.h
+++ b/src/Client/ReplxxLineReader.h
@@ -1,6 +1,7 @@
#pragma once
-#include "LineReader.h"
+#include
+#include
#include
namespace DB
@@ -9,14 +10,22 @@ namespace DB
class ReplxxLineReader : public LineReader
{
public:
- ReplxxLineReader(
+ ReplxxLineReader
+ (
Suggest & suggest,
const String & history_file_path,
bool multiline,
Patterns extenders_,
Patterns delimiters_,
const char word_break_characters_[],
- replxx::Replxx::highlighter_callback_t highlighter_);
+ replxx::Replxx::highlighter_callback_t highlighter_,
+ std::istream & input_stream_ = std::cin,
+ std::ostream & output_stream_ = std::cout,
+ int in_fd_ = STDIN_FILENO,
+ int out_fd_ = STDOUT_FILENO,
+ int err_fd_ = STDERR_FILENO
+ );
+
~ReplxxLineReader() override;
void enableBracketedPaste() override;
diff --git a/src/Common/ProgressIndication.cpp b/src/Common/ProgressIndication.cpp
index 7b07c72824a..0b482cb09be 100644
--- a/src/Common/ProgressIndication.cpp
+++ b/src/Common/ProgressIndication.cpp
@@ -92,19 +92,19 @@ void ProgressIndication::writeFinalProgress()
if (progress.read_rows < 1000)
return;
- std::cout << "Processed " << formatReadableQuantity(progress.read_rows) << " rows, "
+ output_stream << "Processed " << formatReadableQuantity(progress.read_rows) << " rows, "
<< formatReadableSizeWithDecimalSuffix(progress.read_bytes);
UInt64 elapsed_ns = getElapsedNanoseconds();
if (elapsed_ns)
- std::cout << " (" << formatReadableQuantity(progress.read_rows * 1000000000.0 / elapsed_ns) << " rows/s., "
+ output_stream << " (" << formatReadableQuantity(progress.read_rows * 1000000000.0 / elapsed_ns) << " rows/s., "
<< formatReadableSizeWithDecimalSuffix(progress.read_bytes * 1000000000.0 / elapsed_ns) << "/s.)";
else
- std::cout << ". ";
+ output_stream << ". ";
auto peak_memory_usage = getMemoryUsage().peak;
if (peak_memory_usage >= 0)
- std::cout << "\nPeak memory usage: " << formatReadableSizeWithBinarySuffix(peak_memory_usage) << ".";
+ output_stream << "\nPeak memory usage: " << formatReadableSizeWithBinarySuffix(peak_memory_usage) << ".";
}
void ProgressIndication::writeProgress(WriteBufferFromFileDescriptor & message)
@@ -125,7 +125,7 @@ void ProgressIndication::writeProgress(WriteBufferFromFileDescriptor & message)
const char * indicator = indicators[increment % 8];
- size_t terminal_width = getTerminalWidth();
+ size_t terminal_width = getTerminalWidth(in_fd, err_fd);
if (!written_progress_chars)
{
diff --git a/src/Common/ProgressIndication.h b/src/Common/ProgressIndication.h
index a9965785889..ae39fb49bcc 100644
--- a/src/Common/ProgressIndication.h
+++ b/src/Common/ProgressIndication.h
@@ -32,6 +32,19 @@ using HostToTimesMap = std::unordered_map;
class ProgressIndication
{
public:
+
+ explicit ProgressIndication
+ (
+ std::ostream & output_stream_ = std::cout,
+ int in_fd_ = STDIN_FILENO,
+ int err_fd_ = STDERR_FILENO
+ )
+ : output_stream(output_stream_),
+ in_fd(in_fd_),
+ err_fd(err_fd_)
+ {
+ }
+
/// Write progress bar.
void writeProgress(WriteBufferFromFileDescriptor & message);
void clearProgressOutput(WriteBufferFromFileDescriptor & message);
@@ -103,6 +116,10 @@ private:
/// - hosts_data/cpu_usage_meter (guarded with profile_events_mutex)
mutable std::mutex profile_events_mutex;
mutable std::mutex progress_mutex;
+
+ std::ostream & output_stream;
+ int in_fd;
+ int err_fd;
};
}
diff --git a/src/Common/TerminalSize.cpp b/src/Common/TerminalSize.cpp
index bc5b4474384..8139f4f7616 100644
--- a/src/Common/TerminalSize.cpp
+++ b/src/Common/TerminalSize.cpp
@@ -13,17 +13,17 @@ namespace DB::ErrorCodes
extern const int SYSTEM_ERROR;
}
-uint16_t getTerminalWidth()
+uint16_t getTerminalWidth(int in_fd, int err_fd)
{
struct winsize terminal_size {};
- if (isatty(STDIN_FILENO))
+ if (isatty(in_fd))
{
- if (ioctl(STDIN_FILENO, TIOCGWINSZ, &terminal_size))
+ if (ioctl(in_fd, TIOCGWINSZ, &terminal_size))
throw DB::ErrnoException(DB::ErrorCodes::SYSTEM_ERROR, "Cannot obtain terminal window size (ioctl TIOCGWINSZ)");
}
- else if (isatty(STDERR_FILENO))
+ else if (isatty(err_fd))
{
- if (ioctl(STDERR_FILENO, TIOCGWINSZ, &terminal_size))
+ if (ioctl(err_fd, TIOCGWINSZ, &terminal_size))
throw DB::ErrnoException(DB::ErrorCodes::SYSTEM_ERROR, "Cannot obtain terminal window size (ioctl TIOCGWINSZ)");
}
/// Default - 0.
diff --git a/src/Common/TerminalSize.h b/src/Common/TerminalSize.h
index b5fc6de7921..f1334f2bcb9 100644
--- a/src/Common/TerminalSize.h
+++ b/src/Common/TerminalSize.h
@@ -1,16 +1,16 @@
#pragma once
#include
+#include
#include
namespace po = boost::program_options;
-uint16_t getTerminalWidth();
+uint16_t getTerminalWidth(int in_fd = STDIN_FILENO, int err_fd = STDERR_FILENO);
/** Creates po::options_description with name and an appropriate size for option displaying
* when program is called with option --help
* */
po::options_description createOptionsDescription(const std::string &caption, unsigned short terminal_width); /// NOLINT
-
diff --git a/src/Common/ZooKeeper/examples/CMakeLists.txt b/src/Common/ZooKeeper/examples/CMakeLists.txt
index 678b302a512..11669d765f7 100644
--- a/src/Common/ZooKeeper/examples/CMakeLists.txt
+++ b/src/Common/ZooKeeper/examples/CMakeLists.txt
@@ -1,15 +1,18 @@
clickhouse_add_executable(zkutil_test_commands zkutil_test_commands.cpp)
target_link_libraries(zkutil_test_commands PRIVATE
clickhouse_common_zookeeper_no_log
+ clickhouse_functions
dbms)
clickhouse_add_executable(zkutil_test_commands_new_lib zkutil_test_commands_new_lib.cpp)
target_link_libraries(zkutil_test_commands_new_lib PRIVATE
clickhouse_common_zookeeper_no_log
clickhouse_compression
+ clickhouse_functions
dbms)
clickhouse_add_executable(zkutil_test_async zkutil_test_async.cpp)
target_link_libraries(zkutil_test_async PRIVATE
clickhouse_common_zookeeper_no_log
+ clickhouse_functions
dbms)
diff --git a/src/Common/examples/CMakeLists.txt b/src/Common/examples/CMakeLists.txt
index 73e1396fb35..410576c2b4a 100644
--- a/src/Common/examples/CMakeLists.txt
+++ b/src/Common/examples/CMakeLists.txt
@@ -11,10 +11,10 @@ clickhouse_add_executable (small_table small_table.cpp)
target_link_libraries (small_table PRIVATE clickhouse_common_io)
clickhouse_add_executable (parallel_aggregation parallel_aggregation.cpp)
-target_link_libraries (parallel_aggregation PRIVATE dbms)
+target_link_libraries (parallel_aggregation PRIVATE dbms clickhouse_functions)
clickhouse_add_executable (parallel_aggregation2 parallel_aggregation2.cpp)
-target_link_libraries (parallel_aggregation2 PRIVATE dbms)
+target_link_libraries (parallel_aggregation2 PRIVATE dbms clickhouse_functions)
clickhouse_add_executable (int_hashes_perf int_hashes_perf.cpp)
target_link_libraries (int_hashes_perf PRIVATE clickhouse_common_io)
@@ -85,7 +85,7 @@ target_link_libraries (interval_tree PRIVATE dbms)
if (ENABLE_SSL)
clickhouse_add_executable (encrypt_decrypt encrypt_decrypt.cpp)
- target_link_libraries (encrypt_decrypt PRIVATE dbms)
+ target_link_libraries (encrypt_decrypt PRIVATE dbms clickhouse_functions)
endif()
clickhouse_add_executable (check_pointer_valid check_pointer_valid.cpp)
diff --git a/src/Core/Settings.h b/src/Core/Settings.h
index c6c7bd026e7..e37aa8a47a2 100644
--- a/src/Core/Settings.h
+++ b/src/Core/Settings.h
@@ -1093,6 +1093,7 @@ class IColumn;
M(Bool, input_format_json_defaults_for_missing_elements_in_named_tuple, true, "Insert default value in named tuple element if it's missing in json object", 0) \
M(Bool, input_format_json_throw_on_bad_escape_sequence, true, "Throw an exception if JSON string contains bad escape sequence in JSON input formats. If disabled, bad escape sequences will remain as is in the data", 0) \
M(Bool, input_format_json_ignore_unnecessary_fields, true, "Ignore unnecessary fields and not parse them. Enabling this may not throw exceptions on json strings of invalid format or with duplicated fields", 0) \
+ M(Bool, input_format_json_ignore_key_case, false, "Ignore json key case while read json field from string", 0) \
M(Bool, input_format_try_infer_integers, true, "Try to infer integers instead of floats while schema inference in text formats", 0) \
M(Bool, input_format_try_infer_dates, true, "Try to infer dates from string fields while schema inference in text formats", 0) \
M(Bool, input_format_try_infer_datetimes, true, "Try to infer datetimes from string fields while schema inference in text formats", 0) \
diff --git a/src/Core/SettingsChangesHistory.h b/src/Core/SettingsChangesHistory.h
index d7cf4563b6d..fddf41172c2 100644
--- a/src/Core/SettingsChangesHistory.h
+++ b/src/Core/SettingsChangesHistory.h
@@ -117,6 +117,7 @@ static const std::map)
diff --git a/src/Functions/parseDateTime.cpp b/src/Functions/parseDateTime.cpp
index 11e210d2cc2..162b8c58873 100644
--- a/src/Functions/parseDateTime.cpp
+++ b/src/Functions/parseDateTime.cpp
@@ -978,8 +978,7 @@ namespace
[[nodiscard]]
static PosOrError mysqlAmericanDate(Pos cur, Pos end, const String & fragment, DateTime & date)
{
- if (auto status = checkSpace(cur, end, 8, "mysqlAmericanDate requires size >= 8", fragment))
- return tl::unexpected(status.error());
+ RETURN_ERROR_IF_FAILED(checkSpace(cur, end, 8, "mysqlAmericanDate requires size >= 8", fragment))
Int32 month;
ASSIGN_RESULT_OR_RETURN_ERROR(cur, (readNumber2(cur, end, fragment, month)))
@@ -993,7 +992,7 @@ namespace
Int32 year;
ASSIGN_RESULT_OR_RETURN_ERROR(cur, (readNumber2(cur, end, fragment, year)))
- RETURN_ERROR_IF_FAILED(date.setYear(year))
+ RETURN_ERROR_IF_FAILED(date.setYear(year + 2000))
return cur;
}
@@ -1015,8 +1014,7 @@ namespace
[[nodiscard]]
static PosOrError mysqlISO8601Date(Pos cur, Pos end, const String & fragment, DateTime & date)
{
- if (auto status = checkSpace(cur, end, 10, "mysqlISO8601Date requires size >= 10", fragment))
- return tl::unexpected(status.error());
+ RETURN_ERROR_IF_FAILED(checkSpace(cur, end, 10, "mysqlISO8601Date requires size >= 10", fragment))
Int32 year;
Int32 month;
@@ -1462,8 +1460,7 @@ namespace
[[nodiscard]]
static PosOrError jodaDayOfWeekText(size_t /*min_represent_digits*/, Pos cur, Pos end, const String & fragment, DateTime & date)
{
- if (auto result= checkSpace(cur, end, 3, "jodaDayOfWeekText requires size >= 3", fragment); !result.has_value())
- return tl::unexpected(result.error());
+ RETURN_ERROR_IF_FAILED(checkSpace(cur, end, 3, "jodaDayOfWeekText requires size >= 3", fragment))
String text1(cur, 3);
boost::to_lower(text1);
@@ -1556,8 +1553,8 @@ namespace
Int32 day_of_month;
ASSIGN_RESULT_OR_RETURN_ERROR(cur, (readNumberWithVariableLength(
cur, end, false, false, false, repetitions, std::max(repetitions, 2uz), fragment, day_of_month)))
- if (auto res = date.setDayOfMonth(day_of_month); !res.has_value())
- return tl::unexpected(res.error());
+ RETURN_ERROR_IF_FAILED(date.setDayOfMonth(day_of_month))
+
return cur;
}
diff --git a/src/Functions/tests/gtest_ternary_logic.cpp b/src/Functions/tests/gtest_ternary_logic.cpp
deleted file mode 100644
index 5ecafabb361..00000000000
--- a/src/Functions/tests/gtest_ternary_logic.cpp
+++ /dev/null
@@ -1,354 +0,0 @@
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-// I know that inclusion of .cpp is not good at all
-#include // NOLINT
-
-using namespace DB;
-using TernaryValues = std::vector;
-
-struct LinearCongruentialGenerator
-{
- /// Constants from `man lrand48_r`.
- static constexpr UInt64 a = 0x5DEECE66D;
- static constexpr UInt64 c = 0xB;
-
- /// And this is from `head -c8 /dev/urandom | xxd -p`
- UInt64 current = 0x09826f4a081cee35ULL;
-
- UInt32 next()
- {
- current = current * a + c;
- return static_cast(current >> 16);
- }
-};
-
-void generateRandomTernaryValue(LinearCongruentialGenerator & gen, Ternary::ResultType * output, size_t size, double false_ratio, double null_ratio)
-{
- /// The LinearCongruentialGenerator generates nonnegative integers uniformly distributed over the interval [0, 2^32).
- /// See https://linux.die.net/man/3/nrand48
-
- double false_percentile = false_ratio;
- double null_percentile = false_ratio + null_ratio;
-
- false_percentile = false_percentile > 1 ? 1 : false_percentile;
- null_percentile = null_percentile > 1 ? 1 : null_percentile;
-
- UInt32 false_threshold = static_cast(static_cast(std::numeric_limits::max()) * false_percentile);
- UInt32 null_threshold = static_cast(static_cast(std::numeric_limits::max()) * null_percentile);
-
- for (Ternary::ResultType * end = output + size; output != end; ++output)
- {
- UInt32 val = gen.next();
- *output = val < false_threshold ? Ternary::False : (val < null_threshold ? Ternary::Null : Ternary::True);
- }
-}
-
-template
-ColumnPtr createColumnNullable(const Ternary::ResultType * ternary_values, size_t size)
-{
- auto nested_column = ColumnVector::create(size);
- auto null_map = ColumnUInt8::create(size);
- auto & nested_column_data = nested_column->getData();
- auto & null_map_data = null_map->getData();
-
- for (size_t i = 0; i < size; ++i)
- {
- if (ternary_values[i] == Ternary::Null)
- {
- null_map_data[i] = 1;
- nested_column_data[i] = 0;
- }
- else if (ternary_values[i] == Ternary::True)
- {
- null_map_data[i] = 0;
- nested_column_data[i] = 100;
- }
- else
- {
- null_map_data[i] = 0;
- nested_column_data[i] = 0;
- }
- }
-
- return ColumnNullable::create(std::move(nested_column), std::move(null_map));
-}
-
-template
-ColumnPtr createColumnVector(const Ternary::ResultType * ternary_values, size_t size)
-{
- auto column = ColumnVector::create(size);
- auto & column_data = column->getData();
-
- for (size_t i = 0; i < size; ++i)
- {
- if (ternary_values[i] == Ternary::True)
- {
- column_data[i] = 100;
- }
- else
- {
- column_data[i] = 0;
- }
- }
-
- return column;
-}
-
-template
-ColumnPtr createRandomColumn(LinearCongruentialGenerator & gen, TernaryValues & ternary_values)
-{
- size_t size = ternary_values.size();
- Ternary::ResultType * ternary_data = ternary_values.data();
-
- if constexpr (std::is_same_v)
- {
- generateRandomTernaryValue(gen, ternary_data, size, 0.3, 0.7);
- return createColumnNullable(ternary_data, size);
- }
- else if constexpr (std::is_same_v>)
- {
- generateRandomTernaryValue(gen, ternary_data, size, 0.5, 0);
- return createColumnVector(ternary_data, size);
- }
- else
- {
- auto nested_col = ColumnNothing::create(size);
- auto null_map = ColumnUInt8::create(size);
-
- memset(ternary_data, Ternary::Null, size);
-
- return ColumnNullable::create(std::move(nested_col), std::move(null_map));
- }
-}
-
-/* The truth table of ternary And and Or operations:
- * +-------+-------+---------+--------+
- * | a | b | a And b | a Or b |
- * +-------+-------+---------+--------+
- * | False | False | False | False |
- * | False | Null | False | Null |
- * | False | True | False | True |
- * | Null | False | False | Null |
- * | Null | Null | Null | Null |
- * | Null | True | Null | True |
- * | True | False | False | True |
- * | True | Null | Null | True |
- * | True | True | True | True |
- * +-------+-------+---------+--------+
- *
- * https://en.wikibooks.org/wiki/Structured_Query_Language/NULLs_and_the_Three_Valued_Logic
- */
-template
-bool testTernaryLogicTruthTable()
-{
- constexpr size_t size = 9;
-
- Ternary::ResultType col_a_ternary[] = {Ternary::False, Ternary::False, Ternary::False, Ternary::Null, Ternary::Null, Ternary::Null, Ternary::True, Ternary::True, Ternary::True};
- Ternary::ResultType col_b_ternary[] = {Ternary::False, Ternary::Null, Ternary::True, Ternary::False, Ternary::Null, Ternary::True,Ternary::False, Ternary::Null, Ternary::True};
- Ternary::ResultType and_expected_ternary[] = {Ternary::False, Ternary::False, Ternary::False, Ternary::False, Ternary::Null, Ternary::Null,Ternary::False, Ternary::Null, Ternary::True};
- Ternary::ResultType or_expected_ternary[] = {Ternary::False, Ternary::Null, Ternary::True, Ternary::Null, Ternary::Null, Ternary::True,Ternary::True, Ternary::True, Ternary::True};
- Ternary::ResultType * expected_ternary;
-
-
- if constexpr (std::is_same_v)
- {
- expected_ternary = and_expected_ternary;
- }
- else
- {
- expected_ternary = or_expected_ternary;
- }
-
- auto col_a = createColumnNullable(col_a_ternary, size);
- auto col_b = createColumnNullable(col_b_ternary, size);
- ColumnRawPtrs arguments = {col_a.get(), col_b.get()};
-
- auto col_res = ColumnUInt8::create(size);
- auto & col_res_data = col_res->getData();
-
- OperationApplier::apply(arguments, col_res->getData(), false);
-
- for (size_t i = 0; i < size; ++i)
- {
- if (col_res_data[i] != expected_ternary[i]) return false;
- }
-
- return true;
-}
-
-template
-bool testTernaryLogicOfTwoColumns(size_t size)
-{
- LinearCongruentialGenerator gen;
-
- TernaryValues left_column_ternary(size);
- TernaryValues right_column_ternary(size);
- TernaryValues expected_ternary(size);
-
- ColumnPtr left = createRandomColumn(gen, left_column_ternary);
- ColumnPtr right = createRandomColumn(gen, right_column_ternary);
-
- for (size_t i = 0; i < size; ++i)
- {
- /// Given that False is less than Null and Null is less than True, the And operation can be implemented
- /// with std::min, and the Or operation can be implemented with std::max.
- if constexpr (std::is_same_v)
- {
- expected_ternary[i] = std::min(left_column_ternary[i], right_column_ternary[i]);
- }
- else
- {
- expected_ternary[i] = std::max(left_column_ternary[i], right_column_ternary[i]);
- }
- }
-
- ColumnRawPtrs arguments = {left.get(), right.get()};
-
- auto col_res = ColumnUInt8::create(size);
- auto & col_res_data = col_res->getData();
-
- OperationApplier::apply(arguments, col_res->getData(), false);
-
- for (size_t i = 0; i < size; ++i)
- {
- if (col_res_data[i] != expected_ternary[i]) return false;
- }
-
- return true;
-}
-
-TEST(TernaryLogicTruthTable, NestedUInt8)
-{
- bool test_1 = testTernaryLogicTruthTable();
- bool test_2 = testTernaryLogicTruthTable();
- ASSERT_EQ(test_1, true);
- ASSERT_EQ(test_2, true);
-}
-
-TEST(TernaryLogicTruthTable, NestedUInt16)
-{
- bool test_1 = testTernaryLogicTruthTable();
- bool test_2 = testTernaryLogicTruthTable();
- ASSERT_EQ(test_1, true);
- ASSERT_EQ(test_2, true);
-}
-
-TEST(TernaryLogicTruthTable, NestedUInt32)
-{
- bool test_1 = testTernaryLogicTruthTable();
- bool test_2 = testTernaryLogicTruthTable();
- ASSERT_EQ(test_1, true);
- ASSERT_EQ(test_2, true);
-}
-
-TEST(TernaryLogicTruthTable, NestedUInt64)
-{
- bool test_1 = testTernaryLogicTruthTable();
- bool test_2 = testTernaryLogicTruthTable();
- ASSERT_EQ(test_1, true);
- ASSERT_EQ(test_2, true);
-}
-
-TEST(TernaryLogicTruthTable, NestedInt8)
-{
- bool test_1 = testTernaryLogicTruthTable();
- bool test_2 = testTernaryLogicTruthTable();
- ASSERT_EQ(test_1, true);
- ASSERT_EQ(test_2, true);
-}
-
-TEST(TernaryLogicTruthTable, NestedInt16)
-{
- bool test_1 = testTernaryLogicTruthTable();
- bool test_2 = testTernaryLogicTruthTable();
- ASSERT_EQ(test_1, true);
- ASSERT_EQ(test_2, true);
-}
-
-TEST(TernaryLogicTruthTable, NestedInt32)
-{
- bool test_1 = testTernaryLogicTruthTable();
- bool test_2 = testTernaryLogicTruthTable();
- ASSERT_EQ(test_1, true);
- ASSERT_EQ(test_2, true);
-}
-
-TEST(TernaryLogicTruthTable, NestedInt64)
-{
- bool test_1 = testTernaryLogicTruthTable();
- bool test_2 = testTernaryLogicTruthTable();
- ASSERT_EQ(test_1, true);
- ASSERT_EQ(test_2, true);
-}
-
-TEST(TernaryLogicTruthTable, NestedFloat32)
-{
- bool test_1 = testTernaryLogicTruthTable();
- bool test_2 = testTernaryLogicTruthTable();
- ASSERT_EQ(test_1, true);
- ASSERT_EQ(test_2, true);
-}
-
-TEST(TernaryLogicTruthTable, NestedFloat64)
-{
- bool test_1 = testTernaryLogicTruthTable();
- bool test_2 = testTernaryLogicTruthTable();
- ASSERT_EQ(test_1, true);
- ASSERT_EQ(test_2, true);
-}
-
-TEST(TernaryLogicTwoColumns, TwoNullable)
-{
- bool test_1 = testTernaryLogicOfTwoColumns(100 /*size*/);
- bool test_2 = testTernaryLogicOfTwoColumns(100 /*size*/);
- ASSERT_EQ(test_1, true);
- ASSERT_EQ(test_2, true);
-}
-
-TEST(TernaryLogicTwoColumns, TwoVector)
-{
- bool test_1 = testTernaryLogicOfTwoColumns(100 /*size*/);
- bool test_2 = testTernaryLogicOfTwoColumns(100 /*size*/);
- ASSERT_EQ(test_1, true);
- ASSERT_EQ(test_2, true);
-}
-
-TEST(TernaryLogicTwoColumns, TwoNothing)
-{
- bool test_1 = testTernaryLogicOfTwoColumns(100 /*size*/);
- bool test_2 = testTernaryLogicOfTwoColumns(100 /*size*/);
- ASSERT_EQ(test_1, true);
- ASSERT_EQ(test_2, true);
-}
-
-TEST(TernaryLogicTwoColumns, NullableVector)
-{
- bool test_1 = testTernaryLogicOfTwoColumns(100 /*size*/);
- bool test_2 = testTernaryLogicOfTwoColumns(100 /*size*/);
- ASSERT_EQ(test_1, true);
- ASSERT_EQ(test_2, true);
-}
-
-TEST(TernaryLogicTwoColumns, NullableNothing)
-{
- bool test_1 = testTernaryLogicOfTwoColumns(100 /*size*/);
- bool test_2 = testTernaryLogicOfTwoColumns(100 /*size*/);
- ASSERT_EQ(test_1, true);
- ASSERT_EQ(test_2, true);
-}
-
-TEST(TernaryLogicTwoColumns, VectorNothing)
-{
- bool test_1 = testTernaryLogicOfTwoColumns(100 /*size*/);
- bool test_2 = testTernaryLogicOfTwoColumns(100 /*size*/);
- ASSERT_EQ(test_1, true);
- ASSERT_EQ(test_2, true);
-}
diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp
index 7f96c927d82..d832f568cb8 100644
--- a/src/Interpreters/ExpressionActions.cpp
+++ b/src/Interpreters/ExpressionActions.cpp
@@ -195,6 +195,10 @@ static void setLazyExecutionInfo(
}
lazy_execution_info.short_circuit_ancestors_info[parent].insert(indexes.begin(), indexes.end());
+ /// After checking arguments_with_disabled_lazy_execution, if there is no relation with parent,
+ /// disable the current node.
+ if (indexes.empty())
+ lazy_execution_info.can_be_lazy_executed = false;
}
else
/// If lazy execution is disabled for one of parents, we should disable it for current node.
@@ -292,9 +296,9 @@ static std::unordered_set processShortCircuitFunctions
/// Firstly, find all short-circuit functions and get their settings.
std::unordered_map short_circuit_nodes;
- IFunctionBase::ShortCircuitSettings short_circuit_settings;
for (const auto & node : nodes)
{
+ IFunctionBase::ShortCircuitSettings short_circuit_settings;
if (node.type == ActionsDAG::ActionType::FUNCTION && node.function_base->isShortCircuit(short_circuit_settings, node.children.size()) && !node.children.empty())
short_circuit_nodes[&node] = short_circuit_settings;
}
diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp
index 7272e10b801..76b9c26eab5 100644
--- a/src/Interpreters/InterpreterCreateQuery.cpp
+++ b/src/Interpreters/InterpreterCreateQuery.cpp
@@ -898,6 +898,8 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti
assert(as_database_saved.empty() && as_table_saved.empty());
std::swap(create.as_database, as_database_saved);
std::swap(create.as_table, as_table_saved);
+ if (!as_table_saved.empty())
+ create.is_create_empty = false;
return properties;
}
diff --git a/src/Parsers/ASTCreateQuery.cpp b/src/Parsers/ASTCreateQuery.cpp
index 3e5c6a9d86e..d56a2724914 100644
--- a/src/Parsers/ASTCreateQuery.cpp
+++ b/src/Parsers/ASTCreateQuery.cpp
@@ -404,8 +404,18 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat
<< quoteString(toString(to_inner_uuid));
}
+ bool should_add_empty = is_create_empty;
+ auto add_empty_if_needed = [&]
+ {
+ if (!should_add_empty)
+ return;
+ should_add_empty = false;
+ settings.ostr << (settings.hilite ? hilite_keyword : "") << " EMPTY" << (settings.hilite ? hilite_none : "");
+ };
+
if (!as_table.empty())
{
+ add_empty_if_needed();
settings.ostr
<< (settings.hilite ? hilite_keyword : "") << " AS " << (settings.hilite ? hilite_none : "")
<< (!as_database.empty() ? backQuoteIfNeed(as_database) + "." : "") << backQuoteIfNeed(as_table);
@@ -423,6 +433,7 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat
frame.expression_list_always_start_on_new_line = false;
}
+ add_empty_if_needed();
settings.ostr << (settings.hilite ? hilite_keyword : "") << " AS " << (settings.hilite ? hilite_none : "");
as_table_function->formatImpl(settings, state, frame);
}
@@ -484,8 +495,8 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat
if (is_populate)
settings.ostr << (settings.hilite ? hilite_keyword : "") << " POPULATE" << (settings.hilite ? hilite_none : "");
- else if (is_create_empty)
- settings.ostr << (settings.hilite ? hilite_keyword : "") << " EMPTY" << (settings.hilite ? hilite_none : "");
+
+ add_empty_if_needed();
if (sql_security && supportSQLSecurity() && sql_security->as().type.has_value())
{
diff --git a/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp b/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp
index 8855a1bc28d..be4e9430c34 100644
--- a/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp
+++ b/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp
@@ -46,6 +46,15 @@ JSONEachRowRowInputFormat::JSONEachRowRowInputFormat(
{
const auto & header = getPort().getHeader();
name_map = header.getNamesToIndexesMap();
+ if (format_settings_.json.ignore_key_case)
+ {
+ for (auto & it : name_map)
+ {
+ StringRef key = it.first;
+ String lower_case_key = transformFieldNameToLowerCase(key);
+ lower_case_name_map[lower_case_key] = key;
+ }
+ }
if (format_settings_.import_nested_json)
{
for (size_t i = 0; i != header.columns(); ++i)
@@ -171,7 +180,15 @@ void JSONEachRowRowInputFormat::readJSONObject(MutableColumns & columns)
skipUnknownField(name_ref);
continue;
}
- const size_t column_index = columnIndex(name_ref, key_index);
+ size_t column_index = 0;
+ if (format_settings.json.ignore_key_case)
+ {
+ String lower_case_name = transformFieldNameToLowerCase(name_ref);
+ StringRef field_name_ref = lower_case_name_map[lower_case_name];
+ column_index = columnIndex(field_name_ref, key_index);
+ }
+ else
+ column_index = columnIndex(name_ref, key_index);
if (unlikely(ssize_t(column_index) < 0))
{
diff --git a/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h b/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h
index d97aa2dad8d..8a1cef8fa9f 100644
--- a/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h
+++ b/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h
@@ -55,7 +55,13 @@ private:
virtual void readRowStart(MutableColumns &) {}
virtual void skipRowStart() {}
-
+ String transformFieldNameToLowerCase(const StringRef & field_name)
+ {
+ String field_name_str = field_name.toString();
+ std::transform(field_name_str.begin(), field_name_str.end(), field_name_str.begin(),
+ [](unsigned char c) { return std::tolower(c); });
+ return field_name_str;
+ }
/// Buffer for the read from the stream field name. Used when you have to copy it.
/// Also, if processing of Nested data is in progress, it holds the common prefix
/// of the nested column names (so that appending the field name to it produces
@@ -74,7 +80,8 @@ private:
/// Hash table match `field name -> position in the block`. NOTE You can use perfect hash map.
Block::NameMap name_map;
-
+ /// Hash table match `lower_case field name -> field name in the block`.
+ std::unordered_map lower_case_name_map;
/// Cached search results for previous row (keyed as index in JSON object) - used as a hint.
std::vector prev_positions;
diff --git a/src/Storages/examples/CMakeLists.txt b/src/Storages/examples/CMakeLists.txt
index b4786b7313b..4f221efbd2b 100644
--- a/src/Storages/examples/CMakeLists.txt
+++ b/src/Storages/examples/CMakeLists.txt
@@ -5,4 +5,4 @@ clickhouse_add_executable (merge_selector2 merge_selector2.cpp)
target_link_libraries (merge_selector2 PRIVATE dbms)
clickhouse_add_executable (get_current_inserts_in_replicated get_current_inserts_in_replicated.cpp)
-target_link_libraries (get_current_inserts_in_replicated PRIVATE dbms clickhouse_common_config clickhouse_common_zookeeper)
+target_link_libraries (get_current_inserts_in_replicated PRIVATE dbms clickhouse_common_config clickhouse_common_zookeeper clickhouse_functions)
diff --git a/tests/config/install.sh b/tests/config/install.sh
index e04392d893b..08ee11a7407 100755
--- a/tests/config/install.sh
+++ b/tests/config/install.sh
@@ -183,13 +183,7 @@ elif [[ "$USE_AZURE_STORAGE_FOR_MERGE_TREE" == "1" ]]; then
fi
if [[ -n "$EXPORT_S3_STORAGE_POLICIES" ]]; then
- if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
- echo "Azure configuration will not be added"
- else
- echo "Adding azure configuration"
- ln -sf $SRC_PATH/config.d/azure_storage_conf.xml $DEST_SERVER_PATH/config.d/
- fi
-
+ ln -sf $SRC_PATH/config.d/azure_storage_conf.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/storage_conf.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/storage_conf_02944.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/storage_conf_02963.xml $DEST_SERVER_PATH/config.d/
diff --git a/tests/integration/test_asynchronous_metric_jemalloc_profile_active/test.py b/tests/integration/test_asynchronous_metric_jemalloc_profile_active/test.py
index a8f4ab05888..b3769a61b3f 100644
--- a/tests/integration/test_asynchronous_metric_jemalloc_profile_active/test.py
+++ b/tests/integration/test_asynchronous_metric_jemalloc_profile_active/test.py
@@ -7,7 +7,6 @@ cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
"node1",
main_configs=["configs/asynchronous_metrics_update_period_s.xml"],
- env_variables={"MALLOC_CONF": "background_thread:true,prof:true"},
)
@@ -29,26 +28,11 @@ def test_asynchronous_metric_jemalloc_profile_active(started_cluster):
if node1.is_built_with_sanitizer():
pytest.skip("Disabled for sanitizers")
- res_o = node1.query(
+ res = node1.query(
"SELECT * FROM system.asynchronous_metrics WHERE metric ILIKE '%jemalloc.prof.active%' FORMAT Vertical;"
)
assert (
- res_o
- == """Row 1:
-──────
-metric: jemalloc.prof.active
-value: 1
-description: An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html
-"""
- )
- # disable
- node1.query("SYSTEM JEMALLOC DISABLE PROFILE")
- time.sleep(5)
- res_t = node1.query(
- "SELECT * FROM system.asynchronous_metrics WHERE metric ILIKE '%jemalloc.prof.active%' FORMAT Vertical;"
- )
- assert (
- res_t
+ res
== """Row 1:
──────
metric: jemalloc.prof.active
@@ -58,16 +42,31 @@ description: An internal metric of the low-level memory allocator (jemalloc). Se
)
# enable
node1.query("SYSTEM JEMALLOC ENABLE PROFILE")
- time.sleep(5)
- res_f = node1.query(
+ node1.query("SYSTEM RELOAD ASYNCHRONOUS METRICS")
+ res = node1.query(
"SELECT * FROM system.asynchronous_metrics WHERE metric ILIKE '%jemalloc.prof.active%' FORMAT Vertical;"
)
assert (
- res_f
+ res
== """Row 1:
──────
metric: jemalloc.prof.active
value: 1
description: An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html
+"""
+ )
+ # disable
+ node1.query("SYSTEM JEMALLOC DISABLE PROFILE")
+ node1.query("SYSTEM RELOAD ASYNCHRONOUS METRICS")
+ res = node1.query(
+ "SELECT * FROM system.asynchronous_metrics WHERE metric ILIKE '%jemalloc.prof.active%' FORMAT Vertical;"
+ )
+ assert (
+ res
+ == """Row 1:
+──────
+metric: jemalloc.prof.active
+value: 0
+description: An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html
"""
)
diff --git a/tests/integration/test_replicated_database/test.py b/tests/integration/test_replicated_database/test.py
index f23384b5c04..60a6e099b22 100644
--- a/tests/integration/test_replicated_database/test.py
+++ b/tests/integration/test_replicated_database/test.py
@@ -337,8 +337,12 @@ def test_alter_attach(started_cluster, attachable_part, engine):
main_node.query(f"SELECT CounterID FROM {database}.alter_attach_test")
== "123\n"
)
+
# On the other node, data is replicated only if using a Replicated table engine
if engine == "ReplicatedMergeTree":
+ dummy_node.query(
+ f"SYSTEM SYNC REPLICA {database}.alter_attach_test LIGHTWEIGHT"
+ )
assert (
dummy_node.query(f"SELECT CounterID FROM {database}.alter_attach_test")
== "123\n"
diff --git a/tests/queries/0_stateless/02240_system_filesystem_cache_table.reference b/tests/queries/0_stateless/02240_system_filesystem_cache_table.reference
index 93b6d4de94f..6b5dd182112 100644
--- a/tests/queries/0_stateless/02240_system_filesystem_cache_table.reference
+++ b/tests/queries/0_stateless/02240_system_filesystem_cache_table.reference
@@ -34,3 +34,21 @@ DOWNLOADED 0 79 80
DOWNLOADED 0 745 746
2
Expect no cache
+Using storage policy: azure_cache
+0
+Expect cache
+DOWNLOADED 0 0 1
+DOWNLOADED 0 79 80
+DOWNLOADED 0 745 746
+3
+Expect cache
+DOWNLOADED 0 0 1
+DOWNLOADED 0 79 80
+DOWNLOADED 0 745 746
+3
+Expect no cache
+Expect cache
+DOWNLOADED 0 79 80
+DOWNLOADED 0 745 746
+2
+Expect no cache
diff --git a/tests/queries/0_stateless/02240_system_filesystem_cache_table.sh b/tests/queries/0_stateless/02240_system_filesystem_cache_table.sh
index 9aa631c5d0a..57b8cec7864 100755
--- a/tests/queries/0_stateless/02240_system_filesystem_cache_table.sh
+++ b/tests/queries/0_stateless/02240_system_filesystem_cache_table.sh
@@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
-for STORAGE_POLICY in 's3_cache' 'local_cache'; do
+for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
echo "Using storage policy: $STORAGE_POLICY"
${CLICKHOUSE_CLIENT} --query "SYSTEM DROP FILESYSTEM CACHE"
${CLICKHOUSE_CLIENT} --query "SYSTEM DROP MARK CACHE"
diff --git a/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.reference b/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.reference
index 186dcc1eeb2..f53f00992e7 100644
--- a/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.reference
+++ b/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.reference
@@ -218,3 +218,113 @@ SELECT count() FROM test_02241
5010500
SELECT count() FROM test_02241 WHERE value LIKE '%010%'
18816
+Using storage policy: azure_cache
+DROP TABLE IF EXISTS test_02241
+CREATE TABLE test_02241 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='azure_cache', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false, ratio_of_defaults_for_sparse_serialization = 1
+SYSTEM STOP MERGES test_02241
+SYSTEM DROP FILESYSTEM CACHE
+SELECT file_segment_range_begin, file_segment_range_end, size, state
+ FROM
+ (
+ SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path
+ FROM
+ (
+ SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path
+ FROM system.remote_data_paths
+ ) AS data_paths
+ INNER JOIN
+ system.filesystem_cache AS caches
+ ON data_paths.cache_path = caches.cache_path
+ )
+ WHERE endsWith(local_path, 'data.bin')
+ FORMAT Vertical
+SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path
+0
+SELECT count(), sum(size) FROM system.filesystem_cache
+0 0
+INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100)
+SELECT file_segment_range_begin, file_segment_range_end, size, state
+ FROM
+ (
+ SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path
+ FROM
+ (
+ SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path
+ FROM system.remote_data_paths
+ ) AS data_paths
+ INNER JOIN
+ system.filesystem_cache AS caches
+ ON data_paths.cache_path = caches.cache_path
+ )
+ WHERE endsWith(local_path, 'data.bin')
+ FORMAT Vertical
+Row 1:
+──────
+file_segment_range_begin: 0
+file_segment_range_end: 745
+size: 746
+state: DOWNLOADED
+SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path
+8
+SELECT count(), sum(size) FROM system.filesystem_cache
+8 1100
+SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0
+0
+SELECT * FROM test_02241 FORMAT Null
+SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0
+2
+SELECT * FROM test_02241 FORMAT Null
+SELECT count() FROM system.filesystem_cache WHERE cache_hits > 0
+2
+SELECT count(), sum(size) size FROM system.filesystem_cache
+8 1100
+SYSTEM DROP FILESYSTEM CACHE
+INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100, 200)
+SELECT file_segment_range_begin, file_segment_range_end, size, state
+ FROM
+ (
+ SELECT file_segment_range_begin, file_segment_range_end, size, state, local_path
+ FROM
+ (
+ SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path
+ FROM system.remote_data_paths
+ ) AS data_paths
+ INNER JOIN
+ system.filesystem_cache AS caches
+ ON data_paths.cache_path = caches.cache_path
+ )
+ WHERE endsWith(local_path, 'data.bin')
+ FORMAT Vertical;
+Row 1:
+──────
+file_segment_range_begin: 0
+file_segment_range_end: 1659
+size: 1660
+state: DOWNLOADED
+SELECT count() FROM (SELECT arrayJoin(cache_paths) AS cache_path, local_path, remote_path FROM system.remote_data_paths ) AS data_paths INNER JOIN system.filesystem_cache AS caches ON data_paths.cache_path = caches.cache_path
+8
+SELECT count(), sum(size) FROM system.filesystem_cache
+8 2014
+SELECT count(), sum(size) FROM system.filesystem_cache
+8 2014
+INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100) SETTINGS enable_filesystem_cache_on_write_operations=0
+SELECT count(), sum(size) FROM system.filesystem_cache
+8 2014
+INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(100)
+INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(300, 10000)
+SELECT count(), sum(size) FROM system.filesystem_cache
+24 84045
+SYSTEM START MERGES test_02241
+OPTIMIZE TABLE test_02241 FINAL
+SELECT count(), sum(size) FROM system.filesystem_cache
+32 167243
+ALTER TABLE test_02241 UPDATE value = 'kek' WHERE key = 100
+SELECT count(), sum(size) FROM system.filesystem_cache
+41 250541
+INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(5000000)
+SYSTEM FLUSH LOGS
+INSERT INTO test_02241 SELECT number, toString(number) FROM numbers(5000000) 0
+SELECT count() FROM test_02241
+5010500
+SELECT count() FROM test_02241 WHERE value LIKE '%010%'
+18816
diff --git a/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh b/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh
index c1d930f54a7..1028fba76f5 100755
--- a/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh
+++ b/tests/queries/0_stateless/02241_filesystem_cache_on_write_operations.sh
@@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
-for STORAGE_POLICY in 's3_cache' 'local_cache'; do
+for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
echo "Using storage policy: $STORAGE_POLICY"
$CLICKHOUSE_CLIENT --echo --query "DROP TABLE IF EXISTS test_02241"
diff --git a/tests/queries/0_stateless/02242_system_filesystem_cache_log_table.reference b/tests/queries/0_stateless/02242_system_filesystem_cache_log_table.reference
index 99f31df7def..447e1a275fc 100644
--- a/tests/queries/0_stateless/02242_system_filesystem_cache_log_table.reference
+++ b/tests/queries/0_stateless/02242_system_filesystem_cache_log_table.reference
@@ -6,3 +6,7 @@ Using storage policy: local_cache
(0,519) READ_FROM_FS_AND_DOWNLOADED_TO_CACHE
(0,808110) READ_FROM_FS_AND_DOWNLOADED_TO_CACHE
(0,808110) READ_FROM_CACHE
+Using storage policy: azure_cache
+(0,519) READ_FROM_FS_AND_DOWNLOADED_TO_CACHE
+(0,808110) READ_FROM_FS_AND_DOWNLOADED_TO_CACHE
+(0,808110) READ_FROM_CACHE
diff --git a/tests/queries/0_stateless/02242_system_filesystem_cache_log_table.sh b/tests/queries/0_stateless/02242_system_filesystem_cache_log_table.sh
index 4c92d1d2954..7a665d81eab 100755
--- a/tests/queries/0_stateless/02242_system_filesystem_cache_log_table.sh
+++ b/tests/queries/0_stateless/02242_system_filesystem_cache_log_table.sh
@@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
-for STORAGE_POLICY in 's3_cache' 'local_cache'; do
+for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
echo "Using storage policy: $STORAGE_POLICY"
$CLICKHOUSE_CLIENT --query "SYSTEM DROP FILESYSTEM CACHE"
diff --git a/tests/queries/0_stateless/02286_drop_filesystem_cache.reference b/tests/queries/0_stateless/02286_drop_filesystem_cache.reference
index b4e5b6715de..e3875dbabe1 100644
--- a/tests/queries/0_stateless/02286_drop_filesystem_cache.reference
+++ b/tests/queries/0_stateless/02286_drop_filesystem_cache.reference
@@ -16,3 +16,12 @@ Using storage policy: local_cache
1
1
0
+Using storage policy: azure_cache
+0
+2
+0
+1
+1
+1
+1
+0
diff --git a/tests/queries/0_stateless/02286_drop_filesystem_cache.sh b/tests/queries/0_stateless/02286_drop_filesystem_cache.sh
index 1e1841862e9..a2c9352b7aa 100755
--- a/tests/queries/0_stateless/02286_drop_filesystem_cache.sh
+++ b/tests/queries/0_stateless/02286_drop_filesystem_cache.sh
@@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
-for STORAGE_POLICY in 's3_cache' 'local_cache'; do
+for STORAGE_POLICY in 's3_cache' 'local_cache' 'azure_cache'; do
echo "Using storage policy: $STORAGE_POLICY"
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS test_02286"
diff --git a/tests/queries/0_stateless/02313_filesystem_cache_seeks.reference b/tests/queries/0_stateless/02313_filesystem_cache_seeks.reference
index 062aac259a4..0a9e1c20b59 100644
--- a/tests/queries/0_stateless/02313_filesystem_cache_seeks.reference
+++ b/tests/queries/0_stateless/02313_filesystem_cache_seeks.reference
@@ -1,3 +1,4 @@
Using storage policy: s3_cache
Using storage policy: local_cache
Using storage policy: s3_cache_multi
+Using storage policy: azure_cache
diff --git a/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh b/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh
index f5de4346fd6..fbaec1ffaa7 100755
--- a/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh
+++ b/tests/queries/0_stateless/02313_filesystem_cache_seeks.sh
@@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CUR_DIR"/../shell_config.sh
-for STORAGE_POLICY in 's3_cache' 'local_cache' 's3_cache_multi'; do
+for STORAGE_POLICY in 's3_cache' 'local_cache' 's3_cache_multi' 'azure_cache'; do
echo "Using storage policy: $STORAGE_POLICY"
$CLICKHOUSE_CLIENT --query "SYSTEM DROP FILESYSTEM CACHE"
diff --git a/tests/queries/0_stateless/03013_json_key_ignore_case.reference b/tests/queries/0_stateless/03013_json_key_ignore_case.reference
new file mode 100644
index 00000000000..54683d8fbc5
--- /dev/null
+++ b/tests/queries/0_stateless/03013_json_key_ignore_case.reference
@@ -0,0 +1,3 @@
+1 77328912 Ben
+2 77328913 Jim
+3 77328914 Bill
diff --git a/tests/queries/0_stateless/03013_json_key_ignore_case.sh b/tests/queries/0_stateless/03013_json_key_ignore_case.sh
new file mode 100755
index 00000000000..807e743b22a
--- /dev/null
+++ b/tests/queries/0_stateless/03013_json_key_ignore_case.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+# NOTE: this sh wrapper is required because of shell_config
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+
+USER_FILES_PATH=$($CLICKHOUSE_CLIENT --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep -E '^Code: 107.*FILE_DOESNT_EXIST' | head -1 | awk '{gsub("/nonexist.txt","",$9); print $9}')
+
+cp "$CURDIR"/data_json/key_ignore_case.json $USER_FILES_PATH/
+
+$CLICKHOUSE_CLIENT -q "drop table if exists test_tbl"
+$CLICKHOUSE_CLIENT -q "create table test_tbl (id UInt16, reqid UInt32, name String) engine=MergeTree order by id"
+$CLICKHOUSE_CLIENT -q "INSERT INTO test_tbl SELECT * FROM file('key_ignore_case.json', 'JSONEachRow') SETTINGS input_format_json_ignore_key_case=true"
+$CLICKHOUSE_CLIENT -q "select * from test_tbl"
+$CLICKHOUSE_CLIENT -q "drop table test_tbl"
\ No newline at end of file
diff --git a/tests/queries/0_stateless/03071_fix_short_circuit_logic.reference b/tests/queries/0_stateless/03071_fix_short_circuit_logic.reference
new file mode 100644
index 00000000000..48aedfc3958
--- /dev/null
+++ b/tests/queries/0_stateless/03071_fix_short_circuit_logic.reference
@@ -0,0 +1 @@
+2024-01-02 16:54:59
diff --git a/tests/queries/0_stateless/03071_fix_short_circuit_logic.sql b/tests/queries/0_stateless/03071_fix_short_circuit_logic.sql
new file mode 100644
index 00000000000..7745bceca0b
--- /dev/null
+++ b/tests/queries/0_stateless/03071_fix_short_circuit_logic.sql
@@ -0,0 +1,62 @@
+
+
+CREATE FUNCTION IF NOT EXISTS unhexPrefixed AS value -> unhex(substring(value, 3));
+CREATE FUNCTION IF NOT EXISTS hex2bytes AS address -> CAST(unhexPrefixed(address), 'FixedString(20)');
+CREATE FUNCTION IF NOT EXISTS bytes2hex AS address -> concat('0x', lower(hex(address)));
+
+CREATE TABLE test
+(
+ `transfer_id` String,
+ `address` FixedString(20),
+ `value` UInt256,
+ `block_timestamp` DateTime('UTC'),
+ `token_address` FixedString(20)
+)
+ENGINE = MergeTree
+PARTITION BY toYYYYMM(block_timestamp)
+PRIMARY KEY (address, block_timestamp)
+ORDER BY (address, block_timestamp);
+
+INSERT INTO test SELECT 'token-transfer-0x758f1bbabb160683e1c80ed52dcd24a32b599d40edf1cec91b5f1199c0e392a2-56', hex2bytes('0xd387a6e4e84a6c86bd90c158c6028a58cc8ac459'), 3000000000000000000000, '2024-01-02 16:54:59', 'abc';
+
+CREATE TABLE token_data
+(
+ token_address_hex String,
+ chain String,
+ is_blacklisted Bool
+)
+ENGINE = TinyLog;
+
+INSERT INTO token_data SELECT bytes2hex('abc'), 'zksync', false;
+
+CREATE DICTIONARY token_data_map
+(
+ token_address_hex String,
+ chain String,
+ is_blacklisted Bool
+)
+PRIMARY KEY token_address_hex, chain
+SOURCE(Clickhouse(table token_data))
+LIFETIME(MIN 200 MAX 300)
+LAYOUT(COMPLEX_KEY_HASHED_ARRAY());
+
+SELECT block_timestamp
+FROM
+(
+ SELECT
+ block_timestamp,
+ bytes2hex(token_address) AS token_address_hex
+ FROM
+ (
+ SELECT
+ transfer_id,
+ address,
+ value,
+ block_timestamp,
+ token_address,
+ 'zksync' AS chain
+ FROM test
+ )
+ WHERE (address = hex2bytes('0xd387a6e4e84a6c86bd90c158c6028a58cc8ac459')) AND (transfer_id NOT LIKE 'gas%') AND (value > 0) AND (dictGetOrDefault(token_data_map, 'is_blacklisted', (token_address_hex, 'zksync'), true))
+)
+SETTINGS max_threads = 1, short_circuit_function_evaluation = 'enable', allow_experimental_analyzer = 0;
\ No newline at end of file
diff --git a/tests/queries/0_stateless/03094_one_thousand_joins.sql b/tests/queries/0_stateless/03094_one_thousand_joins.sql
index ea159f0e4c0..1f6bd99df7f 100644
--- a/tests/queries/0_stateless/03094_one_thousand_joins.sql
+++ b/tests/queries/0_stateless/03094_one_thousand_joins.sql
@@ -1,6 +1,7 @@
-- Tags: no-fasttest, no-tsan, long
-- (no-tsan because it has a small maximum stack size and the test would fail with TOO_DEEP_RECURSION)
+SET join_algorithm = 'default'; -- for 'full_sorting_merge' the query is 10x slower
SET allow_experimental_analyzer = 1; -- old analyzer returns TOO_DEEP_SUBQUERIES
-- Bug 33446, marked as 'long' because it still runs around 10 sec
diff --git a/tests/queries/0_stateless/03168_inconsistent_ast_formatting.sql b/tests/queries/0_stateless/03168_inconsistent_ast_formatting.sql
index d43d46d5b14..5333ea29ce7 100644
--- a/tests/queries/0_stateless/03168_inconsistent_ast_formatting.sql
+++ b/tests/queries/0_stateless/03168_inconsistent_ast_formatting.sql
@@ -1,4 +1,7 @@
create table a (x `Null`); -- { clientError SYNTAX_ERROR }
create table a (x f(`Null`)); -- { clientError SYNTAX_ERROR }
create table a (x Enum8(f(`Null`, 'World', 2))); -- { clientError SYNTAX_ERROR }
-create table a (`value2` Enum8('Hello' = 1, equals(`Null`, 'World', 2), '!' = 3)); -- { clientError SYNTAX_ERROR }
\ No newline at end of file
+create table a (`value2` Enum8('Hello' = 1, equals(`Null`, 'World', 2), '!' = 3)); -- { clientError SYNTAX_ERROR }
+
+create table a (x Int8) engine Memory;
+create table b empty as a;
diff --git a/tests/queries/0_stateless/03197_fix_parse_mysql_iso_date.reference b/tests/queries/0_stateless/03197_fix_parse_mysql_iso_date.reference
new file mode 100644
index 00000000000..bd9ab3be3fa
--- /dev/null
+++ b/tests/queries/0_stateless/03197_fix_parse_mysql_iso_date.reference
@@ -0,0 +1,2 @@
+2024-06-20 00:00:00
+2024-06-20 00:00:00
diff --git a/tests/queries/0_stateless/03197_fix_parse_mysql_iso_date.sql b/tests/queries/0_stateless/03197_fix_parse_mysql_iso_date.sql
new file mode 100644
index 00000000000..e83738f7214
--- /dev/null
+++ b/tests/queries/0_stateless/03197_fix_parse_mysql_iso_date.sql
@@ -0,0 +1,2 @@
+SELECT parseDateTime('2024-06-20', '%F', 'UTC') AS x;
+SELECT parseDateTime('06/20/24', '%D', 'UTC') AS x;
diff --git a/tests/queries/0_stateless/data_json/key_ignore_case.json b/tests/queries/0_stateless/data_json/key_ignore_case.json
new file mode 100644
index 00000000000..ad8f7cb4507
Binary files /dev/null and b/tests/queries/0_stateless/data_json/key_ignore_case.json differ
diff --git a/utils/check-style/check-style b/utils/check-style/check-style
index 722dfbcad16..380656cd1ca 100755
--- a/utils/check-style/check-style
+++ b/utils/check-style/check-style
@@ -322,10 +322,14 @@ std_cerr_cout_excludes=(
src/Client/LineReader.cpp
src/Client/QueryFuzzer.cpp
src/Client/Suggest.cpp
+ src/Client/ClientBase.h
+ src/Client/LineReader.h
+ src/Client/ReplxxLineReader.h
src/Bridge/IBridge.cpp
src/Daemon/BaseDaemon.cpp
src/Loggers/Loggers.cpp
src/Common/GWPAsan.cpp
+ src/Common/ProgressIndication.h
)
sources_with_std_cerr_cout=( $(
find $ROOT_PATH/{src,base} -name '*.h' -or -name '*.cpp' | \
diff --git a/utils/zookeeper-cli/CMakeLists.txt b/utils/zookeeper-cli/CMakeLists.txt
index cad7164b775..fd2fa669f40 100644
--- a/utils/zookeeper-cli/CMakeLists.txt
+++ b/utils/zookeeper-cli/CMakeLists.txt
@@ -3,4 +3,6 @@ clickhouse_add_executable(clickhouse-zookeeper-cli
${ClickHouse_SOURCE_DIR}/src/Client/LineReader.cpp)
target_link_libraries(clickhouse-zookeeper-cli PRIVATE
clickhouse_common_zookeeper_no_log
- dbms)
+ dbms
+ clickhouse_functions
+)
diff --git a/utils/zookeeper-dump-tree/CMakeLists.txt b/utils/zookeeper-dump-tree/CMakeLists.txt
index 85e4d18c19f..3f3df65776a 100644
--- a/utils/zookeeper-dump-tree/CMakeLists.txt
+++ b/utils/zookeeper-dump-tree/CMakeLists.txt
@@ -3,4 +3,5 @@ target_link_libraries(zookeeper-dump-tree PRIVATE
clickhouse_common_zookeeper_no_log
clickhouse_common_io
dbms
+ clickhouse_functions
boost::program_options)
diff --git a/utils/zookeeper-remove-by-list/CMakeLists.txt b/utils/zookeeper-remove-by-list/CMakeLists.txt
index 50aaed76110..a4d7dccef65 100644
--- a/utils/zookeeper-remove-by-list/CMakeLists.txt
+++ b/utils/zookeeper-remove-by-list/CMakeLists.txt
@@ -2,4 +2,5 @@ clickhouse_add_executable (zookeeper-remove-by-list main.cpp ${SRCS})
target_link_libraries(zookeeper-remove-by-list PRIVATE
clickhouse_common_zookeeper_no_log
dbms
+ clickhouse_functions
boost::program_options)