Merge branch 'master' into hanfei/gwp-asan

commit 716518f143
Han Fei, 2023-01-31 15:01:25 +01:00, committed by GitHub
70 changed files with 918 additions and 154 deletions


@ -460,8 +460,15 @@ set(ICUI18N_SOURCES
file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ")
enable_language(ASM)
if (ARCH_S390X)
set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt70b_dat.S" )
else()
set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt70l_dat.S" )
endif()
set(ICUDATA_SOURCES
"${ICUDATA_SOURCE_DIR}/icudt70l_dat.S"
"${ICUDATA_SOURCE_FILE}"
"${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" # Without this cmake can incorrectly detects library type (OBJECT) instead of SHARED/STATIC
)

contrib/icudata (vendored): 2 changes

@ -1 +1 @@
Subproject commit 72d9a4a7febc904e2b0a534ccb25ae40fac5f1e5
Subproject commit c8e717892a557b4d2852317c7d628aacc0a0e5ab


@ -6,14 +6,14 @@ sidebar_label: Query Result Cache [experimental]
# Query Result Cache [experimental]
The query result cache allows to compute SELECT queries just once and to serve further executions of the same query directly from the cache.
Depending on the type of the queries, this can dramatically reduce latency and resource consumption of the ClickHouse server.
The query result cache allows to compute `SELECT` queries just once and to serve further executions of the same query directly from the
cache. Depending on the type of the queries, this can dramatically reduce latency and resource consumption of the ClickHouse server.
## Background, Design and Limitations
Query result caches can generally be viewed as transactionally consistent or inconsistent.
- In transactionally consistent caches, the database invalidates (discards) cached query results if the result of the SELECT query changes
- In transactionally consistent caches, the database invalidates (discards) cached query results if the result of the `SELECT` query changes
or potentially changes. In ClickHouse, operations which change the data include inserts/updates/deletes in/of/from tables or collapsing
merges. Transactionally consistent caching is especially suitable for OLTP databases, for example
[MySQL](https://dev.mysql.com/doc/refman/5.6/en/query-cache.html) (which removed query result cache after v8.0) and
@ -22,7 +22,7 @@ Query result caches can generally be viewed as transactionally consistent or inc
assigned a validity period after which they expire (e.g. 1 minute) and that the underlying data changes only little during this period.
This approach is overall more suitable for OLAP databases. As an example where transactionally inconsistent caching is sufficient,
consider an hourly sales report in a reporting tool which is simultaneously accessed by multiple users. Sales data changes typically
slowly enough that the database only needs to compute the report once (represented by the first SELECT query). Further queries can be
slowly enough that the database only needs to compute the report once (represented by the first `SELECT` query). Further queries can be
served directly from the query result cache. In this example, a reasonable validity period could be 30 min.
Transactionally inconsistent caching is traditionally provided by client tools or proxy packages interacting with the database. As a result,
@ -36,32 +36,45 @@ processing) where wrong results are returned.
## Configuration Settings and Usage
Parameter [enable_experimental_query_result_cache](settings/settings.md#enable-experimental-query-result-cache) controls whether query
results are inserted into / retrieved from the cache for the current query or session. For example, the first execution of query
``` sql
SELECT some_expensive_calculation(column_1, column_2)
FROM table
SETTINGS enable_experimental_query_result_cache = true;
```
stores the query result into the query result cache. Subsequent executions of the same query (also with parameter
`enable_experimental_query_result_cache = true`) will read the computed result directly from the cache.
Sometimes, it is desirable to use the query result cache only passively, i.e. to allow reading from it but not writing into it (if the cache
result is not stored yet). Parameter [enable_experimental_query_result_cache_passive_usage](settings/settings.md#enable-experimental-query-result-cache-passive-usage)
instead of 'enable_experimental_query_result_cache' can be used for that.
For maximum control, it is generally recommended to provide settings "enable_experimental_query_result_cache" or
"enable_experimental_query_result_cache_passive_usage" only with specific queries. It is also possible to enable caching at user or profile
level but one should keep in mind that all SELECT queries may return a cached results, including monitoring or debugging queries to system
tables.
As long as the result cache is experimental it must be activated using the following configuration setting:
```sql
SET allow_experimental_query_result_cache = true;
```
Afterwards, setting [use_query_result_cache](settings/settings.md#use-query-result-cache) can be used to control whether a specific query or
all queries of the current session should utilize the query result cache. For example, the first execution of query
```sql
SELECT some_expensive_calculation(column_1, column_2)
FROM table
SETTINGS use_query_result_cache = true;
```
will store the query result in the query result cache. Subsequent executions of the same query (also with parameter `use_query_result_cache
= true`) will read the computed result from the cache and return it immediately.
The way the cache is utilized can be configured in more detail using settings [enable_writes_to_query_result_cache](settings/settings.md#enable-writes-to-query-result-cache)
and [enable_reads_from_query_result_cache](settings/settings.md#enable-reads-from-query-result-cache) (both `true` by default). The first
settings controls whether query results are stored in the cache, whereas the second parameter determines if the database should try to
retrieve query results from the cache. For example, the following query will use the cache only passively, i.e. attempt to read from it but
not store its result in it:
```sql
SELECT some_expensive_calculation(column_1, column_2)
FROM table
SETTINGS use_query_result_cache = true, enable_writes_to_query_result_cache = false;
```
For maximum control, it is generally recommended to provide settings "use_query_result_cache", "enable_writes_to_query_result_cache" and
"enable_reads_from_query_result_cache" only with specific queries. It is also possible to enable caching at user or profile level (e.g. via
`SET use_query_result_cache = true`) but one should keep in mind that all `SELECT` queries including monitoring or debugging queries to
system tables may return cached results then.
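A minimal sketch of the session-level usage described in the paragraph above (setting names as documented on this page; `table` is a placeholder):

```sql
-- Enable the experimental cache once, then let every SELECT of the current session use it.
SET allow_experimental_query_result_cache = true;
SET use_query_result_cache = true;

-- Repeated identical SELECT queries (including queries against system tables) may now return cached results.
SELECT count() FROM table;
```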
The query result cache can be cleared using statement `SYSTEM DROP QUERY RESULT CACHE`. The content of the query result cache is displayed
in system table `SYSTEM.QUERY_RESULT_CACHE`. The number of query result cache hits and misses are shown as events "QueryResultCacheHits" and
"QueryResultCacheMisses" in system table `SYSTEM.EVENTS`. Both counters are only updated for SELECT queries which run with settings
"enable_experimental_query_result_cache = true" or "enable_experimental_query_result_cache_passive_usage = true". Other queries do not
affect the cache miss counter.
"QueryResultCacheMisses" in system table `SYSTEM.EVENTS`. Both counters are only updated for `SELECT` queries which run with setting
"use_query_result_cache = true". Other queries do not affect the cache miss counter.
The query result cache exists once per ClickHouse server process. However, cache results are by default not shared between users. This can
be changed (see below) but doing so is not recommended for security reasons.
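A hedged sketch of the maintenance statements mentioned above (the statement and event names are taken from this page; the lowercase system table spelling is an assumption):

```sql
-- Remove all entries from the query result cache.
SYSTEM DROP QUERY RESULT CACHE;

-- Inspect cached entries and the hit/miss counters.
SELECT * FROM system.query_result_cache;
SELECT event, value FROM system.events WHERE event IN ('QueryResultCacheHits', 'QueryResultCacheMisses');
```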
@ -81,7 +94,7 @@ To define how long a query must run at least such that its result can be cached,
``` sql
SELECT some_expensive_calculation(column_1, column_2)
FROM table
SETTINGS enable_experimental_query_result_cache = true, query_result_cache_min_query_duration = 5000;
SETTINGS use_query_result_cache = true, query_result_cache_min_query_duration = 5000;
```
is only cached if the query runs longer than 5 seconds. It is also possible to specify how often a query needs to run until its result is
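As a sketch of the run-count threshold referred to here (the setting name is taken from the Settings.h change later in this commit):

```sql
-- Store the result only after the same query has been executed at least 5 times.
SELECT some_expensive_calculation(column_1, column_2)
FROM table
SETTINGS use_query_result_cache = true, query_result_cache_min_query_runs = 5;
```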


@ -1301,27 +1301,39 @@ Possible values:
Default value: `3`.
## enable_experimental_query_result_cache {#enable-experimental-query-result-cache}
## use_query_result_cache {#use-query-result-cache}
If turned on, results of SELECT queries are stored in and (if available) retrieved from the [query result cache](../query-result-cache.md).
If turned on, SELECT queries may utilize the [query result cache](../query-result-cache.md). Parameters [enable_reads_from_query_result_cache](#enable-reads-from-query-result-cache)
and [enable_writes_to_query_result_cache](#enable-writes-to-query-result-cache) control in more detail how the cache is used.
Possible values:
- 0 - Yes
- 1 - No
Default value: `0`.
## enable_reads_from_query_result_cache {#enable-reads-from-query-result-cache}
If turned on, results of SELECT queries are retrieved from the [query result cache](../query-result-cache.md).
Possible values:
- 0 - Disabled
- 1 - Enabled
Default value: `0`.
Default value: `1`.
## enable_experimental_query_result_cache_passive_usage {#enable-experimental-query-result-cache-passive-usage}
## enable_writes_to_query_result_cache {#enable-writes-to-query-result-cache}
If turned on, results of SELECT queries are (if available) retrieved from the [query result cache](../query-result-cache.md).
If turned on, results of SELECT queries are stored in the [query result cache](../query-result-cache.md).
Possible values:
- 0 - Disabled
- 1 - Enabled
Default value: `0`.
Default value: `1`.
## query_result_cache_store_results_of_queries_with_nondeterministic_functions {#query-result-cache-store-results-of-queries-with-nondeterministic-functions}
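As a hedged illustration of the setting named in this heading (behaviour as described by its declaration in Settings.h later in this commit):

```sql
-- By default, a query calling a non-deterministic function such as now() or rand() is not cached;
-- the setting below opts such queries into the query result cache anyway.
SELECT now(), count()
FROM table
SETTINGS use_query_result_cache = true,
         query_result_cache_store_results_of_queries_with_nondeterministic_functions = true;
```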


@ -95,6 +95,32 @@ Result:
└───────────────────────────────┘
```
If argument `needle` is empty the following rules apply:
- if no `start_pos` was specified: return `1`
- if `start_pos = 0`: return `1`
- if `start_pos >= 1` and `start_pos <= length(haystack) + 1`: return `start_pos`
- otherwise: return `0`
The same rules also apply to functions `positionCaseInsensitive`, `positionUTF8` and `positionCaseInsensitiveUTF8`
``` sql
SELECT
position('abc', ''),
position('abc', '', 0),
position('abc', '', 1),
position('abc', '', 2),
position('abc', '', 3),
position('abc', '', 4),
position('abc', '', 5)
```
``` text
┌─position('abc', '')─┬─position('abc', '', 0)─┬─position('abc', '', 1)─┬─position('abc', '', 2)─┬─position('abc', '', 3)─┬─position('abc', '', 4)─┬─position('abc', '', 5)─┐
│ 1 │ 1 │ 1 │ 2 │ 3 │ 4 │ 0 │
└─────────────────────┴────────────────────────┴────────────────────────┴────────────────────────┴────────────────────────┴────────────────────────┴────────────────────────┘
```
**Examples for POSITION(needle IN haystack) syntax**
Query:


@ -1,3 +1,2 @@
build
__pycache__
*.pyc


@ -127,6 +127,69 @@ void Client::showWarnings()
}
}
void Client::parseConnectionsCredentials()
{
/// It is not possible to correctly handle multiple --host --port options.
if (hosts_and_ports.size() >= 2)
return;
String host;
std::optional<UInt16> port;
if (hosts_and_ports.empty())
{
host = config().getString("host", "localhost");
if (config().has("port"))
port = config().getInt("port");
}
else
{
host = hosts_and_ports.front().host;
port = hosts_and_ports.front().port;
}
Strings keys;
config().keys("connections_credentials", keys);
for (const auto & connection : keys)
{
const String & prefix = "connections_credentials." + connection;
const String & connection_name = config().getString(prefix + ".name", "");
if (connection_name != host)
continue;
String connection_hostname;
if (config().has(prefix + ".hostname"))
connection_hostname = config().getString(prefix + ".hostname");
else
connection_hostname = connection_name;
/// Set "host" unconditionally (since it is used as a "name"), while
/// other options only if they are not set yet (config.xml/cli
/// options).
config().setString("host", connection_hostname);
if (!hosts_and_ports.empty())
hosts_and_ports.front().host = connection_hostname;
if (config().has(prefix + ".port") && !port.has_value())
config().setInt("port", config().getInt(prefix + ".port"));
if (config().has(prefix + ".secure") && !config().has("secure"))
config().setBool("secure", config().getBool(prefix + ".secure"));
if (config().has(prefix + ".user") && !config().has("user"))
config().setString("user", config().getString(prefix + ".user"));
if (config().has(prefix + ".password") && !config().has("password"))
config().setString("password", config().getString(prefix + ".password"));
if (config().has(prefix + ".database") && !config().has("database"))
config().setString("database", config().getString(prefix + ".database"));
if (config().has(prefix + ".history_file") && !config().has("history_file"))
{
String history_file = config().getString(prefix + ".history_file");
if (history_file.starts_with("~") && !home_path.empty())
history_file = home_path + "/" + history_file.substr(1);
config().setString("history_file", history_file);
}
}
}
/// Make query to get all server warnings
std::vector<String> Client::loadWarningMessages()
{
@ -216,6 +279,8 @@ void Client::initialize(Poco::Util::Application & self)
if (env_password)
config().setString("password", env_password);
parseConnectionsCredentials();
// global_context->setApplicationType(Context::ApplicationType::CLIENT);
global_context->setQueryParameters(query_parameters);


@ -47,6 +47,7 @@ protected:
private:
void printChangedSettings() const;
void showWarnings();
void parseConnectionsCredentials();
std::vector<String> loadWarningMessages();
};
}


@ -57,4 +57,28 @@
The same can be done on user-level configuration, just create & adjust: ~/.clickhouse-client/config.xml
-->
<!-- Analog of .netrc -->
<![CDATA[
<connections_credentials>
<connection>
<!-- Name of the connection, host option for the client.
"host" is not the same as "hostname" since you may want to have different settings for one host,
and in this case you can add "prod" and "prod_readonly".
Default: "hostname" will be used. -->
<name>default</name>
<!-- Host that will be used for connection. -->
<hostname>127.0.0.1</hostname>
<port>9000</port>
<secure>1</secure>
<user>default</user>
<password></password>
<database></database>
<!-- '~' is expanded to HOME, like in any shell -->
<history_file></history_file>
</connection>
</connections_credentials>
]]>
</config>


@ -61,13 +61,18 @@ void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServ
return;
}
bool use_connection_pooling = params.getParsed<bool>("use_connection_pooling", true);
try
{
std::string connection_string = params.get("connection_string");
auto connection = ODBCPooledConnectionFactory::instance().get(
validateODBCConnectionString(connection_string),
getContext()->getSettingsRef().odbc_bridge_connection_pool_size);
nanodbc::ConnectionHolderPtr connection;
if (use_connection_pooling)
connection = ODBCPooledConnectionFactory::instance().get(
validateODBCConnectionString(connection_string), getContext()->getSettingsRef().odbc_bridge_connection_pool_size);
else
connection = std::make_shared<nanodbc::ConnectionHolder>(validateODBCConnectionString(connection_string));
auto identifier = getIdentifierQuote(std::move(connection));


@ -102,7 +102,9 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse
std::string format = params.get("format", "RowBinary");
std::string connection_string = params.get("connection_string");
bool use_connection_pooling = params.getParsed<bool>("use_connection_pooling", true);
LOG_TRACE(log, "Connection string: '{}'", connection_string);
LOG_TRACE(log, "Use pooling: {}", use_connection_pooling);
UInt64 max_block_size = DEFAULT_BLOCK_SIZE;
if (params.has("max_block_size"))
@ -134,7 +136,7 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse
try
{
nanodbc::ConnectionHolderPtr connection_handler;
if (getContext()->getSettingsRef().odbc_bridge_use_connection_pooling)
if (use_connection_pooling)
connection_handler = ODBCPooledConnectionFactory::instance().get(
validateODBCConnectionString(connection_string), getContext()->getSettingsRef().odbc_bridge_connection_pool_size);
else


@ -70,13 +70,19 @@ void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServer
return;
}
bool use_connection_pooling = params.getParsed<bool>("use_connection_pooling", true);
try
{
std::string connection_string = params.get("connection_string");
auto connection = ODBCPooledConnectionFactory::instance().get(
validateODBCConnectionString(connection_string),
getContext()->getSettingsRef().odbc_bridge_connection_pool_size);
nanodbc::ConnectionHolderPtr connection;
if (use_connection_pooling)
connection = ODBCPooledConnectionFactory::instance().get(
validateODBCConnectionString(connection_string), getContext()->getSettingsRef().odbc_bridge_connection_pool_size);
else
connection = std::make_shared<nanodbc::ConnectionHolder>(validateODBCConnectionString(connection_string));
bool result = isSchemaAllowed(std::move(connection));


@ -1043,7 +1043,7 @@ std::shared_ptr<IArchiveWriter> BackupImpl::getArchiveWriter(const String & suff
String archive_name_with_suffix = getArchiveNameWithSuffix(suffix);
auto new_archive_writer = createArchiveWriter(archive_params.archive_name, writer->writeFile(archive_name_with_suffix));
new_archive_writer->setPassword(archive_params.password);
new_archive_writer->setCompression(archive_params.compression_method, archive_params.compression_level);
size_t pos = suffix.empty() ? 0 : 1;
archive_writers[pos] = {suffix, new_archive_writer};


@ -63,10 +63,12 @@ public:
XDBCBridgeHelper(
ContextPtr context_,
Poco::Timespan http_timeout_,
const std::string & connection_string_)
const std::string & connection_string_,
bool use_connection_pooling_)
: IXDBCBridgeHelper(context_->getGlobalContext())
, log(&Poco::Logger::get(BridgeHelperMixin::getName() + "BridgeHelper"))
, connection_string(connection_string_)
, use_connection_pooling(use_connection_pooling_)
, http_timeout(http_timeout_)
, config(context_->getGlobalContext()->getConfigRef())
{
@ -132,6 +134,7 @@ protected:
uri.setHost(bridge_host);
uri.setPort(bridge_port);
uri.setScheme("http");
uri.addQueryParameter("use_connection_pooling", toString(use_connection_pooling));
return uri;
}
@ -146,6 +149,7 @@ private:
Poco::Logger * log;
std::string connection_string;
bool use_connection_pooling;
Poco::Timespan http_timeout;
std::string bridge_host;
size_t bridge_port;
@ -189,6 +193,7 @@ protected:
uri.setPath(SCHEMA_ALLOWED_HANDLER);
uri.addQueryParameter("version", std::to_string(XDBC_BRIDGE_PROTOCOL_VERSION));
uri.addQueryParameter("connection_string", getConnectionString());
uri.addQueryParameter("use_connection_pooling", toString(use_connection_pooling));
ReadWriteBufferFromHTTP buf(uri, Poco::Net::HTTPRequest::HTTP_POST, {}, ConnectionTimeouts::getHTTPTimeouts(getContext()), credentials);
@ -210,6 +215,7 @@ protected:
uri.setPath(IDENTIFIER_QUOTE_HANDLER);
uri.addQueryParameter("version", std::to_string(XDBC_BRIDGE_PROTOCOL_VERSION));
uri.addQueryParameter("connection_string", getConnectionString());
uri.addQueryParameter("use_connection_pooling", toString(use_connection_pooling));
ReadWriteBufferFromHTTP buf(uri, Poco::Net::HTTPRequest::HTTP_POST, {}, ConnectionTimeouts::getHTTPTimeouts(getContext()), credentials);


@ -28,6 +28,7 @@ protected:
bool isCompression() const override { return false; }
bool isGenericCompression() const override { return false; }
bool isDeltaCompression() const override { return true; }
private:
const UInt8 delta_bytes_size;


@ -133,6 +133,7 @@ protected:
bool isCompression() const override { return true; }
bool isGenericCompression() const override { return false; }
bool isDeltaCompression() const override { return true; }
private:
UInt8 data_bytes_size;


@ -85,8 +85,9 @@ ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST(
bool with_compression_codec = false;
bool with_none_codec = false;
bool with_floating_point_timeseries_codec = false;
std::optional<size_t> generic_compression_codec_pos;
std::optional<size_t> first_generic_compression_codec_pos;
std::optional<size_t> first_delta_codec_pos;
std::optional<size_t> last_floating_point_time_series_codec_pos;
std::set<size_t> encryption_codecs_pos;
bool can_substitute_codec_arguments = true;
@ -163,10 +164,15 @@ ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST(
with_compression_codec |= result_codec->isCompression();
with_none_codec |= result_codec->isNone();
with_floating_point_timeseries_codec |= result_codec->isFloatingPointTimeSeriesCodec();
if (!generic_compression_codec_pos && result_codec->isGenericCompression())
generic_compression_codec_pos = i;
if (result_codec->isGenericCompression() && !first_generic_compression_codec_pos.has_value())
first_generic_compression_codec_pos = i;
if (result_codec->isDeltaCompression() && !first_delta_codec_pos.has_value())
first_delta_codec_pos = i;
if (result_codec->isFloatingPointTimeSeriesCodec())
last_floating_point_time_series_codec_pos = i;
if (result_codec->isEncryption())
encryption_codecs_pos.insert(i);
@ -178,44 +184,55 @@ ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST(
{
if (codecs_descriptions->children.size() > 1 && with_none_codec)
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"It does not make sense to have codec NONE along with other compression codecs: {}. "
"(Note: you can enable setting 'allow_suspicious_codecs' to skip this check).",
codec_description);
/// Allow to explicitly specify single NONE codec if user don't want any compression.
/// But applying other transformations solely without compression (e.g. Delta) does not make sense.
/// It's okay to apply encryption codecs solely without anything else.
if (!with_compression_codec && !with_none_codec && encryption_codecs_pos.size() != codecs_descriptions->children.size())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Compression codec {} does not compress anything. "
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Compression codec {} does not compress anything. "
"You may want to add generic compression algorithm after other transformations, like: {}, LZ4. "
"(Note: you can enable setting 'allow_suspicious_codecs' to skip this check).",
codec_description, codec_description);
/// It does not make sense to apply any non-encryption codecs
/// after encryption one.
/// It does not make sense to apply any non-encryption codecs after encryption one.
if (!encryption_codecs_pos.empty() &&
*encryption_codecs_pos.begin() != codecs_descriptions->children.size() - encryption_codecs_pos.size())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "The combination of compression codecs {} is meaningless, "
"because it does not make sense to apply any non-post-processing codecs after "
"post-processing ones. (Note: you can enable setting 'allow_suspicious_codecs' "
"to skip this check).", codec_description);
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"The combination of compression codecs {} is meaningless, "
"because it does not make sense to apply any non-post-processing codecs after "
"post-processing ones. (Note: you can enable setting 'allow_suspicious_codecs' "
"to skip this check).", codec_description);
/// Floating-point time series codecs are not supposed to compress non-floating-point data
if (with_floating_point_timeseries_codec &&
column_type && !innerDataTypeIsFloat(column_type))
if (last_floating_point_time_series_codec_pos.has_value()
&& column_type && !innerDataTypeIsFloat(column_type))
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"The combination of compression codecs {} is meaningless,"
" because it does not make sense to apply a floating-point time series codec to non-floating-point columns"
" (Note: you can enable setting 'allow_suspicious_codecs' to skip this check).", codec_description);
/// Floating-point time series codecs usually do implicit delta compression (or something equivalent), and makes no sense to run
/// delta compression manually.
if (first_delta_codec_pos.has_value() && last_floating_point_time_series_codec_pos.has_value()
&& (*first_delta_codec_pos < *last_floating_point_time_series_codec_pos))
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"The combination of compression codecs {} is meaningless,"
" because floating point time series codecs do delta compression implicitly by themselves."
" (Note: you can enable setting 'allow_suspicious_codecs' to skip this check).", codec_description);
/// It does not make sense to apply any transformations after generic compression algorithm
/// So, generic compression can be only one and only at the end.
if (generic_compression_codec_pos &&
*generic_compression_codec_pos != codecs_descriptions->children.size() - 1 - encryption_codecs_pos.size())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "The combination of compression codecs {} is meaningless, "
"because it does not make sense to apply any transformations after generic "
"compression algorithm. (Note: you can enable setting 'allow_suspicious_codecs' "
"to skip this check).", codec_description);
if (first_generic_compression_codec_pos &&
*first_generic_compression_codec_pos != codecs_descriptions->children.size() - 1 - encryption_codecs_pos.size())
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"The combination of compression codecs {} is meaningless, "
"because it does not make sense to apply any transformations after generic "
"compression algorithm. (Note: you can enable setting 'allow_suspicious_codecs' "
"to skip this check).", codec_description);
}
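To make the new rules concrete, a hypothetical DDL sketch (assumptions: Gorilla acts as a floating-point time series codec, Delta as a delta codec, and allow_suspicious_codecs is disabled):

```sql
-- Rejected by the check above: Delta precedes a floating-point time series codec,
-- which already performs delta-like preprocessing by itself.
CREATE TABLE t_bad (ts DateTime, v Float64 CODEC(Delta, Gorilla, ZSTD)) ENGINE = MergeTree ORDER BY ts;

-- Accepted: the time series codec is followed by a single generic compression codec at the end.
CREATE TABLE t_ok (ts DateTime, v Float64 CODEC(Gorilla, ZSTD)) ENGINE = MergeTree ORDER BY ts;
```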


@ -102,6 +102,9 @@ public:
/// If it is a specialized codec for floating-point time series. Applying it to non-floating point data is suspicious.
virtual bool isFloatingPointTimeSeriesCodec() const { return false; }
/// If the codec's purpose is to calculate deltas between consecutive values.
virtual bool isDeltaCompression() const { return false; }
/// It is a codec available only for evaluation purposes and not meant to be used in production.
/// It will not be allowed to use unless the user will turn off the safety switch.
virtual bool isExperimental() const { return false; }


@ -540,6 +540,15 @@ class IColumn;
M(Bool, describe_extend_object_types, false, "Deduce concrete type of columns of type Object in DESCRIBE query", 0) \
M(Bool, describe_include_subcolumns, false, "If true, subcolumns of all table columns will be included into result of DESCRIBE query", 0) \
\
M(Bool, use_query_result_cache, false, "Enable the query result cache", 0) \
M(Bool, enable_writes_to_query_result_cache, true, "Enable storing results of SELECT queries in the query result cache", 0) \
M(Bool, enable_reads_from_query_result_cache, true, "Enable reading results of SELECT queries from the query result cache", 0) \
M(Bool, query_result_cache_store_results_of_queries_with_nondeterministic_functions, false, "Store results of queries with non-deterministic functions (e.g. rand(), now()) in the query result cache", 0) \
M(UInt64, query_result_cache_min_query_runs, 0, "Minimum number a SELECT query must run before its result is stored in the query result cache", 0) \
M(Milliseconds, query_result_cache_min_query_duration, 0, "Minimum time in milliseconds for a query to run for its result to be stored in the query result cache.", 0) \
M(Seconds, query_result_cache_ttl, 60, "After this time in seconds entries in the query result cache become stale", 0) \
M(Bool, query_result_cache_share_between_users, false, "Allow other users to read entry in the query result cache", 0) \
\
M(Bool, optimize_rewrite_sum_if_to_count_if, false, "Rewrite sumIf() and sum(if()) function countIf() function when logically equivalent", 0) \
M(UInt64, insert_shard_id, 0, "If non zero, when insert into a distributed table, the data will be inserted into the shard `insert_shard_id` synchronously. Possible values range from 1 to `shards_number` of corresponding distributed table", 0) \
\
@ -660,6 +669,7 @@ class IColumn;
M(Bool, allow_experimental_nlp_functions, false, "Enable experimental functions for natural language processing.", 0) \
M(Bool, allow_experimental_hash_functions, false, "Enable experimental hash functions (hashid, etc)", 0) \
M(Bool, allow_experimental_object_type, false, "Allow Object and JSON data types", 0) \
M(Bool, allow_experimental_query_result_cache, false, "Enable experimental query result cache", 0) \
M(String, insert_deduplication_token, "", "If not empty, used for duplicate detection instead of data digest", 0) \
M(String, ann_index_select_query_params, "", "Parameters passed to ANN indexes in SELECT queries, the format is 'param1=x, param2=y, ...'", 0) \
M(UInt64, max_limit_for_ann_queries, 1000000, "Maximum limit value for using ANN indexes is used to prevent memory overflow in search queries for indexes", 0) \
@ -675,13 +685,6 @@ class IColumn;
M(UInt64, grace_hash_join_max_buckets, 1024, "Limit on the number of grace hash join buckets", 0) \
M(Bool, optimize_distinct_in_order, true, "Enable DISTINCT optimization if some columns in DISTINCT form a prefix of sorting. For example, prefix of sorting key in merge tree or ORDER BY statement", 0) \
M(Bool, optimize_sorting_by_input_stream_properties, true, "Optimize sorting by sorting properties of input stream", 0) \
M(Bool, enable_experimental_query_result_cache, false, "Store and retrieve results of SELECT queries in/from the query result cache", 0) \
M(Bool, enable_experimental_query_result_cache_passive_usage, false, "Retrieve results of SELECT queries from the query result cache", 0) \
M(Bool, query_result_cache_store_results_of_queries_with_nondeterministic_functions, false, "Store results of queries with non-deterministic functions (e.g. rand(), now()) in the query result cache", 0) \
M(UInt64, query_result_cache_min_query_runs, 0, "Minimum number a SELECT query must run before its result is stored in the query result cache", 0) \
M(Milliseconds, query_result_cache_min_query_duration, 0, "Minimum time in milliseconds for a query to run for its result to be stored in the query result cache.", 0) \
M(Seconds, query_result_cache_ttl, 60, "After this time in seconds entries in the query result cache become stale", 0) \
M(Bool, query_result_cache_share_between_users, false, "Allow other users to read entry in the query result cache", 0) \
M(UInt64, insert_keeper_max_retries, 0, "Max retries for keeper operations during insert", 0) \
M(UInt64, insert_keeper_retry_initial_backoff_ms, 100, "Initial backoff timeout for keeper operations during insert", 0) \
M(UInt64, insert_keeper_retry_max_backoff_ms, 10000, "Max backoff timeout for keeper operations during insert", 0) \


@ -233,7 +233,11 @@ void registerDictionarySourceXDBC(DictionarySourceFactory & factory)
bool /* check_config */) -> DictionarySourcePtr {
#if USE_ODBC
BridgeHelperPtr bridge = std::make_shared<XDBCBridgeHelper<ODBCBridgeMixin>>(
global_context, global_context->getSettings().http_receive_timeout, config.getString(config_prefix + ".odbc.connection_string"));
global_context,
global_context->getSettings().http_receive_timeout,
config.getString(config_prefix + ".odbc.connection_string"),
config.getBool(config_prefix + ".settings.odbc_bridge_use_connection_pooling",
global_context->getSettingsRef().odbc_bridge_use_connection_pooling));
std::string settings_config_prefix = config_prefix + ".odbc";
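For context, a hedged sketch of how the pooling switch forwarded here can be exercised from SQL (DSN, schema, and table names are placeholders):

```sql
-- Disable ODBC bridge connection pooling for this query only; the bridge receives
-- the use_connection_pooling URL parameter introduced in this commit.
SELECT *
FROM odbc('DSN=my_dsn', 'public', 'events')
SETTINGS odbc_bridge_use_connection_pooling = 0;
```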


@ -294,7 +294,9 @@ struct RemoveRecursiveObjectStorageOperation final : public IDiskObjectStorageOp
void execute(MetadataTransactionPtr tx) override
{
removeMetadataRecursive(tx, path);
/// Similar to DiskLocal and https://en.cppreference.com/w/cpp/filesystem/remove
if (metadata_storage.exists(path))
removeMetadataRecursive(tx, path);
}
void undo() override


@ -115,6 +115,16 @@ struct CRCFunctionWrapper
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function {} to UUID argument", std::string(Impl::name));
}
[[noreturn]] static void ipv6(const ColumnIPv6::Container & /*offsets*/, size_t /*n*/, PaddedPODArray<ReturnType> & /*res*/)
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function {} to IPv6 argument", std::string(Impl::name));
}
[[noreturn]] static void ipv4(const ColumnIPv4::Container & /*offsets*/, size_t /*n*/, PaddedPODArray<ReturnType> & /*res*/)
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function {} to IPv4 argument", std::string(Impl::name));
}
private:
static ReturnType doCRC(const ColumnString::Chars & buf, size_t offset, size_t size)
{


@ -61,6 +61,18 @@ struct EmptyImpl
for (size_t i = 0; i < n; ++i)
res[i] = negative ^ (container[i].toUnderType() == 0);
}
static void ipv6(const ColumnIPv6::Container & container, size_t n, PaddedPODArray<UInt8> & res)
{
for (size_t i = 0; i < n; ++i)
res[i] = negative ^ (container[i].toUnderType() == 0);
}
static void ipv4(const ColumnIPv4::Container & container, size_t n, PaddedPODArray<UInt8> & res)
{
for (size_t i = 0; i < n; ++i)
res[i] = negative ^ (container[i].toUnderType() == 0);
}
};
}


@ -52,7 +52,9 @@ public:
if (!isStringOrFixedString(arguments[0])
&& !isArray(arguments[0])
&& !isMap(arguments[0])
&& !isUUID(arguments[0]))
&& !isUUID(arguments[0])
&& !isIPv6(arguments[0])
&& !isIPv4(arguments[0]))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}", arguments[0]->getName(), getName());
return std::make_shared<DataTypeNumber<ResultType>>();
@ -121,6 +123,22 @@ public:
Impl::uuid(col_uuid->getData(), input_rows_count, vec_res);
return col_res;
}
else if (const ColumnIPv6 * col_ipv6 = checkAndGetColumn<ColumnIPv6>(column.get()))
{
auto col_res = ColumnVector<ResultType>::create();
typename ColumnVector<ResultType>::Container & vec_res = col_res->getData();
vec_res.resize(col_ipv6->size());
Impl::ipv6(col_ipv6->getData(), input_rows_count, vec_res);
return col_res;
}
else if (const ColumnIPv4 * col_ipv4 = checkAndGetColumn<ColumnIPv4>(column.get()))
{
auto col_res = ColumnVector<ResultType>::create();
typename ColumnVector<ResultType>::Container & vec_res = col_res->getData();
vec_res.resize(col_ipv4->size());
Impl::ipv4(col_ipv4->getData(), input_rows_count, vec_res);
return col_res;
}
else
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of argument of function {}",
arguments[0].column->getName(), getName());


@ -113,7 +113,7 @@ struct PositionCaseSensitiveUTF8
static const char * advancePos(const char * pos, const char * end, size_t n)
{
for (auto it = pos; it != end; ++it)
for (const auto *it = pos; it != end; ++it)
{
if (!UTF8::isContinuationOctet(static_cast<UInt8>(*it)))
{
@ -128,7 +128,7 @@ struct PositionCaseSensitiveUTF8
static size_t countChars(const char * begin, const char * end)
{
size_t res = 0;
for (auto it = begin; it != end; ++it)
for (const auto *it = begin; it != end; ++it)
if (!UTF8::isContinuationOctet(static_cast<UInt8>(*it)))
++res;
return res;
@ -202,6 +202,53 @@ struct PositionImpl
const UInt8 * const end = haystack_data.data() + haystack_data.size();
const UInt8 * pos = begin;
/// Fast path when needle is empty
if (needle.empty())
{
/// Needle is empty and start_pos doesn't exist --> always return 1
if (start_pos == nullptr)
{
for (auto & r : res)
r = 1;
return;
}
ColumnString::Offset prev_offset = 0;
size_t rows = haystack_offsets.size();
if (const ColumnConst * start_pos_const = typeid_cast<const ColumnConst *>(&*start_pos))
{
/// Needle is empty and start_pos is constant
UInt64 start = std::max(start_pos_const->getUInt(0), static_cast<UInt64>(1));
for (size_t i = 0; i < rows; ++i)
{
size_t haystack_size = Impl::countChars(
reinterpret_cast<const char *>(pos), reinterpret_cast<const char *>(pos + haystack_offsets[i] - prev_offset - 1));
res[i] = (start <= haystack_size + 1) ? start : 0;
pos = begin + haystack_offsets[i];
prev_offset = haystack_offsets[i];
}
return;
}
else
{
/// Needle is empty and start_pos is not constant
for (size_t i = 0; i < rows; ++i)
{
size_t haystack_size = Impl::countChars(
reinterpret_cast<const char *>(pos), reinterpret_cast<const char *>(pos + haystack_offsets[i] - prev_offset - 1));
UInt64 start = start_pos->getUInt(i);
start = std::max(static_cast<UInt64>(1), start);
res[i] = (start <= haystack_size + 1) ? start : 0;
pos = begin + haystack_offsets[i];
prev_offset = haystack_offsets[i];
}
return;
}
}
/// Current index in the array of strings.
size_t i = 0;
@ -253,7 +300,7 @@ struct PositionImpl
{
auto start = std::max(start_pos, UInt64(1));
if (needle.size() == 0)
if (needle.empty())
{
size_t haystack_size = Impl::countChars(data.data(), data.data() + data.size());
res = start <= haystack_size + 1 ? start : 0;


@ -43,6 +43,16 @@ struct LengthImpl
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function length to UUID argument");
}
[[noreturn]] static void ipv6(const ColumnIPv6::Container &, size_t &, PaddedPODArray<UInt64> &)
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function length to IPv6 argument");
}
[[noreturn]] static void ipv4(const ColumnIPv4::Container &, size_t &, PaddedPODArray<UInt64> &)
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function length to IPv4 argument");
}
};


@ -60,6 +60,16 @@ struct AsciiImpl
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function {} to UUID argument", AsciiName::name);
}
[[noreturn]] static void ipv6(const ColumnIPv6::Container & /*offsets*/, size_t /*n*/, PaddedPODArray<ReturnType> & /*res*/)
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function {} to IPv6 argument", AsciiName::name);
}
[[noreturn]] static void ipv4(const ColumnIPv4::Container & /*offsets*/, size_t /*n*/, PaddedPODArray<ReturnType> & /*res*/)
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function {} to IPv4 argument", AsciiName::name);
}
private:
static Int32 doAscii(const ColumnString::Chars & buf, size_t offset, size_t size)
{


@ -248,6 +248,16 @@ SOFTWARE.
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function isValidUTF8 to UUID argument");
}
[[noreturn]] static void ipv6(const ColumnIPv6::Container &, size_t &, PaddedPODArray<UInt8> &)
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function isValidUTF8 to IPv6 argument");
}
[[noreturn]] static void ipv4(const ColumnIPv4::Container &, size_t &, PaddedPODArray<UInt8> &)
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function isValidUTF8 to IPv4 argument");
}
};
struct NameIsValidUTF8


@ -58,6 +58,16 @@ struct LengthUTF8Impl
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function lengthUTF8 to UUID argument");
}
[[noreturn]] static void ipv6(const ColumnIPv6::Container &, size_t &, PaddedPODArray<UInt64> &)
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function lengthUTF8 to IPv6 argument");
}
[[noreturn]] static void ipv4(const ColumnIPv4::Container &, size_t &, PaddedPODArray<UInt64> &)
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot apply function lengthUTF8 to IPv4 argument");
}
};
struct NameLengthUTF8


@ -79,8 +79,9 @@ public:
auto is_query_result_cache_related_setting = [](const auto & change)
{
return change.name.starts_with("enable_experimental_query_result_cache")
|| change.name.starts_with("query_result_cache");
return change.name == "allow_experimental_query_result_cache"
|| change.name.starts_with("query_result_cache")
|| change.name.ends_with("query_result_cache");
};
std::erase_if(set_clause->changes, is_query_result_cache_related_setting);
@ -88,7 +89,7 @@ public:
}
/// TODO further improve AST cleanup, e.g. remove SETTINGS clause completely if it is empty
/// E.g. SELECT 1 SETTINGS enable_experimental_query_result_cache = true
/// E.g. SELECT 1 SETTINGS use_query_result_cache = true
/// and SELECT 1;
/// currently don't match.
};
@ -96,11 +97,12 @@ public:
using RemoveQueryResultCacheSettingsVisitor = InDepthNodeVisitor<RemoveQueryResultCacheSettingsMatcher, true>;
/// Consider
/// (1) SET enable_experimental_query_result_cache = true;
/// (1) SET use_query_result_cache = true;
/// SELECT expensiveComputation(...) SETTINGS max_threads = 64, query_result_cache_ttl = 300;
/// SET enable_experimental_query_result_cache = false;
/// SET use_query_result_cache = false;
/// and
/// (2) SELECT expensiveComputation(...) SETTINGS max_threads = 64, enable_experimental_query_result_cache_passive_usage = true;
/// (2) SELECT expensiveComputation(...) SETTINGS max_threads = 64, use_query_result_cache = true;
///
/// The SELECT queries in (1) and (2) are basically the same and the user expects that the second invocation is served from the query result
/// cache. However, query results are indexed by their query ASTs and therefore no result will be found. Insert and retrieval behave overall
/// more natural if settings related to the query result cache are erased from the AST key. Note that at this point the settings themselves


@ -714,11 +714,15 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
res = interpreter->execute();
/// Try to read (SELECT) query result from query result cache (if it is enabled)
/// If
/// - it is a SELECT query,
/// - passive (read) use of the query result cache is enabled, and
/// - the query result cache knows the query result
/// then replace the pipeline by a new pipeline with a single source that is populated from the query result cache
auto query_result_cache = context->getQueryResultCache();
bool read_result_from_query_result_cache = false; /// a query must not read from *and* write to the query result cache at the same time
if (query_result_cache != nullptr
&& (settings.enable_experimental_query_result_cache || settings.enable_experimental_query_result_cache_passive_usage)
&& (settings.allow_experimental_query_result_cache && settings.use_query_result_cache && settings.enable_reads_from_query_result_cache)
&& res.pipeline.pulling())
{
QueryResultCache::Key key(
@ -733,10 +737,13 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
}
}
/// Try to write (SELECT) query result into query result cache (if it is enabled)
/// If
/// - it is a SELECT query, and
/// - active (write) use of the query result cache is enabled
/// then add a processor on top of the pipeline which stores the result in the query result cache.
if (!read_result_from_query_result_cache
&& query_result_cache != nullptr
&& settings.enable_experimental_query_result_cache
&& settings.allow_experimental_query_result_cache && settings.use_query_result_cache && settings.enable_writes_to_query_result_cache
&& res.pipeline.pulling()
&& (!astContainsNonDeterministicFunctions(ast, context) || settings.query_result_cache_store_results_of_queries_with_nondeterministic_functions))
{
@ -901,7 +908,9 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
auto finish_callback = [elem, auto finish_callback = [elem,
context, context,
ast, ast,
enable_experimental_query_result_cache = settings.enable_experimental_query_result_cache, allow_experimental_query_result_cache = settings.allow_experimental_query_result_cache,
use_query_result_cache = settings.use_query_result_cache,
enable_writes_to_query_result_cache = settings.enable_writes_to_query_result_cache,
query_result_cache_store_results_of_queries_with_nondeterministic_functions = settings.query_result_cache_store_results_of_queries_with_nondeterministic_functions, query_result_cache_store_results_of_queries_with_nondeterministic_functions = settings.query_result_cache_store_results_of_queries_with_nondeterministic_functions,
log_queries, log_queries,
log_queries_min_type = settings.log_queries_min_type, log_queries_min_type = settings.log_queries_min_type,
@ -912,11 +921,12 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
pulling_pipeline = pipeline.pulling(), pulling_pipeline = pipeline.pulling(),
query_span](QueryPipeline & query_pipeline) mutable query_span](QueryPipeline & query_pipeline) mutable
{ {
/// Write query result into query result cache (if enabled) /// If active (write) use of the query result cache is enabled and the query is eligible for result caching, then store the
/// query result buffered in the special-purpose cache processor (added on top of the pipeline) into the cache.
auto query_result_cache = context->getQueryResultCache(); auto query_result_cache = context->getQueryResultCache();
if (query_result_cache != nullptr if (query_result_cache != nullptr
&& pulling_pipeline && pulling_pipeline
&& enable_experimental_query_result_cache && allow_experimental_query_result_cache && use_query_result_cache && enable_writes_to_query_result_cache
&& (!astContainsNonDeterministicFunctions(ast, context) || query_result_cache_store_results_of_queries_with_nondeterministic_functions)) && (!astContainsNonDeterministicFunctions(ast, context) || query_result_cache_store_results_of_queries_with_nondeterministic_functions))
{ {
query_pipeline.finalizeWriteInQueryResultCache(); query_pipeline.finalizeWriteInQueryResultCache();
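For reference, a short SQL sketch of how the new settings split reads and writes (names taken from the conditions above; treating enable_reads_from_query_result_cache = false as a "write-only" mode is an assumption, the tests further below only exercise the read-only combination):
SET allow_experimental_query_result_cache = true;
-- Write-only: compute the result normally but store it in the cache
SELECT 1 SETTINGS use_query_result_cache = true, enable_reads_from_query_result_cache = false;
-- Read-only (passive use): serve from the cache if an entry exists, never store anything
SELECT 1 SETTINGS use_query_result_cache = true, enable_writes_to_query_result_cache = false;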

View File

@ -2613,6 +2613,14 @@ void MergeTreeData::dropAllData()
if (disk->isBroken()) if (disk->isBroken())
continue; continue;
/// This can naturally happen if we could not drop the table on the first attempt,
/// i.e. we got exceptions after the recursive remove
if (!disk->exists(relative_data_path))
{
LOG_INFO(log, "dropAllData: path {} is already removed from disk {}", relative_data_path, disk->getName());
continue;
}
LOG_INFO(log, "dropAllData: remove format_version.txt, detached, moving and write ahead logs"); LOG_INFO(log, "dropAllData: remove format_version.txt, detached, moving and write ahead logs");
disk->removeFileIfExists(fs::path(relative_data_path) / FORMAT_VERSION_FILE_NAME); disk->removeFileIfExists(fs::path(relative_data_path) / FORMAT_VERSION_FILE_NAME);

View File

@ -67,11 +67,14 @@ void ReplicatedMergeTreePartCheckThread::enqueuePart(const String & name, time_t
task->schedule(); task->schedule();
} }
void ReplicatedMergeTreePartCheckThread::cancelRemovedPartsCheck(const MergeTreePartInfo & drop_range_info) std::unique_lock<std::mutex> ReplicatedMergeTreePartCheckThread::pausePartsCheck()
{ {
/// Wait for running tasks to finish and temporarily stop checking /// Wait for running tasks to finish and temporarily stop checking
auto pause_checking_parts = task->getExecLock(); return task->getExecLock();
}
void ReplicatedMergeTreePartCheckThread::cancelRemovedPartsCheck(const MergeTreePartInfo & drop_range_info)
{
std::lock_guard lock(parts_mutex); std::lock_guard lock(parts_mutex);
for (auto it = parts_queue.begin(); it != parts_queue.end();) for (auto it = parts_queue.begin(); it != parts_queue.end();)
{ {

View File

@ -71,6 +71,9 @@ public:
/// Check part by name /// Check part by name
CheckResult checkPart(const String & part_name); CheckResult checkPart(const String & part_name);
std::unique_lock<std::mutex> pausePartsCheck();
/// Can be called only while holding a lock returned from pausePartsCheck()
void cancelRemovedPartsCheck(const MergeTreePartInfo & drop_range_info); void cancelRemovedPartsCheck(const MergeTreePartInfo & drop_range_info);
private: private:

View File

@ -1156,7 +1156,15 @@ void StorageDistributed::drop()
auto disks = data_volume->getDisks(); auto disks = data_volume->getDisks();
for (const auto & disk : disks) for (const auto & disk : disks)
{
if (!disk->exists(relative_data_path))
{
LOG_INFO(log, "Path {} is already removed from disk {}", relative_data_path, disk->getName());
continue;
}
disk->removeRecursive(relative_data_path); disk->removeRecursive(relative_data_path);
}
LOG_DEBUG(log, "Removed"); LOG_DEBUG(log, "Removed");
} }

View File

@ -89,7 +89,11 @@ void StorageJoin::truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPt
std::lock_guard mutate_lock(mutate_mutex); std::lock_guard mutate_lock(mutate_mutex);
TableLockHolder holder = tryLockTimedWithContext(rwlock, RWLockImpl::Write, context); TableLockHolder holder = tryLockTimedWithContext(rwlock, RWLockImpl::Write, context);
disk->removeRecursive(path); if (disk->exists(path))
disk->removeRecursive(path);
else
LOG_INFO(&Poco::Logger::get("StorageJoin"), "Path {} is already removed from disk {}", path, disk->getName());
disk->createDirectories(path); disk->createDirectories(path);
disk->createDirectories(path + "tmp/"); disk->createDirectories(path + "tmp/");

View File

@ -1880,8 +1880,11 @@ void StorageReplicatedMergeTree::executeDropRange(const LogEntry & entry)
auto drop_range_info = MergeTreePartInfo::fromPartName(entry.new_part_name, format_version); auto drop_range_info = MergeTreePartInfo::fromPartName(entry.new_part_name, format_version);
getContext()->getMergeList().cancelInPartition(getStorageID(), drop_range_info.partition_id, drop_range_info.max_block); getContext()->getMergeList().cancelInPartition(getStorageID(), drop_range_info.partition_id, drop_range_info.max_block);
queue.removePartProducingOpsInRange(getZooKeeper(), drop_range_info, entry, /* fetch_entry_znode= */ {}); {
part_check_thread.cancelRemovedPartsCheck(drop_range_info); auto pause_checking_parts = part_check_thread.pausePartsCheck();
queue.removePartProducingOpsInRange(getZooKeeper(), drop_range_info, entry, /* fetch_entry_znode= */ {});
part_check_thread.cancelRemovedPartsCheck(drop_range_info);
}
/// Delete the parts contained in the range to be deleted. /// Delete the parts contained in the range to be deleted.
/// It's important that no old parts remain (after the merge), because otherwise, /// It's important that no old parts remain (after the merge), because otherwise,
@ -1957,6 +1960,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry)
if (replace) if (replace)
{ {
getContext()->getMergeList().cancelInPartition(getStorageID(), drop_range.partition_id, drop_range.max_block); getContext()->getMergeList().cancelInPartition(getStorageID(), drop_range.partition_id, drop_range.max_block);
auto pause_checking_parts = part_check_thread.pausePartsCheck();
queue.removePartProducingOpsInRange(getZooKeeper(), drop_range, entry, /* fetch_entry_znode= */ {}); queue.removePartProducingOpsInRange(getZooKeeper(), drop_range, entry, /* fetch_entry_znode= */ {});
part_check_thread.cancelRemovedPartsCheck(drop_range); part_check_thread.cancelRemovedPartsCheck(drop_range);
} }
@ -7781,6 +7785,9 @@ StorageReplicatedMergeTree::LogEntryPtr StorageReplicatedMergeTree::dropAllParts
void StorageReplicatedMergeTree::enqueuePartForCheck(const String & part_name, time_t delay_to_check_seconds) void StorageReplicatedMergeTree::enqueuePartForCheck(const String & part_name, time_t delay_to_check_seconds)
{ {
MergeTreePartInfo covering_drop_range; MergeTreePartInfo covering_drop_range;
/// NOTE This check is just an optimization; it is not reliable for two reasons:
/// (1) the drop entry could be removed concurrently, and (2) it does not take REPLACE_RANGE into account.
/// See also ReplicatedMergeTreePartCheckThread::cancelRemovedPartsCheck
if (queue.hasDropRange(MergeTreePartInfo::fromPartName(part_name, format_version), &covering_drop_range)) if (queue.hasDropRange(MergeTreePartInfo::fromPartName(part_name, format_version), &covering_drop_range))
{ {
LOG_WARNING(log, "Do not enqueue part {} for check because it's covered by DROP_RANGE {} and going to be removed", LOG_WARNING(log, "Do not enqueue part {} for check because it's covered by DROP_RANGE {} and going to be removed",

View File

@ -162,7 +162,11 @@ std::optional<UInt64> StorageSet::totalBytes(const Settings &) const { return se
void StorageSet::truncate(const ASTPtr &, const StorageMetadataPtr & metadata_snapshot, ContextPtr, TableExclusiveLockHolder &) void StorageSet::truncate(const ASTPtr &, const StorageMetadataPtr & metadata_snapshot, ContextPtr, TableExclusiveLockHolder &)
{ {
disk->removeRecursive(path); if (disk->exists(path))
disk->removeRecursive(path);
else
LOG_INFO(&Poco::Logger::get("StorageSet"), "Path {} is already removed from disk {}", path, disk->getName());
disk->createDirectories(path); disk->createDirectories(path);
disk->createDirectories(fs::path(path) / "tmp/"); disk->createDirectories(fs::path(path) / "tmp/");

View File

@ -173,7 +173,8 @@ namespace
BridgeHelperPtr bridge_helper = std::make_shared<XDBCBridgeHelper<BridgeHelperMixin>>(args.getContext(), BridgeHelperPtr bridge_helper = std::make_shared<XDBCBridgeHelper<BridgeHelperMixin>>(args.getContext(),
args.getContext()->getSettingsRef().http_receive_timeout.value, args.getContext()->getSettingsRef().http_receive_timeout.value,
checkAndGetLiteralArgument<String>(engine_args[0], "connection_string")); checkAndGetLiteralArgument<String>(engine_args[0], "connection_string"),
args.getContext()->getSettingsRef().odbc_bridge_use_connection_pooling.value);
return std::make_shared<StorageXDBC>( return std::make_shared<StorageXDBC>(
args.table_id, args.table_id,
checkAndGetLiteralArgument<String>(engine_args[1], "database_name"), checkAndGetLiteralArgument<String>(engine_args[1], "database_name"),

View File

@ -56,7 +56,7 @@ void ITableFunctionXDBC::startBridgeIfNot(ContextPtr context) const
{ {
if (!helper) if (!helper)
{ {
helper = createBridgeHelper(context, context->getSettingsRef().http_receive_timeout.value, connection_string); helper = createBridgeHelper(context, context->getSettingsRef().http_receive_timeout.value, connection_string, context->getSettingsRef().odbc_bridge_use_connection_pooling.value);
helper->startBridgeSync(); helper->startBridgeSync();
} }
} }

View File

@ -21,7 +21,8 @@ private:
/* A factory method to create bridge helper, that will assist in remote interaction */ /* A factory method to create bridge helper, that will assist in remote interaction */
virtual BridgeHelperPtr createBridgeHelper(ContextPtr context, virtual BridgeHelperPtr createBridgeHelper(ContextPtr context,
Poco::Timespan http_timeout_, Poco::Timespan http_timeout_,
const std::string & connection_string_) const = 0; const std::string & connection_string_,
bool use_connection_pooling_) const = 0;
ColumnsDescription getActualTableStructure(ContextPtr context) const override; ColumnsDescription getActualTableStructure(ContextPtr context) const override;
@ -47,9 +48,10 @@ public:
private: private:
BridgeHelperPtr createBridgeHelper(ContextPtr context, BridgeHelperPtr createBridgeHelper(ContextPtr context,
Poco::Timespan http_timeout_, Poco::Timespan http_timeout_,
const std::string & connection_string_) const override const std::string & connection_string_,
bool use_connection_pooling_) const override
{ {
return std::make_shared<XDBCBridgeHelper<JDBCBridgeMixin>>(context, http_timeout_, connection_string_); return std::make_shared<XDBCBridgeHelper<JDBCBridgeMixin>>(context, http_timeout_, connection_string_, use_connection_pooling_);
} }
const char * getStorageTypeName() const override { return "JDBC"; } const char * getStorageTypeName() const override { return "JDBC"; }
@ -67,9 +69,10 @@ public:
private: private:
BridgeHelperPtr createBridgeHelper(ContextPtr context, BridgeHelperPtr createBridgeHelper(ContextPtr context,
Poco::Timespan http_timeout_, Poco::Timespan http_timeout_,
const std::string & connection_string_) const override const std::string & connection_string_,
bool use_connection_pooling_) const override
{ {
return std::make_shared<XDBCBridgeHelper<ODBCBridgeMixin>>(context, http_timeout_, connection_string_); return std::make_shared<XDBCBridgeHelper<ODBCBridgeMixin>>(context, http_timeout_, connection_string_, use_connection_pooling_);
} }
const char * getStorageTypeName() const override { return "ODBC"; } const char * getStorageTypeName() const override { return "ODBC"; }

View File

@ -10,11 +10,13 @@ from os import getenv
from pprint import pformat from pprint import pformat
from typing import Dict, List from typing import Dict, List
from github.PaginatedList import PaginatedList
from github.PullRequestReview import PullRequestReview from github.PullRequestReview import PullRequestReview
from github.WorkflowRun import WorkflowRun
from commit_status_helper import get_commit_filtered_statuses from commit_status_helper import get_commit_filtered_statuses
from get_robot_token import get_best_robot_token from get_robot_token import get_best_robot_token
from github_helper import GitHub, NamedUser, PullRequest from github_helper import GitHub, NamedUser, PullRequest, Repository
from pr_info import PRInfo from pr_info import PRInfo
@ -127,6 +129,20 @@ class Reviews:
return False return False
def get_workflows_for_head(repo: Repository, head_sha: str) -> List[WorkflowRun]:
# A monkey-patch, needed until the following PR is merged:
# https://github.com/PyGithub/PyGithub/pull/2408
return list(
PaginatedList(
WorkflowRun,
repo._requester, # type:ignore # pylint:disable=protected-access
f"{repo.url}/actions/runs",
{"head_sha": head_sha},
list_item="workflow_runs",
)
)
def parse_args() -> argparse.Namespace: def parse_args() -> argparse.Namespace:
pr_info = PRInfo() pr_info = PRInfo()
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
@ -134,11 +150,26 @@ def parse_args() -> argparse.Namespace:
description="Script to merge the given PR. Additional checks for approved " description="Script to merge the given PR. Additional checks for approved "
"status and green commit statuses could be done", "status and green commit statuses could be done",
) )
parser.add_argument(
"--dry-run",
action="store_true",
help="if set, the script won't merge the PR, just check the conditions",
)
parser.add_argument( parser.add_argument(
"--check-approved", "--check-approved",
action="store_true", action="store_true",
help="if set, checks that the PR is approved and no changes required", help="if set, checks that the PR is approved and no changes required",
) )
parser.add_argument(
"--check-running-workflows", default=True, help=argparse.SUPPRESS
)
parser.add_argument(
"--no-check-running-workflows",
dest="check_running_workflows",
action="store_false",
default=argparse.SUPPRESS,
help="(dangerous) if set, skip checking for running workflows for the PR head",
)
parser.add_argument("--check-green", default=True, help=argparse.SUPPRESS) parser.add_argument("--check-green", default=True, help=argparse.SUPPRESS)
parser.add_argument( parser.add_argument(
"--no-check-green", "--no-check-green",
@ -194,6 +225,19 @@ def main():
logging.info("The PR #%s is not ready for merge, stopping", pr.number) logging.info("The PR #%s is not ready for merge, stopping", pr.number)
return return
if args.check_running_workflows:
workflows = get_workflows_for_head(repo, pr.head.sha)
workflows_in_progress = [wf for wf in workflows if wf.status != "completed"]
# At most one workflow in progress is fine. We check that there are no
# cases like, e.g., PullRequestCI and DocsCheck in progress at once
if len(workflows_in_progress) > 1:
logging.info(
"The PR #%s has more than one workflows in progress, check URLs:\n%s",
pr.number,
"\n".join(wf.html_url for wf in workflows_in_progress),
)
return
if args.check_green: if args.check_green:
logging.info("Checking that all PR's statuses are green") logging.info("Checking that all PR's statuses are green")
commit = repo.get_commit(pr.head.sha) commit = repo.get_commit(pr.head.sha)
@ -217,7 +261,8 @@ def main():
return return
logging.info("Merging the PR") logging.info("Merging the PR")
pr.merge() if not args.dry_run:
pr.merge()
if __name__ == "__main__": if __name__ == "__main__":

View File

@ -525,6 +525,24 @@ def test_zip_archive_with_settings():
assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n"
def test_zip_archive_with_bad_compression_method():
backup_name = "Disk('backups', 'archive_with_bad_compression_method.zip')"
create_and_fill_table()
assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n"
expected_error = "Unknown compression method specified for a zip archive"
assert expected_error in instance.query_and_get_error(
f"BACKUP TABLE test.table TO {backup_name} SETTINGS id='archive_with_bad_compression_method', compression_method='foobar'"
)
assert (
instance.query(
"SELECT status FROM system.backups WHERE id='archive_with_bad_compression_method'"
)
== "BACKUP_FAILED\n"
)
def test_async(): def test_async():
create_and_fill_table() create_and_fill_table()
assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n"

View File

@ -0,0 +1 @@
#!/usr/bin/env python3

View File

@ -0,0 +1,12 @@
<clickhouse>
<remote_servers>
<test_cluster>
<shard>
<replica>
<host>instance</host>
<port>9000</port>
</replica>
</shard>
</test_cluster>
</remote_servers>
</clickhouse>

View File

@ -0,0 +1,58 @@
#!/usr/bin/env python3
import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance("instance", main_configs=["configs/remote_servers.xml"])
@pytest.fixture(scope="module")
def setup_nodes():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def drop_table_directory(table_name):
data_path = instance.query(
f"SELECT data_paths[1] FROM system.tables where name = '{table_name}'"
).strip()
print("Data path", data_path)
instance.exec_in_container(
["bash", "-c", f"rm -fr {data_path}"], privileged=True, user="root"
)
def test_drop_no_local_path(setup_nodes):
instance.query(
"CREATE TABLE merge_tree_table (key UInt64) ENGINE = MergeTree() ORDER BY tuple()"
)
instance.query("INSERT INTO merge_tree_table VALUES (1)")
drop_table_directory("merge_tree_table")
instance.query("DROP TABLE merge_tree_table SYNC", timeout=10)
instance.query(
"CREATE TABLE merge_tree_table (key UInt64) ENGINE = MergeTree() ORDER BY tuple()"
)
instance.query(
"CREATE TABLE distributed_table (key UInt64) ENGINE = Distributed(test_cluster, default, merge_tree_table, key)"
)
instance.query("INSERT INTO distributed_table VALUES(0)")
drop_table_directory("distributed_table")
instance.query("DROP TABLE distributed_table SYNC", timeout=10)
instance.query("DROP TABLE merge_tree_table SYNC", timeout=10)
instance.query(
"CREATE TABLE join_table(`id` UInt64, `val` String) ENGINE = Join(ANY, LEFT, id)"
)
instance.query("INSERT INTO join_table VALUES (1, 'a')")
drop_table_directory("join_table")
instance.query("TRUNCATE TABLE join_table", timeout=10)

View File

@ -0,0 +1,41 @@
<clickhouse>
<dictionary>
<name>postgres_odbc_nopool</name>
<source>
<odbc>
<table>clickhouse.test_table</table>
<connection_string>DSN=postgresql_odbc</connection_string>
<db>postgres</db>
</odbc>
<settings>
<odbc_bridge_use_connection_pooling>0</odbc_bridge_use_connection_pooling>
</settings>
</source>
<lifetime>
<min>5</min>
<max>5</max>
</lifetime>
<layout>
<hashed />
</layout>
<structure>
<id>
<name>id</name>
</id>
<attribute>
<name>column1</name>
<type>Int64</type>
<null_value>1</null_value>
</attribute>
<attribute>
<name>column2</name>
<type>String</type>
<null_value>''</null_value>
</attribute>
</structure>
</dictionary>
</clickhouse>

View File

@ -21,6 +21,7 @@ node1 = cluster.add_instance(
"configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml", "configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml",
"configs/dictionaries/sqlite3_odbc_cached_dictionary.xml", "configs/dictionaries/sqlite3_odbc_cached_dictionary.xml",
"configs/dictionaries/postgres_odbc_hashed_dictionary.xml", "configs/dictionaries/postgres_odbc_hashed_dictionary.xml",
"configs/dictionaries/postgres_odbc_no_connection_pool_dictionary.xml",
], ],
) )
@ -624,6 +625,34 @@ def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster):
cursor.execute("truncate table clickhouse.test_table") cursor.execute("truncate table clickhouse.test_table")
def test_no_connection_pooling(started_cluster):
skip_test_msan(node1)
conn = get_postgres_conn(started_cluster)
cursor = conn.cursor()
cursor.execute(
"insert into clickhouse.test_table values(1, 1, 'hello'),(2, 2, 'world')"
)
node1.exec_in_container(["ss", "-K", "dport", "5432"], privileged=True, user="root")
node1.query("SYSTEM RELOAD DICTIONARY postgres_odbc_nopool")
assert_eq_with_retry(
node1,
"select dictGetString('postgres_odbc_nopool', 'column2', toUInt64(1))",
"hello",
)
assert_eq_with_retry(
node1,
"select dictGetString('postgres_odbc_nopool', 'column2', toUInt64(2))",
"world",
)
# No open connections should be left because we don't use connection pooling.
assert "" == node1.exec_in_container(
["ss", "-H", "dport", "5432"], privileged=True, user="root"
)
cursor.execute("truncate table clickhouse.test_table")
def test_postgres_insert(started_cluster): def test_postgres_insert(started_cluster):
skip_test_msan(node1) skip_test_msan(node1)

View File

@ -0,0 +1,13 @@
<test>
<query>select position(materialize('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'), '') from numbers(100000000) format Null</query>
<query>select position(materialize('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'), '', 10) from numbers(100000000) format Null</query>
<query>select positionCaseInsensitive(materialize('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'), '') from numbers(100000000) format Null</query>
<query>select positionCaseInsensitive(materialize('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'), '', 10) from numbers(100000000) format Null</query>
<query>select positionUTF8(materialize('xẞyyaa1ẞ1yzẞXẞẞ1ẞẞ1bctest'), '') from numbers(100000000) format Null</query>
<query>select positionUTF8(materialize('xẞyyaa1ẞ1yzẞXẞẞ1ẞẞ1bctest'), '', 10) from numbers(100000000) format Null</query>
<query>select positionCaseInsensitiveUTF8(materialize('xẞyyaa1ẞ1yzẞXẞẞ1ẞẞ1bctest'), '') from numbers(100000000) format Null</query>
<query>select positionCaseInsensitiveUTF8(materialize('xẞyyaa1ẞ1yzẞXẞẞ1ẞẞ1bctest'), '', 10) from numbers(100000000) format Null</query>
</test>

View File

@ -32,6 +32,8 @@ CREATE TABLE codecs (a UInt8 CODEC(LZ4, Delta)) ENGINE = MergeTree ORDER BY tupl
CREATE TABLE codecs (a UInt8 CODEC(Gorilla)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } CREATE TABLE codecs (a UInt8 CODEC(Gorilla)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS }
CREATE TABLE codecs (a FixedString(2) CODEC(Gorilla)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } CREATE TABLE codecs (a FixedString(2) CODEC(Gorilla)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS }
CREATE TABLE codecs (a Decimal(15,5) CODEC(Gorilla)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } CREATE TABLE codecs (a Decimal(15,5) CODEC(Gorilla)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS }
CREATE TABLE codecs (a Float64 CODEC(Delta, Gorilla)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS }
CREATE TABLE codecs (a Float32 CODEC(DoubleDelta, FPC)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS }
-- test that sanity check is not performed in ATTACH query -- test that sanity check is not performed in ATTACH query

View File

@ -1,18 +1,20 @@
-- Tags: no-parallel -- Tags: no-parallel
-- Tag no-parallel: Messes with internal cache -- Tag no-parallel: Messes with internal cache
SET allow_experimental_query_result_cache = true;
-- Start with empty query result cache (QRC) and query log -- Start with empty query result cache (QRC) and query log
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;
DROP TABLE system.query_log SYNC; DROP TABLE system.query_log SYNC;
-- Insert an entry into the query result cache. -- Insert an entry into the query result cache.
SELECT 1 SETTINGS enable_experimental_query_result_cache = true; SELECT 1 SETTINGS use_query_result_cache = true;
-- Check that entry in QRC exists -- Check that entry in QRC exists
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
-- Run the same SELECT but with different case (--> select). We want its result to be served from the QRC. -- Run the same SELECT but with different case (--> select). We want its result to be served from the QRC.
SELECT '---'; SELECT '---';
select 1 SETTINGS enable_experimental_query_result_cache = true; select 1 SETTINGS use_query_result_cache = true;
-- There should still be just one entry in the QRC -- There should still be just one entry in the QRC
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
@ -22,6 +24,6 @@ SYSTEM FLUSH LOGS;
SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses'] SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses']
FROM system.query_log FROM system.query_log
WHERE type = 'QueryFinish' WHERE type = 'QueryFinish'
AND query = 'select 1 SETTINGS enable_experimental_query_result_cache = true;'; AND query = 'select 1 SETTINGS use_query_result_cache = true;';
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;

View File

@ -1,8 +1,10 @@
-- Tags: no-parallel -- Tags: no-parallel
-- Tag no-parallel: Messes with internal cache -- Tag no-parallel: Messes with internal cache
SET allow_experimental_query_result_cache = true;
-- Cache query result in query result cache -- Cache query result in query result cache
SELECT 1 SETTINGS enable_experimental_query_result_cache = true; SELECT 1 SETTINGS use_query_result_cache = true;
SELECT count(*) FROM system.query_result_cache; SELECT count(*) FROM system.query_result_cache;
-- No query results are cached after DROP -- No query results are cached after DROP

View File

@ -1,15 +1,17 @@
-- Tags: no-parallel -- Tags: no-parallel
-- Tag no-parallel: Messes with internal cache -- Tag no-parallel: Messes with internal cache
SET allow_experimental_query_result_cache = true;
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;
DROP TABLE IF EXISTS eligible_test; DROP TABLE IF EXISTS eligible_test;
DROP TABLE IF EXISTS eligible_test2; DROP TABLE IF EXISTS eligible_test2;
-- enable query result cache session-wide but also force it individually in each of the statements below -- enable query result cache session-wide but also force it individually in each of the statements below
SET enable_experimental_query_result_cache = true; SET use_query_result_cache = true;
-- check that SELECT statements create entries in the query result cache ... -- check that SELECT statements create entries in the query result cache ...
SELECT 1 SETTINGS enable_experimental_query_result_cache = true; SELECT 1 SETTINGS use_query_result_cache = true;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;
@ -17,49 +19,49 @@ SYSTEM DROP QUERY RESULT CACHE;
-- ... and all other statements also should not create entries: -- ... and all other statements also should not create entries:
-- CREATE -- CREATE
CREATE TABLE eligible_test (a String) ENGINE=MergeTree ORDER BY a; -- SETTINGS enable_experimental_query_result_cache = true; -- SETTINGS rejected as unknown CREATE TABLE eligible_test (a String) ENGINE=MergeTree ORDER BY a; -- SETTINGS use_query_result_cache = true; -- SETTINGS rejected as unknown
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
-- ALTER -- ALTER
ALTER TABLE eligible_test ADD COLUMN b String SETTINGS enable_experimental_query_result_cache = true; ALTER TABLE eligible_test ADD COLUMN b String SETTINGS use_query_result_cache = true;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
-- INSERT -- INSERT
INSERT INTO eligible_test VALUES('a', 'b'); -- SETTINGS enable_experimental_query_result_cache = true; -- SETTINGS rejected as unknown INSERT INTO eligible_test VALUES('a', 'b'); -- SETTINGS use_query_result_cache = true; -- SETTINGS rejected as unknown
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
INSERT INTO eligible_test SELECT * FROM eligible_test SETTINGS enable_experimental_query_result_cache = true; INSERT INTO eligible_test SELECT * FROM eligible_test SETTINGS use_query_result_cache = true;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
-- SHOW -- SHOW
SHOW TABLES SETTINGS enable_experimental_query_result_cache = true; SHOW TABLES SETTINGS use_query_result_cache = true;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
-- CHECK -- CHECK
CHECK TABLE eligible_test SETTINGS enable_experimental_query_result_cache = true; CHECK TABLE eligible_test SETTINGS use_query_result_cache = true;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
-- DESCRIBE -- DESCRIBE
DESCRIBE TABLE eligible_test SETTINGS enable_experimental_query_result_cache = true; DESCRIBE TABLE eligible_test SETTINGS use_query_result_cache = true;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
-- EXISTS -- EXISTS
EXISTS TABLE eligible_test SETTINGS enable_experimental_query_result_cache = true; EXISTS TABLE eligible_test SETTINGS use_query_result_cache = true;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
-- KILL -- KILL
KILL QUERY WHERE query_id='3-857d-4a57-9ee0-3c7da5d60a90' SETTINGS enable_experimental_query_result_cache = true; KILL QUERY WHERE query_id='3-857d-4a57-9ee0-3c7da5d60a90' SETTINGS use_query_result_cache = true;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
-- OPTIMIZE -- OPTIMIZE
OPTIMIZE TABLE eligible_test FINAL SETTINGS enable_experimental_query_result_cache = true; OPTIMIZE TABLE eligible_test FINAL SETTINGS use_query_result_cache = true;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
-- TRUNCATE -- TRUNCATE
TRUNCATE TABLE eligible_test SETTINGS enable_experimental_query_result_cache = true; TRUNCATE TABLE eligible_test SETTINGS use_query_result_cache = true;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
-- RENAME -- RENAME
RENAME TABLE eligible_test TO eligible_test2 SETTINGS enable_experimental_query_result_cache = true; RENAME TABLE eligible_test TO eligible_test2 SETTINGS use_query_result_cache = true;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;

View File

@ -1,30 +1,32 @@
-- Tags: no-parallel -- Tags: no-parallel
-- Tag no-parallel: Messes with internal cache -- Tag no-parallel: Messes with internal cache
SET allow_experimental_query_result_cache = true;
-- Start with empty query result cache (QRC) and query log -- Start with empty query result cache (QRC) and query log
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;
DROP TABLE system.query_log SYNC; DROP TABLE system.query_log SYNC;
-- Run a query with QRC on. The first execution is a QRC miss. -- Run a query with QRC on. The first execution is a QRC miss.
SELECT '---'; SELECT '---';
SELECT 1 SETTINGS enable_experimental_query_result_cache = true; SELECT 1 SETTINGS use_query_result_cache = true;
SYSTEM FLUSH LOGS; SYSTEM FLUSH LOGS;
SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses'] SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses']
FROM system.query_log FROM system.query_log
WHERE type = 'QueryFinish' WHERE type = 'QueryFinish'
AND query = 'SELECT 1 SETTINGS enable_experimental_query_result_cache = true;'; AND query = 'SELECT 1 SETTINGS use_query_result_cache = true;';
-- Run previous query again with query result cache on -- Run previous query again with query result cache on
SELECT '---'; SELECT '---';
SELECT 1 SETTINGS enable_experimental_query_result_cache = true; SELECT 1 SETTINGS use_query_result_cache = true;
DROP TABLE system.query_log SYNC; DROP TABLE system.query_log SYNC;
SYSTEM FLUSH LOGS; SYSTEM FLUSH LOGS;
SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses'] SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses']
FROM system.query_log FROM system.query_log
WHERE type = 'QueryFinish' WHERE type = 'QueryFinish'
AND query = 'SELECT 1 SETTINGS enable_experimental_query_result_cache = true;'; AND query = 'SELECT 1 SETTINGS use_query_result_cache = true;';
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;

View File

@ -1,10 +1,12 @@
-- Tags: no-parallel -- Tags: no-parallel
-- Tag no-parallel: Messes with internal cache -- Tag no-parallel: Messes with internal cache
SET allow_experimental_query_result_cache = true;
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;
-- If an exception is thrown during query execution, no entry must be created in the query result cache -- If an exception is thrown during query execution, no entry must be created in the query result cache
SELECT throwIf(1) SETTINGS enable_experimental_query_result_cache = true; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } SELECT throwIf(1) SETTINGS use_query_result_cache = true; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO }
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;

View File

@ -1,19 +1,21 @@
-- Tags: no-parallel -- Tags: no-parallel
-- Tag no-parallel: Messes with internal cache -- Tag no-parallel: Messes with internal cache
SET allow_experimental_query_result_cache = true;
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;
-- Run a silly query with a non-trivial plan and put the result into the query result cache (QRC) -- Run a silly query with a non-trivial plan and put the result into the query result cache (QRC)
SELECT 1 + number from system.numbers LIMIT 1 SETTINGS enable_experimental_query_result_cache = true; SELECT 1 + number from system.numbers LIMIT 1 SETTINGS use_query_result_cache = true;
SELECT count(*) FROM system.query_result_cache; SELECT count(*) FROM system.query_result_cache;
-- EXPLAIN PLAN should show the same regardless if the result is calculated or read from the QRC -- EXPLAIN PLAN should show the same regardless if the result is calculated or read from the QRC
EXPLAIN PLAN SELECT 1 + number from system.numbers LIMIT 1; EXPLAIN PLAN SELECT 1 + number from system.numbers LIMIT 1;
EXPLAIN PLAN SELECT 1 + number from system.numbers LIMIT 1 SETTINGS enable_experimental_query_result_cache = true; -- (*) EXPLAIN PLAN SELECT 1 + number from system.numbers LIMIT 1 SETTINGS use_query_result_cache = true; -- (*)
-- EXPLAIN PIPELINE should show the same regardless if the result is calculated or read from the QRC -- EXPLAIN PIPELINE should show the same regardless if the result is calculated or read from the QRC
EXPLAIN PIPELINE SELECT 1 + number from system.numbers LIMIT 1; EXPLAIN PIPELINE SELECT 1 + number from system.numbers LIMIT 1;
EXPLAIN PIPELINE SELECT 1 + number from system.numbers LIMIT 1 SETTINGS enable_experimental_query_result_cache = true; -- (*) EXPLAIN PIPELINE SELECT 1 + number from system.numbers LIMIT 1 SETTINGS use_query_result_cache = true; -- (*)
-- Statements (*) must not cache their results into the QRC -- Statements (*) must not cache their results into the QRC
SELECT count(*) FROM system.query_result_cache; SELECT count(*) FROM system.query_result_cache;

View File

@ -1,10 +1,12 @@
-- Tags: no-parallel -- Tags: no-parallel
-- Tag no-parallel: Messes with internal cache -- Tag no-parallel: Messes with internal cache
SET allow_experimental_query_result_cache = true;
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;
-- This creates an entry in the query result cache ... -- This creates an entry in the query result cache ...
SELECT 1 SETTINGS enable_experimental_query_result_cache = true; SELECT 1 SETTINGS use_query_result_cache = true;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;
@ -12,7 +14,7 @@ SYSTEM DROP QUERY RESULT CACHE;
SELECT '---'; SELECT '---';
-- ... but this does not because the query executes much faster than the specified minimum query duration for caching the result -- ... but this does not because the query executes much faster than the specified minimum query duration for caching the result
SELECT 1 SETTINGS enable_experimental_query_result_cache = true, query_result_cache_min_query_duration = 10000; SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_duration = 10000;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;

View File

@ -1,10 +1,12 @@
-- Tags: no-parallel -- Tags: no-parallel
-- Tag no-parallel: Messes with internal cache -- Tag no-parallel: Messes with internal cache
SET allow_experimental_query_result_cache = true;
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;
-- Cache the query result after the 1st query invocation -- Cache the query result after the 1st query invocation
SELECT 1 SETTINGS enable_experimental_query_result_cache = true, query_result_cache_min_query_runs = 0; SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_runs = 0;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
SELECT '---'; SELECT '---';
@ -12,9 +14,9 @@ SELECT '---';
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;
-- Cache the query result after the 2nd query invocation -- Cache the query result after the 2nd query invocation
SELECT 1 SETTINGS enable_experimental_query_result_cache = true, query_result_cache_min_query_runs = 1; SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_runs = 1;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
SELECT 1 SETTINGS enable_experimental_query_result_cache = true, query_result_cache_min_query_runs = 1; SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_runs = 1;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
SELECT '---'; SELECT '---';
@ -22,11 +24,11 @@ SELECT '---';
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;
-- Cache the query result after the 3rd query invocation -- Cache the query result after the 3rd query invocation
SELECT 1 SETTINGS enable_experimental_query_result_cache = true, query_result_cache_min_query_runs = 2; SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_runs = 2;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
SELECT 1 SETTINGS enable_experimental_query_result_cache = true, query_result_cache_min_query_runs = 2; SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_runs = 2;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
SELECT 1 SETTINGS enable_experimental_query_result_cache = true, query_result_cache_min_query_runs = 2; SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_runs = 2;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;

View File

@ -1,16 +1,18 @@
-- Tags: no-parallel -- Tags: no-parallel
-- Tag no-parallel: Messes with internal cache -- Tag no-parallel: Messes with internal cache
SET allow_experimental_query_result_cache = true;
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;
-- rand() is non-deterministic, with default settings no entry in the query result cache should be created -- rand() is non-deterministic, with default settings no entry in the query result cache should be created
SELECT COUNT(rand(1)) SETTINGS enable_experimental_query_result_cache = true; SELECT COUNT(rand(1)) SETTINGS use_query_result_cache = true;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
SELECT '---'; SELECT '---';
-- But an entry can be forced using a setting -- But an entry can be forced using a setting
SELECT COUNT(RAND(1)) SETTINGS enable_experimental_query_result_cache = true, query_result_cache_store_results_of_queries_with_nondeterministic_functions = true; SELECT COUNT(RAND(1)) SETTINGS use_query_result_cache = true, query_result_cache_store_results_of_queries_with_nondeterministic_functions = true;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;

View File

@ -1,21 +1,24 @@
-- Tags: no-parallel -- Tags: no-parallel
-- Tag no-parallel: Messes with internal cache -- Tag no-parallel: Messes with internal cache
SET allow_experimental_query_result_cache = true;
-- Start with empty query result cache (QRC) and query log. -- Start with empty query result cache (QRC) and query log.
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;
DROP TABLE system.query_log SYNC; DROP TABLE system.query_log SYNC;
-- Run query whose result gets cached in the query result cache. -- Run query whose result gets cached in the query result cache.
-- Besides "enable_experimental_query_result_cache", pass two more knobs (one QRC-specific knob and one non-QRC-specific knob). We just care -- Besides "use_query_result_cache", pass two more knobs (one QRC-specific knob and one non-QRC-specific knob). We just care
-- *that* they are passed and not about their effect. -- *that* they are passed and not about their effect.
SELECT 1 SETTINGS enable_experimental_query_result_cache = true, query_result_cache_store_results_of_queries_with_nondeterministic_functions = true, max_threads = 16; SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_store_results_of_queries_with_nondeterministic_functions = true, max_threads = 16;
-- Check that entry in QRC exists -- Check that entry in QRC exists
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
-- Run the same SELECT but with different SETTINGS. We want its result to be served from the QRC. -- Run the same SELECT but with different SETTINGS. We want its result to be served from the QRC (--> passive mode, achieve it by
-- disabling active mode)
SELECT '---'; SELECT '---';
SELECT 1 SETTINGS enable_experimental_query_result_cache_passive_usage = true, max_threads = 16; SELECT 1 SETTINGS use_query_result_cache = true, enable_writes_to_query_result_cache = false, max_threads = 16;
-- Technically, both SELECT queries have different ASTs, leading to different QRC keys. QRC does some AST normalization (erase all -- Technically, both SELECT queries have different ASTs, leading to different QRC keys. QRC does some AST normalization (erase all
-- QRC-related settings) such that the keys match regardless. Verify by checking that the second query caused a QRC hit. -- QRC-related settings) such that the keys match regardless. Verify by checking that the second query caused a QRC hit.
@ -23,6 +26,6 @@ SYSTEM FLUSH LOGS;
SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses'] SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses']
FROM system.query_log FROM system.query_log
WHERE type = 'QueryFinish' WHERE type = 'QueryFinish'
AND query = 'SELECT 1 SETTINGS enable_experimental_query_result_cache_passive_usage = true, max_threads = 16;'; AND query = 'SELECT 1 SETTINGS use_query_result_cache = true, enable_writes_to_query_result_cache = false, max_threads = 16;';
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;

View File

@ -1,6 +1,8 @@
-- Tags: no-parallel -- Tags: no-parallel
-- Tag no-parallel: Messes with internal cache -- Tag no-parallel: Messes with internal cache
SET allow_experimental_query_result_cache = true;
-- Start with empty query result cache (QRC). -- Start with empty query result cache (QRC).
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;
@ -10,14 +12,14 @@ SELECT COUNT(*) FROM system.query_result_cache;
SELECT '-----'; SELECT '-----';
-- Try to retrieve query result from empty QRC using the passive mode. The cache should still be empty (no insert). -- Try to retrieve query result from empty QRC using the passive mode. Do this by disabling the active mode. The cache should still be empty (no insert).
SELECT 1 SETTINGS enable_experimental_query_result_cache_passive_usage = true; SELECT 1 SETTINGS use_query_result_cache = true, enable_writes_to_query_result_cache = false;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
SELECT '-----'; SELECT '-----';
-- Put query result into cache. -- Put query result into cache.
SELECT 1 SETTINGS enable_experimental_query_result_cache = true; SELECT 1 SETTINGS use_query_result_cache = true;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
SELECT '-----'; SELECT '-----';
@ -27,13 +29,13 @@ SELECT '-----';
-- Get rid of log of previous SELECT -- Get rid of log of previous SELECT
DROP TABLE system.query_log SYNC; DROP TABLE system.query_log SYNC;
SELECT 1 SETTINGS enable_experimental_query_result_cache_passive_usage = true; SELECT 1 SETTINGS use_query_result_cache = true, enable_writes_to_query_result_cache = false;
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
SYSTEM FLUSH LOGS; SYSTEM FLUSH LOGS;
SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses'] SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses']
FROM system.query_log FROM system.query_log
WHERE type = 'QueryFinish' WHERE type = 'QueryFinish'
AND query = 'SELECT 1 SETTINGS enable_experimental_query_result_cache_passive_usage = true;'; AND query = 'SELECT 1 SETTINGS use_query_result_cache = true, enable_writes_to_query_result_cache = false;';
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;

View File

@ -2,10 +2,12 @@
-- Tag no-fasttest: Depends on OpenSSL -- Tag no-fasttest: Depends on OpenSSL
-- Tag no-parallel: Messes with internal cache -- Tag no-parallel: Messes with internal cache
SET allow_experimental_query_result_cache = true;
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;
-- Cache a result of a query with secret in the query result cache -- Cache a result of a query with secret in the query result cache
SELECT hex(encrypt('aes-128-ecb', 'plaintext', 'passwordpassword')) SETTINGS enable_experimental_query_result_cache = true; SELECT hex(encrypt('aes-128-ecb', 'plaintext', 'passwordpassword')) SETTINGS use_query_result_cache = true;
-- The secret should not be revealed in system.query_result_cache -- The secret should not be revealed in system.query_result_cache
SELECT query FROM system.query_result_cache; SELECT query FROM system.query_result_cache;

View File

@ -3,10 +3,12 @@
-- Tag long: Test runtime is > 6 sec -- Tag long: Test runtime is > 6 sec
-- Tag no-parallel: Messes with internal cache -- Tag no-parallel: Messes with internal cache
SET allow_experimental_query_result_cache = true;
SYSTEM DROP QUERY RESULT CACHE; SYSTEM DROP QUERY RESULT CACHE;
-- Cache query result into query result cache with a TTL of 3 sec -- Cache query result into query result cache with a TTL of 3 sec
SELECT 1 SETTINGS enable_experimental_query_result_cache = true, query_result_cache_ttl = 3; SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_ttl = 3;
-- Expect one non-stale cache entry -- Expect one non-stale cache entry
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;
@ -20,7 +22,7 @@ SELECT stale FROM system.query_result_cache;
SELECT '---'; SELECT '---';
-- Run same query as before -- Run same query as before
SELECT 1 SETTINGS enable_experimental_query_result_cache = true, query_result_cache_ttl = 3; SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_ttl = 3;
-- The entry should have been refreshed (non-stale) -- The entry should have been refreshed (non-stale)
SELECT COUNT(*) FROM system.query_result_cache; SELECT COUNT(*) FROM system.query_result_cache;

View File

@ -45,4 +45,5 @@ $CLICKHOUSE_CLIENT --query "DETACH TABLE table_with_gap;"
$CLICKHOUSE_CLIENT --query "ATTACH TABLE table_with_gap;" $CLICKHOUSE_CLIENT --query "ATTACH TABLE table_with_gap;"
$CLICKHOUSE_CLIENT --query "SELECT 'parts after detach/attach';" $CLICKHOUSE_CLIENT --query "SELECT 'parts after detach/attach';"
$CLICKHOUSE_CLIENT --query "SYSTEM WAIT LOADING PARTS table_with_gap;"
$CLICKHOUSE_CLIENT --query "SELECT name, rows, active FROM system.parts WHERE table = 'table_with_gap' AND database = currentDatabase();" $CLICKHOUSE_CLIENT --query "SELECT name, rows, active FROM system.parts WHERE table = 'table_with_gap' AND database = currentDatabase();"

View File

@ -0,0 +1,8 @@
1
0
0
1
1
0
0
1

View File

@ -0,0 +1,9 @@
SELECT empty(toIPv6('::'));
SELECT notEmpty(toIPv6('::'));
SELECT empty(toIPv6('::1'));
SELECT notEmpty(toIPv6('::1'));
SELECT empty(toIPv4('0.0.0.0'));
SELECT notEmpty(toIPv4('0.0.0.0'));
SELECT empty(toIPv4('127.0.0.1'));
SELECT notEmpty(toIPv4('127.0.0.1'));

View File

@ -0,0 +1,20 @@
SELECT DISTINCT *
FROM
(
SELECT DISTINCT *
FROM
(
SELECT DISTINCT
0.5,
number % 65536 AS number
FROM numbers(2)
ORDER BY
ignore(ignore(-1, 10.0001)) DESC NULLS LAST,
ignore(2147483648) DESC NULLS FIRST,
ignore(255, 0.0001) ASC,
number ASC
)
ORDER BY number ASC NULLS FIRST
)
WHERE ignore(2147483648)
ORDER BY number DESC

View File

@ -0,0 +1,17 @@
hostname
Not found address of host: MySQL.
port
Connection refused (localhost:0).
9000
secure
1
database
system
user
MySQL: Authentication failed
default
password
default: Authentication failed: password is incorrect, or there is no user with such name.
default
history_file
Cannot create file: /no/such/dir/.history

View File

@ -0,0 +1,86 @@
#!/usr/bin/env bash
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# Overrides
TEST_DATABASE=$CLICKHOUSE_DATABASE
TEST_HOST=${CLICKHOUSE_HOST:-"localhost"}
TEST_PORT=${CLICKHOUSE_PORT_TCP:-9000}
CLICKHOUSE_DATABASE="system"
CLICKHOUSE_HOST=""
CLICKHOUSE_PORT_TCP=""
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
CONFIG=$CLICKHOUSE_TMP/client.xml
cat > $CONFIG <<EOL
<clickhouse>
<host>$TEST_HOST</host>
<port>$TEST_PORT</port>
<database>$TEST_DATABASE</database>
<connections_credentials>
<connection>
<name>test_hostname</name>
<hostname>MySQL</hostname>
</connection>
<connection>
<name>test_port</name>
<hostname>$TEST_HOST</hostname>
<port>0</port>
</connection>
<connection>
<name>test_secure</name>
<hostname>$TEST_HOST</hostname>
<secure>1</secure>
</connection>
<connection>
<name>test_database</name>
<hostname>$TEST_HOST</hostname>
<database>$CLICKHOUSE_DATABASE</database>
</connection>
<connection>
<name>test_user</name>
<hostname>$TEST_HOST</hostname>
<user>MySQL</user>
</connection>
<connection>
<name>test_password</name>
<hostname>$TEST_HOST</hostname>
<password>MySQL</password>
</connection>
<connection>
<name>test_history_file</name>
<hostname>$TEST_HOST</hostname>
<history_file>/no/such/dir/.history</history_file>
</connection>
</connections_credentials>
</clickhouse>
EOL
echo 'hostname'
$CLICKHOUSE_CLIENT --config $CONFIG --host test_hostname -q 'select 1' |& grep -F -o 'Not found address of host: MySQL.'
echo 'port'
$CLICKHOUSE_CLIENT --config $CONFIG --host test_port -q 'select tcpPort()' |& grep -F -o 'Connection refused (localhost:0).'
$CLICKHOUSE_CLIENT --config $CONFIG --host test_port --port $TEST_PORT -q 'select tcpPort()'
echo 'secure'
$CLICKHOUSE_CLIENT --config $CONFIG --host test_secure -q 'select tcpPort()' |& grep -c -F -o -e OPENSSL_internal:WRONG_VERSION_NUMBER -e 'tcp_secure protocol is disabled because poco library was built without NetSSL support.'
echo 'database'
$CLICKHOUSE_CLIENT --config $CONFIG --host test_database -q 'select currentDatabase()'
echo 'user'
$CLICKHOUSE_CLIENT --config $CONFIG --host test_user -q 'select currentUser()' |& grep -F -o 'MySQL: Authentication failed'
$CLICKHOUSE_CLIENT --config $CONFIG --host test_user --user default -q 'select currentUser()'
echo 'password'
$CLICKHOUSE_CLIENT --config $CONFIG --host test_password -q 'select currentUser()' |& grep -F -o 'default: Authentication failed: password is incorrect, or there is no user with such name.'
$CLICKHOUSE_CLIENT --config $CONFIG --host test_password --password "" -q 'select currentUser()'
echo 'history_file'
$CLICKHOUSE_CLIENT --progress off --interactive --config $CONFIG --host test_history_file -q 'select 1' </dev/null |& grep -F -o 'Cannot create file: /no/such/dir/.history'
rm -f "${CONFIG:?}"