Merge remote-tracking branch 'ClickHouse/master' into add-clear-query-cache-tag

Robert Schulze 2024-08-21 09:58:42 +00:00
commit 050af403a7
53 changed files with 318 additions and 280 deletions

View File

@ -47,8 +47,7 @@
"docker/test/stateful": {
"name": "clickhouse/stateful-test",
"dependent": [
"docker/test/stress",
"docker/test/upgrade"
"docker/test/stress"
]
},
"docker/test/unit": {
@ -59,10 +58,6 @@
"name": "clickhouse/stress-test",
"dependent": []
},
"docker/test/upgrade": {
"name": "clickhouse/upgrade-check",
"dependent": []
},
"docker/test/integration/runner": {
"name": "clickhouse/integration-tests-runner",
"dependent": []

View File

@ -93,6 +93,3 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
ENV COMMIT_SHA=''
ENV PULL_REQUEST_NUMBER=''
ENV COPY_CLICKHOUSE_BINARY_TO_OUTPUT=0
COPY run.sh /
CMD ["/bin/bash", "/run.sh"]

View File

@ -10,7 +10,3 @@ RUN apt-get update -y \
npm \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
COPY create.sql /
COPY run.sh /
CMD ["/bin/bash", "/run.sh"]

View File

@ -1 +0,0 @@
../stateless/setup_minio.sh

View File

@ -85,18 +85,6 @@ RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoo
ENV MINIO_ROOT_USER="clickhouse"
ENV MINIO_ROOT_PASSWORD="clickhouse"
ENV EXPORT_S3_STORAGE_POLICIES=1
ENV CLICKHOUSE_GRPC_CLIENT="/usr/share/clickhouse-utils/grpc-client/clickhouse-grpc-client.py"
RUN npm install -g azurite@3.30.0 \
&& npm install -g tslib && npm install -g node
COPY run.sh /
COPY setup_minio.sh /
COPY setup_hdfs_minicluster.sh /
COPY attach_gdb.lib /
COPY utils.lib /
# We store stress_tests.lib in stateless image to avoid duplication of this file in stress and upgrade tests
COPY stress_tests.lib /
CMD ["/bin/bash", "/run.sh"]

View File

@ -22,8 +22,5 @@ RUN apt-get update -y \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
COPY run.sh /
ENV EXPORT_S3_STORAGE_POLICIES=1
CMD ["/bin/bash", "/run.sh"]

View File

@ -1,29 +0,0 @@
# rebuild in #33610
# docker build -t clickhouse/upgrade-check .
ARG FROM_TAG=latest
FROM clickhouse/stateful-test:$FROM_TAG
RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \
apt-get install --yes --no-install-recommends \
bash \
tzdata \
parallel \
expect \
python3 \
python3-lxml \
python3-termcolor \
python3-requests \
curl \
sudo \
openssl \
netcat-openbsd \
brotli \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
COPY run.sh /
ENV EXPORT_S3_STORAGE_POLICIES=1
CMD ["/bin/bash", "/run.sh"]

View File

@ -56,7 +56,5 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
COPY process_functional_tests_result.py /
COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
ENV PATH="/opt/gdb/bin:${PATH}"

View File

@ -240,7 +240,7 @@ libhdfs3 support HDFS namenode HA.
## Storage Settings {#storage-settings}
- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows truncating the file before inserting into it. Disabled by default.
- [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows creating a new file on each insert if the format has a suffix. Disabled by default.
- [hdfs_create_new_file_on_insert](/docs/en/operations/settings/settings.md#hdfs_create_new_file_on_insert) - allows creating a new file on each insert if the format has a suffix. Disabled by default (see the example below).
- [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows skipping empty files while reading. Disabled by default.
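To make the renamed setting concrete, here is a minimal sketch (hypothetical HDFS URI and table name; behavior as described in the list above):

```sql
-- Hypothetical example of hdfs_create_new_file_on_insert.
CREATE TABLE hdfs_data (id UInt32)
ENGINE = HDFS('hdfs://hdfs1:9000/data.Parquet', 'Parquet');

INSERT INTO hdfs_data VALUES (1);

-- Without the setting, this second INSERT would fail because data.Parquet
-- already exists; with it, ClickHouse writes data.Parquet.1 instead.
INSERT INTO hdfs_data SETTINGS hdfs_create_new_file_on_insert = 1 VALUES (2);
```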
**See Also**

View File

@ -225,7 +225,7 @@ CREATE TABLE table_with_asterisk (name String, value UInt32)
## Storage Settings {#storage-settings}
- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows truncating the file before inserting into it. Disabled by default.
- [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows creating a new file on each insert if the format has a suffix. Disabled by default.
- [s3_create_new_file_on_insert](/docs/en/operations/settings/settings.md#s3_create_new_file_on_insert) - allows creating a new file on each insert if the format has a suffix. Disabled by default.
- [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows skipping empty files while reading. Disabled by default.
## S3-related Settings {#settings}

View File

@ -116,7 +116,7 @@ SELECT * from HDFS('hdfs://hdfs1:9000/data/path/date=*/country=*/code=*/*.parque
## Storage Settings {#storage-settings}
- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows truncating the file before inserting into it. Disabled by default.
- [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows creating a new file on each insert if the format has a suffix. Disabled by default.
- [hdfs_create_new_file_on_insert](/docs/en/operations/settings/settings.md#hdfs_create_new_file_on_insert) - allows creating a new file on each insert if the format has a suffix. Disabled by default.
- [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows skipping empty files while reading. Disabled by default.
- [ignore_access_denied_multidirectory_globs](/docs/en/operations/settings/settings.md#ignore_access_denied_multidirectory_globs) - allows ignoring permission-denied errors for multi-directory globs.

View File

@ -290,7 +290,7 @@ SELECT * from s3('s3://data/path/date=*/country=*/code=*/*.parquet') where _date
## Storage Settings {#storage-settings}
- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows truncating the file before inserting into it. Disabled by default.
- [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows creating a new file on each insert if the format has a suffix. Disabled by default.
- [s3_create_new_file_on_insert](/docs/en/operations/settings/settings.md#s3_create_new_file_on_insert) - allows creating a new file on each insert if the format has a suffix. Disabled by default (see the example below).
- [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows skipping empty files while reading. Disabled by default.
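As with the HDFS engine example above, a brief sketch of `s3_create_new_file_on_insert` through the s3 table function (made-up bucket and path; credentials omitted):

```sql
INSERT INTO FUNCTION s3('https://my-bucket.s3.amazonaws.com/data.csv', 'CSV', 'n UInt32')
VALUES (1);

-- The second insert writes data.csv.1 next to the existing data.csv
-- instead of raising an error:
INSERT INTO FUNCTION s3('https://my-bucket.s3.amazonaws.com/data.csv', 'CSV', 'n UInt32')
SETTINGS s3_create_new_file_on_insert = 1
VALUES (2);
```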
**See Also**

View File

@ -1164,9 +1164,6 @@ void Client::processOptions(const OptionsDescription & options_description,
/// (There is no need to copy the context because clickhouse-client has no background tasks so it won't use that context in parallel.)
client_context = global_context;
initClientContext();
/// Allow unknown settings to be passed through to the server.
client_context->getAccessControl().allowAllSettings();
}

View File

@ -1922,7 +1922,7 @@ try
auto & access_control = global_context->getAccessControl();
try
{
access_control.setupFromMainConfig(config(), config_path, [&] { return global_context->getZooKeeper(); });
access_control.setUpFromMainConfig(config(), config_path, [&] { return global_context->getZooKeeper(); });
}
catch (...)
{

View File

@ -280,7 +280,7 @@ void AccessControl::shutdown()
}
void AccessControl::setupFromMainConfig(const Poco::Util::AbstractConfiguration & config_, const String & config_path_,
void AccessControl::setUpFromMainConfig(const Poco::Util::AbstractConfiguration & config_, const String & config_path_,
const zkutil::GetZooKeeper & get_zookeeper_function_)
{
if (config_.has("custom_settings_prefixes"))
@ -868,10 +868,4 @@ const ExternalAuthenticators & AccessControl::getExternalAuthenticators() const
return *external_authenticators;
}
void AccessControl::allowAllSettings()
{
custom_settings_prefixes->registerPrefixes({""});
}
}

View File

@ -57,7 +57,7 @@ public:
void shutdown() override;
/// Initializes access storage (user directories).
void setupFromMainConfig(const Poco::Util::AbstractConfiguration & config_, const String & config_path_,
void setUpFromMainConfig(const Poco::Util::AbstractConfiguration & config_, const String & config_path_,
const zkutil::GetZooKeeper & get_zookeeper_function_);
/// Parses access entities from a configuration loaded from users.xml.
@ -238,9 +238,6 @@ public:
/// Gets manager of notifications.
AccessChangesNotifier & getChangesNotifier();
/// Allow all setting names - this can be used in clients to pass unknown settings through to the server.
void allowAllSettings();
private:
class ContextAccessCache;
class CustomSettingsPrefixes;

View File

@ -219,8 +219,8 @@ void SettingsConstraints::clamp(const Settings & current_settings, SettingsChang
});
}
template <typename SettingsT>
bool getNewValueToCheck(const SettingsT & current_settings, SettingChange & change, Field & new_value, bool throw_on_failure)
template <class T>
bool getNewValueToCheck(const T & current_settings, SettingChange & change, Field & new_value, bool throw_on_failure)
{
Field current_value;
bool has_current_value = current_settings.tryGet(change.name, current_value);
@ -230,12 +230,12 @@ bool getNewValueToCheck(const SettingsT & current_settings, SettingChange & chan
return false;
if (throw_on_failure)
new_value = SettingsT::castValueUtil(change.name, change.value);
new_value = T::castValueUtil(change.name, change.value);
else
{
try
{
new_value = SettingsT::castValueUtil(change.name, change.value);
new_value = T::castValueUtil(change.name, change.value);
}
catch (...)
{

View File

@ -58,7 +58,6 @@
#include <QueryPipeline/QueryPipelineBuilder.h>
#include <Interpreters/ReplaceQueryParameterVisitor.h>
#include <Interpreters/ProfileEventsExt.h>
#include <Interpreters/InterpreterSetQuery.h>
#include <IO/WriteBufferFromOStream.h>
#include <IO/WriteBufferFromFileDescriptor.h>
#include <IO/CompressionMethod.h>
@ -1609,14 +1608,14 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des
auto metadata = storage->getInMemoryMetadataPtr();
QueryPlan plan;
storage->read(
plan,
sample.getNames(),
storage->getStorageSnapshot(metadata, client_context),
query_info,
client_context,
{},
client_context->getSettingsRef().max_block_size,
getNumberOfPhysicalCPUCores());
auto builder = plan.buildQueryPipeline(
QueryPlanOptimizationSettings::fromContext(client_context),
@ -1893,19 +1892,48 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
profile_events.watch.restart();
{
/// Temporarily apply query settings to the context.
Settings old_settings = client_context->getSettingsCopy();
SCOPE_EXIT_SAFE(
{
client_context->setSettings(old_settings);
/// Temporarily apply query settings to context.
std::optional<Settings> old_settings;
SCOPE_EXIT_SAFE({
if (old_settings)
client_context->setSettings(*old_settings);
});
InterpreterSetQuery::applySettingsFromQuery(parsed_query, client_context);
auto apply_query_settings = [&](const IAST & settings_ast)
{
if (!old_settings)
old_settings.emplace(client_context->getSettingsRef());
client_context->applySettingsChanges(settings_ast.as<ASTSetQuery>()->changes);
client_context->resetSettingsToDefaultValue(settings_ast.as<ASTSetQuery>()->default_settings);
};
const auto * insert = parsed_query->as<ASTInsertQuery>();
if (const auto * select = parsed_query->as<ASTSelectQuery>(); select && select->settings())
apply_query_settings(*select->settings());
else if (const auto * select_with_union = parsed_query->as<ASTSelectWithUnionQuery>())
{
const ASTs & children = select_with_union->list_of_selects->children;
if (!children.empty())
{
// On the client it is enough to apply settings only for the
// last SELECT, since the only thing that is important to apply
// on the client is format settings.
const auto * last_select = children.back()->as<ASTSelectQuery>();
if (last_select && last_select->settings())
{
apply_query_settings(*last_select->settings());
}
}
}
else if (const auto * query_with_output = parsed_query->as<ASTQueryWithOutput>(); query_with_output && query_with_output->settings_ast)
apply_query_settings(*query_with_output->settings_ast);
else if (insert && insert->settings_ast)
apply_query_settings(*insert->settings_ast);
if (!connection->checkConnected(connection_parameters.timeouts))
connect();
ASTPtr input_function;
const auto * insert = parsed_query->as<ASTInsertQuery>();
if (insert && insert->select)
insert->tryFindInputFunction(input_function);
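In effect, the client now inspects the parsed AST itself to decide which SETTINGS to apply locally: a SELECT's own SETTINGS, the last SELECT of a UNION chain, a trailing SETTINGS clause after the output format, or an INSERT's SETTINGS. A hedged illustration of the UNION case (the query is made up; output_format_csv_crlf_end_of_line is an existing format setting):

```sql
-- Only format settings matter on the client, and the output format is shared
-- by the whole UNION chain, so applying the last SELECT's SETTINGS is enough:
SELECT 1
UNION ALL
SELECT 2 SETTINGS output_format_csv_crlf_end_of_line = 1
FORMAT CSV;
```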

View File

@ -35,6 +35,7 @@ namespace ErrorCodes
{
extern const int UNEXPECTED_AST_STRUCTURE;
extern const int BAD_ARGUMENTS;
extern const int CANNOT_COMPILE_REGEXP;
}
DataTypeObject::DataTypeObject(
@ -51,6 +52,17 @@ DataTypeObject::DataTypeObject(
, max_dynamic_paths(max_dynamic_paths_)
, max_dynamic_types(max_dynamic_types_)
{
/// Check if regular expressions are valid.
for (const auto & regexp_str : path_regexps_to_skip)
{
re2::RE2::Options options;
/// Don't log errors to stderr.
options.set_log_errors(false);
auto regexp = re2::RE2(regexp_str, options);
if (!regexp.ok())
throw Exception(ErrorCodes::CANNOT_COMPILE_REGEXP, "Invalid regexp '{}': {}", regexp_str, regexp.error());
}
for (const auto & [typed_path, type] : typed_paths)
{
for (const auto & path_to_skip : paths_to_skip)
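The practical effect: an invalid SKIP REGEXP pattern in a JSON column definition now fails at CREATE time with CANNOT_COMPILE_REGEXP. A minimal sketch, modeled on the new stateless test at the end of this diff (the valid-pattern line is an added assumption):

```sql
SET allow_experimental_json_type = 1;

-- An unbalanced '(' cannot be compiled by RE2, so the CREATE fails up front:
CREATE TABLE t_bad (json JSON(SKIP REGEXP '(')) ENGINE = Memory; -- {serverError CANNOT_COMPILE_REGEXP}

-- A well-formed pattern is accepted as before:
CREATE TABLE t_ok (json JSON(SKIP REGEXP 'tmp.*')) ENGINE = Memory;
```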

View File

@ -9,7 +9,6 @@
#include <Parsers/ASTQueryWithOutput.h>
#include <Parsers/ASTSelectWithUnionQuery.h>
namespace DB
{
@ -46,7 +45,9 @@ static void applySettingsFromSelectWithUnion(const ASTSelectWithUnionQuery & sel
// It is flattened later, when we process UNION ALL/DISTINCT.
const auto * last_select = children.back()->as<ASTSelectQuery>();
if (last_select && last_select->settings())
InterpreterSetQuery(last_select->settings(), context).executeForCurrentContext(/* ignore_setting_constraints= */ false);
{
InterpreterSetQuery(last_select->settings(), context).executeForCurrentContext();
}
}
void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMutablePtr context_)
@ -54,20 +55,10 @@ void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMuta
if (!ast)
return;
/// First apply the outermost settings. Then they could be overridden by deeper settings.
if (const auto * query_with_output = dynamic_cast<const ASTQueryWithOutput *>(ast.get()))
{
if (query_with_output->settings_ast)
InterpreterSetQuery(query_with_output->settings_ast, context_).executeForCurrentContext(/* ignore_setting_constraints= */ false);
if (const auto * create_query = ast->as<ASTCreateQuery>(); create_query && create_query->select)
applySettingsFromSelectWithUnion(create_query->select->as<ASTSelectWithUnionQuery &>(), context_);
}
if (const auto * select_query = ast->as<ASTSelectQuery>())
{
if (auto new_settings = select_query->settings())
InterpreterSetQuery(new_settings, context_).executeForCurrentContext(/* ignore_setting_constraints= */ false);
InterpreterSetQuery(new_settings, context_).executeForCurrentContext();
}
else if (const auto * select_with_union_query = ast->as<ASTSelectWithUnionQuery>())
{
@ -76,15 +67,28 @@ void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMuta
else if (const auto * explain_query = ast->as<ASTExplainQuery>())
{
if (explain_query->settings_ast)
InterpreterSetQuery(explain_query->settings_ast, context_).executeForCurrentContext(/* ignore_setting_constraints= */ false);
InterpreterSetQuery(explain_query->settings_ast, context_).executeForCurrentContext();
applySettingsFromQuery(explain_query->getExplainedQuery(), context_);
}
else if (const auto * query_with_output = dynamic_cast<const ASTQueryWithOutput *>(ast.get()))
{
if (query_with_output->settings_ast)
InterpreterSetQuery(query_with_output->settings_ast, context_).executeForCurrentContext();
if (const auto * create_query = ast->as<ASTCreateQuery>())
{
if (create_query->select)
{
applySettingsFromSelectWithUnion(create_query->select->as<ASTSelectWithUnionQuery &>(), context_);
}
}
}
else if (auto * insert_query = ast->as<ASTInsertQuery>())
{
context_->setInsertFormat(insert_query->format);
if (insert_query->settings_ast)
InterpreterSetQuery(insert_query->settings_ast, context_).executeForCurrentContext(/* ignore_setting_constraints= */ false);
InterpreterSetQuery(insert_query->settings_ast, context_).executeForCurrentContext();
}
}

View File

@ -23,7 +23,7 @@ public:
/** Set setting for current context (query context).
* It is used for interpretation of SETTINGS clause in SELECT query.
*/
void executeForCurrentContext(bool ignore_setting_constraints);
void executeForCurrentContext(bool ignore_setting_constraints = false);
bool supportsTransactions() const override { return true; }

View File

@ -25,6 +25,7 @@
#include <Parsers/ParserTablePropertiesQuery.h>
#include <Parsers/ParserWatchQuery.h>
#include <Parsers/ParserDescribeCacheQuery.h>
#include <Parsers/QueryWithOutputSettingsPushDownVisitor.h>
#include <Parsers/Access/ParserShowAccessEntitiesQuery.h>
#include <Parsers/Access/ParserShowAccessQuery.h>
#include <Parsers/Access/ParserShowCreateAccessEntityQuery.h>
@ -151,55 +152,37 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
}
/// These two sections are allowed in an arbitrary order.
ParserKeyword s_format(Keyword::FORMAT);
ParserKeyword s_settings(Keyword::SETTINGS);
/** Why: let's take the following example:
* SELECT 1 UNION ALL SELECT 2 FORMAT TSV
* Each subquery can be put in parentheses and have its own settings:
* (SELECT 1 SETTINGS a=b) UNION ALL (SELECT 2 SETTINGS c=d) FORMAT TSV
* And the whole query can have settings:
* (SELECT 1 SETTINGS a=b) UNION ALL (SELECT 2 SETTINGS c=d) FORMAT TSV SETTINGS e=f
* A single query with output is parsed in the same way as the UNION ALL chain:
* SELECT 1 SETTINGS a=b FORMAT TSV SETTINGS e=f
* So while these forms have a slightly different meaning, they both exist:
* SELECT 1 SETTINGS a=b FORMAT TSV
* SELECT 1 FORMAT TSV SETTINGS e=f
* And due to this effect, users expect that FORMAT and SETTINGS may go in an arbitrary order.
* But while this works:
* (SELECT 1) UNION ALL (SELECT 2) FORMAT TSV SETTINGS d=f
* This does not work automatically, unless we explicitly allow different orders:
* (SELECT 1) UNION ALL (SELECT 2) SETTINGS d=f FORMAT TSV
* Inevitably, we also allow this:
* SELECT 1 SETTINGS a=b SETTINGS d=f FORMAT TSV
* ^^^^^^^^^^^^^^^^^^^^^
* Because this part is consumed into ASTSelectWithUnionQuery
* and the rest into ASTQueryWithOutput.
*/
for (size_t i = 0; i < 2; ++i)
if (s_format.ignore(pos, expected))
{
if (!query_with_output.format && s_format.ignore(pos, expected))
{
ParserIdentifier format_p;
ParserIdentifier format_p;
if (!format_p.parse(pos, query_with_output.format, expected))
return false;
setIdentifierSpecial(query_with_output.format);
if (!format_p.parse(pos, query_with_output.format, expected))
return false;
setIdentifierSpecial(query_with_output.format);
query_with_output.children.push_back(query_with_output.format);
}
else if (!query_with_output.settings_ast && s_settings.ignore(pos, expected))
query_with_output.children.push_back(query_with_output.format);
}
// SETTINGS key1 = value1, key2 = value2, ...
ParserKeyword s_settings(Keyword::SETTINGS);
if (!query_with_output.settings_ast && s_settings.ignore(pos, expected))
{
ParserSetQuery parser_settings(true);
if (!parser_settings.parse(pos, query_with_output.settings_ast, expected))
return false;
query_with_output.children.push_back(query_with_output.settings_ast);
// SETTINGS after FORMAT are not parsed by the SELECT parser (ParserSelectQuery).
// Pass them along manually, to be applied in InterpreterSelectQuery::initSettings().
if (query->as<ASTSelectWithUnionQuery>())
{
// SETTINGS key1 = value1, key2 = value2, ...
ParserSetQuery parser_settings(true);
if (!parser_settings.parse(pos, query_with_output.settings_ast, expected))
return false;
query_with_output.children.push_back(query_with_output.settings_ast);
auto settings = query_with_output.settings_ast->clone();
assert_cast<ASTSetQuery *>(settings.get())->print_in_format = false;
QueryWithOutputSettingsPushDownVisitor::Data data{settings};
QueryWithOutputSettingsPushDownVisitor(data).visit(query);
}
else
break;
}
node = std::move(query);
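For orientation, the ordering behavior this parser change restores can be seen in the modified stateless test later in this diff; in short (illustrative queries):

```sql
-- SETTINGS after FORMAT is consumed here in ParserQueryWithOutput (the SELECT
-- parser never sees it) and, for UNION chains, pushed down so it still applies:
SELECT DISTINCT blockSize() FROM numbers(2) FORMAT CSV SETTINGS max_block_size = 1;

-- When SETTINGS appears both before and after FORMAT, the trailing clause
-- wins, since it is pushed down last:
SELECT DISTINCT blockSize() FROM numbers(2) SETTINGS max_block_size = 2 FORMAT CSV SETTINGS max_block_size = 1;
```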

View File

@ -0,0 +1,56 @@
#include <Common/SettingsChanges.h>
#include <Parsers/QueryWithOutputSettingsPushDownVisitor.h>
#include <Parsers/ASTSelectWithUnionQuery.h>
#include <Parsers/ASTSelectQuery.h>
#include <Parsers/ASTSetQuery.h>
#include <Parsers/ASTSubquery.h>
#include <iterator>
#include <algorithm>
namespace DB
{
bool QueryWithOutputSettingsPushDownMatcher::needChildVisit(ASTPtr & node, const ASTPtr & child)
{
if (node->as<ASTSelectWithUnionQuery>())
return true;
if (node->as<ASTSubquery>())
return true;
if (child->as<ASTSelectQuery>())
return true;
return false;
}
void QueryWithOutputSettingsPushDownMatcher::visit(ASTPtr & ast, Data & data)
{
if (auto * select_query = ast->as<ASTSelectQuery>())
visit(*select_query, ast, data);
}
void QueryWithOutputSettingsPushDownMatcher::visit(ASTSelectQuery & select_query, ASTPtr &, Data & data)
{
ASTPtr select_settings_ast = select_query.settings();
if (!select_settings_ast)
{
select_query.setExpression(ASTSelectQuery::Expression::SETTINGS, data.settings_ast->clone());
return;
}
SettingsChanges & select_settings = select_settings_ast->as<ASTSetQuery &>().changes;
SettingsChanges & settings = data.settings_ast->as<ASTSetQuery &>().changes;
for (auto & setting : settings)
{
auto it = std::find_if(select_settings.begin(), select_settings.end(), [&](auto & select_setting)
{
return select_setting.name == setting.name;
});
if (it == select_settings.end())
select_settings.push_back(setting);
else
it->value = setting.value;
}
}
}

View File

@ -0,0 +1,39 @@
#pragma once
#include <Parsers/IAST.h>
#include <Interpreters/InDepthNodeVisitor.h>
namespace DB
{
class ASTSelectQuery;
struct SettingChange;
class SettingsChanges;
/// Push down a SETTINGS clause that goes after FORMAT into the SELECT query:
/// (settings after FORMAT are parsed separately, not in ParserSelectQuery but in ParserQueryWithOutput)
///
/// SELECT 1 FORMAT Null SETTINGS max_block_size = 1 ->
/// SELECT 1 SETTINGS max_block_size = 1 FORMAT Null SETTINGS max_block_size = 1
///
/// Otherwise settings after FORMAT will not be applied.
class QueryWithOutputSettingsPushDownMatcher
{
public:
using Visitor = InDepthNodeVisitor<QueryWithOutputSettingsPushDownMatcher, true>;
struct Data
{
const ASTPtr & settings_ast;
};
static bool needChildVisit(ASTPtr & node, const ASTPtr & child);
static void visit(ASTPtr & ast, Data & data);
private:
static void visit(ASTSelectQuery &, ASTPtr &, Data &);
};
using QueryWithOutputSettingsPushDownVisitor = QueryWithOutputSettingsPushDownMatcher::Visitor;
}

View File

@ -369,7 +369,7 @@ MergeTreeDataSelectSamplingData MergeTreeDataSelectExecutor::getSampling(
/// If SAMPLE and FINAL are used together, there is no need to calculate the sampling expression twice.
/// The first time it was calculated for FINAL, because the sampling key is part of the PK.
/// So, assume that we already have the calculated column.
ASTPtr sampling_key_ast = metadata_snapshot->getSamplingKeyAST();
ASTPtr sampling_key_ast;
if (final)
{
@ -377,6 +377,12 @@ MergeTreeDataSelectSamplingData MergeTreeDataSelectExecutor::getSampling(
/// We do spoil available_real_columns here, but it is not used later.
available_real_columns.emplace_back(sampling_key.column_names[0], std::move(sampling_column_type));
}
else
{
sampling_key_ast = metadata_snapshot->getSamplingKeyAST()->clone();
}
chassert(sampling_key_ast != nullptr);
if (has_lower_limit)
{
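The two branches correspond to queries like the following sketch (hypothetical table; SAMPLE BY must be part of the primary key, and FINAL needs a merging engine):

```sql
CREATE TABLE visits (id UInt64, v UInt8)
ENGINE = ReplacingMergeTree
ORDER BY (id, intHash32(id))
SAMPLE BY intHash32(id);

-- Plain SAMPLE takes the else-branch: the sampling key AST is cloned here.
SELECT count() FROM visits SAMPLE 1 / 10;

-- SAMPLE together with FINAL reuses the sampling column already computed for
-- FINAL (the sampling key is part of the PK), so it is not calculated twice.
SELECT count() FROM visits FINAL SAMPLE 1 / 10;
```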

View File

@ -15,4 +15,5 @@ warn_return_any = True
no_implicit_reexport = True
strict_equality = True
extra_checks = True
ignore_missing_imports = True
logging-fstring-interpolation = False

View File

@ -535,7 +535,10 @@ class CI:
JobNames.FAST_TEST: JobConfig(
pr_only=True,
digest=DigestConfig(
include_paths=["./tests/queries/0_stateless/"],
include_paths=[
"./tests/queries/0_stateless/",
"./tests/docker_scripts/",
],
exclude_files=[".md"],
docker=["clickhouse/fasttest"],
),

View File

@ -415,6 +415,7 @@ class CommonJobConfigs:
"./tests/clickhouse-test",
"./tests/config",
"./tests/*.txt",
"./tests/docker_scripts/",
],
exclude_files=[".md"],
docker=["clickhouse/stateless-test"],
@ -431,6 +432,7 @@ class CommonJobConfigs:
"./tests/clickhouse-test",
"./tests/config",
"./tests/*.txt",
"./tests/docker_scripts/",
],
exclude_files=[".md"],
docker=["clickhouse/stateful-test"],
@ -448,6 +450,7 @@ class CommonJobConfigs:
"./tests/clickhouse-test",
"./tests/config",
"./tests/*.txt",
"./tests/docker_scripts/",
],
exclude_files=[".md"],
docker=["clickhouse/stress-test"],
@ -459,9 +462,9 @@ class CommonJobConfigs:
UPGRADE_TEST = JobConfig(
job_name_keyword="upgrade",
digest=DigestConfig(
include_paths=["./tests/ci/upgrade_check.py"],
include_paths=["./tests/ci/upgrade_check.py", "./tests/docker_scripts/"],
exclude_files=[".md"],
docker=["clickhouse/upgrade-check"],
docker=["clickhouse/stress-test"],
),
run_command="upgrade_check.py",
runner_type=Runners.STRESS_TESTER,

View File

@ -93,7 +93,7 @@ def process_single_image(
results = [] # type: TestResults
for ver in versions:
stopwatch = Stopwatch()
for i in range(5):
for i in range(2):
success, build_log = build_and_push_one_image(
image, ver, additional_cache, push, from_tag
)

View File

@ -31,15 +31,14 @@ def get_fasttest_cmd(
"--security-opt seccomp=unconfined " # required to issue io_uring sys-calls
"--network=host " # required to get access to IAM credentials
f"-e FASTTEST_WORKSPACE=/fasttest-workspace -e FASTTEST_OUTPUT=/test_output "
f"-e FASTTEST_SOURCE=/ClickHouse "
f"-e FASTTEST_SOURCE=/repo "
f"-e FASTTEST_CMAKE_FLAGS='-DCOMPILER_CACHE=sccache' "
f"-e PULL_REQUEST_NUMBER={pr_number} -e COMMIT_SHA={commit_sha} "
f"-e COPY_CLICKHOUSE_BINARY_TO_OUTPUT=1 "
f"-e SCCACHE_BUCKET={S3_BUILDS_BUCKET} -e SCCACHE_S3_KEY_PREFIX=ccache/sccache "
"-e stage=clone_submodules "
f"--volume={workspace}:/fasttest-workspace --volume={repo_path}:/ClickHouse "
f"--volume={repo_path}/tests/analyzer_tech_debt.txt:/analyzer_tech_debt.txt "
f"--volume={output_path}:/test_output {image}"
f"--volume={workspace}:/fasttest-workspace --volume={repo_path}:/repo "
f"--volume={output_path}:/test_output {image} /repo/tests/docker_scripts/fasttest_runner.sh"
)

View File

@ -119,24 +119,24 @@ def get_run_command(
envs += [f"-e {e}" for e in additional_envs]
env_str = " ".join(envs)
volume_with_broken_test = (
f"--volume={repo_path}/tests/analyzer_tech_debt.txt:/analyzer_tech_debt.txt "
if "analyzer" not in check_name
else ""
)
if "stateful" in check_name.lower():
run_script = "/repo/tests/docker_scripts/stateful_runner.sh"
elif "stateless" in check_name.lower():
run_script = "/repo/tests/docker_scripts/stateless_runner.sh"
else:
assert False
return (
f"docker run --rm --name func-tester --volume={builds_path}:/package_folder "
# For dmesg and sysctl
"--privileged "
f"{ci_logs_args}"
f"--volume={repo_path}/tests:/usr/share/clickhouse-test "
f"--volume={repo_path}/utils/grpc-client:/usr/share/clickhouse-utils/grpc-client "
f"{volume_with_broken_test}"
f"{ci_logs_args} "
f"--volume={repo_path}:/repo "
f"--volume={result_path}:/test_output "
f"--volume={server_log_path}:/var/log/clickhouse-server "
"--security-opt seccomp=unconfined " # required to issue io_uring sys-calls
f"--cap-add=SYS_PTRACE {env_str} {additional_options_str} {image}"
f"--cap-add=SYS_PTRACE {env_str} {additional_options_str} {image} {run_script}"
)

View File

@ -57,10 +57,16 @@ def get_run_command(
additional_envs: List[str],
ci_logs_args: str,
image: DockerImage,
upgrade_check: bool,
) -> str:
envs = [f"-e {e}" for e in additional_envs]
env_str = " ".join(envs)
if upgrade_check:
run_script = "/repo/tests/docker_scripts/upgrade_runner.sh"
else:
run_script = "/repo/tests/docker_scripts/stress_runner.sh"
cmd = (
"docker run --cap-add=SYS_PTRACE "
# For dmesg and sysctl
@ -70,8 +76,8 @@ def get_run_command(
f"{ci_logs_args}"
f"--volume={build_path}:/package_folder "
f"--volume={result_path}:/test_output "
f"--volume={repo_tests_path}:/usr/share/clickhouse-test "
f"--volume={server_log_path}:/var/log/clickhouse-server {env_str} {image} "
f"--volume={repo_tests_path}/..:/repo "
f"--volume={server_log_path}:/var/log/clickhouse-server {env_str} {image} {run_script}"
)
return cmd
@ -128,7 +134,7 @@ def process_results(
return state, description, test_results, additional_files
def run_stress_test(docker_image_name: str) -> None:
def run_stress_test(upgrade_check: bool = False) -> None:
logging.basicConfig(level=logging.INFO)
for handler in logging.root.handlers:
# pylint: disable=protected-access
@ -148,7 +154,7 @@ def run_stress_test(docker_image_name: str) -> None:
pr_info = PRInfo()
docker_image = pull_image(get_docker_image(docker_image_name))
docker_image = pull_image(get_docker_image("clickhouse/stress-test"))
packages_path = temp_path / "packages"
packages_path.mkdir(parents=True, exist_ok=True)
@ -177,6 +183,7 @@ def run_stress_test(docker_image_name: str) -> None:
additional_envs,
ci_logs_args,
docker_image,
upgrade_check,
)
logging.info("Going to run stress test: %s", run_command)
@ -208,4 +215,4 @@ def run_stress_test(docker_image_name: str) -> None:
if __name__ == "__main__":
run_stress_test("clickhouse/stress-test")
run_stress_test()

View File

@ -1,4 +1,4 @@
import stress_check
if __name__ == "__main__":
stress_check.run_stress_test("clickhouse/upgrade-check")
stress_check.run_stress_test(upgrade_check=True)

View File

@ -1,7 +1,7 @@
#!/bin/bash
# shellcheck source=./utils.lib
source /utils.lib
source /repo/tests/docker_scripts/utils.lib
function attach_gdb_to_clickhouse()
{

View File

@ -325,7 +325,7 @@ case "$stage" in
;&
"run_tests")
run_tests ||:
/process_functional_tests_result.py --in-results-dir "$FASTTEST_OUTPUT/" \
/repo/tests/docker_scripts/process_functional_tests_result.py --in-results-dir "$FASTTEST_OUTPUT/" \
--out-results-file "$FASTTEST_OUTPUT/test_results.tsv" \
--out-status-file "$FASTTEST_OUTPUT/check_status.tsv" || echo -e "failure\tCannot parse results" > "$FASTTEST_OUTPUT/check_status.tsv"
;;

View File

@ -32,7 +32,7 @@ def process_test_log(log_path, broken_tests):
success_finish = False
test_results = []
test_end = True
with open(log_path, "r") as test_file:
with open(log_path, "r", encoding="utf-8") as test_file:
for line in test_file:
original_line = line
line = line.strip()
@ -150,7 +150,7 @@ def process_result(result_path, broken_tests):
if result_path and os.path.exists(result_path):
(
total,
_total,
skipped,
unknown,
failed,
@ -191,11 +191,11 @@ def process_result(result_path, broken_tests):
else:
description = ""
description += "fail: {}, passed: {}".format(failed, success)
description += f"fail: {failed}, passed: {success}"
if skipped != 0:
description += ", skipped: {}".format(skipped)
description += f", skipped: {skipped}"
if unknown != 0:
description += ", unknown: {}".format(unknown)
description += f", unknown: {unknown}"
else:
state = "failure"
description = "Output log doesn't exist"
@ -205,10 +205,10 @@ def process_result(result_path, broken_tests):
def write_results(results_file, status_file, results, status):
with open(results_file, "w") as f:
with open(results_file, "w", encoding="utf-8") as f:
out = csv.writer(f, delimiter="\t")
out.writerows(results)
with open(status_file, "w") as f:
with open(status_file, "w", encoding="utf-8") as f:
out = csv.writer(f, delimiter="\t")
out.writerow(status)
@ -221,15 +221,15 @@ if __name__ == "__main__":
parser.add_argument("--in-results-dir", default="/test_output/")
parser.add_argument("--out-results-file", default="/test_output/test_results.tsv")
parser.add_argument("--out-status-file", default="/test_output/check_status.tsv")
parser.add_argument("--broken-tests", default="/analyzer_tech_debt.txt")
parser.add_argument("--broken-tests", default="/repo/tests/analyzer_tech_debt.txt")
args = parser.parse_args()
broken_tests = list()
broken_tests = []
if os.path.exists(args.broken_tests):
logging.info(f"File {args.broken_tests} with broken tests found")
with open(args.broken_tests) as f:
print(f"File {args.broken_tests} with broken tests found")
with open(args.broken_tests, encoding="utf-8") as f:
broken_tests = f.read().splitlines()
logging.info(f"Broken tests in the list: {len(broken_tests)}")
print(f"Broken tests in the list: {len(broken_tests)}")
state, description, test_results = process_result(args.in_results_dir, broken_tests)
logging.info("Result parsed")

View File

@ -5,7 +5,7 @@ set -e -x -a -u
ls -lha
cd hadoop-3.3.1
cd /hadoop-3.3.1
export JAVA_HOME=/usr
mkdir -p target/test/data

View File

@ -143,7 +143,7 @@ main() {
fi
start_minio
setup_minio "$1"
upload_data "${query_dir}" "${2:-/usr/share/clickhouse-test}"
upload_data "${query_dir}" "${2:-/repo/tests/}"
setup_aws_credentials
}

View File

@ -14,17 +14,17 @@ dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
dpkg -i package_folder/clickhouse-server_*.deb
dpkg -i package_folder/clickhouse-client_*.deb
ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
ln -s /repo/tests/clickhouse-test /usr/bin/clickhouse-test
# shellcheck disable=SC1091
source /utils.lib
source /repo/tests/docker_scripts/utils.lib
# install test configs
/usr/share/clickhouse-test/config/install.sh
/repo/tests/config/install.sh
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --silent --inMemoryPersistence &
./setup_minio.sh stateful
/repo/tests/docker_scripts/setup_minio.sh stateful
./mc admin trace clickminio > /test_output/minio.log &
MC_ADMIN_PID=$!
@ -105,7 +105,7 @@ setup_logs_replication
clickhouse-client --query "SHOW DATABASES"
clickhouse-client --query "CREATE DATABASE datasets"
clickhouse-client --multiquery < create.sql
clickhouse-client --multiquery < /repo/tests/docker_scripts/create.sql
clickhouse-client --query "SHOW TABLES FROM datasets"
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
@ -237,6 +237,7 @@ function run_tests()
--hung-check
--print-time
--capture-client-stacktrace
--queries "/repo/tests/queries"
"${ADDITIONAL_OPTIONS[@]}"
"$SKIP_TESTS_OPTION"
)
@ -259,7 +260,7 @@ ls -la ./
echo "Files in root directory"
ls -la /
/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
/repo/tests/docker_scripts/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
sudo clickhouse stop ||:
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then

View File

@ -1,10 +1,13 @@
#!/bin/bash
# fail on errors, verbose and export all env variables
set -e -x -a
# shellcheck disable=SC1091
source /setup_export_logs.sh
# shellcheck source=../stateless/stress_tests.lib
source /stress_tests.lib
source /repo/tests/docker_scripts/stress_tests.lib
# Avoid overlaps with previous runs
dmesg --clear
@ -39,20 +42,22 @@ if [[ -z "$BUGFIX_VALIDATE_CHECK" ]]; then
chc --version || exit 1
fi
ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
ln -sf /repo/tests/clickhouse-test /usr/bin/clickhouse-test
export CLICKHOUSE_GRPC_CLIENT="/repo/utils/grpc-client/clickhouse-grpc-client.py"
# shellcheck disable=SC1091
source /attach_gdb.lib
source /repo/tests/docker_scripts/attach_gdb.lib
# shellcheck disable=SC1091
source /utils.lib
source /repo/tests/docker_scripts/utils.lib
# install test configs
/usr/share/clickhouse-test/config/install.sh
/repo/tests/config/install.sh
./setup_minio.sh stateless
/repo/tests/docker_scripts/setup_minio.sh stateless
./setup_hdfs_minicluster.sh
/repo/tests/docker_scripts/setup_hdfs_minicluster.sh
config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
@ -316,6 +321,7 @@ function run_tests()
--print-time
--no-drop-if-fail
--capture-client-stacktrace
--queries "/repo/tests/queries"
--test-runs "$NUM_TRIES"
"${ADDITIONAL_OPTIONS[@]}"
)
@ -341,7 +347,7 @@ ls -la ./
echo "Files in root directory"
ls -la /
/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
/repo/tests/docker_scripts/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
clickhouse-client -q "system flush logs" ||:

View File

@ -3,26 +3,25 @@
# shellcheck disable=SC2086
# shellcheck disable=SC2024
set -x
# Avoid overlaps with previous runs
dmesg --clear
# shellcheck disable=SC1091
source /setup_export_logs.sh
set -x
# we mount tests folder from repo to /usr/share
ln -s /usr/share/clickhouse-test/ci/stress.py /usr/bin/stress
ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
ln -s /repo/tests/clickhouse-test/ci/stress.py /usr/bin/stress
ln -s /repo/tests/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
# Stress tests and the upgrade check use similar code that was placed
# in a separate bash library. See tests/ci/stress_tests.lib
# shellcheck source=../stateless/attach_gdb.lib
source /attach_gdb.lib
source /repo/tests/docker_scripts/attach_gdb.lib
# shellcheck source=../stateless/stress_tests.lib
source /stress_tests.lib
source /repo/tests/docker_scripts/stress_tests.lib
# shellcheck disable=SC1091
source /utils.lib
source /repo/tests/docker_scripts/utils.lib
install_packages package_folder
@ -55,7 +54,7 @@ export ZOOKEEPER_FAULT_INJECTION=1
# available for dump via clickhouse-local
configure
./setup_minio.sh stateless # to have a proper environment
/repo/tests/docker_scripts/setup_minio.sh stateless # to have a proper environment
config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
@ -64,7 +63,7 @@ start_server
setup_logs_replication
clickhouse-client --query "CREATE DATABASE datasets"
clickhouse-client --multiquery < create.sql
clickhouse-client --multiquery < /repo/tests/docker_scripts/create.sql
clickhouse-client --query "SHOW TABLES FROM datasets"
clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test"
@ -267,7 +266,7 @@ fi
start_server
stress --hung-check --drop-databases --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION" --global-time-limit 1200 \
python3 /repo/tests/ci/stress.py --hung-check --drop-databases --output-folder /test_output --skip-func-tests "$SKIP_TESTS_OPTION" --global-time-limit 1200 \
&& echo -e "Test script exit code$OK" >> /test_output/test_results.tsv \
|| echo -e "Test script failed$FAIL script exit code: $?" >> /test_output/test_results.tsv

View File

@ -42,7 +42,7 @@ function configure()
# install test configs
export USE_DATABASE_ORDINARY=1
export EXPORT_S3_STORAGE_POLICIES=1
/usr/share/clickhouse-test/config/install.sh
/repo/tests/config/install.sh
# avoid too slow startup
sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \

View File

@ -9,20 +9,20 @@ dmesg --clear
set -x
# we mount tests folder from repo to /usr/share
ln -s /usr/share/clickhouse-test/ci/stress.py /usr/bin/stress
ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
ln -s /usr/share/clickhouse-test/ci/download_release_packages.py /usr/bin/download_release_packages
ln -s /usr/share/clickhouse-test/ci/get_previous_release_tag.py /usr/bin/get_previous_release_tag
ln -s /repo/tests/ci/stress.py /usr/bin/stress
ln -s /repo/tests/clickhouse-test /usr/bin/clickhouse-test
ln -s /repo/tests/ci/download_release_packages.py /usr/bin/download_release_packages
ln -s /repo/tests/ci/get_previous_release_tag.py /usr/bin/get_previous_release_tag
# Stress tests and the upgrade check use similar code that was placed
# in a separate bash library. See tests/ci/stress_tests.lib
# shellcheck source=../stateless/attach_gdb.lib
source /attach_gdb.lib
source /repo/tests/docker_scripts/attach_gdb.lib
# shellcheck source=../stateless/stress_tests.lib
source /stress_tests.lib
source /repo/tests/docker_scripts/stress_tests.lib
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
./setup_minio.sh stateless # to have a proper environment
/repo/tests/docker_scripts/setup_minio.sh stateless # to have a proper environment
echo "Get previous release tag"
# shellcheck disable=SC2016

View File

@ -1,3 +1,4 @@
DROP TABLE IF EXISTS local_table;
DROP TABLE IF EXISTS other_table;

View File

@ -1,7 +1,7 @@
1
1
1
1
1
2
1
2
2

View File

@ -13,7 +13,7 @@ ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT DISTINCT blockSize() FROM
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT DISTINCT blockSize() FROM numbers(2) FORMAT CSV SETTINGS max_block_size = 1'
# push down append
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT DISTINCT blockSize() FROM numbers(2) SETTINGS max_compress_block_size = 1 FORMAT CSV SETTINGS max_block_size = 1'
# not overwrite on push down
# overwrite on push down (since these settings are applied last)
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT DISTINCT blockSize() FROM numbers(2) SETTINGS max_block_size = 2 FORMAT CSV SETTINGS max_block_size = 1'
# on push-down
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT DISTINCT blockSize() FROM numbers(2) SETTINGS max_block_size = 1 FORMAT CSV'

View File

@ -1 +1,2 @@
select 42 settings compatibility=NULL; -- {clientError BAD_GET}
select 42 settings compatibility=NULL; -- {clientError BAD_ARGUMENTS}

View File

@ -1,14 +0,0 @@
1
2
1
2
1
2
1
1
3
3
3
3
3
1

View File

@ -1,30 +0,0 @@
SET max_block_size = 10, max_threads = 1;
-- Take the following example:
SELECT 1 UNION ALL SELECT 2 FORMAT TSV;
-- Each subquery can be put in parentheses and have its own settings:
(SELECT getSetting('max_block_size') SETTINGS max_block_size = 1) UNION ALL (SELECT getSetting('max_block_size') SETTINGS max_block_size = 2) FORMAT TSV;
-- And the whole query can have settings:
(SELECT getSetting('max_block_size') SETTINGS max_block_size = 1) UNION ALL (SELECT getSetting('max_block_size') SETTINGS max_block_size = 2) FORMAT TSV SETTINGS max_block_size = 3;
-- A single query with output is parsed in the same way as the UNION ALL chain:
SELECT getSetting('max_block_size') SETTINGS max_block_size = 1 FORMAT TSV SETTINGS max_block_size = 3;
-- So while these forms have a slightly different meaning, they both exist:
SELECT getSetting('max_block_size') SETTINGS max_block_size = 1 FORMAT TSV;
SELECT getSetting('max_block_size') FORMAT TSV SETTINGS max_block_size = 3;
-- And due to this effect, users expect that FORMAT and SETTINGS may go in an arbitrary order.
-- But while this works:
(SELECT getSetting('max_block_size')) UNION ALL (SELECT getSetting('max_block_size')) FORMAT TSV SETTINGS max_block_size = 3;
-- This does not work automatically, unless we explicitly allow different orders:
(SELECT getSetting('max_block_size')) UNION ALL (SELECT getSetting('max_block_size')) SETTINGS max_block_size = 3 FORMAT TSV;
-- Inevitably, we allow this:
SELECT getSetting('max_block_size') SETTINGS max_block_size = 1 SETTINGS max_block_size = 3 FORMAT TSV;
/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
-- Because this part is consumed into ASTSelectWithUnionQuery
-- and the rest into ASTQueryWithOutput.

View File

@ -0,0 +1,4 @@
set allow_experimental_json_type = 1;
create table test (json JSON(SKIP REGEXP '[]')) engine=Memory(); -- {serverError CANNOT_COMPILE_REGEXP}
create table test (json JSON(SKIP REGEXP '+')) engine=Memory(); -- {serverError CANNOT_COMPILE_REGEXP};