Merge branch 'ClickHouse:master' into time_buckets_impl
Commit c997593e61
@@ -3226,7 +3226,7 @@ Default value: `0`.

## lightweight_deletes_sync {#lightweight_deletes_sync}

-The same as 'mutation_sync', but controls only execution of lightweight deletes.
+The same as [`mutations_sync`](#mutations_sync), but controls only execution of lightweight deletes.

Possible values:
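The hunk ends before the value list; for context, a minimal usage sketch (the table name is hypothetical, and the value semantics are assumed to mirror `mutations_sync`, where `2` waits for all replicas):

```sql
-- Assumed semantics: 2 = wait for the lightweight delete to finish on all replicas.
SET lightweight_deletes_sync = 2;
DELETE FROM orders WHERE status = 'cancelled';  -- 'orders' is a hypothetical table
```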
@@ -302,8 +302,12 @@ DataTypePtr tryInferDataTypeByEscapingRule(const String & field, const FormatSet
        /// Try to determine the type of value inside quotes
        auto type = tryInferDataTypeForSingleField(data, format_settings);

-       /// If we couldn't infer any type or it's a number and csv.try_infer_numbers_from_strings = 0, we determine it as a string.
-       if (!type || (format_settings.csv.try_infer_strings_from_quoted_tuples && isTuple(type)) || (!format_settings.csv.try_infer_numbers_from_strings && isNumber(type)))
+       /// Return String type if one of the following conditions apply
+       ///  - we couldn't infer any type
+       ///  - it's a number and csv.try_infer_numbers_from_strings = 0
+       ///  - it's a tuple and try_infer_strings_from_quoted_tuples = 0
+       ///  - it's a Bool type (we don't allow reading bool values from strings)
+       if (!type || (format_settings.csv.try_infer_strings_from_quoted_tuples && isTuple(type)) || (!format_settings.csv.try_infer_numbers_from_strings && isNumber(type)) || isBool(type))
            return std::make_shared<DataTypeString>();

        return type;
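A hedged SQL illustration of the rule above; the user-level setting name is assumed from the internal `format_settings.csv.try_infer_numbers_from_strings` field, and the `format()` input is made up:

```sql
-- With number inference from quoted strings disabled, "42" should stay a String;
-- under the new rule a quoted tuple like "(1,2)" and a quoted bool also stay String.
SET input_format_csv_try_infer_numbers_from_strings = 0;  -- assumed setting name
DESC format(CSV, '"42","(1,2)","true"');
```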
@@ -4134,6 +4134,29 @@ private:
        };
    }

+   /// Create wrapper only if we support this conversion.
+   WrapperType createWrapperIfCanConvert(const DataTypePtr & from, const DataTypePtr & to) const
+   {
+       try
+       {
+           /// We can avoid try/catch here if we will implement check that 2 types can be casted, but it
+           /// requires quite a lot of work. By now let's simply use try/catch.
+           /// First, check that we can create a wrapper.
+           WrapperType wrapper = prepareUnpackDictionaries(from, to);
+           /// Second, check if we can perform a conversion on column with default value.
+           /// (we cannot just check empty column as we do some checks only during iteration over rows).
+           auto test_col = from->createColumn();
+           test_col->insertDefault();
+           ColumnsWithTypeAndName column_from = {{test_col->getPtr(), from, ""}};
+           wrapper(column_from, to, nullptr, 1);
+           return wrapper;
+       }
+       catch (const Exception &)
+       {
+           return {};
+       }
+   }
+
    WrapperType createVariantToColumnWrapper(const DataTypeVariant & from_variant, const DataTypePtr & to_type) const
    {
        const auto & variant_types = from_variant.getVariants();
@@ -4142,7 +4165,19 @@ private:

        /// Create conversion wrapper for each variant.
        for (const auto & variant_type : variant_types)
-           variant_wrappers.push_back(prepareUnpackDictionaries(variant_type, to_type));
+       {
+           WrapperType wrapper;
+           if (cast_type == CastType::accurateOrNull)
+           {
+               /// Create wrapper only if we support conversion from variant to the resulting type.
+               wrapper = createWrapperIfCanConvert(variant_type, to_type);
+           }
+           else
+           {
+               wrapper = prepareUnpackDictionaries(variant_type, to_type);
+           }
+           variant_wrappers.push_back(wrapper);
+       }

        return [variant_wrappers, variant_types, to_type]
               (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable *, size_t input_rows_count) -> ColumnPtr
@@ -4157,7 +4192,11 @@ private:
            auto variant_col = column_variant.getVariantPtrByGlobalDiscriminator(i);
            ColumnsWithTypeAndName variant = {{variant_col, variant_types[i], ""}};
            const auto & variant_wrapper = variant_wrappers[i];
-           casted_variant_columns.push_back(variant_wrapper(variant, result_type, nullptr, variant_col->size()));
+           ColumnPtr casted_variant;
+           /// Check if we have wrapper for this variant.
+           if (variant_wrapper)
+               casted_variant = variant_wrapper(variant, result_type, nullptr, variant_col->size());
+           casted_variant_columns.push_back(std::move(casted_variant));
        }

        /// Second, construct resulting column from casted variant columns according to discriminators.
@@ -4167,7 +4206,7 @@ private:
        for (size_t i = 0; i != input_rows_count; ++i)
        {
            auto global_discr = column_variant.globalDiscriminatorByLocal(local_discriminators[i]);
-           if (global_discr == ColumnVariant::NULL_DISCRIMINATOR)
+           if (global_discr == ColumnVariant::NULL_DISCRIMINATOR || !casted_variant_columns[global_discr])
                res->insertDefault();
            else
                res->insertFrom(*casted_variant_columns[global_discr], column_variant.offsetAt(i));
@@ -4357,10 +4396,27 @@ private:
        casted_variant_columns.reserve(variant_types.size());
        for (size_t i = 0; i != variant_types.size(); ++i)
        {
            /// Skip shared variant, it will be processed later.
            if (i == column_dynamic.getSharedVariantDiscriminator())
            {
                casted_variant_columns.push_back(nullptr);
                continue;
            }

            const auto & variant_col = variant_column.getVariantPtrByGlobalDiscriminator(i);
            ColumnsWithTypeAndName variant = {{variant_col, variant_types[i], ""}};
-           auto variant_wrapper = prepareUnpackDictionaries(variant_types[i], result_type);
-           casted_variant_columns.push_back(variant_wrapper(variant, result_type, nullptr, variant_col->size()));
+           WrapperType variant_wrapper;
+           if (cast_type == CastType::accurateOrNull)
+               /// Create wrapper only if we support conversion from variant to the resulting type.
+               variant_wrapper = createWrapperIfCanConvert(variant_types[i], result_type);
+           else
+               variant_wrapper = prepareUnpackDictionaries(variant_types[i], result_type);
+
+           ColumnPtr casted_variant;
+           /// Check if we have wrapper for this variant.
+           if (variant_wrapper)
+               casted_variant = variant_wrapper(variant, result_type, nullptr, variant_col->size());
+           casted_variant_columns.push_back(casted_variant);
        }

        /// Second, collect all variants stored in shared variant and cast them to result type.
@@ -4416,8 +4472,18 @@ private:
        for (size_t i = 0; i != variant_types_from_shared_variant.size(); ++i)
        {
            ColumnsWithTypeAndName variant = {{variant_columns_from_shared_variant[i]->getPtr(), variant_types_from_shared_variant[i], ""}};
-           auto variant_wrapper = prepareUnpackDictionaries(variant_types_from_shared_variant[i], result_type);
-           casted_shared_variant_columns.push_back(variant_wrapper(variant, result_type, nullptr, variant_columns_from_shared_variant[i]->size()));
+           WrapperType variant_wrapper;
+           if (cast_type == CastType::accurateOrNull)
+               /// Create wrapper only if we support conversion from variant to the resulting type.
+               variant_wrapper = createWrapperIfCanConvert(variant_types_from_shared_variant[i], result_type);
+           else
+               variant_wrapper = prepareUnpackDictionaries(variant_types_from_shared_variant[i], result_type);
+
+           ColumnPtr casted_variant;
+           /// Check if we have wrapper for this variant.
+           if (variant_wrapper)
+               casted_variant = variant_wrapper(variant, result_type, nullptr, variant_columns_from_shared_variant[i]->size());
+           casted_shared_variant_columns.push_back(casted_variant);
        }

        /// Construct result column from all casted variants.
@@ -4427,11 +4493,23 @@ private:
        {
            auto global_discr = variant_column.globalDiscriminatorByLocal(local_discriminators[i]);
            if (global_discr == ColumnVariant::NULL_DISCRIMINATOR)
+           {
                res->insertDefault();
+           }
            else if (global_discr == shared_variant_discr)
-               res->insertFrom(*casted_shared_variant_columns[shared_variant_indexes[i]], shared_variant_offsets[i]);
+           {
+               if (casted_shared_variant_columns[shared_variant_indexes[i]])
+                   res->insertFrom(*casted_shared_variant_columns[shared_variant_indexes[i]], shared_variant_offsets[i]);
+               else
+                   res->insertDefault();
+           }
            else
-               res->insertFrom(*casted_variant_columns[global_discr], offsets[i]);
+           {
+               if (casted_variant_columns[global_discr])
+                   res->insertFrom(*casted_variant_columns[global_discr], offsets[i]);
+               else
+                   res->insertDefault();
+           }
        }

        return res;
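The user-visible effect of these wrappers is that `accurateCastOrNull`/`accurateCastOrDefault` over a `Variant` (or `Dynamic`) column no longer throws for variants that cannot be converted; a condensed form of the queries from the test added later in this commit:

```sql
SET allow_experimental_variant_type = 1, use_variant_as_common_type = 1;

-- Rows holding a String or Array variant cannot be converted to UInt32:
-- accurateCastOrNull returns NULL and accurateCastOrDefault returns 0 for them.
SELECT
    accurateCastOrNull(v, 'UInt32'),
    accurateCastOrDefault(v, 'UInt32')
FROM
(
    SELECT multiIf(number % 4 == 0, NULL,
                   number % 4 == 1, number,
                   number % 4 == 2, 'str_' || toString(number),
                   range(number)) AS v
    FROM numbers(8)
);
```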
@@ -1,6 +1,8 @@
-#include <Common/typeid_cast.h>
-#include <Parsers/ParserStringAndSubstitution.h>
#include <Parsers/ParserAlterQuery.h>

+#include <Parsers/ASTAlterQuery.h>
+#include <Parsers/ASTColumnDeclaration.h>
+#include <Parsers/ASTLiteral.h>
#include <Parsers/CommonParsers.h>
#include <Parsers/ExpressionElementParsers.h>
#include <Parsers/ExpressionListParsers.h>
@@ -9,14 +11,19 @@
#include <Parsers/ParserRefreshStrategy.h>
#include <Parsers/ParserSelectWithUnionQuery.h>
#include <Parsers/ParserSetQuery.h>
-#include <Parsers/ASTAlterQuery.h>
-#include <Parsers/ASTLiteral.h>
+#include <Parsers/ParserStringAndSubstitution.h>
#include <Parsers/parseDatabaseAndTableName.h>
+#include <Common/typeid_cast.h>


namespace DB
{

+namespace ErrorCodes
+{
+    extern const int SYNTAX_ERROR;
+}
+
bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
    auto command = std::make_shared<ASTAlterCommand>();
@@ -122,7 +129,6 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
    ParserCompoundIdentifier parser_name;
    ParserStringLiteral parser_string_literal;
-   ParserStringAndSubstitution parser_string_and_substituion;
    ParserIdentifier parser_remove_property;
    ParserCompoundColumnDeclaration parser_col_decl;
    ParserIndexDeclaration parser_idx_decl;
    ParserStatisticsDeclaration parser_stat_decl;
@@ -725,8 +731,21 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
        if (!parser_modify_col_decl.parse(pos, command_col_decl, expected))
            return false;

+       auto check_no_type = [&](const std::string_view keyword)
+       {
+           const auto & column_decl = command_col_decl->as<const ASTColumnDeclaration &>();
+
+           if (!column_decl.children.empty() || column_decl.null_modifier.has_value() || !column_decl.default_specifier.empty()
+               || column_decl.ephemeral_default || column_decl.primary_key_specifier)
+           {
+               throw Exception(ErrorCodes::SYNTAX_ERROR, "Cannot specify column properties before '{}'", keyword);
+           }
+       };
+
        if (s_remove.ignore(pos, expected))
        {
+           check_no_type(s_remove.getName());
+
            if (s_default.ignore(pos, expected))
                command->remove_property = toStringView(Keyword::DEFAULT);
            else if (s_materialized.ignore(pos, expected))
@@ -746,11 +765,15 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
        }
        else if (s_modify_setting.ignore(pos, expected))
        {
+           check_no_type(s_modify_setting.getName());
+
            if (!parser_settings.parse(pos, command_settings_changes, expected))
                return false;
        }
        else if (s_reset_setting.ignore(pos, expected))
        {
+           check_no_type(s_reset_setting.getName());
+
            if (!parser_reset_setting.parse(pos, command_settings_resets, expected))
                return false;
        }
@@ -765,6 +788,11 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
            }
        }
        command->type = ASTAlterCommand::MODIFY_COLUMN;
+
+       /// Make sure that type is not populated when REMOVE/MODIFY SETTING/RESET SETTING is used, because we wouldn't modify the type, which can be confusing
+       chassert(
+           nullptr == command_col_decl->as<const ASTColumnDeclaration &>().type
+           || (command->remove_property.empty() && nullptr == command_settings_changes && nullptr == command_settings_resets));
    }
    else if (s_modify_order_by.ignore(pos, expected))
    {
@@ -1287,7 +1287,8 @@ void Planner::buildPlanForUnionNode()

    for (const auto & query_node : union_queries_nodes)
    {
-       Planner query_planner(query_node, select_query_options);
+       Planner query_planner(query_node, select_query_options, planner_context->getGlobalPlannerContext());
+
        query_planner.buildQueryPlanIfNeeded();
        for (const auto & row_policy : query_planner.getUsedRowPolicies())
            used_row_policies.insert(row_policy);
@@ -173,7 +173,10 @@ Pipe ReadFromMemoryStorageStep::makePipe()

    for (size_t stream = 0; stream < num_streams; ++stream)
    {
-       pipes.emplace_back(std::make_shared<MemorySource>(columns_to_read, storage_snapshot, current_data, parallel_execution_index));
+       auto source = std::make_shared<MemorySource>(columns_to_read, storage_snapshot, current_data, parallel_execution_index);
+       if (stream == 0)
+           source->addTotalRowsApprox(snapshot_data.rows_approx);
+       pipes.emplace_back(std::move(source));
    }
    return Pipe::unitePipes(std::move(pipes));
}
@@ -2052,6 +2052,7 @@ DataPartStoragePtr IMergeTreeDataPart::makeCloneInDetached(const String & prefix
    IDataPartStorage::ClonePartParams params
    {
        .copy_instead_of_hardlink = isStoredOnRemoteDiskWithZeroCopySupport() && storage.supportsReplication() && storage_settings->allow_remote_fs_zero_copy_replication,
+       .keep_metadata_version = prefix == "covered-by-broken",
        .make_source_readonly = true,
        .external_transaction = disk_transaction
    };
@@ -22,6 +22,7 @@ namespace ErrorCodes
{
    extern const int UNKNOWN_MUTATION_COMMAND;
    extern const int MULTIPLE_ASSIGNMENTS_TO_COLUMN;
+   extern const int LOGICAL_ERROR;
}


@@ -115,12 +116,17 @@ std::optional<MutationCommand> MutationCommand::parse(ASTAlterCommand * command,
        res.column_name = getIdentifierName(command->column);
        return res;
    }
-   else if (parse_alter_commands && command->type == ASTAlterCommand::MODIFY_COLUMN)
+   /// MODIFY COLUMN x REMOVE MATERIALIZED/RESET SETTING/MODIFY SETTING is a valid alter command, but doesn't have any specified column type,
+   /// thus no mutation is needed
+   else if (
+       parse_alter_commands && command->type == ASTAlterCommand::MODIFY_COLUMN && command->remove_property.empty() && nullptr == command->settings_changes && nullptr == command->settings_resets)
    {
        MutationCommand res;
        res.ast = command->ptr();
        res.type = MutationCommand::Type::READ_COLUMN;
        const auto & ast_col_decl = command->col_decl->as<ASTColumnDeclaration &>();
+       if (nullptr == ast_col_decl.type)
+           throw Exception(ErrorCodes::LOGICAL_ERROR, "MODIFY COLUMN mutation command doesn't specify type: {}", serializeAST(*command));
        res.column_name = ast_col_decl.name;
        res.data_type = DataTypeFactory::instance().get(ast_col_decl.type);
        return res;
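The case this guards is exercised by the new test later in this commit: a `MODIFY COLUMN ... REMOVE MATERIALIZED` command carries no column type, so it must not be turned into a `READ_COLUMN` mutation:

```sql
-- DROP INDEX is included so the mutation is not a pure metadata mutation.
ALTER TABLE a
    DROP INDEX IF EXISTS some_index,
    MODIFY COLUMN y REMOVE MATERIALIZED
    SETTINGS alter_sync = 2, mutations_sync = 2;
```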
@@ -161,6 +161,9 @@ StorageSnapshotPtr StorageMemory::getStorageSnapshot(const StorageMetadataPtr &
{
    auto snapshot_data = std::make_unique<SnapshotData>();
    snapshot_data->blocks = data.get();
+   /// Not guaranteed to match `blocks`, but that's ok. It would probably be better to move
+   /// rows and bytes counters into the MultiVersion-ed struct, then everything would be consistent.
+   snapshot_data->rows_approx = total_size_rows.load(std::memory_order_relaxed);

    if (!hasDynamicSubcolumns(metadata_snapshot->getColumns()))
        return std::make_shared<StorageSnapshot>(*this, metadata_snapshot, ColumnsDescription{}, std::move(snapshot_data));
@@ -42,6 +42,7 @@ public:
    struct SnapshotData : public StorageSnapshot::Data
    {
        std::shared_ptr<const Blocks> blocks;
+       size_t rows_approx = 0;
    };

    StorageSnapshotPtr getStorageSnapshot(const StorageMetadataPtr & metadata_snapshot, ContextPtr query_context) const override;
@@ -417,15 +417,13 @@ class Backport:
                f"v{branch}-must-backport" for branch in self.release_branches
            ]
        else:
-           fetch_release_prs = self.gh.get_release_pulls(self._fetch_from)
-           fetch_release_branches = [pr.head.ref for pr in fetch_release_prs]
            self.labels_to_backport = [
                (
                    f"v{branch}-must-backport"
                    if self._repo_name == "ClickHouse/ClickHouse"
                    else f"v{branch.replace('release/','')}-must-backport"
                )
-               for branch in fetch_release_branches
+               for branch in self.release_branches
            ]

        logging.info("Fetching from %s", self._fetch_from)
@@ -16,7 +16,7 @@ import upload_result_helper
from build_check import get_release_or_pr
from ci_config import CI
from ci_metadata import CiMetadata
-from ci_utils import GH, Utils
+from ci_utils import GH, Utils, Envs
from clickhouse_helper import (
    CiLogsCredentials,
    ClickHouseHelper,
@@ -333,11 +333,10 @@ def _pre_action(s3, job_name, batch, indata, pr_info):
        CI.JobNames.BUILD_CHECK,
    ):  # we might want to rerun build report job
        rerun_helper = RerunHelper(commit, _get_ext_check_name(job_name))
-       if (
-           rerun_helper.is_already_finished_by_status()
-           and not Utils.is_job_triggered_manually()
-       ):
-           print("WARNING: Rerunning job with GH status ")
+       if rerun_helper.is_already_finished_by_status():
+           print(
+               f"WARNING: Rerunning job with GH status, rerun triggered by {Envs.GITHUB_ACTOR}"
+           )
            status = rerun_helper.get_finished_status()
            assert status
            print("::group::Commit Status")
@@ -62,7 +62,6 @@ class Runners(metaclass=WithIter):
    STYLE_CHECKER_ARM = "style-checker-aarch64"
    FUNC_TESTER = "func-tester"
    FUNC_TESTER_ARM = "func-tester-aarch64"
-   STRESS_TESTER = "stress-tester"
    FUZZER_UNIT_TESTER = "fuzzer-unit-tester"


@@ -456,7 +455,7 @@ class CommonJobConfigs:
            docker=["clickhouse/stress-test"],
        ),
        run_command="stress_check.py",
-       runner_type=Runners.STRESS_TESTER,
+       runner_type=Runners.FUNC_TESTER,
        timeout=9000,
    )
    UPGRADE_TEST = JobConfig(
@@ -467,7 +466,7 @@ class CommonJobConfigs:
            docker=["clickhouse/stress-test"],
        ),
        run_command="upgrade_check.py",
-       runner_type=Runners.STRESS_TESTER,
+       runner_type=Runners.FUNC_TESTER,
        timeout=3600,
    )
    INTEGRATION_TEST = JobConfig(
@@ -482,7 +481,7 @@ class CommonJobConfigs:
            docker=IMAGES.copy(),
        ),
        run_command='integration_test_check.py "$CHECK_NAME"',
-       runner_type=Runners.STRESS_TESTER,
+       runner_type=Runners.FUNC_TESTER,
    )
    ASTFUZZER_TEST = JobConfig(
        job_name_keyword="ast",
@@ -517,7 +516,7 @@ class CommonJobConfigs:
            docker=["clickhouse/performance-comparison"],
        ),
        run_command="performance_comparison_check.py",
-       runner_type=Runners.STRESS_TESTER,
+       runner_type=Runners.FUNC_TESTER,
    )
    SQLLANCER_TEST = JobConfig(
        job_name_keyword="lancer",
@@ -241,7 +241,10 @@ def main():
    additional_data = []
    try:
        test_result = _parse_jepsen_output(jepsen_log_path)
-       if any(r.status == "FAIL" for r in test_result):
+       if len(test_result) == 0:
+           status = FAILURE
+           description = "No test results found"
+       elif any(r.status == "FAIL" for r in test_result):
            status = FAILURE
            description = "Found invalid analysis (ノಥ益ಥ)ノ ┻━┻"
@@ -1637,6 +1637,19 @@ class TestCase:
        if args.client_log:
            log_opt = " --client_logs_file=" + args.client_log + " "
            client_options += log_opt

+           for env_name in [
+               "TSAN_OPTIONS",
+               "ASAN_OPTIONS",
+               "MSAN_OPTIONS",
+               "UBSAN_OPTIONS",
+           ]:
+               current_options = os.environ.get(env_name, None)
+               if current_options is None:
+                   os.environ[env_name] = f"log_path={args.client_log}"
+               elif "log_path=" not in current_options:
+                   os.environ[env_name] += f":log_path={args.client_log}"
+
        os.environ["CLICKHOUSE_CLIENT_OPT"] = (
            os.environ["CLICKHOUSE_CLIENT_OPT"]
            if "CLICKHOUSE_CLIENT_OPT" in os.environ
@@ -3108,13 +3121,15 @@ def main(args):
        print(colored("\nNo queries hung.", args, "green", attrs=["bold"]))

    if args.client_log:
-       if os.path.exists(args.client_log):
-           with open(args.client_log, "rb") as stream:
+       for log_file in [args.client_log, *glob.glob(f"{args.client_log}.*")]:
+           if not os.path.exists(log_file):
+               continue
+           with open(log_file, "rb") as stream:
                content = stream.read().decode()
                if len(content):
-                   print("Has fatal logs from client:\n")
+                   print(f"Has fatal logs from client in '{log_file}':\n")
                    print(content)
-               os.remove(args.client_log)
+               os.remove(log_file)

    if len(restarted_tests) > 0:
        print("\nSome tests were restarted:\n")
tests/integration/test_covered_by_broken_exists/test.py (new file, 103 lines)
@@ -0,0 +1,103 @@
import pytest
import logging
import time
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
from helpers.test_tools import assert_eq_with_retry

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance("node1", stay_alive=True, with_zookeeper=True)
node2 = cluster.add_instance("node2", with_zookeeper=True)

instance = node1
q = node1.query

path_to_data = "/var/lib/clickhouse/"


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


def wait_merged_part(table, part_name, retries=100):
    q("OPTIMIZE TABLE {} FINAL".format(table))
    for i in range(retries):
        result = q(
            "SELECT name FROM system.parts where table='{}' AND name='{}'".format(
                table, part_name
            )
        )
        if result:
            return True
        time.sleep(0.5)
    else:
        return False


def test_make_clone_covered_by_broken_detached_dir_exists(started_cluster):
    q("DROP TABLE IF EXISTS test_make_clone_cvbdde SYNC")

    q(
        "CREATE TABLE test_make_clone_cvbdde(n int, m String) ENGINE=ReplicatedMergeTree('/test_make_clone_cvbdde', '1') ORDER BY n SETTINGS old_parts_lifetime=3600, min_age_to_force_merge_seconds=1, min_age_to_force_merge_on_partition_only=0"
    )
    path = path_to_data + "data/default/test_make_clone_cvbdde/"

    q("INSERT INTO test_make_clone_cvbdde VALUES (0, 'hbl')")

    q("INSERT INTO test_make_clone_cvbdde VALUES (1, 'hbl')")
    if not (wait_merged_part("test_make_clone_cvbdde", "all_0_1_1")):
        assert False, "Part all_0_1_1 doesn't appeared in system.parts"

    q("INSERT INTO test_make_clone_cvbdde VALUES (2, 'hbl')")
    if not (wait_merged_part("test_make_clone_cvbdde", "all_0_2_2")):
        assert False, "Part all_0_2_2 doesn't appeared in system.parts"

    q("INSERT INTO test_make_clone_cvbdde VALUES (3, 'hbl')")
    if not (wait_merged_part("test_make_clone_cvbdde", "all_0_3_3")):
        assert False, "Part all_0_3_3 doesn't appeared in system.parts"

    res = str(instance.exec_in_container(["ls", path]).strip().split("\n"))

    # broke the merged parts
    instance.exec_in_container(
        [
            "bash",
            "-c",
            "echo 'broken' > {}".format(path + "all_0_1_1/data.bin"),
        ]
    )

    instance.exec_in_container(
        [
            "bash",
            "-c",
            "echo 'broken' > {}".format(path + "all_0_2_2/data.bin"),
        ]
    )

    instance.exec_in_container(
        [
            "bash",
            "-c",
            "echo 'broken' > {}".format(path + "all_0_3_3/data.bin"),
        ]
    )

    instance.restart_clickhouse(kill=True)

    assert [
        "broken-on-start_all_0_1_1",
        "broken-on-start_all_0_2_2",
        "broken-on-start_all_0_3_3",
        "covered-by-broken_all_0_0_0",
        "covered-by-broken_all_1_1_0",
        "covered-by-broken_all_2_2_0",
        "covered-by-broken_all_3_3_0",
    ] == sorted(
        instance.exec_in_container(["ls", path + "detached/"]).strip().split("\n")
    )
@@ -7,10 +7,13 @@
  :main jepsen.clickhouse.main
  :plugins [[lein-cljfmt "0.7.0"]]
  :dependencies [[org.clojure/clojure "1.10.1"]
-                [jepsen "0.2.7"]
+                [jepsen "0.2.7" :exclusions [net.java.dev.jna/jna
+                                             net.java.dev.jna/jna-platform]]
                 [zookeeper-clj "0.9.4"]
                 [org.clojure/java.jdbc "0.7.12"]
                 [com.hierynomus/sshj "0.34.0"]
+                [net.java.dev.jna/jna "5.14.0"]
+                [net.java.dev.jna/jna-platform "5.14.0"]
                 [com.clickhouse/clickhouse-jdbc "0.3.2-patch11"]
                 [org.apache.zookeeper/zookeeper "3.6.1" :exclusions [org.slf4j/slf4j-log4j12]]]
  :repl-options {:init-ns jepsen.clickhouse-keeper.main}
@@ -51,8 +51,8 @@ $CLICKHOUSE_CLIENT --show_table_uuid_in_table_create_query_if_not_nil=1 -q "SHOW
$CLICKHOUSE_CLIENT -q "SELECT name, uuid, create_table_query FROM system.tables WHERE database='${DATABASE_2}'" | sed "s/$explicit_uuid/00001114-0000-4000-8000-000000000002/g"

RANDOM_COMMENT="$RANDOM"
-$CLICKHOUSE_CLIENT --max-threads 5 --function_sleep_max_microseconds_per_block 60000000 -q "SELECT count(col), sum(col) FROM (SELECT n + sleepEachRow(1.5) AS col FROM ${DATABASE_1}.mt) -- ${RANDOM_COMMENT}" & # 33s (1.5s * 22 rows per partition [Using 5 threads in parallel]), result: 110, 5995
-$CLICKHOUSE_CLIENT --max-threads 5 --function_sleep_max_microseconds_per_block 60000000 -q "INSERT INTO ${DATABASE_2}.mt SELECT number + sleepEachRow(1.5) FROM numbers(30) -- ${RANDOM_COMMENT}" & # 45s (1.5s * 30 rows)
+$CLICKHOUSE_CLIENT --max-threads 5 --function_sleep_max_microseconds_per_block 120000000 -q "SELECT count(col), sum(col) FROM (SELECT n + sleepEachRow(3) AS col FROM ${DATABASE_1}.mt) -- ${RANDOM_COMMENT}" & # 66s (3s * 22 rows per partition [Using 5 threads in parallel]), result: 110, 5995
+$CLICKHOUSE_CLIENT --max-threads 5 --function_sleep_max_microseconds_per_block 120000000 -q "INSERT INTO ${DATABASE_2}.mt SELECT number + sleepEachRow(2.2) FROM numbers(30) -- ${RANDOM_COMMENT}" & # 66s (2.2s * 30 rows)

it=0
while [[ $($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.processes WHERE query_id != queryID() AND current_database = currentDatabase() AND query LIKE '%-- ${RANDOM_COMMENT}%'") -ne 2 ]]; do
@@ -87,7 +87,7 @@ SELECT count() FROM ${DATABASE_1}.mt
" # result: 5

RANDOM_TUPLE="${RANDOM}_tuple"
-$CLICKHOUSE_CLIENT --max-threads 5 --function_sleep_max_microseconds_per_block 60000000 -q "SELECT tuple(s, sleepEachRow(3)) FROM ${DATABASE_1}.mt -- ${RANDOM_TUPLE}" > /dev/null & # 15s (3s * 5 rows)
+$CLICKHOUSE_CLIENT --max-threads 5 --function_sleep_max_microseconds_per_block 60000000 -q "SELECT tuple(s, sleepEachRow(4)) FROM ${DATABASE_1}.mt -- ${RANDOM_TUPLE}" > /dev/null & # 20s (4s * 5 rows)
it=0
while [[ $($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.processes WHERE query_id != queryID() AND current_database = currentDatabase() AND query LIKE '%-- ${RANDOM_TUPLE}%'") -ne 1 ]]; do
    it=$((it+1))
@@ -40,10 +40,17 @@ OPTIMIZE TABLE shard_0.from_0;
OPTIMIZE TABLE shard_1.from_0;
OPTIMIZE TABLE shard_0.from_1;
OPTIMIZE TABLE shard_1.from_1;

OPTIMIZE TABLE shard_0.to;

+-- If moved parts are not merged by OPTIMIZE or background merge restart
+-- can log Warning about metadata version on disk. It's normal situation
+-- and test shouldn't rarely fail because of it.
+set send_logs_level = 'error';
+
system restart replica shard_0.to;

+-- Doesn't lead to test flakyness, because we don't check anything after it
+select sleep(2);
+
attach table shard_1.to;
@@ -54,4 +61,3 @@ drop table if exists shard_0.from_1;
drop table if exists shard_1.from_1;
drop table if exists shard_0.to;
drop table if exists shard_1.to;
@@ -35,8 +35,15 @@ OPTIMIZE TABLE shard_0.to;
OPTIMIZE TABLE shard_0.to;
select name, active from system.parts where database='shard_0' and table='to' and active order by name;

+-- If moved parts are not merged by OPTIMIZE or background merge restart
+-- can log Warning about metadata version on disk. It's normal situation
+-- and test shouldn't rarely fail because of it.
+set send_logs_level = 'error';
+
system restart replica shard_0.to;

+-- Doesn't lead to test flakyness, because we don't check content in table
+-- which doesn't depend on any background operation
+select sleep(3);
+
attach table shard_1.to;
@@ -1,6 +1,6 @@
c1 Nullable(Int64)
c2 Nullable(Float64)
-c3 Nullable(Bool)
+c3 Nullable(String)
c1 Nullable(String)
c2 Nullable(String)
c3 Nullable(String)
@@ -0,0 +1,516 @@
[new .reference file, 516 lines of expected output omitted here: the NULL/default results of the accurateCastOrDefault/accurateCastOrNull queries over Variant and Dynamic values, followed by the distinct to*OrDefault results - boundary values for Int8..Int256 and UInt8..UInt256, Float32/Float64 including -inf/inf/nan, Decimal bounds, Date/Date32/DateTime ranges, IPv4/IPv6 addresses and UUIDs]
@@ -0,0 +1,117 @@
set allow_experimental_variant_type = 1;
set use_variant_as_common_type = 1;
set allow_experimental_dynamic_type = 1;
set allow_suspicious_low_cardinality_types = 1;
set session_timezone = 'UTC';

select accurateCastOrDefault(variant, 'UInt32'), multiIf(number % 4 == 0, NULL, number % 4 == 1, number, number % 4 == 2, 'str_' || toString(number), range(number)) as variant from numbers(8);
select accurateCastOrNull(variant, 'UInt32'), multiIf(number % 4 == 0, NULL, number % 4 == 1, number, number % 4 == 2, 'str_' || toString(number), range(number)) as variant from numbers(8);

select accurateCastOrDefault(dynamic, 'UInt32'), multiIf(number % 4 == 0, NULL, number % 4 == 1, number, number % 4 == 2, 'str_' || toString(number), range(number))::Dynamic as dynamic from numbers(8);
select accurateCastOrNull(dynamic, 'UInt32'), multiIf(number % 4 == 0, NULL, number % 4 == 1, number, number % 4 == 2, 'str_' || toString(number), range(number))::Dynamic as dynamic from numbers(8);

drop table if exists t;
create table t (d Dynamic) engine=MergeTree order by tuple();

-- Integer types: signed and unsigned integers (UInt8, UInt16, UInt32, UInt64, UInt128, UInt256, Int8, Int16, Int32, Int64, Int128, Int256)
INSERT INTO t VALUES (-128::Int8), (-127::Int8), (-1::Int8), (0::Int8), (1::Int8), (126::Int8), (127::Int8);
INSERT INTO t VALUES (-128::Int8), (-127::Int8), (-1::Int8), (0::Int8), (1::Int8), (126::Int8), (127::Int8);
INSERT INTO t VALUES (-128::Int8), (-127::Int8), (-1::Int8), (0::Int8), (1::Int8), (126::Int8), (127::Int8);
INSERT INTO t VALUES (-32768::Int16), (-32767::Int16), (-1::Int16), (0::Int16), (1::Int16), (32766::Int16), (32767::Int16);
INSERT INTO t VALUES (-2147483648::Int32), (-2147483647::Int32), (-1::Int32), (0::Int32), (1::Int32), (2147483646::Int32), (2147483647::Int32);
INSERT INTO t VALUES (-9223372036854775808::Int64), (-9223372036854775807::Int64), (-1::Int64), (0::Int64), (1::Int64), (9223372036854775806::Int64), (9223372036854775807::Int64);
INSERT INTO t VALUES (-170141183460469231731687303715884105728::Int128), (-170141183460469231731687303715884105727::Int128), (-1::Int128), (0::Int128), (1::Int128), (170141183460469231731687303715884105726::Int128), (170141183460469231731687303715884105727::Int128);
INSERT INTO t VALUES (-57896044618658097711785492504343953926634992332820282019728792003956564819968::Int256), (-57896044618658097711785492504343953926634992332820282019728792003956564819967::Int256), (-1::Int256), (0::Int256), (1::Int256), (57896044618658097711785492504343953926634992332820282019728792003956564819966::Int256), (57896044618658097711785492504343953926634992332820282019728792003956564819967::Int256);

INSERT INTO t VALUES (0::UInt8), (1::UInt8), (254::UInt8), (255::UInt8);
INSERT INTO t VALUES (0::UInt16), (1::UInt16), (65534::UInt16), (65535::UInt16);
INSERT INTO t VALUES (0::UInt32), (1::UInt32), (4294967294::UInt32), (4294967295::UInt32);
INSERT INTO t VALUES (0::UInt64), (1::UInt64), (18446744073709551614::UInt64), (18446744073709551615::UInt64);
INSERT INTO t VALUES (0::UInt128), (1::UInt128), (340282366920938463463374607431768211454::UInt128), (340282366920938463463374607431768211455::UInt128);
INSERT INTO t VALUES (0::UInt256), (1::UInt256), (115792089237316195423570985008687907853269984665640564039457584007913129639934::UInt256), (115792089237316195423570985008687907853269984665640564039457584007913129639935::UInt256);

-- Floating-point numbers: floats(Float32 and Float64) values
INSERT INTO t VALUES (1.17549435e-38::Float32), (3.40282347e+38::Float32), (-3.40282347e+38::Float32), (-1.17549435e-38::Float32), (1.4e-45::Float32), (-1.4e-45::Float32);
INSERT INTO t VALUES (inf::Float32), (-inf::Float32), (nan::Float32);
INSERT INTO t VALUES (inf::FLOAT(12)), (-inf::FLOAT(12)), (nan::FLOAT(12));
INSERT INTO t VALUES (inf::FLOAT(15,22)), (-inf::FLOAT(15,22)), (nan::FLOAT(15,22));

INSERT INTO t VALUES (1.17549435e-38::Float64), (3.40282347e+38::Float64), (-3.40282347e+38::Float64), (-1.17549435e-38::Float64), (1.4e-45::Float64), (-1.4e-45::Float64);
INSERT INTO t VALUES (2.2250738585072014e-308::Float64), (1.7976931348623157e+308::Float64), (-1.7976931348623157e+308::Float64), (-2.2250738585072014e-308::Float64);
INSERT INTO t VALUES (inf::Float64), (-inf::Float64), (nan::Float64);
INSERT INTO t VALUES (inf::DOUBLE(12)), (-inf::DOUBLE(12)), (nan::DOUBLE(12));
INSERT INTO t VALUES (inf::DOUBLE(15,22)), (-inf::DOUBLE(15,22)), (nan::DOUBLE(15,22));

-- Strings: String and FixedString
INSERT INTO t VALUES ('string'::String), ('1'::FixedString(1)), ('1'::FixedString(2)), ('1'::FixedString(10)); --(''::String),

-- Boolean
INSERT INTO t VALUES ('1'::Bool), (0::Bool);

-- UUID
INSERT INTO t VALUES ('dededdb6-7835-4ce4-8d11-b5de6f2820e9'::UUID);
INSERT INTO t VALUES ('00000000-0000-0000-0000-000000000000'::UUID);

-- LowCardinality
INSERT INTO t VALUES ('1'::LowCardinality(String)), ('1'::LowCardinality(String)), (0::LowCardinality(UInt16));

-- Arrays
INSERT INTO t VALUES ([]::Array(Dynamic)), ([[]]::Array(Array(Dynamic))), ([[[]]]::Array(Array(Array(Dynamic))));

-- Tuple
INSERT INTO t VALUES (()::Tuple(Dynamic)), ((())::Tuple(Tuple(Dynamic))), (((()))::Tuple(Tuple(Tuple(Dynamic))));

-- Map.
INSERT INTO t VALUES (map(11::Dynamic, 'v1'::Dynamic, '22'::Dynamic, 1::Dynamic));

-- SimpleAggregateFunction
INSERT INTO t VALUES ([1,2]::SimpleAggregateFunction(anyLast, Array(Int16)));

-- IPs
INSERT INTO t VALUES (toIPv4('192.168.0.1')), (toIPv6('::1'));

-- Geo
INSERT INTO t VALUES ((1.23, 4.56)::Point), (([(1.23, 4.56)::Point, (2.34, 5.67)::Point])::Ring);
INSERT INTO t VALUES ([[[(0, 0), (10, 0), (10, 10), (0, 10)]], [[(20, 20), (50, 20), (50, 50), (20, 50)],[(30, 30), (50, 50), (50, 30)]]]::MultiPolygon);

-- Interval
INSERT INTO t VALUES (interval '1' day), (interval '2' month), (interval '3' year);

-- Nested
INSERT INTO t VALUES ([(1, 'aa'), (2, 'bb')]::Nested(x UInt32, y String));
INSERT INTO t VALUES ([(1, (2, ['aa', 'bb']), [(3, 'cc'), (4, 'dd')]), (5, (6, ['ee', 'ff']), [(7, 'gg'), (8, 'hh')])]::Nested(x UInt32, y Tuple(y1 UInt32, y2 Array(String)), z Nested(z1 UInt32, z2 String)));

optimize table t final;

select distinct toInt8OrDefault(d) as res from t order by res;
select distinct toUInt8OrDefault(d) as res from t order by res;
select distinct toInt16OrDefault(d) as res from t order by res;
select distinct toUInt16OrDefault(d) as res from t order by res;
select distinct toInt32OrDefault(d) as res from t order by res;
select distinct toUInt32OrDefault(d) as res from t order by res;
select distinct toInt64OrDefault(d) as res from t order by res;
select distinct toUInt64OrDefault(d) as res from t order by res;
select distinct toInt128OrDefault(d) as res from t order by res;
select distinct toUInt128OrDefault(d) as res from t order by res;
select distinct toInt256OrDefault(d) as res from t order by res;
select distinct toUInt256OrDefault(d) as res from t order by res;

select distinct toFloat32OrDefault(d) as res from t order by res;
select distinct toFloat64OrDefault(d) as res from t order by res;

select distinct toDecimal32OrDefault(d, 3) as res from t order by res;
select distinct toDecimal64OrDefault(d, 3) as res from t order by res;
select distinct toDecimal128OrDefault(d, 3) as res from t order by res;
select distinct toDecimal256OrDefault(d, 3) as res from t order by res;

select distinct toDateOrDefault(d) as res from t order by res;
select distinct toDate32OrDefault(d) as res from t order by res;
select distinct toDateTimeOrDefault(d) as res from t order by res;

select distinct toIPv4OrDefault(d) as res from t order by res;
select distinct toIPv6OrDefault(d) as res from t order by res;

select distinct toUUIDOrDefault(d) as res from t order by res;

drop table t;
@@ -0,0 +1,4 @@
BEFORE a x String
BEFORE a y String MATERIALIZED \'str\'
AFTER a x String
AFTER a y String
@@ -0,0 +1,13 @@
DROP TABLE IF EXISTS a SYNC;
CREATE TABLE a (x String, y String MATERIALIZED 'str') ENGINE = ReplicatedMergeTree('/clickhouse/{database}/a', 'r1') ORDER BY x;

INSERT INTO a SELECT toString(number) FROM numbers(100);
SELECT 'BEFORE', table, name, type, default_kind, default_expression FROM system.columns WHERE database = currentDatabase() AND table = 'a' ORDER BY table, name;

-- DROP INDEX is important to make the mutation not a pure metadata mutation
ALTER TABLE a
    DROP INDEX IF EXISTS some_index,
    MODIFY COLUMN y REMOVE MATERIALIZED
    SETTINGS alter_sync = 2, mutations_sync = 2;

SELECT 'AFTER', table, name, type, default_kind, default_expression FROM system.columns WHERE database = currentDatabase() AND table = 'a' ORDER BY table, name;
@@ -0,0 +1,13 @@
REMOVE
The same, but with type
MODIFY SETTING
The same, but with type
RESET SETTING
The same, but with type
All the above, but on server side
REMOVE
The same, but with type
MODIFY SETTING
The same, but with type
RESET SETTING
The same, but with type
@ -0,0 +1,151 @@
|
||||
DROP TABLE IF EXISTS a SYNC;
|
||||
CREATE TABLE a (x Int64, y Int64 MATERIALIZED 1 SETTINGS (max_compress_block_size = 30000)) ENGINE = MergeTree ORDER BY x;
|
||||
|
||||
-- In cases when the type is not present in column declaration, the parser interprets TTL/COLLATE/SETTINGS as a data type,
|
||||
-- thus such queries doesn't throw syntax error on client side, just fails to parse. For server side validation these
|
||||
-- queries still result in an exception of syntax error. Even though the exception is throw for a different reason, they
|
||||
-- are good safe guards for the future where the parsing of such properties might change.
|
||||
SELECT 'REMOVE';
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y DEFAULT 2 REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y MATERIALIZED 3 REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y EPHEMERAL 4 REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y COMMENT '5' REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y CODEC(ZSTD) REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y STATISTICS(tdigest) REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y PRIMARY KEY REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR }
|
||||
|
||||
SELECT 'The same, but with type';
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 DEFAULT 2 REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 MATERIALIZED 3 REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 EPHEMERAL 4 REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 COMMENT '5' REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 CODEC(ZSTD) REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 STATISTICS(tdigest) REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 TTL toDate('2025-01-01') + toIntervalDay(x) REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 COLLATE binary REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 SETTINGS (max_compress_block_size = 20000) REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 PRIMARY KEY REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR }
|
||||
|
||||
SELECT 'MODIFY SETTING';
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y DEFAULT 2 MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y MATERIALIZED 3 MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y EPHEMERAL 4 MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y COMMENT '5' MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y CODEC(ZSTD) MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y STATISTICS(tdigest) MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y PRIMARY KEY MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR }
|
||||
|
||||
SELECT 'The same, but with type';
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 DEFAULT 2 MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 MATERIALIZED 3 MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 EPHEMERAL 4 MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 COMMENT '5' MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 CODEC(ZSTD) MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 STATISTICS(tdigest) MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 TTL toDate('2025-01-01') + toIntervalDay(x) MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 COLLATE binary MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 SETTINGS (some_setting = 2) MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 PRIMARY KEY MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR }
|
||||
|
||||
SELECT 'RESET SETTING';
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y DEFAULT 2 RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y MATERIALIZED 3 RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y EPHEMERAL 4 RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y COMMENT '5' RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y CODEC(ZSTD) RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y STATISTICS(tdigest) RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y PRIMARY KEY RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR }
|
||||
|
||||
SELECT 'The same, but with type';
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 DEFAULT 2 RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 MATERIALIZED 3 RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 EPHEMERAL 4 RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 COMMENT '5' RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 CODEC(ZSTD) RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 STATISTICS(tdigest) RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 TTL toDate('2025-01-01') + toIntervalDay(x) RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 COLLATE binary RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 SETTINGS (some_setting = 2) RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR }
|
||||
ALTER TABLE a MODIFY COLUMN y Int64 PRIMARY KEY RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR }
|
||||
|
||||
|
||||
|
||||
SELECT 'All the above, but on server side';
|
||||
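-- The cases below repeat the client-side checks above, but each statement is wrapped in formatQuery(),
-- so parsing happens on the server and the expected failure is serverError SYNTAX_ERROR instead of clientError.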
|
||||
SELECT 'REMOVE';
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y DEFAULT 2 REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y MATERIALIZED 3 REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y EPHEMERAL 4 REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y COMMENT \'5\' REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y CODEC(ZSTD) REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y STATISTICS(tdigest) REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y TTL toDate(\'2025-01-01\') + toIntervalDay(x) REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y COLLATE binary REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y SETTINGS (max_compress_block_size = 20000) REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y PRIMARY KEY REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
|
||||
SELECT 'The same, but with type';
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 DEFAULT 2 REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 MATERIALIZED 3 REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 EPHEMERAL 4 REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 COMMENT \'5\' REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 CODEC(ZSTD) REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 STATISTICS(tdigest) REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 TTL toDate(\'2025-01-01\') + toIntervalDay(x) REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 COLLATE binary REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 SETTINGS (max_compress_block_size = 20000) REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 PRIMARY KEY REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR }
|
||||
|
||||
SELECT 'MODIFY SETTING';
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y DEFAULT 2 MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y MATERIALIZED 3 MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y EPHEMERAL 4 MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y COMMENT \'5\' MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y CODEC(ZSTD) MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y STATISTICS(tdigest) MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y TTL toDate(\'2025-01-01\') + toIntervalDay(x) MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y COLLATE binary MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y SETTINGS (some_setting = 2) MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y PRIMARY KEY MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
|
||||
SELECT 'The same, but with type';
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 DEFAULT 2 MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 MATERIALIZED 3 MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 EPHEMERAL 4 MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 COMMENT \'5\' MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 CODEC(ZSTD) MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 STATISTICS(tdigest) MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 TTL toDate(\'2025-01-01\') + toIntervalDay(x) MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 COLLATE binary MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 SETTINGS (some_setting = 2) MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 PRIMARY KEY MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR }
|
||||
|
||||
SELECT 'RESET SETTING';
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y DEFAULT 2 RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y MATERIALIZED 3 RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y EPHEMERAL 4 RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y COMMENT \'5\' RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y CODEC(ZSTD) RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y STATISTICS(tdigest) RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y TTL toDate(\'2025-01-01\') + toIntervalDay(x) RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y COLLATE binary RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y SETTINGS (some_setting = 2) RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y PRIMARY KEY RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
|
||||
SELECT 'The same, but with type';
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 DEFAULT 2 RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 MATERIALIZED 3 RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 EPHEMERAL 4 RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 COMMENT \'5\' RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 CODEC(ZSTD) RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 STATISTICS(tdigest) RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 TTL toDate(\'2025-01-01\') + toIntervalDay(x) RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 COLLATE binary RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 SETTINGS (some_setting = 2) RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
||||
SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 PRIMARY KEY RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR }
|
@ -0,0 +1,4 @@
|
||||
c1 Nullable(Int64)
|
||||
c2 Nullable(Float64)
|
||||
c3 Nullable(String)
|
||||
42 42.42 True
|
@ -0,0 +1,4 @@
|
||||
set input_format_csv_try_infer_numbers_from_strings = 1;
|
||||
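-- With input_format_csv_try_infer_numbers_from_strings = 1, the quoted "42" and "42.42" should be inferred
-- as Nullable(Int64) and Nullable(Float64), while "True" stays Nullable(String) because Bool values are not
-- inferred from quoted strings (see the expected output in the reference above).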
desc format(CSV, '"42","42.42","True"');
|
||||
select * from format(CSV, '"42","42.42","True"');
|
||||
|
@ -0,0 +1,2 @@
|
||||
0 Value_0
|
||||
1 Value_1
|
@ -0,0 +1,23 @@
|
||||
DROP TABLE IF EXISTS test_table SYNC;
|
||||
CREATE TABLE test_table
|
||||
(
|
||||
id UInt64,
|
||||
value String
|
||||
) ENGINE=ReplicatedMergeTree('/clickhouse/test/{database}/test_table', 'r1') ORDER BY tuple();
|
||||
|
||||
INSERT INTO test_table VALUES (0, 'Value_0'), (1, 'Value_1'), (2, 'Value_2');
|
||||
|
||||
DROP TABLE IF EXISTS test_table_for_in SYNC;
|
||||
CREATE TABLE test_table_for_in
|
||||
(
|
||||
id UInt64
|
||||
) ENGINE=ReplicatedMergeTree('/clickhouse/test/{database}/test_table_for_in', 'r1') ORDER BY tuple();
|
||||
|
||||
INSERT INTO test_table_for_in VALUES (0), (1);
|
||||
|
||||
SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost';
|
||||
|
||||
SELECT id, value FROM test_table WHERE id IN (SELECT id FROM test_table_for_in UNION DISTINCT SELECT id FROM test_table_for_in);
|
||||
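-- Expected result (reference above): only ids 0 and 1 pass the IN filter, so the query returns
-- (0, 'Value_0') and (1, 'Value_1') while reading with 3 parallel replicas.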
|
||||
DROP TABLE test_table SYNC;
|
||||
DROP TABLE test_table_for_in SYNC;
|
@ -0,0 +1 @@
|
||||
CAT 2
|
@ -0,0 +1,45 @@
|
||||
DROP TABLE IF EXISTS ANIMAL SYNC;
|
||||
|
||||
CREATE TABLE ANIMAL ( ANIMAL Nullable(String) ) ENGINE = ReplicatedMergeTree('/clickhouse/test/{database}/animal', 'r1') ORDER BY tuple();
|
||||
|
||||
INSERT INTO ANIMAL (ANIMAL) VALUES ('CAT'), ('FISH'), ('DOG'), ('HORSE'), ('BIRD');
|
||||
|
||||
SET joined_subquery_requires_alias = 0;
|
||||
SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost';
|
||||
|
||||
SELECT *
|
||||
FROM
|
||||
(
|
||||
SELECT
|
||||
x.b AS x,
|
||||
countDistinct(x.c) AS ANIMAL
|
||||
FROM
|
||||
(
|
||||
SELECT
|
||||
a.ANIMAL AS a,
|
||||
'CAT' AS b,
|
||||
c.ANIMAL AS c,
|
||||
d.ANIMAL AS d
|
||||
FROM ANIMAL AS a
|
||||
INNER JOIN ANIMAL AS b ON a.ANIMAL = b.ANIMAL
|
||||
LEFT JOIN ANIMAL AS c ON b.ANIMAL = c.ANIMAL
|
||||
RIGHT JOIN
|
||||
(
|
||||
SELECT *
|
||||
FROM ANIMAL
|
||||
UNION ALL
|
||||
SELECT *
|
||||
FROM ANIMAL
|
||||
UNION ALL
|
||||
SELECT *
|
||||
FROM ANIMAL
|
||||
) AS d ON a.ANIMAL = d.ANIMAL
|
||||
WHERE (d.ANIMAL != 'CAT') AND (c.ANIMAL != 'DOG') AND (b.ANIMAL != 'FISH')
|
||||
) AS x
|
||||
WHERE x.b >= 'CAT'
|
||||
GROUP BY x.b
|
||||
HAVING ANIMAL >= 0
|
||||
) AS ANIMAL
|
||||
WHERE ANIMAL.ANIMAL >= 0;
|
||||
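-- Expected result (reference above): a single row 'CAT 2', i.e. the constant group key 'CAT' and
-- countDistinct(x.c) = 2, computed with parallel replicas enabled.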
|
||||
DROP TABLE ANIMAL SYNC;
|
151
tests/queries/0_stateless/03231_pr_reverse_in_order.reference
Normal file
@ -0,0 +1,151 @@
|
||||
ReadType: InReverseOrder
|
||||
2024-06-11 02:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-11 00:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-10 22:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-10 20:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-10 18:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-10 16:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-10 14:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-10 12:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-10 10:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-10 08:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-10 06:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-10 04:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-10 02:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-10 02:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-10 00:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-10 00:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 22:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 22:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 20:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 20:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 18:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 18:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 16:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 16:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 14:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 14:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 12:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 12:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 10:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 10:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 08:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 08:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 06:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 06:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 04:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 04:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 02:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 02:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 02:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 00:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 00:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-09 00:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 22:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 22:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 22:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 20:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 20:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 20:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 18:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 18:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 18:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 16:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 16:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 16:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 14:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 14:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 14:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 12:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 12:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 12:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 10:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 10:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 10:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 08:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 08:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 08:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 06:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 06:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 06:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 04:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 04:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 04:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 02:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 02:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 02:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 00:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 00:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-08 00:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 22:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 22:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 22:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 20:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 20:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 20:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 18:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 18:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 18:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 16:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 16:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 16:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 14:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 14:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 14:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 12:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 12:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 12:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 10:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 10:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 10:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 08:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 08:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 08:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 06:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 06:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 06:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 04:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 04:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 04:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 02:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 02:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 02:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 00:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 00:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-07 00:00:01 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 22:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 22:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 20:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 20:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 18:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 18:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 16:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 16:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 14:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 14:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 12:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 12:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 10:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 10:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 08:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 08:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 06:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 06:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 04:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 04:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 02:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 02:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 00:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-06 00:00:02 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-05 22:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-05 20:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-05 18:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-05 16:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-05 14:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-05 12:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-05 10:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-05 08:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-05 06:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-05 04:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-05 02:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
||||
2024-06-05 00:00:03 asan 02221_parallel_replicas_bug FAIL fail
|
49
tests/queries/0_stateless/03231_pr_reverse_in_order.sql
Normal file
@ -0,0 +1,49 @@
|
||||
DROP TABLE IF EXISTS checks SYNC;
|
||||
|
||||
CREATE TABLE checks
|
||||
(
|
||||
`check_name` LowCardinality(String),
|
||||
`check_status` LowCardinality(String),
|
||||
`check_start_time` DateTime,
|
||||
`test_name` LowCardinality(String),
|
||||
`test_status` LowCardinality(String),
|
||||
)
|
||||
ENGINE = ReplicatedMergeTree('/clickhouse/{database}/checks', '{replica}')
|
||||
ORDER BY check_start_time;
|
||||
|
||||
SYSTEM STOP MERGES checks;
|
||||
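-- Merges are stopped so the three inserts below stay in separate parts, which presumably is needed to
-- exercise reverse-in-order reading across multiple parts.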
|
||||
INSERT INTO checks SELECT 'asan', if(number % 2, 'success', 'fail'), toDateTime('2024-06-07 00:00:01') + INTERVAL number HOUR, '02221_parallel_replicas_bug', 'FAIL' from numbers(100);
|
||||
INSERT INTO checks SELECT 'asan', if(number % 2, 'success', 'fail'), toDateTime('2024-06-06 00:00:02') + INTERVAL number HOUR, '02221_parallel_replicas_bug', 'FAIL' from numbers(100);
|
||||
INSERT INTO checks SELECT 'asan', if(number % 2, 'success', 'fail'), toDateTime('2024-06-05 00:00:03') + INTERVAL number HOUR, '02221_parallel_replicas_bug', 'FAIL' from numbers(100);
|
||||
|
||||
SELECT trimBoth(explain)
|
||||
FROM
|
||||
(
|
||||
EXPLAIN actions=1 SELECT
|
||||
check_start_time,
|
||||
check_name,
|
||||
test_name,
|
||||
test_status,
|
||||
check_status
|
||||
FROM checks
|
||||
WHERE 1 AND (test_status != 'SKIPPED') AND (test_status != 'OK') AND (check_status != 'success') AND (test_name ILIKE '%parallel_replicas%')
|
||||
ORDER BY
|
||||
check_start_time DESC,
|
||||
check_name ASC,
|
||||
test_name ASC
|
||||
SETTINGS query_plan_read_in_order = 1, optimize_read_in_order = 1, max_parallel_replicas = 1
|
||||
)
|
||||
WHERE explain LIKE '%InReverseOrder%';
|
||||
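-- The filtered EXPLAIN output is expected to contain the line 'ReadType: InReverseOrder'
-- (the first line of the reference file), confirming that the reverse-in-order read is chosen.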
|
||||
SELECT check_start_time, check_name, test_name, test_status, check_status
|
||||
FROM checks
|
||||
WHERE 1
|
||||
AND test_status != 'SKIPPED'
|
||||
AND test_status != 'OK'
|
||||
AND check_status != 'success'
|
||||
AND test_name ilike '%parallel_replicas%'
|
||||
ORDER BY check_start_time desc, check_name, test_name
|
||||
SETTINGS query_plan_read_in_order = 1, optimize_read_in_order = 1, allow_experimental_parallel_reading_from_replicas = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas = 3;
|
||||
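-- Expected output (reference above): the failing check rows ordered by check_start_time descending,
-- this time read with 3 parallel replicas.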
|
||||
DROP TABLE checks SYNC;
|