Merge remote-tracking branch 'origin/master' into morton-utils

Commit: 0bf6fd3671

contrib/croaring (vendored submodule):
@@ -1 +1 @@
-Subproject commit e4a7ad5542746103e71ca8b5e56225baf0014c87
+Subproject commit 9b7cc0ff1c41e9457efb6228cfd2c538d0155303

@@ -16,7 +16,7 @@ CLICKHOUSE_CI_LOGS_USER=${CLICKHOUSE_CI_LOGS_USER:-ci}
 CLICKHOUSE_CI_LOGS_CLUSTER=${CLICKHOUSE_CI_LOGS_CLUSTER:-system_logs_export}
 
 EXTRA_COLUMNS=${EXTRA_COLUMNS:-"pull_request_number UInt32, commit_sha String, check_start_time DateTime('UTC'), check_name String, instance_type String, instance_id String, "}
-EXTRA_COLUMNS_EXPRESSION=${EXTRA_COLUMNS_EXPRESSION:-"CAST(0 AS Int32) AS pull_request_number, '' AS commit_sha, now() AS check_start_time, '' AS check_name, '' AS instance_type, '' AS instance_id"}
+EXTRA_COLUMNS_EXPRESSION=${EXTRA_COLUMNS_EXPRESSION:-"CAST(0 AS UInt32) AS pull_request_number, '' AS commit_sha, now() AS check_start_time, '' AS check_name, '' AS instance_type, '' AS instance_id"}
 EXTRA_ORDER_BY_COLUMNS=${EXTRA_ORDER_BY_COLUMNS:-"check_name, "}
 
 function __set_connection_args

@@ -444,7 +444,7 @@ DB::Exception: Decimal result's scale is less than argument's one: While process
 
 ## byteSwap
 
-Reverses the bytes of an integer, i.e. changes its [endianness](https://en.wikipedia.org/wiki/Endianness). Currently, integers of up to 64 bit are supported.
+Reverses the bytes of an integer, i.e. changes its [endianness](https://en.wikipedia.org/wiki/Endianness).
 
 **Syntax**
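For reference, byteSwap reverses the byte order of the value's binary representation; a 32-bit example computed by hand (an illustration, not part of this commit):

    SELECT byteSwap(3351772109);
    -- 3351772109 is 0xC7C7FBCD; with its four bytes reversed it reads 0xCDFBC7C7 = 3455829959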

@@ -1474,7 +1474,7 @@ try
     {
         std::lock_guard lock(servers_lock);
-        /// We should start interserver communications before (and more imporant shutdown after) tables.
+        /// We should start interserver communications before (and more important shutdown after) tables.
         /// Because server can wait for a long-running queries (for example in tcp_handler) after interserver handler was already shut down.
         /// In this case we will have replicated tables which are unable to send any parts to other replicas, but still can
         /// communicate with zookeeper, execute merges, etc.

@@ -223,7 +223,7 @@ namespace DB
 void CaresPTRResolver::process_possible_timeout(ares_channel channel)
 {
-    /* Call ares_process() unconditonally here, even if we simply timed out
+    /* Call ares_process() unconditionally here, even if we simply timed out
        above, as otherwise the ares name resolve won't timeout! */
     ares_process_fd(channel, ARES_SOCKET_BAD, ARES_SOCKET_BAD);
 }

@@ -418,7 +418,7 @@ finish:
     /// this two vals are useless, xxx|xxx cannot be trivial nor prefix.
     bool next_is_trivial = true;
     pos = analyzeImpl(regexp, pos, required_substring, next_is_trivial, next_alternatives);
-    /// For xxx|xxx|xxx, we only conbine the alternatives and return a empty required_substring.
+    /// For xxx|xxx|xxx, we only combine the alternatives and return a empty required_substring.
     if (next_alternatives.empty() || shortest_literal_length(next_alternatives) < required_substring.literal.size())
     {
         global_alternatives.push_back(required_substring);

@@ -321,7 +321,7 @@ protected:
         percolate(ptr);
     }
 
-    // This is equivallent to one step of bubble sort
+    // This is equivalent to one step of bubble sort
     void percolate(Counter * counter)
     {
         while (counter->slot > 0)

@@ -284,7 +284,7 @@ void deserializeLogMagic(ReadBuffer & in)
     /// strange, that this 550 bytes obviously was a part of Create transaction,
     /// but the operation code was -1. We have added debug prints to original
     /// zookeeper (3.6.3) and found that it just reads 550 bytes of this "Error"
-    /// transaction, tooks the first 4 bytes as an error code (it was 79, non
+    /// transaction, took the first 4 bytes as an error code (it was 79, non
     /// existing code) and skip all remaining 546 bytes. NOTE: it looks like a bug
     /// in ZooKeeper.
     ///

@@ -339,7 +339,7 @@ static DataTypePtr getLeastCommonTypeForObject(const DataTypes & types, bool che
     for (size_t i = 1; i < subtypes.size(); ++i)
         if (first_dim != getNumberOfDimensions(*subtypes[i]))
             throw Exception(ErrorCodes::TYPE_MISMATCH,
-                "Uncompatible types of subcolumn '{}': {} and {}",
+                "Incompatible types of subcolumn '{}': {} and {}",
                 key.getPath(), subtypes[0]->getName(), subtypes[i]->getName());
 
     tuple_paths.emplace_back(key);

@@ -33,7 +33,7 @@ private:
     /// Number of references (hardlinks) to this metadata file.
     ///
-    /// FIXME: Why we are tracking it explicetly, without
+    /// FIXME: Why we are tracking it explicitly, without
     /// info from filesystem????
     uint32_t ref_count = 0;

@@ -17,6 +17,15 @@ T byteSwap(T x)
     return std::byteswap(x);
 }
 
+template <typename T>
+requires std::is_same_v<T, UInt128> || std::is_same_v<T, Int128> || std::is_same_v<T, UInt256> || std::is_same_v<T, Int256>
+T byteSwap(T x)
+{
+    T dest;
+    reverseMemcpy(&dest, &x, sizeof(T));
+    return dest;
+}
+
 template <typename T>
 T byteSwap(T)
 {

@@ -55,7 +64,7 @@ REGISTER_FUNCTION(ByteSwap)
     factory.registerFunction<FunctionByteSwap>(
         FunctionDocumentation{
             .description = R"(
-Reverses the bytes of an integer, i.e. changes its [endianness](https://en.wikipedia.org/wiki/Endianness). Currently, integers of up to 64 bit are supported.
+Reverses the bytes of an integer, i.e. changes its [endianness](https://en.wikipedia.org/wiki/Endianness).
 
 **Example**
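Since the new wide-integer overload simply reverses the 16 or 32 bytes via reverseMemcpy, applying byteSwap twice is an identity; a quick sanity check (illustrative, not one of this commit's tests):

    SELECT byteSwap(byteSwap(-9223372036854775809::Int128)) = -9223372036854775809::Int128;
    -- 1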

@@ -182,7 +182,7 @@ INSTANTIATE_TEST_SUITE_P(Basic,
         DateLUT::instance("Europe/Minsk")
     },
     {
-        "When scale is 0, subsecond part (and separtor) is missing from string",
+        "When scale is 0, subsecond part (and separator) is missing from string",
         "2019-09-16 19:20:17",
         1568650817ULL,
         0,

@@ -197,4 +197,3 @@ INSTANTIATE_TEST_SUITE_P(Basic,
     }
 })
 );

@@ -166,7 +166,7 @@ namespace
     access_to_revoke.grant(elements_to_revoke);
     access_to_revoke.makeIntersection(all_granted_access);
 
-    /// Build more accurate list of elements to revoke, now we use an intesection of the initial list of elements to revoke
+    /// Build more accurate list of elements to revoke, now we use an intersection of the initial list of elements to revoke
     /// and all the granted access rights to these grantees.
     bool grant_option = !elements_to_revoke.empty() && elements_to_revoke[0].grant_option;
     elements_to_revoke.clear();

@@ -3,6 +3,7 @@
 #include <Common/quoteString.h>
 #include <Common/typeid_cast.h>
 #include <Common/FieldVisitorsAccurateComparison.h>
+#include <Common/checkStackSize.h>
 
 #include <Core/ColumnNumbers.h>
 #include <Core/ColumnWithTypeAndName.h>

@@ -691,6 +692,8 @@ bool ActionsMatcher::needChildVisit(const ASTPtr & node, const ASTPtr & child)
 
 void ActionsMatcher::visit(const ASTPtr & ast, Data & data)
 {
+    checkStackSize();
+
     if (const auto * identifier = ast->as<ASTIdentifier>())
         visit(*identifier, ast, data);
     else if (const auto * table = ast->as<ASTTableIdentifier>())

@@ -11,7 +11,7 @@ namespace DB
 namespace
 {
 
-/// @note We place strings in ascending order here under the assumption it colud speed up String to Enum conversion.
+/// @note We place strings in ascending order here under the assumption it could speed up String to Enum conversion.
 String makeStringsEnum(const std::set<String> & values)
 {
     String enum_string = "Enum8(";

@@ -140,7 +140,7 @@ void QueryAliasesMatcher<T>::visitOther(const ASTPtr & ast, Data & data)
     information for our ast node with query string. And this alias will be dropped because prefer_alias_to_column_name for ASTWIthAlias
     by default is false.
 
-    It is imporant that subquery can be converted to literal during ExecuteScalarSubqueriesVisitor.
+    It is important that subquery can be converted to literal during ExecuteScalarSubqueriesVisitor.
     And code below check if we previously set for subquery alias as _subquery, and if it is true
     then set prefer_alias_to_column_name = true for node that was optimized during ExecuteScalarSubqueriesVisitor.
     */

@@ -2435,6 +2435,7 @@ const std::vector<std::pair<std::string_view, Operator>> ParserExpressionImpl::o
     {"||", Operator("concat", 10, 2, OperatorType::Mergeable)},
     {"+", Operator("plus", 11, 2)},
     {"-", Operator("minus", 11, 2)},
+    {"−", Operator("minus", 11, 2)},
     {"*", Operator("multiply", 12, 2)},
     {"/", Operator("divide", 12, 2)},
     {"%", Operator("modulo", 12, 2)},

@@ -2448,7 +2449,8 @@ const std::vector<std::pair<std::string_view, Operator>> ParserExpressionImpl::o
 const std::vector<std::pair<std::string_view, Operator>> ParserExpressionImpl::unary_operators_table
 {
     {"NOT", Operator("not", 5, 1)},
-    {"-", Operator("negate", 13, 1)}
+    {"-", Operator("negate", 13, 1)},
+    {"−", Operator("negate", 13, 1)}
 };
 
 const Operator ParserExpressionImpl::finish_between_operator("", 8, 0, OperatorType::FinishBetween);

@@ -110,7 +110,7 @@ String IParserKQLFunction::generateUniqueIdentifier()
     // This particular random generator hits each number exactly once before looping over.
     // Because of this, it's sufficient for queries consisting of up to 2^16 (= 65536) distinct function calls.
     // Reference: https://www.pcg-random.org/using-pcg-cpp.html#insecure-generators
-    static pcg32_once_insecure random_generator;
+    static thread_local pcg32_once_insecure random_generator;
     return std::to_string(random_generator());
 }

@@ -426,7 +426,17 @@ Token Lexer::nextTokenImpl()
                 return Token(TokenType::VerticalDelimiter, token_begin, ++pos);
             return Token(TokenType::Error, token_begin, pos);
         }
 
+        case '\xE2':
+        {
+            /// Mathematical minus symbol, UTF-8
+            if (pos + 3 <= end && pos[1] == '\x88' && pos[2] == '\x92')
+            {
+                pos += 3;
+                return Token(TokenType::Minus, token_begin, pos);
+            }
+            /// Other characters starting at E2 can be parsed, see skipWhitespacesUTF8
+            [[fallthrough]];
+        }
         default:
             if (*pos == '$')
             {
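Together with the new operator-table entries above, this lexer case makes the UTF-8 mathematical minus sign U+2212 (byte sequence E2 88 92) behave like the ASCII '-', both as a binary and as a unary operator. The new 02869_unicode_minus test later in this diff exercises exactly this:

    SELECT 1 − 2;  -- returns -1
    SELECT −1;     -- returns -1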

@@ -263,6 +263,10 @@ ASTPtr tryParseQuery(
     const auto last_token = token_iterator.max();
     _out_query_end = last_token.end;
 
+    /// Also check on the AST level, because the generated AST depth can be greater than the recursion depth of the parser.
+    if (res && max_parser_depth)
+        res->checkDepth(max_parser_depth);
+
     ASTInsertQuery * insert = nullptr;
     if (parse_res)
     {
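The parser's own recursion limit cannot catch a compact query that expands into a much deeper AST (the new 02894_ast_depth_check test below builds one from repeated quoted ''IN'' aliases), hence the extra AST-level check. The guard can also be observed by lowering the limit (a sketch, assuming max_parser_depth is picked up from session settings for subsequent queries):

    SET max_parser_depth = 4;
    SELECT coalesce(null, coalesce(null, 1));  -- rejected as too deep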

@@ -226,7 +226,7 @@ struct StatisticsStringRef
 /// or [element of ColumnString] -> std::string_view.
 /// We do this conversion in small batches rather than all at once, just before encoding the batch,
 /// in hopes of getting better performance through cache locality.
-/// The Coverter* structs below are responsible for that.
+/// The Converter* structs below are responsible for that.
 /// When conversion is not needed, getBatch() will just return pointer into original data.
 
 template <typename Col, typename To, typename MinMaxType = typename std::conditional_t<

@@ -1174,7 +1174,7 @@ void MergeTreeRangeReader::fillPartOffsetColumn(ReadResult & result, UInt64 lead
     UInt64 * pos = vec.data();
     UInt64 * end = &vec[num_rows];
 
-    /// Fill the reamining part of the previous range (it was started in the previous read request).
+    /// Fill the remaining part of the previous range (it was started in the previous read request).
     while (pos < end && leading_begin_part_offset < leading_end_part_offset)
         *pos++ = leading_begin_part_offset++;
 

@@ -152,7 +152,7 @@ void MergeTreeReaderCompact::fillColumnPositions()
     auto [name_in_storage, subcolumn_name] = Nested::splitName(column_to_read.name);
 
     /// If it is a part of Nested, we need to get the column from
-    /// storage metatadata which is converted to Nested type with subcolumns.
+    /// storage metadata which is converted to Nested type with subcolumns.
     /// It is required for proper counting of shared streams.
     if (!subcolumn_name.empty())
     {

@@ -749,7 +749,7 @@ QueueRepresentation getQueueRepresentation(const std::list<ReplicatedMergeTreeLo
     const auto & key = entry->znode_name;
     switch (entry->type)
     {
-        /// explicetely specify all types of entries without default, so if
+        /// explicitly specify all types of entries without default, so if
         /// someone decide to add new type it will produce a compiler warning (error in our case)
         case LogEntryType::GET_PART:
         case LogEntryType::ATTACH_PART:

@@ -240,7 +240,7 @@ private:
     /// by first argument. If remove_part == true, than also remove part itself.
     /// Both negative flags will throw exception.
     ///
-    /// Part removed from mutations which satisfy contitions:
+    /// Part removed from mutations which satisfy conditions:
     /// block_number > part.getDataVersion()
     /// or block_number == part.getDataVersion()
     ///     ^ (this may happen if we downloaded mutated part from other replica)

@@ -1,3 +1,4 @@
+#include <algorithm>
 #include <memory>
 #include <Core/NamesAndTypes.h>
 #include <Core/TypeId.h>

@@ -81,14 +82,33 @@ bool extractFunctions(const ASTPtr & expression, const std::function<bool(const
     }
     else if (function->name == "or")
     {
-        bool ret = true;
+        bool ret = false;
         ASTs or_args;
         for (const auto & child : function->arguments->children)
-            ret &= extractFunctions(child, is_constant, or_args);
-        /// We can keep condition only if it still OR condition (i.e. we
-        /// have dependent conditions for columns at both sides)
-        if (or_args.size() == 2)
+            ret |= extractFunctions(child, is_constant, or_args);
+
+        if (!or_args.empty())
+        {
+            /// In case of there are less number of arguments for which
+            /// is_constant() == true, we need to add always-true
+            /// implicitly to avoid breaking AND invariant.
+            ///
+            /// Consider the following:
+            ///
+            ///     ((value = 10) OR (_table = 'v2')) AND ((_table = 'v1') OR (value = 20))
+            ///
+            /// Without implicit always-true:
+            ///
+            ///     (_table = 'v2') AND (_table = 'v1')
+            ///
+            /// With:
+            ///
+            ///     (_table = 'v2' OR 1) AND (_table = 'v1' OR 1) -> (_table = 'v2') OR (_table = 'v1')
+            ///
+            if (or_args.size() != function->arguments->children.size())
+                or_args.push_back(std::make_shared<ASTLiteral>(Field(1)));
             result.push_back(makeASTForLogicalOr(std::move(or_args)));
+        }
         return ret;
     }
 }
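The practical effect: a disjunction whose extra arguments are constant no longer poisons the whole conjunction. The new 02896_multiple_OR test at the end of this diff pins down the previously broken case:

    SELECT * FROM or_bug WHERE (key = 1) OR false OR false;  -- must return the row with key = 1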

@@ -165,8 +185,10 @@ bool prepareFilterBlockWithQuery(const ASTPtr & query, ContextPtr context, Block
     if (!select.where() && !select.prewhere())
         return unmodified;
 
-    // Provide input columns as constant columns to check if an expression is constant.
-    std::function<bool(const ASTPtr &)> is_constant = [&block, &context](const ASTPtr & node)
+    // Provide input columns as constant columns to check if an expression is
+    // constant and depends on the columns from provided block (the last is
+    // required to allow skipping some conditions for handling OR).
+    std::function<bool(const ASTPtr &)> is_constant = [&block, &context](const ASTPtr & expr)
     {
         auto actions = std::make_shared<ActionsDAG>(block.getColumnsWithTypeAndName());
         PreparedSetsPtr prepared_sets = std::make_shared<PreparedSets>();

@@ -178,13 +200,26 @@ bool prepareFilterBlockWithQuery(const ASTPtr & query, ContextPtr context, Block
             context, SizeLimits{}, 1, source_columns, std::move(actions), prepared_sets, true, true, true,
             { aggregation_keys, grouping_set_keys, GroupByKind::NONE });
 
-        ActionsVisitor(visitor_data).visit(node);
+        ActionsVisitor(visitor_data).visit(expr);
         actions = visitor_data.getActions();
+        auto expr_column_name = expr->getColumnName();
+
+        const auto * expr_const_node = actions->tryFindInOutputs(expr_column_name);
+        if (!expr_const_node)
+            return false;
+        auto filter_actions = ActionsDAG::buildFilterActionsDAG({expr_const_node}, {}, context);
+        const auto & nodes = filter_actions->getNodes();
+        bool has_dependent_columns = std::any_of(nodes.begin(), nodes.end(), [&](const auto & node)
+        {
+            return block.has(node.result_name);
+        });
+        if (!has_dependent_columns)
+            return false;
+
         auto expression_actions = std::make_shared<ExpressionActions>(actions);
         auto block_with_constants = block;
         expression_actions->execute(block_with_constants);
-        auto column_name = node->getColumnName();
-        return block_with_constants.has(column_name) && isColumnConst(*block_with_constants.getByName(column_name).column);
+        return block_with_constants.has(expr_column_name) && isColumnConst(*block_with_constants.getByName(expr_column_name).column);
     };
 
     /// Create an expression that evaluates the expressions in WHERE and PREWHERE, depending only on the existing columns.
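This dependent-columns check is what makes the worked example in the extractFunctions comment above behave: a condition of the shape below keeps its _table disjuncts only because each side actually references a column of the virtual block (query shape taken from that comment; m, v1, v2 are the Merge-table setup used in the jinja test later in this diff):

    SELECT _table, key FROM m WHERE ((value = 10) OR (_table = 'v2')) AND ((_table = 'v1') OR (value = 20));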

@@ -1,94 +1,48 @@
test_access_for_functions/test.py::test_access_rights_for_function
test_build_sets_from_multiple_threads/test.py::test_set
test_concurrent_backups_s3/test.py::test_concurrent_backups
test_distributed_ddl/test.py::test_default_database[configs]
test_distributed_ddl/test.py::test_default_database[configs_secure]
test_distributed_ddl/test.py::test_on_server_fail[configs]
test_distributed_ddl/test.py::test_on_server_fail[configs_secure]
test_distributed_insert_backward_compatibility/test.py::test_distributed_in_tuple
test_dictionaries_update_and_reload/test.py::test_reload_after_fail_in_cache_dictionary
test_distributed_backward_compatability/test.py::test_distributed_in_tuple
test_distributed_type_object/test.py::test_distributed_type_object
test_drop_is_lock_free/test.py::test_query_is_lock_free[detach table]
test_executable_table_function/test.py::test_executable_function_input_python
test_mask_sensitive_info/test.py::test_encryption_functions
test_merge_table_over_distributed/test.py::test_global_in
test_merge_table_over_distributed/test.py::test_select_table_name_from_merge_over_distributed
test_merge_tree_s3/test.py::test_heavy_insert_select_check_memory[node]
test_mutations_with_merge_tree/test.py::test_mutations_with_merge_background_task
test_passing_max_partitions_to_read_remotely/test.py::test_default_database_on_cluster
test_row_policy/test.py::test_change_of_users_xml_changes_row_policies
test_row_policy/test.py::test_dcl_introspection
test_row_policy/test.py::test_dcl_management
test_row_policy/test.py::test_dcl_users_with_policies_from_users_xml
test_row_policy/test.py::test_grant_create_row_policy
test_row_policy/test.py::test_introspection
test_row_policy/test.py::test_join
test_row_policy/test.py::test_miscellaneous_engines
test_row_policy/test.py::test_policy_from_users_xml_affects_only_user_assigned
test_row_policy/test.py::test_policy_on_distributed_table_via_role
test_row_policy/test.py::test_reload_users_xml_by_timer
test_row_policy/test.py::test_row_policy_filter_with_subquery
test_row_policy/test.py::test_smoke
test_row_policy/test.py::test_some_users_without_policies
test_row_policy/test.py::test_tags_with_db_and_table_names
test_row_policy/test.py::test_throwif_error_in_prewhere_with_same_condition_as_filter
test_row_policy/test.py::test_throwif_error_in_where_with_same_condition_as_filter
test_row_policy/test.py::test_throwif_in_prewhere_doesnt_expose_restricted_data
test_row_policy/test.py::test_throwif_in_where_doesnt_expose_restricted_data
test_row_policy/test.py::test_users_xml_is_readonly
test_row_policy/test.py::test_with_prewhere
test_settings_constraints_distributed/test.py::test_select_clamps_settings
test_compression_codec_read/test.py::test_default_codec_read
test_materialized_mysql_database/test.py::test_select_without_columns_5_7
test_materialized_mysql_database/test.py::test_select_without_columns_8_0
test_shard_level_const_function/test.py::test_remote
test_storage_postgresql/test.py::test_postgres_select_insert
test_storage_rabbitmq/test.py::test_rabbitmq_materialized_view
test_system_merges/test.py::test_mutation_simple[]
test_system_merges/test.py::test_mutation_simple[replicated]
test_disk_over_web_server/test.py::test_cache[node2]
test_disk_over_web_server/test.py::test_incorrect_usage
test_disk_over_web_server/test.py::test_replicated_database
test_disk_over_web_server/test.py::test_unavailable_server
test_disk_over_web_server/test.py::test_usage[node2]
test_settings_profile/test.py::test_show_profiles
test_sql_user_defined_functions_on_cluster/test.py::test_sql_user_defined_functions_on_cluster
test_postgresql_protocol/test.py::test_python_client
test_mysql_database_engine/test.py::test_mysql_ddl_for_mysql_database
test_profile_events_s3/test.py::test_profile_events
test_user_defined_object_persistence/test.py::test_persistence
test_replicating_constants/test.py::test_different_versions
test_select_access_rights/test_main.py::test_alias_columns
test_select_access_rights/test_main.py::test_select_count
test_select_access_rights/test_main.py::test_select_join
test_wrong_db_or_table_name/test.py::test_wrong_table_name
test_odbc_interaction/test.py::test_postgres_insert
test_zookeeper_config/test.py::test_chroot_with_different_root
test_zookeeper_config/test.py::test_chroot_with_same_root
test_merge_tree_azure_blob_storage/test.py::test_table_manipulations
test_parallel_replicas_skip_shards/test.py::test_skip_unavailable_shards

@@ -1,68 +1,46 @@
00223_shard_distributed_aggregation_memory_efficient
00562_in_subquery_merge_tree
00593_union_all_assert_columns_removed
00673_subquery_prepared_set_performance
00717_merge_and_distributed
00725_memory_tracking
00754_distributed_optimize_skip_select_on_unused_shards
00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere
00927_asof_joins
00940_order_by_read_in_order_query_plan
00945_bloom_filter_index
00981_in_subquery_with_tuple
01049_join_low_card_bug_long
01062_pm_all_join_with_block_continuation
01064_incremental_streaming_from_2_src_with_feedback
01071_force_optimize_skip_unused_shards
01072_optimize_skip_unused_shards_const_expr_eval
01083_expressions_in_engine_arguments
01086_odbc_roundtrip
01155_rename_move_materialized_view
01173_transaction_control_queries
01211_optimize_skip_unused_shards_type_mismatch
01213_optimize_skip_unused_shards_DISTINCT
01214_test_storage_merge_aliases_with_where
01231_distributed_aggregation_memory_efficient_mix_levels
01244_optimize_distributed_group_by_sharding_key
01247_optimize_distributed_group_by_sharding_key_dist_on_dist
01268_mv_scalars
01268_shard_avgweighted
01270_optimize_skip_unused_shards_low_cardinality
01319_optimize_skip_unused_shards_nesting
01428_nullable_asof_join
01455_shard_leaf_max_rows_bytes_to_read
01495_subqueries_in_with_statement
01504_rocksdb
01526_client_start_and_exit
01527_dist_sharding_key_dictGet_reload
01528_allow_nondeterministic_optimize_skip_unused_shards
01540_verbatim_partition_pruning
01560_merge_distributed_join
01563_distributed_query_finish
01576_alias_column_rewrite
01583_const_column_in_set_index
01584_distributed_buffer_cannot_find_column
01585_use_index_for_global_in
01585_use_index_for_global_in_with_null
01586_columns_pruning
01624_soft_constraints
01651_bugs_from_15889
01656_test_query_log_factories_info
01676_clickhouse_client_autocomplete
01681_bloom_filter_nullable_column
01700_system_zookeeper_path_in
01710_projection_additional_filters
01739_index_hint
02880_indexHint__partition_id
01747_join_view_filter_dictionary
01748_partition_id_pruning
01756_optimize_skip_unused_shards_rewrite_in
01757_optimize_skip_unused_shards_limit
01758_optimize_skip_unused_shards_once
01759_optimize_skip_unused_shards_zero_shards
01761_cast_to_enum_nullable
01786_explain_merge_tree
01889_key_condition_function_chains
01890_materialized_distributed_join
01901_in_literal_shard_prune
01925_join_materialized_columns

@@ -70,8 +48,6 @@
01930_optimize_skip_unused_shards_rewrite_in
01947_mv_subquery
01952_optimize_distributed_group_by_sharding_key
02000_join_on_const
02001_shard_num_shard_count
02131_used_row_policies_in_query_log
02139_MV_with_scalar_subquery
02174_cte_scalar_cache_mv

@@ -93,35 +69,18 @@
02554_fix_grouping_sets_predicate_push_down
02575_merge_prewhere_different_default_kind
02713_array_low_cardinality_string
02707_skip_index_with_in
02241_join_rocksdb_bs
02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET
01115_join_with_dictionary
01009_global_array_join_names
00917_multiple_joins_denny_crane
00725_join_on_bug_1
00636_partition_key_parts_pruning
00261_storage_aliases_and_array_join
01825_type_json_multiple_files
01281_group_by_limit_memory_tracking
02723_zookeeper_name
00002_log_and_exception_messages_formatting
01646_rewrite_sum_if_bug
02725_agg_projection_resprect_PK
01019_alter_materialized_view_consistent
01600_parts_states_metrics_long
01600_parts_types_metrics_long
01287_max_execution_speed
02703_row_policy_for_database
02721_url_cluster
02534_s3_cluster_insert_select_schema_inference
02765_parallel_replicas_final_modifier
02784_parallel_replicas_automatic_disabling
02581_share_big_sets_between_mutation_tasks_long
02581_share_big_sets_between_multiple_mutations_tasks_long
00992_system_parts_race_condition_zookeeper_long
02818_parameterized_view_with_cte_multiple_usage
02790_optimize_skip_unused_shards_join
01940_custom_tld_sharding_key
02815_range_dict_no_direct_join
02861_join_on_nullsafe_compare

@@ -296,7 +296,7 @@ class CiLogsCredentials:
             logging.info("Do not use external logs pushing")
             return ""
         extra_columns = (
-            f"CAST({pr_info.number} AS Int32) AS pull_request_number, '{pr_info.sha}' AS commit_sha, "
+            f"CAST({pr_info.number} AS UInt32) AS pull_request_number, '{pr_info.sha}' AS commit_sha, "
            f"toDateTime('{check_start_time}', 'UTC') AS check_start_time, '{check_name}' AS check_name, "
            f"'{get_instance_type()}' AS instance_type, '{get_instance_id()}' AS instance_id"
        )

@@ -719,6 +719,36 @@ def test_too_many_parts(started_cluster):
     pg_manager2.drop_materialized_db()
 
 
+def test_toast(started_cluster):
+    table = "test_toast"
+    pg_manager.execute(
+        f"CREATE TABLE {table} (id integer PRIMARY KEY, txt text, other text)"
+    )
+    pg_manager.create_materialized_db(
+        ip=started_cluster.postgres_ip,
+        port=started_cluster.postgres_port,
+        settings=[
+            f"materialized_postgresql_tables_list = '{table}'",
+            "materialized_postgresql_backoff_min_ms = 100",
+            "materialized_postgresql_backoff_max_ms = 100",
+        ],
+    )
+
+    pg_manager.execute(
+        f"""\
+INSERT INTO {table} (id, txt)\
+VALUES (1, (SELECT array_to_string(ARRAY(SELECT chr((100 + round(random() * 25)) :: integer) FROM generate_series(1,30000) as t(i)), '')))
+    """
+    )
+
+    check_tables_are_synchronized(
+        instance,
+        table,
+        postgres_database=pg_manager.get_default_database(),
+        order_by="id",
+    )
+
+
 if __name__ == "__main__":
     cluster.start()
     input("Cluster created, press any key to destroy...")

tests/integration/test_store_cleanup/__init__.py (new empty file)

tests/integration/test_store_cleanup/configs/store_cleanup.xml (new file; the path is inferred from the main_configs reference in the test below):
@@ -0,0 +1,11 @@
<clickhouse>
    <database_catalog_unused_dir_hide_timeout_sec>0</database_catalog_unused_dir_hide_timeout_sec>
    <database_catalog_unused_dir_rm_timeout_sec>60</database_catalog_unused_dir_rm_timeout_sec>
    <database_catalog_unused_dir_cleanup_period_sec>1</database_catalog_unused_dir_cleanup_period_sec>

    <!-- We don't really need [Zoo]Keeper for this test.
         And it makes sense to have at least one test with TestKeeper. -->
    <zookeeper>
        <implementation>testkeeper</implementation>
    </zookeeper>
</clickhouse>

tests/integration/test_store_cleanup/test.py (new file, 212 lines):
@@ -0,0 +1,212 @@
import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance(
    "node1", stay_alive=True, main_configs=["configs/store_cleanup.xml"]
)

path_to_data = "/var/lib/clickhouse/"


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster

    finally:
        cluster.shutdown()


def test_store_cleanup(started_cluster):
    node1.query("CREATE DATABASE db UUID '10000000-1000-4000-8000-000000000001'")
    node1.query(
        "CREATE TABLE db.log UUID '10000000-1000-4000-8000-000000000002' ENGINE=Log AS SELECT 1"
    )
    node1.query(
        "CREATE TABLE db.mt UUID '10000000-1000-4000-8000-000000000003' ENGINE=MergeTree ORDER BY tuple() AS SELECT 1"
    )
    node1.query(
        "CREATE TABLE db.mem UUID '10000000-1000-4000-8000-000000000004' ENGINE=Memory AS SELECT 1"
    )

    node1.query("CREATE DATABASE db2 UUID '20000000-1000-4000-8000-000000000001'")
    node1.query(
        "CREATE TABLE db2.log UUID '20000000-1000-4000-8000-000000000002' ENGINE=Log AS SELECT 1"
    )
    node1.query("DETACH DATABASE db2")

    node1.query("CREATE DATABASE db3 UUID '30000000-1000-4000-8000-000000000001'")
    node1.query(
        "CREATE TABLE db3.log UUID '30000000-1000-4000-8000-000000000002' ENGINE=Log AS SELECT 1"
    )
    node1.query(
        "CREATE TABLE db3.log2 UUID '30000000-1000-4000-8000-000000000003' ENGINE=Log AS SELECT 1"
    )
    node1.query("DETACH TABLE db3.log")
    node1.query("DETACH TABLE db3.log2 PERMANENTLY")

    assert "d---------" not in node1.exec_in_container(
        ["ls", "-l", f"{path_to_data}/store"]
    )
    assert "d---------" not in node1.exec_in_container(
        ["ls", "-l", f"{path_to_data}/store/100"]
    )
    assert "d---------" not in node1.exec_in_container(
        ["ls", "-l", f"{path_to_data}/store/200"]
    )
    assert "d---------" not in node1.exec_in_container(
        ["ls", "-l", f"{path_to_data}/store/300"]
    )

    node1.stop_clickhouse(kill=True)
    # All dirs related to `db` will be removed
    node1.exec_in_container(["rm", f"{path_to_data}/metadata/db.sql"])

    node1.exec_in_container(["mkdir", f"{path_to_data}/store/kek"])
    node1.exec_in_container(["touch", f"{path_to_data}/store/12"])
    try:
        node1.exec_in_container(["mkdir", f"{path_to_data}/store/456"])
    except Exception as e:
        print("Failed to create 456/:", str(e))
    node1.exec_in_container(["mkdir", f"{path_to_data}/store/456/testgarbage"])
    node1.exec_in_container(
        ["mkdir", f"{path_to_data}/store/456/30000000-1000-4000-8000-000000000003"]
    )
    node1.exec_in_container(
        ["touch", f"{path_to_data}/store/456/45600000-1000-4000-8000-000000000003"]
    )
    node1.exec_in_container(
        ["mkdir", f"{path_to_data}/store/456/45600000-1000-4000-8000-000000000004"]
    )

    node1.start_clickhouse()
    node1.query("DETACH DATABASE db2")
    node1.query("DETACH TABLE db3.log")

    node1.wait_for_log_line(
        "Removing access rights for unused directory",
        timeout=60,
        look_behind_lines=1000000,
    )
    node1.wait_for_log_line(
        "directories from store", timeout=60, look_behind_lines=1000000
    )

    store = node1.exec_in_container(["ls", f"{path_to_data}/store"])
    assert "100" in store
    assert "200" in store
    assert "300" in store
    assert "456" in store
    assert "kek" in store
    assert "12" in store
    assert "d---------" in node1.exec_in_container(
        ["ls", "-l", f"{path_to_data}/store"]
    )
    assert "d---------" in node1.exec_in_container(
        ["ls", "-l", f"{path_to_data}/store/456"]
    )

    # Metadata is removed, so store/100 contains garbage
    store100 = node1.exec_in_container(["ls", f"{path_to_data}/store/100"])
    assert "10000000-1000-4000-8000-000000000001" in store100
    assert "10000000-1000-4000-8000-000000000002" in store100
    assert "10000000-1000-4000-8000-000000000003" in store100
    assert "d---------" in node1.exec_in_container(
        ["ls", "-l", f"{path_to_data}/store/100"]
    )

    # Database is detached, nothing to clean up
    store200 = node1.exec_in_container(["ls", f"{path_to_data}/store/200"])
    assert "20000000-1000-4000-8000-000000000001" in store200
    assert "20000000-1000-4000-8000-000000000002" in store200
    assert "d---------" not in node1.exec_in_container(
        ["ls", "-l", f"{path_to_data}/store/200"]
    )

    # Tables are detached, nothing to clean up
    store300 = node1.exec_in_container(["ls", f"{path_to_data}/store/300"])
    assert "30000000-1000-4000-8000-000000000001" in store300
    assert "30000000-1000-4000-8000-000000000002" in store300
    assert "30000000-1000-4000-8000-000000000003" in store300
    assert "d---------" not in node1.exec_in_container(
        ["ls", "-l", f"{path_to_data}/store/300"]
    )

    # Manually created garbage
    store456 = node1.exec_in_container(["ls", f"{path_to_data}/store/456"])
    assert "30000000-1000-4000-8000-000000000003" in store456
    assert "45600000-1000-4000-8000-000000000003" in store456
    assert "45600000-1000-4000-8000-000000000004" in store456
    assert "testgarbage" in store456
    assert "----------" in node1.exec_in_container(
        ["ls", "-l", f"{path_to_data}/store/456"]
    )

    node1.wait_for_log_line(
        "Removing unused directory", timeout=90, look_behind_lines=1000000
    )
    node1.wait_for_log_line(
        "directories from store", timeout=90, look_behind_lines=1000000
    )
    node1.wait_for_log_line(
        "Nothing to clean up from store/", timeout=90, look_behind_lines=1000000
    )

    store = node1.exec_in_container(["ls", f"{path_to_data}/store"])
    assert "100" in store
    assert "200" in store
    assert "300" in store
    assert "456" in store
    assert "kek" not in store  # changed
    assert "\n12\n" not in store  # changed
    assert "d---------" not in node1.exec_in_container(
        ["ls", "-l", f"{path_to_data}/store"]
    )  # changed

    # Metadata is removed, so store/100 contains garbage
    store100 = node1.exec_in_container(["ls", f"{path_to_data}/store/100"])  # changed
    assert "10000000-1000-4000-8000-000000000001" not in store100  # changed
    assert "10000000-1000-4000-8000-000000000002" not in store100  # changed
    assert "10000000-1000-4000-8000-000000000003" not in store100  # changed
    assert "d---------" not in node1.exec_in_container(
        ["ls", "-l", f"{path_to_data}/store/100"]
    )  # changed

    # Database is detached, nothing to clean up
    store200 = node1.exec_in_container(["ls", f"{path_to_data}/store/200"])
    assert "20000000-1000-4000-8000-000000000001" in store200
    assert "20000000-1000-4000-8000-000000000002" in store200
    assert "d---------" not in node1.exec_in_container(
        ["ls", "-l", f"{path_to_data}/store/200"]
    )

    # Tables are detached, nothing to clean up
    store300 = node1.exec_in_container(["ls", f"{path_to_data}/store/300"])
    assert "30000000-1000-4000-8000-000000000001" in store300
    assert "30000000-1000-4000-8000-000000000002" in store300
    assert "30000000-1000-4000-8000-000000000003" in store300
    assert "d---------" not in node1.exec_in_container(
        ["ls", "-l", f"{path_to_data}/store/300"]
    )

    # Manually created garbage
    store456 = node1.exec_in_container(["ls", f"{path_to_data}/store/456"])
    assert "30000000-1000-4000-8000-000000000003" not in store456  # changed
    assert "45600000-1000-4000-8000-000000000003" not in store456  # changed
    assert "45600000-1000-4000-8000-000000000004" not in store456  # changed
    assert "testgarbage" not in store456  # changed
    assert "---------" not in node1.exec_in_container(
        ["ls", "-l", f"{path_to_data}/store/456"]
    )  # changed

    node1.query("ATTACH TABLE db3.log2")
    node1.query("ATTACH DATABASE db2")
    node1.query("ATTACH TABLE db3.log")

    assert "1\n" == node1.query("SELECT * FROM db3.log")
    assert "1\n" == node1.query("SELECT * FROM db3.log2")
    assert "1\n" == node1.query("SELECT * FROM db2.log")

@@ -0,0 +1,7 @@
<clickhouse>
    <profiles>
        <default>
            <function_sleep_max_microseconds_per_block>10G</function_sleep_max_microseconds_per_block>
        </default>
    </profiles>
</clickhouse>

@@ -10,6 +10,7 @@ cluster = ClickHouseCluster(__file__)
 node1 = cluster.add_instance(
     "node1",
     main_configs=["configs/logs_config.xml"],
+    user_configs=["configs/user_overrides.xml"],
     with_zookeeper=True,
     macros={"shard": 0, "replica": 1},
 )

@@ -17,6 +18,7 @@ node1 = cluster.add_instance(
 node2 = cluster.add_instance(
     "node2",
     main_configs=["configs/logs_config.xml"],
+    user_configs=["configs/user_overrides.xml"],
     with_zookeeper=True,
     macros={"shard": 0, "replica": 2},
 )

@@ -183,10 +185,10 @@ def test_mutation_simple(started_cluster, replicated):
         starting_block, starting_block, starting_block + 1
     )
 
-    # ALTER will sleep for 3s * 3 (rows) = 9s
+    # ALTER will sleep for 9s
     def alter():
         node1.query(
-            f"ALTER TABLE {name} UPDATE a = 42 WHERE sleep(3) OR 1",
+            f"ALTER TABLE {name} UPDATE a = 42 WHERE sleep(9) OR 1",
             settings=settings,
         )

@@ -1,5 +1,6 @@
+-- 1.
 Code: 167
+-- 2.
 Code: 167
-Code: 167
-Code: 167
+-- 3.
 Code: 167

@@ -6,6 +6,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 
 { printf "select "; for _ in {1..1000}; do printf "coalesce(null, "; done; printf "1"; for _ in {1..1000}; do printf ")"; done; } > "${CLICKHOUSE_TMP}"/query
 
-cat "${CLICKHOUSE_TMP}"/query | $CLICKHOUSE_CLIENT 2>&1 | grep -o -F 'Code: 167'
-cat "${CLICKHOUSE_TMP}"/query | $CLICKHOUSE_LOCAL 2>&1 | grep -o -F 'Code: 167'
-cat "${CLICKHOUSE_TMP}"/query | $CLICKHOUSE_CURL --data-binary @- -vsS "$CLICKHOUSE_URL" 2>&1 | grep -o -F 'Code: 167'
+echo '-- 1.'
+cat "${CLICKHOUSE_TMP}"/query | $CLICKHOUSE_CLIENT 2>&1 | grep -o -m1 -F 'Code: 167'
+echo '-- 2.'
+cat "${CLICKHOUSE_TMP}"/query | $CLICKHOUSE_LOCAL 2>&1 | grep -o -m1 -F 'Code: 167'
+echo '-- 3.'
+cat "${CLICKHOUSE_TMP}"/query | $CLICKHOUSE_CURL --data-binary @- -vsS "$CLICKHOUSE_URL" 2>&1 | grep -o -m1 -F 'Code: 167'

@@ -1,5 +1,4 @@
--- Tags: no-tsan, no-asan, no-ubsan, no-msan, no-debug, no-cpu-aarch64, disabled
--- Tag disabled: Parsing inlines may lead to "could not find abbreviation code" (FIXME)
+-- Tags: no-tsan, no-asan, no-ubsan, no-msan, no-debug, no-cpu-aarch64
 
 SET allow_introspection_functions = 0;
 SELECT addressToLineWithInlines(1); -- { serverError 446 }

@@ -18,6 +18,11 @@ create view v2 as select * from d2;
 
 create table m as v1 engine=Merge(currentDatabase(), '^(v1|v2)$');
 
+{# -- FIXME:
+select _table, key from m where (value = 10 and _table = 'v1') or (value = 20 and _table = 'v1') or 0 or 0 settings {{ settings }};
+select _table, key from m where (value = 10 and _table = 'v3') or (value = 20 and _table = 'v3') or 0 or 0 settings {{ settings }};
+#}
+
 -- avoid reorder
 set max_threads=1;
 -- { echoOn }

tests/queries/0_stateless/02869_unicode_minus.reference (new file, 2 lines):
@@ -0,0 +1,2 @@
-1
-1

tests/queries/0_stateless/02869_unicode_minus.sql (new file, 2 lines):
@@ -0,0 +1,2 @@
SELECT 1 − 2;
SELECT −1;

@@ -25,5 +25,9 @@
 -549755813889
 4039370097989451775
 128
+72057594037927936
+-2361183241434822606849
+1329227995784915872903807060280344576
+-43556142965880123323311949751266331066369
 0
 1

@@ -33,6 +33,12 @@ SELECT byteSwap(-2147483649::Int64);
 SELECT byteSwap(-1242525266376::Int64);
 SELECT byteSwap(-9223372036854775808::Int64);
 
+SELECT byteSwap(18446744073709551616::UInt128);
+SELECT byteSwap(-9223372036854775809::Int128);
+
+SELECT byteSwap(340282366920938463463374607431768211456::UInt256);
+SELECT byteSwap(-170141183460469231731687303715884105729::Int256);
+
 -- Booleans are interpreted as UInt8
 SELECT byteSwap(false);
 SELECT byteSwap(true);

@@ -52,6 +58,4 @@ SELECT byteSwap(generateUUIDv4()); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
 SELECT byteSwap(toDecimal32(2, 4)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
 SELECT byteSwap(toFloat32(123.456)); -- { serverError NOT_IMPLEMENTED }
 SELECT byteSwap(toFloat64(123.456)); -- { serverError NOT_IMPLEMENTED }
-SELECT byteSwap(18446744073709551616::UInt128); -- { serverError NOT_IMPLEMENTED }
-SELECT byteSwap(-9223372036854775809::Int128); -- { serverError NOT_IMPLEMENTED }

tests/queries/0_stateless/02894_ast_depth_check.reference (new file):
@@ -0,0 +1 @@
TOO_DEEP_AST

tests/queries/0_stateless/02894_ast_depth_check.sh (new executable file, 7 lines):
@@ -0,0 +1,7 @@
#!/usr/bin/env bash

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

$CLICKHOUSE_LOCAL --query "SELECT * FROM format('TSV', \$\$ a UInt8, x ALIAS ''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN''IN'' \$\$, '1')" 2>&1 | grep -oF 'TOO_DEEP_AST'

tests/queries/0_stateless/02896_multiple_OR.reference (new file, 14 lines):
@@ -0,0 +1,14 @@
-- { echoOn }
SELECT * FROM or_bug WHERE (key = 1) OR false OR false;
1
SELECT * FROM or_bug WHERE (key = 1) OR false;
1
SELECT * FROM or_bug WHERE (key = 1);
1
-- { echoOn }
select * from forms where text_field like '%this%' or 0 = 1 or 0 = 1;
5840ead423829c1eab29fa97 this is a test
select * from forms where text_field like '%this%' or 0 = 1;
5840ead423829c1eab29fa97 this is a test
select * from forms where text_field like '%this%';
5840ead423829c1eab29fa97 this is a test

tests/queries/0_stateless/02896_multiple_OR.sql (new file, 28 lines):
@@ -0,0 +1,28 @@
-- https://github.com/ClickHouse/ClickHouse/pull/52653
DROP TABLE IF EXISTS or_bug;
CREATE TABLE or_bug (key UInt8) ENGINE=MergeTree ORDER BY key;
INSERT INTO TABLE or_bug VALUES (0), (1);

-- { echoOn }
SELECT * FROM or_bug WHERE (key = 1) OR false OR false;
SELECT * FROM or_bug WHERE (key = 1) OR false;
SELECT * FROM or_bug WHERE (key = 1);
-- { echoOff }

-- https://github.com/ClickHouse/ClickHouse/issues/55288
DROP TABLE IF EXISTS forms;
CREATE TABLE forms
(
    `form_id` FixedString(24),
    `text_field` String
)
ENGINE = MergeTree
PRIMARY KEY form_id
ORDER BY form_id;
insert into forms values ('5840ead423829c1eab29fa97','this is a test');

-- { echoOn }
select * from forms where text_field like '%this%' or 0 = 1 or 0 = 1;
select * from forms where text_field like '%this%' or 0 = 1;
select * from forms where text_field like '%this%';
-- { echoOff }