Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-24 08:32:02 +00:00

Merge pull request #9948 from ClickHouse/alter_rename_column: Alter rename column
Commit f1e0f15556
@ -274,7 +274,17 @@ private:
|
||||
comment_column->aliases.push_back("ALTER COMMENT COLUMN");
|
||||
auto clear_column = std::make_unique<Node>("CLEAR COLUMN", next_flag++, COLUMN);
|
||||
clear_column->aliases.push_back("ALTER CLEAR COLUMN");
|
||||
auto alter_column = std::make_unique<Node>("ALTER COLUMN", std::move(add_column), std::move(modify_column), std::move(drop_column), std::move(comment_column), std::move(clear_column));
|
||||
auto rename_column = std::make_unique<Node>("RENAME COLUMN", next_flag++, COLUMN);
|
||||
rename_column->aliases.push_back("ALTER RENAME COLUMN");
|
||||
|
||||
auto alter_column = std::make_unique<Node>(
|
||||
"ALTER COLUMN",
|
||||
std::move(add_column),
|
||||
std::move(modify_column),
|
||||
std::move(drop_column),
|
||||
std::move(comment_column),
|
||||
std::move(clear_column),
|
||||
std::move(rename_column));
|
||||
|
||||
auto alter_order_by = std::make_unique<Node>("ALTER ORDER BY", next_flag++, TABLE);
|
||||
alter_order_by->aliases.push_back("MODIFY ORDER BY");
|
||||
|
@ -30,6 +30,7 @@ enum class AccessType
|
||||
MODIFY_COLUMN,
|
||||
COMMENT_COLUMN,
|
||||
CLEAR_COLUMN,
|
||||
RENAME_COLUMN,
|
||||
ALTER_COLUMN, /// allow to execute ALTER {ADD|DROP|MODIFY...} COLUMN
|
||||
|
||||
ALTER_ORDER_BY,
|
||||
@ -197,6 +198,7 @@ namespace impl
|
||||
ACCESS_TYPE_TO_KEYWORD_CASE(MODIFY_COLUMN);
|
||||
ACCESS_TYPE_TO_KEYWORD_CASE(COMMENT_COLUMN);
|
||||
ACCESS_TYPE_TO_KEYWORD_CASE(CLEAR_COLUMN);
|
||||
ACCESS_TYPE_TO_KEYWORD_CASE(RENAME_COLUMN);
|
||||
ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_COLUMN);
|
||||
|
||||
ACCESS_TYPE_TO_KEYWORD_CASE(ALTER_ORDER_BY);
|
||||
|
@ -280,6 +280,11 @@ AccessRightsElements InterpreterAlterQuery::getRequiredAccessForCommand(const AS
|
||||
required_access.emplace_back(AccessType::REFRESH_VIEW, database, table);
|
||||
break;
|
||||
}
|
||||
case ASTAlterCommand::RENAME_COLUMN:
|
||||
{
|
||||
required_access.emplace_back(AccessType::RENAME_COLUMN, database, table, column_name());
|
||||
break;
|
||||
}
|
||||
case ASTAlterCommand::NO_TYPE: break;
|
||||
}
|
||||
|
||||
|
src/Interpreters/RenameColumnVisitor.cpp (new file, 12 lines)
@@ -0,0 +1,12 @@
#include <Interpreters/RenameColumnVisitor.h>
#include <Interpreters/IdentifierSemantic.h>

namespace DB
{
void RenameColumnData::visit(ASTIdentifier & identifier, ASTPtr &)
{
    std::optional<String> identifier_column_name = IdentifierSemantic::getColumnName(identifier);
    if (identifier_column_name && identifier_column_name == column_name)
        identifier.name = rename_to;
}
}
src/Interpreters/RenameColumnVisitor.h (new file, 22 lines)
@@ -0,0 +1,22 @@
#pragma once

#include <Interpreters/InDepthNodeVisitor.h>
#include <Parsers/ASTIdentifier.h>

namespace DB
{
/// Data for RenameColumnVisitor, which traverses the tree and renames all columns
/// named column_name to rename_to
struct RenameColumnData
{
    using TypeToVisit = ASTIdentifier;

    String column_name;
    String rename_to;

    void visit(ASTIdentifier & identifier, ASTPtr & ast);
};

using RenameColumnMatcher = OneTypeMatcher<RenameColumnData>;
using RenameColumnVisitor = InDepthNodeVisitor<RenameColumnMatcher, true>;
}
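A minimal usage sketch (not part of the patch), mirroring how AlterCommand::apply further down in this diff drives the visitor over a default/TTL expression. The helper name and the expression string are made up for illustration.

#include <Interpreters/RenameColumnVisitor.h>
#include <Parsers/ExpressionListParsers.h>
#include <Parsers/parseQuery.h>
#include <string>

/// Hypothetical helper: rewrite occurrences of `value1` to `renamed_value1`
/// inside an expression such as "value1 + 1".
static DB::ASTPtr renameInExpression(const std::string & expression)
{
    DB::ParserExpression expression_p;
    DB::ASTPtr ast = DB::parseQuery(expression_p, expression, 0);

    DB::RenameColumnData rename_data{"value1", "renamed_value1"};
    DB::RenameColumnVisitor rename_visitor(rename_data);
    rename_visitor.visit(ast);   /// every matching ASTIdentifier is renamed in place

    return ast;
}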
@ -56,6 +56,11 @@ ASTPtr ASTAlterCommand::clone() const
|
||||
res->values = values->clone();
|
||||
res->children.push_back(res->values);
|
||||
}
|
||||
if (rename_to)
|
||||
{
|
||||
res->rename_to = rename_to->clone();
|
||||
res->children.push_back(res->rename_to);
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
@ -285,6 +290,15 @@ void ASTAlterCommand::formatImpl(
|
||||
{
|
||||
settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "REFRESH " << (settings.hilite ? hilite_none : "");
|
||||
}
|
||||
else if (type == ASTAlterCommand::RENAME_COLUMN)
|
||||
{
|
||||
settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "RENAME COLUMN " << (if_exists ? "IF EXISTS " : "")
|
||||
<< (settings.hilite ? hilite_none : "");
|
||||
column->formatImpl(settings, state, frame);
|
||||
|
||||
settings.ostr << (settings.hilite ? hilite_keyword : "") << " TO ";
|
||||
rename_to->formatImpl(settings, state, frame);
|
||||
}
|
||||
else
|
||||
throw Exception("Unexpected type of ALTER", ErrorCodes::UNEXPECTED_AST_STRUCTURE);
|
||||
}
|
||||
|
@ -29,6 +29,7 @@ public:
|
||||
DROP_COLUMN,
|
||||
MODIFY_COLUMN,
|
||||
COMMENT_COLUMN,
|
||||
RENAME_COLUMN,
|
||||
MODIFY_ORDER_BY,
|
||||
MODIFY_TTL,
|
||||
MATERIALIZE_TTL,
|
||||
@ -69,6 +70,7 @@ public:
|
||||
|
||||
/** The ADD COLUMN query here optionally stores the name of the column following AFTER
|
||||
* The DROP query stores the column name for deletion here
|
||||
* Also used for RENAME COLUMN.
|
||||
*/
|
||||
ASTPtr column;
|
||||
|
||||
@ -155,6 +157,9 @@ public:
|
||||
String to_database;
|
||||
String to_table;
|
||||
|
||||
/// Target column name
|
||||
ASTPtr rename_to;
|
||||
|
||||
String getID(char delim) const override { return "AlterCommand" + (delim + std::to_string(static_cast<int>(type))); }
|
||||
|
||||
ASTPtr clone() const override;
|
||||
|
@ -27,6 +27,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
|
||||
ParserKeyword s_drop_column("DROP COLUMN");
|
||||
ParserKeyword s_clear_column("CLEAR COLUMN");
|
||||
ParserKeyword s_modify_column("MODIFY COLUMN");
|
||||
ParserKeyword s_rename_column("RENAME COLUMN");
|
||||
ParserKeyword s_comment_column("COMMENT COLUMN");
|
||||
ParserKeyword s_modify_order_by("MODIFY ORDER BY");
|
||||
ParserKeyword s_modify_ttl("MODIFY TTL");
|
||||
@ -77,6 +78,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
|
||||
ParserKeyword s_delete_where("DELETE WHERE");
|
||||
ParserKeyword s_update("UPDATE");
|
||||
ParserKeyword s_where("WHERE");
|
||||
ParserKeyword s_to("TO");
|
||||
|
||||
ParserCompoundIdentifier parser_name;
|
||||
ParserStringLiteral parser_string_literal;
|
||||
@ -121,6 +123,22 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
|
||||
|
||||
command->type = ASTAlterCommand::ADD_COLUMN;
|
||||
}
|
||||
else if (s_rename_column.ignore(pos, expected))
|
||||
{
|
||||
if (s_if_exists.ignore(pos, expected))
|
||||
command->if_exists = true;
|
||||
|
||||
if (!parser_name.parse(pos, command->column, expected))
|
||||
return false;
|
||||
|
||||
if (!s_to.ignore(pos, expected))
|
||||
return false;
|
||||
|
||||
if (!parser_name.parse(pos, command->rename_to, expected))
|
||||
return false;
|
||||
|
||||
command->type = ASTAlterCommand::RENAME_COLUMN;
|
||||
}
|
||||
else if (s_drop_partition.ignore(pos, expected))
|
||||
{
|
||||
if (!parser_partition.parse(pos, command->partition, expected))
|
||||
|
@ -12,6 +12,7 @@ namespace DB
|
||||
* [DROP COLUMN [IF EXISTS] col_to_drop, ...]
|
||||
* [CLEAR COLUMN [IF EXISTS] col_to_clear [IN PARTITION partition],]
|
||||
* [MODIFY COLUMN [IF EXISTS] col_to_modify type, ...]
|
||||
* [RENAME COLUMN [IF EXISTS] col_name TO col_name]
|
||||
* [MODIFY PRIMARY KEY (a, b, c...)]
|
||||
* [MODIFY SETTING setting_name=setting_value, ...]
|
||||
* [COMMENT COLUMN [IF EXISTS] col_name string]
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include <Interpreters/addTypeConversionToAST.h>
|
||||
#include <Interpreters/ExpressionAnalyzer.h>
|
||||
#include <Interpreters/SyntaxAnalyzer.h>
|
||||
#include <Interpreters/RenameColumnVisitor.h>
|
||||
#include <Parsers/ASTAlterQuery.h>
|
||||
#include <Parsers/ASTColumnDeclaration.h>
|
||||
#include <Parsers/ASTConstraintDeclaration.h>
|
||||
@ -39,6 +40,7 @@ namespace ErrorCodes
|
||||
extern const int NOT_FOUND_COLUMN_IN_BLOCK;
|
||||
extern const int LOGICAL_ERROR;
|
||||
extern const int DUPLICATE_COLUMN;
|
||||
extern const int NOT_IMPLEMENTED;
|
||||
}
|
||||
|
||||
|
||||
@ -231,10 +233,21 @@ std::optional<AlterCommand> AlterCommand::parse(const ASTAlterCommand * command_
|
||||
else if (command_ast->type == ASTAlterCommand::MODIFY_QUERY)
|
||||
{
|
||||
AlterCommand command;
|
||||
command.ast = command_ast->clone();
|
||||
command.type = AlterCommand::MODIFY_QUERY;
|
||||
command.select = command_ast->select;
|
||||
return command;
|
||||
}
|
||||
else if (command_ast->type == ASTAlterCommand::RENAME_COLUMN)
|
||||
{
|
||||
AlterCommand command;
|
||||
command.ast = command_ast->clone();
|
||||
command.type = AlterCommand::RENAME_COLUMN;
|
||||
command.column_name = command_ast->column->as<ASTIdentifier &>().name;
|
||||
command.rename_to = command_ast->rename_to->as<ASTIdentifier &>().name;
|
||||
command.if_exists = command_ast->if_exists;
|
||||
return command;
|
||||
}
|
||||
else
|
||||
return {};
|
||||
}
|
||||
@ -437,6 +450,24 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata) const
|
||||
settings_from_storage.push_back(change);
|
||||
}
|
||||
}
|
||||
else if (type == RENAME_COLUMN)
|
||||
{
|
||||
metadata.columns.rename(column_name, rename_to);
|
||||
RenameColumnData rename_data{column_name, rename_to};
|
||||
RenameColumnVisitor rename_visitor(rename_data);
|
||||
for (auto & column : metadata.columns)
|
||||
{
|
||||
metadata.columns.modify(column.name, [&](ColumnDescription & column_to_modify)
|
||||
{
|
||||
if (column_to_modify.default_desc.expression)
|
||||
rename_visitor.visit(column_to_modify.default_desc.expression);
|
||||
if (column_to_modify.ttl)
|
||||
rename_visitor.visit(column_to_modify.ttl);
|
||||
});
|
||||
}
|
||||
if (metadata.ttl_for_table_ast)
|
||||
rename_visitor.visit(metadata.ttl_for_table_ast);
|
||||
}
|
||||
else
|
||||
throw Exception("Wrong parameter type in ALTER query", ErrorCodes::LOGICAL_ERROR);
|
||||
}
|
||||
@ -519,7 +550,7 @@ bool AlterCommand::isRequireMutationStage(const StorageInMemoryMetadata & metada
|
||||
if (ignore)
|
||||
return false;
|
||||
|
||||
if (type == DROP_COLUMN || type == DROP_INDEX)
|
||||
if (type == DROP_COLUMN || type == DROP_INDEX || type == RENAME_COLUMN)
|
||||
return true;
|
||||
|
||||
if (type != MODIFY_COLUMN || data_type == nullptr)
|
||||
@ -585,6 +616,12 @@ std::optional<MutationCommand> AlterCommand::tryConvertToMutationCommand(const S
|
||||
|
||||
result.predicate = nullptr;
|
||||
}
|
||||
else if (type == RENAME_COLUMN)
|
||||
{
|
||||
result.type = MutationCommand::Type::RENAME_COLUMN;
|
||||
result.column_name = column_name;
|
||||
result.rename_to = rename_to;
|
||||
}
|
||||
|
||||
result.ast = ast->clone();
|
||||
return result;
|
||||
@ -619,6 +656,8 @@ String alterTypeToString(const AlterCommand::Type type)
|
||||
return "MODIFY SETTING";
|
||||
case AlterCommand::Type::MODIFY_QUERY:
|
||||
return "MODIFY QUERY";
|
||||
case AlterCommand::Type::RENAME_COLUMN:
|
||||
return "RENAME COLUMN";
|
||||
}
|
||||
__builtin_unreachable();
|
||||
}
|
||||
@ -666,7 +705,8 @@ void AlterCommands::prepare(const StorageInMemoryMetadata & metadata)
|
||||
command.ignore = true;
|
||||
}
|
||||
else if (command.type == AlterCommand::DROP_COLUMN
|
||||
|| command.type == AlterCommand::COMMENT_COLUMN)
|
||||
|| command.type == AlterCommand::COMMENT_COLUMN
|
||||
|| command.type == AlterCommand::RENAME_COLUMN)
|
||||
{
|
||||
if (!has_column && command.if_exists)
|
||||
command.ignore = true;
|
||||
@ -680,6 +720,7 @@ void AlterCommands::validate(const StorageInMemoryMetadata & metadata, const Con
|
||||
auto all_columns = metadata.columns;
|
||||
/// Default expression for all added/modified columns
|
||||
ASTPtr default_expr_list = std::make_shared<ASTExpressionList>();
|
||||
NameToNameMap renames_map;
|
||||
for (size_t i = 0; i < size(); ++i)
|
||||
{
|
||||
auto & command = (*this)[i];
|
||||
@ -753,6 +794,52 @@ void AlterCommands::validate(const StorageInMemoryMetadata & metadata, const Con
|
||||
if (metadata.settings_ast == nullptr)
|
||||
throw Exception{"Cannot alter settings, because table engine doesn't support settings changes", ErrorCodes::BAD_ARGUMENTS};
|
||||
}
|
||||
else if (command.type == AlterCommand::RENAME_COLUMN)
|
||||
{
|
||||
/// TODO Implement nested rename
|
||||
if (metadata.columns.hasNested(command.column_name))
|
||||
{
|
||||
throw Exception{"Cannot rename whole Nested struct", ErrorCodes::NOT_IMPLEMENTED};
|
||||
}
|
||||
|
||||
if (!metadata.columns.has(command.column_name))
|
||||
{
|
||||
if (!command.if_exists)
|
||||
throw Exception{"Wrong column name. Cannot find column " + backQuote(command.column_name) + " to rename",
|
||||
ErrorCodes::NOT_FOUND_COLUMN_IN_BLOCK};
|
||||
}
|
||||
|
||||
if (metadata.columns.has(command.rename_to))
|
||||
throw Exception{"Cannot rename to " + backQuote(command.rename_to) + ": column with this name already exists",
|
||||
ErrorCodes::DUPLICATE_COLUMN};
|
||||
|
||||
|
||||
if (renames_map.count(command.column_name))
|
||||
throw Exception{"Cannot rename column '" + backQuote(command.column_name) + "' to two different names in a single ALTER query", ErrorCodes::BAD_ARGUMENTS};
|
||||
|
||||
if (renames_map.count(command.rename_to))
|
||||
throw Exception{"Rename loop detected in ALTER query",
|
||||
ErrorCodes::BAD_ARGUMENTS};
|
||||
|
||||
String from_nested_table_name = Nested::extractTableName(command.column_name);
|
||||
String to_nested_table_name = Nested::extractTableName(command.rename_to);
|
||||
bool from_nested = from_nested_table_name != command.column_name;
|
||||
bool to_nested = to_nested_table_name != command.rename_to;
|
||||
|
||||
if (from_nested && to_nested)
|
||||
{
|
||||
if (from_nested_table_name != to_nested_table_name)
|
||||
throw Exception{"Cannot rename column from one nested name to another", ErrorCodes::BAD_ARGUMENTS};
|
||||
}
|
||||
else if (!from_nested && !to_nested)
|
||||
{
|
||||
renames_map[command.column_name] = command.rename_to;
|
||||
}
|
||||
else
|
||||
{
|
||||
throw Exception{"Cannot rename column from nested struct to normal column and vice versa", ErrorCodes::BAD_ARGUMENTS};
|
||||
}
|
||||
}
|
||||
|
||||
/// Collect default expressions for MODIFY and ADD commands
|
||||
if (command.type == AlterCommand::MODIFY_COLUMN || command.type == AlterCommand::ADD_COLUMN)
|
||||
|
@ -35,6 +35,7 @@ struct AlterCommand
|
||||
MODIFY_TTL,
|
||||
MODIFY_SETTING,
|
||||
MODIFY_QUERY,
|
||||
RENAME_COLUMN,
|
||||
};
|
||||
|
||||
Type type;
|
||||
@ -96,6 +97,9 @@ struct AlterCommand
|
||||
/// For MODIFY_QUERY
|
||||
ASTPtr select = nullptr;
|
||||
|
||||
/// Target column name
|
||||
String rename_to;
|
||||
|
||||
static std::optional<AlterCommand> parse(const ASTAlterCommand * command);
|
||||
|
||||
void apply(StorageInMemoryMetadata & metadata) const;
|
||||
|
@ -1,4 +1,5 @@
|
||||
#include <Storages/ColumnsDescription.h>
|
||||
|
||||
#include <Parsers/ASTLiteral.h>
|
||||
#include <Parsers/ExpressionElementParsers.h>
|
||||
#include <Parsers/ExpressionListParsers.h>
|
||||
@ -36,6 +37,7 @@ namespace ErrorCodes
|
||||
extern const int ILLEGAL_COLUMN;
|
||||
extern const int CANNOT_PARSE_TEXT;
|
||||
extern const int THERE_IS_NO_DEFAULT_VALUE;
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
ColumnDescription::ColumnDescription(String name_, DataTypePtr type_, bool is_virtual_)
|
||||
@ -195,6 +197,18 @@ void ColumnsDescription::remove(const String & column_name)
|
||||
list_it = columns.get<0>().erase(list_it);
|
||||
}
|
||||
|
||||
void ColumnsDescription::rename(const String & column_from, const String & column_to)
|
||||
{
|
||||
auto it = columns.get<1>().find(column_from);
|
||||
if (it == columns.get<1>().end())
|
||||
throw Exception("Cannot find column " + column_from + " in ColumnsDescription", ErrorCodes::LOGICAL_ERROR);
|
||||
|
||||
columns.get<1>().modify_key(it, [&column_to] (String & old_name)
|
||||
{
|
||||
old_name = column_to;
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
void ColumnsDescription::flattenNested()
|
||||
{
|
||||
|
@ -57,6 +57,10 @@ public:
|
||||
/// `column_name` can be a Nested column name;
|
||||
void remove(const String & column_name);
|
||||
|
||||
/// Rename column. column_from and column_to cannot be nested columns.
|
||||
/// TODO add ability to rename nested columns
|
||||
void rename(const String & column_from, const String & column_to);
|
||||
|
||||
void flattenNested(); /// TODO: remove, insert already flattened Nested columns.
|
||||
|
||||
bool operator==(const ColumnsDescription & other) const { return columns == other.columns; }
|
||||
|
@ -30,6 +30,7 @@ IMergeTreeReader::IMergeTreeReader(const MergeTreeData::DataPartPtr & data_part_
|
||||
, columns(columns_), uncompressed_cache(uncompressed_cache_), mark_cache(mark_cache_)
|
||||
, settings(settings_), storage(data_part_->storage)
|
||||
, all_mark_ranges(all_mark_ranges_)
|
||||
, alter_conversions(storage.getAlterConversionsForPart(data_part))
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -78,6 +78,8 @@ protected:
|
||||
MarkRanges all_mark_ranges;
|
||||
|
||||
friend class MergeTreeRangeReader::DelayedStream;
|
||||
/// Alter conversions, which must be applied on fly if required
|
||||
MergeTreeData::AlterConversions alter_conversions;
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -192,7 +192,7 @@ MergeTreeData::MergeTreeData(
|
||||
min_format_version = MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING;
|
||||
}
|
||||
|
||||
setTTLExpressions(metadata.columns.getColumnTTLs(), metadata.ttl_for_table_ast);
|
||||
setTTLExpressions(metadata.columns, metadata.ttl_for_table_ast);
|
||||
|
||||
/// format_file always contained on any data path
|
||||
PathWithDisk version_file;
|
||||
@ -610,14 +610,17 @@ void checkTTLExpression(const ExpressionActionsPtr & ttl_expression, const Strin
|
||||
}
|
||||
|
||||
|
||||
void MergeTreeData::setTTLExpressions(const ColumnsDescription::ColumnTTLs & new_column_ttls,
|
||||
void MergeTreeData::setTTLExpressions(const ColumnsDescription & new_columns,
|
||||
const ASTPtr & new_ttl_table_ast, bool only_check)
|
||||
{
|
||||
auto create_ttl_entry = [this](ASTPtr ttl_ast)
|
||||
|
||||
auto new_column_ttls = new_columns.getColumnTTLs();
|
||||
|
||||
auto create_ttl_entry = [this, &new_columns](ASTPtr ttl_ast)
|
||||
{
|
||||
TTLEntry result;
|
||||
|
||||
auto syntax_result = SyntaxAnalyzer(global_context).analyze(ttl_ast, getColumns().getAllPhysical());
|
||||
auto syntax_result = SyntaxAnalyzer(global_context).analyze(ttl_ast, new_columns.getAllPhysical());
|
||||
result.expression = ExpressionAnalyzer(ttl_ast, syntax_result, global_context).getActions(false);
|
||||
result.destination_type = PartDestinationType::DELETE;
|
||||
result.result_column = ttl_ast->getColumnName();
|
||||
@ -1457,6 +1460,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, const S
|
||||
for (const auto & column : getColumns().getAllPhysical())
|
||||
old_types.emplace(column.name, column.type.get());
|
||||
|
||||
|
||||
for (const AlterCommand & command : commands)
|
||||
{
|
||||
if (command.type == AlterCommand::MODIFY_ORDER_BY && !is_custom_partitioned)
|
||||
@ -1471,6 +1475,15 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, const S
|
||||
"ALTER ADD INDEX is not supported for tables with the old syntax",
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
}
|
||||
if (command.type == AlterCommand::RENAME_COLUMN)
|
||||
{
|
||||
if (columns_alter_type_forbidden.count(command.column_name) || columns_alter_type_metadata_only.count(command.column_name))
|
||||
{
|
||||
throw Exception(
|
||||
"Trying to ALTER RENAME key " + backQuoteIfNeed(command.column_name) + " column which is a part of key expression",
|
||||
ErrorCodes::ILLEGAL_COLUMN);
|
||||
}
|
||||
}
|
||||
else if (command.isModifyingData())
|
||||
{
|
||||
if (columns_alter_type_forbidden.count(command.column_name))
|
||||
@ -1490,7 +1503,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, const S
|
||||
|
||||
setProperties(metadata, /* only_check = */ true);
|
||||
|
||||
setTTLExpressions(metadata.columns.getColumnTTLs(), metadata.ttl_for_table_ast, /* only_check = */ true);
|
||||
setTTLExpressions(metadata.columns, metadata.ttl_for_table_ast, /* only_check = */ true);
|
||||
|
||||
if (settings_ast)
|
||||
{
|
||||
@ -3589,4 +3602,18 @@ bool MergeTreeData::canUsePolymorphicParts(const MergeTreeSettings & settings, S
|
||||
return true;
|
||||
}
|
||||
|
||||
MergeTreeData::AlterConversions MergeTreeData::getAlterConversionsForPart(const MergeTreeDataPartPtr part) const
|
||||
{
|
||||
MutationCommands commands = getFirtsAlterMutationCommandsForPart(part);
|
||||
|
||||
AlterConversions result{};
|
||||
for (const auto & command : commands)
|
||||
/// Currently we need explicit conversions only for the RENAME alter;
/// all other conversions can be deduced from the diff between part columns
/// and columns in storage.
|
||||
if (command.type == MutationCommand::Type::RENAME_COLUMN)
|
||||
result.rename_map[command.rename_to] = command.column_name;
|
||||
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
@ -33,6 +33,7 @@ namespace DB
|
||||
class MergeListEntry;
|
||||
class AlterCommands;
|
||||
class MergeTreePartsMover;
|
||||
class MutationCommands;
|
||||
|
||||
class ExpressionActions;
|
||||
using ExpressionActionsPtr = std::shared_ptr<ExpressionActions>;
|
||||
@ -124,6 +125,20 @@ public:
|
||||
|
||||
STRONG_TYPEDEF(String, PartitionID)
|
||||
|
||||
/// Alter conversions which should be applied on-fly for part. Built from
/// the most recent mutation commands for part. Now we have only rename_map
/// here (from the ALTER RENAME command), because for all other types of alters
/// we can deduce conversions for part from the difference between
/// part->getColumns() and storage->getColumns().
|
||||
struct AlterConversions
|
||||
{
|
||||
/// Rename map new_name -> old_name
|
||||
std::unordered_map<String, String> rename_map;
|
||||
|
||||
bool isColumnRenamed(const String & new_name) const { return rename_map.count(new_name) > 0; }
|
||||
String getColumnOldName(const String & new_name) const { return rename_map.at(new_name); }
|
||||
};
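A short illustrative fragment (not from the patch) of how these conversions are consulted by the readers changed further down in this diff: when the requested column is absent from an old part under its new name, the reader falls back to the pre-rename name. `alter_conversions` and `part_columns` stand in for the corresponding reader members.

    String name = "renamed_value1";                      /// column requested by the query
    if (alter_conversions.isColumnRenamed(name))
    {
        String old_name = alter_conversions.getColumnOldName(name);   /// e.g. "value1"
        if (!part_columns.count(name) && part_columns.count(old_name))
            name = old_name;                             /// read data written under the old name
    }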
|
||||
|
||||
struct LessDataPart
|
||||
{
|
||||
using is_transparent = void;
|
||||
@ -535,10 +550,9 @@ public:
|
||||
broken_part_callback(name);
|
||||
}
|
||||
|
||||
/** Get the key expression AST as an ASTExpressionList.
|
||||
* It can be specified in the tuple: (CounterID, Date),
|
||||
* or as one column: CounterID.
|
||||
*/
|
||||
/** Get the key expression AST as an ASTExpressionList. It can be specified
|
||||
* in the tuple: (CounterID, Date), or as one column: CounterID.
|
||||
*/
|
||||
static ASTPtr extractKeyExpressionList(const ASTPtr & node);
|
||||
|
||||
bool hasSortingKey() const { return !sorting_key_columns.empty(); }
|
||||
@ -647,6 +661,9 @@ public:
|
||||
/// Reserves 0 bytes
|
||||
ReservationPtr makeEmptyReservationOnLargestDisk() { return getStoragePolicy()->makeEmptyReservationOnLargestDisk(); }
|
||||
|
||||
/// Return alter conversions for part which must be applied on fly.
|
||||
AlterConversions getAlterConversionsForPart(const MergeTreeDataPartPtr part) const;
|
||||
|
||||
MergeTreeDataFormatVersion format_version;
|
||||
|
||||
Context & global_context;
|
||||
@ -856,14 +873,14 @@ protected:
|
||||
std::mutex grab_old_parts_mutex;
|
||||
/// The same for clearOldTemporaryDirectories.
|
||||
std::mutex clear_old_temporary_directories_mutex;
|
||||
/// Mutex for settings usage
|
||||
|
||||
void setProperties(const StorageInMemoryMetadata & metadata, bool only_check = false);
|
||||
|
||||
void initPartitionKey();
|
||||
|
||||
void setTTLExpressions(const ColumnsDescription::ColumnTTLs & new_column_ttls,
|
||||
const ASTPtr & new_ttl_table_ast, bool only_check = false);
|
||||
void setTTLExpressions(const ColumnsDescription & columns,
|
||||
const ASTPtr & new_ttl_table_ast, bool only_check = false);
|
||||
|
||||
void checkStoragePolicy(const StoragePolicyPtr & new_storage_policy);
|
||||
|
||||
void setStoragePolicy(const String & new_storage_policy_name, bool only_check = false);
|
||||
@ -908,6 +925,11 @@ protected:
|
||||
/// mechanisms for parts locking
|
||||
virtual bool partIsAssignedToBackgroundOperation(const DataPartPtr & part) const = 0;
|
||||
|
||||
/// Return the most recent mutation commands for part which weren't applied.
/// Used to receive AlterConversions for part and apply them on the fly. This
/// method has different implementations for replicated and non-replicated
/// MergeTree, because they store mutations in different ways.
|
||||
virtual MutationCommands getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const = 0;
|
||||
/// Moves part to specified space, used in ALTER ... MOVE ... queries
|
||||
bool movePartsToSpace(const DataPartsVector & parts, SpacePtr space);
|
||||
|
||||
|
@ -26,11 +26,14 @@
|
||||
#include <Common/SimpleIncrement.h>
|
||||
#include <Common/interpolate.h>
|
||||
#include <Common/typeid_cast.h>
|
||||
#include <Common/escapeForFileName.h>
|
||||
#include <cmath>
|
||||
#include <numeric>
|
||||
#include <iomanip>
|
||||
|
||||
|
||||
#include <boost/algorithm/string/replace.hpp>
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event MergedRows;
|
||||
@ -988,6 +991,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
|
||||
|
||||
splitMutationCommands(source_part, commands_for_part, for_interpreter, for_file_renames);
|
||||
|
||||
|
||||
UInt64 watch_prev_elapsed = 0;
|
||||
MergeStageProgress stage_progress(1.0);
|
||||
|
||||
@ -1056,7 +1060,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
|
||||
auto indices_to_recalc = getIndicesToRecalculate(in, storage_from_source_part, updated_header.getNamesAndTypesList(), context);
|
||||
|
||||
NameSet files_to_skip = collectFilesToSkip(updated_header, indices_to_recalc, mrk_extension);
|
||||
NameSet files_to_remove = collectFilesToRemove(source_part, for_file_renames, mrk_extension);
|
||||
NameToNameMap files_to_rename = collectFilesForRenames(source_part, for_file_renames, mrk_extension);
|
||||
|
||||
if (need_remove_expired_values)
|
||||
files_to_skip.insert("ttl.txt");
|
||||
@ -1064,10 +1068,21 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
|
||||
/// Create hardlinks for unchanged files
|
||||
for (auto it = disk->iterateDirectory(source_part->getFullRelativePath()); it->isValid(); it->next())
|
||||
{
|
||||
if (files_to_skip.count(it->name()) || files_to_remove.count(it->name()))
|
||||
if (files_to_skip.count(it->name()))
|
||||
continue;
|
||||
|
||||
String destination = new_part_tmp_path + "/" + it->name();
|
||||
String destination = new_part_tmp_path + "/";
|
||||
auto rename_it = files_to_rename.find(it->name());
|
||||
if (rename_it != files_to_rename.end())
|
||||
{
|
||||
if (rename_it->second.empty())
|
||||
continue;
|
||||
destination += rename_it->second;
|
||||
}
|
||||
else
|
||||
{
|
||||
destination += it->name();
|
||||
}
|
||||
|
||||
disk->createHardLink(it->path(), destination);
|
||||
}
|
||||
@ -1090,9 +1105,19 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
|
||||
need_remove_expired_values);
|
||||
}
|
||||
|
||||
for (const String & removed_file : files_to_remove)
|
||||
if (new_data_part->checksums.files.count(removed_file))
|
||||
new_data_part->checksums.files.erase(removed_file);
|
||||
for (const auto & [rename_from, rename_to] : files_to_rename)
|
||||
{
|
||||
if (rename_to.empty() && new_data_part->checksums.files.count(rename_from))
|
||||
{
|
||||
new_data_part->checksums.files.erase(rename_from);
|
||||
}
|
||||
else if (new_data_part->checksums.files.count(rename_from))
|
||||
{
|
||||
new_data_part->checksums.files[rename_to] = new_data_part->checksums.files[rename_from];
|
||||
|
||||
new_data_part->checksums.files.erase(rename_from);
|
||||
}
|
||||
}
|
||||
|
||||
finalizeMutatedPart(source_part, new_data_part, need_remove_expired_values);
|
||||
}
|
||||
@ -1236,6 +1261,20 @@ void MergeTreeDataMergerMutator::splitMutationCommands(
|
||||
{
|
||||
removed_columns_from_compact_part.emplace(command.column_name);
|
||||
}
|
||||
else if (command.type == MutationCommand::Type::RENAME_COLUMN)
|
||||
{
|
||||
if (is_compact_part)
|
||||
{
|
||||
for_interpreter.push_back(
|
||||
{
|
||||
.type = MutationCommand::Type::READ_COLUMN,
|
||||
.column_name = command.rename_to,
|
||||
});
|
||||
already_changed_columns.emplace(command.column_name);
|
||||
}
|
||||
else
|
||||
for_file_renames.push_back(command);
|
||||
}
|
||||
else
|
||||
{
|
||||
for_file_renames.push_back(command);
|
||||
@ -1248,7 +1287,8 @@ void MergeTreeDataMergerMutator::splitMutationCommands(
|
||||
/// we just don't read dropped columns
|
||||
for (const auto & column : part->getColumns())
|
||||
{
|
||||
if (!removed_columns_from_compact_part.count(column.name) && !already_changed_columns.count(column.name))
|
||||
if (!removed_columns_from_compact_part.count(column.name)
|
||||
&& !already_changed_columns.count(column.name))
|
||||
{
|
||||
for_interpreter.emplace_back(MutationCommand
|
||||
{
|
||||
@ -1262,7 +1302,7 @@ void MergeTreeDataMergerMutator::splitMutationCommands(
|
||||
}
|
||||
|
||||
|
||||
NameSet MergeTreeDataMergerMutator::collectFilesToRemove(
|
||||
NameToNameMap MergeTreeDataMergerMutator::collectFilesForRenames(
|
||||
MergeTreeData::DataPartPtr source_part, const MutationCommands & commands_for_removes, const String & mrk_extension)
|
||||
{
|
||||
/// Collect counts for shared streams of different columns. As an example, Nested columns have shared stream with array sizes.
|
||||
@ -1277,14 +1317,14 @@ NameSet MergeTreeDataMergerMutator::collectFilesToRemove(
|
||||
{});
|
||||
}
|
||||
|
||||
NameSet remove_files;
|
||||
NameToNameMap rename_map;
|
||||
/// Remove old indices
|
||||
for (const auto & command : commands_for_removes)
|
||||
{
|
||||
if (command.type == MutationCommand::Type::DROP_INDEX)
|
||||
{
|
||||
remove_files.emplace("skp_idx_" + command.column_name + ".idx");
|
||||
remove_files.emplace("skp_idx_" + command.column_name + mrk_extension);
|
||||
rename_map.emplace("skp_idx_" + command.column_name + ".idx", "");
|
||||
rename_map.emplace("skp_idx_" + command.column_name + mrk_extension, "");
|
||||
}
|
||||
else if (command.type == MutationCommand::Type::DROP_COLUMN)
|
||||
{
|
||||
@ -1294,8 +1334,8 @@ NameSet MergeTreeDataMergerMutator::collectFilesToRemove(
|
||||
/// Delete files if they are no longer shared with another column.
|
||||
if (--stream_counts[stream_name] == 0)
|
||||
{
|
||||
remove_files.emplace(stream_name + ".bin");
|
||||
remove_files.emplace(stream_name + mrk_extension);
|
||||
rename_map.emplace(stream_name + ".bin", "");
|
||||
rename_map.emplace(stream_name + mrk_extension, "");
|
||||
}
|
||||
};
|
||||
|
||||
@ -1304,9 +1344,31 @@ NameSet MergeTreeDataMergerMutator::collectFilesToRemove(
|
||||
if (column)
|
||||
column->type->enumerateStreams(callback, stream_path);
|
||||
}
|
||||
else if (command.type == MutationCommand::Type::RENAME_COLUMN)
|
||||
{
|
||||
String escaped_name_from = escapeForFileName(command.column_name);
|
||||
String escaped_name_to = escapeForFileName(command.rename_to);
|
||||
|
||||
IDataType::StreamCallback callback = [&](const IDataType::SubstreamPath & substream_path)
|
||||
{
|
||||
String stream_from = IDataType::getFileNameForStream(command.column_name, substream_path);
|
||||
|
||||
String stream_to = boost::replace_first_copy(stream_from, escaped_name_from, escaped_name_to);
|
||||
|
||||
if (stream_from != stream_to)
|
||||
{
|
||||
rename_map.emplace(stream_from + ".bin", stream_to + ".bin");
|
||||
rename_map.emplace(stream_from + mrk_extension, stream_to + mrk_extension);
|
||||
}
|
||||
};
|
||||
IDataType::SubstreamPath stream_path;
|
||||
auto column = source_part->getColumns().tryGetByName(command.column_name);
|
||||
if (column)
|
||||
column->type->enumerateStreams(callback, stream_path);
|
||||
}
|
||||
}
|
||||
|
||||
return remove_files;
|
||||
return rename_map;
|
||||
}
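For concreteness, a hedged example of the map this function would return for one rename plus one drop. File names assume a plain column stored in wide format with `.mrk2` marks; the real stream names come from IDataType::getFileNameForStream, and the column names here are hypothetical.

    /// ALTER ... RENAME COLUMN value1 TO renamed_value1, DROP COLUMN value2
    NameToNameMap expected_rename_map =
    {
        {"value1.bin",  "renamed_value1.bin"},
        {"value1.mrk2", "renamed_value1.mrk2"},
        {"value2.bin",  ""},    /// empty target: file is not hardlinked and is dropped from checksums
        {"value2.mrk2", ""},
    };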
|
||||
|
||||
NameSet MergeTreeDataMergerMutator::collectFilesToSkip(
|
||||
@ -1344,10 +1406,13 @@ NamesAndTypesList MergeTreeDataMergerMutator::getColumnsForNewDataPart(
|
||||
const MutationCommands & commands_for_removes)
|
||||
{
|
||||
NameSet removed_columns;
|
||||
NameToNameMap renamed_columns;
|
||||
for (const auto & command : commands_for_removes)
|
||||
{
|
||||
if (command.type == MutationCommand::DROP_COLUMN)
|
||||
removed_columns.insert(command.column_name);
|
||||
if (command.type == MutationCommand::RENAME_COLUMN)
|
||||
renamed_columns.emplace(command.rename_to, command.column_name);
|
||||
}
|
||||
Names source_column_names = source_part->getColumns().getNames();
|
||||
NameSet source_columns_name_set(source_column_names.begin(), source_column_names.end());
|
||||
@ -1364,6 +1429,10 @@ NamesAndTypesList MergeTreeDataMergerMutator::getColumnsForNewDataPart(
|
||||
{
|
||||
++it;
|
||||
}
|
||||
else if (renamed_columns.count(it->name) && source_columns_name_set.count(renamed_columns[it->name]))
|
||||
{
|
||||
++it;
|
||||
}
|
||||
else
|
||||
it = all_columns.erase(it);
|
||||
}
|
||||
|
@ -147,7 +147,7 @@ private:
|
||||
|
||||
/// Apply commands to source_part i.e. remove some columns in source_part
|
||||
/// and return set of files, that have to be removed from filesystem and checksums
|
||||
static NameSet collectFilesToRemove(MergeTreeData::DataPartPtr source_part, const MutationCommands & commands_for_removes, const String & mrk_extension);
|
||||
static NameToNameMap collectFilesForRenames(MergeTreeData::DataPartPtr source_part, const MutationCommands & commands_for_removes, const String & mrk_extension);
|
||||
|
||||
/// Files, that we don't need to remove and don't need to hardlink, for example columns.txt and checksums.txt.
|
||||
/// Because we will generate new versions of them after we perform mutation.
|
||||
|
@ -81,14 +81,21 @@ MergeTreeReaderCompact::MergeTreeReaderCompact(
|
||||
const auto & [name, type] = *name_and_type;
|
||||
auto position = data_part->getColumnPosition(name);
|
||||
|
||||
/// If array of Nested column is missing in part,
|
||||
/// we have to read its offsets if they exist.
|
||||
if (!position && alter_conversions.isColumnRenamed(name))
|
||||
{
|
||||
String old_name = alter_conversions.getColumnOldName(name);
|
||||
position = data_part->getColumnPosition(old_name);
|
||||
}
|
||||
|
||||
if (!position && typeid_cast<const DataTypeArray *>(type.get()))
|
||||
{
|
||||
/// If array of Nested column is missing in part,
|
||||
/// we have to read its offsets if they exist.
|
||||
position = findColumnForOffsets(name);
|
||||
read_only_offsets[i] = (position != std::nullopt);
|
||||
}
|
||||
|
||||
|
||||
column_positions[i] = std::move(position);
|
||||
}
|
||||
|
||||
@ -125,7 +132,15 @@ size_t MergeTreeReaderCompact::readRows(size_t from_mark, bool continue_reading,
|
||||
if (!res_columns[pos])
|
||||
continue;
|
||||
|
||||
const auto & [name, type] = *name_and_type;
|
||||
auto [name, type] = *name_and_type;
|
||||
|
||||
if (alter_conversions.isColumnRenamed(name))
|
||||
{
|
||||
String old_name = alter_conversions.getColumnOldName(name);
|
||||
if (!data_part->getColumnPosition(name) && data_part->getColumnPosition(old_name))
|
||||
name = old_name;
|
||||
}
|
||||
|
||||
auto & column = mutable_columns[pos];
|
||||
|
||||
try
|
||||
|
@ -42,16 +42,27 @@ MergeTreeReaderWide::MergeTreeReaderWide(
|
||||
try
|
||||
{
|
||||
for (const NameAndTypePair & column_from_part : data_part->getColumns())
|
||||
{
|
||||
columns_from_part[column_from_part.name] = column_from_part.type;
|
||||
}
|
||||
|
||||
for (const NameAndTypePair & column : columns)
|
||||
{
|
||||
if (columns_from_part.count(column.name))
|
||||
{
|
||||
addStreams(column.name, *columns_from_part[column.name], profile_callback_, clock_type_);
|
||||
}
|
||||
else
|
||||
addStreams(column.name, *column.type, profile_callback_, clock_type_);
|
||||
{
|
||||
if (alter_conversions.isColumnRenamed(column.name))
|
||||
{
|
||||
String old_name = alter_conversions.getColumnOldName(column.name);
|
||||
if (columns_from_part.count(old_name))
|
||||
addStreams(old_name, *columns_from_part[old_name], profile_callback_, clock_type_);
|
||||
}
|
||||
else
|
||||
{
|
||||
addStreams(column.name, *column.type, profile_callback_, clock_type_);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
@ -82,7 +93,14 @@ size_t MergeTreeReaderWide::readRows(size_t from_mark, bool continue_reading, si
|
||||
auto name_and_type = columns.begin();
|
||||
for (size_t pos = 0; pos < num_columns; ++pos, ++name_and_type)
|
||||
{
|
||||
String & name = name_and_type->name;
|
||||
String name = name_and_type->name;
|
||||
if (alter_conversions.isColumnRenamed(name))
|
||||
{
|
||||
String original_name = alter_conversions.getColumnOldName(name);
|
||||
if (!columns_from_part.count(name) && columns_from_part.count(original_name))
|
||||
name = original_name;
|
||||
}
|
||||
|
||||
DataTypePtr type;
|
||||
if (columns_from_part.count(name))
|
||||
type = columns_from_part[name];
|
||||
|
@ -1309,6 +1309,21 @@ ReplicatedMergeTreeMergePredicate ReplicatedMergeTreeQueue::getMergePredicate(zk
|
||||
}
|
||||
|
||||
|
||||
MutationCommands ReplicatedMergeTreeQueue::getFirstAlterMutationCommandsForPart(const MergeTreeData::DataPartPtr & part) const
|
||||
{
|
||||
std::lock_guard lock(state_mutex);
|
||||
auto in_partition = mutations_by_partition.find(part->info.partition_id);
|
||||
if (in_partition == mutations_by_partition.end())
|
||||
return MutationCommands{};
|
||||
|
||||
Int64 part_version = part->info.getDataVersion();
|
||||
for (auto [mutation_version, mutation_status] : in_partition->second)
|
||||
if (mutation_version > part_version && mutation_status->entry->alter_version != -1)
|
||||
return mutation_status->entry->commands;
|
||||
|
||||
return MutationCommands{};
|
||||
}
|
||||
|
||||
MutationCommands ReplicatedMergeTreeQueue::getMutationCommands(
|
||||
const MergeTreeData::DataPartPtr & part, Int64 desired_mutation_version) const
|
||||
{
|
||||
|
@ -331,6 +331,11 @@ public:
|
||||
|
||||
MutationCommands getMutationCommands(const MergeTreeData::DataPartPtr & part, Int64 desired_mutation_version) const;
|
||||
|
||||
/// Return mutation commands for part with smallest mutation version bigger
|
||||
/// than data part version. Used when we apply alter commands on fly,
|
||||
/// without actual data modification on disk.
|
||||
MutationCommands getFirstAlterMutationCommandsForPart(const MergeTreeData::DataPartPtr & part) const;
|
||||
|
||||
/// Mark finished mutations as done. If the function needs to be called again at some later time
|
||||
/// (because some mutations are probably done but we are not sure yet), returns true.
|
||||
bool tryFinalizeMutations(zkutil::ZooKeeperPtr zookeeper);
|
||||
|
@ -52,12 +52,10 @@ public:
|
||||
return part->storage.getInMemoryMetadata();
|
||||
}
|
||||
|
||||
|
||||
bool hasSortingKey() const { return part->storage.hasSortingKey(); }
|
||||
|
||||
Names getSortingKeyColumns() const override { return part->storage.getSortingKeyColumns(); }
|
||||
|
||||
|
||||
protected:
|
||||
StorageFromMergeTreeDataPart(const MergeTreeData::DataPartPtr & part_)
|
||||
: IStorage(getIDFromPart(part_), part_->storage.getVirtuals())
|
||||
|
@ -94,6 +94,15 @@ std::optional<MutationCommand> MutationCommand::parse(ASTAlterCommand * command,
|
||||
res.clear = true;
|
||||
return res;
|
||||
}
|
||||
else if (parse_alter_commands && command->type == ASTAlterCommand::RENAME_COLUMN)
|
||||
{
|
||||
MutationCommand res;
|
||||
res.ast = command->ptr();
|
||||
res.type = MutationCommand::Type::RENAME_COLUMN;
|
||||
res.column_name = command->column->as<ASTIdentifier &>().name;
|
||||
res.rename_to = command->rename_to->as<ASTIdentifier &>().name;
|
||||
return res;
|
||||
}
|
||||
else if (command->type == ASTAlterCommand::MATERIALIZE_TTL)
|
||||
{
|
||||
MutationCommand res;
|
||||
|
@ -31,7 +31,8 @@ struct MutationCommand
|
||||
READ_COLUMN,
|
||||
DROP_COLUMN,
|
||||
DROP_INDEX,
|
||||
MATERIALIZE_TTL
|
||||
MATERIALIZE_TTL,
|
||||
RENAME_COLUMN,
|
||||
};
|
||||
|
||||
Type type = EMPTY;
|
||||
@ -53,6 +54,9 @@ struct MutationCommand
|
||||
/// We need just clear column, not drop from metadata.
|
||||
bool clear = false;
|
||||
|
||||
/// Target column name for RENAME_COLUMN
|
||||
String rename_to;
|
||||
|
||||
/// If parse_alter_commands is true, then consider more Alter commands as mutation commands
|
||||
static std::optional<MutationCommand> parse(ASTAlterCommand * command, bool parse_alter_commands = false);
|
||||
};
|
||||
|
@ -237,10 +237,15 @@ void StorageMergeTree::alter(
|
||||
/// Reinitialize primary key because primary key column types might have changed.
|
||||
setProperties(metadata);
|
||||
|
||||
setTTLExpressions(metadata.columns.getColumnTTLs(), metadata.ttl_for_table_ast);
|
||||
setTTLExpressions(metadata.columns, metadata.ttl_for_table_ast);
|
||||
|
||||
DatabaseCatalog::instance().getDatabase(table_id.database_name)->alterTable(context, table_id.table_name, metadata);
|
||||
|
||||
String mutation_file_name;
|
||||
Int64 mutation_version = -1;
|
||||
if (!maybe_mutation_commands.empty())
|
||||
mutation_version = startMutation(maybe_mutation_commands, mutation_file_name);
|
||||
|
||||
/// We release all locks except alter_intention_lock which allows
|
||||
/// to execute alter queries sequentially
|
||||
table_lock_holder.releaseAllExceptAlterIntention();
|
||||
@ -248,7 +253,7 @@ void StorageMergeTree::alter(
|
||||
/// Always execute required mutations synchronously, because alters
|
||||
/// should be executed in sequential order.
|
||||
if (!maybe_mutation_commands.empty())
|
||||
mutateImpl(maybe_mutation_commands, /* mutations_sync = */ 1);
|
||||
waitForMutation(mutation_version, mutation_file_name);
|
||||
}
|
||||
}
|
||||
|
||||
@ -351,43 +356,42 @@ public:
|
||||
};
|
||||
|
||||
|
||||
void StorageMergeTree::mutateImpl(const MutationCommands & commands, size_t mutations_sync)
|
||||
Int64 StorageMergeTree::startMutation(const MutationCommands & commands, String & mutation_file_name)
|
||||
{
|
||||
/// Choose any disk, because when we load mutations we search them at each disk
|
||||
/// where storage can be placed. See loadMutations().
|
||||
auto disk = getStoragePolicy()->getAnyDisk();
|
||||
String file_name;
|
||||
Int64 version;
|
||||
std::lock_guard lock(currently_processing_in_background_mutex);
|
||||
|
||||
{
|
||||
std::lock_guard lock(currently_processing_in_background_mutex);
|
||||
MergeTreeMutationEntry entry(commands, disk, relative_data_path, insert_increment.get());
|
||||
version = increment.get();
|
||||
entry.commit(version);
|
||||
mutation_file_name = entry.file_name;
|
||||
auto insertion = current_mutations_by_id.emplace(mutation_file_name, std::move(entry));
|
||||
current_mutations_by_version.emplace(version, insertion.first->second);
|
||||
|
||||
MergeTreeMutationEntry entry(commands, disk, relative_data_path, insert_increment.get());
|
||||
version = increment.get();
|
||||
entry.commit(version);
|
||||
file_name = entry.file_name;
|
||||
auto insertion = current_mutations_by_id.emplace(file_name, std::move(entry));
|
||||
current_mutations_by_version.emplace(version, insertion.first->second);
|
||||
|
||||
LOG_INFO(log, "Added mutation: " << file_name);
|
||||
merging_mutating_task_handle->wake();
|
||||
}
|
||||
|
||||
/// We have to wait mutation end
|
||||
if (mutations_sync > 0)
|
||||
{
|
||||
LOG_INFO(log, "Waiting mutation: " << file_name);
|
||||
auto check = [version, this]() { return shutdown_called || isMutationDone(version); };
|
||||
std::unique_lock lock(mutation_wait_mutex);
|
||||
mutation_wait_event.wait(lock, check);
|
||||
LOG_INFO(log, "Mutation " << file_name << " done");
|
||||
}
|
||||
LOG_INFO(log, "Added mutation: " << mutation_file_name);
|
||||
merging_mutating_task_handle->wake();
|
||||
return version;
|
||||
}
|
||||
|
||||
void StorageMergeTree::waitForMutation(Int64 version, const String & file_name)
|
||||
{
|
||||
LOG_INFO(log, "Waiting mutation: " << file_name);
|
||||
auto check = [version, this]() { return shutdown_called || isMutationDone(version); };
|
||||
std::unique_lock lock(mutation_wait_mutex);
|
||||
mutation_wait_event.wait(lock, check);
|
||||
LOG_INFO(log, "Mutation " << file_name << " done");
|
||||
}
|
||||
|
||||
void StorageMergeTree::mutate(const MutationCommands & commands, const Context & query_context)
|
||||
{
|
||||
mutateImpl(commands, query_context.getSettingsRef().mutations_sync);
|
||||
String mutation_file_name;
|
||||
Int64 version = startMutation(commands, mutation_file_name);
|
||||
|
||||
if (query_context.getSettingsRef().mutations_sync > 0)
|
||||
waitForMutation(version, mutation_file_name);
|
||||
}
|
||||
|
||||
namespace
|
||||
@ -688,10 +692,16 @@ bool StorageMergeTree::tryMutatePart()
|
||||
MutationCommands commands_for_size_validation;
|
||||
for (const auto & command : it->second.commands)
|
||||
{
|
||||
if (command.type != MutationCommand::Type::DROP_COLUMN && command.type != MutationCommand::Type::DROP_INDEX)
|
||||
if (command.type != MutationCommand::Type::DROP_COLUMN
|
||||
&& command.type != MutationCommand::Type::DROP_INDEX
|
||||
&& command.type != MutationCommand::Type::RENAME_COLUMN)
|
||||
{
|
||||
commands_for_size_validation.push_back(command);
|
||||
}
|
||||
else
|
||||
{
|
||||
commands_size += command.ast->size();
|
||||
}
|
||||
}
|
||||
|
||||
if (!commands_for_size_validation.empty())
|
||||
@ -1254,4 +1264,15 @@ CheckResults StorageMergeTree::checkData(const ASTPtr & query, const Context & c
|
||||
return results;
|
||||
}
|
||||
|
||||
|
||||
MutationCommands StorageMergeTree::getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const
|
||||
{
|
||||
std::lock_guard lock(currently_processing_in_background_mutex);
|
||||
|
||||
auto it = current_mutations_by_version.upper_bound(part->info.getDataVersion());
|
||||
if (it == current_mutations_by_version.end())
|
||||
return {};
|
||||
return it->second.commands;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -120,7 +120,11 @@ private:
|
||||
|
||||
BackgroundProcessingPoolTaskResult movePartsTask();
|
||||
|
||||
void mutateImpl(const MutationCommands & commands, size_t mutations_sync);
|
||||
/// Allocate a block number for the new mutation, write the mutation to disk
/// and register it in in-memory structures. Wake up the merge-mutation task.
|
||||
Int64 startMutation(const MutationCommands & commands, String & mutation_file_name);
|
||||
/// Wait until the mutation with the given version has finished for all parts
|
||||
void waitForMutation(Int64 version, const String & file_name);
|
||||
|
||||
/// Try and find a single part to mutate and mutate it. If some part was successfully mutated, return true.
|
||||
bool tryMutatePart();
|
||||
@ -165,6 +169,8 @@ protected:
|
||||
const MergingParams & merging_params_,
|
||||
std::unique_ptr<MergeTreeSettings> settings_,
|
||||
bool has_force_restore_data_flag);
|
||||
|
||||
MutationCommands getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const override;
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -447,7 +447,6 @@ void StorageReplicatedMergeTree::checkTableStructure(const String & zookeeper_pr
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void StorageReplicatedMergeTree::setTableStructure(ColumnsDescription new_columns, const ReplicatedMergeTreeTableMetadata::Diff & metadata_diff)
|
||||
{
|
||||
StorageInMemoryMetadata metadata = getInMemoryMetadata();
|
||||
@ -497,7 +496,7 @@ void StorageReplicatedMergeTree::setTableStructure(ColumnsDescription new_column
|
||||
/// Even if the primary/sorting keys didn't change we must reinitialize it
|
||||
/// because primary key column types might have changed.
|
||||
setProperties(metadata);
|
||||
setTTLExpressions(new_columns.getColumnTTLs(), metadata.ttl_for_table_ast);
|
||||
setTTLExpressions(new_columns, metadata.ttl_for_table_ast);
|
||||
}
|
||||
|
||||
|
||||
@ -5293,32 +5292,8 @@ bool StorageReplicatedMergeTree::canUseAdaptiveGranularity() const
|
||||
}
|
||||
|
||||
|
||||
StorageInMemoryMetadata
|
||||
StorageReplicatedMergeTree::getMetadataFromSharedZookeeper(const String & metadata_str, const String & columns_str) const
|
||||
MutationCommands StorageReplicatedMergeTree::getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const
|
||||
{
|
||||
auto replicated_metadata = ReplicatedMergeTreeTableMetadata::parse(metadata_str);
|
||||
StorageInMemoryMetadata result = getInMemoryMetadata();
|
||||
result.columns = ColumnsDescription::parse(columns_str);
|
||||
result.constraints = ConstraintsDescription::parse(replicated_metadata.constraints);
|
||||
result.indices = IndicesDescription::parse(replicated_metadata.skip_indices);
|
||||
|
||||
ParserExpression expression_p;
|
||||
|
||||
/// The only thing, that can be changed is ttl expression
|
||||
if (replicated_metadata.primary_key.empty())
|
||||
throw Exception("Primary key cannot be empty" , ErrorCodes::LOGICAL_ERROR);
|
||||
|
||||
if (!replicated_metadata.sorting_key.empty())
|
||||
{
|
||||
result.order_by_ast = parseQuery(expression_p, "(" + replicated_metadata.sorting_key + ")", 0);
|
||||
result.primary_key_ast = parseQuery(expression_p, "(" + replicated_metadata.primary_key + ")", 0);
|
||||
}
|
||||
else
|
||||
{
|
||||
result.order_by_ast = parseQuery(expression_p, "(" + replicated_metadata.primary_key + ")", 0);
|
||||
}
|
||||
return result;
|
||||
|
||||
return queue.getFirstAlterMutationCommandsForPart(part);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -526,7 +526,7 @@ private:
|
||||
void waitMutationToFinishOnReplicas(
|
||||
const Strings & replicas, const String & mutation_id) const;
|
||||
|
||||
StorageInMemoryMetadata getMetadataFromSharedZookeeper(const String & metadata_str, const String & columns_str) const;
|
||||
MutationCommands getFirtsAlterMutationCommandsForPart(const DataPartPtr & part) const override;
|
||||
|
||||
protected:
|
||||
/** If not 'attach', either creates a new table in ZK, or adds a replica to an existing table.
|
||||
@ -543,6 +543,7 @@ protected:
|
||||
const MergingParams & merging_params_,
|
||||
std::unique_ptr<MergeTreeSettings> settings_,
|
||||
bool has_force_restore_data_flag);
|
||||
|
||||
};
|
||||
|
||||
|
||||
|
@ -0,0 +1,7 @@
|
||||
1
|
||||
1
|
||||
date key renamed_value1 value2 value3
|
||||
2019-10-02 1 1 1 1
|
||||
7 7
|
||||
date key renamed_value1 renamed_value2 renamed_value3
|
||||
2019-10-02 7 7 7 7
|
tests/queries/0_stateless/01213_alter_rename_column.sql (new file, 38 lines)
@@ -0,0 +1,38 @@
DROP TABLE IF EXISTS table_for_rename;

CREATE TABLE table_for_rename
(
    date Date,
    key UInt64,
    value1 String,
    value2 String,
    value3 String
)
ENGINE = MergeTree()
PARTITION BY date
ORDER BY key;

INSERT INTO table_for_rename SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number), toString(number) from numbers(9);

SELECT value1 FROM table_for_rename WHERE key = 1;

ALTER TABLE table_for_rename RENAME COLUMN value1 to renamed_value1;

SELECT renamed_value1 FROM table_for_rename WHERE key = 1;

SELECT * FROM table_for_rename WHERE key = 1 FORMAT TSVWithNames;

ALTER TABLE table_for_rename RENAME COLUMN value3 to value2; --{serverError 15}
ALTER TABLE table_for_rename RENAME COLUMN value3 TO r1, RENAME COLUMN value3 TO r2; --{serverError 36}
ALTER TABLE table_for_rename RENAME COLUMN value3 TO r1, RENAME COLUMN r1 TO value1; --{serverError 10}

ALTER TABLE table_for_rename RENAME COLUMN value2 TO renamed_value2, RENAME COLUMN value3 TO renamed_value3;

SELECT renamed_value2, renamed_value3 FROM table_for_rename WHERE key = 7;

SELECT * FROM table_for_rename WHERE key = 7 FORMAT TSVWithNames;

ALTER TABLE table_for_rename RENAME COLUMN value100 to renamed_value100; --{serverError 10}
ALTER TABLE table_for_rename RENAME COLUMN IF EXISTS value100 to renamed_value100;

DROP TABLE IF EXISTS table_for_rename;
@ -0,0 +1,8 @@
|
||||
1
|
||||
CREATE TABLE default.table_for_rename_replicated (`date` Date, `key` UInt64, `value1` String, `value2` String, `value3` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicated\', \'1\') PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192
|
||||
CREATE TABLE default.table_for_rename_replicated (`date` Date, `key` UInt64, `renamed_value1` String, `value2` String, `value3` String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/table_for_rename_replicated\', \'1\') PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192
|
||||
1
|
||||
date key renamed_value1 value2 value3
|
||||
2019-10-02 1 1 1 1
|
||||
date key renamed_value1 value2 value3
|
||||
2019-10-02 1 1 1 1
|
tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh (new executable file, 52 lines)
@ -0,0 +1,52 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
. $CURDIR/../shell_config.sh
|
||||
|
||||
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_rename_replicated"
|
||||
|
||||
$CLICKHOUSE_CLIENT -n --query "
|
||||
CREATE TABLE table_for_rename_replicated
|
||||
(
|
||||
date Date,
|
||||
key UInt64,
|
||||
value1 String,
|
||||
value2 String,
|
||||
value3 String
|
||||
)
|
||||
ENGINE = ReplicatedMergeTree('/clickhouse/tables/table_for_rename_replicated', '1')
|
||||
PARTITION BY date
|
||||
ORDER BY key;
|
||||
"
|
||||
|
||||
|
||||
$CLICKHOUSE_CLIENT --query "INSERT INTO table_for_rename_replicated SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number), toString(number) from numbers(9);"
|
||||
|
||||
$CLICKHOUSE_CLIENT --query "SELECT value1 FROM table_for_rename_replicated WHERE key = 1;"
|
||||
|
||||
$CLICKHOUSE_CLIENT --query "SYSTEM STOP MERGES;"
|
||||
|
||||
$CLICKHOUSE_CLIENT --query "SHOW CREATE TABLE table_for_rename_replicated;"
|
||||
|
||||
$CLICKHOUSE_CLIENT --query "ALTER TABLE table_for_rename_replicated RENAME COLUMN value1 to renamed_value1" --replication_alter_partitions_sync=0
|
||||
|
||||
|
||||
while [[ -z $($CLICKHOUSE_CLIENT --query "SELECT name FROM system.columns WHERE name = 'renamed_value1' and table = 'table_for_rename_replicated'" 2>/dev/null) ]]; do
|
||||
sleep 0.5
|
||||
done
|
||||
|
||||
# RENAME on the fly works
|
||||
|
||||
$CLICKHOUSE_CLIENT --query "SHOW CREATE TABLE table_for_rename_replicated;"
|
||||
|
||||
$CLICKHOUSE_CLIENT --query "SELECT renamed_value1 FROM table_for_rename_replicated WHERE key = 1;"
|
||||
|
||||
$CLICKHOUSE_CLIENT --query "SELECT * FROM table_for_rename_replicated WHERE key = 1 FORMAT TSVWithNames;"
|
||||
|
||||
$CLICKHOUSE_CLIENT --query "SYSTEM START MERGES;"
|
||||
|
||||
$CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA table_for_rename_replicated;"
|
||||
|
||||
$CLICKHOUSE_CLIENT --query "SELECT * FROM table_for_rename_replicated WHERE key = 1 FORMAT TSVWithNames;"
|
||||
|
||||
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_rename_replicated;"
|
@ -0,0 +1,7 @@
|
||||
1
|
||||
1
|
||||
date key renamed_value1 value2 value3
|
||||
2019-10-02 1 1 1 1
|
||||
7 7
|
||||
date key renamed_value1 renamed_value2 renamed_value3
|
||||
2019-10-02 7 7 7 7
|
@ -0,0 +1,33 @@
DROP TABLE IF EXISTS table_with_compact_parts;

CREATE TABLE table_with_compact_parts
(
    date Date,
    key UInt64,
    value1 String,
    value2 String,
    value3 String
)
ENGINE = MergeTree()
PARTITION BY date
ORDER BY key
SETTINGS index_granularity = 8,
         min_rows_for_wide_part = 10;

INSERT INTO table_with_compact_parts SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number), toString(number) FROM numbers(9);

SELECT value1 FROM table_with_compact_parts WHERE key = 1;

ALTER TABLE table_with_compact_parts RENAME COLUMN value1 TO renamed_value1;

SELECT renamed_value1 FROM table_with_compact_parts WHERE key = 1;

SELECT * FROM table_with_compact_parts WHERE key = 1 FORMAT TSVWithNames;

ALTER TABLE table_with_compact_parts RENAME COLUMN value2 TO renamed_value2, RENAME COLUMN value3 TO renamed_value3;

SELECT renamed_value2, renamed_value3 FROM table_with_compact_parts WHERE key = 7;

SELECT * FROM table_with_compact_parts WHERE key = 7 FORMAT TSVWithNames;

DROP TABLE IF EXISTS table_with_compact_parts;
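The small inserts above, together with min_rows_for_wide_part = 10, are meant to produce compact parts. A minimal sketch (not part of the test; the check itself is an assumption, table name taken from the script above) of how the part format could be verified through system.parts:

-- Hypothetical verification: list the format of the active parts of the table above.
SELECT name, part_type
FROM system.parts
WHERE database = currentDatabase()
  AND table = 'table_with_compact_parts'
  AND active;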
@ -0,0 +1,10 @@
[8,9,10]
['a','b','c']
CREATE TABLE default.table_for_rename_nested (`date` Date, `key` UInt64, `n.x` Array(UInt32), `n.y` Array(String), `value1` Array(Array(LowCardinality(String)))) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192
CREATE TABLE default.table_for_rename_nested (`date` Date, `key` UInt64, `n.renamed_x` Array(UInt32), `n.renamed_y` Array(String), `value1` Array(Array(LowCardinality(String)))) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192
7 [8,9,10]
7 ['a','b','c']
[['7']]
CREATE TABLE default.table_for_rename_nested (`date` Date, `key` UInt64, `n.renamed_x` Array(UInt32), `n.renamed_y` Array(String), `renamed_value1` Array(Array(LowCardinality(String)))) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192
date key n.renamed_x n.renamed_y renamed_value1
2019-10-01 7 [8,9,10] ['a','b','c'] [['7']]
42
tests/queries/0_stateless/01213_alter_rename_nested.sql
Normal file
@ -0,0 +1,42 @@
DROP TABLE IF EXISTS table_for_rename_nested;
CREATE TABLE table_for_rename_nested
(
    date Date,
    key UInt64,
    n Nested(x UInt32, y String),
    value1 Array(Array(LowCardinality(String))) -- a column that is stored in several files
)
ENGINE = MergeTree()
PARTITION BY date
ORDER BY key;

INSERT INTO table_for_rename_nested (date, key, n.x, n.y, value1) SELECT toDate('2019-10-01'), number, [number + 1, number + 2, number + 3], ['a', 'b', 'c'], [[toString(number)]] FROM numbers(10);

SELECT n.x FROM table_for_rename_nested WHERE key = 7;
SELECT n.y FROM table_for_rename_nested WHERE key = 7;

SHOW CREATE TABLE table_for_rename_nested;

ALTER TABLE table_for_rename_nested RENAME COLUMN n.x TO n.renamed_x;
ALTER TABLE table_for_rename_nested RENAME COLUMN n.y TO n.renamed_y;

SHOW CREATE TABLE table_for_rename_nested;

SELECT key, n.renamed_x FROM table_for_rename_nested WHERE key = 7;
SELECT key, n.renamed_y FROM table_for_rename_nested WHERE key = 7;

ALTER TABLE table_for_rename_nested RENAME COLUMN n.renamed_x TO not_nested_x; --{serverError 36}

-- Renaming a whole Nested column is currently not implemented
ALTER TABLE table_for_rename_nested RENAME COLUMN n TO renamed_n; --{serverError 48}

ALTER TABLE table_for_rename_nested RENAME COLUMN value1 TO renamed_value1;

SELECT renamed_value1 FROM table_for_rename_nested WHERE key = 7;

SHOW CREATE TABLE table_for_rename_nested;

SELECT * FROM table_for_rename_nested WHERE key = 7 FORMAT TSVWithNames;

DROP TABLE IF EXISTS table_for_rename_nested;
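As a hedged follow-up (not part of the test file above), the renamed subcolumns of the Nested column should still behave as parallel arrays; a minimal ARRAY JOIN sketch over the same table:

-- Assumed illustration: unroll the renamed nested arrays row by row.
SELECT key, n.renamed_x, n.renamed_y
FROM table_for_rename_nested
ARRAY JOIN n
WHERE key = 7;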
@ -0,0 +1 @@
1 1
@ -0,0 +1,55 @@
DROP TABLE IF EXISTS table_for_rename_pk;

CREATE TABLE table_for_rename_pk
(
    date Date,
    key1 UInt64,
    key2 UInt64,
    key3 UInt64,
    value1 String,
    value2 String
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/table_for_rename_pk', '1')
PARTITION BY date
ORDER BY (key1, pow(key2, 2), key3);

INSERT INTO table_for_rename_pk SELECT toDate('2019-10-01') + number % 3, number, number, number, toString(number), toString(number) FROM numbers(9);

SELECT key1, value1 FROM table_for_rename_pk WHERE key1 = 1 AND key2 = 1 AND key3 = 1;

ALTER TABLE table_for_rename_pk RENAME COLUMN key1 TO renamed_key1; --{serverError 44}

ALTER TABLE table_for_rename_pk RENAME COLUMN key3 TO renamed_key3; --{serverError 44}

ALTER TABLE table_for_rename_pk RENAME COLUMN key2 TO renamed_key2; --{serverError 44}

DROP TABLE IF EXISTS table_for_rename_pk;

DROP TABLE IF EXISTS table_for_rename_with_primary_key;

CREATE TABLE table_for_rename_with_primary_key
(
    date Date,
    key1 UInt64,
    key2 UInt64,
    key3 UInt64,
    value1 String,
    value2 String,
    INDEX idx (value1) TYPE set(1) GRANULARITY 1
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/table_for_rename_pk', '1')
PARTITION BY date
ORDER BY (key1, key2, key3)
PRIMARY KEY (key1, key2);

INSERT INTO table_for_rename_with_primary_key SELECT toDate('2019-10-01') + number % 3, number, number, number, toString(number), toString(number) FROM numbers(9);

ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN key1 TO renamed_key1; --{serverError 44}

ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN key2 TO renamed_key2; --{serverError 44}

ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN key3 TO renamed_key3; --{serverError 44}

ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN value1 TO renamed_value1; --{serverError 44}

DROP TABLE IF EXISTS table_for_rename_with_primary_key;
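For contrast, a hedged sketch that is not part of the test: renaming a column that is referenced by neither the sorting key, the primary key, nor the skip index on the same table would be expected to succeed rather than hit the serverError 44 seen above.

-- Assumed example: value2 is not used in ORDER BY, PRIMARY KEY or the index idx
-- (run before the final DROP of the test).
ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN value2 TO renamed_value2;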
@ -0,0 +1,17 @@
date key value1 value2
2019-10-02 1 1 Hello 1
CREATE TABLE default.table_rename_with_default (`date` Date, `key` UInt64, `value1` String, `value2` String DEFAULT concat(\'Hello \', value1), `value3` String ALIAS concat(\'Word \', value1)) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192
date key renamed_value1 value2
2019-10-02 1 1 Hello 1
CREATE TABLE default.table_rename_with_default (`date` Date, `key` UInt64, `renamed_value1` String, `value2` String DEFAULT concat(\'Hello \', renamed_value1), `value3` String ALIAS concat(\'Word \', renamed_value1)) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192
Hello 1
Word 1
date1 date2 value1 value2
2019-10-02 2018-10-02 1 1
CREATE TABLE default.table_rename_with_ttl (`date1` Date, `date2` Date, `value1` String, `value2` String TTL date1 + toIntervalMonth(10000)) ENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\') ORDER BY tuple() TTL date2 + toIntervalMonth(10000) SETTINGS index_granularity = 8192
renamed_date1 date2 value1 value2
2019-10-02 2018-10-02 1 1
CREATE TABLE default.table_rename_with_ttl (`renamed_date1` Date, `date2` Date, `value1` String, `value2` String TTL renamed_date1 + toIntervalMonth(10000)) ENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\') ORDER BY tuple() TTL date2 + toIntervalMonth(10000) SETTINGS index_granularity = 8192
renamed_date1 renamed_date2 value1 value2
2019-10-02 2018-10-02 1 1
CREATE TABLE default.table_rename_with_ttl (`renamed_date1` Date, `renamed_date2` Date, `value1` String, `value2` String TTL renamed_date1 + toIntervalMonth(10000)) ENGINE = ReplicatedMergeTree(\'/clickhouse/test/table_rename_with_ttl\', \'1\') ORDER BY tuple() TTL renamed_date2 + toIntervalMonth(10000) SETTINGS index_granularity = 8192
@ -0,0 +1,63 @@
DROP TABLE IF EXISTS table_rename_with_default;

CREATE TABLE table_rename_with_default
(
    date Date,
    key UInt64,
    value1 String,
    value2 String DEFAULT concat('Hello ', value1),
    value3 String ALIAS concat('Word ', value1)
)
ENGINE = MergeTree()
PARTITION BY date
ORDER BY key;

INSERT INTO table_rename_with_default (date, key, value1) SELECT toDate('2019-10-01') + number % 3, number, toString(number) FROM numbers(9);

SELECT * FROM table_rename_with_default WHERE key = 1 FORMAT TSVWithNames;

SHOW CREATE TABLE table_rename_with_default;

ALTER TABLE table_rename_with_default RENAME COLUMN value1 TO renamed_value1;

SELECT * FROM table_rename_with_default WHERE key = 1 FORMAT TSVWithNames;

SHOW CREATE TABLE table_rename_with_default;

SELECT value2 FROM table_rename_with_default WHERE key = 1;
SELECT value3 FROM table_rename_with_default WHERE key = 1;

DROP TABLE IF EXISTS table_rename_with_default;

DROP TABLE IF EXISTS table_rename_with_ttl;

CREATE TABLE table_rename_with_ttl
(
    date1 Date,
    date2 Date,
    value1 String,
    value2 String TTL date1 + INTERVAL 10000 MONTH
)
ENGINE = ReplicatedMergeTree('/clickhouse/test/table_rename_with_ttl', '1')
ORDER BY tuple()
TTL date2 + INTERVAL 10000 MONTH;

INSERT INTO table_rename_with_ttl SELECT toDate('2019-10-01') + number % 3, toDate('2018-10-01') + number % 3, toString(number), toString(number) FROM numbers(9);

SELECT * FROM table_rename_with_ttl WHERE value1 = '1' FORMAT TSVWithNames;

SHOW CREATE TABLE table_rename_with_ttl;

ALTER TABLE table_rename_with_ttl RENAME COLUMN date1 TO renamed_date1;

SELECT * FROM table_rename_with_ttl WHERE value1 = '1' FORMAT TSVWithNames;

SHOW CREATE TABLE table_rename_with_ttl;

ALTER TABLE table_rename_with_ttl RENAME COLUMN date2 TO renamed_date2;

SELECT * FROM table_rename_with_ttl WHERE value1 = '1' FORMAT TSVWithNames;

SHOW CREATE TABLE table_rename_with_ttl;

DROP TABLE IF EXISTS table_rename_with_ttl;
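As an assumed supplement to the SHOW CREATE checks above (the query itself is not part of the test; table and column names follow the script), the rewritten DEFAULT/ALIAS expressions can also be inspected through system.columns:

-- Hypothetical check: default_expression should reference renamed_value1 after the rename.
SELECT name, default_kind, default_expression
FROM system.columns
WHERE database = currentDatabase()
  AND table = 'table_rename_with_default';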
@ -0,0 +1,6 @@
[8,9,10]
['a','b','c']
CREATE TABLE default.table_for_rename_nested (`date` Date, `key` UInt64, `n.x` Array(UInt32), `n.y` Array(String), `value1` String) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192
CREATE TABLE default.table_for_rename_nested (`date` Date, `key` UInt64, `n.renamed_x` Array(UInt32), `n.renamed_y` Array(String), `value1` String) ENGINE = MergeTree() PARTITION BY date ORDER BY key SETTINGS index_granularity = 8192
7 [8,9,10]
7 ['a','b','c']
@ -0,0 +1,38 @@
DROP TABLE IF EXISTS table_for_rename_nested;
CREATE TABLE table_for_rename_nested
(
    date Date,
    key UInt64,
    n Nested(x UInt32, y String),
    value1 String
)
ENGINE = MergeTree()
PARTITION BY date
ORDER BY key;

INSERT INTO table_for_rename_nested (date, key, n.x, n.y, value1) SELECT toDate('2019-10-01'), number, [number + 1, number + 2, number + 3], ['a', 'b', 'c'], toString(number) FROM numbers(10);

SELECT n.x FROM table_for_rename_nested WHERE key = 7;
SELECT n.y FROM table_for_rename_nested WHERE key = 7;

SHOW CREATE TABLE table_for_rename_nested;

ALTER TABLE table_for_rename_nested RENAME COLUMN n.x TO n.renamed_x;
ALTER TABLE table_for_rename_nested RENAME COLUMN n.y TO n.renamed_y;

SHOW CREATE TABLE table_for_rename_nested;

SELECT key, n.renamed_x FROM table_for_rename_nested WHERE key = 7;
SELECT key, n.renamed_y FROM table_for_rename_nested WHERE key = 7;

ALTER TABLE table_for_rename_nested RENAME COLUMN n.renamed_x TO not_nested_x; --{serverError 36}

ALTER TABLE table_for_rename_nested RENAME COLUMN n.renamed_x TO q.renamed_x; --{serverError 36}

ALTER TABLE table_for_rename_nested RENAME COLUMN value1 TO q.renamed_x; --{serverError 36}

-- Renaming a whole Nested column is currently not implemented
ALTER TABLE table_for_rename_nested RENAME COLUMN n TO renamed_n; --{serverError 48}

DROP TABLE IF EXISTS table_for_rename_nested;