This commit is contained in:
Andrey Mironov 2014-11-12 13:37:47 +03:00
commit 731be3e7cf
91 changed files with 2668 additions and 774 deletions

View File

@ -20,6 +20,8 @@ namespace DB
* Позволяет вставлять, удалять столбцы в любом порядке, менять порядок столбцов.
*/
class Context;
class Block
{
public:
@ -54,6 +56,7 @@ public:
void insert(size_t position, const ColumnWithNameAndType & elem);
/// вставить столбец в конец
void insert(const ColumnWithNameAndType & elem);
void insertDefault(const String & name, const DataTypePtr & type);
/// вставить столбец в конец, если столбца с таким именем ещё нет
void insertUnique(const ColumnWithNameAndType & elem);
/// удалить столбец в заданной позиции
@ -61,7 +64,7 @@ public:
/// удалить столбец с заданным именем
void erase(const String & name);
/// Добавляет в блок недостающие столбцы со значениями по-умолчанию
void addDefaults(NamesAndTypesListPtr required_columns);
void addDefaults(const NamesAndTypesList & required_columns);
ColumnWithNameAndType & getByPosition(size_t position);
const ColumnWithNameAndType & getByPosition(size_t position) const;

View File

@ -3,8 +3,10 @@
#include <Poco/SharedPtr.h>
#include <DB/DataStreams/IProfilingBlockInputStream.h>
#include <DB/Interpreters/evaluateMissingDefaults.h>
#include <DB/Columns/ColumnConst.h>
#include <DB/Storages/ColumnDefault.h>
namespace DB
@ -19,12 +21,20 @@ class AddingDefaultBlockInputStream : public IProfilingBlockInputStream
public:
/** Wraps `input_` and fills in missing columns.
  * @param input_            source stream; registered as a child.
  * @param required_columns_ columns the output block must contain.
  * @param column_defaults_  default expressions per column name.
  *                          NOTE(review): stored as a const reference member —
  *                          the caller must keep the referenced object alive for
  *                          the stream's whole lifetime; confirm all call sites.
  * @param context_          copied into the stream (used to evaluate defaults).
  */
AddingDefaultBlockInputStream(
	BlockInputStreamPtr input_,
	NamesAndTypesListPtr required_columns_,
	const ColumnDefaults & column_defaults_,
	const Context & context_)
	: required_columns(required_columns_),
	column_defaults(column_defaults_), context(context_)
{
	children.push_back(input_);
}
/** Convenience constructor: no explicit column defaults.
  * Fixed: the delegating mem-initializer previously named the members
  * `required_columns` and `context` instead of the parameters
  * `required_columns_` and `context_`, reading the not-yet-initialized
  * members during delegation.
  * NOTE(review): `{}` creates a temporary ColumnDefaults that the target
  * constructor binds to a const-reference member; that reference dangles once
  * construction finishes — confirm and consider storing the map by value.
  */
AddingDefaultBlockInputStream(BlockInputStreamPtr input_, NamesAndTypesListPtr required_columns_, const Context & context_)
	: AddingDefaultBlockInputStream{input_, required_columns_, {}, context_}
{
}
String getName() const override { return "AddingDefaultBlockInputStream"; }
String getID() const override
@ -45,12 +55,15 @@ protected:
Block res = children.back()->read();
if (!res)
return res;
res.addDefaults(required_columns);
evaluateMissingDefaults(res, *required_columns, column_defaults, context);
res.addDefaults(*required_columns);
return res;
}
private:
NamesAndTypesListPtr required_columns;
const ColumnDefaults & column_defaults;
Context context;
};
}

View File

@ -5,6 +5,9 @@
#include <DB/DataStreams/IBlockOutputStream.h>
#include <DB/Columns/ColumnConst.h>
#include <DB/Storages/ColumnDefault.h>
#include <DB/Interpreters/Context.h>
#include <DB/Interpreters/evaluateMissingDefaults.h>
namespace DB
@ -19,15 +22,25 @@ class AddingDefaultBlockOutputStream : public IBlockOutputStream
public:
/** Wraps `output_`; every written block gets its missing columns added first.
  * @param output_           destination stream.
  * @param required_columns_ columns each block must contain before writing.
  * @param column_defaults_  default expressions per column name.
  *                          NOTE(review): stored as a const reference member —
  *                          the referenced object must outlive this stream.
  * @param context_          copied into the stream (used to evaluate defaults).
  */
AddingDefaultBlockOutputStream(
	BlockOutputStreamPtr output_,
	NamesAndTypesListPtr required_columns_,
	const ColumnDefaults & column_defaults_,
	const Context & context_)
	: output(output_), required_columns(required_columns_),
	column_defaults(column_defaults_), context(context_)
{
}
/** Convenience constructor: no explicit column defaults.
  * NOTE(review): `{}` is a temporary ColumnDefaults; the target constructor
  * stores it in a const-reference member, which dangles after construction
  * completes — confirm and consider storing the map by value instead.
  */
AddingDefaultBlockOutputStream(BlockOutputStreamPtr output_, NamesAndTypesListPtr required_columns_, const Context & context_)
	: AddingDefaultBlockOutputStream{output_, required_columns_, {}, context_}
{
}
/// Copies the block, evaluates default expressions for columns that are
/// missing, fills any remaining required columns with type defaults, and
/// forwards the completed block downstream.
void write(const Block & block) override
{
	/// work on a copy — the input block is const
	Block res = block;
	evaluateMissingDefaults(res, *required_columns, column_defaults, context);
	res.addDefaults(*required_columns);
	output->write(res);
}
@ -39,6 +52,8 @@ public:
private:
BlockOutputStreamPtr output;
NamesAndTypesListPtr required_columns;
const ColumnDefaults & column_defaults;
Context context;
};

View File

@ -0,0 +1,37 @@
#pragma once
#include <DB/Columns/ColumnConst.h>
#include <DB/DataStreams/IBlockOutputStream.h>
#include <statdaemons/ext/range.hpp>
namespace DB
{
/** Преобразует столбцы-константы в полноценные столбцы ("материализует" их).
*/
class MaterializingBlockOutputStream : public IBlockOutputStream
{
public:
MaterializingBlockOutputStream(const BlockOutputStreamPtr & output) : output{output}
{}
void write(const Block & original_block) override
{
/// copy block to get rid of const
auto block = original_block;
for (const auto i : ext::range(0, block.columns()))
{
ColumnPtr & col = block.getByPosition(i).column;
if (col->isConst())
col = dynamic_cast<IColumnConst &>(*col).convertToFullColumn();
}
output->write(block);
}
BlockOutputStreamPtr output;
};
}

View File

@ -0,0 +1,39 @@
#pragma once
#include <DB/DataStreams/IBlockOutputStream.h>
namespace DB
{
/// Throws exception on encountering prohibited column in block
class ProhibitColumnsBlockOutputStream : public IBlockOutputStream
{
public:
ProhibitColumnsBlockOutputStream(const BlockOutputStreamPtr & output, const NamesAndTypesList & columns)
: output{output}, columns{columns}
{
}
private:
void write(const Block & block) override
{
for (const auto & column : columns)
if (block.has(column.name))
throw Exception{"Cannot insert column " + column.name, ErrorCodes::ILLEGAL_COLUMN};
output->write(block);
}
void flush() override { output->flush(); }
void writePrefix() override { output->writePrefix(); }
void writeSuffix() override { output->writeSuffix(); }
BlockOutputStreamPtr output;
NamesAndTypesList columns;
};
}

View File

@ -124,7 +124,8 @@ public:
return new DataTypeString;
}
/// Выполнить функцию над блоком.
/** Выполнить функцию над блоком. convertToFullColumn вызывается для того, чтобы в случае
* распределенного выполнения запроса каждый сервер возвращал свое имя хоста. */
void execute(Block & block, const ColumnNumbers & arguments, size_t result)
{
block.getByPosition(result).column = ColumnConstString(

View File

@ -210,9 +210,11 @@ private:
*/
void collectJoinedColumns(NameSet & joined_columns, NamesAndTypesList & joined_columns_name_type);
void addStorageAliases();
/** Создать словарь алиасов.
*/
void createAliasesDict(ASTPtr & ast, int ignore_levels = 0);
void addASTAliases(ASTPtr & ast, int ignore_levels = 0);
/** Для узлов-звёздочек - раскрыть их в список всех столбцов.
* Для узлов-литералов - подставить алиасы.

View File

@ -22,7 +22,13 @@ public:
/** Изменяет список столбцов в метаданных таблицы на диске. Нужно вызывать под TableStructureLock соответствующей таблицы.
*/
static void updateMetadata(const String & database, const String & table, const NamesAndTypesList & columns, Context & context);
static void updateMetadata(const String & database,
const String & table,
const NamesAndTypesList & columns,
const NamesAndTypesList & materialized_columns,
const NamesAndTypesList & alias_columns,
const ColumnDefaults & column_defaults,
Context & context);
private:
struct PartitionCommand
{
@ -62,10 +68,11 @@ private:
typedef std::vector<PartitionCommand> PartitionCommands;
ASTPtr query_ptr;
Context context;
static void parseAlter(const ASTAlterQuery::ParameterContainer & params, const DataTypeFactory & data_type_factory,
AlterCommands & out_alter_commands, PartitionCommands & out_partition_commands);
};
}

View File

@ -2,6 +2,7 @@
#include <DB/Storages/IStorage.h>
#include <DB/Interpreters/Context.h>
#include <DB/Storages/ColumnDefault.h>
namespace DB
@ -14,7 +15,7 @@ class InterpreterCreateQuery
{
public:
InterpreterCreateQuery(ASTPtr query_ptr_, Context & context_);
/** В случае таблицы: добавляет созданную таблицу в контекст, а также возвращает её.
* В случае БД: добавляет созданную БД в контекст и возвращает NULL.
* assume_metadata_exists - не проверять наличие файла с метаданными и не создавать его
@ -22,12 +23,22 @@ public:
*/
StoragePtr execute(bool assume_metadata_exists = false);
/** AST в список столбцов с типами и обратно. Столбцы типа Nested развернуты в список настоящих столбцов.
*/
static NamesAndTypesList parseColumns(ASTPtr expression_list, const DataTypeFactory & data_type_factory);
/// Список столбцов с типами в AST.
static ASTPtr formatColumns(const NamesAndTypesList & columns);
static ASTPtr formatColumns(
NamesAndTypesList columns,
const NamesAndTypesList & materialized_columns,
const NamesAndTypesList & alias_columns,
const ColumnDefaults & column_defaults);
private:
/// AST в список столбцов с типами. Столбцы типа Nested развернуты в список настоящих столбцов.
using ColumnsAndDefaults = std::pair<NamesAndTypesList, ColumnDefaults>;
ColumnsAndDefaults parseColumns(ASTPtr expression_list);
/// removes columns from the columns list and return them in a separate list
static NamesAndTypesList removeAndReturnColumns(ColumnsAndDefaults & columns_and_defaults, ColumnDefaultType type);
ASTPtr query_ptr;
Context context;
};

View File

@ -10,6 +10,8 @@
#include <DB/DataTypes/DataTypesNumberFixed.h>
#include <DB/DataTypes/DataTypeString.h>
#include <DB/Columns/ColumnString.h>
#include <DB/Parsers/formatAST.h>
namespace DB
@ -59,13 +61,18 @@ private:
col.name = "name";
col.type = new DataTypeString;
col.column = col.type->createColumn();
block.insert(col);
col.name = "type";
block.insert(col);
col.name = "default_type";
block.insert(col);
col.name = "default_expression";
block.insert(col);
return block;
}
@ -74,27 +81,42 @@ private:
const ASTDescribeQuery & ast = typeid_cast<const ASTDescribeQuery &>(*query_ptr);
NamesAndTypesList columns;
ColumnDefaults column_defaults;
{
StoragePtr table = context.getTable(ast.database, ast.table);
auto table_lock = table->lockStructure(false);
columns = table->getColumnsList();
columns.insert(std::end(columns), std::begin(table->alias_columns), std::end(table->alias_columns));
column_defaults = table->column_defaults;
}
ColumnString * name_column = new ColumnString;
ColumnString * type_column = new ColumnString;
ColumnWithNameAndType name_column{new ColumnString, new DataTypeString, "name"};
ColumnWithNameAndType type_column{new ColumnString, new DataTypeString, "type" };
ColumnWithNameAndType default_type_column{new ColumnString, new DataTypeString, "default_type" };
ColumnWithNameAndType default_expression_column{new ColumnString, new DataTypeString, "default_expression" };;
Block block;
block.insert(ColumnWithNameAndType(name_column, new DataTypeString, "name"));
block.insert(ColumnWithNameAndType(type_column, new DataTypeString, "type"));
for (NamesAndTypesList::iterator it = columns.begin(); it != columns.end(); ++it)
for (const auto column : columns)
{
name_column->insert(it->name);
type_column->insert(it->type->getName());
name_column.column->insert(column.name);
type_column.column->insert(column.type->getName());
const auto it = column_defaults.find(column.name);
if (it == std::end(column_defaults))
{
default_type_column.column->insertDefault();
default_expression_column.column->insertDefault();
}
else
{
default_type_column.column->insert(toString(it->second.type));
default_expression_column.column->insert(queryToString(it->second.expression));
}
}
return new OneBlockInputStream(block);
return new OneBlockInputStream{
{name_column, type_column, default_type_column, default_expression_column}
};
}
};

View File

@ -0,0 +1,49 @@
#pragma once
#include <DB/Core/Block.h>
#include <DB/Storages/ColumnDefault.h>
#include <DB/Interpreters/ExpressionAnalyzer.h>
#include <utility>
namespace DB
{
/** Evaluates default expressions for columns of `required_columns` that are
  * absent from `block` and inserts the results into `block`.
  * No-op when there are no defaults or nothing is missing.
  */
inline void evaluateMissingDefaults(Block & block,
	const NamesAndTypesList & required_columns,
	const ColumnDefaults & column_defaults,
	const Context & context)
{
	if (column_defaults.empty())
		return;

	/// ownership of the list is handed over to ASTPtr
	ASTPtr default_expr_list{stdext::make_unique<ASTExpressionList>().release()};

	/// collect `expr AS column_name` for every missing column that has a default
	for (const auto & column : required_columns)
	{
		if (block.has(column.name))
			continue;

		const auto it = column_defaults.find(column.name);

		/// expressions must be cloned to prevent modification by the ExpressionAnalyzer
		if (it != column_defaults.end())
			default_expr_list->children.emplace_back(
				setAlias(it->second.expression->clone(), it->first));
	}

	/// nothing to evaluate
	if (default_expr_list->children.empty())
		return;

	/** ExpressionAnalyzer eliminates "unused" columns, in order to ensure their safety
	 * we are going to operate on a copy instead of the original block */
	Block copy_block{block};
	/// evaluate default values for defaulted columns
	ExpressionAnalyzer{default_expr_list, context, required_columns}.getActions(true)->execute(copy_block);

	/// move evaluated columns to the original block
	/// NOTE(review): copy_block still holds the original columns as well; this
	/// appears to re-insert them into `block` — confirm Block::insert semantics
	/// (or that ExpressionAnalyzer's actions drop the source columns).
	for (auto & column_name_type : copy_block.getColumns())
		block.insert(std::move(column_name_type));
}
}

View File

@ -37,7 +37,7 @@ public:
* В запросе DROP это поле не используется
* В запросе MODIFY здесь хранится имя столбца и новый тип
*/
ASTPtr name_type;
ASTPtr col_decl;
/** В запросе ADD COLUMN здесь опционально хранится имя столбца, следующее после AFTER
* В запросе DROP здесь хранится имя столбца для удаления
@ -60,7 +60,7 @@ public:
void clone(Parameters & p) const
{
p = *this;
p.name_type = name_type->clone();
p.col_decl = col_decl->clone();
p.column = column->clone();
p.partition = partition->clone();
}
@ -74,8 +74,8 @@ public:
void addParameters(const Parameters & params)
{
parameters.push_back(params);
if (params.name_type)
children.push_back(params.name_type);
if (params.col_decl)
children.push_back(params.col_decl);
if (params.column)
children.push_back(params.column);
if (params.partition)

View File

@ -0,0 +1,43 @@
#pragma once
#include <DB/Parsers/IAST.h>
namespace DB
{
/** Name, type, default-specifier, default-expression.
* The type is optional if default-expression is specified.
*/
class ASTColumnDeclaration : public IAST
{
public:
	String name;                 /// column name (mandatory)
	ASTPtr type;                 /// optional when a default expression is present
	String default_specifier;    /// "DEFAULT", "MATERIALIZED" or "ALIAS"
	ASTPtr default_expression;   /// expression following the specifier

	ASTColumnDeclaration() = default;
	ASTColumnDeclaration(StringRange range) : IAST{range} {}

	String getID() const { return "ColumnDeclaration_" + name; }

	/// Deep copy: duplicate the node, then re-clone the owned sub-ASTs so the
	/// children list points at the copies rather than the originals.
	ASTPtr clone() const
	{
		const auto node = new ASTColumnDeclaration{*this};
		node->children.clear();

		if (type)
		{
			node->type = type->clone();
			node->children.push_back(node->type);
		}

		if (default_expression)
		{
			node->default_expression = default_expression->clone();
			node->children.push_back(node->default_expression);
		}

		return node;
	}
};
}

View File

@ -1,6 +1,7 @@
#pragma once
#include <DB/Parsers/ASTWithAlias.h>
#include <DB/Parsers/ASTExpressionList.h>
#include <DB/Functions/IFunction.h>
#include <DB/AggregateFunctions/IAggregateFunction.h>
#include <DB/IO/WriteBufferFromString.h>
@ -81,4 +82,37 @@ public:
}
};
/// Builds an ASTFunction node named `name` whose argument list holds `args`.
template <typename... Args>
ASTPtr makeASTFunction(const String & name, Args &&... args)
{
	const auto func = new ASTFunction{};
	ASTPtr node_holder{func};

	func->name = name;
	func->arguments = new ASTExpressionList{};
	func->children.push_back(func->arguments);
	func->arguments->children = { std::forward<Args>(args)... };

	return node_holder;
}
/// Overload that additionally records source positions for the function node
/// and its argument list.
template <typename... Args>
ASTPtr makeASTFunction(const String & name, const StringRange & function_range,
	const StringRange & arguments_range, Args &&... args)
{
	const auto func = new ASTFunction{function_range};
	ASTPtr node_holder{func};

	func->name = name;
	func->arguments = new ASTExpressionList{arguments_range};
	func->children.push_back(func->arguments);
	func->arguments->children = { std::forward<Args>(args)... };

	return node_holder;
}
}

View File

@ -21,4 +21,12 @@ public:
void setAlias(const String & to) override { alias = to; }
};
/// Helper for setting an alias on an AST node and chaining the result into
/// other expressions. The node must derive from ASTWithAlias; otherwise
/// dynamic_cast throws std::bad_cast.
/// Fixed: removed the stray ';' that followed the function body at namespace
/// scope (an empty declaration).
inline ASTPtr setAlias(ASTPtr ast, const String & alias)
{
	dynamic_cast<ASTWithAlias &>(*ast).alias = alias;
	return ast;
}
}

View File

@ -2,9 +2,12 @@
#include <DB/Parsers/IParserBase.h>
#include <DB/Parsers/ExpressionElementParsers.h>
#include <DB/Parsers/ExpressionListParsers.h>
#include <DB/Parsers/ASTNameTypePair.h>
#include <DB/Parsers/ASTColumnDeclaration.h>
#include <DB/Parsers/ASTIdentifier.h>
#include <DB/Parsers/CommonParsers.h>
#include <Poco/String.h>
namespace DB
@ -93,6 +96,101 @@ protected:
};
/** Parses a single column declaration of the form
  *   name [type] [{DEFAULT|MATERIALIZED|ALIAS} expression]
  * NameParser selects how the column name itself is parsed.
  */
template <class NameParser>
class IParserColumnDeclaration : public IParserBase
{
protected:
	const char * getName() const { return "column declaration"; }
	bool parseImpl(Pos & pos, Pos end, ASTPtr & node, Expected & expected);
};

/// Column names that are plain identifiers.
typedef IParserColumnDeclaration<ParserIdentifier> ParserColumnDeclaration;
/// Column names that may be compound (dot-separated) identifiers.
typedef IParserColumnDeclaration<ParserCompoundIdentifier> ParserCompoundColumnDeclaration;
/** Grammar: name [type] [{DEFAULT|MATERIALIZED|ALIAS} expr].
  * The type is mandatory unless a default clause is present. On success fills
  * `node` with an ASTColumnDeclaration; on failure restores `pos` and returns
  * false.
  */
template <class NameParser>
bool IParserColumnDeclaration<NameParser>::parseImpl(Pos & pos, Pos end, ASTPtr & node, Expected & expected)
{
	NameParser name_parser;
	ParserIdentifierWithOptionalParameters type_parser;
	ParserWhiteSpaceOrComments ws;
	/// the {true, true} flags make the keyword checks word-boundary-aware and case-insensitive
	ParserString s_default{"DEFAULT", true, true};
	ParserString s_materialized{"MATERIALIZED", true, true};
	ParserString s_alias{"ALIAS", true, true};
	ParserTernaryOperatorExpression expr_parser;

	const auto begin = pos;
	/// backtracking helper used on any failure after the name was consumed
	const auto reset_pos_and_return = [&pos, begin] {
		pos = begin;
		return false;
	};

	/// mandatory column name
	ASTPtr name;
	if (!name_parser.parse(pos, end, name, expected))
		return false;

	ws.ignore(pos, end, expected);

	/** column name should be followed by type name if it
	 * is not immediately followed by {DEFAULT, MATERIALIZED, ALIAS} */
	ASTPtr type;
	const auto fallback_pos = pos;
	if (!s_default.check(pos, end, expected) &&
		!s_materialized.check(pos, end, expected) &&
		!s_alias.check(pos, end, expected))
	{
		/// no specifier ahead: the next token, if parseable, is the type
		if (type_parser.parse(pos, end, type, expected))
			ws.ignore(pos, end, expected);
	}
	else
		/// a specifier keyword follows — undo whatever check() consumed
		pos = fallback_pos;

	/// parse {DEFAULT, MATERIALIZED, ALIAS}
	String default_specifier;
	ASTPtr default_expression;
	const auto pos_before_specifier = pos;
	if (s_default.ignore(pos, end, expected) ||
		s_materialized.ignore(pos, end, expected) ||
		s_alias.ignore(pos, end, expected))
	{
		/// normalize the keyword exactly as the user wrote it (case-insensitive match)
		default_specifier = Poco::toUpper(std::string{pos_before_specifier, pos});

		/// should be followed by an expression
		ws.ignore(pos, end, expected);

		if (!expr_parser.parse(pos, end, default_expression, expected))
			return reset_pos_and_return();
	}
	else if (!type)
		return reset_pos_and_return(); /// reject sole column name without type

	const auto column_declaration = new ASTColumnDeclaration{StringRange{begin, pos}};
	node = column_declaration;
	column_declaration->name = typeid_cast<ASTIdentifier &>(*name).name;
	if (type)
	{
		column_declaration->type = type;
		column_declaration->children.push_back(std::move(type));
	}

	if (default_expression)
	{
		column_declaration->default_specifier = default_specifier;
		column_declaration->default_expression = default_expression;
		column_declaration->children.push_back(std::move(default_expression));
	}

	return true;
}
/// Parses a comma-separated list of column declarations
/// (as in CREATE TABLE's column list).
class ParserColumnDeclarationList : public IParserBase
{
protected:
	const char * getName() const { return "column declaration list"; }
	bool parseImpl(Pos & pos, Pos end, ASTPtr & node, Expected & expected);
};
/** ENGINE = name. */
class ParserEngine : public IParserBase
{

View File

@ -18,6 +18,7 @@
#include <DB/Parsers/ASTIdentifier.h>
#include <DB/Parsers/ASTLiteral.h>
#include <DB/Parsers/ASTNameTypePair.h>
#include <DB/Parsers/ASTColumnDeclaration.h>
#include <DB/Parsers/ASTAsterisk.h>
#include <DB/Parsers/ASTOrderByElement.h>
#include <DB/Parsers/ASTSubquery.h>
@ -54,6 +55,7 @@ void formatAST(const ASTFunction & ast, std::ostream & s, size_t indent = 0, b
void formatAST(const ASTIdentifier & ast, std::ostream & s, size_t indent = 0, bool hilite = true, bool one_line = false, bool need_parens = false);
void formatAST(const ASTLiteral & ast, std::ostream & s, size_t indent = 0, bool hilite = true, bool one_line = false, bool need_parens = false);
void formatAST(const ASTNameTypePair & ast, std::ostream & s, size_t indent = 0, bool hilite = true, bool one_line = false, bool need_parens = false);
void formatAST(const ASTColumnDeclaration & ast, std::ostream & s, size_t indent = 0, bool hilite = true, bool one_line = false, bool need_parens = false);
void formatAST(const ASTAsterisk & ast, std::ostream & s, size_t indent = 0, bool hilite = true, bool one_line = false, bool need_parens = false);
void formatAST(const ASTOrderByElement & ast, std::ostream & s, size_t indent = 0, bool hilite = true, bool one_line = false, bool need_parens = false);
void formatAST(const ASTSubquery & ast, std::ostream & s, size_t indent = 0, bool hilite = true, bool one_line = false, bool need_parens = false);

View File

@ -0,0 +1,8 @@
#pragma once
#include <DB/Parsers/IAST.h>
namespace DB
{
String queryToString(const ASTPtr & query);
}

View File

@ -1,7 +1,7 @@
#pragma once
#include <DB/Core/NamesAndTypes.h>
#include <DB/DataTypes/DataTypeNested.h>
#include <DB/DataTypes/DataTypeArray.h>
#include <DB/Storages/ColumnDefault.h>
namespace DB
{
@ -23,6 +23,9 @@ struct AlterCommand
/// Для ADD и MODIFY - новый тип столбца.
DataTypePtr data_type;
ColumnDefaultType default_type{};
ASTPtr default_expression{};
/// Для ADD - после какого столбца добавить новый. Если пустая строка, добавить в конец. Добавить в начало сейчас нельзя.
String after_column;
@ -34,87 +37,32 @@ struct AlterCommand
return (name_with_dot == name_type.name.substr(0, name_without_dot.length() + 1) || name_without_dot == name_type.name);
}
void apply(NamesAndTypesList & columns) const
{
if (type == ADD)
{
if (std::count_if(columns.begin(), columns.end(), std::bind(namesEqual, column_name, std::placeholders::_1)))
throw Exception("Cannot add column " + column_name + ": column with this name already exisits.",
DB::ErrorCodes::ILLEGAL_COLUMN);
void apply(NamesAndTypesList & columns,
NamesAndTypesList & materialized_columns,
NamesAndTypesList & alias_columns,
ColumnDefaults & column_defaults) const;
if (DataTypeNested::extractNestedTableName(column_name) != column_name &&
!typeid_cast<const DataTypeArray *>(&*data_type))
throw Exception("Can't add nested column " + column_name + " of non-array type " + data_type->getName(),
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
NamesAndTypesList::iterator insert_it = columns.end();
if (!after_column.empty())
{
/// Пытаемся найти первую с конца колонку с именем column_name или с именем, начинающимся с column_name и ".".
/// Например "fruits.bananas"
/// одинаковыми считаются имена, если они совпадают целиком или name_without_dot совпадает с частью имени до точки
NamesAndTypesList::reverse_iterator reverse_insert_it = std::find_if(columns.rbegin(), columns.rend(),
std::bind(namesEqual, after_column, std::placeholders::_1));
if (reverse_insert_it == columns.rend())
throw Exception("Wrong column name. Cannot find column " + column_name + " to insert after",
DB::ErrorCodes::ILLEGAL_COLUMN);
else
{
/// base возвращает итератор, уже смещенный на один элемент вправо
insert_it = reverse_insert_it.base();
}
}
columns.insert(insert_it, NameAndTypePair(column_name, data_type));
/// Медленно, так как каждый раз копируется список
columns = *DataTypeNested::expandNestedColumns(columns);
}
else if (type == DROP)
{
bool is_first = true;
NamesAndTypesList::iterator column_it;
do
{
column_it = std::find_if(columns.begin(), columns.end(), std::bind(namesEqual, column_name, std::placeholders::_1));
if (column_it == columns.end())
{
if (is_first)
throw Exception("Wrong column name. Cannot find column " + column_name + " to drop",
DB::ErrorCodes::ILLEGAL_COLUMN);
}
else
columns.erase(column_it);
is_first = false;
}
while (column_it != columns.end());
}
else if (type == MODIFY)
{
NamesAndTypesList::iterator column_it = std::find_if(columns.begin(), columns.end(),
std::bind(namesEqual, column_name, std::placeholders::_1) );
if (column_it == columns.end())
throw Exception("Wrong column name. Cannot find column " + column_name + " to modify.",
DB::ErrorCodes::ILLEGAL_COLUMN);
column_it->type = data_type;
}
else
throw Exception("Wrong parameter type in ALTER query", ErrorCodes::LOGICAL_ERROR);
}
AlterCommand() = default;
AlterCommand(const Type type, const String & column_name, const DataTypePtr & data_type,
const ColumnDefaultType default_type, const ASTPtr & default_expression,
const String & after_column = String{})
: type{type}, column_name{column_name}, data_type{data_type}, default_type{default_type},
default_expression{default_expression}, after_column{after_column}
{}
};
class IStorage;
class Context;
class AlterCommands : public std::vector<AlterCommand>
{
public:
void apply(NamesAndTypesList & columns) const
{
NamesAndTypesList new_columns = columns;
for (const AlterCommand & command : *this)
command.apply(new_columns);
columns = new_columns;
}
void apply(NamesAndTypesList & columns,
NamesAndTypesList & materialized_columns,
NamesAndTypesList & alias_columns,
ColumnDefaults & column_defaults) const;
void validate(IStorage * table, const Context & context);
};
}

View File

@ -0,0 +1,178 @@
#pragma once
#include <DB/Parsers/IAST.h>
#include <DB/Parsers/queryToString.h>
#include <DB/Parsers/ExpressionListParsers.h>
#include <DB/IO/WriteBufferFromString.h>
#include <unordered_map>
namespace DB
{

/// Kind of column default clause: DEFAULT, MATERIALIZED or ALIAS
/// (see columnDefaultTypeFromString / toString for the textual forms).
enum struct ColumnDefaultType
{
	Default,
	Materialized,
	Alias
};

}
namespace std
{
	/// Allow ColumnDefaultType to be used as a key of unordered containers
	/// by hashing its underlying integer value.
	template<> struct hash<DB::ColumnDefaultType>
	{
		size_t operator()(const DB::ColumnDefaultType type) const
		{
			const auto underlying = static_cast<int>(type);
			return hash<int>{}(underlying);
		}
	};
}
namespace DB
{
/// Maps the textual specifier ("DEFAULT", "MATERIALIZED", "ALIAS") to the
/// enum value; throws on any other input.
inline ColumnDefaultType columnDefaultTypeFromString(const String & str)
{
	if (str == "DEFAULT")
		return ColumnDefaultType::Default;

	if (str == "MATERIALIZED")
		return ColumnDefaultType::Materialized;

	if (str == "ALIAS")
		return ColumnDefaultType::Alias;

	throw Exception{"Unknown column default specifier: " + str};
}
/// Inverse of columnDefaultTypeFromString: renders the enum as its keyword.
inline String toString(const ColumnDefaultType type)
{
	switch (type)
	{
		case ColumnDefaultType::Default: return "DEFAULT";
		case ColumnDefaultType::Materialized: return "MATERIALIZED";
		case ColumnDefaultType::Alias: return "ALIAS";
	}

	/// out-of-range value (e.g. from a cast)
	throw Exception{"Invalid ColumnDefaultType"};
}
/// Describes one column's default clause: its kind and the expression AST.
struct ColumnDefault
{
	ColumnDefaultType type;  /// DEFAULT / MATERIALIZED / ALIAS
	ASTPtr expression;       /// the expression after the specifier keyword
};
/// Two defaults are equal when their kinds match and their expressions
/// format to the same query text (ASTs are compared by serialization).
inline bool operator==(const ColumnDefault & lhs, const ColumnDefault & rhs)
{
	if (lhs.type != rhs.type)
		return false;

	return queryToString(lhs.expression) == queryToString(rhs.expression);
}

/// column name -> its default description
using ColumnDefaults = std::unordered_map<String, ColumnDefault>;
/** Full description of a table's columns: ordinary, materialized and alias
  * column lists plus the map of default expressions, with text
  * (de)serialization in the "columns format version: 1" layout.
  *
  * `store` selects ownership: true — members are held by value;
  * false — members are const references to externally owned lists.
  * NOTE(review): parse() mutates the members, so it only makes sense for the
  * owning (store == true) instantiation — confirm intended usage.
  */
template <bool store>
struct ColumnsDescription
{
	/// by value when owning, by const reference when borrowing
	template <typename T> using by_value_or_cref = typename std::conditional<store, T, const T &>::type;

	by_value_or_cref<NamesAndTypesList> columns;
	by_value_or_cref<NamesAndTypesList> materialized;
	by_value_or_cref<NamesAndTypesList> alias;
	by_value_or_cref<ColumnDefaults> defaults;

	/** Serialize all three column lists into the text format:
	  *   columns format version: 1
	  *   <total count> columns:
	  *   `name` type[\tDEFAULT_KIND\texpression]
	  */
	String toString() const
	{
		String s;
		WriteBufferFromString buf{s};

		writeString("columns format version: 1\n", buf);
		writeText(columns.size() + materialized.size() + alias.size(), buf);
		writeString(" columns:\n", buf);

		/// one line per column; columns without a default end right after the type
		const auto write_columns = [this, &buf] (const NamesAndTypesList & columns) {
			for (const auto & column : columns)
			{
				const auto it = defaults.find(column.name);

				writeBackQuotedString(column.name, buf);
				writeChar(' ', buf);
				writeString(column.type->getName(), buf);
				if (it == std::end(defaults))
				{
					writeChar('\n', buf);
					continue;
				}
				else
					writeChar('\t', buf);

				writeString(DB::toString(it->second.type), buf);
				writeChar('\t', buf);
				writeString(queryToString(it->second.expression), buf);
				writeChar('\n', buf);
			}
		};

		write_columns(columns);
		write_columns(materialized);
		write_columns(alias);

		return s;
	}

	/** Parse the format produced by toString(). Each parsed column is routed
	  * into `columns`, `materialized` or `alias` according to its default
	  * kind; the default expression is re-parsed from its query text.
	  * @throws Exception (CANNOT_PARSE_TEXT) if a default expression does not parse.
	  */
	static ColumnsDescription parse(const String & str, const DataTypeFactory & data_type_factory)
	{
		ReadBufferFromString buf{str};

		assertString("columns format version: 1\n", buf);
		size_t count{};
		readText(count, buf);
		assertString(" columns:\n", buf);

		ParserTernaryOperatorExpression expr_parser;

		ColumnsDescription result{};
		for (size_t i = 0; i < count; ++i)
		{
			String column_name;
			readBackQuotedString(column_name, buf);
			assertString(" ", buf);

			String type_name;
			readString(type_name, buf);
			auto type = data_type_factory.get(type_name);
			/// a newline right after the type means the column has no default
			if (*buf.position() == '\n')
			{
				assertString("\n", buf);

				result.columns.emplace_back(column_name, std::move(type));
				continue;
			}
			assertString("\t", buf);

			String default_type_str;
			readString(default_type_str, buf);
			const auto default_type = columnDefaultTypeFromString(default_type_str);
			assertString("\t", buf);

			/// the expression occupies the rest of the line, as query text
			String default_expr_str;
			readText(default_expr_str, buf);
			assertString("\n", buf);

			ASTPtr default_expr;
			Expected expected{};
			auto begin = default_expr_str.data();
			const auto end = begin + default_expr_str.size();
			if (!expr_parser.parse(begin, end, default_expr, expected))
				throw Exception{"Could not parse default expression", DB::ErrorCodes::CANNOT_PARSE_TEXT};

			/// route the column into the list matching its default kind
			if (ColumnDefaultType::Default == default_type)
				result.columns.emplace_back(column_name, std::move(type));
			else if (ColumnDefaultType::Materialized == default_type)
				result.materialized.emplace_back(column_name, std::move(type));
			else if (ColumnDefaultType::Alias == default_type)
				result.alias.emplace_back(column_name, std::move(type));

			result.defaults.emplace(column_name, ColumnDefault{default_type, default_expr});
		}

		assertEOF(buf);

		return result;
	}
};
}

View File

@ -1,7 +1,8 @@
#pragma once
#include <DB/Storages/StorageDistributed.h>
#include <DB/Storages/Distributed/queryToString.h>
#include <DB/Parsers/formatAST.h>
#include <DB/IO/WriteBufferFromFile.h>
#include <DB/IO/CompressedWriteBuffer.h>

View File

@ -1,14 +0,0 @@
#pragma once
#include <DB/Parsers/formatAST.h>
namespace DB
{
inline std::string queryToString(const ASTPtr & query)
{
std::ostringstream s;
formatAST(*query, s, 0, false, true);
return s.str();
}
}

View File

@ -14,6 +14,7 @@
#include <DB/Storages/AlterCommands.h>
#include <Poco/File.h>
#include <Poco/RWLock.h>
#include <statdaemons/stdext.h>
namespace DB
@ -107,8 +108,8 @@ public:
return res;
}
typedef Poco::SharedPtr<Poco::ScopedWriteRWLock> TableStructureWriteLockPtr;
typedef Poco::SharedPtr<Poco::ScopedWriteRWLock> TableDataWriteLockPtr;
typedef std::unique_ptr<Poco::ScopedWriteRWLock> TableStructureWriteLockPtr;
typedef std::unique_ptr<Poco::ScopedWriteRWLock> TableDataWriteLockPtr;
typedef std::pair<TableDataWriteLockPtr, TableStructureWriteLockPtr> TableFullWriteLockPtr;
/** Не дает читать структуру таблицы. Берется для ALTER, RENAME и DROP.
@ -124,7 +125,7 @@ public:
*/
TableDataWriteLockPtr lockDataForAlter()
{
TableDataWriteLockPtr res = new Poco::ScopedWriteRWLock(data_lock);
auto res = stdext::make_unique<Poco::ScopedWriteRWLock>(data_lock);
if (is_dropped)
throw Exception("Table is dropped", ErrorCodes::TABLE_IS_DROPPED);
return res;
@ -132,7 +133,7 @@ public:
TableStructureWriteLockPtr lockStructureForAlter()
{
TableStructureWriteLockPtr res = new Poco::ScopedWriteRWLock(structure_lock);
auto res = stdext::make_unique<Poco::ScopedWriteRWLock>(structure_lock);
if (is_dropped)
throw Exception("Table is dropped", ErrorCodes::TABLE_IS_DROPPED);
return res;
@ -264,7 +265,7 @@ public:
return res;
}
bool is_dropped;
bool is_dropped{false};
/// Поддерживается ли индекс в секции IN
virtual bool supportsIndexForIn() const { return false; };
@ -273,7 +274,7 @@ public:
virtual bool checkData() const { throw DB::Exception("Check query is not supported for " + getName() + " storage"); }
protected:
IStorage() : is_dropped(false) {}
using ITableDeclaration::ITableDeclaration;
private:
std::weak_ptr<IStorage> this_ptr;

View File

@ -4,6 +4,7 @@
#include <DB/Core/NamesAndTypes.h>
#include <DB/Core/Exception.h>
#include <DB/Core/Block.h>
#include <DB/Storages/ColumnDefault.h>
namespace DB
{
@ -22,7 +23,8 @@ public:
/** Получить список имён и типов столбцов таблицы, только невиртуальные.
*/
virtual const NamesAndTypesList & getColumnsList() const = 0;
NamesAndTypesList getColumnsList() const;
const NamesAndTypesList & getColumnsListNonMaterialized() const { return getColumnsListImpl(); }
/** Получить список имён столбцов таблицы, только невиртуальные.
*/
@ -36,6 +38,9 @@ public:
*/
virtual bool hasRealColumn(const String & column_name) const;
NameAndTypePair getMaterializedColumn(const String & column_name) const;
bool hasMaterializedColumn(const String & column_name) const;
/** Получить описание любого столбца по его имени.
*/
virtual NameAndTypePair getColumn(const String & column_name) const;
@ -49,6 +54,7 @@ public:
/** То же самое, но в виде блока-образца.
*/
Block getSampleBlock() const;
Block getSampleBlockNonMaterialized() const;
/** Проверить, что все запрошенные имена есть в таблице и заданы корректно.
* (список имён не пустой и имена не повторяются)
@ -69,7 +75,25 @@ public:
*/
void check(const Block & block, bool need_all = false) const;
virtual ~ITableDeclaration() {}
virtual ~ITableDeclaration() = default;
ITableDeclaration() = default;
ITableDeclaration(
const NamesAndTypesList & materialized_columns,
const NamesAndTypesList & alias_columns,
const ColumnDefaults & column_defaults)
: materialized_columns{materialized_columns},
alias_columns{alias_columns},
column_defaults{column_defaults}
{}
NamesAndTypesList materialized_columns{};
NamesAndTypesList alias_columns{};
ColumnDefaults column_defaults{};
private:
virtual const NamesAndTypesList & getColumnsListImpl() const = 0;
};
}

View File

@ -107,6 +107,47 @@ protected:
/// Будем вызывать progressImpl самостоятельно.
void progress(const Progress & value) override {}
void injectRequiredColumns(NamesAndTypesList & columns) const {
std::set<NameAndTypePair> required_columns;
auto modified = false;
for (auto it = std::begin(columns); it != std::end(columns);)
{
required_columns.emplace(*it);
if (!owned_data_part->hasColumnFiles(it->name))
{
const auto default_it = storage.column_defaults.find(it->name);
if (default_it != std::end(storage.column_defaults))
{
IdentifierNameSet identifiers;
default_it->second.expression->collectIdentifierNames(identifiers);
for (const auto & identifier : identifiers)
{
if (storage.hasColumn(identifier))
{
NameAndTypePair column{identifier, storage.getDataTypeByName(identifier)};
if (required_columns.count(column) == 0)
{
it = columns.emplace(++it, std::move(column));
modified = true;
}
}
}
if (modified)
continue;
}
}
++it;
}
if (modified)
columns = NamesAndTypesList{std::begin(required_columns), std::end(required_columns)};
}
Block readImpl()
{
Block res;
@ -120,6 +161,9 @@ protected:
/// NOTE В конструкторе это делать не получилось бы, потому что тогда ещё не установлен progress_callback.
progressImpl(Progress(0, 0, total_rows));
injectRequiredColumns(columns);
injectRequiredColumns(pre_columns);
UncompressedCache * uncompressed_cache = use_uncompressed_cache ? storage.context.getUncompressedCache() : NULL;
reader.reset(new MergeTreeReader(path, owned_data_part->name, columns, uncompressed_cache, storage, all_mark_ranges));
if (prewhere_actions)

View File

@ -585,6 +585,9 @@ public:
* require_part_metadata - обязательно ли в директории с куском должны быть checksums.txt и columns.txt
*/
MergeTreeData( const String & full_path_, NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
const Context & context_,
ASTPtr & primary_expr_ast_,
const String & date_column_name_,
@ -613,7 +616,7 @@ public:
throw Exception("Logical error: calling method getTableName of not a table.", ErrorCodes::LOGICAL_ERROR);
}
const NamesAndTypesList & getColumnsList() const { return *columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return *columns; }
NameAndTypePair getColumn(const String & column_name) const
{
@ -621,14 +624,16 @@ public:
return NameAndTypePair("_part", new DataTypeString);
if (column_name == "_part_index")
return NameAndTypePair("_part_index", new DataTypeUInt64);
return getRealColumn(column_name);
return ITableDeclaration::getColumn(column_name);
}
bool hasColumn(const String & column_name) const
{
if (column_name == "_part") return true;
if (column_name == "_part_index") return true;
return hasRealColumn(column_name);
if (column_name == "_part")
return true;
if (column_name == "_part_index")
return true;
return ITableDeclaration::hasColumn(column_name);
}
String getFullPath() const { return full_path; }

View File

@ -10,6 +10,7 @@
#include <DB/IO/CompressedReadBufferFromFile.h>
#include <DB/Columns/ColumnArray.h>
#include <DB/Columns/ColumnNested.h>
#include <DB/Interpreters/evaluateMissingDefaults.h>
namespace DB
@ -124,7 +125,7 @@ public:
storage.reportBrokenPart(part_name);
/// Более хорошая диагностика.
throw Exception(e.message() + " (while reading from part " + path + " from mark " + toString(from_mark) + " to "
throw Exception(e.message() + "\n(while reading from part " + path + " from mark " + toString(from_mark) + " to "
+ toString(to_mark) + ")", e.code());
}
catch (...)
@ -161,7 +162,8 @@ public:
size_t pos = 0; /// Позиция, куда надо вставить недостающий столбец.
for (NamesAndTypesList::const_iterator it = columns.begin(); it != columns.end(); ++it, ++pos)
{
if (!res.has(it->name))
/// insert default values only for columns without default expressions
if (!res.has(it->name) && storage.column_defaults.count(it->name) == 0)
{
ColumnWithNameAndType column;
column.name = it->name;
@ -192,11 +194,15 @@ public:
res.insert(pos, column);
}
}
/// evaluate defaulted columns
evaluateMissingDefaults(res, columns, storage.column_defaults, storage.context);
}
catch (const Exception & e)
{
/// Более хорошая диагностика.
throw Exception(e.message() + " (while reading from part " + path + ")", e.code());
throw Exception(e.message() + '\n' + e.getStackTrace().toString()
+ "\n(while reading from part " + path + ")", e.code());
}
}

View File

@ -56,7 +56,7 @@ public:
std::string getName() const override { return "Buffer"; }
std::string getTableName() const override { return name; }
const NamesAndTypesList & getColumnsList() const override { return *columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return *columns; }
BlockInputStreams read(
const Names & column_names,

View File

@ -21,6 +21,9 @@ public:
const std::string & this_database_,/// Имя БД для этой таблицы.
const std::string & name_, /// Имя таблицы.
NamesAndTypesListPtr columns_, /// Список столбцов.
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
const String & source_database_, /// В какой БД искать таблицы-источники.
const String & table_name_regexp_, /// Регексп имён таблиц-источников.
const std::string & destination_name_prefix_, /// Префикс имен создаваемых таблиц типа Chunks.
@ -30,7 +33,7 @@ public:
std::string getName() const override { return "ChunkMerger"; }
std::string getTableName() const override { return name; }
const NamesAndTypesList & getColumnsList() const override { return *columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return *columns; }
NameAndTypePair getColumn(const String & column_name) const override;
bool hasColumn(const String & column_name) const override;
@ -70,6 +73,9 @@ private:
const std::string & this_database_,
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
const String & source_database_,
const String & table_name_regexp_,
const std::string & destination_name_prefix_,

View File

@ -5,7 +5,7 @@
namespace DB
{
/** Ссылка на кусок данных в таблице типа Chunks.
* Запись не поддерживается.
*/
@ -13,11 +13,11 @@ class StorageChunkRef : public IStorage
{
public:
static StoragePtr create(const std::string & name_, const Context & context_, const std::string & source_database_name_, const std::string & source_table_name_, bool attach);
std::string getName() const override { return "ChunkRef"; }
std::string getTableName() const override { return name; }
const NamesAndTypesList & getColumnsList() const override { return getSource().getColumnsList(); }
const NamesAndTypesList & getColumnsListImpl() const override { return getSource().getColumnsListImpl(); }
/// В таблице, на которую мы ссылаемся, могут быть виртуальные столбцы.
NameAndTypePair getColumn(const String & column_name) const override { return getSource().getColumn(column_name); };
bool hasColumn(const String & column_name) const override { return getSource().hasColumn(column_name); };
@ -31,23 +31,23 @@ public:
unsigned threads = 1) override;
ASTPtr getCustomCreateQuery(const Context & context) const;
void drop() override;
String source_database_name;
String source_table_name;
bool checkData() const override;
private:
String name;
const Context & context;
StorageChunkRef(const std::string & name_, const Context & context_, const std::string & source_database_name_, const std::string & source_table_name_, bool attach);
/// TODO: может быть, можно просто хранить указатель на родительскую таблицу?
StorageChunks & getSource();
const StorageChunks & getSource() const;
};
}

View File

@ -7,7 +7,7 @@
namespace DB
{
/** Хранит несколько кусков данных. Читает из всех кусков.
* Запись не поддерживается. Для записи используются таблицы типа ChunkMerger.
* Таблицы типа ChunkRef могут ссылаться на отдельные куски внутри таблицы типа Chunks.
@ -20,17 +20,20 @@ class StorageChunks : public StorageLog
using StorageLog::read;
public:
static StoragePtr create(const std::string & path_,
const std::string & name_,
const std::string & database_name_,
NamesAndTypesListPtr columns_,
Context & context_,
bool attach);
const std::string & name_,
const std::string & database_name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
Context & context_,
bool attach);
void addReference();
void removeReference();
std::string getName() const override { return "Chunks"; }
BlockInputStreams read(
const Names & column_names,
ASTPtr query,
@ -53,13 +56,13 @@ public:
BlockOutputStreamPtr writeToNewChunk(
const std::string & chunk_name);
/// Если бы запись была разрешена, непонятно, как назвать новый чанк.
BlockOutputStreamPtr write(ASTPtr query) override
{
throw Exception("Table doesn't support writing", ErrorCodes::NOT_IMPLEMENTED);
}
/// Переименование испортило бы целостность количества ссылок из таблиц ChunkRef.
void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override
{
@ -79,29 +82,32 @@ private:
typedef std::vector<String> ChunkNumToChunkName;
String database_name;
ChunkNumToMark chunk_num_to_marks;
ChunkIndices chunk_indices;
ChunkNumToChunkName chunk_names;
CounterInFile reference_counter;
Context & context;
Logger * log;
StorageChunks(const std::string & path_,
const std::string & name_,
const std::string & database_name_,
NamesAndTypesListPtr columns_,
Context & context_,
bool attach);
const std::string & name_,
const std::string & database_name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
Context & context_,
bool attach);
void dropThis();
void loadIndex();
void appendChunkToIndex(const std::string & chunk_name, size_t mark);
Block getBlockWithVirtualColumns() const;
};
}

View File

@ -25,6 +25,9 @@ public:
static StoragePtr create(
const std::string & name_, /// Имя таблицы.
NamesAndTypesListPtr columns_, /// Список столбцов.
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
const String & remote_database_, /// БД на удалённых серверах.
const String & remote_table_, /// Имя таблицы на удалённых серверах.
const String & cluster_name,
@ -46,7 +49,7 @@ public:
bool supportsFinal() const override { return true; }
bool supportsPrewhere() const override { return true; }
const NamesAndTypesList & getColumnsList() const override { return *columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return *columns; }
NameAndTypePair getColumn(const String & column_name) const override;
bool hasColumn(const String & column_name) const override;
@ -88,6 +91,19 @@ private:
const ASTPtr & sharding_key_ = nullptr,
const String & data_path_ = String{});
StorageDistributed(
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
const String & remote_database_,
const String & remote_table_,
Cluster & cluster_,
Context & context_,
const ASTPtr & sharding_key_ = nullptr,
const String & data_path_ = String{});
/// create directory monitor thread by subdirectory name
void createDirectoryMonitor(const std::string & name);

View File

@ -23,6 +23,9 @@ public:
Context & context,
ASTPtr & query,
NamesAndTypesListPtr columns,
const NamesAndTypesList & materialized_columns,
const NamesAndTypesList & alias_columns,
const ColumnDefaults & column_defaults,
bool attach) const;
};

View File

@ -64,11 +64,11 @@ private:
if (offset)
plain.seek(offset);
}
ReadBufferFromFile plain;
CompressedReadBuffer compressed;
};
typedef std::map<std::string, std::unique_ptr<Stream> > FileStreams;
FileStreams streams;
@ -97,7 +97,7 @@ private:
{
plain_offset = Poco::File(data_path).getSize();
}
WriteBufferFromFile plain;
CompressedWriteBuffer compressed;
@ -111,12 +111,12 @@ private:
};
typedef std::vector<std::pair<size_t, Mark> > MarksForColumns;
typedef std::map<std::string, std::unique_ptr<Stream> > FileStreams;
FileStreams streams;
typedef std::set<std::string> OffsetColumns;
WriteBufferFromFile marks_stream; /// Объявлен ниже lock, чтобы файл открывался при захваченном rwlock.
void addStream(const String & name, const IDataType & type, size_t level = 0);
@ -143,12 +143,21 @@ public:
const std::string & path_,
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
size_t max_compress_block_size_ = DEFAULT_MAX_COMPRESS_BLOCK_SIZE);
static StoragePtr create(
const std::string & path_,
const std::string & name_,
NamesAndTypesListPtr columns_,
size_t max_compress_block_size_ = DEFAULT_MAX_COMPRESS_BLOCK_SIZE);
std::string getName() const override { return "Log"; }
std::string getTableName() const override { return name; }
const NamesAndTypesList & getColumnsList() const override { return *columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return *columns; }
virtual BlockInputStreams read(
const Names & column_names,
@ -196,16 +205,23 @@ protected:
throw Exception("There is no column " + _table_column_name + " in table " + getTableName(), ErrorCodes::NO_SUCH_COLUMN_IN_TABLE);
}
StorageLog(const std::string & path_, const std::string & name_, NamesAndTypesListPtr columns_, size_t max_compress_block_size_);
StorageLog(
const std::string & path_,
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
size_t max_compress_block_size_);
/// Прочитать файлы с засечками, если они ещё не прочитаны.
/// Делается лениво, чтобы при большом количестве таблиц, сервер быстро стартовал.
/// Нельзя вызывать с залоченным на запись rwlock.
void loadMarks();
/// Можно вызывать при любом состоянии rwlock.
size_t marksCount();
BlockInputStreams read(
size_t from_mark,
size_t to_mark,
@ -215,14 +231,14 @@ protected:
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size = DEFAULT_BLOCK_SIZE,
unsigned threads = 1);
private:
Files_t files; /// name -> data
Names column_names; /// column_index -> name
Poco::File marks_file;
/// Порядок добавления файлов не должен меняться: он соответствует порядку столбцов в файле с засечками.
void addFile(const String & column_name, const IDataType & type, size_t level = 0);

View File

@ -9,8 +9,16 @@ namespace DB
class StorageMaterializedView : public StorageView {
public:
static StoragePtr create(const String & table_name_, const String & database_name_,
Context & context_, ASTPtr & query_, NamesAndTypesListPtr columns_, bool attach_);
static StoragePtr create(
const String & table_name_,
const String & database_name_,
Context & context_,
ASTPtr & query_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
bool attach_);
std::string getName() const override { return "MaterializedView"; }
std::string getInnerTableName() const { return ".inner." + table_name; }
@ -37,8 +45,16 @@ public:
private:
StoragePtr data;
StorageMaterializedView(const String & table_name_, const String & database_name_,
Context & context_, ASTPtr & query_, NamesAndTypesListPtr columns_, bool attach_);
StorageMaterializedView(
const String & table_name_,
const String & database_name_,
Context & context_,
ASTPtr & query_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
bool attach_);
};
}

View File

@ -62,12 +62,21 @@ friend class MemoryBlockInputStream;
friend class MemoryBlockOutputStream;
public:
static StoragePtr create(const std::string & name_, NamesAndTypesListPtr columns_);
static StoragePtr create(
const std::string & name_,
NamesAndTypesListPtr columns_);
static StoragePtr create(
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_);
std::string getName() const override { return "Memory"; }
std::string getTableName() const override { return name; }
const NamesAndTypesList & getColumnsList() const override { return *columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return *columns; }
size_t getSize() const { return data.size(); }
@ -93,7 +102,16 @@ private:
Poco::FastMutex mutex;
StorageMemory(const std::string & name_, NamesAndTypesListPtr columns_);
StorageMemory(
const std::string & name_,
NamesAndTypesListPtr columns_);
StorageMemory(
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_);
};
}

View File

@ -25,6 +25,16 @@ public:
const String & table_name_regexp_, /// Регексп имён таблиц-источников.
const Context & context_); /// Известные таблицы.
static StoragePtr create(
const std::string & name_, /// Имя таблицы.
NamesAndTypesListPtr columns_, /// Список столбцов.
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
const String & source_database_, /// В какой БД искать таблицы-источники.
const String & table_name_regexp_, /// Регексп имён таблиц-источников.
const Context & context_); /// Известные таблицы.
std::string getName() const override { return "Merge"; }
std::string getTableName() const override { return name; }
bool supportsSampling() const override { return true; }
@ -32,9 +42,9 @@ public:
/// Проверка откладывается до метода read. Там проверяется поддержка PREWHERE у использующихся таблиц.
bool supportsPrewhere() const override { return true; }
const NamesAndTypesList & getColumnsList() const override { return *columns; }
NameAndTypePair getColumn(const String & column_name) const override;
bool hasColumn(const String & column_name) const override;
const NamesAndTypesList & getColumnsListImpl() const override { return *columns; }
NameAndTypePair getColumn(const String &column_name) const override;
bool hasColumn(const String &column_name) const override;
BlockInputStreams read(
const Names & column_names,
@ -65,6 +75,16 @@ private:
const String & table_name_regexp_,
const Context & context_);
StorageMerge(
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
const String & source_database_,
const String & table_name_regexp_,
const Context & context_);
void getSelectedTables(StorageVector & selected_tables) const;
Block getBlockWithVirtualColumns(const StorageVector & selected_tables) const;

View File

@ -24,7 +24,27 @@ public:
* date_column_name - имя столбца с датой;
* index_granularity - на сколько строчек пишется одно значение индекса.
*/
static StoragePtr create(const String & path_, const String & database_name_, const String & table_name_,
static StoragePtr create(
const String & path_,
const String & database_name_,
const String & table_name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
Context & context_,
ASTPtr & primary_expr_ast_,
const String & date_column_name_,
const ASTPtr & sampling_expression_, /// nullptr, если семплирование не поддерживается.
size_t index_granularity_,
MergeTreeData::Mode mode_ = MergeTreeData::Ordinary,
const String & sign_column_ = "",
const MergeTreeSettings & settings_ = MergeTreeSettings());
static StoragePtr create(
const String & path_,
const String & database_name_,
const String & table_name_,
NamesAndTypesListPtr columns_,
Context & context_,
ASTPtr & primary_expr_ast_,
@ -35,6 +55,7 @@ public:
const String & sign_column_ = "",
const MergeTreeSettings & settings_ = MergeTreeSettings());
void shutdown() override;
~StorageMergeTree() override;
@ -48,7 +69,7 @@ public:
bool supportsFinal() const override { return data.supportsFinal(); }
bool supportsPrewhere() const override { return data.supportsPrewhere(); }
const NamesAndTypesList & getColumnsList() const override { return data.getColumnsList(); }
const NamesAndTypesList & getColumnsListImpl() const override { return data.getColumnsListNonMaterialized(); }
NameAndTypePair getColumn(const String & column_name) const override
{
@ -156,16 +177,22 @@ private:
typedef Poco::SharedPtr<CurrentlyMergingPartsTagger> CurrentlyMergingPartsTaggerPtr;
StorageMergeTree(const String & path_, const String & database_name_, const String & table_name_,
NamesAndTypesListPtr columns_,
Context & context_,
ASTPtr & primary_expr_ast_,
const String & date_column_name_,
const ASTPtr & sampling_expression_, /// nullptr, если семплирование не поддерживается.
size_t index_granularity_,
MergeTreeData::Mode mode_,
const String & sign_column_,
const MergeTreeSettings & settings_);
StorageMergeTree(
const String & path_,
const String & database_name_,
const String & table_name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
Context & context_,
ASTPtr & primary_expr_ast_,
const String & date_column_name_,
const ASTPtr & sampling_expression_, /// nullptr, если семплирование не поддерживается.
size_t index_granularity_,
MergeTreeData::Mode mode_,
const String & sign_column_,
const MergeTreeSettings & settings_);
/** Определяет, какие куски нужно объединять, и объединяет их.
* Если aggressive - выбрать куски, не обращая внимание на соотношение размеров и их новизну (для запроса OPTIMIZE).

View File

@ -15,15 +15,20 @@ namespace DB
class StorageNull : public IStorage
{
public:
static StoragePtr create(const std::string & name_, NamesAndTypesListPtr columns_)
static StoragePtr create(
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_)
{
return (new StorageNull(name_, columns_))->thisPtr();
return (new StorageNull{name_, columns_, materialized_columns_, alias_columns_, column_defaults_})->thisPtr();
}
std::string getName() const override { return "Null"; }
std::string getTableName() const override { return name; }
const NamesAndTypesList & getColumnsList() const override { return *columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return *columns; }
BlockInputStreams read(
const Names & column_names,
@ -47,8 +52,13 @@ private:
String name;
NamesAndTypesListPtr columns;
StorageNull(const std::string & name_, NamesAndTypesListPtr columns_)
: name(name_), columns(columns_) {}
StorageNull(
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_)
: IStorage{materialized_columns_, alias_columns_, column_defaults_}, name(name_), columns(columns_) {}
};
}

View File

@ -31,6 +31,9 @@ public:
bool attach,
const String & path_, const String & database_name_, const String & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
Context & context_,
ASTPtr & primary_expr_ast_,
const String & date_column_name_,
@ -53,7 +56,7 @@ public:
bool supportsFinal() const override { return data.supportsFinal(); }
bool supportsPrewhere() const override { return data.supportsPrewhere(); }
const NamesAndTypesList & getColumnsList() const override { return data.getColumnsList(); }
const NamesAndTypesList & getColumnsListImpl() const override { return data.getColumnsListNonMaterialized(); }
NameAndTypePair getColumn(const String & column_name) const override
{
@ -247,6 +250,9 @@ private:
bool attach,
const String & path_, const String & database_name_, const String & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
Context & context_,
ASTPtr & primary_expr_ast_,
const String & date_column_name_,

View File

@ -22,7 +22,7 @@ public:
std::string getName() const override { return "SystemDatabases"; }
std::string getTableName() const override { return name; }
const NamesAndTypesList & getColumnsList() const override { return columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return columns; }
BlockInputStreams read(
const Names & column_names,

View File

@ -21,7 +21,7 @@ public:
std::string getName() const override { return "SystemEvents"; }
std::string getTableName() const override { return name; }
const NamesAndTypesList & getColumnsList() const override { return columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return columns; }
BlockInputStreams read(
const Names & column_names,

View File

@ -13,7 +13,7 @@ public:
std::string getName() const override { return "SystemMerges"; }
std::string getTableName() const override { return name; }
const NamesAndTypesList & getColumnsList() const override { return columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return columns; }
BlockInputStreams read(
const Names & column_names,
ASTPtr query,

View File

@ -19,11 +19,11 @@ class StorageSystemNumbers : public IStorage
{
public:
static StoragePtr create(const std::string & name_, bool multithreaded_ = false);
std::string getName() const override { return "SystemNumbers"; }
std::string getTableName() const override { return name; }
const NamesAndTypesList & getColumnsList() const override { return columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return columns; }
BlockInputStreams read(
const Names & column_names,
@ -37,7 +37,7 @@ private:
const std::string name;
NamesAndTypesList columns;
bool multithreaded;
StorageSystemNumbers(const std::string & name_, bool multithreaded_);
};

View File

@ -16,11 +16,11 @@ class StorageSystemOne : public IStorage
{
public:
static StoragePtr create(const std::string & name_);
std::string getName() const override { return "SystemOne"; }
std::string getTableName() const override { return name; }
const NamesAndTypesList & getColumnsList() const override { return columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return columns; }
BlockInputStreams read(
const Names & column_names,
@ -33,7 +33,7 @@ public:
private:
const std::string name;
NamesAndTypesList columns;
StorageSystemOne(const std::string & name_);
};

View File

@ -22,7 +22,7 @@ public:
std::string getName() const override{ return "SystemParts"; }
std::string getTableName() const override { return name; }
const NamesAndTypesList & getColumnsList() const override { return columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return columns; }
BlockInputStreams read(
const Names & column_names,

View File

@ -22,7 +22,7 @@ public:
std::string getName() const override { return "SystemProcesses"; }
std::string getTableName() const override { return name; }
const NamesAndTypesList & getColumnsList() const override { return columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return columns; }
BlockInputStreams read(
const Names & column_names,

View File

@ -22,7 +22,7 @@ public:
std::string getName() const override { return "SystemReplicas"; }
std::string getTableName() const override { return name; }
const NamesAndTypesList & getColumnsList() const override { return columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return columns; }
BlockInputStreams read(
const Names & column_names,

View File

@ -22,7 +22,7 @@ public:
std::string getName() const override { return "SystemSettings"; }
std::string getTableName() const override { return name; }
const NamesAndTypesList & getColumnsList() const override { return columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return columns; }
BlockInputStreams read(
const Names & column_names,

View File

@ -22,7 +22,7 @@ public:
std::string getName() const override { return "SystemTables"; }
std::string getTableName() const override { return name; }
const NamesAndTypesList & getColumnsList() const override { return columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return columns; }
BlockInputStreams read(
const Names & column_names,

View File

@ -22,7 +22,7 @@ public:
std::string getName() const override { return "SystemZooKeeper"; }
std::string getTableName() const override { return name; }
const NamesAndTypesList & getColumnsList() const override { return columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return columns; }
BlockInputStreams read(
const Names & column_names,

View File

@ -29,7 +29,7 @@ public:
String getName() const { return "TinyLogBlockInputStream"; }
String getID() const;
protected:
Block readImpl();
private:
@ -89,7 +89,7 @@ private:
typedef std::map<std::string, std::unique_ptr<Stream> > FileStreams;
FileStreams streams;
typedef std::set<std::string> OffsetColumns;
void addStream(const String & name, const IDataType & type, size_t level = 0);
@ -115,13 +115,16 @@ public:
const std::string & path_,
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
bool attach,
size_t max_compress_block_size_ = DEFAULT_MAX_COMPRESS_BLOCK_SIZE);
std::string getName() const override { return "TinyLog"; }
std::string getTableName() const override { return name; }
const NamesAndTypesList & getColumnsList() const override { return *columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return *columns; }
BlockInputStreams read(
const Names & column_names,
@ -134,7 +137,7 @@ public:
BlockOutputStreamPtr write(ASTPtr query) override;
void drop() override;
void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override;
bool checkData() const override;
@ -148,7 +151,7 @@ public:
Files_t & getFiles();
std::string full_path() { return path + escapeForFileName(name) + '/';}
std::string full_path() { return path + escapeForFileName(name) + '/';}
private:
String path;
@ -163,8 +166,16 @@ private:
Logger * log;
StorageTinyLog(const std::string & path_, const std::string & name_, NamesAndTypesListPtr columns_, bool attach, size_t max_compress_block_size_);
StorageTinyLog(
const std::string & path_,
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
bool attach,
size_t max_compress_block_size_);
void addFile(const String & column_name, const IDataType & type, size_t level = 0);
};

View File

@ -11,12 +11,19 @@ class StorageView : public IStorage
{
public:
static StoragePtr create(const String & table_name_, const String & database_name_,
Context & context_, ASTPtr & query_, NamesAndTypesListPtr columns_);
static StoragePtr create(
const String & table_name_,
const String & database_name_,
Context & context_,
ASTPtr & query_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_);
std::string getName() const override { return "View"; }
std::string getTableName() const override { return table_name; }
const NamesAndTypesList & getColumnsList() const override { return *columns; }
const NamesAndTypesList & getColumnsListImpl() const override { return *columns; }
ASTPtr getInnerQuery() const { return inner_query.clone(); };
/// Пробрасывается внутрь запроса и решается на его уровне.
@ -42,8 +49,15 @@ protected:
Context & context;
NamesAndTypesListPtr columns;
StorageView(const String & table_name_, const String & database_name_,
Context & context_, ASTPtr & query_, NamesAndTypesListPtr columns_);
StorageView(
const String & table_name_,
const String & database_name_,
Context & context_,
ASTPtr & query_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_);
};
}

View File

@ -48,7 +48,8 @@ public:
/// Нам необходимо его пометить как имя базы данных, поскольку по умолчанию стоит значение column
typeid_cast<ASTIdentifier &>(*args[0]).kind = ASTIdentifier::Database;
return StorageMerge::create(getName(), chooseColumns(source_database, table_name_regexp, context), source_database, table_name_regexp, context);
return StorageMerge::create(getName(), chooseColumns(source_database, table_name_regexp, context),
source_database, table_name_regexp, context);
}
private:

View File

@ -5,9 +5,17 @@
#include <DB/Core/Block.h>
#include <DB/Storages/ColumnDefault.h>
#include <DB/Columns/ColumnArray.h>
#include <DB/DataTypes/DataTypeNested.h>
#include <DB/Parsers/ASTExpressionList.h>
#include <DB/Interpreters/ExpressionAnalyzer.h>
#include <statdaemons/stdext.h>
#include <DB/Parsers/formatAST.h>
namespace DB
{
@ -19,20 +27,11 @@ Block::Block(const Block & other)
}
void Block::addDefaults(NamesAndTypesListPtr required_columns)
void Block::addDefaults(const NamesAndTypesList & required_columns)
{
for (NamesAndTypesList::const_iterator it = required_columns->begin(); it != required_columns->end(); ++it)
{
if (!has(it->name))
{
ColumnWithNameAndType col;
col.name = it->name;
col.type = it->type;
col.column = dynamic_cast<IColumnConst &>(*it->type->createConstColumn(
rows(), it->type->getDefault())).convertToFullColumn();
insert(col);
}
}
for (const auto & column : required_columns)
if (!has(column.name))
insertDefault(column.name, column.type);
}
Block & Block::operator= (const Block & other)
@ -82,6 +81,16 @@ void Block::insert(const ColumnWithNameAndType & elem)
index_by_position.push_back(it);
}
void Block::insertDefault(const String & name, const DataTypePtr & type)
{
insert({
dynamic_cast<IColumnConst &>(*type->createConstColumn(rows(),
type->getDefault())).convertToFullColumn(),
type, name
});
}
void Block::insertUnique(const ColumnWithNameAndType & elem)
{

View File

@ -63,8 +63,10 @@ void ExpressionAnalyzer::init()
{
select_query = typeid_cast<ASTSelectQuery *>(&*ast);
addStorageAliases();
/// Создаёт словарь aliases: alias -> ASTPtr
createAliasesDict(ast);
addASTAliases(ast);
/// Common subexpression elimination. Rewrite rules.
normalizeTree();
@ -235,9 +237,19 @@ NamesAndTypesList::iterator ExpressionAnalyzer::findColumn(const String & name,
}
void ExpressionAnalyzer::addStorageAliases()
{
if (!storage)
return;
for (const auto & alias : storage->alias_columns)
aliases[alias.name] = storage->column_defaults[alias.name].expression;
}
/// ignore_levels - алиасы в скольки верхних уровнях поддерева нужно игнорировать.
/// Например, при ignore_levels=1 ast не может быть занесен в словарь, но его дети могут.
void ExpressionAnalyzer::createAliasesDict(ASTPtr & ast, int ignore_levels)
void ExpressionAnalyzer::addASTAliases(ASTPtr & ast, int ignore_levels)
{
ASTSelectQuery * select = typeid_cast<ASTSelectQuery *>(&*ast);
@ -252,7 +264,7 @@ void ExpressionAnalyzer::createAliasesDict(ASTPtr & ast, int ignore_levels)
new_ignore_levels = 2;
if (!typeid_cast<ASTSelectQuery *>(&*child))
createAliasesDict(child, new_ignore_levels);
addASTAliases(child, new_ignore_levels);
}
if (ignore_levels > 0)
@ -1689,18 +1701,34 @@ void ExpressionAnalyzer::collectUsedColumns()
++it;
}
/// Возможно, среди неизвестных столбцов есть виртуальные. Удаляем их из списка неизвестных и добавляем
/// в columns list, чтобы при дальнейшей обработке запроса они воспринимались как настоящие.
for (NameSet::iterator it = unknown_required_columns.begin(); it != unknown_required_columns.end();)
for (NamesAndTypesList::iterator it = columns.begin(); it != columns.end();)
{
if (storage && storage->hasColumn(*it))
unknown_required_columns.erase(it->name);
if (!required.count(it->name))
{
columns.push_back(storage->getColumn(*it));
unknown_required_columns.erase(it++);
required.erase(it->name);
columns.erase(it++);
}
else
++it;
}
/// Возможно, среди неизвестных столбцов есть виртуальные. Удаляем их из списка неизвестных и добавляем
/// в columns list, чтобы при дальнейшей обработке запроса они воспринимались как настоящие.
if (storage)
{
for (auto it = unknown_required_columns.begin(); it != unknown_required_columns.end();)
{
if (storage->hasColumn(*it))
{
columns.push_back(storage->getColumn(*it));
unknown_required_columns.erase(it++);
}
else
++it;
}
}
}
void ExpressionAnalyzer::collectJoinedColumns(NameSet & joined_columns, NamesAndTypesList & joined_columns_name_type)

View File

@ -28,15 +28,15 @@ InterpreterAlterQuery::InterpreterAlterQuery(ASTPtr query_ptr_, Context & contex
void InterpreterAlterQuery::execute()
{
ASTAlterQuery & alter = typeid_cast<ASTAlterQuery &>(*query_ptr);
String & table_name = alter.table;
auto & alter = typeid_cast<ASTAlterQuery &>(*query_ptr);
const String & table_name = alter.table;
String database_name = alter.database.empty() ? context.getCurrentDatabase() : alter.database;
StoragePtr table = context.getTable(database_name, table_name);
AlterCommands alter_commands;
PartitionCommands partition_commands;
parseAlter(alter.parameters, context.getDataTypeFactory(), alter_commands, partition_commands);
StoragePtr table = context.getTable(database_name, table_name);
for (const PartitionCommand & command : partition_commands)
{
switch (command.type)
@ -58,8 +58,12 @@ void InterpreterAlterQuery::execute()
}
}
if (!alter_commands.empty())
table->alter(alter_commands, database_name, table_name, context);
if (alter_commands.empty())
return;
alter_commands.validate(table.get(), context);
table->alter(alter_commands, database_name, table_name, context);
}
void InterpreterAlterQuery::parseAlter(
@ -73,12 +77,20 @@ void InterpreterAlterQuery::parseAlter(
AlterCommand command;
command.type = AlterCommand::ADD;
const ASTNameTypePair & ast_name_type = typeid_cast<const ASTNameTypePair &>(*params.name_type);
StringRange type_range = ast_name_type.type->range;
String type_string = String(type_range.first, type_range.second - type_range.first);
const auto & ast_col_decl = typeid_cast<const ASTColumnDeclaration &>(*params.col_decl);
command.column_name = ast_name_type.name;
command.data_type = data_type_factory.get(type_string);
command.column_name = ast_col_decl.name;
if (ast_col_decl.type)
{
StringRange type_range = ast_col_decl.type->range;
String type_string(type_range.first, type_range.second - type_range.first);
command.data_type = data_type_factory.get(type_string);
}
if (ast_col_decl.default_expression)
{
command.default_type = columnDefaultTypeFromString(ast_col_decl.default_specifier);
command.default_expression = ast_col_decl.default_expression;
}
if (params.column)
command.after_column = typeid_cast<const ASTIdentifier &>(*params.column).name;
@ -98,12 +110,21 @@ void InterpreterAlterQuery::parseAlter(
AlterCommand command;
command.type = AlterCommand::MODIFY;
const ASTNameTypePair & ast_name_type = typeid_cast<const ASTNameTypePair &>(*params.name_type);
StringRange type_range = ast_name_type.type->range;
String type_string = String(type_range.first, type_range.second - type_range.first);
const auto & ast_col_decl = typeid_cast<const ASTColumnDeclaration &>(*params.col_decl);
command.column_name = ast_name_type.name;
command.data_type = data_type_factory.get(type_string);
command.column_name = ast_col_decl.name;
if (ast_col_decl.type)
{
StringRange type_range = ast_col_decl.type->range;
String type_string(type_range.first, type_range.second - type_range.first);
command.data_type = data_type_factory.get(type_string);
}
if (ast_col_decl.default_expression)
{
command.default_type = columnDefaultTypeFromString(ast_col_decl.default_specifier);
command.default_expression = ast_col_decl.default_expression;
}
out_alter_commands.push_back(command);
}
@ -128,7 +149,13 @@ void InterpreterAlterQuery::parseAlter(
}
void InterpreterAlterQuery::updateMetadata(
const String & database_name, const String & table_name, const NamesAndTypesList & columns, Context & context)
const String & database_name,
const String & table_name,
const NamesAndTypesList & columns,
const NamesAndTypesList & materialized_columns,
const NamesAndTypesList & alias_columns,
const ColumnDefaults & column_defaults,
Context & context)
{
String path = context.getPath();
@ -163,7 +190,7 @@ void InterpreterAlterQuery::updateMetadata(
ASTCreateQuery & attach = typeid_cast<ASTCreateQuery &>(*ast);
ASTPtr new_columns = InterpreterCreateQuery::formatColumns(columns);
ASTPtr new_columns = InterpreterCreateQuery::formatColumns(columns, materialized_columns, alias_columns, column_defaults);
*std::find(attach.children.begin(), attach.children.end(), attach.columns) = new_columns;
attach.columns = new_columns;

View File

@ -11,6 +11,7 @@
#include <DB/Parsers/ASTCreateQuery.h>
#include <DB/Parsers/ASTNameTypePair.h>
#include <DB/Parsers/ASTColumnDeclaration.h>
#include <DB/Storages/StorageLog.h>
#include <DB/Storages/StorageSystemNumbers.h>
@ -20,6 +21,8 @@
#include <DB/Interpreters/InterpreterSelectQuery.h>
#include <DB/Interpreters/InterpreterCreateQuery.h>
#include <DB/Interpreters/ExpressionAnalyzer.h>
#include <DB/DataTypes/DataTypesNumberFixed.h>
#include <DB/DataTypes/DataTypeNested.h>
@ -87,6 +90,9 @@ StoragePtr InterpreterCreateQuery::execute(bool assume_metadata_exists)
StoragePtr res;
String storage_name;
NamesAndTypesListPtr columns = new NamesAndTypesList;
NamesAndTypesList materialized_columns{};
NamesAndTypesList alias_columns{};
ColumnDefaults column_defaults{};
StoragePtr as_storage;
IStorage::TableStructureReadLockPtr as_storage_lock;
@ -115,10 +121,22 @@ StoragePtr InterpreterCreateQuery::execute(bool assume_metadata_exists)
/// Получаем список столбцов
if (create.columns)
{
columns = new NamesAndTypesList(parseColumns(create.columns, context.getDataTypeFactory()));
auto && columns_and_defaults = parseColumns(create.columns);
materialized_columns = removeAndReturnColumns(columns_and_defaults, ColumnDefaultType::Materialized);
alias_columns = removeAndReturnColumns(columns_and_defaults, ColumnDefaultType::Alias);
columns = new NamesAndTypesList{std::move(columns_and_defaults.first)};
column_defaults = std::move(columns_and_defaults.second);
if (columns->size() + materialized_columns.size() == 0)
throw Exception{"Cannot CREATE table without physical columns", ErrorCodes::EMPTY_LIST_OF_COLUMNS_PASSED};
}
else if (!create.as_table.empty())
columns = new NamesAndTypesList(as_storage->getColumnsList());
{
columns = new NamesAndTypesList(as_storage->getColumnsListNonMaterialized());
materialized_columns = as_storage->materialized_columns;
alias_columns = as_storage->alias_columns;
column_defaults = as_storage->column_defaults;
}
else if (create.select)
{
columns = new NamesAndTypesList;
@ -129,7 +147,7 @@ StoragePtr InterpreterCreateQuery::execute(bool assume_metadata_exists)
throw Exception("Incorrect CREATE query: required list of column descriptions or AS section or SELECT.", ErrorCodes::INCORRECT_QUERY);
/// Даже если в запросе был список столбцов, на всякий случай приведем его к стандартному виду (развернем Nested).
ASTPtr new_columns = formatColumns(*columns);
ASTPtr new_columns = formatColumns(*columns, materialized_columns, alias_columns, column_defaults);
if (create.columns)
{
auto it = std::find(create.children.begin(), create.children.end(), create.columns);
@ -178,7 +196,8 @@ StoragePtr InterpreterCreateQuery::execute(bool assume_metadata_exists)
res = context.getStorageFactory().get(
storage_name, data_path, table_name, database_name, context,
context.getGlobalContext(), query_ptr, columns, create.attach);
context.getGlobalContext(), query_ptr, columns,
materialized_columns, alias_columns, column_defaults, create.attach);
/// Проверка наличия метаданных таблицы на диске и создание метаданных
if (!assume_metadata_exists && !create.is_temporary)
@ -233,47 +252,195 @@ StoragePtr InterpreterCreateQuery::execute(bool assume_metadata_exists)
return res;
}
NamesAndTypesList InterpreterCreateQuery::parseColumns(ASTPtr expression_list, const DataTypeFactory & data_type_factory)
InterpreterCreateQuery::ColumnsAndDefaults InterpreterCreateQuery::parseColumns(ASTPtr expression_list)
{
NamesAndTypesList columns;
ASTExpressionList & columns_list = typeid_cast<ASTExpressionList &>(*expression_list);
for (const ASTPtr & ast : columns_list.children)
auto & column_list_ast = typeid_cast<ASTExpressionList &>(*expression_list);
/// list of table columns in correct order
NamesAndTypesList columns{};
ColumnDefaults defaults{};
/// Columns requiring type-deduction or default_expression type-check
std::vector<std::pair<NameAndTypePair *, ASTColumnDeclaration *>> defaulted_columns{};
/** all default_expressions as a single expression list,
* mixed with conversion-columns for each explicitly specified type */
ASTPtr default_expr_list{new ASTExpressionList};
default_expr_list->children.reserve(column_list_ast.children.size());
for (auto & ast : column_list_ast.children)
{
const ASTNameTypePair & name_and_type_pair = typeid_cast<const ASTNameTypePair &>(*ast);
StringRange type_range = name_and_type_pair.type->range;
columns.push_back(NameAndTypePair(
name_and_type_pair.name,
data_type_factory.get(String(type_range.first, type_range.second - type_range.first))));
auto & col_decl = typeid_cast<ASTColumnDeclaration &>(*ast);
if (col_decl.type)
{
const auto & type_range = col_decl.type->range;
columns.emplace_back(col_decl.name,
context.getDataTypeFactory().get({ type_range.first, type_range.second }));
}
else
/// we're creating dummy DataTypeUInt8 in order to prevent the NullPointerException in ExpressionActions
columns.emplace_back(col_decl.name, new DataTypeUInt8);
/// add column to postprocessing if there is a default_expression specified
if (col_decl.default_expression)
{
defaulted_columns.emplace_back(&columns.back(), &col_decl);
/** for columns with explicitly-specified type create two expressions:
* 1. default_expression aliased as column name with _tmp suffix
* 2. conversion of expression (1) to explicitly-specified type alias as column name */
if (col_decl.type)
{
const auto tmp_column_name = col_decl.name + "_tmp";
const auto & final_column_name = col_decl.name;
const auto conversion_function_name = "to" + columns.back().type->getName();
default_expr_list->children.emplace_back(setAlias(
makeASTFunction(conversion_function_name, ASTPtr{new ASTIdentifier{{}, tmp_column_name}}),
final_column_name));
default_expr_list->children.emplace_back(setAlias(col_decl.default_expression->clone(), tmp_column_name));
}
else
{
default_expr_list->children.emplace_back(setAlias(col_decl.default_expression->clone(), col_decl.name));
}
}
}
columns = *DataTypeNested::expandNestedColumns(columns);
return columns;
/// set missing types and wrap default_expression's in a conversion-function if necessary
if (!defaulted_columns.empty())
{
const auto actions = ExpressionAnalyzer{default_expr_list, context, columns}.getActions(true);
const auto block = actions->getSampleBlock();
for (auto & column : defaulted_columns)
{
const auto name_and_type_ptr = column.first;
const auto col_decl_ptr = column.second;
if (col_decl_ptr->type)
{
const auto & tmp_column = block.getByName(col_decl_ptr->name + "_tmp");
/// type mismatch between explicitly specified and deduced type, add conversion
if (typeid(*name_and_type_ptr->type) != typeid(*tmp_column.type))
{
col_decl_ptr->default_expression = makeASTFunction(
"to" + name_and_type_ptr->type->getName(),
col_decl_ptr->default_expression);
col_decl_ptr->children.clear();
col_decl_ptr->children.push_back(col_decl_ptr->type);
col_decl_ptr->children.push_back(col_decl_ptr->default_expression);
}
}
else
name_and_type_ptr->type = block.getByName(name_and_type_ptr->name).type;
defaults.emplace(col_decl_ptr->name, ColumnDefault{
columnDefaultTypeFromString(col_decl_ptr->default_specifier),
col_decl_ptr->default_expression
});
}
}
return { *DataTypeNested::expandNestedColumns(columns), defaults };
}
NamesAndTypesList InterpreterCreateQuery::removeAndReturnColumns(ColumnsAndDefaults & columns_and_defaults,
const ColumnDefaultType type)
{
auto & columns = columns_and_defaults.first;
auto & defaults = columns_and_defaults.second;
NamesAndTypesList removed{};
for (auto it = std::begin(columns); it != std::end(columns);)
{
const auto jt = defaults.find(it->name);
if (jt != std::end(defaults) && jt->second.type == type)
{
removed.push_back(*it);
it = columns.erase(it);
}
else
++it;
}
return removed;
}
ASTPtr InterpreterCreateQuery::formatColumns(const NamesAndTypesList & columns)
{
ASTPtr columns_list_ptr = new ASTExpressionList;
ASTPtr columns_list_ptr{new ASTExpressionList};
ASTExpressionList & columns_list = typeid_cast<ASTExpressionList &>(*columns_list_ptr);
for (const NameAndTypePair & it : columns)
for (const auto & column : columns)
{
ASTPtr name_and_type_pair_ptr = new ASTNameTypePair;
ASTNameTypePair & name_and_type_pair = typeid_cast<ASTNameTypePair &>(*name_and_type_pair_ptr);
name_and_type_pair.name = it.name;
StringPtr type_name = new String(it.type->getName());
const auto column_declaration = new ASTColumnDeclaration;
ASTPtr column_declaration_ptr{column_declaration};
column_declaration->name = column.name;
StringPtr type_name{new String(column.type->getName())};
auto pos = type_name->data();
const auto end = pos + type_name->size();
ParserIdentifierWithOptionalParameters storage_p;
Expected expected = "";
const char * pos = type_name->data();
const char * end = pos + type_name->size();
if (!storage_p.parse(pos, end, name_and_type_pair.type, expected))
Expected expected{""};
if (!storage_p.parse(pos, end, column_declaration->type, expected))
throw Exception("Cannot parse data type.", ErrorCodes::SYNTAX_ERROR);
name_and_type_pair.type->query_string = type_name;
columns_list.children.push_back(name_and_type_pair_ptr);
column_declaration->type->query_string = type_name;
columns_list.children.push_back(column_declaration_ptr);
}
return columns_list_ptr;
}
ASTPtr InterpreterCreateQuery::formatColumns(NamesAndTypesList columns,
const NamesAndTypesList & materialized_columns,
const NamesAndTypesList & alias_columns,
const ColumnDefaults & column_defaults)
{
columns.insert(std::end(columns), std::begin(materialized_columns), std::end(materialized_columns));
columns.insert(std::end(columns), std::begin(alias_columns), std::end(alias_columns));
ASTPtr columns_list_ptr{new ASTExpressionList};
ASTExpressionList & columns_list = typeid_cast<ASTExpressionList &>(*columns_list_ptr);
for (const auto & column : columns)
{
const auto column_declaration = new ASTColumnDeclaration;
ASTPtr column_declaration_ptr{column_declaration};
column_declaration->name = column.name;
StringPtr type_name{new String(column.type->getName())};
auto pos = type_name->data();
const auto end = pos + type_name->size();
ParserIdentifierWithOptionalParameters storage_p;
Expected expected{""};
if (!storage_p.parse(pos, end, column_declaration->type, expected))
throw Exception("Cannot parse data type.", ErrorCodes::SYNTAX_ERROR);
column_declaration->type->query_string = type_name;
const auto it = column_defaults.find(column.name);
if (it != std::end(column_defaults))
{
column_declaration->default_specifier = toString(it->second.type);
column_declaration->default_expression = it->second.expression->clone();
}
columns_list.children.push_back(column_declaration_ptr);
}
return columns_list_ptr;
}
}

View File

@ -1,7 +1,8 @@
#include <DB/IO/ConcatReadBuffer.h>
#include <DB/DataStreams/ProhibitColumnsBlockOutputStream.h>
#include <DB/DataStreams/MaterializingBlockOutputStream.h>
#include <DB/DataStreams/AddingDefaultBlockOutputStream.h>
#include <DB/DataStreams/MaterializingBlockInputStream.h>
#include <DB/DataStreams/PushingToViewsBlockOutputStream.h>
#include <DB/DataStreams/NullAndDoCopyBlockInputStream.h>
#include <DB/DataStreams/copyData.h>
@ -35,11 +36,12 @@ StoragePtr InterpreterInsertQuery::getTable()
Block InterpreterInsertQuery::getSampleBlock()
{
ASTInsertQuery & query = typeid_cast<ASTInsertQuery &>(*query_ptr);
Block db_sample = getTable()->getSampleBlock();
/// Если в запросе не указана информация о столбцах
if (!query.columns)
return db_sample;
return getTable()->getSampleBlockNonMaterialized();
Block db_sample = getTable()->getSampleBlock();
/// Формируем блок, основываясь на именах столбцов из запроса
Block res;
@ -68,19 +70,31 @@ void InterpreterInsertQuery::execute(ReadBuffer * remaining_data_istr)
auto table_lock = table->lockStructure(true);
BlockInputStreamPtr in;
NamesAndTypesListPtr required_columns = new NamesAndTypesList(table->getSampleBlock().getColumnsList());
/** @note looks suspicious, first we ask to create block from NamesAndTypesList (internally in ITableDeclaration),
* then we compose the same list from the resulting block */
NamesAndTypesListPtr required_columns = new NamesAndTypesList(table->getColumnsList());
/// Надо убедиться, что запрос идет в таблицу, которая поддерживает вставку.
/// TODO Плохо - исправить.
table->write(query_ptr);
/// Создаем кортеж из нескольких стримов, в которые будем писать данные.
BlockOutputStreamPtr out = new AddingDefaultBlockOutputStream(new PushingToViewsBlockOutputStream(query.database, query.table, context, query_ptr), required_columns);
BlockOutputStreamPtr out{
new ProhibitColumnsBlockOutputStream{
new AddingDefaultBlockOutputStream{
new MaterializingBlockOutputStream{
new PushingToViewsBlockOutputStream{query.database, query.table, context, query_ptr}
},
required_columns, table->column_defaults, context
},
table->materialized_columns
}
};
/// Какой тип запроса: INSERT VALUES | INSERT FORMAT | INSERT SELECT?
if (!query.select)
{
String format = query.format;
if (format.empty())
format = "Values";
@ -101,14 +115,17 @@ void InterpreterInsertQuery::execute(ReadBuffer * remaining_data_istr)
ConcatReadBuffer istr(buffers);
Block sample = getSampleBlock();
in = context.getFormatFactory().getInput(format, istr, sample, context.getSettings().max_insert_block_size, context.getDataTypeFactory());
BlockInputStreamPtr in{
context.getFormatFactory().getInput(
format, istr, sample, context.getSettings().max_insert_block_size,
context.getDataTypeFactory())
};
copyData(*in, *out);
}
else
{
InterpreterSelectQuery interpreter_select(query.select, context);
in = interpreter_select.execute();
in = new MaterializingBlockInputStream(in);
BlockInputStreamPtr in{interpreter_select.execute()};
copyData(*in, *out);
}
@ -122,15 +139,24 @@ BlockIO InterpreterInsertQuery::execute()
auto table_lock = table->lockStructure(true);
NamesAndTypesListPtr required_columns = new NamesAndTypesList(table->getSampleBlock().getColumnsList());
NamesAndTypesListPtr required_columns = new NamesAndTypesList(table->getColumnsList());
/// Надо убедиться, что запрос идет в таблицу, которая поддерживает вставку.
/// TODO Плохо - исправить.
table->write(query_ptr);
/// Создаем кортеж из нескольких стримов, в которые будем писать данные.
BlockOutputStreamPtr out = new AddingDefaultBlockOutputStream(
new PushingToViewsBlockOutputStream(query.database, query.table, context, query_ptr), required_columns);
BlockOutputStreamPtr out{
new ProhibitColumnsBlockOutputStream{
new AddingDefaultBlockOutputStream{
new MaterializingBlockOutputStream{
new PushingToViewsBlockOutputStream{query.database, query.table, context, query_ptr}
},
required_columns, table->column_defaults, context
},
table->materialized_columns
}
};
BlockIO res;
res.out_sample = getSampleBlock();
@ -142,9 +168,9 @@ BlockIO InterpreterInsertQuery::execute()
}
else
{
InterpreterSelectQuery interpreter_select(query.select, context);
BlockInputStreamPtr in = new MaterializingBlockInputStream(interpreter_select.execute());
res.in = new NullAndDoCopyBlockInputStream(in, out);
InterpreterSelectQuery interpreter_select{query.select, context};
BlockInputStreamPtr in{interpreter_select.execute()};
res.in = new NullAndDoCopyBlockInputStream{in, out};
}
return res;

View File

@ -67,7 +67,7 @@ void InterpreterSelectQuery::init(BlockInputStreamPtr input_, const NamesAndType
table_lock = storage->lockStructure(false);
if (table_column_names.empty())
context.setColumns(storage->getColumnsList());
context.setColumns(storage->getColumnsListNonMaterialized());
}
if (!table_column_names.empty())

View File

@ -210,14 +210,8 @@ bool ParserVariableArityOperatorList::parseImpl(Pos & pos, Pos end, ASTPtr & nod
if (!arguments)
{
ASTFunction * function = new ASTFunction;
ASTPtr function_node = function;
arguments = new ASTExpressionList;
function->arguments = arguments;
function->children.push_back(arguments);
function->name = function_name;
arguments->children.push_back(node);
node = function_node;
node = makeASTFunction(function_name, node);
arguments = static_cast<ASTFunction &>(*node).arguments;
}
ASTPtr elem;

View File

@ -35,7 +35,7 @@ bool ParserAlterQuery::parseImpl(Pos & pos, Pos end, ASTPtr & node, Expected & e
ParserIdentifier table_parser;
ParserCompoundIdentifier parser_name;
ParserCompoundNameTypePair parser_name_type;
ParserCompoundColumnDeclaration parser_col_decl;
ParserLiteral parser_literal;
ParserStringLiteral parser_string_literal;
@ -89,7 +89,7 @@ bool ParserAlterQuery::parseImpl(Pos & pos, Pos end, ASTPtr & node, Expected & e
return false;
ws.ignore(pos, end);
parser_name_type.parse(pos, end, params.name_type, expected);
parser_col_decl.parse(pos, end, params.col_decl, expected);
ws.ignore(pos, end);
if (s_after.ignore(pos, end, expected))
@ -198,7 +198,7 @@ bool ParserAlterQuery::parseImpl(Pos & pos, Pos end, ASTPtr & node, Expected & e
return false;
ws.ignore(pos, end);
if (!parser_name_type.parse(pos, end, params.name_type, expected))
if (!parser_col_decl.parse(pos, end, params.col_decl, expected))
return false;
ws.ignore(pos, end);

View File

@ -103,6 +103,11 @@ bool ParserNameTypePairList::parseImpl(Pos & pos, Pos end, ASTPtr & node, Expect
return ParserList(ParserPtr(new ParserNameTypePair), ParserPtr(new ParserString(",")), false).parse(pos, end, node, expected);
}
bool ParserColumnDeclarationList::parseImpl(Pos & pos, Pos end, ASTPtr & node, Expected & expected)
{
return ParserList{ParserPtr{new ParserColumnDeclaration}, ParserPtr{new ParserString{","}}, false}.parse(pos, end, node, expected);
}
bool ParserEngine::parseImpl(Pos & pos, Pos end, ASTPtr & storage, Expected & expected)
{
@ -155,7 +160,7 @@ bool ParserCreateQuery::parseImpl(Pos & pos, Pos end, ASTPtr & node, Expected &
ParserString s_populate("POPULATE", true, true);
ParserEngine engine_p;
ParserIdentifier name_p;
ParserNameTypePairList columns_p;
ParserColumnDeclarationList columns_p;
ASTPtr database;
ASTPtr table;

View File

@ -39,6 +39,12 @@ String backQuoteIfNeed(const String & x)
}
String hightlight(const String & keyword, const String & color_sequence, const bool hilite)
{
return hilite ? color_sequence + keyword + hilite_none : keyword;
}
void formatAST(const IAST & ast, std::ostream & s, size_t indent, bool hilite, bool one_line, bool need_parens)
{
@ -64,6 +70,7 @@ void formatAST(const IAST & ast, std::ostream & s, size_t indent, bool hilite, b
DISPATCH(Identifier)
DISPATCH(Literal)
DISPATCH(NameTypePair)
DISPATCH(ColumnDeclaration)
DISPATCH(Asterisk)
DISPATCH(OrderByElement)
DISPATCH(Subquery)
@ -691,6 +698,25 @@ void formatAST(const ASTNameTypePair & ast, std::ostream & s, size_t indent, bo
formatAST(*ast.type, s, indent, hilite, one_line);
}
void formatAST(const ASTColumnDeclaration & ast, std::ostream & s, size_t indent, bool hilite, bool one_line, bool need_parens)
{
std::string indent_str = one_line ? "" : std::string(4 * indent, ' ');
std::string nl_or_ws = one_line ? " " : "\n";
s << nl_or_ws << indent_str << backQuoteIfNeed(ast.name);
if (ast.type)
{
s << ' ';
formatAST(*ast.type, s, indent, hilite, one_line);
}
if (ast.default_expression)
{
s << ' ' << hightlight(ast.default_specifier, hilite_keyword, hilite) << ' ';
formatAST(*ast.default_expression, s, indent, hilite, one_line);
}
}
void formatAST(const ASTAsterisk & ast, std::ostream & s, size_t indent, bool hilite, bool one_line, bool need_parens)
{
s << "*";
@ -734,7 +760,7 @@ void formatAST(const ASTAlterQuery & ast, std::ostream & s, size_t indent, bo
if (p.type == ASTAlterQuery::ADD_COLUMN)
{
s << (hilite ? hilite_keyword : "") << indent_str << "ADD COLUMN " << (hilite ? hilite_none : "");
formatAST(*p.name_type, s, indent, hilite, true);
formatAST(*p.col_decl, s, indent, hilite, true);
/// AFTER
if (p.column)
@ -751,7 +777,7 @@ void formatAST(const ASTAlterQuery & ast, std::ostream & s, size_t indent, bo
else if (p.type == ASTAlterQuery::MODIFY_COLUMN)
{
s << (hilite ? hilite_keyword : "") << indent_str << "MODIFY COLUMN " << (hilite ? hilite_none : "");
formatAST(*p.name_type, s, indent, hilite, true);
formatAST(*p.col_decl, s, indent, hilite, true);
}
else if (p.type == ASTAlterQuery::DROP_PARTITION)
{
@ -863,4 +889,3 @@ String formatColumnsForCreateQuery(NamesAndTypesList & columns)
}
}

View File

@ -0,0 +1,14 @@
#include <DB/Parsers/queryToString.h>
#include <DB/Parsers/formatAST.h>
#include <sstream>
namespace DB
{
String queryToString(const ASTPtr & query)
{
std::ostringstream out;
formatAST(*query, out, 0, false, true);
return out.str();
}
}

View File

@ -0,0 +1,328 @@
#include <DB/Storages/AlterCommands.h>
#include <DB/Storages/IStorage.h>
#include <DB/DataTypes/DataTypesNumberFixed.h>
#include <DB/DataTypes/DataTypeNested.h>
#include <DB/DataTypes/DataTypeArray.h>
#include <DB/Interpreters/Context.h>
#include <DB/Interpreters/ExpressionAnalyzer.h>
#include <DB/Parsers/ASTIdentifier.h>
namespace DB
{
/** Apply a single ALTER command (ADD / DROP / MODIFY COLUMN) to the in-memory
  * schema: the three column lists (ordinary, MATERIALIZED, ALIAS) and the map
  * of column default expressions. Throws on a missing column (DROP/MODIFY) or
  * an already existing one (ADD).
  */
void AlterCommand::apply(NamesAndTypesList & columns,
    NamesAndTypesList & materialized_columns,
    NamesAndTypesList & alias_columns,
    ColumnDefaults & column_defaults) const
{
    if (type == ADD)
    {
        /// The new name must not clash with any column in any of the three lists.
        const auto exists_in = [this] (const NamesAndTypesList & columns) {
            return columns.end() != std::find_if(columns.begin(), columns.end(),
                std::bind(namesEqual, std::cref(column_name), std::placeholders::_1));
        };

        if (exists_in(columns) ||
            exists_in(materialized_columns) ||
            exists_in(alias_columns))
        {
            throw Exception{
                "Cannot add column " + column_name + ": column with this name already exisits.",
                DB::ErrorCodes::ILLEGAL_COLUMN
            };
        }

        /// Insert the new column either at the end or right after `after_column`.
        const auto add_column = [this] (NamesAndTypesList & columns) {
            auto insert_it = columns.end();

            if (!after_column.empty())
            {
                /// Try to find, searching from the end, the first column named `after_column`,
                /// or whose name starts with `after_column` plus "." (e.g. "fruits.bananas").
                /// Names are considered equal if they match entirely, or if name_without_dot matches the part of the name before the dot.
                const auto reverse_insert_it = std::find_if(columns.rbegin(), columns.rend(),
                    std::bind(namesEqual, std::cref(after_column), std::placeholders::_1));

                if (reverse_insert_it == columns.rend())
                    throw Exception("Wrong column name. Cannot find column " + column_name + " to insert after",
                        DB::ErrorCodes::ILLEGAL_COLUMN);
                else
                {
                    /// base() returns an iterator that is already shifted one element to the right.
                    insert_it = reverse_insert_it.base();
                }
            }

            columns.emplace(insert_it, column_name, data_type);
        };

        /// The target list depends on the column's default specifier.
        if (default_type == ColumnDefaultType::Default)
            add_column(columns);
        else if (default_type == ColumnDefaultType::Materialized)
            add_column(materialized_columns);
        else if (default_type == ColumnDefaultType::Alias)
            add_column(alias_columns);
        else
            throw Exception{"Unknown ColumnDefaultType value", ErrorCodes::LOGICAL_ERROR};

        if (default_expression)
            column_defaults.emplace(column_name, ColumnDefault{default_type, default_expression});

        /// Slow: the whole list is copied on every call.
        columns = *DataTypeNested::expandNestedColumns(columns);
    }
    else if (type == DROP)
    {
        /// look for a column in list and remove it if present, also removing corresponding entry from column_defaults
        const auto remove_column = [&column_defaults, this] (NamesAndTypesList & columns) {
            auto removed = false;
            NamesAndTypesList::iterator column_it;

            /// Loop: namesEqual may match several entries (nested columns sharing a prefix).
            while (columns.end() != (column_it = std::find_if(columns.begin(), columns.end(),
                std::bind(namesEqual, std::cref(column_name), std::placeholders::_1))))
            {
                removed = true;
                column_it = columns.erase(column_it);
                column_defaults.erase(column_name);
            }

            return removed;
        };

        if (!remove_column(columns) &&
            !remove_column(materialized_columns) &&
            !remove_column(alias_columns))
        {
            throw Exception("Wrong column name. Cannot find column " + column_name + " to drop",
                DB::ErrorCodes::ILLEGAL_COLUMN);
        }
    }
    else if (type == MODIFY)
    {
        const auto it = column_defaults.find(column_name);
        const auto had_default_expr = it != column_defaults.end();
        const auto old_default_type = had_default_expr ? it->second.type : ColumnDefaultType{};

        /// allow conversion between DEFAULT and MATERIALIZED; any other specifier change is an error
        const auto default_materialized_conversion =
            (old_default_type == ColumnDefaultType::Default && default_type == ColumnDefaultType::Materialized) ||
            (old_default_type == ColumnDefaultType::Materialized && default_type == ColumnDefaultType::Default);

        if (old_default_type != default_type && !default_materialized_conversion)
            throw Exception{"Cannot change column default specifier from " + toString(old_default_type) +
                " to " + toString(default_type), ErrorCodes::INCORRECT_QUERY};

        /// find column or throw exception
        const auto find_column = [this] (NamesAndTypesList & columns) {
            const auto it = std::find_if(columns.begin(), columns.end(),
                std::bind(namesEqual, std::cref(column_name), std::placeholders::_1) );
            if (it == columns.end())
                throw Exception("Wrong column name. Cannot find column " + column_name + " to modify.",
                    DB::ErrorCodes::ILLEGAL_COLUMN);

            return it;
        };

        /// remove from the old list, add to the new list in case of DEFAULT <-> MATERIALIZED alteration
        if (default_materialized_conversion)
        {
            const auto was_default = old_default_type == ColumnDefaultType::Default;
            auto & old_columns = was_default ? columns : materialized_columns;
            auto & new_columns = was_default ? materialized_columns : columns;

            const auto column_it = find_column(old_columns);
            new_columns.emplace_back(*column_it);
            old_columns.erase(column_it);

            /// do not forget to change the default type of old column
            if (had_default_expr)
                column_defaults[column_name].type = default_type;
        }

        /// find column in one of three column lists (it has already been moved if a conversion took place)
        const auto column_it = find_column(
            default_type == ColumnDefaultType::Default ? columns :
            default_type == ColumnDefaultType::Materialized ? materialized_columns :
            alias_columns);

        column_it->type = data_type;

        /// remove, add or update default_expression
        if (!default_expression && had_default_expr)
            column_defaults.erase(column_name);
        else if (default_expression && !had_default_expr)
            column_defaults.emplace(column_name, ColumnDefault{default_type, default_expression});
        else if (had_default_expr)
            column_defaults[column_name].expression = default_expression;
    }
    else
        throw Exception("Wrong parameter type in ALTER query", ErrorCodes::LOGICAL_ERROR);
}
/** Apply every command in the list to the given schema.
  * All commands run against working copies first, so if any command throws,
  * the caller's lists and defaults are left untouched (all-or-nothing).
  */
void AlterCommands::apply(NamesAndTypesList & columns,
    NamesAndTypesList & materialized_columns,
    NamesAndTypesList & alias_columns,
    ColumnDefaults & column_defaults) const
{
    auto columns_copy = columns;
    auto materialized_copy = materialized_columns;
    auto alias_copy = alias_columns;
    auto defaults_copy = column_defaults;

    for (const auto & command : *this)
        command.apply(columns_copy, materialized_copy, alias_copy, defaults_copy);

    /// Commit: move the fully transformed copies back into the caller's objects.
    columns = std::move(columns_copy);
    materialized_columns = std::move(materialized_copy);
    alias_columns = std::move(alias_copy);
    column_defaults = std::move(defaults_copy);
}
/** Validate the command list against the table's current schema and deduce types:
  *  - checks that ADD targets do not exist and DROP/MODIFY targets do exist;
  *  - builds one big expression list of all default expressions (with "<name>_tmp"
  *    aliases plus to<Type>() conversions) and runs it through ExpressionAnalyzer
  *    to obtain the deduced types;
  *  - fills in missing data types on commands and wraps default expressions in a
  *    type conversion where the deduced type differs from the declared one;
  *  - may append extra MODIFY commands to *this for pre-existing defaulted columns
  *    whose expressions need a conversion after the ALTER.
  */
void AlterCommands::validate(IStorage * table, const Context & context)
{
    /// Snapshot the schema under the ALTER lock, then release the lock before the heavy work.
    auto lock = table->lockDataForAlter();

    auto columns = table->getColumnsList();
    columns.insert(std::end(columns), std::begin(table->alias_columns), std::end(table->alias_columns));
    auto defaults = table->column_defaults;

    lock.reset();

    /// Pairs of (column name, originating command); nullptr command marks a pre-existing defaulted column.
    std::vector<std::pair<String, AlterCommand *>> defaulted_columns{};

    /// One expression list for all default expressions, analyzed in a single pass below.
    ASTPtr default_expr_list{new ASTExpressionList};
    default_expr_list->children.reserve(defaults.size());

    for (AlterCommand & command : *this)
    {
        if (command.type == AlterCommand::ADD || command.type == AlterCommand::MODIFY)
        {
            if (command.type == AlterCommand::MODIFY)
            {
                /// MODIFY: drop the old definition from the snapshot; the new one is re-added below.
                const auto it = std::find_if(std::begin(columns), std::end(columns),
                    std::bind(AlterCommand::namesEqual, std::cref(command.column_name), std::placeholders::_1));

                if (it == std::end(columns))
                    throw Exception("Wrong column name. Cannot find column " + command.column_name + " to modify.",
                        DB::ErrorCodes::ILLEGAL_COLUMN);

                columns.erase(it);
                defaults.erase(command.column_name);
            }

            /// we're creating dummy DataTypeUInt8 in order to prevent the NullPointerException in ExpressionActions
            columns.emplace_back(command.column_name, command.data_type ? command.data_type : new DataTypeUInt8);

            if (command.default_expression)
            {
                if (command.data_type)
                {
                    /// Explicit type: evaluate the raw expression under a "_tmp" alias and
                    /// also a to<Type>(tmp) conversion under the real column name.
                    const auto & column_name = command.column_name;
                    const auto tmp_column_name = column_name + "_tmp";
                    const auto conversion_function_name = "to" + command.data_type->getName();

                    default_expr_list->children.emplace_back(setAlias(
                        makeASTFunction(conversion_function_name, ASTPtr{new ASTIdentifier{{}, tmp_column_name}}),
                        column_name));

                    default_expr_list->children.emplace_back(setAlias(command.default_expression->clone(), tmp_column_name));

                    defaulted_columns.emplace_back(command.column_name, &command);
                }
                else
                {
                    /// No explicit type: the column's type will be deduced from the expression itself.
                    default_expr_list->children.emplace_back(
                        setAlias(command.default_expression->clone(), command.column_name));

                    defaulted_columns.emplace_back(command.column_name, &command);
                }
            }
        }
        else if (command.type == AlterCommand::DROP)
        {
            /// Remove every matching entry (nested columns may match by prefix) and its default.
            auto found = false;
            for (auto it = std::begin(columns); it != std::end(columns);)
                if (AlterCommand::namesEqual(command.column_name, *it))
                {
                    found = true;
                    it = columns.erase(it);
                }
                else
                    ++it;

            for (auto it = std::begin(defaults); it != std::end(defaults);)
                if (AlterCommand::namesEqual(command.column_name, { it->first, nullptr }))
                    it = defaults.erase(it);
                else
                    ++it;

            if (!found)
                throw Exception("Wrong column name. Cannot find column " + command.column_name + " to drop.",
                    DB::ErrorCodes::ILLEGAL_COLUMN);
        }
    }

    /** Existing defaulted columns may require default expression extensions with a type conversion,
      * therefore we add them to defaulted_columns to allow further processing */
    for (const auto & col_def : defaults)
    {
        const auto & column_name = col_def.first;
        const auto column_it = std::find_if(columns.begin(), columns.end(), [&] (const NameAndTypePair & name_type) {
            return AlterCommand::namesEqual(column_name, name_type);
        });

        /// NOTE(review): column_it is dereferenced without a check against columns.end();
        /// presumably a default cannot survive the loop above without its column — verify.
        const auto tmp_column_name = column_name + "_tmp";
        const auto conversion_function_name = "to" + column_it->type->getName();

        default_expr_list->children.emplace_back(setAlias(
            makeASTFunction(conversion_function_name, ASTPtr{new ASTIdentifier{{}, tmp_column_name}}),
            column_name));

        default_expr_list->children.emplace_back(setAlias(col_def.second.expression->clone(), tmp_column_name));

        defaulted_columns.emplace_back(column_name, nullptr);
    }

    /// Analyze all default expressions at once; the sample block carries the deduced types.
    const auto actions = ExpressionAnalyzer{default_expr_list, context, columns}.getActions(true);
    const auto block = actions->getSampleBlock();

    /// set deduced types, modify default expression if necessary
    for (auto & defaulted_column : defaulted_columns)
    {
        const auto & column_name = defaulted_column.first;
        const auto command_ptr = defaulted_column.second;
        const auto & column = block.getByName(column_name);

        /// default expression on old column
        if (!command_ptr)
        {
            const auto & tmp_column = block.getByName(column_name + "_tmp");

            // column not specified explicitly in the ALTER query may require default_expression modification
            if (typeid(*column.type) != typeid(*tmp_column.type))
            {
                const auto it = defaults.find(column_name);
                /// Append a synthetic MODIFY that wraps the old expression in a conversion.
                this->push_back(AlterCommand{
                    AlterCommand::MODIFY, column_name, column.type, it->second.type,
                    makeASTFunction("to" + column.type->getName(), it->second.expression),
                });
            }
        }
        else if (command_ptr && command_ptr->data_type)
        {
            const auto & tmp_column = block.getByName(column_name + "_tmp");

            /// type mismatch between explicitly specified and deduced type, add conversion
            if (typeid(*column.type) != typeid(*tmp_column.type))
            {
                command_ptr->default_expression = makeASTFunction(
                    "to" + column.type->getName(),
                    command_ptr->default_expression->clone());
            }
        }
        else
        {
            /// just set deduced type
            command_ptr->data_type = column.type;
        }
    }
}
}

View File

@ -10,6 +10,15 @@
namespace DB
{
/// All "real" (stored) columns: the ordinary columns followed by the MATERIALIZED ones.
NamesAndTypesList ITableDeclaration::getColumnsList() const
{
    auto result = getColumnsListImpl();
    result.insert(result.end(), materialized_columns.begin(), materialized_columns.end());
    return result;
}
bool ITableDeclaration::hasRealColumn(const String & column_name) const
{
const NamesAndTypesList & real_columns = getColumnsList();
@ -39,13 +48,29 @@ NameAndTypePair ITableDeclaration::getRealColumn(const String & column_name) con
throw Exception("There is no column " + column_name + " in table.", ErrorCodes::NO_SUCH_COLUMN_IN_TABLE);
}
/// Look up a MATERIALIZED column by name; throws NO_SUCH_COLUMN_IN_TABLE if absent.
NameAndTypePair ITableDeclaration::getMaterializedColumn(const String & column_name) const
{
    for (const auto & materialized : materialized_columns)
        if (materialized.name == column_name)
            return materialized;

    throw Exception("There is no column " + column_name + " in table.", ErrorCodes::NO_SUCH_COLUMN_IN_TABLE);
}
/// True if a MATERIALIZED column with this exact name exists.
bool ITableDeclaration::hasMaterializedColumn(const String & column_name) const
{
    for (const auto & materialized : materialized_columns)
        if (materialized.name == column_name)
            return true;

    return false;
}
bool ITableDeclaration::hasColumn(const String & column_name) const
{
    return hasRealColumn(column_name); /// By default, assume the storage has no virtual columns.
}
NameAndTypePair ITableDeclaration::getColumn(const String & column_name) const
{
return getRealColumn(column_name); /// По умолчанию считаем, что виртуальных столбцов в сторадже нет.
@ -54,10 +79,9 @@ NameAndTypePair ITableDeclaration::getColumn(const String & column_name) const
const DataTypePtr ITableDeclaration::getDataTypeByName(const String & column_name) const
{
const NamesAndTypesList & names_and_types = getColumnsList();
for (NamesAndTypesList::const_iterator it = names_and_types.begin(); it != names_and_types.end(); ++it)
if (it->name == column_name)
return it->type;
for (const auto & column : getColumnsList())
if (column.name == column_name)
return column.type;
throw Exception("There is no column " + column_name + " in table.", ErrorCodes::NO_SUCH_COLUMN_IN_TABLE);
}
@ -66,16 +90,20 @@ const DataTypePtr ITableDeclaration::getDataTypeByName(const String & column_nam
Block ITableDeclaration::getSampleBlock() const
{
Block res;
const NamesAndTypesList & names_and_types = getColumnsList();
for (NamesAndTypesList::const_iterator it = names_and_types.begin(); it != names_and_types.end(); ++it)
{
ColumnWithNameAndType col;
col.name = it->name;
col.type = it->type;
col.column = col.type->createColumn();
res.insert(col);
}
for (const auto & col : getColumnsList())
res.insert({ col.type->createColumn(), col.type, col.name });
return res;
}
/// Empty (zero-row) block describing only the ordinary (non-MATERIALIZED) columns.
Block ITableDeclaration::getSampleBlockNonMaterialized() const
{
    Block sample;

    for (const auto & column : getColumnsListNonMaterialized())
        sample.insert({ column.type->createColumn(), column.type, column.name });

    return sample;
}
@ -96,16 +124,26 @@ static std::string listOfColumns(const NamesAndTypesList & available_columns)
typedef google::dense_hash_map<StringRef, const IDataType *, StringRefHash> NamesAndTypesMap;
static NamesAndTypesMap & getColumnsMapImpl(NamesAndTypesMap & res) { return res; }
static NamesAndTypesMap getColumnsMap(const NamesAndTypesList & available_columns)
template <typename Arg, typename... Args>
static NamesAndTypesMap & getColumnsMapImpl(NamesAndTypesMap & res, const Arg & arg, const Args &... args)
{
static_assert(std::is_same<Arg, NamesAndTypesList>::value, "getColumnsMap requires arguments of type NamesAndTypesList");
for (const auto & column : arg)
res.insert({column.name, column.type.get()});
return getColumnsMapImpl(res, args...);
}
template <typename... Args>
static NamesAndTypesMap getColumnsMap(const Args &... args)
{
NamesAndTypesMap res;
res.set_empty_key(StringRef());
for (NamesAndTypesList::const_iterator it = available_columns.begin(); it != available_columns.end(); ++it)
res.insert(NamesAndTypesMap::value_type(it->name, &*it->type));
return res;
return getColumnsMapImpl(res, args...);
}
@ -117,7 +155,7 @@ void ITableDeclaration::check(const Names & column_names) const
throw Exception("Empty list of columns queried. There are columns: " + listOfColumns(available_columns),
ErrorCodes::EMPTY_LIST_OF_COLUMNS_QUERIED);
const NamesAndTypesMap & columns_map = getColumnsMap(available_columns);
const auto columns_map = getColumnsMap(available_columns);
typedef google::dense_hash_set<StringRef, StringRefHash> UniqueStrings;
UniqueStrings unique_names;
@ -140,7 +178,7 @@ void ITableDeclaration::check(const Names & column_names) const
void ITableDeclaration::check(const NamesAndTypesList & columns) const
{
const NamesAndTypesList & available_columns = getColumnsList();
const NamesAndTypesMap & columns_map = getColumnsMap(available_columns);
const auto columns_map = getColumnsMap(available_columns);
typedef google::dense_hash_set<StringRef, StringRefHash> UniqueStrings;
UniqueStrings unique_names;
@ -168,7 +206,7 @@ void ITableDeclaration::check(const NamesAndTypesList & columns) const
void ITableDeclaration::check(const NamesAndTypesList & columns, const Names & column_names) const
{
const NamesAndTypesList & available_columns = getColumnsList();
const NamesAndTypesMap & available_columns_map = getColumnsMap(available_columns);
const auto available_columns_map = getColumnsMap(available_columns);
const NamesAndTypesMap & provided_columns_map = getColumnsMap(columns);
if (column_names.empty())
@ -205,7 +243,7 @@ void ITableDeclaration::check(const NamesAndTypesList & columns, const Names & c
void ITableDeclaration::check(const Block & block, bool need_all) const
{
const NamesAndTypesList & available_columns = getColumnsList();
const NamesAndTypesMap & columns_map = getColumnsMap(available_columns);
const auto columns_map = getColumnsMap(available_columns);
typedef std::unordered_set<String> NameSet;
NameSet names_in_block;

View File

@ -23,6 +23,9 @@ namespace DB
MergeTreeData::MergeTreeData(
const String & full_path_, NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
const Context & context_,
ASTPtr & primary_expr_ast_,
const String & date_column_name_, const ASTPtr & sampling_expression_,
@ -33,7 +36,7 @@ MergeTreeData::MergeTreeData(
const String & log_name_,
bool require_part_metadata_,
BrokenPartCallback broken_part_callback_)
: context(context_),
: ITableDeclaration{materialized_columns_, alias_columns_, column_defaults_}, context(context_),
date_column_name(date_column_name_), sampling_expression(sampling_expression_),
index_granularity(index_granularity_),
mode(mode_), sign_column(sign_column_),
@ -44,23 +47,27 @@ MergeTreeData::MergeTreeData(
log_name(log_name_), log(&Logger::get(log_name + " (Data)"))
{
/// Проверяем, что столбец с датой существует и имеет тип Date.
{
auto it = columns->begin();
for (; it != columns->end(); ++it)
const auto check_date_exists = [this] (const NamesAndTypesList & columns) {
for (const auto & column : columns)
{
if (it->name == date_column_name)
if (column.name == date_column_name)
{
if (!typeid_cast<const DataTypeDate *>(&*it->type))
if (!typeid_cast<const DataTypeDate *>(column.type.get()))
throw Exception("Date column (" + date_column_name + ") for storage of MergeTree family must have type Date."
" Provided column of type " + it->type->getName() + "."
" You may have separate column with type " + it->type->getName() + ".", ErrorCodes::BAD_TYPE_OF_FIELD);
break;
" Provided column of type " + column.type->getName() + "."
" You may have separate column with type " + column.type->getName() + ".", ErrorCodes::BAD_TYPE_OF_FIELD);
return true;
}
}
if (it == columns->end())
throw Exception("Date column (" + date_column_name + ") does not exist in table declaration.", ErrorCodes::NO_SUCH_COLUMN_IN_TABLE);
}
return false;
};
if (!check_date_exists(*columns) && !check_date_exists(materialized_columns))
throw Exception{
"Date column (" + date_column_name + ") does not exist in table declaration.",
ErrorCodes::NO_SUCH_COLUMN_IN_TABLE
};
/// создаём директорию, если её нет
Poco::File(full_path).createDirectories();
@ -74,9 +81,9 @@ MergeTreeData::MergeTreeData(
sort_descr.push_back(SortColumnDescription(name, 1));
}
primary_expr = ExpressionAnalyzer(primary_expr_ast, context, *columns).getActions(false);
primary_expr = ExpressionAnalyzer(primary_expr_ast, context, getColumnsList()).getActions(false);
ExpressionActionsPtr projected_expr = ExpressionAnalyzer(primary_expr_ast, context, *columns).getActions(true);
ExpressionActionsPtr projected_expr = ExpressionAnalyzer(primary_expr_ast, context, getColumnsList()).getActions(true);
primary_key_sample = projected_expr->getSampleBlock();
}
@ -355,8 +362,11 @@ void MergeTreeData::dropAllData()
void MergeTreeData::checkAlter(const AlterCommands & params)
{
/// Проверим, что указанные преобразования можно совершить над списком столбцов без учета типов.
NamesAndTypesList new_columns = *columns;
params.apply(new_columns);
auto new_columns = *columns;
auto new_materialized_columns = materialized_columns;
auto new_alias_columns = alias_columns;
auto new_column_defaults = column_defaults;
params.apply(new_columns, new_materialized_columns, new_alias_columns, new_column_defaults);
/// Список столбцов, которые нельзя трогать.
/// sampling_expression можно не учитывать, потому что он обязан содержаться в первичном ключе.
@ -373,7 +383,11 @@ void MergeTreeData::checkAlter(const AlterCommands & params)
/// Проверим, что преобразования типов возможны.
ExpressionActionsPtr unused_expression;
NameToNameMap unused_map;
createConvertExpression(nullptr, *columns, new_columns, unused_expression, unused_map);
/// augment plain columns with materialized columns for convert expression creation
new_columns.insert(std::end(new_columns),
std::begin(new_materialized_columns), std::end(new_materialized_columns));
createConvertExpression(nullptr, getColumnsList(), new_columns, unused_expression, unused_map);
}
void MergeTreeData::createConvertExpression(const DataPartPtr & part, const NamesAndTypesList & old_columns, const NamesAndTypesList & new_columns,

View File

@ -7,6 +7,7 @@
#include <DB/DataStreams/CollapsingSortedBlockInputStream.h>
#include <DB/DataStreams/SummingSortedBlockInputStream.h>
#include <DB/DataStreams/AggregatingSortedBlockInputStream.h>
#include <DB/DataStreams/MaterializingBlockInputStream.h>
namespace DB
@ -358,7 +359,8 @@ MergeTreeData::DataPartPtr MergeTreeDataMerger::mergeParts(
__sync_add_and_fetch(&merge_entry->bytes_read_uncompressed, value.bytes);
});
src_streams.push_back(new ExpressionBlockInputStream(input.release(), data.getPrimaryExpression()));
src_streams.push_back(new MaterializingBlockInputStream{
new ExpressionBlockInputStream(input.release(), data.getPrimaryExpression())});
sum_rows_approx += parts[i]->size * data.index_granularity;
}

View File

@ -5,6 +5,8 @@
#include <DB/DataTypes/DataTypesNumberFixed.h>
#include <DB/DataTypes/DataTypeFixedString.h>
#include <DB/DataTypes/DataTypeAggregateFunction.h>
#include <DB/DataTypes/DataTypeArray.h>
#include <DB/DataTypes/DataTypeNested.h>
#include <DB/IO/CompressedReadBuffer.h>
#include <DB/IO/HashingReadBuffer.h>
#include <DB/Columns/ColumnsNumber.h>

View File

@ -425,8 +425,9 @@ void StorageBuffer::alter(const AlterCommands & params, const String & database_
/// Чтобы не осталось блоков старой структуры.
optimize();
params.apply(*columns);
InterpreterAlterQuery::updateMetadata(database_name, table_name, *columns, context);
params.apply(*columns, materialized_columns, alias_columns, column_defaults);
InterpreterAlterQuery::updateMetadata(database_name, table_name,
*columns, materialized_columns, alias_columns, column_defaults, context);
}
}

View File

@ -30,26 +30,33 @@ StoragePtr StorageChunkMerger::create(
const std::string & this_database_,
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
const String & source_database_,
const String & table_name_regexp_,
const std::string & destination_name_prefix_,
size_t chunks_to_merge_,
Context & context_)
{
return (new StorageChunkMerger(this_database_, name_, columns_, source_database_, table_name_regexp_, destination_name_prefix_, chunks_to_merge_, context_))->thisPtr();
return (new StorageChunkMerger{
this_database_, name_, columns_, materialized_columns_, alias_columns_, column_defaults_,
source_database_, table_name_regexp_, destination_name_prefix_,
chunks_to_merge_, context_
})->thisPtr();
}
NameAndTypePair StorageChunkMerger::getColumn(const String & column_name) const
{
if (column_name == _table_column_name)
return NameAndTypePair(_table_column_name, new DataTypeString);
return getRealColumn(column_name);
return IStorage::getColumn(column_name);
}
bool StorageChunkMerger::hasColumn(const String & column_name) const
{
if (column_name == _table_column_name) return true;
return hasRealColumn(column_name);
return IStorage::hasColumn(column_name);
}
BlockInputStreams StorageChunkMerger::read(
@ -222,12 +229,16 @@ StorageChunkMerger::StorageChunkMerger(
const std::string & this_database_,
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
const String & source_database_,
const String & table_name_regexp_,
const std::string & destination_name_prefix_,
size_t chunks_to_merge_,
Context & context_)
: this_database(this_database_), name(name_), columns(columns_), source_database(source_database_),
: IStorage{materialized_columns_, alias_columns_, column_defaults_},
this_database(this_database_), name(name_), columns(columns_), source_database(source_database_),
table_name_regexp(table_name_regexp_), destination_name_prefix(destination_name_prefix_), chunks_to_merge(chunks_to_merge_),
context(context_), settings(context.getSettings()),
log(&Logger::get("StorageChunkMerger")), shutdown_called(false)
@ -359,7 +370,7 @@ bool StorageChunkMerger::mergeChunks(const Storages & chunks)
/// Объединим множества столбцов сливаемых чанков.
ColumnsMap known_columns_types;
for (const NameAndTypePair & column : *columns)
for (const NameAndTypePair & column : getColumnsList())
known_columns_types.insert(std::make_pair(column.name, column.type));
NamesAndTypesListPtr required_columns = new NamesAndTypesList;
@ -458,7 +469,10 @@ bool StorageChunkMerger::mergeChunks(const Storages & chunks)
processed_stage,
DEFAULT_MERGE_BLOCK_SIZE);
BlockInputStreamPtr input = new AddingDefaultBlockInputStream(new ConcatBlockInputStream(input_streams), required_columns);
BlockInputStreamPtr input{new AddingDefaultBlockInputStream{
new ConcatBlockInputStream{input_streams},
required_columns, src_storage->column_defaults, context
}};
input->readPrefix();
output->writePrefix();

View File

@ -10,16 +10,23 @@
namespace DB
{
StoragePtr StorageChunks::create(
const std::string & path_,
const std::string & name_,
const std::string & database_name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
Context & context_,
bool attach)
{
return (new StorageChunks(path_, name_, database_name_, columns_, context_, attach))->thisPtr();
return (new StorageChunks{
path_, name_, database_name_, columns_,
materialized_columns_, alias_columns_, column_defaults_,
context_, attach
})->thisPtr();
}
void StorageChunks::addReference()
@ -92,10 +99,10 @@ BlockInputStreams StorageChunks::readFromChunk(
{
size_t mark1;
size_t mark2;
{
Poco::ScopedReadRWLock lock(rwlock);
if (!chunk_indices.count(chunk_name))
throw Exception("No chunk " + chunk_name + " in table " + name, ErrorCodes::CHUNK_NOT_FOUND);
size_t index = chunk_indices[chunk_name];
@ -105,7 +112,7 @@ BlockInputStreams StorageChunks::readFromChunk(
return read(mark1, mark2, column_names, query, settings, processed_stage, max_block_size, threads);
}
BlockOutputStreamPtr StorageChunks::writeToNewChunk(
const std::string & chunk_name)
{
@ -121,19 +128,24 @@ BlockOutputStreamPtr StorageChunks::writeToNewChunk(
chunk_num_to_marks.push_back(mark);
chunk_names.push_back(chunk_name);
}
return StorageLog::write(nullptr);
}
StorageChunks::StorageChunks(
const std::string & path_,
const std::string & name_,
const std::string & database_name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
Context & context_,
bool attach)
:
StorageLog(path_, name_, columns_, context_.getSettings().max_compress_block_size),
StorageLog(path_, name_, columns_,
materialized_columns_, alias_columns_, column_defaults_,
context_.getSettings().max_compress_block_size),
database_name(database_name_),
reference_counter(path_ + escapeForFileName(name_) + "/refcount.txt"),
context(context_),
@ -173,7 +185,7 @@ bool StorageChunks::hasColumn(const String & column_name) const
{
if (column_name == _table_column_name)
return true;
return hasRealColumn(column_name);
return IStorage::hasColumn(column_name);
}
std::pair<String, size_t> StorageChunks::getTableFromMark(size_t mark) const
@ -186,15 +198,15 @@ std::pair<String, size_t> StorageChunks::getTableFromMark(size_t mark) const
last = chunk_num_to_marks[pos + 1] - 1;
return std::make_pair(chunk_names[pos], last);
}
void StorageChunks::loadIndex()
{
loadMarks();
Poco::ScopedWriteRWLock lock(rwlock);
String index_path = path + escapeForFileName(name) + "/chunks.chn";
if (!Poco::File(index_path).exists())
return;
@ -203,10 +215,10 @@ void StorageChunks::loadIndex()
{
String name;
size_t mark;
readStringBinary(name, index);
readIntBinary<UInt64>(mark, index);
chunk_indices[name] = chunk_num_to_marks.size();
chunk_num_to_marks.push_back(mark);
chunk_names.push_back(name);
@ -226,14 +238,14 @@ void StorageChunks::appendChunkToIndex(const std::string & chunk_name, size_t ma
/// Issue a DROP TABLE for this table itself via the query interpreter.
void StorageChunks::dropThis()
{
    LOG_TRACE(log, "Table " << name << " will drop itself.");

    /// The raw pointer is immediately handed to ASTPtr (shared ptr), which owns it from here on.
    ASTDropQuery * query = new ASTDropQuery();
    ASTPtr query_ptr = query;
    query->detach = false;
    query->if_exists = false;
    query->database = database_name;
    query->table = name;

    InterpreterDropQuery interpreter(query_ptr, context);
    interpreter.execute();
}

View File

@ -5,7 +5,6 @@
#include <DB/Storages/VirtualColumnFactory.h>
#include <DB/Storages/Distributed/DistributedBlockOutputStream.h>
#include <DB/Storages/Distributed/DirectoryMonitor.h>
#include <DB/Storages/Distributed/queryToString.h>
#include <DB/Common/escapeForFileName.h>
#include <DB/Interpreters/InterpreterSelectQuery.h>
@ -70,9 +69,36 @@ StorageDistributed::StorageDistributed(
createDirectoryMonitors();
}
StorageDistributed::StorageDistributed(
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
const String & remote_database_,
const String & remote_table_,
Cluster & cluster_,
Context & context_,
const ASTPtr & sharding_key_,
const String & data_path_)
: IStorage{materialized_columns_, alias_columns_, column_defaults_},
name(name_), columns(columns_),
remote_database(remote_database_), remote_table(remote_table_),
context(context_), cluster(cluster_),
sharding_key_expr(sharding_key_ ? ExpressionAnalyzer(sharding_key_, context, *columns).getActions(false) : nullptr),
sharding_key_column_name(sharding_key_ ? sharding_key_->getColumnName() : String{}),
write_enabled(cluster.getLocalNodesNum() + cluster.pools.size() < 2 || sharding_key_),
path(data_path_ + escapeForFileName(name) + '/')
{
createDirectoryMonitors();
}
StoragePtr StorageDistributed::create(
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
const String & remote_database_,
const String & remote_table_,
const String & cluster_name,
@ -83,7 +109,9 @@ StoragePtr StorageDistributed::create(
context_.initClusters();
return (new StorageDistributed{
name_, columns_, remote_database_, remote_table_,
name_, columns_,
materialized_columns_, alias_columns_, column_defaults_,
remote_database_, remote_table_,
context_.getCluster(cluster_name), context_,
sharding_key_, data_path_
})->thisPtr();
@ -100,7 +128,8 @@ StoragePtr StorageDistributed::create(
{
auto res = new StorageDistributed{
name_, columns_, remote_database_,
remote_table_, *owned_cluster_, context_};
remote_table_, *owned_cluster_, context_
};
/// Захватываем владение объектом-кластером.
res->owned_cluster = owned_cluster_;
@ -174,8 +203,9 @@ BlockOutputStreamPtr StorageDistributed::write(ASTPtr query)
void StorageDistributed::alter(const AlterCommands & params, const String & database_name, const String & table_name, Context & context)
{
auto lock = lockStructureForAlter();
params.apply(*columns);
InterpreterAlterQuery::updateMetadata(database_name, table_name, *columns, context);
params.apply(*columns, materialized_columns, alias_columns, column_defaults);
InterpreterAlterQuery::updateMetadata(database_name, table_name,
*columns, materialized_columns, alias_columns, column_defaults, context);
}
void StorageDistributed::shutdown()
@ -193,7 +223,7 @@ NameAndTypePair StorageDistributed::getColumn(const String & column_name) const
bool StorageDistributed::hasColumn(const String & column_name) const
{
return VirtualColumnFactory::hasColumn(column_name) || hasRealColumn(column_name);
return VirtualColumnFactory::hasColumn(column_name) || IStorage::hasColumn(column_name);
}
void StorageDistributed::createDirectoryMonitor(const std::string & name)

View File

@ -73,15 +73,24 @@ StoragePtr StorageFactory::get(
Context & context,
ASTPtr & query,
NamesAndTypesListPtr columns,
const NamesAndTypesList & materialized_columns,
const NamesAndTypesList & alias_columns,
const ColumnDefaults & column_defaults,
bool attach) const
{
if (name == "Log")
{
return StorageLog::create(data_path, table_name, columns, context.getSettings().max_compress_block_size);
return StorageLog::create(
data_path, table_name, columns,
materialized_columns, alias_columns, column_defaults,
context.getSettings().max_compress_block_size);
}
else if (name == "Chunks")
{
return StorageChunks::create(data_path, table_name, database_name, columns, context, attach);
return StorageChunks::create(
data_path, table_name, database_name, columns,
materialized_columns, alias_columns, column_defaults,
context, attach);
}
else if (name == "ChunkRef")
{
@ -89,11 +98,16 @@ StoragePtr StorageFactory::get(
}
else if (name == "View")
{
return StorageView::create(table_name, database_name, context, query, columns);
return StorageView::create(
table_name, database_name, context, query, columns,
materialized_columns, alias_columns, column_defaults);
}
else if (name == "MaterializedView")
{
return StorageMaterializedView::create(table_name, database_name, context, query, columns, attach);
return StorageMaterializedView::create(
table_name, database_name, context, query, columns,
materialized_columns, alias_columns, column_defaults,
attach);
}
else if (name == "ChunkMerger")
{
@ -119,8 +133,12 @@ StoragePtr StorageFactory::get(
if (args.size() > 3)
destination_name_prefix = typeid_cast<ASTIdentifier &>(*args[3]).name;
return StorageChunkMerger::create(database_name, table_name, columns, source_database, source_table_name_regexp, destination_name_prefix, chunks_to_merge, context);
} while(false);
return StorageChunkMerger::create(
database_name, table_name, columns,
materialized_columns, alias_columns, column_defaults,
source_database, source_table_name_regexp,
destination_name_prefix, chunks_to_merge, context);
} while (false);
throw Exception("Storage ChunkMerger requires from 3 to 4 parameters:"
" source database, regexp for source table names, number of chunks to merge, [destination tables name prefix].",
@ -128,15 +146,18 @@ StoragePtr StorageFactory::get(
}
else if (name == "TinyLog")
{
return StorageTinyLog::create(data_path, table_name, columns, attach, context.getSettings().max_compress_block_size);
return StorageTinyLog::create(
data_path, table_name, columns,
materialized_columns, alias_columns, column_defaults,
attach, context.getSettings().max_compress_block_size);
}
else if (name == "Memory")
{
return StorageMemory::create(table_name, columns);
return StorageMemory::create(table_name, columns, materialized_columns, alias_columns, column_defaults);
}
else if (name == "Null")
{
return StorageNull::create(table_name, columns);
return StorageNull::create(table_name, columns, materialized_columns, alias_columns, column_defaults);
}
else if (name == "Merge")
{
@ -160,7 +181,10 @@ StoragePtr StorageFactory::get(
String source_database = reinterpretAsIdentifier(args[0], local_context).name;
String table_name_regexp = safeGet<const String &>(typeid_cast<ASTLiteral &>(*args[1]).value);
return StorageMerge::create(table_name, columns, source_database, table_name_regexp, context);
return StorageMerge::create(
table_name, columns,
materialized_columns, alias_columns, column_defaults,
source_database, table_name_regexp, context);
}
else if (name == "Distributed")
{
@ -189,7 +213,10 @@ StoragePtr StorageFactory::get(
const auto & sharding_key = args.size() == 4 ? args[3] : nullptr;
return StorageDistributed::create(
table_name, columns, remote_database, remote_table, cluster_name, context, sharding_key, data_path);
table_name, columns,
materialized_columns, alias_columns, column_defaults,
remote_database, remote_table, cluster_name,
context, sharding_key, data_path);
}
else if (name == "Buffer")
{
@ -343,12 +370,16 @@ StoragePtr StorageFactory::get(
throw Exception("Index granularity must be a positive integer", ErrorCodes::BAD_ARGUMENTS);
if (replicated)
return StorageReplicatedMergeTree::create(zookeeper_path, replica_name, attach, data_path, database_name, table_name,
columns, context, primary_expr_list, date_column_name,
return StorageReplicatedMergeTree::create(
zookeeper_path, replica_name, attach, data_path, database_name, table_name,
columns, materialized_columns, alias_columns, column_defaults,
context, primary_expr_list, date_column_name,
sampling_expression, index_granularity, mode, sign_column_name);
else
return StorageMergeTree::create(data_path, database_name, table_name,
columns, context, primary_expr_list, date_column_name,
return StorageMergeTree::create(
data_path, database_name, table_name,
columns, materialized_columns, alias_columns, column_defaults,
context, primary_expr_list, date_column_name,
sampling_expression, index_granularity, mode, sign_column_name);
}
else

View File

@ -250,8 +250,8 @@ LogBlockOutputStream::LogBlockOutputStream(StorageLog & storage_)
: storage(storage_),
lock(storage.rwlock), marks_stream(storage.marks_file.path(), 4096, O_APPEND | O_CREAT | O_WRONLY)
{
for (NamesAndTypesList::const_iterator it = storage.columns->begin(); it != storage.columns->end(); ++it)
addStream(it->name, *it->type);
for (const auto & column : storage.getColumnsList())
addStream(column.name, *column.type);
}
@ -409,8 +409,17 @@ void LogBlockOutputStream::writeMarks(MarksForColumns marks)
}
StorageLog::StorageLog(const std::string & path_, const std::string & name_, NamesAndTypesListPtr columns_, size_t max_compress_block_size_)
: path(path_), name(name_), columns(columns_), loaded_marks(false), max_compress_block_size(max_compress_block_size_),
StorageLog::StorageLog(
const std::string & path_,
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
size_t max_compress_block_size_)
: IStorage{materialized_columns_, alias_columns_, column_defaults_},
path(path_), name(name_), columns(columns_),
loaded_marks(false), max_compress_block_size(max_compress_block_size_),
file_checker(path + escapeForFileName(name) + '/' + "sizes.json", *this)
{
if (columns->empty())
@ -419,15 +428,39 @@ StorageLog::StorageLog(const std::string & path_, const std::string & name_, Nam
/// создаём файлы, если их нет
Poco::File(path + escapeForFileName(name) + '/').createDirectories();
for (NamesAndTypesList::const_iterator it = columns->begin(); it != columns->end(); ++it)
addFile(it->name, *it->type);
for (const auto & column : getColumnsList())
addFile(column.name, *column.type);
marks_file = Poco::File(path + escapeForFileName(name) + '/' + DBMS_STORAGE_LOG_MARKS_FILE_NAME);
}
StoragePtr StorageLog::create(const std::string & path_, const std::string & name_, NamesAndTypesListPtr columns_, size_t max_compress_block_size_)
StoragePtr StorageLog::create(
const std::string & path_,
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
size_t max_compress_block_size_)
{
return (new StorageLog(path_, name_, columns_, max_compress_block_size_))->thisPtr();
return (new StorageLog{
path_, name_, columns_,
materialized_columns_, alias_columns_, column_defaults_,
max_compress_block_size_
})->thisPtr();
}
StoragePtr StorageLog::create(
const std::string & path_,
const std::string & name_,
NamesAndTypesListPtr columns_,
size_t max_compress_block_size_)
{
return (new StorageLog{
path_, name_, columns_,
{}, {}, {},
max_compress_block_size_
})->thisPtr();
}

View File

@ -11,15 +11,35 @@
namespace DB
{
StoragePtr StorageMaterializedView::create(const String & table_name_, const String & database_name_,
Context & context_, ASTPtr & query_, NamesAndTypesListPtr columns_, bool attach_)
StoragePtr StorageMaterializedView::create(
const String & table_name_,
const String & database_name_,
Context & context_,
ASTPtr & query_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
bool attach_)
{
return (new StorageMaterializedView(table_name_, database_name_, context_, query_, columns_, attach_))->thisPtr();
return (new StorageMaterializedView{
table_name_, database_name_, context_, query_,
columns_, materialized_columns_, alias_columns_, column_defaults_,
attach_
})->thisPtr();
}
StorageMaterializedView::StorageMaterializedView(const String & table_name_, const String & database_name_,
Context & context_, ASTPtr & query_, NamesAndTypesListPtr columns_, bool attach_):
StorageView(table_name_, database_name_, context_, query_, columns_)
StorageMaterializedView::StorageMaterializedView(
const String & table_name_,
const String & database_name_,
Context & context_,
ASTPtr & query_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
bool attach_)
: StorageView{table_name_, database_name_, context_, query_, columns_, materialized_columns_, alias_columns_, column_defaults_}
{
ASTCreateQuery & create = typeid_cast<ASTCreateQuery &>(*query_);
@ -73,7 +93,7 @@ NameAndTypePair StorageMaterializedView::getColumn(const String & column_name) c
bool StorageMaterializedView::hasColumn(const String & column_name) const
{
return VirtualColumnFactory::hasColumn(column_name) || hasRealColumn(column_name);
return VirtualColumnFactory::hasColumn(column_name) || IStorage::hasColumn(column_name);
}
BlockInputStreams StorageMaterializedView::read(

View File

@ -53,14 +53,46 @@ void MemoryBlockOutputStream::write(const Block & block)
}
StorageMemory::StorageMemory(const std::string & name_, NamesAndTypesListPtr columns_)
StorageMemory::StorageMemory(
const std::string & name_,
NamesAndTypesListPtr columns_)
: name(name_), columns(columns_)
{
}
StoragePtr StorageMemory::create(const std::string & name_, NamesAndTypesListPtr columns_)
StorageMemory::StorageMemory(
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_)
: IStorage{materialized_columns_, alias_columns_, column_defaults_},
name(name_), columns(columns_)
{
return (new StorageMemory(name_, columns_))->thisPtr();
}
StoragePtr StorageMemory::create(
const std::string & name_,
NamesAndTypesListPtr columns_)
{
return (new StorageMemory{
name_, columns_
})->thisPtr();
}
StoragePtr StorageMemory::create(
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_)
{
return (new StorageMemory{
name_, columns_,
materialized_columns_, alias_columns_, column_defaults_
})->thisPtr();
}
@ -78,7 +110,7 @@ BlockInputStreams StorageMemory::read(
Poco::ScopedLock<Poco::FastMutex> lock(mutex);
size_t size = data.size();
if (threads > size)
threads = size;
@ -91,14 +123,14 @@ BlockInputStreams StorageMemory::read(
std::advance(begin, thread * size / threads);
std::advance(end, (thread + 1) * size / threads);
res.push_back(new MemoryBlockInputStream(column_names, begin, end));
}
return res;
}
BlockOutputStreamPtr StorageMemory::write(
ASTPtr query)
{

View File

@ -13,7 +13,23 @@ StorageMerge::StorageMerge(
const String & source_database_,
const String & table_name_regexp_,
const Context & context_)
: name(name_), columns(columns_), source_database(source_database_), table_name_regexp(table_name_regexp_), context(context_)
: name(name_), columns(columns_), source_database(source_database_),
table_name_regexp(table_name_regexp_), context(context_)
{
}
StorageMerge::StorageMerge(
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
const String & source_database_,
const String & table_name_regexp_,
const Context & context_)
: IStorage{materialized_columns_, alias_columns_, column_defaults_},
name(name_), columns(columns_), source_database(source_database_),
table_name_regexp(table_name_regexp_), context(context_)
{
}
@ -24,7 +40,26 @@ StoragePtr StorageMerge::create(
const String & table_name_regexp_,
const Context & context_)
{
return (new StorageMerge(name_, columns_, source_database_, table_name_regexp_, context_))->thisPtr();
return (new StorageMerge{
name_, columns_,
source_database_, table_name_regexp_, context_
})->thisPtr();
}
StoragePtr StorageMerge::create(
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
const String & source_database_,
const String & table_name_regexp_,
const Context & context_)
{
return (new StorageMerge{
name_, columns_, materialized_columns_, alias_columns_, column_defaults_,
source_database_, table_name_regexp_, context_
})->thisPtr();
}
NameAndTypePair StorageMerge::getColumn(const String & column_name) const
@ -33,12 +68,12 @@ NameAndTypePair StorageMerge::getColumn(const String & column_name) const
if (type)
return NameAndTypePair(column_name, type);
return getRealColumn(column_name);
return IStorage::getColumn(column_name);
}
bool StorageMerge::hasColumn(const String & column_name) const
{
return VirtualColumnFactory::hasColumn(column_name) || hasRealColumn(column_name);
return VirtualColumnFactory::hasColumn(column_name) || IStorage::hasColumn(column_name);
}
BlockInputStreams StorageMerge::read(
@ -168,9 +203,9 @@ void StorageMerge::getSelectedTables(StorageVector & selected_tables) const
void StorageMerge::alter(const AlterCommands & params, const String & database_name, const String & table_name, Context & context)
{
auto lock = lockStructureForAlter();
params.apply(*columns);
InterpreterAlterQuery::updateMetadata(database_name, table_name, *columns, context);
params.apply(*columns, materialized_columns, alias_columns, column_defaults);
InterpreterAlterQuery::updateMetadata(database_name, table_name, *columns,
materialized_columns, alias_columns, column_defaults, context);
}
}

View File

@ -7,20 +7,30 @@
namespace DB
{
StorageMergeTree::StorageMergeTree(const String & path_, const String & database_name_, const String & table_name_,
NamesAndTypesListPtr columns_,
Context & context_,
ASTPtr & primary_expr_ast_,
const String & date_column_name_,
const ASTPtr & sampling_expression_, /// nullptr, если семплирование не поддерживается.
size_t index_granularity_,
MergeTreeData::Mode mode_,
const String & sign_column_,
const MergeTreeSettings & settings_)
: path(path_), database_name(database_name_), table_name(table_name_), full_path(path + escapeForFileName(table_name) + '/'),
StorageMergeTree::StorageMergeTree(
const String & path_,
const String & database_name_,
const String & table_name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
Context & context_,
ASTPtr & primary_expr_ast_,
const String & date_column_name_,
const ASTPtr & sampling_expression_, /// nullptr, если семплирование не поддерживается.
size_t index_granularity_,
MergeTreeData::Mode mode_,
const String & sign_column_,
const MergeTreeSettings & settings_)
: IStorage{materialized_columns_, alias_columns_, column_defaults_},
path(path_), database_name(database_name_), table_name(table_name_), full_path(path + escapeForFileName(table_name) + '/'),
increment(full_path + "increment.txt"), context(context_), background_pool(context_.getBackgroundPool()),
data(full_path, columns_, context_, primary_expr_ast_, date_column_name_, sampling_expression_,
index_granularity_,mode_, sign_column_, settings_, database_name_ + "." + table_name, false),
data(full_path, columns_,
materialized_columns_, alias_columns_, column_defaults_,
context_, primary_expr_ast_, date_column_name_,
sampling_expression_, index_granularity_,mode_, sign_column_,
settings_, database_name_ + "." + table_name, false),
reader(data), writer(data), merger(data),
log(&Logger::get(database_name_ + "." + table_name + " (StorageMergeTree)")),
shutdown_called(false)
@ -31,6 +41,34 @@ StorageMergeTree::StorageMergeTree(const String & path_, const String & database
data.clearOldParts();
}
StoragePtr StorageMergeTree::create(
const String & path_, const String & database_name_, const String & table_name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
Context & context_,
ASTPtr & primary_expr_ast_,
const String & date_column_name_,
const ASTPtr & sampling_expression_,
size_t index_granularity_,
MergeTreeData::Mode mode_,
const String & sign_column_,
const MergeTreeSettings & settings_)
{
auto res = new StorageMergeTree{
path_, database_name_, table_name_,
columns_, materialized_columns_, alias_columns_, column_defaults_,
context_, primary_expr_ast_, date_column_name_,
sampling_expression_, index_granularity_, mode_, sign_column_, settings_
};
StoragePtr res_ptr = res->thisPtr();
res->merge_task_handle = res->background_pool.addTask(std::bind(&StorageMergeTree::mergeTask, res, std::placeholders::_1));
return res_ptr;
}
StoragePtr StorageMergeTree::create(
const String & path_, const String & database_name_, const String & table_name_,
NamesAndTypesListPtr columns_,
@ -43,14 +81,10 @@ StoragePtr StorageMergeTree::create(
const String & sign_column_,
const MergeTreeSettings & settings_)
{
StorageMergeTree * res = new StorageMergeTree(
path_, database_name_, table_name_, columns_, context_, primary_expr_ast_, date_column_name_,
return create(path_, database_name_, table_name_,
columns_, {}, {}, {},
context_, primary_expr_ast_, date_column_name_,
sampling_expression_, index_granularity_, mode_, sign_column_, settings_);
StoragePtr res_ptr = res->thisPtr();
res->merge_task_handle = res->background_pool.addTask(std::bind(&StorageMergeTree::mergeTask, res, std::placeholders::_1));
return res_ptr;
}
void StorageMergeTree::shutdown()
@ -113,22 +147,38 @@ void StorageMergeTree::alter(const AlterCommands & params, const String & databa
data.checkAlter(params);
NamesAndTypesList new_columns = data.getColumnsList();
params.apply(new_columns);
auto new_columns = data.getColumnsListNonMaterialized();
auto new_materialized_columns = data.materialized_columns;
auto new_alias_columns = data.alias_columns;
auto new_column_defaults = data.column_defaults;
params.apply(new_columns, new_materialized_columns, new_alias_columns, new_column_defaults);
auto columns_for_parts = new_columns;
columns_for_parts.insert(std::end(columns_for_parts),
std::begin(new_materialized_columns), std::end(new_materialized_columns));
MergeTreeData::DataParts parts = data.getDataParts();
std::vector<MergeTreeData::AlterDataPartTransactionPtr> transactions;
for (const MergeTreeData::DataPartPtr & part : parts)
{
auto transaction = data.alterDataPart(part, new_columns);
if (transaction)
if (auto transaction = data.alterDataPart(part, columns_for_parts))
transactions.push_back(std::move(transaction));
}
auto table_hard_lock = lockStructureForAlter();
InterpreterAlterQuery::updateMetadata(database_name, table_name, new_columns, context);
InterpreterAlterQuery::updateMetadata(database_name, table_name, new_columns,
new_materialized_columns, new_alias_columns, new_column_defaults, context);
materialized_columns = new_materialized_columns;
alias_columns = new_alias_columns;
column_defaults = new_column_defaults;
data.setColumnsList(new_columns);
data.materialized_columns = std::move(new_materialized_columns);
data.alias_columns = std::move(new_alias_columns);
data.column_defaults = std::move(new_column_defaults);
for (auto & transaction : transactions)
{

View File

@ -26,6 +26,9 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree(
bool attach,
const String & path_, const String & database_name_, const String & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
Context & context_,
ASTPtr & primary_expr_ast_,
const String & date_column_name_,
@ -34,14 +37,17 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree(
MergeTreeData::Mode mode_,
const String & sign_column_,
const MergeTreeSettings & settings_)
:
context(context_), zookeeper(context.getZooKeeper()), database_name(database_name_),
: IStorage{materialized_columns_, alias_columns_, column_defaults_}, context(context_),
zookeeper(context.getZooKeeper()), database_name(database_name_),
table_name(name_), full_path(path_ + escapeForFileName(table_name) + '/'),
zookeeper_path(context.getMacros().expand(zookeeper_path_)),
replica_name(context.getMacros().expand(replica_name_)),
data( full_path, columns_, context_, primary_expr_ast_, date_column_name_, sampling_expression_,
index_granularity_, mode_, sign_column_, settings_, database_name + "." + table_name, true,
std::bind(&StorageReplicatedMergeTree::enqueuePartForCheck, this, std::placeholders::_1)),
data(full_path, columns_,
materialized_columns_, alias_columns_, column_defaults_,
context_, primary_expr_ast_, date_column_name_,
sampling_expression_, index_granularity_, mode_, sign_column_,
settings_, database_name_ + "." + table_name, true,
std::bind(&StorageReplicatedMergeTree::enqueuePartForCheck, this, std::placeholders::_1)),
reader(data), writer(data), merger(data), fetcher(data),
log(&Logger::get(database_name + "." + table_name + " (StorageReplicatedMergeTree)")),
shutdown_event(false)
@ -106,7 +112,9 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree(
{
LOG_INFO(log, "Have unreplicated data");
unreplicated_data.reset(new MergeTreeData(unreplicated_path, columns_, context_, primary_expr_ast_,
unreplicated_data.reset(new MergeTreeData(unreplicated_path, columns_,
materialized_columns_, alias_columns_, column_defaults_,
context_, primary_expr_ast_,
date_column_name_, sampling_expression_, index_granularity_, mode_, sign_column_, settings_,
database_name_ + "." + table_name + "[unreplicated]", false));
@ -129,6 +137,9 @@ StoragePtr StorageReplicatedMergeTree::create(
bool attach,
const String & path_, const String & database_name_, const String & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
Context & context_,
ASTPtr & primary_expr_ast_,
const String & date_column_name_,
@ -138,10 +149,14 @@ StoragePtr StorageReplicatedMergeTree::create(
const String & sign_column_,
const MergeTreeSettings & settings_)
{
StorageReplicatedMergeTree * res = new StorageReplicatedMergeTree(zookeeper_path_, replica_name_, attach,
path_, database_name_, name_, columns_, context_, primary_expr_ast_, date_column_name_, sampling_expression_,
index_granularity_, mode_, sign_column_, settings_);
auto res = new StorageReplicatedMergeTree{
zookeeper_path_, replica_name_, attach,
path_, database_name_, name_,
columns_, materialized_columns_, alias_columns_, column_defaults_,
context_, primary_expr_ast_, date_column_name_,
sampling_expression_, index_granularity_, mode_,
sign_column_, settings_
};
StoragePtr res_ptr = res->thisPtr();
if (!res->is_read_only)
@ -189,7 +204,9 @@ void StorageReplicatedMergeTree::createTableIfNotExists()
zookeeper->getDefaultACL(), zkutil::CreateMode::Persistent));
ops.push_back(new zkutil::Op::Create(zookeeper_path + "/metadata", metadata.str(),
zookeeper->getDefaultACL(), zkutil::CreateMode::Persistent));
ops.push_back(new zkutil::Op::Create(zookeeper_path + "/columns", data.getColumnsList().toString(),
ops.push_back(new zkutil::Op::Create(zookeeper_path + "/columns", ColumnsDescription<false>{
data.getColumnsListNonMaterialized(), data.materialized_columns,
data.alias_columns, data.column_defaults}.toString(),
zookeeper->getDefaultACL(), zkutil::CreateMode::Persistent));
ops.push_back(new zkutil::Op::Create(zookeeper_path + "/log", "",
zookeeper->getDefaultACL(), zkutil::CreateMode::Persistent));
@ -238,17 +255,34 @@ void StorageReplicatedMergeTree::checkTableStructure(bool skip_sanity_checks, bo
assertEOF(buf);
zkutil::Stat stat;
auto columns = NamesAndTypesList::parse(zookeeper->get(zookeeper_path + "/columns", &stat), context.getDataTypeFactory());
auto columns_desc = ColumnsDescription<true>::parse(
zookeeper->get(zookeeper_path + "/columns", &stat), context.getDataTypeFactory());
auto & columns = columns_desc.columns;
auto & materialized_columns = columns_desc.materialized;
auto & alias_columns = columns_desc.alias;
auto & column_defaults = columns_desc.defaults;
columns_version = stat.version;
if (columns != data.getColumnsList())
if (columns != data.getColumnsListNonMaterialized() ||
materialized_columns != data.materialized_columns ||
alias_columns != data.alias_columns ||
column_defaults != data.column_defaults)
{
if (allow_alter && (data.getColumnsList().sizeOfDifference(columns) <= 2 || skip_sanity_checks))
if (allow_alter &&
(skip_sanity_checks ||
data.getColumnsListNonMaterialized().sizeOfDifference(columns) +
data.materialized_columns.sizeOfDifference(materialized_columns) <= 2))
{
LOG_WARNING(log, "Table structure in ZooKeeper is a little different from local table structure. Assuming ALTER.");
/// Без всяких блокировок, потому что таблица еще не создана.
InterpreterAlterQuery::updateMetadata(database_name, table_name, columns, context);
InterpreterAlterQuery::updateMetadata(database_name, table_name, columns,
materialized_columns, alias_columns, column_defaults, context);
data.setColumnsList(columns);
data.materialized_columns = std::move(materialized_columns);
data.alias_columns = std::move(alias_columns);
data.column_defaults = std::move(column_defaults);
}
else
{
@ -384,7 +418,12 @@ void StorageReplicatedMergeTree::createReplica()
LOG_DEBUG(log, "Copied " << source_queue.size() << " queue entries");
}
zookeeper->create(replica_path + "/columns", data.getColumnsList().toString(), zkutil::CreateMode::Persistent);
zookeeper->create(replica_path + "/columns", ColumnsDescription<false>{
data.getColumnsListNonMaterialized(),
data.materialized_columns,
data.alias_columns,
data.column_defaults
}.toString(), zkutil::CreateMode::Persistent);
}
@ -1405,8 +1444,13 @@ void StorageReplicatedMergeTree::alterThread()
*/
zkutil::Stat stat;
String columns_str = zookeeper->get(zookeeper_path + "/columns", &stat, alter_thread_event);
NamesAndTypesList columns = NamesAndTypesList::parse(columns_str, context.getDataTypeFactory());
const String columns_str = zookeeper->get(zookeeper_path + "/columns", &stat, alter_thread_event);
auto columns_desc = ColumnsDescription<true>::parse(columns_str, context.getDataTypeFactory());
auto & columns = columns_desc.columns;
auto & materialized_columns = columns_desc.materialized;
auto & alias_columns = columns_desc.alias;
auto & column_defaults = columns_desc.defaults;
bool changed_version = (stat.version != columns_version);
@ -1417,14 +1461,45 @@ void StorageReplicatedMergeTree::alterThread()
{
auto table_lock = lockStructureForAlter();
if (columns != data.getColumnsList())
const auto columns_changed = columns != data.getColumnsListNonMaterialized();
const auto materialized_columns_changed = materialized_columns != data.materialized_columns;
const auto alias_columns_changed = alias_columns != data.alias_columns;
const auto column_defaults_changed = column_defaults != data.column_defaults;
if (columns_changed || materialized_columns_changed || alias_columns_changed ||
column_defaults_changed)
{
LOG_INFO(log, "Columns list changed in ZooKeeper. Applying changes locally.");
InterpreterAlterQuery::updateMetadata(database_name, table_name, columns, context);
data.setColumnsList(columns);
if (unreplicated_data)
unreplicated_data->setColumnsList(columns);
InterpreterAlterQuery::updateMetadata(database_name, table_name, columns,
materialized_columns, alias_columns, column_defaults, context);
if (columns_changed)
{
data.setColumnsList(columns);
if (unreplicated_data)
unreplicated_data->setColumnsList(columns);
}
if (materialized_columns_changed)
{
this->materialized_columns = materialized_columns;
data.materialized_columns = std::move(materialized_columns);
}
if (alias_columns_changed)
{
this->alias_columns = alias_columns;
data.alias_columns = std::move(alias_columns);
}
if (column_defaults_changed)
{
this->column_defaults = column_defaults;
data.column_defaults = std::move(column_defaults);
}
LOG_INFO(log, "Applied changes to table.");
}
else
@ -1451,12 +1526,14 @@ void StorageReplicatedMergeTree::alterThread()
if (!changed_version)
parts = data.getDataParts();
const auto columns_plus_materialized = data.getColumnsList();
for (const MergeTreeData::DataPartPtr & part : parts)
{
/// Обновим кусок и запишем результат во временные файлы.
/// TODO: Можно пропускать проверку на слишком большие изменения, если в ZooKeeper есть, например,
/// нода /flags/force_alter.
auto transaction = data.alterDataPart(part, columns);
auto transaction = data.alterDataPart(part, columns_plus_materialized);
if (!transaction)
continue;
@ -1480,7 +1557,7 @@ void StorageReplicatedMergeTree::alterThread()
for (const MergeTreeData::DataPartPtr & part : parts)
{
auto transaction = unreplicated_data->alterDataPart(part, columns);
auto transaction = unreplicated_data->alterDataPart(part, columns_plus_materialized);
if (!transaction)
continue;
@ -1492,7 +1569,7 @@ void StorageReplicatedMergeTree::alterThread()
}
/// Список столбцов для конкретной реплики.
zookeeper->set(replica_path + "/columns", columns.toString());
zookeeper->set(replica_path + "/columns", columns_str);
if (changed_version)
{
@ -1990,6 +2067,9 @@ void StorageReplicatedMergeTree::alter(const AlterCommands & params,
LOG_DEBUG(log, "Doing ALTER");
NamesAndTypesList new_columns;
NamesAndTypesList new_materialized_columns;
NamesAndTypesList new_alias_columns;
ColumnDefaults new_column_defaults;
String new_columns_str;
int new_columns_version;
zkutil::Stat stat;
@ -2002,10 +2082,16 @@ void StorageReplicatedMergeTree::alter(const AlterCommands & params,
data.checkAlter(params);
new_columns = data.getColumnsList();
params.apply(new_columns);
new_columns = data.getColumnsListNonMaterialized();
new_materialized_columns = data.materialized_columns;
new_alias_columns = data.alias_columns;
new_column_defaults = data.column_defaults;
params.apply(new_columns, new_materialized_columns, new_alias_columns, new_column_defaults);
new_columns_str = new_columns.toString();
new_columns_str = ColumnsDescription<false>{
new_columns, new_materialized_columns,
new_alias_columns, new_column_defaults
}.toString();
/// Делаем ALTER.
zookeeper->set(zookeeper_path + "/columns", new_columns_str, -1, &stat);

View File

@ -28,7 +28,7 @@ BlockInputStreams StorageSystemDatabases::read(
processed_stage = QueryProcessingStage::FetchColumns;
Block block;
ColumnWithNameAndType col_name;
col_name.name = "name";
col_name.type = new DataTypeString;

View File

@ -30,7 +30,7 @@ BlockInputStreams StorageSystemTables::read(
processed_stage = QueryProcessingStage::FetchColumns;
Block block;
ColumnWithNameAndType col_db;
col_db.name = "database";
col_db.type = new DataTypeString;
@ -50,7 +50,7 @@ BlockInputStreams StorageSystemTables::read(
block.insert(col_engine);
Poco::ScopedLock<Poco::Mutex> lock(context.getMutex());
for (Databases::const_iterator it = context.getDatabases().begin(); it != context.getDatabases().end(); ++it)
{
for (Tables::const_iterator jt = it->second.begin(); jt != it->second.end(); ++jt)
@ -60,7 +60,7 @@ BlockInputStreams StorageSystemTables::read(
col_engine.column->insert(jt->second->getName());
}
}
return BlockInputStreams(1, new OneBlockInputStream(block));
}

View File

@ -194,8 +194,8 @@ void TinyLogBlockInputStream::readData(const String & name, const IDataType & ty
TinyLogBlockOutputStream::TinyLogBlockOutputStream(StorageTinyLog & storage_)
: storage(storage_)
{
for (NamesAndTypesList::const_iterator it = storage.columns->begin(); it != storage.columns->end(); ++it)
addStream(it->name, *it->type);
for (const auto & col : storage.getColumnsList())
addStream(col.name, *col.type);
}
@ -297,11 +297,20 @@ void TinyLogBlockOutputStream::write(const Block & block)
}
StorageTinyLog::StorageTinyLog(const std::string & path_, const std::string & name_, NamesAndTypesListPtr columns_, bool attach, size_t max_compress_block_size_)
: path(path_), name(name_), columns(columns_),
max_compress_block_size(max_compress_block_size_),
file_checker(path + escapeForFileName(name) + '/' + "sizes.json", *this),
log(&Logger::get("StorageTinyLog"))
StorageTinyLog::StorageTinyLog(
const std::string & path_,
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
bool attach,
size_t max_compress_block_size_)
: IStorage{materialized_columns_, alias_columns_, column_defaults_},
path(path_), name(name_), columns(columns_),
max_compress_block_size(max_compress_block_size_),
file_checker(path + escapeForFileName(name) + '/' + "sizes.json", *this),
log(&Logger::get("StorageTinyLog"))
{
if (columns->empty())
throw Exception("Empty list of columns passed to StorageTinyLog constructor", ErrorCodes::EMPTY_LIST_OF_COLUMNS_PASSED);
@ -314,13 +323,25 @@ StorageTinyLog::StorageTinyLog(const std::string & path_, const std::string & na
throwFromErrno("Cannot create directory " + full_path, ErrorCodes::CANNOT_CREATE_DIRECTORY);
}
for (NamesAndTypesList::const_iterator it = columns->begin(); it != columns->end(); ++it)
addFile(it->name, *it->type);
for (const auto & col : getColumnsList())
addFile(col.name, *col.type);
}
StoragePtr StorageTinyLog::create(const std::string & path_, const std::string & name_, NamesAndTypesListPtr columns_, bool attach, size_t max_compress_block_size_)
StoragePtr StorageTinyLog::create(
const std::string & path_,
const std::string & name_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_,
bool attach,
size_t max_compress_block_size_)
{
return (new StorageTinyLog(path_, name_, columns_, attach, max_compress_block_size_))->thisPtr();
return (new StorageTinyLog{
path_, name_, columns_,
materialized_columns_, alias_columns_, column_defaults_,
attach, max_compress_block_size_
})->thisPtr();
}

View File

@ -10,16 +10,34 @@ namespace DB
{
StoragePtr StorageView::create(const String & table_name_, const String & database_name_,
Context & context_, ASTPtr & query_, NamesAndTypesListPtr columns_)
StoragePtr StorageView::create(
const String & table_name_,
const String & database_name_,
Context & context_,
ASTPtr & query_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_)
{
return (new StorageView(table_name_, database_name_, context_, query_, columns_))->thisPtr();
return (new StorageView{
table_name_, database_name_, context_, query_,
columns_, materialized_columns_, alias_columns_, column_defaults_
})->thisPtr();
}
StorageView::StorageView(const String & table_name_, const String & database_name_,
Context & context_, ASTPtr & query_, NamesAndTypesListPtr columns_):
table_name(table_name_), database_name(database_name_), context(context_), columns(columns_)
StorageView::StorageView(
const String & table_name_,
const String & database_name_,
Context & context_,
ASTPtr & query_,
NamesAndTypesListPtr columns_,
const NamesAndTypesList & materialized_columns_,
const NamesAndTypesList & alias_columns_,
const ColumnDefaults & column_defaults_)
: IStorage{materialized_columns_, alias_columns_, column_defaults_}, table_name(table_name_),
database_name(database_name_), context(context_), columns(columns_)
{
ASTCreateQuery & create = typeid_cast<ASTCreateQuery &>(*query_);
ASTSelectQuery & select = typeid_cast<ASTSelectQuery &>(*create.select);

View File

@ -1,15 +1,15 @@
CounterID UInt32
StartDate Date
UserID UInt32
VisitID UInt32
NestedColumn.A Array(UInt8)
NestedColumn.S Array(String)
ToDrop UInt32
Added0 UInt32
Added1 UInt32
Added2 UInt32
AddedNested1.A Array(UInt32)
AddedNested1.B Array(UInt64)
AddedNested1.C Array(String)
AddedNested2.A Array(UInt32)
AddedNested2.B Array(UInt64)
CounterID UInt32
StartDate Date
UserID UInt32
VisitID UInt32
NestedColumn.A Array(UInt8)
NestedColumn.S Array(String)
ToDrop UInt32
Added0 UInt32
Added1 UInt32
Added2 UInt32
AddedNested1.A Array(UInt32)
AddedNested1.B Array(UInt64)
AddedNested1.C Array(String)
AddedNested2.A Array(UInt32)
AddedNested2.B Array(UInt64)

View File

@ -1,11 +1,11 @@
CounterID UInt32
StartDate Date
UserID UInt32
VisitID UInt32
Added0 String
Added1 UInt32
Added2 UInt32
AddedNested1.A Array(UInt32)
AddedNested1.C Array(String)
AddedNested2.A Array(UInt32)
AddedNested2.B Array(UInt64)
CounterID UInt32
StartDate Date
UserID UInt32
VisitID UInt32
Added0 String
Added1 UInt32
Added2 UInt32
AddedNested1.A Array(UInt32)
AddedNested1.C Array(String)
AddedNested2.A Array(UInt32)
AddedNested2.B Array(UInt64)

View File

@ -1,72 +1,72 @@
d Date
k UInt64
i32 Int32
d Date
k UInt64
i32 Int32
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32) ENGINE = MergeTree(d, k, 8192)
2015-01-01 10 42
d Date
k UInt64
i32 Int32
dt DateTime
d Date
k UInt64
i32 Int32
dt DateTime
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, dt DateTime) ENGINE = MergeTree(d, k, 8192)
2015-01-01 9 41 1992-01-01 08:00:00
2015-01-01 10 42 0000-00-00 00:00:00
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = MergeTree(d, k, 8192)
2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14']
2015-01-01 9 41 1992-01-01 08:00:00 [] []
2015-01-01 10 42 0000-00-00 00:00:00 [] []
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
n.d Array(Date)
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
n.d Array(Date)
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192)
2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03']
2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00']
2015-01-01 9 41 1992-01-01 08:00:00 [] [] []
2015-01-01 10 42 0000-00-00 00:00:00 [] [] []
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
n.d Array(Date)
s String
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
n.d Array(Date)
s String
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), s String) ENGINE = MergeTree(d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] ['2000-01-01','2000-01-01','2000-01-03'] 100500
2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03']
2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00']
2015-01-01 9 41 1992-01-01 08:00:00 [] [] []
2015-01-01 10 42 0000-00-00 00:00:00 [] [] []
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
s Int64
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
s Int64
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), s Int64) ENGINE = MergeTree(d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] 100500
2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] 0
2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 0
2015-01-01 9 41 1992-01-01 08:00:00 [] [] 0
2015-01-01 10 42 0000-00-00 00:00:00 [] [] 0
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
s UInt32
n.d Array(Date)
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
s UInt32
n.d Array(Date)
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), s UInt32, `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] 100500 ['0000-00-00','0000-00-00','0000-00-00']
2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] 0 ['0000-00-00','0000-00-00','0000-00-00']
@ -78,58 +78,58 @@ CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8`
2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 0 ['0000-00-00','0000-00-00','0000-00-00']
2015-01-01 9 41 1992-01-01 08:00:00 [] [] 0 []
2015-01-01 10 42 0000-00-00 00:00:00 [] [] 0 []
d Date
k UInt64
i32 Int32
dt DateTime
n.s Array(String)
s UInt32
d Date
k UInt64
i32 Int32
dt DateTime
n.s Array(String)
s UInt32
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, dt DateTime, `n.s` Array(String), s UInt32) ENGINE = MergeTree(d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 ['asd','qwe','qwe'] 100500
2015-01-01 7 39 2014-07-14 13:26:50 ['120','130','140'] 0
2015-01-01 8 40 2012-12-12 12:12:12 ['12','13','14'] 0
2015-01-01 9 41 1992-01-01 08:00:00 [] 0
2015-01-01 10 42 0000-00-00 00:00:00 [] 0
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32) ENGINE = MergeTree(d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 100500
2015-01-01 7 39 2014-07-14 13:26:50 0
2015-01-01 8 40 2012-12-12 12:12:12 0
2015-01-01 9 41 1992-01-01 08:00:00 0
2015-01-01 10 42 0000-00-00 00:00:00 0
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
n.s Array(String)
n.d Array(Date)
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
n.s Array(String)
n.d Array(Date)
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32, `n.s` Array(String), `n.d` Array(Date)) ENGINE = MergeTree(d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 100500 [] []
2015-01-01 7 39 2014-07-14 13:26:50 0 [] []
2015-01-01 8 40 2012-12-12 12:12:12 0 [] []
2015-01-01 9 41 1992-01-01 08:00:00 0 [] []
2015-01-01 10 42 0000-00-00 00:00:00 0 [] []
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32) ENGINE = MergeTree(d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 100500
2015-01-01 7 39 2014-07-14 13:26:50 0
2015-01-01 8 40 2012-12-12 12:12:12 0
2015-01-01 9 41 1992-01-01 08:00:00 0
2015-01-01 10 42 0000-00-00 00:00:00 0
d Date
k UInt64
i32 Int32
dt Date
s DateTime
d Date
k UInt64
i32 Int32
dt Date
s DateTime
CREATE TABLE test.alter ( d Date, k UInt64, i32 Int32, dt Date, s DateTime) ENGINE = MergeTree(d, k, 8192)
2015-01-01 6 38 2014-07-15 1970-01-02 06:55:00
2015-01-01 7 39 2014-07-14 0000-00-00 00:00:00

View File

@ -1,122 +1,122 @@
d Date
k UInt64
i32 Int32
d Date
k UInt64
i32 Int32
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
d Date
k UInt64
i32 Int32
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 10 42
d Date
k UInt64
i32 Int32
dt DateTime
d Date
k UInt64
i32 Int32
dt DateTime
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
dt DateTime
d Date
k UInt64
i32 Int32
dt DateTime
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 9 41 1992-01-01 08:00:00
2015-01-01 10 42 0000-00-00 00:00:00
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14']
2015-01-01 9 41 1992-01-01 08:00:00 [] []
2015-01-01 10 42 0000-00-00 00:00:00 [] []
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
n.d Array(Date)
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
n.d Array(Date)
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
n.d Array(Date)
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
n.d Array(Date)
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03']
2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00']
2015-01-01 9 41 1992-01-01 08:00:00 [] [] []
2015-01-01 10 42 0000-00-00 00:00:00 [] [] []
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
n.d Array(Date)
s String
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
n.d Array(Date)
s String
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), s String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
n.d Array(Date)
s String
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
n.d Array(Date)
s String
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), s String) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] ['2000-01-01','2000-01-01','2000-01-03'] 100500
2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03']
2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] ['0000-00-00','0000-00-00','0000-00-00']
2015-01-01 9 41 1992-01-01 08:00:00 [] [] []
2015-01-01 10 42 0000-00-00 00:00:00 [] [] []
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
s Int64
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
s Int64
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), s Int64) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
s Int64
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
s Int64
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), s Int64) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] 100500
2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] 0
2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 0
2015-01-01 9 41 1992-01-01 08:00:00 [] [] 0
2015-01-01 10 42 0000-00-00 00:00:00 [] [] 0
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
s UInt32
n.d Array(Date)
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
s UInt32
n.d Array(Date)
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), s UInt32, `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
s UInt32
n.d Array(Date)
d Date
k UInt64
i32 Int32
dt DateTime
n.ui8 Array(UInt8)
n.s Array(String)
s UInt32
n.d Array(Date)
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), s UInt32, `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] 100500 ['0000-00-00','0000-00-00','0000-00-00']
2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] 0 ['0000-00-00','0000-00-00','0000-00-00']
@ -128,91 +128,91 @@ CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTim
2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 0 ['0000-00-00','0000-00-00','0000-00-00']
2015-01-01 9 41 1992-01-01 08:00:00 [] [] 0 []
2015-01-01 10 42 0000-00-00 00:00:00 [] [] 0 []
d Date
k UInt64
i32 Int32
dt DateTime
n.s Array(String)
s UInt32
d Date
k UInt64
i32 Int32
dt DateTime
n.s Array(String)
s UInt32
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.s` Array(String), s UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
dt DateTime
n.s Array(String)
s UInt32
d Date
k UInt64
i32 Int32
dt DateTime
n.s Array(String)
s UInt32
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, `n.s` Array(String), s UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 ['asd','qwe','qwe'] 100500
2015-01-01 7 39 2014-07-14 13:26:50 ['120','130','140'] 0
2015-01-01 8 40 2012-12-12 12:12:12 ['12','13','14'] 0
2015-01-01 9 41 1992-01-01 08:00:00 [] 0
2015-01-01 10 42 0000-00-00 00:00:00 [] 0
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 100500
2015-01-01 7 39 2014-07-14 13:26:50 0
2015-01-01 8 40 2012-12-12 12:12:12 0
2015-01-01 9 41 1992-01-01 08:00:00 0
2015-01-01 10 42 0000-00-00 00:00:00 0
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
n.s Array(String)
n.d Array(Date)
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
n.s Array(String)
n.d Array(Date)
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32, `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
n.s Array(String)
n.d Array(Date)
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
n.s Array(String)
n.d Array(Date)
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32, `n.s` Array(String), `n.d` Array(Date)) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 100500 [] []
2015-01-01 7 39 2014-07-14 13:26:50 0 [] []
2015-01-01 8 40 2012-12-12 12:12:12 0 [] []
2015-01-01 9 41 1992-01-01 08:00:00 0 [] []
2015-01-01 10 42 0000-00-00 00:00:00 0 [] []
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
d Date
k UInt64
i32 Int32
dt DateTime
s UInt32
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt DateTime, s UInt32) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 6 38 2014-07-15 13:26:50 100500
2015-01-01 7 39 2014-07-14 13:26:50 0
2015-01-01 8 40 2012-12-12 12:12:12 0
2015-01-01 9 41 1992-01-01 08:00:00 0
2015-01-01 10 42 0000-00-00 00:00:00 0
d Date
k UInt64
i32 Int32
dt Date
s DateTime
d Date
k UInt64
i32 Int32
dt Date
s DateTime
CREATE TABLE test.replicated_alter1 ( d Date, k UInt64, i32 Int32, dt Date, s DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r1\', d, k, 8192)
d Date
k UInt64
i32 Int32
dt Date
s DateTime
d Date
k UInt64
i32 Int32
dt Date
s DateTime
CREATE TABLE test.replicated_alter2 ( d Date, k UInt64, i32 Int32, dt Date, s DateTime) ENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test/alter\', \'r2\', d, k, 8192)
2015-01-01 6 38 2014-07-15 1970-01-02 06:55:00
2015-01-01 7 39 2014-07-14 0000-00-00 00:00:00

View File

@ -0,0 +1,41 @@
col1 UInt8 DEFAULT 0
col1 UInt32
col2 UInt64 DEFAULT col1 + 1
col3 UInt64 MATERIALIZED col1 + 2
col4 UInt64 ALIAS col1 + 3
10 11
12 13
payload String
date Date MATERIALIZED today()
key UInt64 MATERIALIZED 0 * rand()
hello clickhouse
payload String
date Date MATERIALIZED today()
key UInt64 MATERIALIZED 0 * rand()
payload_length UInt64 MATERIALIZED length(payload)
hello clickhouse 16
hello clickhouse 16
some string 11
hello clickhouse 16
some string 11
payload String
payload_length UInt64 DEFAULT length(payload)
date Date MATERIALIZED today()
key UInt64 MATERIALIZED 0 * rand()
hello clickhouse 16
some string 11
payload String
payload_length UInt16 DEFAULT length(payload) % 65535
date Date MATERIALIZED today()
key UInt64 MATERIALIZED 0 * rand()
hello clickhouse 16
some string 11
payload String
payload_length UInt16 DEFAULT toUInt16(length(payload))
date Date MATERIALIZED today()
key UInt64 MATERIALIZED 0 * rand()
payload String
date Date MATERIALIZED today()
key UInt64 MATERIALIZED 0 * rand()
hello clickhouse
some string

View File

@ -0,0 +1,36 @@
-- Functional test for DEFAULT / MATERIALIZED / ALIAS column expressions.
-- Output is compared byte-for-byte against the paired .reference file,
-- so the statements themselves must not change.

-- Stage 1: a column declared with only "default 0" must infer its type (UInt8) from the expression.
drop table if exists defaulted_test;
create table defaulted_test (col1 default 0) engine=Memory;
desc table defaulted_test;
drop table defaulted_test;
-- Stage 2: all three kinds of defaults at once, each derived from col1.
-- Expected visibility rules: SELECT * shows col1+col2 only; MATERIALIZED and
-- ALIAS columns appear only when named explicitly.
create table defaulted_test (col1 UInt32, col2 default col1 + 1, col3 materialized col1 + 2, col4 alias col1 + 3) engine=Memory;
desc table defaulted_test;
insert into defaulted_test (col1) values (10);
select * from defaulted_test;
select col3, col4 from defaulted_test;
drop table defaulted_test;
-- Stage 3: MATERIALIZED columns may be used as the date/sampling key of a
-- MergeTree even though they are never inserted directly.
-- "0 * rand()" keeps the key deterministic (always 0) while still being an expression.
create table defaulted_test (payload String, date materialized today(), key materialized 0 * rand()) engine=MergeTree(date, key, 8192);
desc table defaulted_test;
insert into defaulted_test (payload) values ('hello clickhouse');
select * from defaulted_test;
-- Stage 4: ALTER ADD COLUMN with a MATERIALIZED expression; existing parts must
-- compute the value on read ('hello clickhouse' -> 16).
alter table defaulted_test add column payload_length materialized length(payload);
desc table defaulted_test;
select *, payload_length from defaulted_test;
insert into defaulted_test (payload) values ('some string');
select *, payload_length from defaulted_test order by payload;
-- OPTIMIZE merges parts; materialized values must survive the merge unchanged.
optimize table defaulted_test;
select *, payload_length from defaulted_test order by payload;
-- Stage 5: MODIFY COLUMN can switch MATERIALIZED -> DEFAULT; the column then
-- becomes visible in SELECT *.
alter table defaulted_test modify column payload_length default length(payload);
desc table defaulted_test;
select * from defaulted_test order by payload;
-- Changing the default expression ("% 65535" narrows the inferred type to UInt16)
-- must rewrite the column type accordingly.
alter table defaulted_test modify column payload_length default length(payload) % 65535;
desc table defaulted_test;
select * from defaulted_test order by payload;
-- An explicit type plus a default expression of a different type must be stored
-- with an implicit cast (shown in DESC as toUInt16(...)).
alter table defaulted_test modify column payload_length UInt16 default length(payload);
desc table defaulted_test;
-- Stage 6: dropping a defaulted column restores the original schema.
alter table defaulted_test drop column payload_length;
desc table defaulted_test;
select * from defaulted_test order by payload;
drop table defaulted_test;