Roman Peshkurov 2015-07-29 13:00:21 +03:00
commit d4b880dbaf
42 changed files with 804 additions and 126 deletions

View File

@ -14,10 +14,10 @@ class FormatFactory
{
public:
BlockInputStreamPtr getInput(const String & name, ReadBuffer & buf,
Block & sample, size_t max_block_size) const;
const Block & sample, size_t max_block_size) const;
BlockOutputStreamPtr getOutput(const String & name, WriteBuffer & buf,
Block & sample) const;
const Block & sample) const;
};
}

View File

@ -84,6 +84,16 @@ public:
if (!is_cancelled.compare_exchange_strong(old_val, true, std::memory_order_seq_cst, std::memory_order_relaxed))
return;
{
std::lock_guard<std::mutex> lock(external_tables_mutex);
/// Stop sending external data.
for (auto & vec : external_tables_data)
for (auto & elem : vec)
if (IProfilingBlockInputStream * stream = dynamic_cast<IProfilingBlockInputStream *>(elem.first.get()))
stream->cancel();
}
if (!isQueryPending() || hasThrownException())
return;
@ -107,27 +117,30 @@ protected:
{
size_t count = parallel_replicas->size();
std::vector<ExternalTablesData> instances;
instances.reserve(count);
for (size_t i = 0; i < count; ++i)
{
ExternalTablesData res;
for (const auto & table : external_tables)
std::lock_guard<std::mutex> lock(external_tables_mutex);
external_tables_data.reserve(count);
for (size_t i = 0; i < count; ++i)
{
StoragePtr cur = table.second;
QueryProcessingStage::Enum stage = QueryProcessingStage::Complete;
DB::BlockInputStreams input = cur->read(cur->getColumnNamesList(), ASTPtr(), context, settings,
stage, DEFAULT_BLOCK_SIZE, 1);
if (input.size() == 0)
res.push_back(std::make_pair(new OneBlockInputStream(cur->getSampleBlock()), table.first));
else
res.push_back(std::make_pair(input[0], table.first));
ExternalTablesData res;
for (const auto & table : external_tables)
{
StoragePtr cur = table.second;
QueryProcessingStage::Enum stage = QueryProcessingStage::Complete;
DB::BlockInputStreams input = cur->read(cur->getColumnNamesList(), ASTPtr(), context, settings,
stage, DEFAULT_BLOCK_SIZE, 1);
if (input.size() == 0)
res.push_back(std::make_pair(new OneBlockInputStream(cur->getSampleBlock()), table.first));
else
res.push_back(std::make_pair(input[0], table.first));
}
external_tables_data.push_back(std::move(res));
}
instances.push_back(std::move(res));
}
parallel_replicas->sendExternalTablesData(instances);
parallel_replicas->sendExternalTablesData(external_tables_data);
}
Block readImpl() override
@ -302,6 +315,10 @@ private:
QueryProcessingStage::Enum stage;
Context context;
/// Streams for reading from temporary tables - for subsequently sending the data to remote servers for GLOBAL subqueries.
std::vector<ExternalTablesData> external_tables_data;
std::mutex external_tables_mutex;
/// Connections with the replicas have been established, but the query has not been sent yet.
std::atomic<bool> established { false };
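A minimal standalone sketch (not the real IProfilingBlockInputStream API; all names here are illustrative) of the locking pattern introduced above: the same mutex guards both the thread that registers the per-replica external-table streams and the thread that cancels them, so cancel() never misses a stream that has already been created.

#include <iostream>
#include <memory>
#include <mutex>
#include <vector>

/// Hypothetical stand-in for a cancellable input stream.
struct CancellableStream
{
    bool cancelled = false;
    void cancel() { cancelled = true; }
};

struct RemoteQuerySketch
{
    std::mutex external_tables_mutex;                                   /// guards external_tables_data
    std::vector<std::shared_ptr<CancellableStream>> external_tables_data;

    /// Sending thread: register streams under the lock before using them.
    void addStream(std::shared_ptr<CancellableStream> stream)
    {
        std::lock_guard<std::mutex> lock(external_tables_mutex);
        external_tables_data.push_back(std::move(stream));
    }

    /// Cancelling thread: every stream registered so far gets cancelled.
    void cancel()
    {
        std::lock_guard<std::mutex> lock(external_tables_mutex);
        for (auto & stream : external_tables_data)
            stream->cancel();
    }
};

int main()
{
    RemoteQuerySketch query;
    query.addStream(std::make_shared<CancellableStream>());
    query.cancel();
    std::cout << query.external_tables_data[0]->cancelled << '\n';     /// prints 1
}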

View File

@ -43,6 +43,8 @@ namespace DB
* For example: arrayEnumerateUniq([10, 20, 10, 30]) = [1, 1, 2, 1]
* arrayEnumerateUniq(arr1, arr2...)
* - for tuples of elements at the corresponding positions in several arrays.
*
* emptyArrayToSingle(arr) - replace empty arrays with arrays of one element with the "default" value.
*/
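A standalone sketch of what the FunctionEmptyArrayToSingle implementation below does at the level of the flat array representation (one values vector plus cumulative end offsets per row); the names are illustrative, not the real ColumnArray API.

#include <cstddef>
#include <iostream>
#include <vector>

/// Arrays are stored as one flat values vector plus, for each row, the cumulative
/// offset of that row's end. emptyArrayToSingle keeps non-empty rows as-is and
/// turns each empty row into a single default value (0 here).
static void emptyArrayToSingleSketch(
    const std::vector<int> & src_values, const std::vector<size_t> & src_offsets,
    std::vector<int> & res_values, std::vector<size_t> & res_offsets)
{
    size_t src_prev = 0;
    for (size_t i = 0; i < src_offsets.size(); ++i)
    {
        if (src_offsets[i] != src_prev)
            res_values.insert(res_values.end(), src_values.begin() + src_prev, src_values.begin() + src_offsets[i]);
        else
            res_values.push_back(0);                    /// empty row -> [default]
        res_offsets.push_back(res_values.size());
        src_prev = src_offsets[i];
    }
}

int main()
{
    /// Three rows: [1, 2], [], [4, 5, 6]
    std::vector<int> values { 1, 2, 4, 5, 6 };
    std::vector<size_t> offsets { 2, 2, 5 };

    std::vector<int> res_values;
    std::vector<size_t> res_offsets;
    emptyArrayToSingleSketch(values, offsets, res_values, res_offsets);

    for (int x : res_values)
        std::cout << x << ' ';                          /// 1 2 0 4 5 6
    std::cout << '\n';
    for (size_t o : res_offsets)
        std::cout << o << ' ';                          /// 2 3 6
    std::cout << '\n';
}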
@ -1695,13 +1697,263 @@ private:
};
class FunctionEmptyArrayToSingle : public IFunction
{
public:
static constexpr auto name = "emptyArrayToSingle";
static IFunction * create(const Context & context) { return new FunctionEmptyArrayToSingle; }
/// Get the function name.
String getName() const
{
return name;
}
/// Get the return type from the argument types. If the function is not applicable for the given arguments, throw an exception.
DataTypePtr getReturnType(const DataTypes & arguments) const
{
if (arguments.size() != 1)
throw Exception("Number of arguments for function " + getName() + " doesn't match: passed "
+ toString(arguments.size()) + ", should be 1.",
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
const DataTypeArray * array_type = typeid_cast<const DataTypeArray *>(arguments[0].get());
if (!array_type)
throw Exception("Argument for function " + getName() + " must be array.",
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
return arguments[0]->clone();
}
/// Execute the function on a block.
void execute(Block & block, const ColumnNumbers & arguments, size_t result)
{
if (executeConst(block, arguments, result))
return;
const ColumnArray * array = typeid_cast<const ColumnArray *>(block.getByPosition(arguments[0]).column.get());
if (!array)
throw Exception("Illegal column " + block.getByPosition(arguments[0]).column->getName() + " of first argument of function " + getName(),
ErrorCodes::ILLEGAL_COLUMN);
ColumnPtr res_ptr = array->cloneEmpty();
block.getByPosition(result).column = res_ptr;
ColumnArray & res = static_cast<ColumnArray &>(*res_ptr);
const IColumn & src_data = array->getData();
const ColumnArray::Offsets_t & src_offsets = array->getOffsets();
IColumn & res_data = res.getData();
ColumnArray::Offsets_t & res_offsets = res.getOffsets();
if (!( executeNumber<UInt8> (src_data, src_offsets, res_data, res_offsets)
|| executeNumber<UInt16> (src_data, src_offsets, res_data, res_offsets)
|| executeNumber<UInt32> (src_data, src_offsets, res_data, res_offsets)
|| executeNumber<UInt64> (src_data, src_offsets, res_data, res_offsets)
|| executeNumber<Int8> (src_data, src_offsets, res_data, res_offsets)
|| executeNumber<Int16> (src_data, src_offsets, res_data, res_offsets)
|| executeNumber<Int32> (src_data, src_offsets, res_data, res_offsets)
|| executeNumber<Int64> (src_data, src_offsets, res_data, res_offsets)
|| executeNumber<Float32> (src_data, src_offsets, res_data, res_offsets)
|| executeNumber<Float64> (src_data, src_offsets, res_data, res_offsets)
|| executeString (src_data, src_offsets, res_data, res_offsets)
|| executeFixedString (src_data, src_offsets, res_data, res_offsets)))
throw Exception("Illegal column " + block.getByPosition(arguments[0]).column->getName()
+ " of first argument of function " + getName(),
ErrorCodes::ILLEGAL_COLUMN);
}
private:
bool executeConst(Block & block, const ColumnNumbers & arguments, size_t result)
{
if (const ColumnConstArray * const_array = typeid_cast<const ColumnConstArray *>(block.getByPosition(arguments[0]).column.get()))
{
if (const_array->getData().empty())
{
auto nested_type = typeid_cast<const DataTypeArray &>(*block.getByPosition(arguments[0]).type).getNestedType();
block.getByPosition(result).column = new ColumnConstArray(
block.rowsInFirstColumn(),
{nested_type->getDefault()},
nested_type->clone());
}
else
block.getByPosition(result).column = block.getByPosition(arguments[0]).column;
return true;
}
else
return false;
}
template <typename T>
bool executeNumber(
const IColumn & src_data, const ColumnArray::Offsets_t & src_offsets,
IColumn & res_data_col, ColumnArray::Offsets_t & res_offsets)
{
if (const ColumnVector<T> * src_data_concrete = typeid_cast<const ColumnVector<T> *>(&src_data))
{
const PODArray<T> & src_data = src_data_concrete->getData();
PODArray<T> & res_data = typeid_cast<ColumnVector<T> &>(res_data_col).getData();
size_t size = src_offsets.size();
res_offsets.resize(size);
res_data.reserve(src_data.size());
ColumnArray::Offset_t src_prev_offset = 0;
ColumnArray::Offset_t res_prev_offset = 0;
for (size_t i = 0; i < size; ++i)
{
if (src_offsets[i] != src_prev_offset)
{
size_t size_to_write = src_offsets[i] - src_prev_offset;
size_t prev_res_data_size = res_data.size();
res_data.resize(prev_res_data_size + size_to_write);
memcpy(&res_data[prev_res_data_size], &src_data[src_prev_offset], size_to_write * sizeof(T));
res_prev_offset += size_to_write;
res_offsets[i] = res_prev_offset;
}
else
{
res_data.push_back(T());
++res_prev_offset;
res_offsets[i] = res_prev_offset;
}
src_prev_offset = src_offsets[i];
}
return true;
}
else
return false;
}
bool executeFixedString(
const IColumn & src_data, const ColumnArray::Offsets_t & src_offsets,
IColumn & res_data_col, ColumnArray::Offsets_t & res_offsets)
{
if (const ColumnFixedString * src_data_concrete = typeid_cast<const ColumnFixedString *>(&src_data))
{
const size_t n = src_data_concrete->getN();
const ColumnFixedString::Chars_t & src_data = src_data_concrete->getChars();
ColumnFixedString::Chars_t & res_data = typeid_cast<ColumnFixedString &>(res_data_col).getChars();
size_t size = src_offsets.size();
res_offsets.resize(size);
res_data.reserve(src_data.size());
ColumnArray::Offset_t src_prev_offset = 0;
ColumnArray::Offset_t res_prev_offset = 0;
for (size_t i = 0; i < size; ++i)
{
if (src_offsets[i] != src_prev_offset)
{
size_t size_to_write = src_offsets[i] - src_prev_offset;
size_t prev_res_data_size = res_data.size();
res_data.resize(prev_res_data_size + size_to_write * n);
memcpy(&res_data[prev_res_data_size], &src_data[src_prev_offset], size_to_write * n);
res_prev_offset += size_to_write;
res_offsets[i] = res_prev_offset;
}
else
{
size_t prev_res_data_size = res_data.size();
res_data.resize(prev_res_data_size + n);
memset(&res_data[prev_res_data_size], 0, n);
++res_prev_offset;
res_offsets[i] = res_prev_offset;
}
src_prev_offset = src_offsets[i];
}
return true;
}
else
return false;
}
bool executeString(
const IColumn & src_data, const ColumnArray::Offsets_t & src_array_offsets,
IColumn & res_data_col, ColumnArray::Offsets_t & res_array_offsets)
{
if (const ColumnString * src_data_concrete = typeid_cast<const ColumnString *>(&src_data))
{
const ColumnString::Offsets_t & src_string_offsets = src_data_concrete->getOffsets();
ColumnString::Offsets_t & res_string_offsets = typeid_cast<ColumnString &>(res_data_col).getOffsets();
const ColumnString::Chars_t & src_data = src_data_concrete->getChars();
ColumnString::Chars_t & res_data = typeid_cast<ColumnString &>(res_data_col).getChars();
size_t size = src_array_offsets.size();
res_array_offsets.resize(size);
res_string_offsets.reserve(src_string_offsets.size());
res_data.reserve(src_data.size());
ColumnArray::Offset_t src_array_prev_offset = 0;
ColumnArray::Offset_t res_array_prev_offset = 0;
ColumnString::Offset_t src_string_prev_offset = 0;
ColumnString::Offset_t res_string_prev_offset = 0;
for (size_t i = 0; i < size; ++i)
{
if (src_array_offsets[i] != src_array_prev_offset)
{
size_t array_size = src_array_offsets[i] - src_array_prev_offset;
size_t bytes_to_copy = 0;
size_t from_string_prev_offset_local = src_string_prev_offset;
for (size_t j = 0; j < array_size; ++j)
{
size_t string_size = src_string_offsets[src_array_prev_offset + j] - from_string_prev_offset_local;
res_string_prev_offset += string_size;
res_string_offsets.push_back(res_string_prev_offset);
from_string_prev_offset_local += string_size;
bytes_to_copy += string_size;
}
size_t res_data_old_size = res_data.size();
res_data.resize(res_data_old_size + bytes_to_copy);
memcpy(&res_data[res_data_old_size], &src_data[src_string_prev_offset], bytes_to_copy);
res_array_prev_offset += array_size;
res_array_offsets[i] = res_array_prev_offset;
}
else
{
res_data.push_back(0); /// An empty string, including the zero at the end.
++res_string_prev_offset;
res_string_offsets.push_back(res_string_prev_offset);
++res_array_prev_offset;
res_array_offsets[i] = res_array_prev_offset;
}
src_array_prev_offset = src_array_offsets[i];
if (src_array_prev_offset)
src_string_prev_offset = src_string_offsets[src_array_prev_offset - 1];
}
return true;
}
else
return false;
}
};
struct NameHas { static constexpr auto name = "has"; };
struct NameIndexOf { static constexpr auto name = "indexOf"; };
struct NameCountEqual { static constexpr auto name = "countEqual"; };
typedef FunctionArrayIndex<IndexToOne, NameHas> FunctionHas;
typedef FunctionArrayIndex<IndexToOne, NameHas> FunctionHas;
typedef FunctionArrayIndex<IndexIdentity, NameIndexOf> FunctionIndexOf;
typedef FunctionArrayIndex<IndexCount, NameCountEqual> FunctionCountEqual;
typedef FunctionArrayIndex<IndexCount, NameCountEqual> FunctionCountEqual;
using FunctionEmptyArrayUInt8 = FunctionEmptyArray<DataTypeUInt8>;
using FunctionEmptyArrayUInt16 = FunctionEmptyArray<DataTypeUInt16>;

View File

@ -404,31 +404,36 @@ struct ExtractURLParameterImpl
{
size_t cur_offset = offsets[i];
const char * str = reinterpret_cast<const char *>(&data[prev_offset]);
const char * pos = nullptr;
do
const char * begin = strchr(str, '?');
if (begin != nullptr)
{
const char * str = reinterpret_cast<const char *>(&data[prev_offset]);
const char * begin = strchr(str, '?');
if (begin == nullptr)
break;
pos = strstr(begin + 1, param_str);
if (pos == nullptr)
break;
if (pos != begin + 1 && *(pos - 1) != ';' && *(pos - 1) != '&')
pos = begin + 1;
while (true)
{
pos = nullptr;
break;
}
pos = strstr(pos, param_str);
pos += param_len;
} while (false);
if (pos == nullptr)
break;
if (pos[-1] != '?' && pos[-1] != '&')
{
pos += param_len;
continue;
}
else
{
pos += param_len;
break;
}
}
}
if (pos != nullptr)
{
const char * end = strpbrk(pos, "&;#");
const char * end = strpbrk(pos, "&#");
if (end == nullptr)
end = pos + strlen(pos);
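A standalone sketch of the matching rule the rewritten loop above enforces: a parameter name only counts when it starts right after '?' or '&', and its value ends at the next '&' or '#'. This is a simplified model using std::string, not the ColumnString internals.

#include <iostream>
#include <string>

/// Returns the value of parameter `name` in the query string of `url`, or "" if absent.
static std::string extractURLParameterSketch(const std::string & url, const std::string & name)
{
    std::string param = name + '=';
    size_t begin = url.find('?');
    if (begin == std::string::npos)
        return {};

    size_t pos = begin;
    while ((pos = url.find(param, pos + 1)) != std::string::npos)
    {
        /// Only accept a match that starts a parameter, i.e. right after '?' or '&'.
        if (url[pos - 1] == '?' || url[pos - 1] == '&')
        {
            size_t value_begin = pos + param.size();
            size_t value_end = url.find_first_of("&#", value_begin);
            return url.substr(value_begin, value_end == std::string::npos ? std::string::npos : value_end - value_begin);
        }
    }
    return {};
}

int main()
{
    /// The case from the new test: 'testq' must not shadow 'q'.
    std::cout << extractURLParameterSketch("http://test.com/?testq=aaa&q=111", "q") << '\n';   /// 111
}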

View File

@ -66,6 +66,7 @@ public:
/// For ARRAY_JOIN
NameSet array_joined_columns;
bool array_join_is_left;
/// For JOIN
const Join * join = nullptr;
@ -122,13 +123,14 @@ public:
return a;
}
static ExpressionAction arrayJoin(const NameSet & array_joined_columns)
static ExpressionAction arrayJoin(const NameSet & array_joined_columns, bool array_join_is_left)
{
if (array_joined_columns.empty())
throw Exception("No arrays to join", ErrorCodes::LOGICAL_ERROR);
ExpressionAction a;
a.type = ARRAY_JOIN;
a.array_joined_columns = array_joined_columns;
a.array_join_is_left = array_join_is_left;
return a;
}

View File

@ -226,6 +226,10 @@ private:
/// Turn an enumeration of values or a subquery into an ASTSet. node is an "in" or "notIn" function.
void makeSet(ASTFunction * node, const Block & sample_block);
/// Replace scalar subqueries with constant values.
void executeScalarSubqueries();
void executeScalarSubqueriesImpl(ASTPtr & ast);
/// Finds global subqueries in GLOBAL IN/JOIN sections. Fills external_tables.
void initGlobalSubqueriesAndExternalTables();
void initGlobalSubqueries(ASTPtr & ast);

View File

@ -24,8 +24,7 @@ public:
*/
BlockIO execute() override
{
executeImpl(false);
return {};
return executeImpl(false);
}
/** assume_metadata_exists - do not check for the presence of the metadata file and do not create it
@ -45,7 +44,7 @@ public:
const ColumnDefaults & column_defaults);
private:
void executeImpl(bool assume_metadata_exists);
BlockIO executeImpl(bool assume_metadata_exists);
/// AST to a list of columns with types. Columns of Nested type are expanded into a list of real columns.
using ColumnsAndDefaults = std::pair<NamesAndTypesList, ColumnDefaults>;

View File

@ -50,6 +50,7 @@ public:
ASTPtr select_expression_list;
ASTPtr database;
ASTPtr table; /// An identifier, a table function or a subquery (recursively an ASTSelectQuery)
bool array_join_is_left = false; /// LEFT ARRAY JOIN
ASTPtr array_join_expression_list; /// ARRAY JOIN
ASTPtr join; /// A regular (not ARRAY) JOIN.
bool final = false;

View File

@ -2,7 +2,7 @@
#include <DB/DataTypes/IDataType.h>
#include <DB/Parsers/IAST.h>
#include <DB/Parsers/ASTWithAlias.h>
namespace DB
@ -11,12 +11,12 @@ namespace DB
/** A SELECT subquery
*/
class ASTSubquery : public IAST
class ASTSubquery : public ASTWithAlias
{
public:
ASTSubquery() = default;
ASTSubquery(const StringRange range_) : IAST(range_) {}
ASTSubquery(const StringRange range_) : ASTWithAlias(range_) {}
/** Get the text that identifies this element. */
String getID() const override { return "Subquery"; }

View File

@ -779,6 +779,13 @@ public:
return it == std::end(column_sizes) ? 0 : it->second;
}
using ColumnSizes = std::unordered_map<std::string, size_t>;
ColumnSizes getColumnSizes() const
{
Poco::ScopedLock<Poco::FastMutex> lock{data_parts_mutex};
return column_sizes;
}
/// For ATTACH/DETACH/DROP PARTITION.
static String getMonthName(const Field & partition);
static DayNum_t getMonthDayNum(const Field & partition);
@ -810,7 +817,7 @@ private:
NamesAndTypesListPtr columns;
/// Actual sizes of columns in compressed form
std::unordered_map<std::string, size_t> column_sizes;
ColumnSizes column_sizes;
BrokenPartCallback broken_part_callback;

View File

@ -247,7 +247,7 @@ private:
++right;
}
/// If there are no marks to the right, just use DEFAULT_BUFFER_SIZE
/// If there are no marks to the right, just use max_read_buffer_size
if (right >= (*marks).size() || (right + 1 == (*marks).size() &&
(*marks)[right].offset_in_compressed_file == (*marks)[all_mark_ranges[i].end].offset_in_compressed_file))
{

View File

@ -652,7 +652,7 @@ AggregateFunctionPtr AggregateFunctionFactory::get(const String & name, const Da
AggregateFunctionPtr nested = get(String(name.data(), name.size() - strlen("State")), argument_types, recursion_level + 1);
return new AggregateFunctionState(nested);
}
else if (recursion_level == 0 && name.size() > strlen("Merge") && !(strcmp(name.data() + name.size() - strlen("Merge"), "Merge")))
else if (recursion_level <= 1 && name.size() > strlen("Merge") && !(strcmp(name.data() + name.size() - strlen("Merge"), "Merge")))
{
/// For aggregate functions of the form aggMerge, where agg is the name of another aggregate function.
if (argument_types.size() != 1)
@ -668,7 +668,7 @@ AggregateFunctionPtr AggregateFunctionFactory::get(const String & name, const Da
return new AggregateFunctionMerge(nested);
}
else if (recursion_level <= 1 && name.size() >= 3 && name[name.size() - 2] == 'I' && name[name.size() - 1] == 'f')
else if (recursion_level <= 2 && name.size() >= 3 && name[name.size() - 2] == 'I' && name[name.size() - 1] == 'f')
{
if (argument_types.empty())
throw Exception{
@ -682,7 +682,7 @@ AggregateFunctionPtr AggregateFunctionFactory::get(const String & name, const Da
AggregateFunctionPtr nested = get(String(name.data(), name.size() - 2), nested_dt, recursion_level + 1);
return new AggregateFunctionIf(nested);
}
else if (recursion_level <= 2 && name.size() > strlen("Array") && !(strcmp(name.data() + name.size() - strlen("Array"), "Array")))
else if (recursion_level <= 3 && name.size() > strlen("Array") && !(strcmp(name.data() + name.size() - strlen("Array"), "Array")))
{
/// For aggregate functions of the form aggArray, where agg is the name of another aggregate function.
size_t num_agruments = argument_types.size();
@ -695,7 +695,7 @@ AggregateFunctionPtr AggregateFunctionFactory::get(const String & name, const Da
else
throw Exception("Illegal type " + argument_types[i]->getName() + " of argument #" + toString(i + 1) + " for aggregate function " + name + ". Must be array.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
}
AggregateFunctionPtr nested = get(String(name.data(), name.size() - strlen("Array")), nested_arguments, recursion_level + 2); /// + 2, so that no other modifier can come before Array
AggregateFunctionPtr nested = get(String(name.data(), name.size() - strlen("Array")), nested_arguments, recursion_level + 3); /// + 3, so that no other modifier can come before Array
return new AggregateFunctionArray(nested);
}
else
@ -765,14 +765,14 @@ bool AggregateFunctionFactory::isAggregateFunctionName(const String & name, int
if (recursion_level <= 0 && name.size() > strlen("State") && !(strcmp(name.data() + name.size() - strlen("State"), "State")))
return isAggregateFunctionName(String(name.data(), name.size() - strlen("State")), recursion_level + 1);
/// For aggregate functions of the form aggMerge, where agg is the name of another aggregate function.
if (recursion_level <= 0 && name.size() > strlen("Merge") && !(strcmp(name.data() + name.size() - strlen("Merge"), "Merge")))
if (recursion_level <= 1 && name.size() > strlen("Merge") && !(strcmp(name.data() + name.size() - strlen("Merge"), "Merge")))
return isAggregateFunctionName(String(name.data(), name.size() - strlen("Merge")), recursion_level + 1);
/// For aggregate functions of the form aggIf, where agg is the name of another aggregate function.
if (recursion_level <= 1 && name.size() >= 3 && name[name.size() - 2] == 'I' && name[name.size() - 1] == 'f')
if (recursion_level <= 2 && name.size() >= 3 && name[name.size() - 2] == 'I' && name[name.size() - 1] == 'f')
return isAggregateFunctionName(String(name.data(), name.size() - 2), recursion_level + 1);
/// For aggregate functions of the form aggArray, where agg is the name of another aggregate function.
if (recursion_level <= 2 && name.size() > strlen("Array") && !(strcmp(name.data() + name.size() - strlen("Array"), "Array")))
return isAggregateFunctionName(String(name.data(), name.size() - strlen("Array")), recursion_level + 2); /// + 2, so that no other modifier can come before Array
if (recursion_level <= 3 && name.size() > strlen("Array") && !(strcmp(name.data() + name.size() - strlen("Array"), "Array")))
return isAggregateFunctionName(String(name.data(), name.size() - strlen("Array")), recursion_level + 3); /// + 3, so that no other modifier can come before Array
return false;
}
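A minimal sketch of the suffix ordering these recursion_level thresholds enforce (State may only be outermost, then Merge, then If, then Array). It mirrors the thresholds only and deliberately ignores whether the remaining base name is a registered aggregate function; the helper names are made up.

#include <iostream>
#include <string>

static bool endsWith(const std::string & s, const std::string & suffix)
{
    return s.size() > suffix.size() && 0 == s.compare(s.size() - suffix.size(), suffix.size(), suffix);
}

/// Strips combinator suffixes right-to-left with the same level thresholds as above,
/// returning the base name that would be looked up, e.g. "uniqMergeState" -> "uniq".
static std::string stripCombinators(std::string name, int level = 0)
{
    if (level == 0 && endsWith(name, "State"))
        return stripCombinators(name.substr(0, name.size() - 5), level + 1);
    if (level <= 1 && endsWith(name, "Merge"))
        return stripCombinators(name.substr(0, name.size() - 5), level + 1);
    if (level <= 2 && endsWith(name, "If"))
        return stripCombinators(name.substr(0, name.size() - 2), level + 1);
    if (level <= 3 && endsWith(name, "Array"))
        return stripCombinators(name.substr(0, name.size() - 5), level + 3);   /// + 3: nothing else may come before Array
    return name;
}

int main()
{
    std::cout << stripCombinators("uniqMergeState") << '\n';   /// uniq   (newly allowed: Merge under State, as the test below uses)
    std::cout << stripCombinators("sumArrayIf") << '\n';       /// sum
    std::cout << stripCombinators("sumIfArray") << '\n';       /// sumIf  (If is not recognized before Array)
}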

View File

@ -369,6 +369,19 @@ private:
}
/** A check for the case when a multi-line query is pasted into the terminal from the clipboard.
* Allows not to start executing one line of the query until the whole query has been pasted.
*/
static bool hasDataInSTDIN()
{
timeval timeout = { 0, 0 };
fd_set fds;
FD_ZERO(&fds);
FD_SET(STDIN_FILENO, &fds);
return select(1, &fds, 0, 0, &timeout) == 1;
}
void loop()
{
String query;
@ -395,7 +408,7 @@ private:
query += line;
if (!ends_with_backslash && (ends_with_semicolon || has_vertical_output_suffix || !config().has("multiline")))
if (!ends_with_backslash && (ends_with_semicolon || has_vertical_output_suffix || (!config().has("multiline") && !hasDataInSTDIN())))
{
if (query != prev_query)
{
@ -464,6 +477,12 @@ private:
copyData(in, out);
}
process(line);
}
bool process(const String & line)
{
if (config().has("multiquery"))
{
/// Several queries separated by ';'.
@ -494,17 +513,20 @@ private:
while (isWhitespace(*begin) || *begin == ';')
++begin;
process(query, ast);
if (!processSingleQuery(query, ast))
return false;
}
return true;
}
else
{
process(line);
return processSingleQuery(line);
}
}
bool process(const String & line, ASTPtr parsed_query_ = nullptr)
bool processSingleQuery(const String & line, ASTPtr parsed_query_ = nullptr)
{
if (exit_strings.end() != exit_strings.find(line))
return false;
@ -838,15 +860,8 @@ private:
}
void onData(Block & block)
void initBlockOutputStream(const Block & block)
{
if (written_progress_chars)
clearProgress();
if (!block)
return;
processed_rows += block.rows();
if (!block_std_out)
{
String current_format = format;
@ -869,8 +884,21 @@ private:
block_std_out = context.getFormatFactory().getOutput(current_format, std_out, block);
block_std_out->writePrefix();
}
}
/// The header block with zero rows was used to initialize block_std_out,
void onData(Block & block)
{
if (written_progress_chars)
clearProgress();
if (!block)
return;
processed_rows += block.rows();
initBlockOutputStream(block);
/// The header block with zero rows was used to initialize block_std_out,
/// it does not need to be output
if (block.rows() != 0)
{
@ -885,11 +913,13 @@ private:
void onTotals(Block & block)
{
initBlockOutputStream(block);
block_std_out->setTotals(block);
}
void onExtremes(Block & block)
{
initBlockOutputStream(block);
block_std_out->setExtremes(block);
}

View File

@ -302,7 +302,13 @@ std::string Block::dumpStructure() const
{
if (it != data.begin())
res << ", ";
res << it->name << ' ' << it->type->getName() << ' ' << it->column->getName() << ' ' << it->column->size();
res << it->name << ' ' << it->type->getName();
if (it->column)
res << ' ' << it->column->getName() << ' ' << it->column->size();
else
res << "nullptr";
}
return res.str();
}

View File

@ -26,7 +26,7 @@ namespace DB
{
BlockInputStreamPtr FormatFactory::getInput(const String & name, ReadBuffer & buf,
Block & sample, size_t max_block_size) const
const Block & sample, size_t max_block_size) const
{
if (name == "Native")
return new NativeBlockInputStream(buf);
@ -48,7 +48,7 @@ BlockInputStreamPtr FormatFactory::getInput(const String & name, ReadBuffer & bu
BlockOutputStreamPtr FormatFactory::getOutput(const String & name, WriteBuffer & buf,
Block & sample) const
const Block & sample) const
{
if (name == "Native")
return new NativeBlockOutputStream(buf);

View File

@ -27,6 +27,7 @@ void registerFunctionsArray(FunctionFactory & factory)
factory.registerFunction<FunctionEmptyArrayDate>();
factory.registerFunction<FunctionEmptyArrayDateTime>();
factory.registerFunction<FunctionEmptyArrayString>();
factory.registerFunction<FunctionEmptyArrayToSingle>();
factory.registerFunction<FunctionRange>();
}

View File

@ -5,6 +5,7 @@
#include <Poco/SharedPtr.h>
#include <Poco/Mutex.h>
#include <Poco/File.h>
#include <Poco/UUIDGenerator.h>
#include <Yandex/logger_useful.h>
@ -96,6 +97,8 @@ struct ContextShared
/// Created when Distributed tables are created, since we need to wait until the Settings have been set
Poco::SharedPtr<Clusters> clusters;
Poco::UUIDGenerator uuid_generator;
bool shutdown_called = false;
@ -587,8 +590,12 @@ void Context::setCurrentDatabase(const String & name)
void Context::setCurrentQueryId(const String & query_id)
{
String query_id_to_set = query_id;
if (query_id_to_set.empty()) /// If the user did not pass their own query_id, generate it ourselves.
query_id_to_set = shared->uuid_generator.createRandom().toString();
Poco::ScopedLock<Poco::Mutex> lock(shared->mutex);
current_query_id = query_id;
current_query_id = query_id_to_set;
}
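The generator call used above comes straight from Poco; a tiny standalone check of what such a generated query_id looks like:

#include <iostream>
#include <Poco/UUIDGenerator.h>

int main()
{
    /// The same call the patch uses when the client did not supply a query_id.
    Poco::UUIDGenerator uuid_generator;
    std::cout << uuid_generator.createRandom().toString() << '\n';   /// a random UUID string
}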

View File

@ -6,6 +6,7 @@
#include <DB/DataTypes/DataTypeNested.h>
#include <DB/DataTypes/DataTypeArray.h>
#include <DB/Functions/IFunction.h>
#include <DB/Functions/FunctionsArray.h>
#include <set>
@ -268,6 +269,24 @@ void ExpressionAction::execute(Block & block) const
if (!any_array)
throw Exception("ARRAY JOIN of not array: " + *array_joined_columns.begin(), ErrorCodes::TYPE_MISMATCH);
/// For LEFT ARRAY JOIN, create columns in which empty arrays are replaced by arrays with one element - the default value.
std::map<String, ColumnPtr> non_empty_array_columns;
if (array_join_is_left)
{
for (const auto & name : array_joined_columns)
{
auto src_col = block.getByName(name);
Block tmp_block{src_col, {{}, src_col.type, {}}};
FunctionEmptyArrayToSingle().execute(tmp_block, {0}, 1);
non_empty_array_columns[name] = tmp_block.getByPosition(1).column;
}
any_array_ptr = non_empty_array_columns.begin()->second;
any_array = typeid_cast<const ColumnArray *>(&*any_array_ptr);
}
size_t columns = block.columns();
for (size_t i = 0; i < columns; ++i)
{
@ -278,7 +297,8 @@ void ExpressionAction::execute(Block & block) const
if (!typeid_cast<const DataTypeArray *>(&*current.type))
throw Exception("ARRAY JOIN of not array: " + current.name, ErrorCodes::TYPE_MISMATCH);
ColumnPtr array_ptr = current.column;
ColumnPtr array_ptr = array_join_is_left ? non_empty_array_columns[current.name] : current.column;
if (array_ptr->isConst())
array_ptr = dynamic_cast<const IColumnConst &>(*array_ptr).convertToFullColumn();
@ -379,7 +399,7 @@ std::string ExpressionAction::toString() const
break;
case ARRAY_JOIN:
ss << "ARRAY JOIN ";
ss << (array_join_is_left ? "LEFT " : "") << "ARRAY JOIN ";
for (NameSet::const_iterator it = array_joined_columns.begin(); it != array_joined_columns.end(); ++it)
{
if (it != array_joined_columns.begin())
@ -761,7 +781,7 @@ std::string ExpressionActions::getID() const
ss << actions[i].result_name;
if (actions[i].type == ExpressionAction::ARRAY_JOIN)
{
ss << "{";
ss << (actions[i].array_join_is_left ? "LEFT ARRAY JOIN" : "ARRAY JOIN") << "{";
for (NameSet::const_iterator it = actions[i].array_joined_columns.begin();
it != actions[i].array_joined_columns.end(); ++it)
{

View File

@ -87,6 +87,18 @@ const std::unordered_set<String> possibly_injective_function_names
"dictGetDateTime"
};
static bool functionIsInOperator(const String & name)
{
return name == "in" || name == "notIn";
}
static bool functionIsInOrGlobalInOperator(const String & name)
{
return name == "in" || name == "notIn" || name == "globalIn" || name == "globalNotIn";
}
void ExpressionAnalyzer::init()
{
select_query = typeid_cast<ASTSelectQuery *>(&*ast);
@ -95,6 +107,7 @@ void ExpressionAnalyzer::init()
LogicalExpressionsOptimizer logical_expressions_optimizer(select_query, settings);
logical_expressions_optimizer.optimizeDisjunctiveEqualityChains();
/// Adds to the set of known aliases those that are declared in the table structure (ALIAS columns).
addStorageAliases();
/// Creates the aliases dictionary: alias -> ASTPtr
@ -103,6 +116,9 @@ void ExpressionAnalyzer::init()
/// Common subexpression elimination. Rewrite rules.
normalizeTree();
/// Execute scalar subqueries - replace them with constant values.
executeScalarSubqueries();
/// GROUP BY injective function elimination.
optimizeGroupBy();
@ -388,7 +404,7 @@ void ExpressionAnalyzer::normalizeTreeImpl(
}
/// IN t may be specified, where t is a table, which is equivalent to IN (SELECT * FROM t).
if (func_node->name == "in" || func_node->name == "notIn" || func_node->name == "globalIn" || func_node->name == "globalNotIn")
if (functionIsInOrGlobalInOperator(func_node->name))
if (ASTIdentifier * right = typeid_cast<ASTIdentifier *>(&*func_node->arguments->children.at(1)))
right->kind = ASTIdentifier::Table;
@ -528,6 +544,145 @@ void ExpressionAnalyzer::normalizeTreeImpl(
finished_asts[initial_ast] = ast;
}
void ExpressionAnalyzer::executeScalarSubqueries()
{
if (!select_query)
executeScalarSubqueriesImpl(ast);
else
{
for (auto & child : ast->children)
{
/// Do not descend into FROM and JOIN.
if (child.get() != select_query->table.get() && child.get() != select_query->join.get())
executeScalarSubqueriesImpl(child);
}
}
}
static ASTPtr addTypeConversion(ASTLiteral * ast_, const String & type_name)
{
if (0 == type_name.compare(0, strlen("Array"), "Array"))
return ast_; /// Type conversion for arrays is not supported yet.
auto ast = std::unique_ptr<ASTLiteral>(ast_);
ASTFunction * func = new ASTFunction(ast->range);
ASTPtr res = func;
func->alias = ast->alias;
ast->alias.clear();
func->kind = ASTFunction::FUNCTION;
func->name = "to" + type_name;
ASTExpressionList * exp_list = new ASTExpressionList(ast->range);
func->arguments = exp_list;
func->children.push_back(func->arguments);
exp_list->children.push_back(ast.release());
return res;
}
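A standalone sketch of the shape of the expression addTypeConversion builds: the scalar subquery's value becomes a literal wrapped in a to<Type>(...) call, while arrays are passed through unchanged, as in the code above. The string building here is only illustrative; the real code constructs ASTFunction nodes.

#include <cstring>
#include <iostream>
#include <string>

static std::string addTypeConversionSketch(const std::string & literal, const std::string & type_name)
{
    if (0 == type_name.compare(0, strlen("Array"), "Array"))
        return literal;                                 /// arrays are left untouched
    return "to" + type_name + "(" + literal + ")";      /// e.g. toDate('2015-01-02')
}

int main()
{
    std::cout << addTypeConversionSketch("'2015-01-02'", "Date") << '\n';   /// toDate('2015-01-02')
    std::cout << addTypeConversionSketch("[1,2]", "Array(UInt8)") << '\n';  /// [1,2]
}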
void ExpressionAnalyzer::executeScalarSubqueriesImpl(ASTPtr & ast)
{
/** Replace subqueries that return exactly one row
* ("scalar" subqueries) with the corresponding constants.
*
* If the subquery returns more than one column, it is replaced with a tuple of constants.
*
* Particulars:
*
* The replacement happens during query analysis rather than during the main execution stage.
* This means the progress indicator will not work while these subqueries are executed,
* and such queries cannot be cancelled.
*
* On the other hand, the result of the subqueries can be used for the table index.
*
* Scalar subqueries are executed on the server that initiates the query.
* The query is sent to remote servers with the constants already substituted.
*/
if (ASTSubquery * subquery = typeid_cast<ASTSubquery *>(ast.get()))
{
Context subquery_context = context;
Settings subquery_settings = context.getSettings();
subquery_settings.limits.max_result_rows = 1;
subquery_settings.extremes = 0;
subquery_context.setSettings(subquery_settings);
ASTPtr query = subquery->children.at(0);
BlockIO res = InterpreterSelectQuery(query, subquery_context, QueryProcessingStage::Complete, subquery_depth + 1).execute();
Block block;
try
{
block = res.in->read();
if (!block)
throw Exception("Scalar subquery returned empty result", ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY);
if (block.rows() != 1 || res.in->read())
throw Exception("Scalar subquery returned more than one row", ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY);
}
catch (const Exception & e)
{
if (e.code() == ErrorCodes::TOO_MUCH_ROWS)
throw Exception("Scalar subquery returned more than one row", ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY);
else
throw;
}
size_t columns = block.columns();
if (columns == 1)
{
ASTLiteral * lit = new ASTLiteral(ast->range, (*block.getByPosition(0).column)[0]);
lit->alias = subquery->alias;
ast = addTypeConversion(lit, block.getByPosition(0).type->getName());
}
else
{
ASTFunction * tuple = new ASTFunction(ast->range);
tuple->alias = subquery->alias;
ast = tuple;
tuple->kind = ASTFunction::FUNCTION;
tuple->name = "tuple";
ASTExpressionList * exp_list = new ASTExpressionList(ast->range);
tuple->arguments = exp_list;
tuple->children.push_back(tuple->arguments);
exp_list->children.resize(columns);
for (size_t i = 0; i < columns; ++i)
{
exp_list->children[i] = addTypeConversion(
new ASTLiteral(ast->range, (*block.getByPosition(i).column)[0]),
block.getByPosition(i).type->getName());
}
}
}
else
{
/** Do not descend into subqueries in the arguments of IN.
* But if the argument is not a subquery, there may be subqueries deeper inside it, and we do need to descend into them.
*/
ASTFunction * func = typeid_cast<ASTFunction *>(ast.get());
if (func && func->kind == ASTFunction::FUNCTION
&& functionIsInOrGlobalInOperator(func->name))
{
for (auto & child : ast->children)
{
if (child.get() != func->arguments)
executeScalarSubqueriesImpl(child);
else
for (size_t i = 0, size = func->arguments->children.size(); i < size; ++i)
if (i != 1 || !typeid_cast<ASTSubquery *>(func->arguments->children[i].get()))
executeScalarSubqueriesImpl(func->arguments->children[i]);
}
}
else
for (auto & child : ast->children)
executeScalarSubqueriesImpl(child);
}
}
void ExpressionAnalyzer::optimizeGroupBy()
{
if (!(select_query && select_query->group_expression_list))
@ -657,7 +812,7 @@ void ExpressionAnalyzer::makeSetsForIndexImpl(ASTPtr & node, const Block & sampl
makeSetsForIndexImpl(child, sample_block);
ASTFunction * func = typeid_cast<ASTFunction *>(node.get());
if (func && func->kind == ASTFunction::FUNCTION && (func->name == "in" || func->name == "notIn"))
if (func && func->kind == ASTFunction::FUNCTION && functionIsInOperator(func->name))
{
IAST & args = *func->arguments;
ASTPtr & arg = args.children.at(1);
@ -693,7 +848,8 @@ static SharedPtr<InterpreterSelectQuery> interpretSubquery(
* Because the result of this subquery is not the result of the whole query yet.
* Instead, the limits
* max_rows_in_set, max_bytes_in_set, set_overflow_mode,
* max_rows_in_join, max_bytes_in_join, join_overflow_mode.
* max_rows_in_join, max_bytes_in_join, join_overflow_mode,
* which are checked separately (in the Set and Join objects), apply.
*/
Context subquery_context = context;
Settings subquery_settings = context.getSettings();
@ -1213,7 +1369,7 @@ void ExpressionAnalyzer::getActionsImpl(ASTPtr ast, bool no_subqueries, bool onl
actions_stack.addAction(ExpressionAction::copyColumn(arg->getColumnName(), result_name));
NameSet joined_columns;
joined_columns.insert(result_name);
actions_stack.addAction(ExpressionAction::arrayJoin(joined_columns));
actions_stack.addAction(ExpressionAction::arrayJoin(joined_columns, false));
}
return;
@ -1221,7 +1377,7 @@ void ExpressionAnalyzer::getActionsImpl(ASTPtr ast, bool no_subqueries, bool onl
if (node->kind == ASTFunction::FUNCTION)
{
if (node->name == "in" || node->name == "notIn" || node->name == "globalIn" || node->name == "globalNotIn")
if (functionIsInOrGlobalInOperator(node->name))
{
if (!no_subqueries)
{
@ -1510,7 +1666,7 @@ void ExpressionAnalyzer::addMultipleArrayJoinAction(ExpressionActionsPtr & actio
result_columns.insert(result_source.first);
}
actions->add(ExpressionAction::arrayJoin(result_columns));
actions->add(ExpressionAction::arrayJoin(result_columns, select_query->array_join_is_left));
}
bool ExpressionAnalyzer::appendArrayJoin(ExpressionActionsChain & chain, bool only_types)

View File

@ -7,7 +7,7 @@
#include <DB/IO/WriteHelpers.h>
#include <DB/DataStreams/MaterializingBlockInputStream.h>
#include <DB/DataStreams/copyData.h>
#include <DB/DataStreams/NullAndDoCopyBlockInputStream.h>
#include <DB/Parsers/ASTCreateQuery.h>
#include <DB/Parsers/ASTNameTypePair.h>
@ -42,7 +42,7 @@ InterpreterCreateQuery::InterpreterCreateQuery(ASTPtr query_ptr_, Context & cont
}
void InterpreterCreateQuery::executeImpl(bool assume_metadata_exists)
BlockIO InterpreterCreateQuery::executeImpl(bool assume_metadata_exists)
{
String path = context.getPath();
String current_database = context.getCurrentDatabase();
@ -81,7 +81,7 @@ void InterpreterCreateQuery::executeImpl(bool assume_metadata_exists)
if (!create.if_not_exists || !context.isDatabaseExist(database_name))
context.addDatabase(database_name);
return;
return {};
}
SharedPtr<InterpreterSelectQuery> interpreter_select;
@ -118,7 +118,7 @@ void InterpreterCreateQuery::executeImpl(bool assume_metadata_exists)
if (context.isTableExist(database_name, table_name))
{
if (create.if_not_exists)
return;
return {};
else
throw Exception("Table " + database_name + "." + table_name + " already exists.", ErrorCodes::TABLE_ALREADY_EXISTS);
}
@ -251,9 +251,16 @@ void InterpreterCreateQuery::executeImpl(bool assume_metadata_exists)
/// If the query is CREATE SELECT, insert the data into the table
if (create.select && storage_name != "View" && (storage_name != "MaterializedView" || create.is_populate))
{
BlockInputStreamPtr from = new MaterializingBlockInputStream(interpreter_select->execute().in);
copyData(*from, *res->write(query_ptr));
BlockIO io;
io.in_sample = select_sample;
io.in = new NullAndDoCopyBlockInputStream(
new MaterializingBlockInputStream(interpreter_select->execute().in),
res->write(query_ptr));
return io;
}
return {};
}
InterpreterCreateQuery::ColumnsAndDefaults InterpreterCreateQuery::parseColumns(ASTPtr expression_list)

View File

@ -100,6 +100,7 @@ BlockIO InterpreterInsertQuery::execute()
InterpreterSelectQuery interpreter_select{query.select, context};
BlockInputStreamPtr in{interpreter_select.execute().in};
res.in = new NullAndDoCopyBlockInputStream{in, out};
res.in_sample = interpreter_select.getSampleBlock();
}
return res;

View File

@ -330,9 +330,6 @@ BlockIO InterpreterSelectQuery::execute()
/// Limits on the result, quota on the result, and also the callback for progress.
if (IProfilingBlockInputStream * stream = dynamic_cast<IProfilingBlockInputStream *>(&*streams[0]))
{
stream->setProgressCallback(context.getProgressCallback());
stream->setProcessListElement(context.getProcessListElement());
/// The limits apply only to the final result.
if (to_stage == QueryProcessingStage::Complete)
{

View File

@ -166,6 +166,15 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
/// Keep the process list entry until the end of query processing.
res.process_list_entry = process_list_entry;
if (res.in)
{
if (IProfilingBlockInputStream * stream = dynamic_cast<IProfilingBlockInputStream *>(res.in.get()))
{
stream->setProgressCallback(context.getProgressCallback());
stream->setProcessListElement(context.getProcessListElement());
}
}
quota.addQuery(current_time);
/// Everything related to the query log.

View File

@ -556,12 +556,8 @@ bool ParserWithOptionalAlias::parseImpl(Pos & pos, Pos end, ASTPtr & node, Pos &
{
String alias_name = typeid_cast<ASTIdentifier &>(*alias_node).name;
if (ASTFunction * func = typeid_cast<ASTFunction *>(&*node))
func->alias = alias_name;
else if (ASTIdentifier * ident = typeid_cast<ASTIdentifier *>(&*node))
ident->alias = alias_name;
else if (ASTLiteral * lit = typeid_cast<ASTLiteral *>(&*node))
lit->alias = alias_name;
if (ASTWithAlias * ast_with_alias = dynamic_cast<ASTWithAlias *>(node.get()))
ast_with_alias->alias = alias_name;
else
{
expected = "alias cannot be here";

View File

@ -30,7 +30,7 @@ bool ParserJoin::parseImpl(Pos & pos, Pos end, ASTPtr & node, Pos & max_parsed_p
ParserString s_using("USING", true, true);
ParserNotEmptyExpressionList exp_list;
ParserSubquery subquery;
ParserWithOptionalAlias subquery(ParserPtr(new ParserSubquery));
ParserIdentifier identifier;
ws.ignore(pos, end);
@ -91,10 +91,6 @@ bool ParserJoin::parseImpl(Pos & pos, Pos end, ASTPtr & node, Pos & max_parsed_p
ws.ignore(pos, end);
/// An alias may be specified. At the moment it does not mean anything and is not used.
ParserAlias().ignore(pos, end);
ws.ignore(pos, end);
if (join->kind != ASTJoin::Cross)
{
if (!s_using.ignore(pos, end, max_parsed_pos, expected))

View File

@ -23,6 +23,7 @@ bool ParserSelectQuery::parseImpl(Pos & pos, Pos end, ASTPtr & node, Pos & max_p
ParserString s_select("SELECT", true, true);
ParserString s_distinct("DISTINCT", true, true);
ParserString s_from("FROM", true, true);
ParserString s_left("LEFT", true, true);
ParserString s_array("ARRAY", true, true);
ParserString s_join("JOIN", true, true);
ParserString s_using("USING", true, true);
@ -166,8 +167,22 @@ bool ParserSelectQuery::parseImpl(Pos & pos, Pos end, ASTPtr & node, Pos & max_p
if (!parse_final_and_sample())
return false;
/// ARRAY JOIN expr list
if (s_array.ignore(pos, end, max_parsed_pos, expected))
/// [LEFT] ARRAY JOIN expr list
Pos saved_pos = pos;
bool has_array_join = false;
if (s_left.ignore(pos, end, max_parsed_pos, expected) && ws.ignore(pos, end) && s_array.ignore(pos, end, max_parsed_pos, expected))
{
select_query->array_join_is_left = true;
has_array_join = true;
}
else
{
pos = saved_pos;
if (s_array.ignore(pos, end, max_parsed_pos, expected))
has_array_join = true;
}
if (has_array_join)
{
ws.ignore(pos, end);
@ -182,7 +197,7 @@ bool ParserSelectQuery::parseImpl(Pos & pos, Pos end, ASTPtr & node, Pos & max_p
ws.ignore(pos, end);
}
/// [GLOBAL] ANY|ALL INNER|LEFT JOIN (subquery) USING tuple
/// [GLOBAL] [ANY|ALL] INNER|LEFT|RIGHT|FULL|CROSS [OUTER] JOIN (subquery)|table_name USING tuple
join.parse(pos, end, select_query->join, max_parsed_pos, expected);
if (!parse_final_and_sample())
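A toy standalone illustration of the save/restore-position backtracking used above for the optional LEFT keyword: try to consume "LEFT ARRAY", and if that fails roll back and try plain "ARRAY". This scans a plain std::string; it is not the real ParserString/Pos machinery.

#include <iostream>
#include <string>

static bool ignoreWord(const std::string & s, size_t & pos, const std::string & word)
{
    if (s.compare(pos, word.size(), word) == 0)
    {
        pos += word.size();
        while (pos < s.size() && s[pos] == ' ')
            ++pos;
        return true;
    }
    return false;
}

int main()
{
    std::string query = "ARRAY JOIN arr";
    size_t pos = 0;

    size_t saved_pos = pos;
    bool array_join_is_left = false;
    bool has_array_join = false;

    if (ignoreWord(query, pos, "LEFT") && ignoreWord(query, pos, "ARRAY"))
    {
        array_join_is_left = true;
        has_array_join = true;
    }
    else
    {
        pos = saved_pos;                       /// roll back whatever a partial match consumed
        if (ignoreWord(query, pos, "ARRAY"))
            has_array_join = true;
    }

    std::cout << has_array_join << ' ' << array_join_is_left << '\n';   /// prints "1 0"
}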

View File

@ -63,12 +63,24 @@ String backQuoteIfNeed(const String & x)
}
String hightlight(const String & keyword, const String & color_sequence, const bool hilite)
static String hightlight(const String & keyword, const String & color_sequence, const bool hilite)
{
return hilite ? color_sequence + keyword + hilite_none : keyword;
}
static void writeAlias(const String & name, std::ostream & s, bool hilite, bool one_line)
{
s << (hilite ? hilite_keyword : "") << " AS " << (hilite ? hilite_alias : "");
WriteBufferFromOStream wb(s, 32);
writeProbablyBackQuotedString(name, wb);
wb.next();
s << (hilite ? hilite_none : "");
}
void formatAST(const ASTExpressionList & ast, std::ostream & s, size_t indent, bool hilite, bool one_line, bool need_parens)
{
for (ASTs::const_iterator it = ast.children.begin(); it != ast.children.end(); ++it)
@ -151,7 +163,9 @@ void formatAST(const ASTSelectQuery & ast, std::ostream & s, size_t indent, bo
if (ast.array_join_expression_list)
{
s << (hilite ? hilite_keyword : "") << nl_or_ws << indent_str << "ARRAY JOIN " << (hilite ? hilite_none : "");
s << (hilite ? hilite_keyword : "") << nl_or_ws << indent_str
<< (ast.array_join_is_left ? "LEFT " : "") << "ARRAY JOIN " << (hilite ? hilite_none : "");
one_line
? formatAST(*ast.array_join_expression_list, s, indent, hilite, one_line)
: formatExpressionListMultiline(typeid_cast<const ASTExpressionList &>(*ast.array_join_expression_list), s, indent, hilite);
@ -245,12 +259,23 @@ void formatAST(const ASTSelectQuery & ast, std::ostream & s, size_t indent, bo
void formatAST(const ASTSubquery & ast, std::ostream & s, size_t indent, bool hilite, bool one_line, bool need_parens)
{
/// If there is an alias, parentheses are required around the whole expression, including the alias. Because an expression of the form 0 AS x + 0 is syntactically invalid.
if (need_parens && !ast.alias.empty())
s << '(';
std::string indent_str = one_line ? "" : std::string(4 * indent, ' ');
std::string nl_or_nothing = one_line ? "" : "\n";
s << nl_or_nothing << indent_str << "(" << nl_or_nothing;
formatAST(*ast.children[0], s, indent + 1, hilite, one_line);
s << nl_or_nothing << indent_str << ")";
if (!ast.alias.empty())
{
writeAlias(ast.alias, s, hilite, one_line);
if (need_parens)
s << ')';
}
}
void formatAST(const ASTCreateQuery & ast, std::ostream & s, size_t indent, bool hilite, bool one_line, bool need_parens)
@ -461,17 +486,6 @@ void formatAST(const ASTInsertQuery & ast, std::ostream & s, size_t indent, bo
}
}
static void writeAlias(const String & name, std::ostream & s, bool hilite, bool one_line)
{
s << (hilite ? hilite_keyword : "") << " AS " << (hilite ? hilite_alias : "");
WriteBufferFromOStream wb(s, 32);
writeProbablyBackQuotedString(name, wb);
wb.next();
s << (hilite ? hilite_none : "");
}
void formatAST(const ASTFunction & ast, std::ostream & s, size_t indent, bool hilite, bool one_line, bool need_parens)
{
/// If there is an alias, parentheses are required around the whole expression, including the alias. Because an expression of the form 0 AS x + 0 is syntactically invalid.

View File

@ -1,6 +1,10 @@
#include <DB/Storages/StorageSystemColumns.h>
#include <DB/Storages/MergeTree/MergeTreeData.h>
#include <DB/Storages/StorageMergeTree.h>
#include <DB/Storages/StorageReplicatedMergeTree.h>
#include <DB/Columns/ColumnString.h>
#include <DB/DataTypes/DataTypeString.h>
#include <DB/DataTypes/DataTypesNumberFixed.h>
#include <DB/DataStreams/OneBlockInputStream.h>
#include <DB/Common/VirtualColumnUtils.h>
@ -15,7 +19,8 @@ StorageSystemColumns::StorageSystemColumns(const std::string & name_)
{ "name", new DataTypeString },
{ "type", new DataTypeString },
{ "default_type", new DataTypeString },
{ "default_expression", new DataTypeString }
{ "default_expression", new DataTypeString },
{ "bytes", new DataTypeUInt64 },
}
{
}
@ -103,6 +108,7 @@ BlockInputStreams StorageSystemColumns::read(
ColumnPtr type_column = new ColumnString;
ColumnPtr default_type_column = new ColumnString;
ColumnPtr default_expression_column = new ColumnString;
ColumnPtr bytes_column = new ColumnUInt64;
size_t rows = filtered_database_column->size();
for (size_t i = 0; i < rows; ++i)
@ -112,6 +118,7 @@ BlockInputStreams StorageSystemColumns::read(
NamesAndTypesList columns;
ColumnDefaults column_defaults;
std::unordered_map<String, size_t> column_sizes;
{
StoragePtr storage = storages.at(std::make_pair(database_name, table_name));
@ -120,6 +127,26 @@ BlockInputStreams StorageSystemColumns::read(
columns = storage->getColumnsList();
columns.insert(std::end(columns), std::begin(storage->alias_columns), std::end(storage->alias_columns));
column_defaults = storage->column_defaults;
/** Data about column sizes for tables of the MergeTree family.
* NOTE: Later, we could add an interface that allows getting column sizes from IStorage.
*/
if (auto storage_concrete = dynamic_cast<StorageMergeTree *>(storage.get()))
{
column_sizes = storage_concrete->getData().getColumnSizes();
}
else if (auto storage_concrete = dynamic_cast<StorageReplicatedMergeTree *>(storage.get()))
{
column_sizes = storage_concrete->getData().getColumnSizes();
auto unreplicated_data = storage_concrete->getUnreplicatedData();
if (unreplicated_data)
{
auto unreplicated_column_sizes = unreplicated_data->getColumnSizes();
for (const auto & name_size : unreplicated_column_sizes)
column_sizes[name_size.first] += name_size.second;
}
}
}
for (const auto & column : columns)
@ -129,16 +156,26 @@ BlockInputStreams StorageSystemColumns::read(
name_column->insert(column.name);
type_column->insert(column.type->getName());
const auto it = column_defaults.find(column.name);
if (it == std::end(column_defaults))
{
default_type_column->insertDefault();
default_expression_column->insertDefault();
const auto it = column_defaults.find(column.name);
if (it == std::end(column_defaults))
{
default_type_column->insertDefault();
default_expression_column->insertDefault();
}
else
{
default_type_column->insert(toString(it->second.type));
default_expression_column->insert(queryToString(it->second.expression));
}
}
else
{
default_type_column->insert(toString(it->second.type));
default_expression_column->insert(queryToString(it->second.expression));
const auto it = column_sizes.find(column.name);
if (it == std::end(column_sizes))
bytes_column->insertDefault();
else
bytes_column->insert(it->second);
}
}
}
@ -151,6 +188,7 @@ BlockInputStreams StorageSystemColumns::read(
block.insert(ColumnWithTypeAndName(type_column, new DataTypeString, "type"));
block.insert(ColumnWithTypeAndName(default_type_column, new DataTypeString, "default_type"));
block.insert(ColumnWithTypeAndName(default_expression_column, new DataTypeString, "default_expression"));
block.insert(ColumnWithTypeAndName(bytes_column, new DataTypeUInt64, "bytes"));
return BlockInputStreams{ 1, new OneBlockInputStream(block) };
}
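A small standalone sketch of the size bookkeeping this file section adds: compressed sizes from the replicated part and, if present, the unreplicated part of a ReplicatedMergeTree table are merged into one map that feeds the new `bytes` column. The column names and numbers here are made up.

#include <iostream>
#include <string>
#include <unordered_map>

int main()
{
    std::unordered_map<std::string, size_t> column_sizes { { "EventDate", 120 }, { "UserID", 4096 } };
    std::unordered_map<std::string, size_t> unreplicated_column_sizes { { "EventDate", 30 }, { "Extra", 7 } };

    /// operator[] default-constructs missing entries to 0, so columns that exist
    /// only in the unreplicated data are picked up as well.
    for (const auto & name_size : unreplicated_column_sizes)
        column_sizes[name_size.first] += name_size.second;

    for (const auto & name_size : column_sizes)
        std::cout << name_size.first << '\t' << name_size.second << '\n';
}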

View File

@ -0,0 +1 @@
SELECT extractURLParameter('http://test.com/?testq=aaa&q=111', 'q');

View File

@ -0,0 +1,5 @@
1 1
1
1 1
('2015-01-02','Hello')
('2015-01-02','Hello') ('2015-01-02','Hello') 1 1

View File

@ -0,0 +1,5 @@
SELECT (SELECT (SELECT (SELECT (SELECT (SELECT count() FROM (SELECT * FROM system.numbers LIMIT 10)))))) = (SELECT 10), ((SELECT 1, 'Hello', [1, 2]).3)[1];
SELECT toUInt64((SELECT 9)) IN (SELECT number FROM system.numbers LIMIT 10);
SELECT (SELECT toDate('2015-01-02')) = toDate('2015-01-02'), 'Hello' = (SELECT 'Hello');
SELECT (SELECT toDate('2015-01-02'), 'Hello');
SELECT (SELECT toDate('2015-01-02'), 'Hello') AS x, x, identity((SELECT 1)), identity((SELECT 1) AS y);

View File

@ -0,0 +1,14 @@
[1,2]
[0]
[4,5,6]
[''] ['0000-00-00'] ['0000-00-00 00:00:00']
[0] [''] ['0000-00-00 00:00:00'] ['0000-00-00']
[0] ['0'] ['2015-01-01 00:00:00'] ['2015-01-01']
[0,1] [''] ['2015-01-01 00:00:00','2015-01-01 00:00:01'] ['2015-01-01','2015-01-02']
[0] ['0'] ['2015-01-01 00:00:00','2015-01-01 00:00:01','2015-01-01 00:00:02'] ['2015-01-01','2015-01-02','2015-01-03']
[0] [''] ['2015-01-01 00:00:00','2015-01-01 00:00:01','2015-01-01 00:00:02','2015-01-01 00:00:03'] ['0000-00-00']
[0,1] ['0'] ['0000-00-00 00:00:00'] ['2015-01-01']
[0] [''] ['2015-01-01 00:00:00'] ['2015-01-01','2015-01-02']
[0] ['0'] ['2015-01-01 00:00:00','2015-01-01 00:00:01'] ['2015-01-01','2015-01-02','2015-01-03']
[0,1] [''] ['2015-01-01 00:00:00','2015-01-01 00:00:01','2015-01-01 00:00:02'] ['0000-00-00']
[0] ['0'] ['2015-01-01 00:00:00','2015-01-01 00:00:01','2015-01-01 00:00:02','2015-01-01 00:00:03'] ['2015-01-01']

View File

@ -0,0 +1,8 @@
SELECT emptyArrayToSingle(arrayFilter(x -> x != 99, arrayJoin([[1, 2], [99], [4, 5, 6]])));
SELECT emptyArrayToSingle(emptyArrayString()), emptyArrayToSingle(emptyArrayDate()), emptyArrayToSingle(emptyArrayDateTime());
SELECT
emptyArrayToSingle(range(number % 3)),
emptyArrayToSingle(arrayMap(x -> toString(x), range(number % 2))),
emptyArrayToSingle(arrayMap(x -> toDateTime('2015-01-01 00:00:00') + x, range(number % 5))),
emptyArrayToSingle(arrayMap(x -> toDate('2015-01-01') + x, range(number % 4))) FROM system.numbers LIMIT 10;

View File

@ -0,0 +1,23 @@
0
1
2
2
3
4
5
5
6
7
0 [] 0
1 [0] 0
2 [0,1] 0
2 [0,1] 1
3 [] 0
4 [0] 0
5 [0,1] 0
5 [0,1] 1
6 [] 0
7 [0] 0
8 [0,1] 0
8 [0,1] 1
9 [] 0

View File

@ -0,0 +1,2 @@
SELECT number FROM system.numbers LEFT ARRAY JOIN range(number % 3) AS arr LIMIT 10;
SELECT number, arr, x FROM (SELECT number, range(number % 3) AS arr FROM system.numbers LIMIT 10) LEFT ARRAY JOIN arr AS x;

View File

@ -0,0 +1,7 @@
0 15 15
1 14 14
2 14 14
3 15 15
4 9 9
5 9 9
6 9 9

View File

@ -0,0 +1 @@
SELECT k % 7 AS k2, finalizeAggregation(uniqMergeState(state)), uniqMerge(state) FROM (SELECT k, uniqState(x) AS state FROM (SELECT number % 11 AS k, intDiv(number, 7) AS x FROM system.numbers LIMIT 100) GROUP BY k) GROUP BY k2 ORDER BY k2;

View File

@ -0,0 +1,11 @@
1
1
0
0
1
1
4 1 1

View File

@ -0,0 +1,11 @@
DROP TABLE IF EXISTS test.test;
CREATE TABLE test.test (x UInt8) ENGINE = Log;
INSERT INTO test.test SELECT 1 AS x;
INSERT INTO test.test SELECT 1 AS x SETTINGS extremes = 1;
INSERT INTO test.test SELECT 1 AS x GROUP BY 1 WITH TOTALS;
INSERT INTO test.test SELECT 1 AS x GROUP BY 1 WITH TOTALS SETTINGS extremes = 1;
SELECT count(), min(x), max(x) FROM test.test;
DROP TABLE test.test;

View File

@ -0,0 +1,6 @@
#!/bin/bash
curl -sS http://localhost:8123/?extremes=1 -d @- <<< "DROP TABLE IF EXISTS test.test"
curl -sS http://localhost:8123/?extremes=1 -d @- <<< "CREATE TABLE test.test (x UInt8) ENGINE = Log"
curl -sS http://localhost:8123/?extremes=1 -d @- <<< "INSERT INTO test.test SELECT 1 AS x"
curl -sS http://localhost:8123/?extremes=1 -d @- <<< "DROP TABLE test.test"