Mirror of https://github.com/ClickHouse/ClickHouse.git

Commit 2173bf5f4a: Merge branch 'master' into possible_segfaults_in_table_functions
@@ -13,7 +13,6 @@ ClickHouse is an open-source column-oriented database management system that all
-* You can also [fill this form](https://forms.yandex.com/surveys/meet-yandex-clickhouse-team/) to meet Yandex ClickHouse team in person.

## Upcoming Events

* [ClickHouse Meetup in Tokyo](https://clickhouse.connpass.com/event/147001/) on November 14.
* [ClickHouse Meetup in Istanbul](https://www.eventbrite.com/e/clickhouse-meetup-istanbul-create-blazing-fast-experiences-w-clickhouse-tickets-73101120419) on November 19.
* [ClickHouse Meetup in Ankara](https://www.eventbrite.com/e/clickhouse-meetup-ankara-create-blazing-fast-experiences-w-clickhouse-tickets-73100530655) on November 21.
* [ClickHouse Meetup in Singapore](https://www.meetup.com/Singapore-Clickhouse-Meetup-Group/events/265085331/) on November 23.
@@ -497,15 +497,21 @@ private:
            throw Exception("Cannot initialize readline", ErrorCodes::CANNOT_READLINE);

#if RL_VERSION_MAJOR >= 7
-        /// When bracketed paste mode is set, pasted text is bracketed with control sequences so
-        /// that the program can differentiate pasted text from typed-in text. This helps
-        /// clickhouse-client so that without -m flag, one can still paste multiline queries, and
-        /// possibly get better pasting performance. See https://cirw.in/blog/bracketed-paste for
-        /// more details.
-        rl_variable_bind("enable-bracketed-paste", "on");
+        /// Enable bracketed-paste-mode only when multiquery is enabled and multiline is
+        /// disabled, so that we are able to paste and execute multiline queries as a whole,
+        /// instead of erroring out, while being less intrusive.
+        if (config().has("multiquery") && !config().has("multiline"))
+        {
+            /// When bracketed paste mode is set, pasted text is bracketed with control sequences so
+            /// that the program can differentiate pasted text from typed-in text. This helps
+            /// clickhouse-client so that without -m flag, one can still paste multiline queries, and
+            /// possibly get better pasting performance. See https://cirw.in/blog/bracketed-paste for
+            /// more details.
+            rl_variable_bind("enable-bracketed-paste", "on");

-        /// Use our bracketed paste handler to get better user experience. See comments above.
-        rl_bind_keyseq(BRACK_PASTE_PREF, clickhouse_rl_bracketed_paste_begin);
+            /// Use our bracketed paste handler to get better user experience. See comments above.
+            rl_bind_keyseq(BRACK_PASTE_PREF, clickhouse_rl_bracketed_paste_begin);
+        }
#endif

        auto clear_prompt_or_exit = [](int)
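For readers unfamiliar with the mechanism: terminals that support bracketed paste wrap pasted input in the escape sequences `ESC[200~` and `ESC[201~` (the `BRACK_PASTE_PREF` key sequence above matches the opening one), and the mode itself is toggled with DECSET 2004. A minimal standalone sketch of the idea, not part of the patch:

```
#include <cstdio>

// Terminals supporting bracketed paste wrap pasted text like this:
//   ESC [ 200 ~   <pasted bytes>   ESC [ 201 ~
// so a line editor can tell pasted input apart from typed input.
int main()
{
    std::printf("\x1b[?2004h");  // ask the terminal to enable bracketed paste
    std::fflush(stdout);

    // ... read input here; a chunk starting with "\x1b[200~" is a paste,
    // and everything up to "\x1b[201~" should be taken literally ...

    std::printf("\x1b[?2004l");  // restore the default mode on exit
    return 0;
}
```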
@@ -751,6 +757,9 @@ private:

    bool process(const String & text)
    {
+        if (exit_strings.end() != exit_strings.find(trim(text, [](char c){ return isWhitespaceASCII(c) || c == ';'; })))
+            return false;
+
        const bool test_mode = config().has("testmode");
        if (config().has("multiquery"))
        {
@@ -845,9 +854,6 @@ private:

    bool processSingleQuery(const String & line, ASTPtr parsed_query_ = nullptr)
    {
-        if (exit_strings.end() != exit_strings.find(trim(line, [](char c){ return isWhitespaceASCII(c) || c == ';'; })))
-            return false;
-
        resetOutput();
        got_exception = false;

@@ -223,9 +223,10 @@ void MySQLHandler::authenticate(const String & user_name, const String & auth_pl
        authPluginSSL();
    }

-    try {
+    try
+    {
        std::optional<String> auth_response = auth_plugin_name == auth_plugin->getName() ? std::make_optional<String>(initial_auth_response) : std::nullopt;
-        auth_plugin->authenticate(user_name, auth_response, connection_context, packet_sender, secure_connection, socket().address());
+        auth_plugin->authenticate(user_name, auth_response, connection_context, packet_sender, secure_connection, socket().peerAddress());
    }
    catch (const Exception & exc)
    {
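Besides the brace-style fix, the substantive change swaps `socket().address()` (the local endpoint of the accepted connection) for `socket().peerAddress()` (the connecting client's endpoint), so authentication sees the client's address. A toy Poco sketch of the distinction, not MySQLHandler itself; the port is just an example:

```
#include <iostream>
#include <Poco/Net/ServerSocket.h>
#include <Poco/Net/StreamSocket.h>

int main()
{
    Poco::Net::ServerSocket server(9004);
    Poco::Net::StreamSocket conn = server.acceptConnection();

    // The local end of the accepted connection (our side).
    std::cout << "local:  " << conn.address().toString() << '\n';
    // The remote end: the address the client connects from,
    // which is what access checks should look at.
    std::cout << "client: " << conn.peerAddress().toString() << '\n';
    return 0;
}
```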
@@ -19,7 +19,7 @@ RegionsNames::RegionsNames(IRegionsNamesDataProviderPtr data_provider)
{
    for (size_t language_id = 0; language_id < SUPPORTED_LANGUAGES_COUNT; ++language_id)
    {
-        const std::string & language = getSupportedLanguages()[language_id];
+        const std::string & language = supported_languages[language_id];
        names_sources[language_id] = data_provider->getLanguageRegionsNamesSource(language);
    }

@@ -34,7 +34,7 @@ std::string RegionsNames::dumpSupportedLanguagesNames()
        if (i > 0)
            res += ", ";
        res += '\'';
-        res += getLanguageAliases()[i].name;
+        res += language_aliases[i].first;
        res += '\'';
    }
    return res;
@@ -48,7 +48,7 @@ void RegionsNames::reload()
    RegionID max_region_id = 0;
    for (size_t language_id = 0; language_id < SUPPORTED_LANGUAGES_COUNT; ++language_id)
    {
-        const std::string & language = getSupportedLanguages()[language_id];
+        const std::string & language = supported_languages[language_id];

        auto names_source = names_sources[language_id];

@@ -20,7 +20,7 @@
class RegionsNames
{
public:
-    enum class Language
+    enum class Language : size_t
    {
        RU = 0,
        EN,
@@ -28,36 +28,35 @@ public:
    BY,
    KZ,
    TR,
+
+    END
};

private:
-    static const size_t ROOT_LANGUAGE = 0;
-    static const size_t SUPPORTED_LANGUAGES_COUNT = 6;
-    static const size_t LANGUAGE_ALIASES_COUNT = 7;
-
-    static const char ** getSupportedLanguages()
-    {
-        static const char * res[]{"ru", "en", "ua", "by", "kz", "tr"};
-        return res;
-    }
-
-    struct language_alias
-    {
-        const char * const name;
-        const Language lang;
-    };
-    static const language_alias * getLanguageAliases()
-    {
-        static constexpr const language_alias language_aliases[]{{"ru", Language::RU},
-                                                                 {"en", Language::EN},
-                                                                 {"ua", Language::UA},
-                                                                 {"uk", Language::UA},
-                                                                 {"by", Language::BY},
-                                                                 {"kz", Language::KZ},
-                                                                 {"tr", Language::TR}};
-
-        return language_aliases;
-    }
+    static inline constexpr const char * supported_languages[] =
+    {
+        "ru",
+        "en",
+        "ua",
+        "by",
+        "kz",
+        "tr"
+    };
+
+    static inline constexpr std::pair<const char *, Language> language_aliases[] =
+    {
+        {"ru", Language::RU},
+        {"en", Language::EN},
+        {"ua", Language::UA},
+        {"uk", Language::UA},
+        {"by", Language::BY},
+        {"kz", Language::KZ},
+        {"tr", Language::TR}
+    };
+
+    static constexpr size_t ROOT_LANGUAGE = 0;
+    static constexpr size_t SUPPORTED_LANGUAGES_COUNT = size_t(Language::END);
+    static constexpr size_t LANGUAGE_ALIASES_COUNT = std::size(language_aliases);

    using NamesSources = std::vector<std::shared_ptr<ILanguageRegionsNamesDataSource>>;

@@ -94,9 +93,9 @@ public:
    {
        for (size_t i = 0; i < LANGUAGE_ALIASES_COUNT; ++i)
        {
-            const auto & alias = getLanguageAliases()[i];
-            if (language[0] == alias.name[0] && language[1] == alias.name[1])
-                return alias.lang;
+            const auto & alias = language_aliases[i];
+            if (language[0] == alias.first[0] && language[1] == alias.first[1])
+                return alias.second;
        }
    }
    throw Poco::Exception("Unsupported language for region name. Supported languages are: " + dumpSupportedLanguagesNames() + ".");
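The pattern behind this refactoring: before C++17, a header-only class exposed a constant table through a getter with a function-local static; C++17 inline variables let the table live directly in the class and keep its array bound visible. A minimal sketch of the two styles with illustrative names, not the ClickHouse types:

```
#include <cstddef>
#include <iterator>

// Pre-C++17 style: a getter hides a function-local static array, because a
// non-inline static data member would need an out-of-line definition.
struct OldStyle
{
    static const char ** names()
    {
        static const char * res[]{"ru", "en", "tr"};
        return res;  // callers cannot see the array bound
    }
};

// C++17 style: an inline constexpr member is defined right in the header,
// is usable in constant expressions, and keeps its array bound.
struct NewStyle
{
    static inline constexpr const char * names[] = {"ru", "en", "tr"};
    static constexpr std::size_t count = std::size(names);
};

static_assert(NewStyle::count == 3);
```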
@@ -58,10 +58,19 @@ private:
    struct UnpackedArrays
    {
        size_t base_rows = 0;
-        std::vector<char> is_const;
-        std::vector<const NullMap *> null_maps;
-        std::vector<const ColumnArray::ColumnOffsets::Container *> offsets;
-        ColumnRawPtrs nested_columns;
+
+        struct UnpackedArray
+        {
+            bool is_const = false;
+            const NullMap * null_map = nullptr;
+            const NullMap * overflow_mask = nullptr;
+            const ColumnArray::ColumnOffsets::Container * offsets = nullptr;
+            const IColumn * nested_column = nullptr;
+        };
+
+        std::vector<UnpackedArray> args;
+        Columns column_holders;

        UnpackedArrays() = default;
    };
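This first change replaces four parallel per-argument vectors with a single vector of per-argument structs, so the fields describing one argument can no longer fall out of sync. A generic sketch of the refactoring direction with toy names:

```
#include <vector>

// Before: "struct of arrays" - one index shared across several containers,
// and nothing ties the i-th entries together.
struct Before
{
    std::vector<char> is_const;
    std::vector<const void *> null_maps;
    std::vector<const void *> offsets;
    std::vector<const void *> nested_columns;
};

// After: "array of structs" - everything known about one argument lives in
// one object, and adding a field (like overflow_mask here) touches one place.
struct After
{
    struct Arg
    {
        bool is_const = false;
        const void * null_map = nullptr;
        const void * overflow_mask = nullptr;  // the new field in this commit
        const void * offsets = nullptr;
        const void * nested_column = nullptr;
    };
    std::vector<Arg> args;
};
```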
@@ -69,9 +78,16 @@ private:
    /// Cast column to data_type removing nullable if data_type hasn't.
    /// It's expected that column can represent data_type after removing some NullMap's.
    ColumnPtr castRemoveNullable(const ColumnPtr & column, const DataTypePtr & data_type) const;
-    Columns castColumns(Block & block, const ColumnNumbers & arguments,
+
+    struct CastArgumentsResult
+    {
+        ColumnsWithTypeAndName initial;
+        ColumnsWithTypeAndName casted;
+    };
+
+    CastArgumentsResult castColumns(Block & block, const ColumnNumbers & arguments,
        const DataTypePtr & return_type, const DataTypePtr & return_type_with_nulls) const;
-    UnpackedArrays prepareArrays(const Columns & columns) const;
+    UnpackedArrays prepareArrays(const ColumnsWithTypeAndName & columns, ColumnsWithTypeAndName & initial_columns) const;

    template <typename Map, typename ColumnType, bool is_numeric_column>
    static ColumnPtr execute(const UnpackedArrays & arrays, MutableColumnPtr result_data);
@@ -173,12 +189,13 @@ ColumnPtr FunctionArrayIntersect::castRemoveNullable(const ColumnPtr & column, c
    return column;
}

-Columns FunctionArrayIntersect::castColumns(
+FunctionArrayIntersect::CastArgumentsResult FunctionArrayIntersect::castColumns(
    Block & block, const ColumnNumbers & arguments, const DataTypePtr & return_type,
    const DataTypePtr & return_type_with_nulls) const
{
    size_t num_args = arguments.size();
-    Columns columns(num_args);
+    ColumnsWithTypeAndName initial_columns(num_args);
+    ColumnsWithTypeAndName columns(num_args);

    auto type_array = checkAndGetDataType<DataTypeArray>(return_type.get());
    auto & type_nested = type_array->getNestedType();
@@ -201,6 +218,8 @@ Columns FunctionArrayIntersect::castColumns(
    for (size_t i = 0; i < num_args; ++i)
    {
        const ColumnWithTypeAndName & arg = block.getByPosition(arguments[i]);
+        initial_columns[i] = arg;
+        columns[i] = arg;
        auto & column = columns[i];

        if (is_numeric_or_string)
@@ -208,68 +227,120 @@ Columns FunctionArrayIntersect::castColumns(
            /// Cast to Array(T) or Array(Nullable(T)).
            if (nested_is_nullable)
            {
-                if (arg.type->equals(*return_type))
-                    column = arg.column;
-                else
-                    column = castColumn(arg, return_type, context);
+                if (!arg.type->equals(*return_type))
+                {
+                    column.column = castColumn(arg, return_type, context);
+                    column.type = return_type;
+                }
            }
            else
            {
-                /// If result has array type Array(T) still cast Array(Nullable(U)) to Array(Nullable(T))
-                /// because cannot cast Nullable(T) to T.
-                if (arg.type->equals(*return_type) || arg.type->equals(*nullable_return_type))
-                    column = arg.column;
-                else if (static_cast<const DataTypeArray &>(*arg.type).getNestedType()->isNullable())
-                    column = castColumn(arg, nullable_return_type, context);
-                else
-                    column = castColumn(arg, return_type, context);
+                if (!arg.type->equals(*return_type) && !arg.type->equals(*nullable_return_type))
+                {
+                    /// If result has array type Array(T) still cast Array(Nullable(U)) to Array(Nullable(T))
+                    /// because cannot cast Nullable(T) to T.
+                    if (static_cast<const DataTypeArray &>(*arg.type).getNestedType()->isNullable())
+                    {
+                        column.column = castColumn(arg, nullable_return_type, context);
+                        column.type = nullable_return_type;
+                    }
+                    else
+                    {
+                        column.column = castColumn(arg, return_type, context);
+                        column.type = return_type;
+                    }
+                }
            }
        }
        else
        {
            /// return_type_with_nulls is the most common subtype with possible nullable parts.
-            if (arg.type->equals(*return_type_with_nulls))
-                column = arg.column;
-            else
-                column = castColumn(arg, return_type_with_nulls, context);
+            if (!arg.type->equals(*return_type_with_nulls))
+            {
+                column.column = castColumn(arg, return_type_with_nulls, context);
+                column.type = return_type_with_nulls;
+            }
        }
    }

-    return columns;
+    return {.initial = initial_columns, .casted = columns};
}

-FunctionArrayIntersect::UnpackedArrays FunctionArrayIntersect::prepareArrays(const Columns & columns) const
+static ColumnPtr callFunctionNotEquals(ColumnWithTypeAndName first, ColumnWithTypeAndName second, const Context & context)
+{
+    ColumnsWithTypeAndName args;
+    args.reserve(2);
+    args.emplace_back(std::move(first));
+    args.emplace_back(std::move(second));
+
+    auto eq_func = FunctionFactory::instance().get("notEquals", context)->build(args);
+
+    Block block = args;
+    block.insert({nullptr, eq_func->getReturnType(), ""});
+
+    eq_func->execute(block, {0, 1}, 2, args.front().column->size());
+
+    return block.getByPosition(2).column;
+}
+
+FunctionArrayIntersect::UnpackedArrays FunctionArrayIntersect::prepareArrays(
+    const ColumnsWithTypeAndName & columns, ColumnsWithTypeAndName & initial_columns) const
{
    UnpackedArrays arrays;

    size_t columns_number = columns.size();
-    arrays.is_const.assign(columns_number, false);
-    arrays.null_maps.resize(columns_number);
-    arrays.offsets.resize(columns_number);
-    arrays.nested_columns.resize(columns_number);
+    arrays.args.resize(columns_number);

    bool all_const = true;

    for (auto i : ext::range(0, columns_number))
    {
-        auto argument_column = columns[i].get();
+        auto & arg = arrays.args[i];
+        auto argument_column = columns[i].column.get();
+        auto initial_column = initial_columns[i].column.get();

        if (auto argument_column_const = typeid_cast<const ColumnConst *>(argument_column))
        {
-            arrays.is_const[i] = true;
+            arg.is_const = true;
            argument_column = argument_column_const->getDataColumnPtr().get();
+            initial_column = typeid_cast<const ColumnConst *>(initial_column)->getDataColumnPtr().get();
        }

        if (auto argument_column_array = typeid_cast<const ColumnArray *>(argument_column))
        {
-            if (!arrays.is_const[i])
+            if (!arg.is_const)
                all_const = false;

-            arrays.offsets[i] = &argument_column_array->getOffsets();
-            arrays.nested_columns[i] = &argument_column_array->getData();
-            if (auto column_nullable = typeid_cast<const ColumnNullable *>(arrays.nested_columns[i]))
+            arg.offsets = &argument_column_array->getOffsets();
+            arg.nested_column = &argument_column_array->getData();
+
+            initial_column = &typeid_cast<const ColumnArray *>(initial_column)->getData();
+
+            if (auto column_nullable = typeid_cast<const ColumnNullable *>(arg.nested_column))
            {
-                arrays.null_maps[i] = &column_nullable->getNullMapData();
-                arrays.nested_columns[i] = &column_nullable->getNestedColumn();
+                arg.null_map = &column_nullable->getNullMapData();
+                arg.nested_column = &column_nullable->getNestedColumn();
+                initial_column = &typeid_cast<const ColumnNullable *>(initial_column)->getNestedColumn();
            }
+
+            /// In case the column was casted, we need to create an overflow mask for integer types.
+            if (arg.nested_column != initial_column)
+            {
+                auto & nested_init_type = typeid_cast<const DataTypeArray *>(removeNullable(initial_columns[i].type).get())->getNestedType();
+                auto & nested_cast_type = typeid_cast<const DataTypeArray *>(removeNullable(columns[i].type).get())->getNestedType();
+
+                if (isInteger(nested_init_type) || isDateOrDateTime(nested_init_type))
+                {
+                    /// Compare original and casted columns. It seems to be the easiest way.
+                    auto overflow_mask = callFunctionNotEquals(
+                            {arg.nested_column->getPtr(), nested_init_type, ""},
+                            {initial_column->getPtr(), nested_cast_type, ""},
+                            context);
+
+                    arg.overflow_mask = &typeid_cast<const ColumnUInt8 *>(overflow_mask.get())->getData();
+                    arrays.column_holders.emplace_back(std::move(overflow_mask));
+                }
+            }
        }
        else
@@ -278,16 +349,16 @@ FunctionArrayIntersect::UnpackedArrays FunctionArrayIntersect::prepareArrays(con

    if (all_const)
    {
-        arrays.base_rows = arrays.offsets.front()->size();
+        arrays.base_rows = arrays.args.front().offsets->size();
    }
    else
    {
        for (auto i : ext::range(0, columns_number))
        {
-            if (arrays.is_const[i])
+            if (arrays.args[i].is_const)
                continue;

-            size_t rows = arrays.offsets[i]->size();
+            size_t rows = arrays.args[i].offsets->size();
            if (arrays.base_rows == 0 && rows > 0)
                arrays.base_rows = rows;
            else if (arrays.base_rows != rows)
@@ -322,9 +393,9 @@ void FunctionArrayIntersect::executeImpl(Block & block, const ColumnNumbers & ar

    auto return_type_with_nulls = getMostSubtype(data_types, true, true);

-    Columns columns = castColumns(block, arguments, return_type, return_type_with_nulls);
+    auto columns = castColumns(block, arguments, return_type, return_type_with_nulls);

-    UnpackedArrays arrays = prepareArrays(columns);
+    UnpackedArrays arrays = prepareArrays(columns.casted, columns.initial);

    ColumnPtr result_column;
    auto not_nullable_nested_return_type = removeNullable(nested_return_type);
@@ -356,7 +427,7 @@ void FunctionArrayIntersect::executeImpl(Block & block, const ColumnNumbers & ar
            result_column = execute<StringMap, ColumnFixedString, false>(arrays, std::move(column));
        else
        {
-            column = static_cast<const DataTypeArray &>(*return_type_with_nulls).getNestedType()->createColumn();
+            column = assert_cast<const DataTypeArray &>(*return_type_with_nulls).getNestedType()->createColumn();
            result_column = castRemoveNullable(execute<StringMap, IColumn, false>(arrays, std::move(column)), return_type);
        }
    }
@@ -377,24 +448,24 @@ void FunctionArrayIntersect::NumberExecutor::operator()()
template <typename Map, typename ColumnType, bool is_numeric_column>
ColumnPtr FunctionArrayIntersect::execute(const UnpackedArrays & arrays, MutableColumnPtr result_data_ptr)
{
-    auto args = arrays.nested_columns.size();
+    auto args = arrays.args.size();
    auto rows = arrays.base_rows;

    bool all_nullable = true;

    std::vector<const ColumnType *> columns;
    columns.reserve(args);
-    for (auto arg : ext::range(0, args))
+    for (auto & arg : arrays.args)
    {
        if constexpr (std::is_same<ColumnType, IColumn>::value)
-            columns.push_back(arrays.nested_columns[arg]);
+            columns.push_back(arg.nested_column);
        else
-            columns.push_back(checkAndGetColumn<ColumnType>(arrays.nested_columns[arg]));
+            columns.push_back(checkAndGetColumn<ColumnType>(arg.nested_column));

        if (!columns.back())
            throw Exception("Unexpected array type for function arrayIntersect", ErrorCodes::LOGICAL_ERROR);

-        if (!arrays.null_maps[arg])
+        if (!arg.null_map)
            all_nullable = false;
    }

@@ -415,44 +486,45 @@ ColumnPtr FunctionArrayIntersect::execute(const UnpackedArrays & arrays, Mutable

        bool all_has_nullable = all_nullable;

-        for (auto arg : ext::range(0, args))
+        for (auto arg_num : ext::range(0, args))
        {
+            auto & arg = arrays.args[arg_num];
            bool current_has_nullable = false;

            size_t off;
            // const array has only one row
-            bool const_arg = arrays.is_const[arg];
-            if (const_arg)
-                off = (*arrays.offsets[arg])[0];
+            if (arg.is_const)
+                off = (*arg.offsets)[0];
            else
-                off = (*arrays.offsets[arg])[row];
+                off = (*arg.offsets)[row];

-            for (auto i : ext::range(prev_off[arg], off))
+            for (auto i : ext::range(prev_off[arg_num], off))
            {
-                if (arrays.null_maps[arg] && (*arrays.null_maps[arg])[i])
+                if (arg.null_map && (*arg.null_map)[i])
                    current_has_nullable = true;
-                else
+                else if (!arg.overflow_mask || (*arg.overflow_mask)[i] == 0)
                {
                    typename Map::mapped_type * value = nullptr;

                    if constexpr (is_numeric_column)
-                        value = &map[columns[arg]->getElement(i)];
+                        value = &map[columns[arg_num]->getElement(i)];
                    else if constexpr (std::is_same<ColumnType, ColumnString>::value || std::is_same<ColumnType, ColumnFixedString>::value)
-                        value = &map[columns[arg]->getDataAt(i)];
+                        value = &map[columns[arg_num]->getDataAt(i)];
                    else
                    {
                        const char * data = nullptr;
-                        value = &map[columns[arg]->serializeValueIntoArena(i, arena, data)];
+                        value = &map[columns[arg_num]->serializeValueIntoArena(i, arena, data)];
                    }

-                    if (*value == arg)
+                    /// Here we count the number of element appearances, but no more than once per array.
+                    if (*value == arg_num)
                        ++(*value);
                }
            }

-            prev_off[arg] = off;
-            if (const_arg)
-                prev_off[arg] = 0;
+            prev_off[arg_num] = off;
+            if (arg.is_const)
+                prev_off[arg_num] = 0;

            if (!current_has_nullable)
                all_has_nullable = false;
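The overflow mask exists because arguments are cast to a common type before intersecting: without it, `arrayIntersect([1], [257])` would report a match once 257 is narrowed and wraps around to 1. The new tests below (`select arrayIntersect([-100], [156]);` and `select arrayIntersect([1], [257]);`) expect empty arrays for exactly this reason. A minimal sketch of the detection idea on plain integers rather than ClickHouse columns:

```
#include <cstdint>
#include <iostream>
#include <vector>

// Detect narrowing overflow the way the patch does: cast, then compare the
// casted value with the original ("notEquals" applied element by element).
int main()
{
    std::vector<int32_t> original{1, 257, -100};
    std::vector<int8_t> casted(original.size());
    std::vector<bool> overflow_mask(original.size());

    for (size_t i = 0; i < original.size(); ++i)
    {
        casted[i] = static_cast<int8_t>(original[i]);   // 257 wraps to 1
        overflow_mask[i] = (casted[i] != original[i]);  // mark wrapped values
    }

    for (size_t i = 0; i < original.size(); ++i)
        std::cout << original[i] << " -> " << int(casted[i])
                  << (overflow_mask[i] ? " (overflow, skip)" : "") << '\n';
    return 0;
}
```

Elements flagged by the mask are skipped during counting, so a wrapped 257 can never match a genuine 1.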
@@ -932,14 +932,14 @@ static UInt64 getLimitForSorting(const ASTSelectQuery & query, const Context & c
}


-static SortingInfoPtr optimizeReadInOrder(const MergeTreeData & merge_tree, const ASTSelectQuery & query,
+static InputSortingInfoPtr optimizeReadInOrder(const MergeTreeData & merge_tree, const ASTSelectQuery & query,
    const Context & context, const SyntaxAnalyzerResultPtr & global_syntax_result)
{
    if (!merge_tree.hasSortingKey())
        return {};

    auto order_descr = getSortDescription(query, context);
-    SortDescription prefix_order_descr;
+    SortDescription order_key_prefix_descr;
    int read_direction = order_descr.at(0).direction;

    const auto & sorting_key_columns = merge_tree.getSortingKeyColumns();
@@ -954,7 +954,7 @@ static SortingInfoPtr optimizeReadInOrder(const MergeTreeData & merge_tree, cons
        /// or in some simple cases when order key element is wrapped into monotonic function.
        int current_direction = order_descr[i].direction;
        if (order_descr[i].column_name == sorting_key_columns[i] && current_direction == read_direction)
-            prefix_order_descr.push_back(order_descr[i]);
+            order_key_prefix_descr.push_back(order_descr[i]);
        else
        {
            auto ast = query.orderBy()->children[i]->children.at(0);
@@ -1002,14 +1002,14 @@ static SortingInfoPtr optimizeReadInOrder(const MergeTreeData & merge_tree, cons
            if (i == 0)
                read_direction = current_direction;

-            prefix_order_descr.push_back(order_descr[i]);
+            order_key_prefix_descr.push_back(order_descr[i]);
        }
    }

-    if (prefix_order_descr.empty())
+    if (order_key_prefix_descr.empty())
        return {};

-    return std::make_shared<SortingInfo>(std::move(prefix_order_descr), read_direction);
+    return std::make_shared<InputSortingInfo>(std::move(order_key_prefix_descr), read_direction);
}

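For orientation, the shape of the logic being renamed here: the optimizer keeps the longest prefix of the ORDER BY description that coincides with the table's sorting key columns under one common direction. A toy sketch of that prefix check, ignoring the monotonic-function case the real code also handles:

```
#include <cstddef>
#include <string>
#include <vector>

// Toy version of the prefix check in optimizeReadInOrder.
struct OrderItem { std::string column; int direction; };

std::vector<OrderItem> orderKeyPrefix(const std::vector<OrderItem> & order_descr,
                                      const std::vector<std::string> & sorting_key_columns)
{
    std::vector<OrderItem> prefix;
    int read_direction = order_descr.empty() ? 1 : order_descr.front().direction;

    for (size_t i = 0; i < order_descr.size() && i < sorting_key_columns.size(); ++i)
    {
        if (order_descr[i].column != sorting_key_columns[i]
            || order_descr[i].direction != read_direction)
            break;  // the real code additionally tries monotonic functions here
        prefix.push_back(order_descr[i]);
    }
    return prefix;  // empty result -> the optimization does not apply
}
```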
@@ -1033,11 +1033,11 @@ void InterpreterSelectQuery::executeImpl(TPipeline & pipeline, const BlockInputS
    const Settings & settings = context->getSettingsRef();
    auto & expressions = analysis_result;

-    SortingInfoPtr sorting_info;
+    InputSortingInfoPtr input_sorting_info;
    if (settings.optimize_read_in_order && storage && query.orderBy() && !query_analyzer->hasAggregation() && !query.final() && !query.join())
    {
        if (const auto * merge_tree_data = dynamic_cast<const MergeTreeData *>(storage.get()))
-            sorting_info = optimizeReadInOrder(*merge_tree_data, query, *context, syntax_analyzer_result);
+            input_sorting_info = optimizeReadInOrder(*merge_tree_data, query, *context, syntax_analyzer_result);
    }

    if (options.only_analyze)
@@ -1097,7 +1097,7 @@ void InterpreterSelectQuery::executeImpl(TPipeline & pipeline, const BlockInputS
        throw Exception("PREWHERE is not supported if the table is filtered by row-level security expression", ErrorCodes::ILLEGAL_PREWHERE);

    /** Read the data from Storage. from_stage - to what stage the request was completed in Storage. */
-    executeFetchColumns(from_stage, pipeline, sorting_info, expressions.prewhere_info, expressions.columns_to_remove_after_prewhere, save_context_and_storage);
+    executeFetchColumns(from_stage, pipeline, input_sorting_info, expressions.prewhere_info, expressions.columns_to_remove_after_prewhere, save_context_and_storage);

    LOG_TRACE(log, QueryProcessingStage::toString(from_stage) << " -> " << QueryProcessingStage::toString(options.to_stage));
}
@@ -1223,7 +1223,7 @@ void InterpreterSelectQuery::executeImpl(TPipeline & pipeline, const BlockInputS
        if (!expressions.second_stage && !expressions.need_aggregate && !expressions.has_having)
        {
            if (expressions.has_order_by)
-                executeOrder(pipeline, query_info.sorting_info);
+                executeOrder(pipeline, query_info.input_sorting_info);

            if (expressions.has_order_by && query.limitLength())
                executeDistinct(pipeline, false, expressions.selected_columns);
@@ -1296,7 +1296,7 @@ void InterpreterSelectQuery::executeImpl(TPipeline & pipeline, const BlockInputS
            if (!expressions.first_stage && !expressions.need_aggregate && !(query.group_by_with_totals && !aggregate_final))
                executeMergeSorted(pipeline);
            else    /// Otherwise, just sort.
-                executeOrder(pipeline, query_info.sorting_info);
+                executeOrder(pipeline, query_info.input_sorting_info);
        }

        /** Optimization - if there are several sources and there is LIMIT, then first apply the preliminary LIMIT,
@@ -1356,7 +1356,7 @@ void InterpreterSelectQuery::executeImpl(TPipeline & pipeline, const BlockInputS
template <typename TPipeline>
void InterpreterSelectQuery::executeFetchColumns(
    QueryProcessingStage::Enum processing_stage, TPipeline & pipeline,
-    const SortingInfoPtr & sorting_info, const PrewhereInfoPtr & prewhere_info, const Names & columns_to_remove_after_prewhere,
+    const InputSortingInfoPtr & input_sorting_info, const PrewhereInfoPtr & prewhere_info, const Names & columns_to_remove_after_prewhere,
    QueryPipeline & save_context_and_storage)
{
    constexpr bool pipeline_with_processors = std::is_same<TPipeline, QueryPipeline>::value;
@@ -1674,7 +1674,7 @@ void InterpreterSelectQuery::executeFetchColumns(
    query_info.syntax_analyzer_result = syntax_analyzer_result;
    query_info.sets = query_analyzer->getPreparedSets();
    query_info.prewhere_info = prewhere_info;
-    query_info.sorting_info = sorting_info;
+    query_info.input_sorting_info = input_sorting_info;

    BlockInputStreams streams;
    Pipes pipes;
@@ -2256,46 +2256,46 @@ void InterpreterSelectQuery::executeExpression(QueryPipeline & pipeline, const E
    });
}

-void InterpreterSelectQuery::executeOrder(Pipeline & pipeline, SortingInfoPtr sorting_info)
+void InterpreterSelectQuery::executeOrder(Pipeline & pipeline, InputSortingInfoPtr input_sorting_info)
{
    auto & query = getSelectQuery();
-    SortDescription order_descr = getSortDescription(query, *context);
+    SortDescription output_order_descr = getSortDescription(query, *context);
    const Settings & settings = context->getSettingsRef();
    UInt64 limit = getLimitForSorting(query, *context);

-    if (sorting_info)
+    if (input_sorting_info)
    {
        /* Case of sorting with optimization using sorting key.
         * We have several threads, each of them reads batch of parts in direct
         * or reverse order of sorting key using one input stream per part
         * and then merge them into one sorted stream.
         * At this stage we merge per-thread streams into one.
+         * If the input is sorted by some prefix of the sorting key required for output,
+         * we have to finish sorting after the merge.
         */

-        bool need_finish_sorting = (sorting_info->prefix_order_descr.size() < order_descr.size());
+        bool need_finish_sorting = (input_sorting_info->order_key_prefix_descr.size() < output_order_descr.size());

+        UInt64 limit_for_merging = (need_finish_sorting ? 0 : limit);
+        executeMergeSorted(pipeline, input_sorting_info->order_key_prefix_descr, limit_for_merging);
+
        if (need_finish_sorting)
        {
            pipeline.transform([&](auto & stream)
            {
-                stream = std::make_shared<PartialSortingBlockInputStream>(stream, order_descr, limit);
+                stream = std::make_shared<PartialSortingBlockInputStream>(stream, output_order_descr, limit);
            });
        }

-        UInt64 limit_for_merging = (need_finish_sorting ? 0 : limit);
-        executeMergeSorted(pipeline, sorting_info->prefix_order_descr, limit_for_merging);
-
        if (need_finish_sorting)
        {
            pipeline.firstStream() = std::make_shared<FinishSortingBlockInputStream>(
-                pipeline.firstStream(), sorting_info->prefix_order_descr,
-                order_descr, settings.max_block_size, limit);
+                pipeline.firstStream(), input_sorting_info->order_key_prefix_descr,
+                output_order_descr, settings.max_block_size, limit);
        }
    }
    else
    {
        pipeline.transform([&](auto & stream)
        {
-            auto sorting_stream = std::make_shared<PartialSortingBlockInputStream>(stream, order_descr, limit);
+            auto sorting_stream = std::make_shared<PartialSortingBlockInputStream>(stream, output_order_descr, limit);

            /// Limits on sorting
            IBlockInputStream::LocalLimits limits;
@@ -2311,16 +2311,16 @@ void InterpreterSelectQuery::executeOrder(Pipeline & pipeline, SortingInfoPtr so

        /// Merge the sorted blocks.
        pipeline.firstStream() = std::make_shared<MergeSortingBlockInputStream>(
-            pipeline.firstStream(), order_descr, settings.max_block_size, limit,
+            pipeline.firstStream(), output_order_descr, settings.max_block_size, limit,
            settings.max_bytes_before_remerge_sort,
            settings.max_bytes_before_external_sort, context->getTemporaryPath(), settings.min_free_disk_space_for_temporary_data);
    }
}

-void InterpreterSelectQuery::executeOrder(QueryPipeline & pipeline, SortingInfoPtr sorting_info)
+void InterpreterSelectQuery::executeOrder(QueryPipeline & pipeline, InputSortingInfoPtr input_sorting_info)
{
    auto & query = getSelectQuery();
-    SortDescription order_descr = getSortDescription(query, *context);
+    SortDescription output_order_descr = getSortDescription(query, *context);
    UInt64 limit = getLimitForSorting(query, *context);

    const Settings & settings = context->getSettingsRef();
@@ -2330,7 +2330,7 @@ void InterpreterSelectQuery::executeOrder(QueryPipeline & pipeline, SortingInfoP
    // limits.mode = IBlockInputStream::LIMITS_TOTAL;
    // limits.size_limits = SizeLimits(settings.max_rows_to_sort, settings.max_bytes_to_sort, settings.sort_overflow_mode);

-    if (sorting_info)
+    if (input_sorting_info)
    {
        /* Case of sorting with optimization using sorting key.
         * We have several threads, each of them reads batch of parts in direct
@@ -2339,16 +2339,7 @@ void InterpreterSelectQuery::executeOrder(QueryPipeline & pipeline, SortingInfoP
         * At this stage we merge per-thread streams into one.
         */

-        bool need_finish_sorting = (sorting_info->prefix_order_descr.size() < order_descr.size());
-
-        if (need_finish_sorting)
-        {
-            pipeline.addSimpleTransform([&](const Block & header, QueryPipeline::StreamType stream_type)
-            {
-                bool do_count_rows = stream_type == QueryPipeline::StreamType::Main;
-                return std::make_shared<PartialSortingTransform>(header, order_descr, limit, do_count_rows);
-            });
-        }
+        bool need_finish_sorting = (input_sorting_info->order_key_prefix_descr.size() < output_order_descr.size());

        if (pipeline.getNumStreams() > 1)
        {
@@ -2356,7 +2347,7 @@ void InterpreterSelectQuery::executeOrder(QueryPipeline & pipeline, SortingInfoP
            auto transform = std::make_shared<MergingSortedTransform>(
                pipeline.getHeader(),
                pipeline.getNumStreams(),
-                sorting_info->prefix_order_descr,
+                input_sorting_info->order_key_prefix_descr,
                settings.max_block_size, limit_for_merging);

            pipeline.addPipe({ std::move(transform) });
@@ -2364,11 +2355,17 @@ void InterpreterSelectQuery::executeOrder(QueryPipeline & pipeline, SortingInfoP

        if (need_finish_sorting)
        {
+            pipeline.addSimpleTransform([&](const Block & header, QueryPipeline::StreamType stream_type)
+            {
+                bool do_count_rows = stream_type == QueryPipeline::StreamType::Main;
+                return std::make_shared<PartialSortingTransform>(header, output_order_descr, limit, do_count_rows);
+            });
+
            pipeline.addSimpleTransform([&](const Block & header) -> ProcessorPtr
            {
                return std::make_shared<FinishSortingTransform>(
-                    header, sorting_info->prefix_order_descr,
-                    order_descr, settings.max_block_size, limit);
+                    header, input_sorting_info->order_key_prefix_descr,
+                    output_order_descr, settings.max_block_size, limit);
            });
        }

@@ -2378,7 +2375,7 @@ void InterpreterSelectQuery::executeOrder(QueryPipeline & pipeline, SortingInfoP
        pipeline.addSimpleTransform([&](const Block & header, QueryPipeline::StreamType stream_type)
        {
            bool do_count_rows = stream_type == QueryPipeline::StreamType::Main;
-            return std::make_shared<PartialSortingTransform>(header, order_descr, limit, do_count_rows);
+            return std::make_shared<PartialSortingTransform>(header, output_order_descr, limit, do_count_rows);
        });

        /// If there are several streams, we merge them into one
@@ -2391,7 +2388,7 @@ void InterpreterSelectQuery::executeOrder(QueryPipeline & pipeline, SortingInfoP
            return nullptr;

        return std::make_shared<MergeSortingTransform>(
-            header, order_descr, settings.max_block_size, limit,
+            header, output_order_descr, settings.max_block_size, limit,
            settings.max_bytes_before_remerge_sort,
            settings.max_bytes_before_external_sort, context->getTemporaryPath(), settings.min_free_disk_space_for_temporary_data);
    });
@@ -2816,11 +2813,11 @@ void InterpreterSelectQuery::executeExtremes(QueryPipeline & pipeline)
void InterpreterSelectQuery::executeSubqueriesInSetsAndJoins(Pipeline & pipeline, SubqueriesForSets & subqueries_for_sets)
{
    /// Merge streams to one. Use MergeSorting if data was read in sorted order, Union otherwise.
-    if (query_info.sorting_info)
+    if (query_info.input_sorting_info)
    {
        if (pipeline.stream_with_non_joined_data)
            throw Exception("Using read in order optimization, but has stream with non-joined data in pipeline", ErrorCodes::LOGICAL_ERROR);
-        executeMergeSorted(pipeline, query_info.sorting_info->prefix_order_descr, 0);
+        executeMergeSorted(pipeline, query_info.input_sorting_info->order_key_prefix_descr, 0);
    }
    else
        executeUnion(pipeline, {});
@@ -2831,11 +2828,11 @@ void InterpreterSelectQuery::executeSubqueriesInSetsAndJoins(Pipeline & pipeline

void InterpreterSelectQuery::executeSubqueriesInSetsAndJoins(QueryPipeline & pipeline, SubqueriesForSets & subqueries_for_sets)
{
-    if (query_info.sorting_info)
+    if (query_info.input_sorting_info)
    {
        if (pipeline.hasDelayedStream())
            throw Exception("Using read in order optimization, but has delayed stream in pipeline", ErrorCodes::LOGICAL_ERROR);
-        executeMergeSorted(pipeline, query_info.sorting_info->prefix_order_descr, 0);
+        executeMergeSorted(pipeline, query_info.input_sorting_info->order_key_prefix_descr, 0);
    }

    const Settings & settings = context->getSettingsRef();
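The finish-sorting step these renames revolve around relies on the input already being ordered by a key prefix: after merging streams by that prefix, only runs of rows with equal prefix values still need sorting on the remaining columns. A minimal sketch of that idea on plain structs, not the actual stream classes:

```
#include <algorithm>
#include <iostream>
#include <vector>

struct Row { int a; int b; };  // input is already sorted by `a`

int main()
{
    std::vector<Row> rows{{1, 9}, {1, 3}, {2, 7}, {2, 1}, {2, 4}, {3, 5}};

    // "Finish sorting": walk ranges of equal prefix values and sort each
    // range by the remaining key columns; no global sort is needed.
    for (auto begin = rows.begin(); begin != rows.end();)
    {
        auto end = std::find_if(begin, rows.end(),
                                [&](const Row & r) { return r.a != begin->a; });
        std::sort(begin, end, [](const Row & l, const Row & r) { return l.b < r.b; });
        begin = end;
    }

    for (const auto & r : rows)
        std::cout << r.a << ' ' << r.b << '\n';  // now sorted by (a, b)
    return 0;
}
```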
@@ -198,7 +198,7 @@ private:

    template <typename TPipeline>
    void executeFetchColumns(QueryProcessingStage::Enum processing_stage, TPipeline & pipeline,
-        const SortingInfoPtr & sorting_info, const PrewhereInfoPtr & prewhere_info,
+        const InputSortingInfoPtr & sorting_info, const PrewhereInfoPtr & prewhere_info,
        const Names & columns_to_remove_after_prewhere,
        QueryPipeline & save_context_and_storage);

@@ -208,7 +208,7 @@ private:
    void executeTotalsAndHaving(Pipeline & pipeline, bool has_having, const ExpressionActionsPtr & expression, bool overflow_row, bool final);
    void executeHaving(Pipeline & pipeline, const ExpressionActionsPtr & expression);
    void executeExpression(Pipeline & pipeline, const ExpressionActionsPtr & expression);
-    void executeOrder(Pipeline & pipeline, SortingInfoPtr sorting_info);
+    void executeOrder(Pipeline & pipeline, InputSortingInfoPtr sorting_info);
    void executeWithFill(Pipeline & pipeline);
    void executeMergeSorted(Pipeline & pipeline);
    void executePreLimit(Pipeline & pipeline);
@@ -227,7 +227,7 @@ private:
    void executeTotalsAndHaving(QueryPipeline & pipeline, bool has_having, const ExpressionActionsPtr & expression, bool overflow_row, bool final);
    void executeHaving(QueryPipeline & pipeline, const ExpressionActionsPtr & expression);
    void executeExpression(QueryPipeline & pipeline, const ExpressionActionsPtr & expression);
-    void executeOrder(QueryPipeline & pipeline, SortingInfoPtr sorting_info);
+    void executeOrder(QueryPipeline & pipeline, InputSortingInfoPtr sorting_info);
    void executeWithFill(QueryPipeline & pipeline);
    void executeMergeSorted(QueryPipeline & pipeline);
    void executePreLimit(QueryPipeline & pipeline);
@@ -604,9 +604,9 @@ Pipes MergeTreeDataSelectExecutor::readFromParts(
            virt_column_names,
            settings);
    }
-    else if (settings.optimize_read_in_order && query_info.sorting_info)
+    else if (settings.optimize_read_in_order && query_info.input_sorting_info)
    {
-        size_t prefix_size = query_info.sorting_info->prefix_order_descr.size();
+        size_t prefix_size = query_info.input_sorting_info->order_key_prefix_descr.size();
        auto order_key_prefix_ast = data.sorting_key_expr_ast->clone();
        order_key_prefix_ast->children.resize(prefix_size);

@@ -853,7 +853,7 @@ Pipes MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreamsWithOrder(
    const Settings & settings) const
{
    size_t sum_marks = 0;
-    SortingInfoPtr sorting_info = query_info.sorting_info;
+    const InputSortingInfoPtr & input_sorting_info = query_info.input_sorting_info;
    size_t adaptive_parts = 0;
    std::vector<size_t> sum_marks_in_parts(parts.size());
    const auto data_settings = data.getSettings();
@@ -1004,9 +1004,9 @@ Pipes MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreamsWithOrder(
                parts.emplace_back(part);
            }

-            ranges_to_get_from_part = split_ranges(ranges_to_get_from_part, sorting_info->direction);
+            ranges_to_get_from_part = split_ranges(ranges_to_get_from_part, input_sorting_info->direction);

-            if (sorting_info->direction == 1)
+            if (input_sorting_info->direction == 1)
            {
                pipes.emplace_back(std::make_shared<MergeTreeSelectProcessor>(
                    data, part.data_part, max_block_size, settings.preferred_block_size_bytes,
@@ -1029,9 +1029,9 @@ Pipes MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreamsWithOrder(
        if (pipes.size() > 1)
        {
            SortDescription sort_description;
-            for (size_t j = 0; j < query_info.sorting_info->prefix_order_descr.size(); ++j)
+            for (size_t j = 0; j < input_sorting_info->order_key_prefix_descr.size(); ++j)
                sort_description.emplace_back(data.sorting_key_columns[j],
-                                              sorting_info->direction, 1);
+                                              input_sorting_info->direction, 1);

            for (auto & pipe : pipes)
                pipe.addSimpleTransform(std::make_shared<ExpressionTransform>(pipe.getHeader(), sorting_key_prefix_expr));
@@ -34,18 +34,18 @@ struct FilterInfo
    bool do_remove_column = false;
};

-struct SortingInfo
+struct InputSortingInfo
{
-    SortDescription prefix_order_descr;
+    SortDescription order_key_prefix_descr;
    int direction;

-    SortingInfo(const SortDescription & prefix_order_descr_, int direction_)
-        : prefix_order_descr(prefix_order_descr_), direction(direction_) {}
+    InputSortingInfo(const SortDescription & order_key_prefix_descr_, int direction_)
+        : order_key_prefix_descr(order_key_prefix_descr_), direction(direction_) {}
};

using PrewhereInfoPtr = std::shared_ptr<PrewhereInfo>;
using FilterInfoPtr = std::shared_ptr<FilterInfo>;
-using SortingInfoPtr = std::shared_ptr<SortingInfo>;
+using InputSortingInfoPtr = std::shared_ptr<InputSortingInfo>;

struct SyntaxAnalyzerResult;
using SyntaxAnalyzerResultPtr = std::shared_ptr<const SyntaxAnalyzerResult>;
@@ -62,7 +62,7 @@ struct SelectQueryInfo

    PrewhereInfoPtr prewhere_info;

-    SortingInfoPtr sorting_info;
+    InputSortingInfoPtr input_sorting_info;

    /// Prepared sets are used for indices by storage engine.
    /// Example: x IN (1, 2, 3)
dbms/tests/performance/prewhere.xml (new file)
@@ -0,0 +1,28 @@
<test>
    <type>loop</type>

    <stop_conditions>
        <all_of>
            <iterations>5</iterations>
            <min_time_not_changing_for_ms>10000</min_time_not_changing_for_ms>
        </all_of>
        <any_of>
            <iterations>50</iterations>
            <total_time_ms>60000</total_time_ms>
        </any_of>
    </stop_conditions>

    <main_metric>
        <min_time />
    </main_metric>

    <preconditions>
        <table_exists>default.hits_10m_single</table_exists>
    </preconditions>

    <settings>
        <max_threads>1</max_threads>
    </settings>

    <query>SELECT Title, URL FROM hits_10m_single PREWHERE WatchID % 2 = 1 WHERE UserID = 10000 FORMAT Null</query>
</test>
dbms/tests/performance/string_set.xml (new file)
@@ -0,0 +1,38 @@
<test>
    <type>loop</type>

    <stop_conditions>
        <any_of>
            <iterations>10</iterations>
        </any_of>
    </stop_conditions>

    <main_metric>
        <rows_per_second />
    </main_metric>

    <preconditions>
        <table_exists>default.hits_10m_single</table_exists>
    </preconditions>

    <create_query>CREATE TABLE hits_10m_words (word String, UserID UInt64) ENGINE Memory</create_query>
    <create_query>CREATE TABLE strings (short String, long String) ENGINE Memory</create_query>

    <fill_query>INSERT INTO hits_10m_words SELECT DISTINCT arrayJoin(splitByString(' ', SearchPhrase)) AS word, UserID FROM hits_10m_single WHERE length(word) > 0</fill_query>
    <fill_query>INSERT INTO strings SELECT toString(rand()) a, a || a || a || a || a || a || a || a || a || a || a || a FROM numbers(1000000)</fill_query>

    <settings>
        <max_threads>1</max_threads>
    </settings>

    <query>SELECT 1 FROM hits_10m_words WHERE word IN (SELECT word FROM hits_10m_words) FORMAT Null</query>
    <query>SELECT 1 FROM strings WHERE short IN (SELECT short FROM strings) FORMAT Null</query>
    <query>SELECT 1 FROM strings WHERE long IN (SELECT long FROM strings) FORMAT Null</query>
    <query>SELECT 1 FROM strings WHERE short IN (SELECT long FROM strings) FORMAT Null</query>
    <query>SELECT 1 FROM strings WHERE long IN (SELECT short FROM strings) FORMAT Null</query>
    <query>SELECT 1 FROM hits_10m_words WHERE word IN (SELECT short FROM strings) FORMAT Null</query>
    <query>SELECT 1 FROM hits_10m_words WHERE word IN (SELECT long FROM strings) FORMAT Null</query>

    <drop_query>DROP TABLE IF EXISTS hits_10m_words</drop_query>
    <drop_query>DROP TABLE IF EXISTS strings</drop_query>
</test>
@@ -46,3 +46,6 @@
[]
[]
[]
+-
+[]
+[]
@@ -25,3 +25,6 @@ select arrayIntersect([], []) from array_intersect order by arr;

drop table if exists array_intersect;

+select '-';
+select arrayIntersect([-100], [156]);
+select arrayIntersect([1], [257]);
@@ -0,0 +1,30 @@
0 0
0 0
0 0
1 1
1 1
1 1
2 2
2 4
2 4
3 3
3 9
3 9
4 16
4 16
4 4
5 25
5 25
5 5
6 36
6 36
6 6
7 49
7 49
7 7
8 64
8 64
8 8
9 81
9 81
9 9
dbms/tests/queries/0_stateless/01034_order_by_pk_prefix.sql (new file)
@@ -0,0 +1,12 @@
DROP TABLE IF EXISTS test_table;

CREATE TABLE test_table (n Int32, s String)
ENGINE = MergeTree() PARTITION BY n % 10 ORDER BY n;

INSERT INTO test_table SELECT number, toString(number) FROM system.numbers LIMIT 100;
INSERT INTO test_table SELECT number, toString(number * number) FROM system.numbers LIMIT 100;
INSERT INTO test_table SELECT number, toString(number * number) FROM system.numbers LIMIT 100;

SELECT * FROM test_table ORDER BY n, s LIMIT 30;

DROP TABLE test_table;
@@ -3,7 +3,7 @@
#ccache -s # uncomment to display CCache statistics
mkdir -p /server/build_docker
cd /server/build_docker
-cmake -G Ninja /server -DCMAKE_C_COMPILER=`which gcc-8` -DCMAKE_CXX_COMPILER=`which g++-8`
+cmake -G Ninja /server -DCMAKE_C_COMPILER=`which gcc-9` -DCMAKE_CXX_COMPILER=`which g++-9`

# Set the number of build jobs to the half of number of virtual CPU cores (rounded up).
# By default, ninja uses all virtual CPU cores, which leads to very high memory consumption without much improvement in build time.
docs/ja/index.md (new file; translated from Japanese)
@@ -0,0 +1,142 @@
# What is ClickHouse?

ClickHouse is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).

In a "normal" row-oriented DBMS, data is stored in this order:

| Row | WatchID     | JavaEnable | Title              | GoodEvent | EventTime           |
| --- | ----------- | ---------- | ------------------ | --------- | ------------------- |
| #0  | 89354350662 | 1          | Investor Relations | 1         | 2016-05-18 05:19:20 |
| #1  | 90329509958 | 0          | Contact us         | 1         | 2016-05-18 08:10:20 |
| #2  | 89953706054 | 1          | Mission            | 1         | 2016-05-18 07:38:00 |
| #N  | ...         | ...        | ...                | ...       | ...                 |

In other words, all the values related to a row are physically stored next to each other.

Examples of row-oriented DBMSs: MySQL, Postgres, and MS SQL Server.
{: .grey }

In a column-oriented DBMS, data is stored like this:

| Row:        | #0                  | #1                  | #2                  | #N  |
| ----------- | ------------------- | ------------------- | ------------------- | --- |
| WatchID:    | 89354350662         | 90329509958         | 89953706054         | ... |
| JavaEnable: | 1                   | 0                   | 1                   | ... |
| Title:      | Investor Relations  | Contact us          | Mission             | ... |
| GoodEvent:  | 1                   | 1                   | 1                   | ... |
| EventTime:  | 2016-05-18 05:19:20 | 2016-05-18 08:10:20 | 2016-05-18 07:38:00 | ... |

These examples only show the order in which data is arranged. Values from different columns are stored separately, and data from the same column is stored together.

Examples of column-oriented DBMSs: Vertica, Paraccel (Actian Matrix and Amazon Redshift), Sybase IQ, Exasol, Infobright, InfiniDB, MonetDB (VectorWise and Actian Vector), LucidDB, SAP HANA, Google Dremel, Google PowerDrill, Druid, and kdb+.
{: .grey }

Different orders for storing data suit different scenarios. The data access scenario covers what queries are made, how often, and in what proportion; how much of each kind of data is read per query (rows, columns, bytes); the relationship between reading and updating data; the working size of the data and how locally it is used; whether transactions are used and how isolated they are; the requirements for data replication and logical integrity; and the latency and throughput requirements for each class of query, and so on.

The higher the load on the system, the more important it is to tailor the setup to the requirements of the usage scenario, and the more fine-grained this tailoring becomes. No system is equally well suited to significantly different scenarios. If a system is adaptable to a wide range of scenarios, then under high load it will either handle all of them equally poorly, or work well for only one or a few of them.

## Key properties of the OLAP scenario

- The vast majority of requests are for read access.
- Data is updated in fairly large batches (> 1000 rows), not row by row, or not updated at all.
- Data is added to the DB but never modified.
- Reads extract a very large number of rows from the DB, but only a small subset of the columns.
- Tables are "wide", meaning they contain a large number of columns.
- Queries are relatively rare (usually hundreds of queries per server per second, or fewer).
- For simple queries, a latency of around 50 ms is acceptable.
- Column values are fairly small: numbers and short strings (for example, 60 bytes per URL).
- High throughput is required when processing a single query (up to billions of rows per second per server).
- Transactions are not necessary.
- Requirements for data consistency are low.
- There is one large table per query; all tables but one are small.
- The query result is significantly smaller than the source data: the data is filtered or aggregated, so the result fits in a single server's RAM.

It is easy to see that the OLAP scenario is very different from other popular scenarios (such as OLTP or Key-Value access). So it makes no sense to try to use an OLTP or Key-Value DB for processing analytical queries if you want decent performance. For example, if you try to use MongoDB or Redis for analytics, you will get very poor performance compared to OLAP databases.

## Why column-oriented databases work better in the OLAP scenario

Column-oriented databases are better suited to OLAP scenarios: they are at least 100 times faster at processing most queries. The reasons are explained in detail below, but the gist is easy to demonstrate visually:

**Row-oriented DBMS**

![Row-oriented](images/row_oriented.gif#)

**Column-oriented DBMS**

![Column-oriented](images/column_oriented.gif#)

See the difference?

### Input/output

1. An analytical query only needs to read a small number of table columns. In a column-oriented database, you can read just the data you need. For example, if you need 5 columns out of 100, you can expect a 20-fold reduction in I/O.
2. Since data is read in packets, it is easier to compress. Data stored by columns is also easier to compress. This further reduces the I/O volume.
3. Thanks to the reduced I/O, more data fits in the system cache.

For example, the query "count the number of records for each advertising platform" requires reading one "advertising platform ID" column, which takes up 1 byte uncompressed. If most of the traffic was not from advertising platforms, you can expect at least 10-fold compression of this column. With a fast compression algorithm, data can be decompressed at a rate of at least several gigabytes of uncompressed data per second. In other words, this query can be processed at a rate of roughly several billion rows per second on a single server. This speed really is achieved in practice.

<details markdown="1"><summary>Example</summary>
```
$ clickhouse-client
ClickHouse client version 0.0.52053.
Connecting to localhost:9000.
Connected to ClickHouse server version 0.0.52053.

:) SELECT CounterID, count() FROM hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20

SELECT
    CounterID,
    count()
FROM hits
GROUP BY CounterID
ORDER BY count() DESC
LIMIT 20

┌─CounterID─┬──count()─┐
│    114208 │ 56057344 │
│    115080 │ 51619590 │
│      3228 │ 44658301 │
│     38230 │ 42045932 │
│    145263 │ 42042158 │
│     91244 │ 38297270 │
│    154139 │ 26647572 │
│    150748 │ 24112755 │
│    242232 │ 21302571 │
│    338158 │ 13507087 │
│     62180 │ 12229491 │
│     82264 │ 12187441 │
│    232261 │ 12148031 │
│    146272 │ 11438516 │
│    168777 │ 11403636 │
│   4120072 │ 11227824 │
│  10938808 │ 10519739 │
│     74088 │  9047015 │
│    115079 │  8837972 │
│    337234 │  8205961 │
└───────────┴──────────┘

20 rows in set. Elapsed: 0.153 sec. Processed 1.00 billion rows, 4.00 GB (6.53 billion rows/s., 26.10 GB/s.)

:)
```

</details>

### CPU

Since executing a query requires processing a large number of rows, it helps to dispatch all operations for whole vectors instead of for individual rows, or to implement the query engine so that there is almost no dispatching cost. If you do not do this, with any half-decent disk subsystem the query interpreter inevitably stalls the CPU. It makes sense both to store data in columns and to process it, when possible, by columns.

There are two ways to do this (a sketch of the first follows the list):

1. A vector engine. All operations are written for vectors instead of for separate values. This means operations do not need to be invoked very often, and dispatching costs are negligible. The operation code contains an optimized internal cycle.

2. Code generation. The code generated for the query has all the indirect calls in it.
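A minimal C++ sketch of the vector-engine idea from point 1; the interfaces are illustrative only, not ClickHouse's actual column or function classes:

```
#include <cstddef>
#include <vector>

// Per-row style: one virtual dispatch per value; the call overhead
// dominates when billions of rows are processed.
struct IScalarExpression
{
    virtual double apply(double x) const = 0;
    virtual ~IScalarExpression() = default;
};

// Vector style: one dispatch per block of values; the inner loop is a
// tight cycle the compiler can unroll and auto-vectorize.
struct IVectorExpression
{
    virtual void apply(const std::vector<double> & in, std::vector<double> & out) const = 0;
    virtual ~IVectorExpression() = default;
};

struct PlusOne final : IVectorExpression
{
    void apply(const std::vector<double> & in, std::vector<double> & out) const override
    {
        out.resize(in.size());
        for (size_t i = 0; i < in.size(); ++i)  // the optimized internal cycle
            out[i] = in[i] + 1.0;
    }
};
```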
This is not done in "normal" databases, because it does not make sense when running simple queries. However, there are exceptions. For example, MemSQL uses code generation to reduce latency when processing SQL queries. (For comparison, analytical DBMSs require optimization of throughput, not latency.)

Note that for CPU efficiency, the query language must be declarative (SQL or MDX), or at least a vector (J, K). The query should only contain implicit loops, allowing for optimization.

[Original article](https://clickhouse.yandex/docs/en/) <!--hide-->