Merge remote-tracking branch 'origin/master' into pr-plan-rewrite

Igor Nikonov 2024-05-16 10:19:06 +00:00
commit 48ae7be85c
24 changed files with 265 additions and 77 deletions

View File

@@ -89,7 +89,7 @@ PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 60
RemoveBracesLLVM: true
RemoveBracesLLVM: false
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements

View File

@@ -138,6 +138,8 @@ Checks: [
# This is a good check, but clang-tidy crashes, see https://github.com/llvm/llvm-project/issues/91872
'-modernize-use-constraints',
# https://github.com/abseil/abseil-cpp/issues/1667
'-clang-analyzer-optin.core.EnumCastOutOfRange'
]
WarningsAsErrors: '*'

View File

@@ -67,6 +67,8 @@ generates merged configuration file:
</clickhouse>
```
### Using from_env and from_zk
To specify that the value of an element should be replaced by the value of an environment variable, use the `from_env` attribute.
Example with `$MAX_QUERY_SIZE = 150000`:
@@ -93,6 +95,59 @@ which is equal to
</clickhouse>
```
The same is possible using `from_zk`:
``` xml
<clickhouse>
<postgresql_port from_zk="/zk_configs/postgresql_port"/>
</clickhouse>
```
```
# clickhouse-keeper-client
/ :) touch /zk_configs
/ :) create /zk_configs/postgresql_port "9005"
/ :) get /zk_configs/postgresql_port
9005
```
which is equal to
``` xml
<clickhouse>
<postgresql_port>9005</postgresql_port>
</clickhouse>
```
#### Default values for from_env and from_zk attributes
Using `replace="1"`, it's possible to set a default value that is substituted only if the environment variable or ZooKeeper node is set.
Taking the previous example, but with `MAX_QUERY_SIZE` unset:
``` xml
<clickhouse>
<profiles>
<default>
<max_query_size from_env="MAX_QUERY_SIZE" replace="1">150000</max_query_size>
</default>
</profiles>
</clickhouse>
```
will take the default value
``` xml
<clickhouse>
<profiles>
<default>
<max_query_size>150000</max_query_size>
</default>
</profiles>
</clickhouse>
```
## Substituting Configuration {#substitution}
The config can define substitutions. There are two types of substitutions:

View File

@@ -5,7 +5,6 @@
#include <Common/OpenTelemetryTraceContext.h>
#include <Common/noexcept_scope.h>
#include <cassert>
#include <type_traits>
#include <Poco/Util/Application.h>
@@ -437,6 +436,11 @@ void ThreadPoolImpl<Thread>::worker(typename std::list<Thread>::iterator thread_
/// We don't run jobs after `shutdown` is set, but we have to properly dequeue all jobs and finish them.
if (shutdown)
{
{
ALLOW_ALLOCATIONS_IN_SCOPE;
/// job can contain packaged_task which can set exception during destruction
job_data.reset();
}
job_is_done = true;
continue;
}
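The `ALLOW_ALLOCATIONS_IN_SCOPE` block added here exists because destroying a job can itself allocate. A minimal standalone sketch of the effect the new comment describes, using only standard C++ (`ALLOW_ALLOCATIONS_IN_SCOPE` is a ClickHouse macro and is omitted):

```cpp
#include <future>
#include <iostream>

int main()
{
    std::future<int> fut;
    {
        std::packaged_task<int()> task([] { return 42; });
        fut = task.get_future();
        /// The task is destroyed without ever being invoked: its destructor
        /// stores a broken_promise error into the shared state, and storing
        /// that exception may allocate -- which is why the worker re-enables
        /// allocations before calling job_data.reset().
    }
    try
    {
        fut.get();
    }
    catch (const std::future_error & e)
    {
        std::cout << "destruction set an exception: " << e.what() << '\n';
    }
}
```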

View File

@@ -73,8 +73,9 @@ bool ParallelReadBuffer::addReaderToPool()
auto worker = read_workers.emplace_back(std::make_shared<ReadWorker>(input, range_start, size));
++active_working_readers;
schedule([this, my_worker = std::move(worker)]() mutable { readerThreadFunction(std::move(my_worker)); }, Priority{});
/// increase number of workers only after we are sure that the reader was scheduled
++active_working_readers;
return true;
}
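The reordering follows the rationale in the new comment: if the counter were incremented first and scheduling then failed by throwing, `active_working_readers` would stay overstated and never be corrected. A runnable sketch of the hazard with hypothetical stand-in names (`schedule` here is not the real scheduler callback):

```cpp
#include <atomic>
#include <functional>
#include <iostream>
#include <stdexcept>

std::atomic<size_t> active_working_readers{0};

/// Stand-in for the scheduler callback: it may throw (e.g. the thread pool
/// is saturated) instead of queueing the job.
void schedule(std::function<void()> job, bool pool_full)
{
    if (pool_full)
        throw std::runtime_error("cannot schedule a task");
    job();
}

bool addReaderToPool(bool pool_full)
{
    /// Old order: ++active_working_readers came before schedule(). If
    /// schedule() threw, the counter was never decremented again, and
    /// whoever waits for it to drop to zero would wait forever.
    /// New order: count the worker only after it is definitely queued.
    schedule([] { /* readerThreadFunction(...) */ }, pool_full);
    ++active_working_readers;
    return true;
}

int main()
{
    try
    {
        addReaderToPool(/*pool_full=*/ true);
    }
    catch (const std::exception & e)
    {
        std::cout << e.what() << ", readers counted: " << active_working_readers << '\n';
    }
}
```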

View File

@@ -21,6 +21,9 @@
#include <base/sort.h>
#include <Common/JSONBuilder.h>
#include <absl/container/flat_hash_map.h>
#include <absl/container/inlined_vector.h>
namespace DB
{
@@ -708,16 +711,18 @@ static ColumnWithTypeAndName executeActionForPartialResult(const ActionsDAG::Nod
return res_column;
}
Block ActionsDAG::updateHeader(Block header) const
Block ActionsDAG::updateHeader(const Block & header) const
{
IntermediateExecutionResult node_to_column;
std::set<size_t> pos_to_remove;
{
std::unordered_map<std::string_view, std::list<size_t>> input_positions;
using inline_vector = absl::InlinedVector<size_t, 7>; // 64B, holding max 7 size_t elements inlined
absl::flat_hash_map<std::string_view, inline_vector> input_positions;
for (size_t pos = 0; pos < inputs.size(); ++pos)
input_positions[inputs[pos]->result_name].emplace_back(pos);
/// We insert from last to first into the InlinedVector so it's easier to pop_back matches later
for (size_t pos = inputs.size(); pos != 0; pos--)
input_positions[inputs[pos - 1]->result_name].emplace_back(pos - 1);
for (size_t pos = 0; pos < header.columns(); ++pos)
{
@@ -725,10 +730,11 @@ Block ActionsDAG::updateHeader(Block header) const
auto it = input_positions.find(col.name);
if (it != input_positions.end() && !it->second.empty())
{
auto & list = it->second;
pos_to_remove.insert(pos);
node_to_column[inputs[list.front()]] = col;
list.pop_front();
auto & v = it->second;
node_to_column[inputs[v.back()]] = col;
v.pop_back();
}
}
}
@@ -746,18 +752,21 @@ Block ActionsDAG::updateHeader(Block header) const
throw;
}
if (isInputProjected())
header.clear();
else
header.erase(pos_to_remove);
Block res;
res.reserve(result_columns.size());
for (auto & col : result_columns)
res.insert(std::move(col));
for (auto && item : header)
res.insert(std::move(item));
if (isInputProjected())
return res;
res.reserve(header.columns() - pos_to_remove.size());
for (size_t i = 0; i < header.columns(); i++)
{
if (!pos_to_remove.contains(i))
res.insert(header.data[i]);
}
return res;
}
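The container change above trades `std::unordered_map<std::string_view, std::list<size_t>>` for an `absl::flat_hash_map` holding `absl::InlinedVector<size_t, 7>`, so the common case (a column name occurring only a handful of times) needs no per-element heap allocation. A small sketch of the access pattern, assuming Abseil is available:

```cpp
#include <absl/container/flat_hash_map.h>
#include <absl/container/inlined_vector.h>
#include <cstdio>
#include <string_view>

int main()
{
    /// Up to 7 size_t elements live inside the vector object itself
    /// (about 64 bytes on a 64-bit platform); only an 8th element would
    /// spill to the heap.
    using inline_vector = absl::InlinedVector<size_t, 7>;
    absl::flat_hash_map<std::string_view, inline_vector> input_positions;

    /// Positions are inserted from last to first, mirroring updateHeader() ...
    const size_t num_inputs = 5;
    for (size_t pos = num_inputs; pos != 0; --pos)
        input_positions["x"].emplace_back(pos - 1);

    /// ... so consuming with back()/pop_back() yields them first to last,
    /// matching what the old list.front()/pop_front() produced.
    auto & v = input_positions["x"];
    while (!v.empty())
    {
        std::printf("%zu\n", v.back());
        v.pop_back();
    }
}
```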

View File

@@ -272,7 +272,7 @@ public:
///
/// In addition, check that result constants are constants according to DAG.
/// In case a function returns a constant but its arguments are not constant, materialize it.
Block updateHeader(Block header) const;
Block updateHeader(const Block & header) const;
using IntermediateExecutionResult = std::unordered_map<const Node *, ColumnWithTypeAndName>;
static ColumnsWithTypeAndName evaluatePartialResult(

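The signature change from `Block updateHeader(Block header)` to `Block updateHeader(const Block & header) const` means call sites no longer have to copy or `std::move` their block into the function. A schematic illustration with a simplified stand-in type, not ClickHouse's actual `Block`:

```cpp
#include <iostream>
#include <string>
#include <vector>

struct Block { std::vector<std::string> column_names; };  /// simplified stand-in

/// Old shape: pass by value -- every caller pays for a copy of the whole
/// block unless it relinquishes its own copy with std::move.
Block updateHeaderOld(Block header) { return header; }

/// New shape: the input is read-only and the result is assembled from
/// scratch, so the caller's block stays intact and usable.
Block updateHeaderNew(const Block & header)
{
    Block res;
    res.column_names.reserve(header.column_names.size());
    for (const auto & name : header.column_names)
        res.column_names.push_back(name);
    return res;
}

int main()
{
    Block block{{"a", "b"}};
    Block out = updateHeaderNew(block);  /// block is still valid here
    std::cout << out.column_names.size() << '\n';
}
```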
View File

@@ -2487,10 +2487,15 @@ HashJoin::~HashJoin()
{
if (!data)
{
LOG_TRACE(log, "{}Join data has been already released", instance_log_id);
LOG_TEST(log, "{}Join data has been already released", instance_log_id);
return;
}
LOG_TRACE(log, "{}Join data is being destroyed, {} bytes and {} rows in hash table", instance_log_id, getTotalByteCount(), getTotalRowCount());
LOG_TEST(
log,
"{}Join data is being destroyed, {} bytes and {} rows in hash table",
instance_log_id,
getTotalByteCount(),
getTotalRowCount());
}
template <typename Mapped>

View File

@@ -1229,8 +1229,9 @@ void Planner::buildQueryPlanIfNeeded()
if (query_plan.isInitialized())
return;
LOG_TRACE(getLogger("Planner"), "Query {} to stage {}{}",
query_tree->formatConvertedASTForErrorMessage(),
LOG_TRACE(
getLogger("Planner"),
"Query to stage {}{}",
QueryProcessingStage::toString(select_query_options.to_stage),
select_query_options.only_analyze ? " only analyze" : "");
@@ -1506,8 +1507,9 @@ void Planner::buildPlanForQueryNode()
auto & mapping = join_tree_query_plan.query_node_to_plan_step_mapping;
query_node_to_plan_step_mapping.insert(mapping.begin(), mapping.end());
LOG_TRACE(getLogger("Planner"), "Query {} from stage {} to stage {}{}",
query_tree->formatConvertedASTForErrorMessage(),
LOG_TRACE(
getLogger("Planner"),
"Query from stage {} to stage {}{}",
QueryProcessingStage::toString(from_stage),
QueryProcessingStage::toString(select_query_options.to_stage),
select_query_options.only_analyze ? " only analyze" : "");

View File

@@ -262,10 +262,6 @@ static size_t tryPushDownOverJoinStep(QueryPlan::Node * parent_node, QueryPlan::
{
const auto & left_table_key_name = join_clause.key_names_left[i];
const auto & right_table_key_name = join_clause.key_names_right[i];
if (!join_header.has(left_table_key_name) || !join_header.has(right_table_key_name))
continue;
const auto & left_table_column = left_stream_input_header.getByName(left_table_key_name);
const auto & right_table_column = right_stream_input_header.getByName(right_table_key_name);
@@ -338,9 +334,9 @@ static size_t tryPushDownOverJoinStep(QueryPlan::Node * parent_node, QueryPlan::
auto join_filter_push_down_actions = filter->getExpression()->splitActionsForJOINFilterPushDown(filter->getFilterColumnName(),
filter->removesFilterColumn(),
left_stream_available_columns_to_push_down,
left_stream_input_header.getColumnsWithTypeAndName(),
left_stream_input_header,
right_stream_available_columns_to_push_down,
right_stream_input_header.getColumnsWithTypeAndName(),
right_stream_input_header,
equivalent_columns_to_push_down,
equivalent_left_stream_column_to_right_stream_column,
equivalent_right_stream_column_to_left_stream_column);

View File

@@ -21,7 +21,7 @@ Block SourceStepWithFilter::applyPrewhereActions(Block block, const PrewhereInfo
{
if (prewhere_info->row_level_filter)
{
block = prewhere_info->row_level_filter->updateHeader(std::move(block));
block = prewhere_info->row_level_filter->updateHeader(block);
auto & row_level_column = block.getByName(prewhere_info->row_level_column_name);
if (!row_level_column.type->canBeUsedInBooleanContext())
{
@@ -36,7 +36,7 @@ Block SourceStepWithFilter::applyPrewhereActions(Block block, const PrewhereInfo
if (prewhere_info->prewhere_actions)
{
block = prewhere_info->prewhere_actions->updateHeader(std::move(block));
block = prewhere_info->prewhere_actions->updateHeader(block);
auto & prewhere_column = block.getByName(prewhere_info->prewhere_column_name);
if (!prewhere_column.type->canBeUsedInBooleanContext())

View File

@@ -3,9 +3,9 @@
namespace DB
{
Block ExpressionTransform::transformHeader(Block header, const ActionsDAG & expression)
Block ExpressionTransform::transformHeader(const Block & header, const ActionsDAG & expression)
{
return expression.updateHeader(std::move(header));
return expression.updateHeader(header);
}

View File

@@ -24,7 +24,7 @@ public:
String getName() const override { return "ExpressionTransform"; }
static Block transformHeader(Block header, const ActionsDAG & expression);
static Block transformHeader(const Block & header, const ActionsDAG & expression);
protected:
void transform(Chunk & chunk) override;

View File

@@ -174,26 +174,22 @@ static std::unique_ptr<IFilterDescription> combineFilterAndIndices(
}
Block FilterTransform::transformHeader(
Block header,
const ActionsDAG * expression,
const String & filter_column_name,
bool remove_filter_column)
const Block & header, const ActionsDAG * expression, const String & filter_column_name, bool remove_filter_column)
{
if (expression)
header = expression->updateHeader(std::move(header));
Block result = expression ? expression->updateHeader(header) : header;
auto filter_type = header.getByName(filter_column_name).type;
auto filter_type = result.getByName(filter_column_name).type;
if (!filter_type->onlyNull() && !isUInt8(removeNullable(removeLowCardinality(filter_type))))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER,
"Illegal type {} of column {} for filter. Must be UInt8 or Nullable(UInt8).",
filter_type->getName(), filter_column_name);
if (remove_filter_column)
header.erase(filter_column_name);
result.erase(filter_column_name);
else
replaceFilterToConstant(header, filter_column_name);
replaceFilterToConstant(result, filter_column_name);
return header;
return result;
}
FilterTransform::FilterTransform(

View File

@@ -22,11 +22,8 @@ public:
const Block & header_, ExpressionActionsPtr expression_, String filter_column_name_,
bool remove_filter_column_, bool on_totals_ = false, std::shared_ptr<std::atomic<size_t>> rows_filtered_ = nullptr);
static Block transformHeader(
Block header,
const ActionsDAG * expression,
const String & filter_column_name,
bool remove_filter_column);
static Block
transformHeader(const Block & header, const ActionsDAG * expression, const String & filter_column_name, bool remove_filter_column);
String getName() const override { return "FilterTransform"; }

View File

@@ -14,12 +14,12 @@ namespace ErrorCodes
Block JoiningTransform::transformHeader(Block header, const JoinPtr & join)
{
LOG_DEBUG(getLogger("JoiningTransform"), "Before join block: '{}'", header.dumpStructure());
LOG_TEST(getLogger("JoiningTransform"), "Before join block: '{}'", header.dumpStructure());
join->checkTypesOfKeys(header);
join->initialize(header);
ExtraBlockPtr tmp;
join->joinBlock(header, tmp);
LOG_DEBUG(getLogger("JoiningTransform"), "After join block: '{}'", header.dumpStructure());
LOG_TEST(getLogger("JoiningTransform"), "After join block: '{}'", header.dumpStructure());
return header;
}

View File

@@ -338,8 +338,6 @@ static void prepareChunk(Chunk & chunk)
void MergeJoinAlgorithm::initialize(Inputs inputs)
{
LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: {} - '{}'", __FILE__, __LINE__, 0, inputs[0].chunk.dumpStructure());
LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: {} - '{}'", __FILE__, __LINE__, 1, inputs[1].chunk.dumpStructure());
if (inputs.size() != 2)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Two inputs are required, got {}", inputs.size());
@@ -351,8 +349,6 @@ void MergeJoinAlgorithm::initialize(Inputs inputs)
void MergeJoinAlgorithm::consume(Input & input, size_t source_num)
{
LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: {} - '{}'", __FILE__, __LINE__, source_num, input.chunk.dumpStructure());
if (input.skip_last_row)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "skip_last_row is not supported");
@@ -816,15 +812,9 @@ IMergingAlgorithm::Status MergeJoinAlgorithm::merge()
if (!cursors[1]->cursor.isValid() && !cursors[1]->fullyCompleted())
return Status(1);
for (size_t i = 0; i < 2; ++i)
{
LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: sampleColumns {} '{}'", __FILE__, __LINE__, i, cursors[i]->sampleBlock().dumpStructure());
}
if (auto result = handleAllJoinState())
{
LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: '{}'", __FILE__, __LINE__, result ? result->chunk.dumpStructure() : "NA");
return std::move(*result);
}

View File

@@ -49,7 +49,7 @@ Block TotalsHavingTransform::transformHeader(
if (expression)
{
block = expression->updateHeader(std::move(block));
block = expression->updateHeader(block);
if (remove_filter)
block.erase(filter_column_name);
}

View File

@@ -2549,15 +2549,15 @@ def reportLogStats(args):
WITH
240 AS mins,
(
SELECT (count(), sum(length(message)))
SELECT (count(), sum(length(toValidUTF8(message))))
FROM system.text_log
WHERE (now() - toIntervalMinute(mins)) < event_time
) AS total
SELECT
count() AS count,
round(count / (total.1), 3) AS `count_%`,
formatReadableSize(sum(length(message))) AS size,
round(sum(length(message)) / (total.2), 3) AS `size_%`,
formatReadableSize(sum(length(toValidUTF8(message)))) AS size,
round(sum(length(toValidUTF8(message))) / (total.2), 3) AS `size_%`,
countDistinct(logger_name) AS uniq_loggers,
countDistinct(thread_id) AS uniq_threads,
groupArrayDistinct(toString(level)) AS levels,
@@ -2580,8 +2580,8 @@ def reportLogStats(args):
240 AS mins
SELECT
count() AS count,
substr(replaceRegexpAll(message, '[^A-Za-z]+', ''), 1, 32) AS pattern,
substr(any(message), 1, 256) as runtime_message,
substr(replaceRegexpAll(toValidUTF8(message), '[^A-Za-z]+', ''), 1, 32) AS pattern,
substr(any(toValidUTF8(message)), 1, 256) as runtime_message,
any((extract(source_file, '/[a-zA-Z0-9_]+\\.[a-z]+'), source_line)) as line
FROM system.text_log
WHERE (now() - toIntervalMinute(mins)) < event_time AND message_format_string = ''
@@ -2596,7 +2596,7 @@ def reportLogStats(args):
print("\n")
query = """
SELECT message_format_string, count(), any(message) AS any_message
SELECT message_format_string, count(), any(toValidUTF8(message)) AS any_message
FROM system.text_log
WHERE (now() - toIntervalMinute(240)) < event_time
AND (message NOT LIKE (replaceRegexpAll(message_format_string, '{[:.0-9dfx]*}', '%') AS s))
@@ -2631,8 +2631,8 @@ def reportLogStats(args):
'Unknown identifier: ''{}''', 'User name is empty', 'Expected function, got: {}',
'Attempt to read after eof', 'String size is too big ({}), maximum: {}'
) AS known_short_messages
SELECT count() AS c, message_format_string, substr(any(message), 1, 120),
min(if(length(regexpExtract(message, '(.*)\\([A-Z0-9_]+\\)')) as prefix_len > 0, prefix_len, length(message)) - 26 AS length_without_exception_boilerplate) AS min_length_without_exception_boilerplate
SELECT count() AS c, message_format_string, substr(any(toValidUTF8(message)), 1, 120),
min(if(length(regexpExtract(toValidUTF8(message), '(.*)\\([A-Z0-9_]+\\)')) as prefix_len > 0, prefix_len, length(toValidUTF8(message))) - 26 AS length_without_exception_boilerplate) AS min_length_without_exception_boilerplate
FROM system.text_log
WHERE (now() - toIntervalMinute(240)) < event_time
AND (length(message_format_string) < 16

View File

@@ -2,7 +2,7 @@ import random
import string
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.cluster import ClickHouseCluster, is_arm
cluster = ClickHouseCluster(__file__)
@@ -255,6 +255,11 @@ def test_uncompressed_cache_plus_zstd_codec(start_cluster):
def test_preconfigured_deflateqpl_codec(start_cluster):
if is_arm():
pytest.skip(
"Skipping test because it's special test for Intel code (doesn't work on ARM)"
)
node6.query(
"""
CREATE TABLE compression_codec_multiple_with_key (

View File

@@ -194,7 +194,7 @@ select 'exceptions shorter than 30',
(uniqExact(message_format_string) as c) <= max_messages,
c <= max_messages ? [] : groupUniqArray(message_format_string)
from logs
where message ilike '%DB::Exception%' and if(length(extract(message, '(.*)\\([A-Z0-9_]+\\)')) as pref > 0, pref, length(message)) < 30 + 26 and message_format_string not in known_short_messages;
where message ilike '%DB::Exception%' and if(length(extract(toValidUTF8(message), '(.*)\\([A-Z0-9_]+\\)')) as pref > 0, pref, length(toValidUTF8(message))) < 30 + 26 and message_format_string not in known_short_messages;
-- Avoid too noisy messages: top 1 message frequency must be less than 30%. We should reduce the threshold
WITH 0.30 as threshold
@@ -207,7 +207,7 @@ select
with 0.16 as threshold
select
'noisy Trace messages',
greatest(coalesce(((select message_format_string, count() from logs where level = 'Trace' and message_format_string not in ('Access granted: {}{}', '{} -> {}', 'Query {} to stage {}{}', 'Query {} from stage {} to stage {}{}')
greatest(coalesce(((select message_format_string, count() from logs where level = 'Trace' and message_format_string not in ('Access granted: {}{}', '{} -> {}', 'Query to stage {}{}', 'Query from stage {} to stage {}{}')
group by message_format_string order by count() desc limit 1) as top_message).2, 0) / (select count() from logs), threshold) as r,
r <= threshold ? '' : top_message.1;
@@ -252,7 +252,7 @@ select 'number of noisy messages',
-- Each message matches its pattern (returns 0 rows)
-- Note: maybe we should make it stricter ('Code:%Exception: '||s||'%'), but it's not easy because of addMessage
select 'incorrect patterns', greatest(uniqExact(message_format_string), 15) from (
select message_format_string, any(message) as any_message from logs
select message_format_string, any(toValidUTF8(message)) as any_message from logs
where ((rand() % 8) = 0)
and message not like (replaceRegexpAll(message_format_string, '{[:.0-9dfx]*}', '%') as s)
and message not like (s || ' (skipped % similar messages)')

View File

@@ -9,7 +9,7 @@ USER_FILES_PATH=$($CLICKHOUSE_CLIENT --query "select _path,_file from file('none
cp "$CUR_DIR"/data_csv/10m_rows.csv.xz $USER_FILES_PATH/
${CLICKHOUSE_CLIENT} --query="SELECT * FROM file('10m_rows.csv.xz' , 'CSVWithNames') order by identifier, number, name, surname, birthday LIMIT 1 settings max_threads=1, max_memory_usage=1000000000"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM file('10m_rows.csv.xz' , 'CSVWithNames') order by identifier, number, name, surname, birthday LIMIT 1 settings max_threads=1, max_memory_usage=100000000"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM file('10m_rows.csv.xz' , 'CSVWithNames') order by identifier, number, name, surname, birthday LIMIT 1 settings input_format_parallel_parsing=1, max_threads=1, max_parsing_threads=16, min_chunk_bytes_for_parallel_parsing=10485760, max_memory_usage=1000000000"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM file('10m_rows.csv.xz' , 'CSVWithNames') order by identifier, number, name, surname, birthday LIMIT 1 settings input_format_parallel_parsing=1, max_threads=1, max_parsing_threads=16, min_chunk_bytes_for_parallel_parsing=10485760, max_memory_usage=100000000"
rm $USER_FILES_PATH/10m_rows.csv.xz

View File

@@ -0,0 +1,91 @@
-- { echoOn }
EXPLAIN header = 1, indexes = 1
SELECT name FROM users INNER JOIN users2 USING name WHERE users.name ='Alice';
Expression ((Project names + (Projection + )))
Header: name String
Join (JOIN FillRightFirst)
Header: __table1.name String
Filter (( + Change column names to column identifiers))
Header: __table1.name String
ReadFromMergeTree (default.users)
Header: name String
Indexes:
PrimaryKey
Keys:
name
Condition: (name in [\'Alice\', \'Alice\'])
Parts: 1/3
Granules: 1/3
Filter (( + Change column names to column identifiers))
Header: __table2.name String
ReadFromMergeTree (default.users2)
Header: name String
Indexes:
PrimaryKey
Keys:
name
Condition: (name in [\'Alice\', \'Alice\'])
Parts: 1/3
Granules: 1/3
SELECT '--';
--
EXPLAIN header = 1, indexes = 1
SELECT name FROM users LEFT JOIN users2 USING name WHERE users.name ='Alice';
Expression ((Project names + (Projection + )))
Header: name String
Join (JOIN FillRightFirst)
Header: __table1.name String
Filter (( + Change column names to column identifiers))
Header: __table1.name String
ReadFromMergeTree (default.users)
Header: name String
Indexes:
PrimaryKey
Keys:
name
Condition: (name in [\'Alice\', \'Alice\'])
Parts: 1/3
Granules: 1/3
Filter (( + Change column names to column identifiers))
Header: __table2.name String
ReadFromMergeTree (default.users2)
Header: name String
Indexes:
PrimaryKey
Keys:
name
Condition: (name in [\'Alice\', \'Alice\'])
Parts: 1/3
Granules: 1/3
SELECT '--';
--
EXPLAIN header = 1, indexes = 1
SELECT name FROM users RIGHT JOIN users2 USING name WHERE users2.name ='Alice';
Expression ((Project names + (Projection + )))
Header: name String
Join (JOIN FillRightFirst)
Header: __table1.name String
__table2.name String
Filter (( + Change column names to column identifiers))
Header: __table1.name String
ReadFromMergeTree (default.users)
Header: name String
Indexes:
PrimaryKey
Keys:
name
Condition: (name in [\'Alice\', \'Alice\'])
Parts: 1/3
Granules: 1/3
Filter (( + Change column names to column identifiers))
Header: __table2.name String
ReadFromMergeTree (default.users2)
Header: name String
Indexes:
PrimaryKey
Keys:
name
Condition: (name in [\'Alice\', \'Alice\'])
Parts: 1/3
Granules: 1/3

View File

@@ -0,0 +1,35 @@
SET allow_experimental_analyzer = 1;
DROP TABLE IF EXISTS users;
CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=MergeTree order by (uid, name);
INSERT INTO users VALUES (1231, 'John', 33);
INSERT INTO users VALUES (6666, 'Ksenia', 48);
INSERT INTO users VALUES (8888, 'Alice', 50);
DROP TABLE IF EXISTS users2;
CREATE TABLE users2 (uid Int16, name String, age2 Int16) ENGINE=MergeTree order by (uid, name);
INSERT INTO users2 VALUES (1231, 'John', 33);
INSERT INTO users2 VALUES (6666, 'Ksenia', 48);
INSERT INTO users2 VALUES (8888, 'Alice', 50);
-- { echoOn }
EXPLAIN header = 1, indexes = 1
SELECT name FROM users INNER JOIN users2 USING name WHERE users.name ='Alice';
SELECT '--';
EXPLAIN header = 1, indexes = 1
SELECT name FROM users LEFT JOIN users2 USING name WHERE users.name ='Alice';
SELECT '--';
EXPLAIN header = 1, indexes = 1
SELECT name FROM users RIGHT JOIN users2 USING name WHERE users2.name ='Alice';
-- { echoOff }
DROP TABLE users;
DROP TABLE users2;