Merge branch 'master' into add-alias-for-today-and-now

Commit: a58ace75a2
```diff
@@ -4524,6 +4524,7 @@ This setting allows to specify renaming pattern for files processed by `file` ta

 ### Placeholders

+- `%a` — Full original filename (e.g., "sample.csv").
 - `%f` — Original filename without extension (e.g., "sample").
 - `%e` — Original file extension with dot (e.g., ".csv").
 - `%t` — Timestamp (in microseconds).
```
```diff
@@ -4201,6 +4201,7 @@ SELECT *, timezone() FROM test_tz WHERE d = '2000-01-01 00:00:00' SETTINGS sessi

 ### Template
 The template supports the following kinds of placeholders:

+- `%a` — Full original filename (for example, "sample.csv").
 - `%f` — Original filename without extension (for example, "sample").
 - `%e` — Original file extension with dot (for example, ".csv").
 - `%t` — Current time (in microseconds).
```
```diff
@@ -10,7 +10,6 @@
 #include <cassert>
 #include <chrono>
 #include <cstring>
-#include <iostream>
 #include <memory>

```
```diff
@@ -47,6 +47,7 @@ String FileRenamer::generateNewFilename(const String & filename) const
     // Define placeholders and their corresponding values
     std::map<String, String> placeholders =
     {
+        {"%a", filename},
         {"%f", file_base},
        {"%e", file_ext},
         {"%t", timestamp},
```
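As an aside, here is a standalone sketch (not ClickHouse code; `expandRule` and the sample values are invented for illustration) of the substitution this map drives once the `{"%a", filename}` entry exists. The real renamer also handles the `%%` escape, which this sketch omits.

```cpp
#include <iostream>
#include <map>
#include <string>

// Replace every occurrence of each placeholder key with its value.
static std::string expandRule(std::string rule, const std::map<std::string, std::string> & placeholders)
{
    for (const auto & [key, value] : placeholders)
        // Resume searching after the inserted value so replaced text is not rescanned.
        for (size_t pos = rule.find(key); pos != std::string::npos; pos = rule.find(key, pos + value.size()))
            rule.replace(pos, key.size(), value);
    return rule;
}

int main()
{
    std::string filename = "sample.csv";
    std::map<std::string, std::string> placeholders =
    {
        {"%a", filename},               // full original filename (the new entry)
        {"%f", "sample"},               // base name without extension
        {"%e", ".csv"},                 // extension with dot
        {"%t", "1688000000000000"},     // timestamp in microseconds (sample value)
    };

    std::cout << expandRule("%a.processed", placeholders) << '\n'; // sample.csv.processed
    std::cout << expandRule("%f_%t%e", placeholders) << '\n';      // sample_1688000000000000.csv
}
```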
```diff
@@ -69,16 +70,17 @@ bool FileRenamer::isEmpty() const
 bool FileRenamer::validateRenamingRule(const String & rule, bool throw_on_error)
 {
     // Check if the rule contains invalid placeholders
-    re2::RE2 invalid_placeholder_pattern("^([^%]|%[fet%])*$");
+    re2::RE2 invalid_placeholder_pattern("^([^%]|%[afet%])*$");
     if (!re2::RE2::FullMatch(rule, invalid_placeholder_pattern))
     {
         if (throw_on_error)
-            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid renaming rule: Allowed placeholders only %f, %e, %t, and %%");
+            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid renaming rule: Allowed placeholders only %a, %f, %e, %t, and %%");
         return false;
     }

     // Replace valid placeholders with empty strings and count remaining percentage signs.
     String replaced_rule = rule;
+    boost::replace_all(replaced_rule, "%a", "");
     boost::replace_all(replaced_rule, "%f", "");
     boost::replace_all(replaced_rule, "%e", "");
     boost::replace_all(replaced_rule, "%t", "");
```
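For a quick feel of what the widened character class accepts, the sketch below re-does the same full-match check with `std::regex` instead of the `re2` used in the actual code (assuming the pattern behaves identically in both engines, which it should for this simple alternation).

```cpp
#include <iostream>
#include <regex>
#include <string>

// A rule is valid when every '%' introduces one of the known placeholders
// (now including %a) or the %% escape.
static bool isValidRule(const std::string & rule)
{
    static const std::regex valid("^([^%]|%[afet%])*$");
    return std::regex_match(rule, valid);
}

int main()
{
    std::cout << isValidRule("%a.processed") << '\n'; // 1: %a is now allowed
    std::cout << isValidRule("%f_%t%e") << '\n';      // 1
    std::cout << isValidRule("100%%_%f") << '\n';     // 1: %% escapes a literal %
    std::cout << isValidRule("%x-%f") << '\n';        // 0: %x is not a placeholder
}
```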
```diff
@@ -9,6 +9,7 @@ namespace DB
 /**
  * The FileRenamer class provides functionality for renaming files based on a given pattern with placeholders
  * The supported placeholders are:
+ * %a - Full original file name ("sample.csv")
  * %f - Original filename without extension ("sample")
  * %e - Original file extension with dot (".csv")
  * %t - Timestamp (in microseconds)
```
```diff
@@ -577,6 +577,7 @@ class IColumn;
     M(Bool, optimize_skip_merged_partitions, false, "Skip partitions with one part with level > 0 in optimize final", 0) \
     M(Bool, optimize_on_insert, true, "Do the same transformation for inserted block of data as if merge was done on this block.", 0) \
     M(Bool, optimize_use_projections, true, "Automatically choose projections to perform SELECT query", 0) ALIAS(allow_experimental_projection_optimization) \
+    M(Bool, optimize_use_implicit_projections, false, "Automatically choose implicit projections to perform SELECT query", 0) \
     M(Bool, force_optimize_projection, false, "If projection optimization is enabled, SELECT queries need to use projection", 0) \
     M(Bool, async_socket_for_remote, true, "Asynchronously read from socket executing remote query", 0) \
     M(Bool, async_query_sending_for_remote, true, "Asynchronously create connections and send query to shards in remote query", 0) \
```
```diff
@@ -736,7 +737,7 @@ class IColumn;
     M(String, workload, "default", "Name of workload to be used to access resources", 0) \
     M(Milliseconds, storage_system_stack_trace_pipe_read_timeout_ms, 100, "Maximum time to read from a pipe for receiving information from the threads when querying the `system.stack_trace` table. This setting is used for testing purposes and not meant to be changed by users.", 0) \
     \
-    M(String, rename_files_after_processing, "", "Rename successfully processed files according to the specified pattern; Pattern can include the following placeholders: `%f` (original filename without extension), `%e` (file extension with dot), `%t` (current timestamp in µs), and `%%` (% sign)", 0) \
+    M(String, rename_files_after_processing, "", "Rename successfully processed files according to the specified pattern; Pattern can include the following placeholders: `%a` (full original file name), `%f` (original filename without extension), `%e` (file extension with dot), `%t` (current timestamp in µs), and `%%` (% sign)", 0) \
     \
     M(Bool, parallelize_output_from_storages, true, "Parallelize output for reading step from storage. It allows parallelizing query processing right after reading from storage if possible", 0) \
     M(String, insert_deduplication_token, "", "If not empty, used for duplicate detection instead of data digest", 0) \
```
```diff
@@ -80,6 +80,7 @@ namespace SettingsChangesHistory
 /// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972)
 static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
 {
+    {"23.7", {{"optimize_use_implicit_projections", true, false, "Disable implicit projections due to unexpected results."}}},
     {"23.6", {{"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."},
               {"http_receive_timeout", 180, 30, "See http_send_timeout."}}},
     {"23.5", {{"input_format_parquet_preserve_order", true, false, "Allow Parquet reader to reorder rows for better parallelism."},
```
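Roughly how such a version-keyed history can back the `compatibility` setting, as a simplified sketch with hypothetical types: the real code keys the map by `ClickHouseVersion`, which compares version components numerically rather than lexicographically as plain strings do.

```cpp
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Hypothetical simplification: values are strings here, not typed Field values.
struct SettingChange { std::string name; std::string previous_value; std::string new_value; };

int main()
{
    std::map<std::string, std::vector<SettingChange>> history =
    {
        {"23.7", {{"optimize_use_implicit_projections", "true", "false"}}},
        {"23.6", {{"http_send_timeout", "180", "30"}, {"http_receive_timeout", "180", "30"}}},
    };

    // Emulating `compatibility = '23.6'`: every change introduced after 23.6 is
    // undone by restoring the previous default, so optimize_use_implicit_projections
    // goes back to true for pre-23.7 compatibility.
    std::string compatibility = "23.6";
    for (auto it = history.upper_bound(compatibility); it != history.end(); ++it)
        for (const auto & change : it->second)
            std::cout << change.name << " reverted to " << change.previous_value << '\n';
}
```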
```diff
@@ -111,7 +111,7 @@ void optimizePrimaryKeyCondition(const Stack & stack);
 void optimizePrewhere(Stack & stack, QueryPlan::Nodes & nodes);
 void optimizeReadInOrder(QueryPlan::Node & node, QueryPlan::Nodes & nodes);
 void optimizeAggregationInOrder(QueryPlan::Node & node, QueryPlan::Nodes &);
-bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes & nodes);
+bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes & nodes, bool allow_implicit_projections);
 bool optimizeUseNormalProjections(Stack & stack, QueryPlan::Nodes & nodes);
 bool addPlansForSets(QueryPlan::Node & node, QueryPlan::Nodes & nodes);
```
```diff
@@ -19,6 +19,7 @@ QueryPlanOptimizationSettings QueryPlanOptimizationSettings::fromSettings(const
     settings.remove_redundant_distinct = from.query_plan_remove_redundant_distinct;
     settings.optimize_projection = from.optimize_use_projections && from.query_plan_optimize_projection;
     settings.force_use_projection = settings.optimize_projection && from.force_optimize_projection;
+    settings.optimize_use_implicit_projections = settings.optimize_projection && from.optimize_use_implicit_projections;
     return settings;
 }

```
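Note that the new flag is derived, not read directly: implicit projections are only considered when projection optimization is enabled at all. A plain-struct sketch of that cascade (`FromSettings` stands in for the real `Settings`):

```cpp
#include <iostream>

// Hypothetical stand-in for the relevant subset of Settings.
struct FromSettings
{
    bool optimize_use_projections = true;
    bool query_plan_optimize_projection = true;
    bool optimize_use_implicit_projections = false; // new default after this change
};

int main()
{
    FromSettings from;
    // Mirrors the two assignments in the hunk above: the implicit-projection flag
    // is gated behind the general projection-optimization flag.
    bool optimize_projection = from.optimize_use_projections && from.query_plan_optimize_projection;
    bool optimize_use_implicit_projections = optimize_projection && from.optimize_use_implicit_projections;
    std::cout << optimize_projection << ' ' << optimize_use_implicit_projections << '\n'; // 1 0
}
```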
```diff
@@ -41,6 +41,7 @@ struct QueryPlanOptimizationSettings
     /// If reading from projection can be applied
     bool optimize_projection = false;
     bool force_use_projection = false;
+    bool optimize_use_implicit_projections = false;

     static QueryPlanOptimizationSettings fromSettings(const Settings & from);
     static QueryPlanOptimizationSettings fromContext(ContextPtr from);
```
```diff
@@ -4,6 +4,7 @@
 #include <Processors/QueryPlan/Optimizations/Optimizations.h>
 #include <Processors/QueryPlan/SortingStep.h>
 #include <Common/Exception.h>
+#include <DataTypes/IDataType.h>

 namespace DB
 {
```
```diff
@@ -28,6 +29,20 @@ const DB::DataStream & getChildOutputStream(DB::QueryPlan::Node & node)
 namespace DB::QueryPlanOptimizations
 {

+/// This is a check that the output columns do not share the same name.
+/// That is fine for a DAG, but may introduce a bug in a SortingStep, because columns are selected by name.
+static bool areOutputsConvertableToBlock(const ActionsDAG::NodeRawConstPtrs & outputs)
+{
+    std::unordered_set<std::string_view> names;
+    for (const auto & output : outputs)
+    {
+        if (!names.emplace(output->result_name).second)
+            return false;
+    }
+
+    return true;
+}
+
 size_t tryExecuteFunctionsAfterSorting(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes)
 {
     if (parent_node->children.size() != 1)
```
```diff
@@ -57,6 +72,9 @@ size_t tryExecuteFunctionsAfterSorting(QueryPlan::Node * parent_node, QueryPlan:
     if (unneeded_for_sorting->trivial())
         return 0;

+    if (!areOutputsConvertableToBlock(needed_for_sorting->getOutputs()))
+        return 0;
+
     // Sorting (parent_node) -> Expression (child_node)
     auto & node_with_needed = nodes.emplace_back();
     std::swap(node_with_needed.children, child_node->children);
```
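The guard leans on a standard idiom: `unordered_set::emplace` returns a `bool` that is false when the key was already present. A tiny self-contained demonstration with invented column names:

```cpp
#include <iostream>
#include <string_view>
#include <unordered_set>
#include <vector>

int main()
{
    // Sample output names; "x" appears twice, which the check must catch.
    std::vector<std::string_view> outputs = {"x", "plus(x, 1)", "x"};

    std::unordered_set<std::string_view> names;
    bool all_unique = true;
    for (auto name : outputs)
        if (!names.emplace(name).second) // false => duplicate name
            all_unique = false;

    std::cout << (all_unique ? "convertable" : "duplicate names, skip optimization") << '\n';
}
```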
```diff
@@ -126,7 +126,8 @@ void optimizeTreeSecondPass(const QueryPlanOptimizationSettings & optimization_s
             optimizeReadInOrder(*frame.node, nodes);

             if (optimization_settings.optimize_projection)
-                num_applied_projection += optimizeUseAggregateProjections(*frame.node, nodes);
+                num_applied_projection
+                    += optimizeUseAggregateProjections(*frame.node, nodes, optimization_settings.optimize_use_implicit_projections);

             if (optimization_settings.aggregation_in_order)
                 optimizeAggregationInOrder(*frame.node, nodes);
```
```diff
@@ -433,7 +433,8 @@ AggregateProjectionCandidates getAggregateProjectionCandidates(
     QueryPlan::Node & node,
     AggregatingStep & aggregating,
     ReadFromMergeTree & reading,
-    const std::shared_ptr<PartitionIdToMaxBlock> & max_added_blocks)
+    const std::shared_ptr<PartitionIdToMaxBlock> & max_added_blocks,
+    bool allow_implicit_projections)
 {
     const auto & keys = aggregating.getParams().keys;
     const auto & aggregates = aggregating.getParams().aggregates;
```
```diff
@@ -453,7 +454,8 @@ AggregateProjectionCandidates getAggregateProjectionCandidates(
         if (projection.type == ProjectionDescription::Type::Aggregate)
             agg_projections.push_back(&projection);

-    bool can_use_minmax_projection = metadata->minmax_count_projection && !reading.getMergeTreeData().has_lightweight_delete_parts.load();
+    bool can_use_minmax_projection = allow_implicit_projections && metadata->minmax_count_projection
+        && !reading.getMergeTreeData().has_lightweight_delete_parts.load();

     if (!can_use_minmax_projection && agg_projections.empty())
         return candidates;
```
```diff
@@ -543,7 +545,7 @@ static QueryPlan::Node * findReadingStep(QueryPlan::Node & node)
     return nullptr;
 }

-bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes & nodes)
+bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes & nodes, bool allow_implicit_projections)
 {
     if (node.children.size() != 1)
         return false;
```
```diff
@@ -568,7 +570,7 @@ bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes &

     std::shared_ptr<PartitionIdToMaxBlock> max_added_blocks = getMaxAddedBlocks(reading);

-    auto candidates = getAggregateProjectionCandidates(node, *aggregating, *reading, max_added_blocks);
+    auto candidates = getAggregateProjectionCandidates(node, *aggregating, *reading, max_added_blocks, allow_implicit_projections);

     AggregateProjectionCandidate * best_candidate = nullptr;
     if (candidates.minmax_projection)
```
```diff
@@ -564,7 +564,17 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown(
         }
         case (ActionsDAG::ActionType::COLUMN):
         {
-            res = &inverted_dag.addColumn({node.column, node.result_type, node.result_name});
+            String name;
+            if (const auto * column_const = typeid_cast<const ColumnConst *>(node.column.get()))
+                /// Re-generate the column name for a constant.
+                /// A DAG from a query (with the analyzer enabled) uses suffixes for constants, like 1_UInt8.
+                /// A DAG from the PK does not use them, which sometimes breaks matching by column name.
+                /// Ideally, we should not compare names, but DAG subtrees instead.
+                name = ASTLiteral(column_const->getDataColumn()[0]).getColumnName();
+            else
+                name = node.result_name;
+
+            res = &inverted_dag.addColumn({node.column, node.result_type, name});
             break;
         }
         case (ActionsDAG::ActionType::ALIAS):
```
```diff
@@ -6994,7 +6994,8 @@ std::optional<ProjectionCandidate> MergeTreeData::getQueryProcessingStageWithAgg

     ProjectionCandidate * selected_candidate = nullptr;
     size_t min_sum_marks = std::numeric_limits<size_t>::max();
-    if (metadata_snapshot->minmax_count_projection && !has_lightweight_delete_parts.load(std::memory_order_relaxed)) /// Disable ReadFromStorage for parts with lightweight.
+    if (settings.optimize_use_implicit_projections && metadata_snapshot->minmax_count_projection
+        && !has_lightweight_delete_parts.load(std::memory_order_relaxed)) /// Disable ReadFromStorage for parts with lightweight.
         add_projection_candidate(*metadata_snapshot->minmax_count_projection, true);
     std::optional<ProjectionCandidate> minmax_count_projection_candidate;
     if (!candidates.empty())
```
```diff
@@ -6,6 +6,7 @@ create table test1(p DateTime, k int) engine MergeTree partition by toDate(p) or
 insert into test1 values ('2020-09-01 00:01:02', 1), ('2020-09-01 20:01:03', 2), ('2020-09-02 00:01:03', 3);

 set max_rows_to_read = 1;
+set optimize_use_implicit_projections = 1;
 -- non-optimized
 select count() from test1 settings max_parallel_replicas = 3;
 -- optimized (toYear is monotonic and we provide the partition expr as is)
```
```diff
@@ -4,7 +4,7 @@ create table d (i int, j int) engine MergeTree partition by i % 2 order by tuple

 insert into d select number, number from numbers(10000);

-set max_rows_to_read = 2, optimize_use_projections = 1;
+set max_rows_to_read = 2, optimize_use_projections = 1, optimize_use_implicit_projections = 1;

 select min(i), max(i), count() from d;
 select min(i), max(i), count() from d group by _partition_id order by _partition_id;
```
```diff
@@ -30,6 +30,6 @@ SELECT sum(t) FROM XXXX WHERE indexHint(t = 42);
 drop table if exists XXXX;
 create table XXXX (t Int64, f Float64) Engine=MergeTree order by t settings index_granularity=8192;
 insert into XXXX select number*60, 0 from numbers(100000);
-SELECT count() FROM XXXX WHERE indexHint(t = toDateTime(0));
+SELECT count() FROM XXXX WHERE indexHint(t = toDateTime(0)) SETTINGS optimize_use_implicit_projections = 1;
 100000
 drop table XXXX;
```
```diff
@@ -30,6 +30,6 @@ create table XXXX (t Int64, f Float64) Engine=MergeTree order by t settings inde

 insert into XXXX select number*60, 0 from numbers(100000);

-SELECT count() FROM XXXX WHERE indexHint(t = toDateTime(0));
+SELECT count() FROM XXXX WHERE indexHint(t = toDateTime(0)) SETTINGS optimize_use_implicit_projections = 1;

 drop table XXXX;
```
```diff
@@ -5,6 +5,8 @@ create table tbl(dt DateTime, i int, j String, v Float64) engine MergeTree parti

 insert into tbl values ('2021-04-01 00:01:02', 1, '123', 4), ('2021-04-01 01:01:02', 1, '12', 4), ('2021-04-01 02:11:02', 2, '345', 4), ('2021-04-01 04:31:02', 2, '2', 4), ('2021-04-02 00:01:02', 1, '1234', 4), ('2021-04-02 00:01:02', 2, '123', 4), ('2021-04-02 00:01:02', 3, '12', 4), ('2021-04-02 00:01:02', 4, '1', 4);

+set optimize_use_implicit_projections = 1;
+
 select count() from tbl where _partition_value = ('2021-04-01', 1, 2) settings max_rows_to_read = 1;
 select count() from tbl where _partition_value.1 = '2021-04-01' settings max_rows_to_read = 4;
 select count() from tbl where _partition_value.2 = 0 settings max_rows_to_read = 4;
```
```diff
@@ -1,4 +1,6 @@
--- Tags: long
+-- Tags: long, no-upgrade-check

+-- TODO(@vdimir): remove no-upgrade-check tag after https://github.com/ClickHouse/ClickHouse/pull/51737 is released
+
 DROP TABLE IF EXISTS t1;
 DROP TABLE IF EXISTS t2;
```
```diff
@@ -17,7 +17,7 @@ INSERT and READ INSERT
 DROP
 CHECK with query_log
 QueryFinish INSERT INTO times SELECT now() + INTERVAL 1 day SETTINGS optimize_on_insert = 0; FileOpen 8
-QueryFinish SELECT \'1\', min(t) FROM times; FileOpen 0
+QueryFinish SELECT \'1\', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1; FileOpen 0
 QueryFinish INSERT INTO times SELECT now() + INTERVAL 2 day SETTINGS optimize_on_insert = 0; FileOpen 8
-QueryFinish SELECT \'2\', min(t) FROM times; FileOpen 0
+QueryFinish SELECT \'2\', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1; FileOpen 0
 QueryFinish INSERT INTO times SELECT now() + INTERVAL 3 day SETTINGS optimize_on_insert = 0; FileOpen 8
```
```diff
@@ -44,13 +44,13 @@ INSERT INTO times SELECT now() + INTERVAL 1 day SETTINGS optimize_on_insert = 0;

 echo "READ"
 $CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq "
-    SELECT '1', min(t) FROM times;
+    SELECT '1', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1;
 " 2>&1 | grep -o -e '\ \[\ .*\ \]\ FileOpen:\ .*\ '

 echo "INSERT and READ INSERT"
 $CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq "
     INSERT INTO times SELECT now() + INTERVAL 2 day SETTINGS optimize_on_insert = 0;
-    SELECT '2', min(t) FROM times;
+    SELECT '2', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1;
     INSERT INTO times SELECT now() + INTERVAL 3 day SETTINGS optimize_on_insert = 0;
 " 2>&1 | grep -o -e '\ \[\ .*\ \]\ FileOpen:\ .*\ '

```
```diff
@@ -1,3 +1,3 @@
 CREATE TABLE test_tuple (`p` DateTime, `i` int, `j` int) ENGINE = MergeTree PARTITION BY (toDate(p), i) ORDER BY j SETTINGS index_granularity = 1;
 insert into test_tuple values (1, 1, 1);
-SELECT count() FROM test_tuple PREWHERE sipHash64(sipHash64(p, toString(toDate(p))), toString(toDate(p))) % -0. WHERE i > NULL settings optimize_trivial_count_query=0; -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER }
+SELECT count() FROM test_tuple PREWHERE sipHash64(sipHash64(p, toString(toDate(p))), toString(toDate(p))) % -0. WHERE i > NULL settings optimize_trivial_count_query=0, optimize_use_implicit_projections=1; -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER }
```
```diff
@@ -22,6 +22,6 @@ OPTIMIZE TABLE 02725_memory_for_merges FINAL;
 SYSTEM FLUSH LOGS;

 WITH (SELECT uuid FROM system.tables WHERE table='02725_memory_for_merges' and database=currentDatabase()) as uuid
-SELECT sum(peak_memory_usage) < 1024 * 1024 * 200 from system.part_log where table_uuid=uuid and event_type='MergeParts';
+SELECT (sum(peak_memory_usage) < 1024 * 1024 * 200 AS x) ? x : sum(peak_memory_usage) from system.part_log where table_uuid=uuid and event_type='MergeParts';

 DROP TABLE IF EXISTS 02725_memory_for_merges SYNC;
```
```diff
@@ -19,3 +19,6 @@ OK
 tmp5.csv
 OK
 tmp5.csv
+4
+tmp6.csv.processed
+!tmp6.csv
```
```diff
@@ -29,6 +29,7 @@ cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp3_1.csv
 cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp3_2.csv
 cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp4.csv
 cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp5.csv
+cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp6.csv

 ### Checking that renaming works

```
|
|||||||
echo "tmp5.csv"
|
echo "tmp5.csv"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# check full file name placeholder
|
||||||
|
${CLICKHOUSE_CLIENT} --rename-files-after-processing="%a.processed" -q "SELECT COUNT(*) FROM file('${unique_name}/tmp6.csv')"
|
||||||
|
if [ -e "${tmp_dir}/tmp6.csv.processed" ]; then
|
||||||
|
echo "tmp6.csv.processed"
|
||||||
|
fi
|
||||||
|
if [ ! -e "${tmp_dir}/tmp6.csv" ]; then
|
||||||
|
echo "!tmp6.csv"
|
||||||
|
fi
|
||||||
|
|
||||||
# Clean
|
# Clean
|
||||||
rm -rd $tmp_dir
|
rm -rd $tmp_dir
|
||||||
|
```diff
@@ -0,0 +1,133 @@
+drop table if exists test;
+drop table if exists test1;
+
+CREATE TABLE test
+(
+    `pt` String,
+    `count_distinct_exposure_uv` AggregateFunction(uniqHLL12, Int64)
+)
+ENGINE = AggregatingMergeTree
+ORDER BY pt;
+
+SELECT *
+FROM
+(
+    SELECT m0.pt AS pt
+        ,m0.`exposure_uv` AS exposure_uv
+        ,round(m2.exposure_uv,4) AS exposure_uv_hb_last_value
+        ,if(m2.exposure_uv IS NULL OR m2.exposure_uv = 0,NULL,round((m0.exposure_uv - m2.exposure_uv) * 1.0 / m2.exposure_uv,4)) AS exposure_uv_hb_diff_percent
+        ,round(m1.exposure_uv,4) AS exposure_uv_tb_last_value
+        ,if(m1.exposure_uv IS NULL OR m1.exposure_uv = 0,NULL,round((m0.exposure_uv - m1.exposure_uv) * 1.0 / m1.exposure_uv,4)) AS exposure_uv_tb_diff_percent
+    FROM
+    (
+        SELECT m0.pt AS pt
+            ,`exposure_uv` AS `exposure_uv`
+        FROM
+        (
+            SELECT pt AS pt
+                ,CASE WHEN COUNT(`exposure_uv`) > 0 THEN AVG(`exposure_uv`) ELSE 0 END AS `exposure_uv`
+            FROM
+            (
+                SELECT pt AS pt
+                    ,uniqHLL12Merge(count_distinct_exposure_uv) AS `exposure_uv`
+                FROM test
+                GROUP BY pt
+            ) m
+            GROUP BY pt
+        ) m0
+    ) m0
+    LEFT JOIN
+    (
+        SELECT m0.pt AS pt
+            ,`exposure_uv` AS `exposure_uv`
+        FROM
+        (
+            SELECT formatDateTime(addYears(parseDateTimeBestEffort(pt),1),'%Y%m%d') AS pt
+                ,CASE WHEN COUNT(`exposure_uv`) > 0 THEN AVG(`exposure_uv`) ELSE 0 END AS `exposure_uv`
+            FROM
+            (
+                SELECT pt AS pt
+                    ,uniqHLL12Merge(count_distinct_exposure_uv) AS `exposure_uv`
+                FROM test
+                GROUP BY pt
+            ) m
+            GROUP BY pt
+        ) m0
+    ) m1
+    ON m0.pt = m1.pt
+    LEFT JOIN
+    (
+        SELECT m0.pt AS pt
+            ,`exposure_uv` AS `exposure_uv`
+        FROM
+        (
+            SELECT formatDateTime(addDays(toDate(parseDateTimeBestEffort(pt)),1),'%Y%m%d') AS pt
+                ,CASE WHEN COUNT(`exposure_uv`) > 0 THEN AVG(`exposure_uv`) ELSE 0 END AS `exposure_uv`
+            FROM
+            (
+                SELECT pt AS pt
+                    ,uniqHLL12Merge(count_distinct_exposure_uv) AS `exposure_uv`
+                FROM test
+                GROUP BY pt
+            ) m
+            GROUP BY pt
+        ) m0
+    ) m2
+    ON m0.pt = m2.pt
+) c0
+ORDER BY pt ASC, exposure_uv DESC
+settings join_use_nulls = 1;
+
+CREATE TABLE test1
+(
+    `pt` String,
+    `exposure_uv` Float64
+)
+ENGINE = Memory;
+
+SELECT *
+FROM
+(
+    SELECT m0.pt
+        ,m0.exposure_uv AS exposure_uv
+        ,round(m2.exposure_uv,4)
+    FROM
+    (
+        SELECT pt
+            ,exposure_uv
+        FROM test1
+    ) m0
+    LEFT JOIN
+    (
+        SELECT pt
+            ,exposure_uv
+        FROM test1
+    ) m1
+    ON m0.pt = m1.pt
+    LEFT JOIN
+    (
+        SELECT pt
+            ,exposure_uv
+        FROM test1
+    ) m2
+    ON m0.pt = m2.pt
+) c0
+ORDER BY exposure_uv
+settings join_use_nulls = 1;
+
+SELECT
+    pt AS pt,
+    exposure_uv AS exposure_uv
+FROM
+(
+    SELECT
+        pt
+    FROM test1
+) AS m0
+FULL OUTER JOIN
+(
+    SELECT
+        pt,
+        exposure_uv
+    FROM test1
+) AS m1 ON m0.pt = m1.pt;
```
```diff
@@ -0,0 +1,3 @@
+20230626 0.3156979034107179 \N \N
+20230626 0.2624629016490004 \N \N
+20230626 0.19390556368960468 \N \N
```
```diff
@@ -0,0 +1,107 @@
+create table test1 (
+    `pt` String,
+    `brand_name` String,
+    `total_indirect_order_cnt` Float64,
+    `total_indirect_gmv` Float64
+) ENGINE = Memory;
+
+create table test2 (
+    `pt` String,
+    `brand_name` String,
+    `exposure_uv` Float64,
+    `click_uv` Float64
+) ENGINE = Memory;
+
+INSERT INTO test1 (`pt`, `brand_name`, `total_indirect_order_cnt`, `total_indirect_gmv`) VALUES ('20230625', 'LINING', 2232, 1008710), ('20230625', 'adidas', 125, 58820), ('20230625', 'Nike', 1291, 1033020), ('20230626', 'Nike', 1145, 938926), ('20230626', 'LINING', 1904, 853336), ('20230626', 'adidas', 133, 62546), ('20220626', 'LINING', 3747, 1855203), ('20220626', 'Nike', 2295, 1742665), ('20220626', 'adidas', 302, 122388);
+
+INSERT INTO test2 (`pt`, `brand_name`, `exposure_uv`, `click_uv`) VALUES ('20230625', 'Nike', 2012913, 612831), ('20230625', 'adidas', 480277, 96176), ('20230625', 'LINING', 2474234, 627814), ('20230626', 'Nike', 1934666, 610770), ('20230626', 'adidas', 469904, 91117), ('20230626', 'LINING', 2285142, 599765), ('20220626', 'Nike', 2979656, 937166), ('20220626', 'adidas', 704751, 124250), ('20220626', 'LINING', 3163884, 1010221);
+
+SELECT * FROM (
+    SELECT m0.pt AS pt
+        ,m0.`uvctr` AS uvctr
+        ,round(m1.uvctr,4) AS uvctr_hb_last_value
+        ,round(m2.uvctr,4) AS uvctr_tb_last_value
+    FROM
+    (
+        SELECT m0.pt AS pt
+            ,COALESCE(m0.brand_name,m1.brand_name) AS brand_name
+            ,if(isNaN(`click_uv` / `exposure_uv`) OR isInfinite(`click_uv` / `exposure_uv`),NULL,`click_uv` / `exposure_uv`) AS `uvctr`
+        FROM
+        (
+            SELECT pt AS pt
+                ,brand_name AS `brand_name`
+                ,exposure_uv AS `exposure_uv`
+                ,click_uv AS `click_uv`
+            FROM test2
+            WHERE pt = '20230626'
+        ) m0
+        FULL JOIN
+        (
+            SELECT pt AS pt
+                ,brand_name AS `brand_name`
+                ,total_indirect_order_cnt AS `total_indirect_order_cnt`
+                ,total_indirect_gmv AS `total_indirect_gmv`
+            FROM test1
+            WHERE pt = '20230626'
+        ) m1
+        ON m0.brand_name = m1.brand_name AND m0.pt = m1.pt
+    ) m0
+    LEFT JOIN
+    (
+        SELECT m0.pt AS pt
+            ,if(isNaN(`click_uv` / `exposure_uv`) OR isInfinite(`click_uv` / `exposure_uv`),NULL,`click_uv` / `exposure_uv`) AS `uvctr`
+            ,COALESCE(m0.brand_name,m1.brand_name) AS brand_name
+            ,`exposure_uv` AS `exposure_uv`
+            ,`click_uv`
+        FROM
+        (
+            SELECT pt AS pt
+                ,brand_name AS `brand_name`
+                ,exposure_uv AS `exposure_uv`
+                ,click_uv AS `click_uv`
+            FROM test2
+            WHERE pt = '20230625'
+        ) m0
+        FULL JOIN
+        (
+            SELECT pt AS pt
+                ,brand_name AS `brand_name`
+                ,total_indirect_order_cnt AS `total_indirect_order_cnt`
+                ,total_indirect_gmv AS `total_indirect_gmv`
+            FROM test1
+            WHERE pt = '20230625'
+        ) m1
+        ON m0.brand_name = m1.brand_name AND m0.pt = m1.pt
+    ) m1
+    ON m0.brand_name = m1.brand_name AND m0.pt = m1.pt
+    LEFT JOIN
+    (
+        SELECT m0.pt AS pt
+            ,if(isNaN(`click_uv` / `exposure_uv`) OR isInfinite(`click_uv` / `exposure_uv`),NULL,`click_uv` / `exposure_uv`) AS `uvctr`
+            ,COALESCE(m0.brand_name,m1.brand_name) AS brand_name
+            ,`exposure_uv` AS `exposure_uv`
+            ,`click_uv`
+        FROM
+        (
+            SELECT pt AS pt
+                ,brand_name AS `brand_name`
+                ,exposure_uv AS `exposure_uv`
+                ,click_uv AS `click_uv`
+            FROM test2
+            WHERE pt = '20220626'
+        ) m0
+        FULL JOIN
+        (
+            SELECT pt AS pt
+                ,brand_name AS `brand_name`
+                ,total_indirect_order_cnt AS `total_indirect_order_cnt`
+                ,total_indirect_gmv AS `total_indirect_gmv`
+            FROM test1
+            WHERE pt = '20220626'
+        ) m1
+        ON m0.brand_name = m1.brand_name AND m0.pt = m1.pt
+    ) m2
+    ON m0.brand_name = m2.brand_name AND m0.pt = m2.pt
+) c0
+ORDER BY pt ASC, uvctr DESC;
+
```
```diff
@@ -1,4 +1,5 @@
 -- Tags: no-parallel-replicas

 set max_threads=10;
+set optimize_use_implicit_projections=1;
 EXPLAIN PIPELINE SELECT count(JavaEnable) FROM test.hits WHERE WatchID = 1 OR Title = 'next' OR URL = 'prev' OR URL = '???' OR 1;
```
```diff
@@ -362,11 +362,12 @@ int decompressFiles(int input_fd, char * path, char * name, bool & have_compress

 #else

-int read_exe_path(char *exe, size_t/* buf_sz*/)
+int read_exe_path(char *exe, size_t buf_sz)
 {
-    if (realpath("/proc/self/exe", exe) == nullptr)
-        return 1;
-    return 0;
+    ssize_t n = readlink("/proc/self/exe", exe, buf_sz - 1);
+    if (n > 0)
+        exe[n] = '\0';
+    return n > 0 && n < static_cast<ssize_t>(buf_sz);
 }

 #endif
```
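A standalone sketch of the `readlink(2)` pattern adopted here: unlike `realpath`, `readlink` neither NUL-terminates nor guarantees the whole path fits, so the caller terminates the buffer itself and can treat a full buffer as possible truncation. Linux-only, since it reads `/proc/self/exe`.

```cpp
#include <cstdio>
#include <unistd.h>

int main()
{
    char exe[4096];
    // readlink returns the number of bytes placed in the buffer, or -1 on error.
    ssize_t n = readlink("/proc/self/exe", exe, sizeof(exe) - 1);
    if (n <= 0)
        return 1;
    exe[n] = '\0'; // readlink leaves the buffer unterminated
    // If the buffer was not completely filled, the path cannot have been truncated.
    bool complete = n < static_cast<ssize_t>(sizeof(exe) - 1);
    std::printf("%s (%s)\n", exe, complete ? "complete" : "possibly truncated");
    return 0;
}
```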
```diff
@@ -430,20 +431,18 @@ int main(int/* argc*/, char* argv[])
         return 1;
     }

-    int lock = -1;
-    /// Protection from double decompression
 #if !defined(OS_DARWIN) && !defined(OS_FREEBSD)
     /// get inode of this executable
     uint64_t inode = getInode(self);
-    /// In some cases /proc/self/maps may not contain the inode for the
-    /// /proc/self/exe, one of such examples are using qemu-*-static, in this
-    /// case maps will be proxied through the qemu, and it will remove
-    /// information about itself from it.
-    if (inode != 0)
+    if (inode == 0)
     {
+        std::cerr << "Unable to obtain inode for exe '" << self << "'." << std::endl;
+        return 1;
+    }
+
     std::stringstream lock_path; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
     lock_path << "/tmp/" << name << ".decompression." << inode << ".lock";
-    lock = open(lock_path.str().c_str(), O_CREAT | O_RDWR, 0666);
+    int lock = open(lock_path.str().c_str(), O_CREAT | O_RDWR, 0666);
     if (lock < 0)
     {
         perror("lock open");
```
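A minimal sketch of the restructured guard (`getInodeOf` is a hypothetical stand-in for the source's `getInode` helper): a zero inode now aborts loudly instead of silently skipping the lock, and the lock descriptor no longer needs the `-1` sentinel.

```cpp
#include <cstdint>
#include <cstdio>
#include <fcntl.h>
#include <iostream>
#include <sstream>
#include <sys/stat.h>
#include <unistd.h>

// Hypothetical stand-in for the getInode(self) helper used by the decompressor.
static uint64_t getInodeOf(const char * path)
{
    struct stat st {};
    return (stat(path, &st) == 0) ? static_cast<uint64_t>(st.st_ino) : 0;
}

int main(int, char * argv[])
{
    uint64_t inode = getInodeOf(argv[0]);
    if (inode == 0)
    {
        std::cerr << "Unable to obtain inode for exe '" << argv[0] << "'." << std::endl;
        return 1;
    }

    // One lock file per executable inode, as in the hunk above.
    std::stringstream lock_path;
    lock_path << "/tmp/demo.decompression." << inode << ".lock";
    int lock = open(lock_path.str().c_str(), O_CREAT | O_RDWR, 0666);
    if (lock < 0)
    {
        perror("lock open");
        return 1;
    }

    std::cout << "holding " << lock_path.str() << '\n';
    close(lock);
    return 0;
}
```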
```diff
@@ -482,7 +481,6 @@ int main(int/* argc*/, char* argv[])
             printf("No target executable - decompression only was performed.\n");
             return 0;
         }
-    }
 #endif

     int input_fd = open(self, O_RDONLY);
```
```diff
@@ -549,19 +547,21 @@ int main(int/* argc*/, char* argv[])

     if (has_exec)
     {
+#if !defined(OS_DARWIN) && !defined(OS_FREEBSD)
         /// write one byte to the lock in case other copies of compressed are running to indicate that
         /// execution should be performed
-        if (lock >= 0)
         write(lock, "1", 1);
+#endif
         execv(self, argv);

         /// This part of code will be reached only if an error happened
         perror("execv");
         return 1;
     }
+#if !defined(OS_DARWIN) && !defined(OS_FREEBSD)
     /// since inodes can be reused - it's a precaution in case a lock file already exists and has size of 1
-    if (lock >= 0)
     ftruncate(lock, 0);
+#endif

     printf("No target executable - decompression only was performed.\n");
 }
```