Merge branch 'master' into parse-ddl

This commit is contained in:
Val Doroshchuk 2023-07-18 15:19:13 +02:00 committed by GitHub
commit 79ba798ab5
50 changed files with 1537 additions and 131 deletions

contrib/cctz (vendored)

@ -1 +1 @@
-Subproject commit 5e05432420f9692418e2e12aff09859e420b14a2
+Subproject commit 8529bcef5cd996b7c0f4d7475286b76b5d126c4c

View File

@ -4524,6 +4524,7 @@ This setting allows to specify renaming pattern for files processed by `file` ta
### Placeholders
- `%a` — Full original filename (e.g., "sample.csv").
- `%f` — Original filename without extension (e.g., "sample").
- `%e` — Original file extension with dot (e.g., ".csv").
- `%t` — Timestamp (in microseconds).
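
A minimal usage sketch of the new `%a` placeholder (the file name below is illustrative, not part of this changeset): renaming with `%a` keeps the full original name and only appends a suffix.

```sql
-- '%a' expands to the full original filename, e.g. "sample.csv" -> "sample.csv.processed".
SET rename_files_after_processing = '%a.processed';
SELECT count() FROM file('sample.csv');
```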

View File

@ -0,0 +1,32 @@
---
slug: /en/sql-reference/aggregate-functions/reference/array_concat_agg
sidebar_position: 110
---
# array_concat_agg
Alias of `groupArrayArray`. The function is case-insensitive.
**Example**
```text
SELECT *
FROM t
┌─a───────┐
│ [1,2,3] │
│ [4,5] │
│ [6] │
└─────────┘
```
Query:
```sql
SELECT array_concat_agg(a) AS a
FROM t
```
Result:
```text
┌─a─────────────┐
│ [1,2,3,4,5,6] │
└───────────────┘
```
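
Since `array_concat_agg` is registered as a case-insensitive alias of `groupArrayArray`, the two spellings below are interchangeable. A small sketch over the same table `t`:

```sql
-- Both calls produce the same concatenated array; only the spelling differs.
SELECT
    groupArrayArray(a) AS via_real_name,
    ArrAy_cOncAt_aGg(a) AS via_alias
FROM t;
```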

View File

@ -4201,6 +4201,7 @@ SELECT *, timezone() FROM test_tz WHERE d = '2000-01-01 00:00:00' SETTINGS sessi
### Template
The template supports the following placeholders:
- `%a` — Full original filename (e.g. "sample.csv").
- `%f` — Original filename without extension (e.g. "sample").
- `%e` — Original file extension with dot (e.g. ".csv").
- `%t` — Current time (in microseconds).

View File

@ -222,7 +222,6 @@ AggregateFunctionPtr AggregateFunctionFactory::tryGet(
: nullptr;
}
std::optional<AggregateFunctionProperties> AggregateFunctionFactory::tryGetProperties(String name) const
{
if (name.size() > MAX_AGGREGATE_FUNCTION_NAME_LENGTH)

View File

@ -126,6 +126,7 @@ void registerAggregateFunctionGroupArray(AggregateFunctionFactory & factory)
factory.registerFunction("groupArray", { createAggregateFunctionGroupArray<false>, properties });
factory.registerAlias("array_agg", "groupArray", AggregateFunctionFactory::CaseInsensitive);
factory.registerAliasUnchecked("array_concat_agg", "groupArrayArray", AggregateFunctionFactory::CaseInsensitive);
factory.registerFunction("groupArraySample", { createAggregateFunctionGroupArraySample, properties });
factory.registerFunction("groupArrayLast", { createAggregateFunctionGroupArray<true>, properties });
}

View File

@ -10,7 +10,6 @@
#include <cassert>
#include <chrono>
#include <cstring>
#include <iostream>
#include <memory>

View File

@ -47,6 +47,7 @@ String FileRenamer::generateNewFilename(const String & filename) const
// Define placeholders and their corresponding values
std::map<String, String> placeholders =
{
{"%a", filename},
{"%f", file_base},
{"%e", file_ext},
{"%t", timestamp},
@ -69,16 +70,17 @@ bool FileRenamer::isEmpty() const
bool FileRenamer::validateRenamingRule(const String & rule, bool throw_on_error)
{
// Check if the rule contains invalid placeholders
re2::RE2 invalid_placeholder_pattern("^([^%]|%[fet%])*$");
re2::RE2 invalid_placeholder_pattern("^([^%]|%[afet%])*$");
if (!re2::RE2::FullMatch(rule, invalid_placeholder_pattern))
{
if (throw_on_error)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid renaming rule: Allowed placeholders only %f, %e, %t, and %%");
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid renaming rule: Allowed placeholders only %a, %f, %e, %t, and %%");
return false;
}
// Replace valid placeholders with empty strings and count remaining percentage signs.
String replaced_rule = rule;
boost::replace_all(replaced_rule, "%a", "");
boost::replace_all(replaced_rule, "%f", "");
boost::replace_all(replaced_rule, "%e", "");
boost::replace_all(replaced_rule, "%t", "");

View File

@ -9,6 +9,7 @@ namespace DB
/**
* The FileRenamer class provides functionality for renaming files based on given pattern with placeholders
* The supported placeholders are:
* %a - Full original file name ("sample.csv")
* %f - Original filename without extension ("sample")
* %e - Original file extension with dot (".csv")
* %t - Timestamp (in microseconds)

View File

@ -52,35 +52,38 @@ public:
{
const auto & creator_map = getMap();
const auto & case_insensitive_creator_map = getCaseInsensitiveMap();
const String factory_name = getFactoryName();
String real_dict_name;
if (creator_map.count(real_name))
real_dict_name = real_name;
else if (auto real_name_lowercase = Poco::toLower(real_name); case_insensitive_creator_map.count(real_name_lowercase))
real_dict_name = real_name_lowercase;
else
throw Exception(ErrorCodes::LOGICAL_ERROR, "{}: can't create alias '{}', the real name '{}' is not registered",
factory_name, alias_name, real_name);
auto real_name_lowercase = Poco::toLower(real_name);
if (!creator_map.contains(real_name) && !case_insensitive_creator_map.contains(real_name_lowercase))
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"{}: can't create alias '{}', the real name '{}' is not registered",
getFactoryName(),
alias_name,
real_name);
registerAliasUnchecked(alias_name, real_name, case_sensitiveness);
}
/// The caller must ensure that real_name is actually registered when calling this function directly.
void registerAliasUnchecked(const String & alias_name, const String & real_name, CaseSensitiveness case_sensitiveness = CaseSensitive)
{
String alias_name_lowercase = Poco::toLower(alias_name);
if (creator_map.count(alias_name) || case_insensitive_creator_map.count(alias_name_lowercase))
throw Exception(ErrorCodes::LOGICAL_ERROR, "{}: the alias name '{}' is already registered as real name",
factory_name, alias_name);
String real_name_lowercase = Poco::toLower(real_name);
const String factory_name = getFactoryName();
if (case_sensitiveness == CaseInsensitive)
{
if (!case_insensitive_aliases.emplace(alias_name_lowercase, real_dict_name).second)
throw Exception(ErrorCodes::LOGICAL_ERROR, "{}: case insensitive alias name '{}' is not unique",
factory_name, alias_name);
if (!case_insensitive_aliases.emplace(alias_name_lowercase, real_name).second)
throw Exception(ErrorCodes::LOGICAL_ERROR, "{}: case insensitive alias name '{}' is not unique", factory_name, alias_name);
case_insensitive_name_mapping[alias_name_lowercase] = real_name;
}
if (!aliases.emplace(alias_name, real_dict_name).second)
if (!aliases.emplace(alias_name, real_name).second)
throw Exception(ErrorCodes::LOGICAL_ERROR, "{}: alias name '{}' is not unique", factory_name, alias_name);
}
std::vector<String> getAllRegisteredNames() const override
{
std::vector<String> result;
@ -93,7 +96,7 @@ public:
bool isCaseInsensitive(const String & name) const
{
String name_lowercase = Poco::toLower(name);
return getCaseInsensitiveMap().count(name_lowercase) || case_insensitive_aliases.count(name_lowercase);
return getCaseInsensitiveMap().contains(name_lowercase) || case_insensitive_aliases.contains(name_lowercase);
}
const String & aliasTo(const String & name) const
@ -106,14 +109,11 @@ public:
throw Exception(ErrorCodes::LOGICAL_ERROR, "{}: name '{}' is not alias", getFactoryName(), name);
}
bool isAlias(const String & name) const
{
return aliases.count(name) || case_insensitive_aliases.contains(name);
}
bool isAlias(const String & name) const { return aliases.contains(name) || case_insensitive_aliases.contains(name); }
bool hasNameOrAlias(const String & name) const
{
return getMap().count(name) || getCaseInsensitiveMap().count(name) || isAlias(name);
return getMap().contains(name) || getCaseInsensitiveMap().contains(name) || isAlias(name);
}
/// Return the canonical name (the name used in registration) if it's different from `name`.
@ -129,7 +129,7 @@ public:
private:
using InnerMap = std::unordered_map<String, Value>; // name -> creator
using AliasMap = std::unordered_map<String, String>; // alias -> original type
using AliasMap = std::unordered_map<String, String>; // alias -> original name
virtual const InnerMap & getMap() const = 0;
virtual const InnerMap & getCaseInsensitiveMap() const = 0;

View File

@ -577,6 +577,7 @@ class IColumn;
M(Bool, optimize_skip_merged_partitions, false, "Skip partitions with one part with level > 0 in optimize final", 0) \
M(Bool, optimize_on_insert, true, "Do the same transformation for inserted block of data as if merge was done on this block.", 0) \
M(Bool, optimize_use_projections, true, "Automatically choose projections to perform SELECT query", 0) ALIAS(allow_experimental_projection_optimization) \
M(Bool, optimize_use_implicit_projections, false, "Automatically choose implicit projections to perform SELECT query", 0) \
M(Bool, force_optimize_projection, false, "If projection optimization is enabled, SELECT queries need to use projection", 0) \
M(Bool, async_socket_for_remote, true, "Asynchronously read from socket executing remote query", 0) \
M(Bool, async_query_sending_for_remote, true, "Asynchronously create connections and send query to shards in remote query", 0) \
@ -736,7 +737,7 @@ class IColumn;
M(String, workload, "default", "Name of workload to be used to access resources", 0) \
M(Milliseconds, storage_system_stack_trace_pipe_read_timeout_ms, 100, "Maximum time to read from a pipe for receiving information from the threads when querying the `system.stack_trace` table. This setting is used for testing purposes and not meant to be changed by users.", 0) \
\
M(String, rename_files_after_processing, "", "Rename successfully processed files according to the specified pattern; Pattern can include the following placeholders: `%f` (original filename without extension), `%e` (file extension with dot), `%t` (current timestamp in µs), and `%%` (% sign)", 0) \
M(String, rename_files_after_processing, "", "Rename successfully processed files according to the specified pattern; Pattern can include the following placeholders: `%a` (full original file name), `%f` (original filename without extension), `%e` (file extension with dot), `%t` (current timestamp in µs), and `%%` (% sign)", 0) \
\
M(Bool, parallelize_output_from_storages, true, "Parallelize output for reading step from storage. It allows parallelizing query processing right after reading from storage if possible", 0) \
M(String, insert_deduplication_token, "", "If not empty, used for duplicate detection instead of data digest", 0) \
@ -774,6 +775,7 @@ class IColumn;
M(Bool, keeper_map_strict_mode, false, "Enforce additional checks during operations on KeeperMap. E.g. throw an exception on an insert for already existing key", 0) \
M(UInt64, extract_kvp_max_pairs_per_row, 1000, "Max number pairs that can be produced by extractKeyValuePairs function. Used to safeguard against consuming too much memory.", 0) \
M(Timezone, session_timezone, "", "The default timezone for current session or query. The server default timezone if empty.", 0) \
M(Bool, allow_create_index_without_type, false, "Allow CREATE INDEX query without TYPE. Query will be ignored. Made for SQL compatibility tests.", 0)\
// End of COMMON_SETTINGS
// Please add settings related to formats into the FORMAT_FACTORY_SETTINGS and move obsolete settings to OBSOLETE_SETTINGS.
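
For context, a hedged sketch of how the new `allow_create_index_without_type` setting is meant to be used (table and index names are made up): with the setting enabled, a TYPE-less `CREATE INDEX` parses and is silently ignored instead of throwing.

```sql
-- Without the setting this statement throws INCORRECT_QUERY;
-- with it, the statement becomes a no-op (see InterpreterCreateIndexQuery below).
SET allow_create_index_without_type = 1;
CREATE INDEX idx_pt ON tab (pt);  -- SQL-standard form, no TYPE clause
```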

View File

@ -80,6 +80,7 @@ namespace SettingsChangesHistory
/// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972)
static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
{
{"23.7", {{"optimize_use_implicit_projections", true, false, "Disable implicit projections due to unexpected results."}}},
{"23.6", {{"http_send_timeout", 180, 30, "3 minutes seems crazy long. Note that this is timeout for a single network write call, not for the whole upload operation."},
{"http_receive_timeout", 180, 30, "See http_send_timeout."}}},
{"23.5", {{"input_format_parquet_preserve_order", true, false, "Allow Parquet reader to reorder rows for better parallelism."},

View File

@ -15,6 +15,7 @@ namespace DB
namespace ErrorCodes
{
extern const int TABLE_IS_READ_ONLY;
extern const int INCORRECT_QUERY;
}
@ -23,6 +24,21 @@ BlockIO InterpreterCreateIndexQuery::execute()
auto current_context = getContext();
const auto & create_index = query_ptr->as<ASTCreateIndexQuery &>();
// No-op if allow_create_index_without_type = true; throw otherwise.
if (!create_index.index_decl->as<ASTIndexDeclaration>()->type)
{
if (!current_context->getSettingsRef().allow_create_index_without_type)
{
throw Exception(ErrorCodes::INCORRECT_QUERY, "CREATE INDEX without TYPE is forbidden."
" SET allow_create_index_without_type=1 to ignore this statements.");
}
else
{
// Nothing to do
return {};
}
}
AccessRightsElements required_access;
required_access.emplace_back(AccessType::ALTER_ADD_INDEX, create_index.getDatabase(), create_index.getTable());

View File

@ -56,8 +56,7 @@ void ASTCreateIndexQuery::formatQueryImpl(const FormatSettings & settings, Forma
formatOnCluster(settings);
if (!cluster.empty())
settings.ostr << " ";
settings.ostr << " ";
index_decl->formatImpl(settings, state, frame);
}

View File

@ -13,8 +13,8 @@ ASTPtr ASTIndexDeclaration::clone() const
auto res = std::make_shared<ASTIndexDeclaration>();
res->name = name;
res->granularity = granularity;
if (granularity)
res->granularity = granularity;
if (expr)
res->set(res->expr, expr->clone());
if (type)
@ -25,23 +25,37 @@ ASTPtr ASTIndexDeclaration::clone() const
void ASTIndexDeclaration::formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const
{
if (part_of_create_index_query)
if (expr)
{
s.ostr << "(";
expr->formatImpl(s, state, frame);
s.ostr << ")";
}
else
{
s.ostr << backQuoteIfNeed(name);
s.ostr << " ";
expr->formatImpl(s, state, frame);
if (part_of_create_index_query)
{
if (expr->as<ASTExpressionList>())
{
s.ostr << "(";
expr->formatImpl(s, state, frame);
s.ostr << ")";
}
else
expr->formatImpl(s, state, frame);
}
else
{
s.ostr << backQuoteIfNeed(name);
s.ostr << " ";
expr->formatImpl(s, state, frame);
}
}
s.ostr << (s.hilite ? hilite_keyword : "") << " TYPE " << (s.hilite ? hilite_none : "");
type->formatImpl(s, state, frame);
s.ostr << (s.hilite ? hilite_keyword : "") << " GRANULARITY " << (s.hilite ? hilite_none : "");
s.ostr << granularity;
if (type)
{
s.ostr << (s.hilite ? hilite_keyword : "") << " TYPE " << (s.hilite ? hilite_none : "");
type->formatImpl(s, state, frame);
}
if (granularity)
{
s.ostr << (s.hilite ? hilite_keyword : "") << " GRANULARITY " << (s.hilite ? hilite_none : "");
s.ostr << granularity;
}
}
}

View File

@ -17,24 +17,36 @@ bool ParserCreateIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected
{
ParserKeyword s_type("TYPE");
ParserKeyword s_granularity("GRANULARITY");
ParserToken open(TokenType::OpeningRoundBracket);
ParserToken close(TokenType::ClosingRoundBracket);
ParserOrderByExpressionList order_list;
ParserDataType data_type_p;
ParserExpression expression_p;
ParserUnsignedInteger granularity_p;
ASTPtr expr;
ASTPtr order;
ASTPtr type;
ASTPtr granularity;
/// Skip name parser for SQL-standard CREATE INDEX
if (!expression_p.parse(pos, expr, expected))
return false;
if (expression_p.parse(pos, expr, expected))
{
}
else if (open.ignore(pos, expected))
{
if (!order_list.parse(pos, order, expected))
return false;
if (!s_type.ignore(pos, expected))
return false;
if (!close.ignore(pos, expected))
return false;
}
if (!data_type_p.parse(pos, type, expected))
return false;
if (s_type.ignore(pos, expected))
{
if (!data_type_p.parse(pos, type, expected))
return false;
}
if (s_granularity.ignore(pos, expected))
{
@ -45,13 +57,14 @@ bool ParserCreateIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected
auto index = std::make_shared<ASTIndexDeclaration>();
index->part_of_create_index_query = true;
index->set(index->expr, expr);
index->set(index->type, type);
if (type)
index->set(index->type, type);
if (granularity)
index->granularity = granularity->as<ASTLiteral &>().value.safeGet<UInt64>();
else
{
if (index->type->name == "annoy")
if (index->type && index->type->name == "annoy")
index->granularity = ASTIndexDeclaration::DEFAULT_ANNOY_INDEX_GRANULARITY;
else
index->granularity = ASTIndexDeclaration::DEFAULT_INDEX_GRANULARITY;

View File

@ -111,7 +111,7 @@ void optimizePrimaryKeyCondition(const Stack & stack);
void optimizePrewhere(Stack & stack, QueryPlan::Nodes & nodes);
void optimizeReadInOrder(QueryPlan::Node & node, QueryPlan::Nodes & nodes);
void optimizeAggregationInOrder(QueryPlan::Node & node, QueryPlan::Nodes &);
bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes & nodes);
bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes & nodes, bool allow_implicit_projections);
bool optimizeUseNormalProjections(Stack & stack, QueryPlan::Nodes & nodes);
bool addPlansForSets(QueryPlan::Node & node, QueryPlan::Nodes & nodes);

View File

@ -19,6 +19,7 @@ QueryPlanOptimizationSettings QueryPlanOptimizationSettings::fromSettings(const
settings.remove_redundant_distinct = from.query_plan_remove_redundant_distinct;
settings.optimize_projection = from.optimize_use_projections && from.query_plan_optimize_projection;
settings.force_use_projection = settings.optimize_projection && from.force_optimize_projection;
settings.optimize_use_implicit_projections = settings.optimize_projection && from.optimize_use_implicit_projections;
return settings;
}
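
A quick sketch of what the new flag gates, borrowed from the tests later in this commit (table `d` is the small partitioned MergeTree table from those tests): the implicit min/max/count projection is only used when the setting is enabled.

```sql
-- With implicit projections enabled, this aggregate is answered from the
-- minmax_count projection, so max_rows_to_read = 2 is enough.
SET optimize_use_projections = 1, optimize_use_implicit_projections = 1, max_rows_to_read = 2;
SELECT min(i), max(i), count() FROM d;
```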

View File

@ -41,6 +41,7 @@ struct QueryPlanOptimizationSettings
/// If reading from projection can be applied
bool optimize_projection = false;
bool force_use_projection = false;
bool optimize_use_implicit_projections = false;
static QueryPlanOptimizationSettings fromSettings(const Settings & from);
static QueryPlanOptimizationSettings fromContext(ContextPtr from);

View File

@ -4,6 +4,7 @@
#include <Processors/QueryPlan/Optimizations/Optimizations.h>
#include <Processors/QueryPlan/SortingStep.h>
#include <Common/Exception.h>
#include <DataTypes/IDataType.h>
namespace DB
{
@ -28,6 +29,20 @@ const DB::DataStream & getChildOutputStream(DB::QueryPlan::Node & node)
namespace DB::QueryPlanOptimizations
{
/// Check that the output columns do not share the same name.
/// That is fine for an ActionsDAG, but may introduce a bug in a SortingStep because columns are selected by name.
static bool areOutputsConvertableToBlock(const ActionsDAG::NodeRawConstPtrs & outputs)
{
std::unordered_set<std::string_view> names;
for (const auto & output : outputs)
{
if (!names.emplace(output->result_name).second)
return false;
}
return true;
}
size_t tryExecuteFunctionsAfterSorting(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes)
{
if (parent_node->children.size() != 1)
@ -57,6 +72,9 @@ size_t tryExecuteFunctionsAfterSorting(QueryPlan::Node * parent_node, QueryPlan:
if (unneeded_for_sorting->trivial())
return 0;
if (!areOutputsConvertableToBlock(needed_for_sorting->getOutputs()))
return 0;
// Sorting (parent_node) -> Expression (child_node)
auto & node_with_needed = nodes.emplace_back();
std::swap(node_with_needed.children, child_node->children);

View File

@ -126,7 +126,8 @@ void optimizeTreeSecondPass(const QueryPlanOptimizationSettings & optimization_s
optimizeReadInOrder(*frame.node, nodes);
if (optimization_settings.optimize_projection)
num_applied_projection += optimizeUseAggregateProjections(*frame.node, nodes);
num_applied_projection
+= optimizeUseAggregateProjections(*frame.node, nodes, optimization_settings.optimize_use_implicit_projections);
if (optimization_settings.aggregation_in_order)
optimizeAggregationInOrder(*frame.node, nodes);

View File

@ -433,7 +433,8 @@ AggregateProjectionCandidates getAggregateProjectionCandidates(
QueryPlan::Node & node,
AggregatingStep & aggregating,
ReadFromMergeTree & reading,
const std::shared_ptr<PartitionIdToMaxBlock> & max_added_blocks)
const std::shared_ptr<PartitionIdToMaxBlock> & max_added_blocks,
bool allow_implicit_projections)
{
const auto & keys = aggregating.getParams().keys;
const auto & aggregates = aggregating.getParams().aggregates;
@ -453,7 +454,8 @@ AggregateProjectionCandidates getAggregateProjectionCandidates(
if (projection.type == ProjectionDescription::Type::Aggregate)
agg_projections.push_back(&projection);
bool can_use_minmax_projection = metadata->minmax_count_projection && !reading.getMergeTreeData().has_lightweight_delete_parts.load();
bool can_use_minmax_projection = allow_implicit_projections && metadata->minmax_count_projection
&& !reading.getMergeTreeData().has_lightweight_delete_parts.load();
if (!can_use_minmax_projection && agg_projections.empty())
return candidates;
@ -543,7 +545,7 @@ static QueryPlan::Node * findReadingStep(QueryPlan::Node & node)
return nullptr;
}
bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes & nodes)
bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes & nodes, bool allow_implicit_projections)
{
if (node.children.size() != 1)
return false;
@ -568,7 +570,7 @@ bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes &
std::shared_ptr<PartitionIdToMaxBlock> max_added_blocks = getMaxAddedBlocks(reading);
auto candidates = getAggregateProjectionCandidates(node, *aggregating, *reading, max_added_blocks);
auto candidates = getAggregateProjectionCandidates(node, *aggregating, *reading, max_added_blocks, allow_implicit_projections);
AggregateProjectionCandidate * best_candidate = nullptr;
if (candidates.minmax_projection)

View File

@ -11,6 +11,7 @@
#include <Storages/extractKeyExpressionList.h>
#include <Core/Defines.h>
#include "Common/Exception.h"
namespace DB
@ -89,8 +90,16 @@ IndexDescription IndexDescription::getIndexFromAST(const ASTPtr & definition_ast
result.type = Poco::toLower(index_definition->type->name);
result.granularity = index_definition->granularity;
ASTPtr expr_list = extractKeyExpressionList(index_definition->expr->clone());
result.expression_list_ast = expr_list->clone();
ASTPtr expr_list;
if (index_definition->expr)
{
expr_list = extractKeyExpressionList(index_definition->expr->clone());
result.expression_list_ast = expr_list->clone();
}
else
{
throw Exception(ErrorCodes::LOGICAL_ERROR, "Expression is not set");
}
auto syntax = TreeRewriter(context).analyze(expr_list, columns.getAllPhysical());
result.expression = ExpressionAnalyzer(expr_list, syntax, context).getActions(true);

View File

@ -564,7 +564,17 @@ static const ActionsDAG::Node & cloneASTWithInversionPushDown(
}
case (ActionsDAG::ActionType::COLUMN):
{
res = &inverted_dag.addColumn({node.column, node.result_type, node.result_name});
String name;
if (const auto * column_const = typeid_cast<const ColumnConst *>(node.column.get()))
/// Re-generate column name for constant.
/// The DAG from the query (with the analyzer enabled) uses suffixes for constants, like 1_UInt8.
/// The DAG from the PK does not, which sometimes breaks matching by column name.
/// Ideally, we should compare DAG subtrees instead of names.
name = ASTLiteral(column_const->getDataColumn()[0]).getColumnName();
else
name = node.result_name;
res = &inverted_dag.addColumn({node.column, node.result_type, name});
break;
}
case (ActionsDAG::ActionType::ALIAS):

View File

@ -6994,7 +6994,8 @@ std::optional<ProjectionCandidate> MergeTreeData::getQueryProcessingStageWithAgg
ProjectionCandidate * selected_candidate = nullptr;
size_t min_sum_marks = std::numeric_limits<size_t>::max();
if (metadata_snapshot->minmax_count_projection && !has_lightweight_delete_parts.load(std::memory_order_relaxed)) /// Disable ReadFromStorage for parts with lightweight.
if (settings.optimize_use_implicit_projections && metadata_snapshot->minmax_count_projection
&& !has_lightweight_delete_parts.load(std::memory_order_relaxed)) /// Disable ReadFromStorage for parts with lightweight.
add_projection_candidate(*metadata_snapshot->minmax_count_projection, true);
std::optional<ProjectionCandidate> minmax_count_projection_candidate;
if (!candidates.empty())

View File

@ -2448,10 +2448,13 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry)
if (part_desc->checksum_hex != part_desc->src_table_part->checksums.getTotalChecksumHex())
throw Exception(ErrorCodes::UNFINISHED, "Checksums of {} is suddenly changed", part_desc->src_table_part->name);
bool zero_copy_enabled = dynamic_cast<const MergeTreeData *>(source_table.get())->getSettings()->allow_remote_fs_zero_copy_replication;
/// Don't do hardlinks if zero-copy replication is enabled on either side (defensive programming)
bool source_zero_copy_enabled = dynamic_cast<const MergeTreeData *>(source_table.get())->getSettings()->allow_remote_fs_zero_copy_replication;
bool our_zero_copy_enabled = storage_settings_ptr->allow_remote_fs_zero_copy_replication;
IDataPartStorage::ClonePartParams clone_params
{
.copy_instead_of_hardlink = zero_copy_enabled && part_desc->src_table_part->isStoredOnRemoteDiskWithZeroCopySupport(),
.copy_instead_of_hardlink = (our_zero_copy_enabled || source_zero_copy_enabled) && part_desc->src_table_part->isStoredOnRemoteDiskWithZeroCopySupport(),
.metadata_version_to_write = metadata_snapshot->getMetadataVersion()
};
auto [res_part, temporary_part_lock] = cloneAndLoadDataPartOnSameDisk(
@ -7585,8 +7588,10 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta
UInt64 index = lock->getNumber();
MergeTreePartInfo dst_part_info(partition_id, index, index, src_part->info.level);
/// Don't do hardlinks if zero-copy replication is enabled on either side (defensive programming)
bool zero_copy_enabled = storage_settings_ptr->allow_remote_fs_zero_copy_replication
|| dynamic_cast<const MergeTreeData *>(dest_table.get())->getSettings()->allow_remote_fs_zero_copy_replication;
IDataPartStorage::ClonePartParams clone_params
{
.copy_instead_of_hardlink = zero_copy_enabled && src_part->isStoredOnRemoteDiskWithZeroCopySupport(),

View File

@ -6,6 +6,7 @@ create table test1(p DateTime, k int) engine MergeTree partition by toDate(p) or
insert into test1 values ('2020-09-01 00:01:02', 1), ('2020-09-01 20:01:03', 2), ('2020-09-02 00:01:03', 3);
set max_rows_to_read = 1;
set optimize_use_implicit_projections = 1;
-- non-optimized
select count() from test1 settings max_parallel_replicas = 3;
-- optimized (toYear is monotonic and we provide the partition expr as is)

View File

@ -4,7 +4,7 @@ create table d (i int, j int) engine MergeTree partition by i % 2 order by tuple
insert into d select number, number from numbers(10000);
set max_rows_to_read = 2, optimize_use_projections = 1;
set max_rows_to_read = 2, optimize_use_projections = 1, optimize_use_implicit_projections = 1;
select min(i), max(i), count() from d;
select min(i), max(i), count() from d group by _partition_id order by _partition_id;

View File

@ -30,6 +30,6 @@ SELECT sum(t) FROM XXXX WHERE indexHint(t = 42);
drop table if exists XXXX;
create table XXXX (t Int64, f Float64) Engine=MergeTree order by t settings index_granularity=8192;
insert into XXXX select number*60, 0 from numbers(100000);
SELECT count() FROM XXXX WHERE indexHint(t = toDateTime(0));
SELECT count() FROM XXXX WHERE indexHint(t = toDateTime(0)) SETTINGS optimize_use_implicit_projections = 1;
100000
drop table XXXX;

View File

@ -30,6 +30,6 @@ create table XXXX (t Int64, f Float64) Engine=MergeTree order by t settings inde
insert into XXXX select number*60, 0 from numbers(100000);
SELECT count() FROM XXXX WHERE indexHint(t = toDateTime(0));
SELECT count() FROM XXXX WHERE indexHint(t = toDateTime(0)) SETTINGS optimize_use_implicit_projections = 1;
drop table XXXX;

View File

@ -5,6 +5,8 @@ create table tbl(dt DateTime, i int, j String, v Float64) engine MergeTree parti
insert into tbl values ('2021-04-01 00:01:02', 1, '123', 4), ('2021-04-01 01:01:02', 1, '12', 4), ('2021-04-01 02:11:02', 2, '345', 4), ('2021-04-01 04:31:02', 2, '2', 4), ('2021-04-02 00:01:02', 1, '1234', 4), ('2021-04-02 00:01:02', 2, '123', 4), ('2021-04-02 00:01:02', 3, '12', 4), ('2021-04-02 00:01:02', 4, '1', 4);
set optimize_use_implicit_projections = 1;
select count() from tbl where _partition_value = ('2021-04-01', 1, 2) settings max_rows_to_read = 1;
select count() from tbl where _partition_value.1 = '2021-04-01' settings max_rows_to_read = 4;
select count() from tbl where _partition_value.2 = 0 settings max_rows_to_read = 4;

View File

@ -1,4 +1,6 @@
-- Tags: long
-- Tags: long, no-upgrade-check
-- TODO(@vdimir): remove no-upgrade-check tag after https://github.com/ClickHouse/ClickHouse/pull/51737 is released
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;

View File

@ -17,7 +17,7 @@ INSERT and READ INSERT
DROP
CHECK with query_log
QueryFinish INSERT INTO times SELECT now() + INTERVAL 1 day SETTINGS optimize_on_insert = 0; FileOpen 8
QueryFinish SELECT \'1\', min(t) FROM times; FileOpen 0
QueryFinish SELECT \'1\', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1; FileOpen 0
QueryFinish INSERT INTO times SELECT now() + INTERVAL 2 day SETTINGS optimize_on_insert = 0; FileOpen 8
QueryFinish SELECT \'2\', min(t) FROM times; FileOpen 0
QueryFinish SELECT \'2\', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1; FileOpen 0
QueryFinish INSERT INTO times SELECT now() + INTERVAL 3 day SETTINGS optimize_on_insert = 0; FileOpen 8

View File

@ -44,13 +44,13 @@ INSERT INTO times SELECT now() + INTERVAL 1 day SETTINGS optimize_on_insert = 0;
echo "READ"
$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq "
SELECT '1', min(t) FROM times;
SELECT '1', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1;
" 2>&1 | grep -o -e '\ \[\ .*\ \]\ FileOpen:\ .*\ '
echo "INSERT and READ INSERT"
$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq "
INSERT INTO times SELECT now() + INTERVAL 2 day SETTINGS optimize_on_insert = 0;
SELECT '2', min(t) FROM times;
SELECT '2', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1;
INSERT INTO times SELECT now() + INTERVAL 3 day SETTINGS optimize_on_insert = 0;
" 2>&1 | grep -o -e '\ \[\ .*\ \]\ FileOpen:\ .*\ '

View File

@ -1,3 +1,3 @@
CREATE TABLE test_tuple (`p` DateTime, `i` int, `j` int) ENGINE = MergeTree PARTITION BY (toDate(p), i) ORDER BY j SETTINGS index_granularity = 1;
insert into test_tuple values (1, 1, 1);
SELECT count() FROM test_tuple PREWHERE sipHash64(sipHash64(p, toString(toDate(p))), toString(toDate(p))) % -0. WHERE i > NULL settings optimize_trivial_count_query=0; -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER }
SELECT count() FROM test_tuple PREWHERE sipHash64(sipHash64(p, toString(toDate(p))), toString(toDate(p))) % -0. WHERE i > NULL settings optimize_trivial_count_query=0, optimize_use_implicit_projections=1; -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER }

View File

@ -22,6 +22,6 @@ OPTIMIZE TABLE 02725_memory_for_merges FINAL;
SYSTEM FLUSH LOGS;
WITH (SELECT uuid FROM system.tables WHERE table='02725_memory_for_merges' and database=currentDatabase()) as uuid
SELECT sum(peak_memory_usage) < 1024 * 1024 * 200 from system.part_log where table_uuid=uuid and event_type='MergeParts';
SELECT (sum(peak_memory_usage) < 1024 * 1024 * 200 AS x) ? x : sum(peak_memory_usage) from system.part_log where table_uuid=uuid and event_type='MergeParts';
DROP TABLE IF EXISTS 02725_memory_for_merges SYNC;

View File

@ -19,3 +19,6 @@ OK
tmp5.csv
OK
tmp5.csv
4
tmp6.csv.processed
!tmp6.csv

View File

@ -29,6 +29,7 @@ cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp3_1.csv
cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp3_2.csv
cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp4.csv
cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp5.csv
cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp6.csv
### Checking that renaming works
@ -115,5 +116,14 @@ if [ -e "${tmp_dir}/tmp5.csv" ]; then
echo "tmp5.csv"
fi
# check full file name placeholder
${CLICKHOUSE_CLIENT} --rename-files-after-processing="%a.processed" -q "SELECT COUNT(*) FROM file('${unique_name}/tmp6.csv')"
if [ -e "${tmp_dir}/tmp6.csv.processed" ]; then
echo "tmp6.csv.processed"
fi
if [ ! -e "${tmp_dir}/tmp6.csv" ]; then
echo "!tmp6.csv"
fi
# Clean
rm -rd $tmp_dir

View File

@ -0,0 +1,133 @@
drop table if exists test;
drop table if exists test1;
CREATE TABLE test
(
`pt` String,
`count_distinct_exposure_uv` AggregateFunction(uniqHLL12, Int64)
)
ENGINE = AggregatingMergeTree
ORDER BY pt;
SELECT *
FROM
(
SELECT m0.pt AS pt
,m0.`exposure_uv` AS exposure_uv
,round(m2.exposure_uv,4) AS exposure_uv_hb_last_value
,if(m2.exposure_uv IS NULL OR m2.exposure_uv = 0,NULL,round((m0.exposure_uv - m2.exposure_uv) * 1.0 / m2.exposure_uv,4)) AS exposure_uv_hb_diff_percent
,round(m1.exposure_uv,4) AS exposure_uv_tb_last_value
,if(m1.exposure_uv IS NULL OR m1.exposure_uv = 0,NULL,round((m0.exposure_uv - m1.exposure_uv) * 1.0 / m1.exposure_uv,4)) AS exposure_uv_tb_diff_percent
FROM
(
SELECT m0.pt AS pt
,`exposure_uv` AS `exposure_uv`
FROM
(
SELECT pt AS pt
,CASE WHEN COUNT(`exposure_uv`) > 0 THEN AVG(`exposure_uv`) ELSE 0 END AS `exposure_uv`
FROM
(
SELECT pt AS pt
,uniqHLL12Merge(count_distinct_exposure_uv) AS `exposure_uv`
FROM test
GROUP BY pt
) m
GROUP BY pt
) m0
) m0
LEFT JOIN
(
SELECT m0.pt AS pt
,`exposure_uv` AS `exposure_uv`
FROM
(
SELECT formatDateTime(addYears(parseDateTimeBestEffort(pt),1),'%Y%m%d') AS pt
,CASE WHEN COUNT(`exposure_uv`) > 0 THEN AVG(`exposure_uv`) ELSE 0 END AS `exposure_uv`
FROM
(
SELECT pt AS pt
,uniqHLL12Merge(count_distinct_exposure_uv) AS `exposure_uv`
FROM test
GROUP BY pt
) m
GROUP BY pt
) m0
) m1
ON m0.pt = m1.pt
LEFT JOIN
(
SELECT m0.pt AS pt
,`exposure_uv` AS `exposure_uv`
FROM
(
SELECT formatDateTime(addDays(toDate(parseDateTimeBestEffort(pt)),1),'%Y%m%d') AS pt
,CASE WHEN COUNT(`exposure_uv`) > 0 THEN AVG(`exposure_uv`) ELSE 0 END AS `exposure_uv`
FROM
(
SELECT pt AS pt
,uniqHLL12Merge(count_distinct_exposure_uv) AS `exposure_uv`
FROM test
GROUP BY pt
) m
GROUP BY pt
) m0
) m2
ON m0.pt = m2.pt
) c0
ORDER BY pt ASC, exposure_uv DESC
settings join_use_nulls = 1;
CREATE TABLE test1
(
`pt` String,
`exposure_uv` Float64
)
ENGINE = Memory;
SELECT *
FROM
(
SELECT m0.pt
,m0.exposure_uv AS exposure_uv
,round(m2.exposure_uv,4)
FROM
(
SELECT pt
,exposure_uv
FROM test1
) m0
LEFT JOIN
(
SELECT pt
,exposure_uv
FROM test1
) m1
ON m0.pt = m1.pt
LEFT JOIN
(
SELECT pt
,exposure_uv
FROM test1
) m2
ON m0.pt = m2.pt
) c0
ORDER BY exposure_uv
settings join_use_nulls = 1;
SELECT
pt AS pt,
exposure_uv AS exposure_uv
FROM
(
SELECT
pt
FROM test1
) AS m0
FULL OUTER JOIN
(
SELECT
pt,
exposure_uv
FROM test1
) AS m1 ON m0.pt = m1.pt;

View File

@ -0,0 +1,3 @@
20230626 0.3156979034107179 \N \N
20230626 0.2624629016490004 \N \N
20230626 0.19390556368960468 \N \N

View File

@ -0,0 +1,107 @@
create table test1 (
`pt` String,
`brand_name` String,
`total_indirect_order_cnt` Float64,
`total_indirect_gmv` Float64
) ENGINE = Memory;
create table test2 (
`pt` String,
`brand_name` String,
`exposure_uv` Float64,
`click_uv` Float64
) ENGINE = Memory;
INSERT INTO test1 (`pt`, `brand_name`, `total_indirect_order_cnt`, `total_indirect_gmv`) VALUES ('20230625', 'LINING', 2232, 1008710), ('20230625', 'adidas', 125, 58820), ('20230625', 'Nike', 1291, 1033020), ('20230626', 'Nike', 1145, 938926), ('20230626', 'LINING', 1904, 853336), ('20230626', 'adidas', 133, 62546), ('20220626', 'LINING', 3747, 1855203), ('20220626', 'Nike', 2295, 1742665), ('20220626', 'adidas', 302, 122388);
INSERT INTO test2 (`pt`, `brand_name`, `exposure_uv`, `click_uv`) VALUES ('20230625', 'Nike', 2012913, 612831), ('20230625', 'adidas', 480277, 96176), ('20230625', 'LINING', 2474234, 627814), ('20230626', 'Nike', 1934666, 610770), ('20230626', 'adidas', 469904, 91117), ('20230626', 'LINING', 2285142, 599765), ('20220626', 'Nike', 2979656, 937166), ('20220626', 'adidas', 704751, 124250), ('20220626', 'LINING', 3163884, 1010221);
SELECT * FROM (
SELECT m0.pt AS pt
,m0.`uvctr` AS uvctr
,round(m1.uvctr,4) AS uvctr_hb_last_value
,round(m2.uvctr,4) AS uvctr_tb_last_value
FROM
(
SELECT m0.pt AS pt
,COALESCE(m0.brand_name,m1.brand_name) AS brand_name
,if(isNaN(`click_uv` / `exposure_uv`) OR isInfinite(`click_uv` / `exposure_uv`),NULL,`click_uv` / `exposure_uv`) AS `uvctr`
FROM
(
SELECT pt AS pt
,brand_name AS `brand_name`
,exposure_uv AS `exposure_uv`
,click_uv AS `click_uv`
FROM test2
WHERE pt = '20230626'
) m0
FULL JOIN
(
SELECT pt AS pt
,brand_name AS `brand_name`
,total_indirect_order_cnt AS `total_indirect_order_cnt`
,total_indirect_gmv AS `total_indirect_gmv`
FROM test1
WHERE pt = '20230626'
) m1
ON m0.brand_name = m1.brand_name AND m0.pt = m1.pt
) m0
LEFT JOIN
(
SELECT m0.pt AS pt
,if(isNaN(`click_uv` / `exposure_uv`) OR isInfinite(`click_uv` / `exposure_uv`),NULL,`click_uv` / `exposure_uv`) AS `uvctr`
,COALESCE(m0.brand_name,m1.brand_name) AS brand_name
,`exposure_uv` AS `exposure_uv`
,`click_uv`
FROM
(
SELECT pt AS pt
,brand_name AS `brand_name`
,exposure_uv AS `exposure_uv`
,click_uv AS `click_uv`
FROM test2
WHERE pt = '20230625'
) m0
FULL JOIN
(
SELECT pt AS pt
,brand_name AS `brand_name`
,total_indirect_order_cnt AS `total_indirect_order_cnt`
,total_indirect_gmv AS `total_indirect_gmv`
FROM test1
WHERE pt = '20230625'
) m1
ON m0.brand_name = m1.brand_name AND m0.pt = m1.pt
) m1
ON m0.brand_name = m1.brand_name AND m0.pt = m1.pt
LEFT JOIN
(
SELECT m0.pt AS pt
,if(isNaN(`click_uv` / `exposure_uv`) OR isInfinite(`click_uv` / `exposure_uv`),NULL,`click_uv` / `exposure_uv`) AS `uvctr`
,COALESCE(m0.brand_name,m1.brand_name) AS brand_name
,`exposure_uv` AS `exposure_uv`
,`click_uv`
FROM
(
SELECT pt AS pt
,brand_name AS `brand_name`
,exposure_uv AS `exposure_uv`
,click_uv AS `click_uv`
FROM test2
WHERE pt = '20220626'
) m0
FULL JOIN
(
SELECT pt AS pt
,brand_name AS `brand_name`
,total_indirect_order_cnt AS `total_indirect_order_cnt`
,total_indirect_gmv AS `total_indirect_gmv`
FROM test1
WHERE pt = '20220626'
) m1
ON m0.brand_name = m1.brand_name AND m0.pt = m1.pt
) m2
ON m0.brand_name = m2.brand_name AND m0.pt = m2.pt
) c0
ORDER BY pt ASC, uvctr DESC;

View File

@ -0,0 +1,5 @@
[1,2,3,4,5,6]
[1,2,3,4,5,6]
1 [1,2,3]
2 [4,5]
3 [6]

View File

@ -0,0 +1,9 @@
drop table if exists t;
create table t (n UInt32, a Array(Int32)) engine=Memory;
insert into t values (1, [1,2,3]), (2, [4,5]), (3, [6]);
select array_concat_agg(a) from t;
select ArrAy_cOncAt_aGg(a) from t;
select n, array_concat_agg(a) from t group by n order by n;
drop table t;

File diff suppressed because it is too large.

View File

@ -1,4 +1,5 @@
-- Tags: no-parallel-replicas
set max_threads=10;
set optimize_use_implicit_projections=1;
EXPLAIN PIPELINE SELECT count(JavaEnable) FROM test.hits WHERE WatchID = 1 OR Title = 'next' OR URL = 'prev' OR URL = '???' OR 1;

View File

@ -62,7 +62,7 @@ def default_clickhouse_odbc_conn_str():
return str(
OdbcConnectingArgs.create_from_kw(
dsn="ClickHouse DSN (ANSI)",
Url="http://localhost:8123/query?default_format=ODBCDriver2&default_table_engine=MergeTree&union_default_mode=DISTINCT&group_by_use_nulls=1&join_use_nulls=1",
Url="http://localhost:8123/query?default_format=ODBCDriver2&default_table_engine=MergeTree&union_default_mode=DISTINCT&group_by_use_nulls=1&join_use_nulls=1&allow_create_index_without_type=1",
)
)

View File

@ -991,6 +991,7 @@ addressToLine
addressToLineWithInlines
addressToSymbol
adviced
agg
aggregatefunction
aggregatingmergetree
aggregatio

View File

@ -362,11 +362,12 @@ int decompressFiles(int input_fd, char * path, char * name, bool & have_compress
#else
int read_exe_path(char *exe, size_t/* buf_sz*/)
int read_exe_path(char *exe, size_t buf_sz)
{
if (realpath("/proc/self/exe", exe) == nullptr)
return 1;
return 0;
ssize_t n = readlink("/proc/self/exe", exe, buf_sz - 1);
if (n > 0)
exe[n] = '\0';
return n > 0 && n < static_cast<ssize_t>(buf_sz);
}
#endif
@ -430,58 +431,55 @@ int main(int/* argc*/, char* argv[])
return 1;
}
int lock = -1;
/// Protection from double decompression
#if !defined(OS_DARWIN) && !defined(OS_FREEBSD)
/// get inode of this executable
uint64_t inode = getInode(self);
/// In some cases /proc/self/maps may not contain the inode of
/// /proc/self/exe. One such example is running under qemu-*-static: in that
/// case maps are proxied through qemu, which removes information about
/// itself from them.
if (inode != 0)
if (inode == 0)
{
std::stringstream lock_path; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
lock_path << "/tmp/" << name << ".decompression." << inode << ".lock";
lock = open(lock_path.str().c_str(), O_CREAT | O_RDWR, 0666);
if (lock < 0)
std::cerr << "Unable to obtain inode for exe '" << self << "'." << std::endl;
return 1;
}
std::stringstream lock_path; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
lock_path << "/tmp/" << name << ".decompression." << inode << ".lock";
int lock = open(lock_path.str().c_str(), O_CREAT | O_RDWR, 0666);
if (lock < 0)
{
perror("lock open");
return 1;
}
/// lock file should be closed on exec call
fcntl(lock, F_SETFD, FD_CLOEXEC);
if (lockf(lock, F_LOCK, 0))
{
perror("lockf");
return 1;
}
/// inconsistency in WSL1 Ubuntu - inode reported in /proc/self/maps is a 64bit to
/// 32bit conversion of input_info.st_ino
if (input_info.st_ino & 0xFFFFFFFF00000000 && !(inode & 0xFFFFFFFF00000000))
input_info.st_ino &= 0x00000000FFFFFFFF;
/// if decompression was performed by another process since this copy was started
/// then file referred by path "self" is already pointing to different inode
if (input_info.st_ino != inode)
{
struct stat lock_info;
if (0 != fstat(lock, &lock_info))
{
perror("lock open");
perror("fstat lock");
return 1;
}
/// lock file should be closed on exec call
fcntl(lock, F_SETFD, FD_CLOEXEC);
/// size 1 of lock file indicates that another decompressor has found active executable
if (lock_info.st_size == 1)
execv(self, argv);
if (lockf(lock, F_LOCK, 0))
{
perror("lockf");
return 1;
}
/// inconsistency in WSL1 Ubuntu - inode reported in /proc/self/maps is a 64bit to
/// 32bit conversion of input_info.st_ino
if (input_info.st_ino & 0xFFFFFFFF00000000 && !(inode & 0xFFFFFFFF00000000))
input_info.st_ino &= 0x00000000FFFFFFFF;
/// if decompression was performed by another process since this copy was started
/// then file referred by path "self" is already pointing to different inode
if (input_info.st_ino != inode)
{
struct stat lock_info;
if (0 != fstat(lock, &lock_info))
{
perror("fstat lock");
return 1;
}
/// size 1 of the lock file indicates that another decompressor has found an active executable
if (lock_info.st_size == 1)
execv(self, argv);
printf("No target executable - decompression only was performed.\n");
return 0;
}
printf("No target executable - decompression only was performed.\n");
return 0;
}
#endif
@ -549,19 +547,21 @@ int main(int/* argc*/, char* argv[])
if (has_exec)
{
#if !defined(OS_DARWIN) && !defined(OS_FREEBSD)
/// Write one byte to the lock file to indicate, in case other copies of the
/// compressed binary are running, that execution should be performed.
if (lock >= 0)
write(lock, "1", 1);
write(lock, "1", 1);
#endif
execv(self, argv);
/// This part of code will be reached only if error happened
perror("execv");
return 1;
}
#if !defined(OS_DARWIN) && !defined(OS_FREEBSD)
/// since inodes can be reused - it's a precaution in case the lock file already exists and has a size of 1
if (lock >= 0)
ftruncate(lock, 0);
ftruncate(lock, 0);
#endif
printf("No target executable - decompression only was performed.\n");
}