Compare commits

...

42 Commits

Author | SHA1 | Message | Date
Dmitry Novik | c026241b04 | Merge 92cc90a711 into c27fe21339 | 2024-11-27 16:04:06 +01:00
Nikolay Degterinsky | c27fe21339 | Merge pull request #71286 from arthurpassos/demand_changes_on_alter_user (Demand changes on alter user) | 2024-11-27 14:01:56 +00:00
Max Kainov | 0eeb6c38dc | Merge pull request #72531 from ClickHouse/ci_remove_hdfs_tests (CI: Remove functional tests with hdfs) | 2024-11-27 13:57:03 +00:00
alesapin | 6cc7746970 | Merge pull request #72497 from ClickHouse/allow_pulling_executor_for_matview (Allow to use pulling executor in materialized views) | 2024-11-27 12:49:40 +00:00
Vladimir Cherkasov | b4e5178dd0 | Merge pull request #72387 from ClickHouse/vdimir/with_fill_dup_sort_bug (Fix WITH FILL: Invalid number of rows in Chunk) | 2024-11-27 12:30:13 +00:00
Alexey Milovidov | ec914b108e | Merge pull request #71919 from ClickHouse/chesema-fix-test-03006 (fix index_granularity_bytes to make test stable) | 2024-11-27 13:10:27 +01:00
Max Kainov | b61921f237 | remove test cases | 2024-11-27 12:04:06 +01:00
Alexey Milovidov | 4a3ceac1c5 | Merge pull request #72518 from vitlibar/fix-data-race-in-counters-set-parent (Fix data race in ProfileEvents::Counters::setParent().) | 2024-11-27 10:47:00 +00:00
alesapin | 045baa3c39 | Add to settings changes | 2024-11-27 11:10:46 +01:00
Nikita Fomichev | 682eb4c92f | Merge pull request #72519 from ClickHouse/24.10_disable_shared_set_join (SettingsChangesHistory: disable allow_experimental_shared_set_join) | 2024-11-27 10:01:09 +00:00
Max Kainov | bae2c068e9 | fix sed in test | 2024-11-27 10:23:28 +01:00
Max Kainov | 09aa96370e | CI: Remove functional tests with hdfs | 2024-11-27 10:16:45 +01:00
Robert Schulze | 9c893d4712 | Merge pull request #72520 from ClickHouse/docs-cloud-atomic-default-engine ([Docs] Specify that Replicated is default engine for cloud) | 2024-11-27 08:53:36 +00:00
Justin de Guzman | b6725d33d1 | [Docs] Specify that Replicated is default engine for cloud | 2024-11-26 16:57:49 -08:00
Vitaly Baranov | ac6c28b15f | Fix compilation. | 2024-11-27 01:46:06 +01:00
Nikita Fomichev | 1166e93447 | SettingsChangesHistory: disable allow_experimental_shared_set_join | 2024-11-27 00:56:03 +01:00
Vitaly Baranov | d78c3d4cba | Fix data race in ProfileEvents::Counters::setParent(). | 2024-11-27 00:39:15 +01:00
alesapin | 6e9e0c82be | Allow to use pulling executor in materialized views | 2024-11-26 19:39:34 +01:00
Arthur Passos | 1429266f65 | Merge branch 'master' into demand_changes_on_alter_user | 2024-11-26 15:33:33 -03:00
Vladimir Cherkasov | fea9e2ede8 | Merge branch 'master' into vdimir/with_fill_dup_sort_bug | 2024-11-26 14:14:48 +01:00
vdimir | 37a534325f | Fix WITH FILL: Invalid number of rows in Chunk | 2024-11-25 13:00:55 +00:00
Sema Checherinda | 79ecd1c262 | fix index_granularity_bytes to make test stable | 2024-11-14 13:37:42 +01:00
Arthur Passos | 4765fcac38 | Merge branch 'master' into demand_changes_on_alter_user | 2024-11-11 09:33:33 -03:00
Dmitry Novik | 92cc90a711 | Remove logs | 2024-11-04 15:29:17 +01:00
Arthur Passos | ffc88fd8a3 | comments | 2024-10-30 21:53:53 -03:00
Arthur Passos | c7318b60da | demand changes on alter user | 2024-10-30 19:43:38 -03:00
Dmitry Novik | 2f36feff7d | Merge remote-tracking branch 'origin/master' into param-view-analyze | 2024-10-28 15:20:52 +01:00
Dmitry Novik | ec22024639 | Merge remote-tracking branch 'origin/master' into param-view-analyze | 2024-09-26 10:35:16 +02:00
Dmitry Novik | d8c93dc70f | Update test | 2024-09-25 14:23:10 +02:00
Dmitry Novik | 0d4226a1c4 | Fix fuzzer issue | 2024-09-24 18:38:59 +02:00
Dmitry Novik | 08438c5659 | Add test for #69598 | 2024-09-19 17:33:46 +02:00
Dmitry Novik | 993e10dc26 | Merge remote-tracking branch 'origin/master' into param-view-analyze | 2024-09-19 17:29:01 +02:00
Dmitry Novik | 586182a045 | Fix clang-tidy build | 2024-09-05 16:06:24 +02:00
Dmitry Novik | 31026aa74a | Add a test | 2024-09-03 15:03:57 +02:00
Dmitry Novik | 5c39591ad1 | Add tests | 2024-09-03 15:03:57 +02:00
Dmitry Novik | 769589dbc9 | Remove redundant code | 2024-09-03 15:03:57 +02:00
Dmitry Novik | 355d7bce05 | Improve param view docs | 2024-09-03 15:03:57 +02:00
Dmitry Novik | 34ceebe992 | Rollback changes in QueryTreeBuilder | 2024-09-03 15:03:57 +02:00
Dmitry Novik | 887b49649c | Small refactoring | 2024-09-03 15:03:57 +02:00
Dmitry Novik | f4c4b58e64 | Rewrite DESCRIBE for parameterized view implementation | 2024-09-03 15:03:57 +02:00
Dmitry Novik | 79140a3589 | Support DESCRIBE for parameterized view; Fix usage in scalars | 2024-09-03 15:03:57 +02:00
Dmitry Novik | 967ba9d3d4 | Parameterized views: Analyze SELECT query | 2024-09-03 15:03:57 +02:00
53 changed files with 241 additions and 411 deletions

View File

@ -78,10 +78,6 @@ RUN arch=${TARGETARCH:-amd64} \
&& curl -L "https://dl.min.io/client/mc/release/linux-${arch}/archive/mc.RELEASE.${MINIO_CLIENT_VERSION}" -o ./mc \
&& chmod +x ./mc ./minio
RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz' \
&& tar -xvf hadoop-3.3.1.tar.gz \
&& rm -rf hadoop-3.3.1.tar.gz
ENV MINIO_ROOT_USER="clickhouse"
ENV MINIO_ROOT_PASSWORD="clickhouse"
ENV EXPORT_S3_STORAGE_POLICIES=1

View File

@ -6,7 +6,7 @@ sidebar_position: 10
# Atomic
It supports non-blocking [DROP TABLE](#drop-detach-table) and [RENAME TABLE](#rename-table) queries and atomic [EXCHANGE TABLES](#exchange-tables) queries. `Atomic` database engine is used by default.
It supports non-blocking [DROP TABLE](#drop-detach-table) and [RENAME TABLE](#rename-table) queries and atomic [EXCHANGE TABLES](#exchange-tables) queries. `Atomic` database engine is used by default. Note that on ClickHouse Cloud, the `Replicated` database engine is used by default.
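A quick, hypothetical way to check which engine a database received by default (not part of this docs change; the database name is illustrative):
```sql
CREATE DATABASE db_default;
-- Self-managed ClickHouse reports Atomic here; ClickHouse Cloud reports Replicated.
SELECT name, engine FROM system.databases WHERE name = 'db_default';
```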
## Creating a Database {#creating-a-database}

View File

@ -41,15 +41,24 @@ SELECT a, b, c FROM (SELECT ...)
## Parameterized View
Parametrized views are similar to normal views, but can be created with parameters which are not resolved immediately. These views can be used with table functions, which specify the name of the view as function name and the parameter values as its arguments.
Parametrized views are similar to normal views, but can be created with parameters which are not resolved immediately.
These views can be used with table functions, which specify the name of the view as function name and the parameter values as its arguments.
``` sql
CREATE VIEW view AS SELECT * FROM TABLE WHERE Column1={column1:datatype1} and Column2={column2:datatype2} ...
CREATE VIEW param_view AS SELECT * FROM TABLE WHERE Column1={column1:datatype1} and Column2={column2:datatype2} ...
```
The above creates a view for a table which can be used as a table function by substituting parameters as shown below.
``` sql
SELECT * FROM view(column1=value1, column2=value2 ...)
SELECT * FROM param_view(column1=value1, column2=value2 ...)
```
Since the parameterized view depends on the parameter values, it doesn't have a schema when parameters are not provided.
That means there's no information about parameterized views in the `system.columns` table.
Also, `DESCRIBE` queries work only if parameters are provided.
```sql
DESCRIBE param_view(column1=value1, column2=value2 ...)
```
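For a concrete illustration, here is a minimal sketch mirroring the new test added in this PR (the view name and parameter come from that test):
```sql
CREATE VIEW paramview AS SELECT * FROM system.numbers WHERE number <= {top:UInt64};

DESCRIBE paramview;           -- fails: the schema cannot be inferred without parameters
DESCRIBE paramview(top = 10); -- works, returns: number UInt64
```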
## Materialized View

View File

@ -4658,20 +4658,7 @@ void QueryAnalyzer::resolveTableFunction(QueryTreeNodePtr & table_function_node,
TableFunctionPtr table_function_ptr = TableFunctionFactory::instance().tryGet(table_function_name, scope_context);
if (!table_function_ptr)
{
String database_name = scope_context->getCurrentDatabase();
String table_name;
auto function_ast = table_function_node->toAST();
Identifier table_identifier{table_function_name};
if (table_identifier.getPartsSize() == 1)
{
table_name = table_identifier[0];
}
else if (table_identifier.getPartsSize() == 2)
{
database_name = table_identifier[0];
table_name = table_identifier[1];
}
auto [database_name, table_name] = extractDatabaseAndTableNameForParametrizedView(table_function_name, scope_context);
/// Collect parametrized view arguments
NameToNameMap view_params;
@ -4713,9 +4700,9 @@ void QueryAnalyzer::resolveTableFunction(QueryTreeNodePtr & table_function_node,
if (parametrized_view_storage)
{
auto fake_table_node = std::make_shared<TableNode>(parametrized_view_storage, scope_context);
fake_table_node->setAlias(table_function_node->getAlias());
table_function_node = fake_table_node;
std::vector<size_t> skip_analysis_arguments_indexes(table_function_node_typed.getArguments().getNodes().size());
std::iota(skip_analysis_arguments_indexes.begin(), skip_analysis_arguments_indexes.end(), 0);
table_function_node_typed.resolve({}, parametrized_view_storage, scope_context, std::move(skip_analysis_arguments_indexes));
return;
}

View File

@ -73,7 +73,7 @@ public:
/// Returns true, if table function is resolved, false otherwise
bool isResolved() const
{
return storage != nullptr && table_function != nullptr;
return storage != nullptr;
}
/// Get table function, returns nullptr if table function node is not resolved

View File

@ -944,4 +944,23 @@ QueryTreeNodePtr buildSubqueryToReadColumnsFromTableExpression(const QueryTreeNo
return buildSubqueryToReadColumnsFromTableExpression(columns_to_select, table_node, context);
}
std::pair<String, String> extractDatabaseAndTableNameForParametrizedView(const String & table_function_name, const ContextPtr & context)
{
String database_name = context->getCurrentDatabase();
String table_name;
Identifier table_identifier{table_function_name};
if (table_identifier.getPartsSize() == 1)
{
table_name = table_identifier[0];
}
else if (table_identifier.getPartsSize() == 2)
{
database_name = table_identifier[0];
table_name = table_identifier[1];
}
return { database_name, table_name };
}
}

View File

@ -159,5 +159,6 @@ QueryTreeNodePtr buildSubqueryToReadColumnsFromTableExpression(const NamesAndTyp
*/
QueryTreeNodePtr buildSubqueryToReadColumnsFromTableExpression(const QueryTreeNodePtr & table_node, const ContextPtr & context);
std::pair<String, String> extractDatabaseAndTableNameForParametrizedView(const String & table_function_name, const ContextPtr & context);
}

View File

@ -962,6 +962,15 @@ Counters::Counters(VariableContext level_, Counters * parent_)
counters = counters_holder.get();
}
Counters::Counters(Counters && src) noexcept
: counters(std::exchange(src.counters, nullptr))
, counters_holder(std::move(src.counters_holder))
, parent(src.parent.exchange(nullptr))
, trace_profile_events(src.trace_profile_events)
, level(src.level)
{
}
void Counters::resetCounters()
{
if (counters)
@ -973,7 +982,7 @@ void Counters::resetCounters()
void Counters::reset()
{
parent = nullptr;
setParent(nullptr);
resetCounters();
}

View File

@ -60,7 +60,7 @@ namespace ProfileEvents
Counter * counters = nullptr;
std::unique_ptr<Counter[]> counters_holder;
/// Used to propagate increments
Counters * parent = nullptr;
std::atomic<Counters *> parent = {};
bool trace_profile_events = false;
public:
@ -74,6 +74,8 @@ namespace ProfileEvents
explicit Counters(Counter * allocated_counters) noexcept
: counters(allocated_counters), parent(nullptr), level(VariableContext::Global) {}
Counters(Counters && src) noexcept;
Counter & operator[] (Event event)
{
return counters[event];
@ -114,13 +116,13 @@ namespace ProfileEvents
/// Get parent (thread unsafe)
Counters * getParent()
{
return parent;
return parent.load(std::memory_order_relaxed);
}
/// Set parent (thread unsafe)
void setParent(Counters * parent_)
{
parent = parent_;
parent.store(parent_, std::memory_order_relaxed);
}
void setTraceProfileEvents(bool value)

View File

@ -5521,6 +5521,8 @@ The default value is `CURRENT_USER`.
DECLARE(UInt64, cache_warmer_threads, 4, R"(
Only available in ClickHouse Cloud. Number of background threads for speculatively downloading new data parts into file cache, when cache_populated_by_fetch is enabled. Zero to disable.
)", 0) \
DECLARE(Bool, use_async_executor_for_materialized_views, false, R"(
Use async and potentially multithreaded execution of the materialized view query; this can speed up view processing during INSERT, but also consumes more memory.)", 0) \
DECLARE(Int64, ignore_cold_parts_seconds, 0, R"(
Only available in ClickHouse Cloud. Exclude new data parts from SELECT queries until they're either pre-warmed (see cache_populated_by_fetch) or this many seconds old. Only for Replicated-/SharedMergeTree.
)", 0) \
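A minimal usage sketch for the new `use_async_executor_for_materialized_views` setting (the table and view names are illustrative; the SET statement mirrors the updated test at the end of this diff):
```sql
CREATE TABLE src (key UInt64) ENGINE = MergeTree ORDER BY key;
CREATE TABLE dst (key UInt64) ENGINE = MergeTree ORDER BY key;
CREATE MATERIALIZED VIEW matview TO dst AS SELECT key FROM src;

SET use_async_executor_for_materialized_views = 1;
-- The materialized view query for this INSERT now runs via the async (potentially multithreaded) executor.
INSERT INTO src SELECT number FROM numbers(1000);
```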

View File

@ -60,6 +60,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{
{"24.12",
{
{"use_async_executor_for_materialized_views", false, false, "New setting."},
}
},
{"24.11",
@ -117,7 +118,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"min_free_disk_ratio_to_perform_insert", 0.0, 0.0, "New setting."},
{"enable_named_columns_in_function_tuple", false, false, "Disabled pending usability improvements"},
{"cloud_mode_database_engine", 1, 1, "A setting for ClickHouse Cloud"},
{"allow_experimental_shared_set_join", 1, 1, "A setting for ClickHouse Cloud"},
{"allow_experimental_shared_set_join", 0, 0, "A setting for ClickHouse Cloud"},
{"read_through_distributed_cache", 0, 0, "A setting for ClickHouse Cloud"},
{"write_through_distributed_cache", 0, 0, "A setting for ClickHouse Cloud"},
{"distributed_cache_throw_on_error", 0, 0, "A setting for ClickHouse Cloud"},

View File

@ -3,8 +3,15 @@
#include <QueryPipeline/BlockIO.h>
#include <DataTypes/DataTypeString.h>
#include <Parsers/queryToString.h>
#include <Parsers/FunctionParameterValuesVisitor.h>
#include <Common/typeid_cast.h>
#include <Analyzer/Utils.h>
#include <Analyzer/Passes/QueryAnalysisPass.h>
#include <Analyzer/QueryTreeBuilder.h>
#include <Analyzer/TableFunctionNode.h>
#include <Analyzer/TableNode.h>
#include <Core/Settings.h>
#include <Storages/StorageView.h>
#include <TableFunctions/ITableFunction.h>
#include <TableFunctions/TableFunctionFactory.h>
#include <Interpreters/InterpreterSelectWithUnionQuery.h>
@ -34,6 +41,14 @@ namespace Setting
extern const SettingsBool print_pretty_type_names;
}
namespace ErrorCodes
{
extern const int UNSUPPORTED_METHOD;
extern const int UNKNOWN_FUNCTION;
}
InterpreterDescribeQuery::InterpreterDescribeQuery(const ASTPtr & query_ptr_, ContextPtr context_)
: WithContext(context_)
, query_ptr(query_ptr_)
@ -133,10 +148,14 @@ BlockIO InterpreterDescribeQuery::execute()
void InterpreterDescribeQuery::fillColumnsFromSubquery(const ASTTableExpression & table_expression)
{
Block sample_block;
auto select_query = table_expression.subquery->children.at(0);
auto current_context = getContext();
fillColumnsFromSubqueryImpl(select_query, current_context);
}
void InterpreterDescribeQuery::fillColumnsFromSubqueryImpl(const ASTPtr & select_query, const ContextPtr & current_context)
{
Block sample_block;
if (settings[Setting::allow_experimental_analyzer])
{
SelectQueryOptions select_query_options;
@ -154,7 +173,39 @@ void InterpreterDescribeQuery::fillColumnsFromSubquery(const ASTTableExpression
void InterpreterDescribeQuery::fillColumnsFromTableFunction(const ASTTableExpression & table_expression)
{
auto current_context = getContext();
TableFunctionPtr table_function_ptr = TableFunctionFactory::instance().get(table_expression.table_function, current_context);
auto table_function_name = table_expression.table_function->as<ASTFunction>()->name;
TableFunctionPtr table_function_ptr = TableFunctionFactory::instance().tryGet(table_function_name, current_context);
if (!table_function_ptr)
{
auto [database_name, table_name] = extractDatabaseAndTableNameForParametrizedView(table_function_name, current_context);
auto table_id = getContext()->resolveStorageID({database_name, table_name});
getContext()->checkAccess(AccessType::SHOW_COLUMNS, table_id);
auto table = DatabaseCatalog::instance().getTable(table_id, getContext());
if (auto * storage_view = table->as<StorageView>())
{
if (storage_view->isParameterizedView())
{
auto query = storage_view->getInMemoryMetadataPtr()->getSelectQuery().inner_query->clone();
NameToNameMap parameterized_view_values = analyzeFunctionParamValues(table_expression.table_function, current_context);
StorageView::replaceQueryParametersIfParametrizedView(query, parameterized_view_values);
fillColumnsFromSubqueryImpl(query, current_context);
return;
}
}
auto hints = TableFunctionFactory::instance().getHints(table_function_name);
if (!hints.empty())
throw Exception(ErrorCodes::UNKNOWN_FUNCTION, "Unknown table function {}. Maybe you meant: {}", table_function_name, toString(hints));
else
throw Exception(ErrorCodes::UNKNOWN_FUNCTION, "Unknown table function {}", table_function_name);
}
else
{
table_function_ptr->parseArguments(table_expression.table_function, current_context);
}
auto column_descriptions = table_function_ptr->getActualTableStructure(getContext(), /*is_insert_query*/ true);
for (const auto & column : column_descriptions)
@ -180,6 +231,14 @@ void InterpreterDescribeQuery::fillColumnsFromTable(const ASTTableExpression & t
auto table_id = getContext()->resolveStorageID(table_expression.database_and_table_name);
getContext()->checkAccess(AccessType::SHOW_COLUMNS, table_id);
auto table = DatabaseCatalog::instance().getTable(table_id, getContext());
if (auto * storage_view = table->as<StorageView>())
{
if (storage_view->isParameterizedView())
throw Exception(ErrorCodes::UNSUPPORTED_METHOD,
"Cannot infer table schema for the parametrized view when no query parameters are provided");
}
auto table_lock = table->lockForShare(getContext()->getInitialQueryId(), settings[Setting::lock_acquire_timeout]);
auto metadata_snapshot = table->getInMemoryMetadataPtr();

View File

@ -1,5 +1,6 @@
#pragma once
#include <Interpreters/Context_fwd.h>
#include <Interpreters/IInterpreter.h>
#include <Storages/ColumnsDescription.h>
#include <Storages/StorageSnapshot.h>
@ -24,6 +25,7 @@ public:
private:
void fillColumnsFromSubquery(const ASTTableExpression & table_expression);
void fillColumnsFromSubqueryImpl(const ASTPtr & select_query, const ContextPtr & current_context);
void fillColumnsFromTableFunction(const ASTTableExpression & table_expression);
void fillColumnsFromTable(const ASTTableExpression & table_expression);

View File

@ -549,6 +549,8 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
auto names = typeid_cast<std::shared_ptr<ASTUserNamesWithHost>>(names_ast);
auto names_ref = names->names;
auto pos_after_parsing_names = pos;
std::optional<String> new_name;
std::optional<AllowedClientHosts> hosts;
std::optional<AllowedClientHosts> add_hosts;
@ -674,6 +676,13 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
else if (alter)
names->concatParts();
bool alter_query_with_no_changes = alter && pos_after_parsing_names == pos;
if (alter_query_with_no_changes)
{
return false;
}
auto query = std::make_shared<ASTCreateUserQuery>();
node = query;
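The user-visible effect of this parser change, mirroring the new test near the end of this diff (user names are illustrative):
```sql
CREATE USER u_example;
ALTER USER u_example;                   -- now rejected: the ALTER specifies no changes (syntax error)
ALTER USER u_example DEFAULT ROLE NONE; -- still accepted: there is an actual change
DROP USER u_example;
```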

View File

@ -32,14 +32,15 @@ namespace ErrorCodes
namespace
{
class CollectSetsVisitor : public ConstInDepthQueryTreeVisitor<CollectSetsVisitor>
class CollectSetsVisitor : public InDepthQueryTreeVisitorWithContext<CollectSetsVisitor>
{
public:
explicit CollectSetsVisitor(PlannerContext & planner_context_)
: planner_context(planner_context_)
: InDepthQueryTreeVisitorWithContext<CollectSetsVisitor>(planner_context_.getQueryContext())
, planner_context(planner_context_)
{}
void visitImpl(const QueryTreeNodePtr & node)
void enterImpl(const QueryTreeNodePtr & node)
{
if (const auto * constant_node = node->as<ConstantNode>())
/// Collect sets from source expression as well.
@ -122,7 +123,7 @@ private:
}
void collectSets(const QueryTreeNodePtr & node, PlannerContext & planner_context)
void collectSets(QueryTreeNodePtr node, PlannerContext & planner_context)
{
CollectSetsVisitor visitor(planner_context);
visitor.visit(node);

View File

@ -12,6 +12,6 @@ struct SelectQueryOptions;
/** Collect prepared sets and sets for subqueries that are necessary to execute IN function and its variations.
* Collected sets are registered in planner context.
*/
void collectSets(const QueryTreeNodePtr & node, PlannerContext & planner_context);
void collectSets(QueryTreeNodePtr node, PlannerContext & planner_context);
}

View File

@ -62,8 +62,8 @@ void Chunk::checkNumRowsIsConsistent()
{
auto & column = columns[i];
if (column->size() != num_rows)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid number of rows in Chunk column {}: expected {}, got {}",
column->getName() + " position " + toString(i), toString(num_rows), toString(column->size()));
throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid number of rows in Chunk {} column {} at position {}: expected {}, got {}",
dumpStructure(), column->getName(), i, num_rows, column->size());
}
}
@ -100,8 +100,8 @@ void Chunk::addColumn(ColumnPtr column)
if (empty())
num_rows = column->size();
else if (column->size() != num_rows)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid number of rows in Chunk column {}, got {}",
column->getName()+ ": expected " + toString(num_rows), toString(column->size()));
throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid number of rows in Chunk {} column {}: expected {}, got {}",
dumpStructure(), column->getName(), num_rows, column->size());
columns.emplace_back(std::move(column));
}

View File

@ -205,6 +205,20 @@ static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr &
return true;
}
SortDescription duduplicateSortDescription(const SortDescription & sort_description)
{
SortDescription result;
std::unordered_set<std::string> unique_columns;
for (const auto & desc : sort_description)
{
const auto & [_, inserted] = unique_columns.insert(desc.column_name);
if (!inserted)
continue;
result.push_back(desc);
}
return result;
}
FillingTransform::FillingTransform(
const Block & header_,
const SortDescription & sort_description_,
@ -212,7 +226,7 @@ FillingTransform::FillingTransform(
InterpolateDescriptionPtr interpolate_description_,
const bool use_with_fill_by_sorting_prefix_)
: ISimpleTransform(header_, transformHeader(header_, fill_description_), true)
, sort_description(sort_description_)
, sort_description(duduplicateSortDescription(sort_description_))
, fill_description(fill_description_)
, interpolate_description(interpolate_description_)
, filling_row(fill_description_)

View File

@ -11,6 +11,7 @@
#include <Processors/Transforms/PlanSquashingTransform.h>
#include <Processors/Transforms/SquashingTransform.h>
#include <Processors/Transforms/ExpressionTransform.h>
#include <Processors/Executors/PullingAsyncPipelineExecutor.h>
#include <Processors/Executors/PullingPipelineExecutor.h>
#include <Storages/LiveView/StorageLiveView.h>
#include <Storages/WindowView/StorageWindowView.h>
@ -63,6 +64,7 @@ namespace Setting
extern const SettingsUInt64 min_insert_block_size_rows_for_materialized_views;
extern const SettingsBool parallel_view_processing;
extern const SettingsBool use_concurrency_control;
extern const SettingsBool use_async_executor_for_materialized_views;
}
namespace ErrorCodes
@ -129,6 +131,7 @@ private:
};
/// For source chunk, execute view query over it.
template <typename Executor>
class ExecutingInnerQueryFromViewTransform final : public ExceptionKeepingTransform
{
public:
@ -148,7 +151,7 @@ private:
struct State
{
QueryPipeline pipeline;
PullingPipelineExecutor executor;
Executor executor;
explicit State(QueryPipeline pipeline_)
: pipeline(std::move(pipeline_))
@ -428,17 +431,31 @@ std::optional<Chain> generateViewChain(
out.addSource(std::make_shared<DeduplicationToken::CheckTokenTransform>("Right after Inner query", out.getInputHeader()));
#endif
auto executing_inner_query = std::make_shared<ExecutingInnerQueryFromViewTransform>(
storage_header, views_data->views.back(), views_data, disable_deduplication_for_children);
executing_inner_query->setRuntimeData(view_thread_status, view_counter_ms);
if (context->getSettingsRef()[Setting::use_async_executor_for_materialized_views])
{
auto executing_inner_query = std::make_shared<ExecutingInnerQueryFromViewTransform<PullingAsyncPipelineExecutor>>(
storage_header, views_data->views.back(), views_data, disable_deduplication_for_children);
executing_inner_query->setRuntimeData(view_thread_status, view_counter_ms);
out.addSource(std::move(executing_inner_query));
out.addSource(std::move(executing_inner_query));
}
else
{
auto executing_inner_query = std::make_shared<ExecutingInnerQueryFromViewTransform<PullingPipelineExecutor>>(
storage_header, views_data->views.back(), views_data, disable_deduplication_for_children);
executing_inner_query->setRuntimeData(view_thread_status, view_counter_ms);
out.addSource(std::move(executing_inner_query));
}
#ifdef ABORT_ON_LOGICAL_ERROR
out.addSource(std::make_shared<DeduplicationToken::CheckTokenTransform>("Right before Inner query", out.getInputHeader()));
#endif
}
return out;
}
@ -766,7 +783,8 @@ IProcessor::Status CopyingDataToViewsTransform::prepare()
}
ExecutingInnerQueryFromViewTransform::ExecutingInnerQueryFromViewTransform(
template <typename Executor>
ExecutingInnerQueryFromViewTransform<Executor>::ExecutingInnerQueryFromViewTransform(
const Block & header,
ViewRuntimeData & view_,
std::shared_ptr<ViewsData> views_data_,
@ -778,14 +796,16 @@ ExecutingInnerQueryFromViewTransform::ExecutingInnerQueryFromViewTransform(
{
}
void ExecutingInnerQueryFromViewTransform::onConsume(Chunk chunk)
template <typename Executor>
void ExecutingInnerQueryFromViewTransform<Executor>::onConsume(Chunk chunk)
{
auto block = getInputPort().getHeader().cloneWithColumns(chunk.detachColumns());
state.emplace(process(std::move(block), view, *views_data, std::move(chunk.getChunkInfos()), disable_deduplication_for_children));
}
ExecutingInnerQueryFromViewTransform::GenerateResult ExecutingInnerQueryFromViewTransform::onGenerate()
template <typename Executor>
ExecutingInnerQueryFromViewTransform<Executor>::GenerateResult ExecutingInnerQueryFromViewTransform<Executor>::onGenerate()
{
GenerateResult res;
if (!state.has_value())

View File

@ -1,21 +0,0 @@
#!/bin/bash
# shellcheck disable=SC2024
set -e -x -a -u
ls -lha
cd /hadoop-3.3.1
export JAVA_HOME=/usr
mkdir -p target/test/data
chown clickhouse ./target/test/data
sudo -E -u clickhouse bin/mapred minicluster -format -nomr -nnport 12222 >> /test_output/hdfs_minicluster.log 2>&1 &
while ! nc -z localhost 12222; do
sleep 1
done
lsof -i :12222
sleep 5

View File

@ -57,8 +57,6 @@ source /repo/tests/docker_scripts/utils.lib
/repo/tests/docker_scripts/setup_minio.sh stateless
/repo/tests/docker_scripts/setup_hdfs_minicluster.sh
config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
if [[ -n "$BUGFIX_VALIDATE_CHECK" ]] && [[ "$BUGFIX_VALIDATE_CHECK" -eq 1 ]]; then

View File

@ -1,10 +0,0 @@
-- Tags: no-fasttest, use-hdfs
drop table if exists test_table_hdfs_syntax
;
create table test_table_hdfs_syntax (id UInt32) ENGINE = HDFS('')
; -- { serverError BAD_ARGUMENTS }
create table test_table_hdfs_syntax (id UInt32) ENGINE = HDFS('','','', '')
; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
drop table if exists test_table_hdfs_syntax
;

View File

@ -16,5 +16,5 @@ for _ in {1..1000}; do
if [[ $elapsed -gt 30 ]]; then
break
fi
done 2>&1 | grep -o -P 'Query memory limit exceeded' | sed -r -e 's/(.*):([a-Z ]*)([mM]emory limit exceeded)(.*)/\2\3/' | uniq
done 2>&1 | grep -o 'Query memory limit exceeded' | head -n1
echo 'Ok'

View File

@ -1,10 +0,0 @@
#!/usr/bin/env bash
# Tags: no-fasttest, use-hdfs
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
TCP_PORT=$($CLICKHOUSE_CLIENT -q "SELECT tcpPort()")
$CLICKHOUSE_CLIENT -q "SELECT * FROM hdfs('hdfs://localhost:$TCP_PORT/data.csv', 'CSV', 'x UInt32')" 2>&1 | grep -F -q "HDFS_ERROR" && echo 'OK' || echo 'FAIL';

View File

@ -1,17 +0,0 @@
OK
OK
OK
OK
OK
OK
OK
OK
OK
OK
OK
OK
OK
OK
OK
OK
OK

View File

@ -1,25 +0,0 @@
#!/usr/bin/env bash
# Tags: no-fasttest, use-hdfs
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -q "SELECT * FROM hdfs('abcd', 'CSV', 'x UInt32')" 2>&1 | grep -F -q "BAD_ARGUMENTS" && echo 'OK' || echo 'FAIL';
$CLICKHOUSE_CLIENT -q "SELECT * FROM hdfs('abcd/', 'CSV', 'x UInt32')" 2>&1 | grep -F -q "BAD_ARGUMENTS" && echo 'OK' || echo 'FAIL';
$CLICKHOUSE_CLIENT -q "SELECT * FROM hdfs('//abcd', 'CSV', 'x UInt32')" 2>&1 | grep -F -q "BAD_ARGUMENTS" && echo 'OK' || echo 'FAIL';
$CLICKHOUSE_CLIENT -q "SELECT * FROM hdfs('//abcd/', 'CSV', 'x UInt32')" 2>&1 | grep -F -q "BAD_ARGUMENTS" && echo 'OK' || echo 'FAIL';
$CLICKHOUSE_CLIENT -q "SELECT * FROM hdfs('//abcd/data', 'CSV', 'x UInt32')" 2>&1 | grep -F -q "BAD_ARGUMENTS" && echo 'OK' || echo 'FAIL';
$CLICKHOUSE_CLIENT -q "SELECT * FROM hdfs('://abcd', 'CSV', 'x UInt32')" 2>&1 | grep -F -q "BAD_ARGUMENTS" && echo 'OK' || echo 'FAIL';
$CLICKHOUSE_CLIENT -q "SELECT * FROM hdfs('://abcd/data', 'CSV', 'x UInt32')" 2>&1 | grep -F -q "BAD_ARGUMENTS" && echo 'OK' || echo 'FAIL';
$CLICKHOUSE_CLIENT -q "SELECT * FROM hdfs('abcd:9000', 'CSV', 'x UInt32')" 2>&1 | grep -F -q "BAD_ARGUMENTS" && echo 'OK' || echo 'FAIL';
$CLICKHOUSE_CLIENT -q "SELECT * FROM hdfs('abcd:9000/data', 'CSV', 'x UInt32')" 2>&1 | grep -F -q "BAD_ARGUMENTS" && echo 'OK' || echo 'FAIL';
$CLICKHOUSE_CLIENT -q "SELECT * FROM hdfs('//abcd:9000/data', 'CSV', 'x UInt32')" 2>&1 | grep -F -q "BAD_ARGUMENTS" && echo 'OK' || echo 'FAIL';
$CLICKHOUSE_CLIENT -q "SELECT * FROM hdfs('://abcd:9000/data', 'CSV', 'x UInt32')" 2>&1 | grep -F -q "BAD_ARGUMENTS" && echo 'OK' || echo 'FAIL';
$CLICKHOUSE_CLIENT -q "SELECT * FROM hdfs('abcd/', 'CSV', 'x UInt32')" 2>&1 | grep -F -q "BAD_ARGUMENTS" && echo 'OK' || echo 'FAIL';
$CLICKHOUSE_CLIENT -q "SELECT * FROM hdfs('hdfs://abcd', 'CSV', 'x UInt32')" 2>&1 | grep -F -q "BAD_ARGUMENTS" && echo 'OK' || echo 'FAIL';
$CLICKHOUSE_CLIENT -q "SELECT * FROM hdfs('hdfs1:9000/data', 'CSV', 'x UInt32')" 2>&1 | grep -F -q "BAD_ARGUMENTS" && echo 'OK' || echo 'FAIL';
$CLICKHOUSE_CLIENT -q "SELECT * FROM hdfs('hdfs://hdfs1/data', 'CSV', 'x UInt32')" 2>&1 | grep -F -q "HDFS_ERROR" && echo 'OK' || echo 'FAIL';
$CLICKHOUSE_CLIENT -q "SELECT * FROM hdfs('http://hdfs1:9000/data', 'CSV', 'x UInt32')" 2>&1 | grep -F -q "BAD_ARGUMENTS" && echo 'OK' || echo 'FAIL';
$CLICKHOUSE_CLIENT -q "SELECT * FROM hdfs('hdfs://hdfs1@nameservice/abcd/data', 'CSV', 'x UInt32')" 2>&1 | grep -F -q "HDFS_ERROR" && echo 'OK' || echo 'FAIL';

View File

@ -1,48 +0,0 @@
1 2 3
4 5 6
7 8 9
1 2 3
4 5 6
7 8 9
1 2 3
4 5 6
7 8 9
1 2 3
4 5 6
7 8 9
1 2 3
4 5 6
7 8 9
1 2 3
4 5 6
7 8 9
1 2 3
4 5 6
7 8 9
1 2 3
4 5 6
7 8 9
c1 Nullable(Int64)
c2 Nullable(Int64)
c3 Nullable(Int64)
c1 Nullable(Int64)
c2 Nullable(Int64)
c3 Nullable(Int64)
c1 UInt32
c2 UInt32
c3 UInt32
c1 UInt32
c2 UInt32
c3 UInt32
c1 Nullable(Int64)
c2 Nullable(Int64)
c3 Nullable(Int64)
c1 Nullable(Int64)
c2 Nullable(Int64)
c3 Nullable(Int64)
c1 UInt32
c2 UInt32
c3 UInt32
c1 UInt32
c2 UInt32
c3 UInt32

View File

@ -1,26 +0,0 @@
-- Tags: no-fasttest, no-parallel
-- Tag no-fasttest: Depends on Java
insert into table function hdfs('hdfs://localhost:12222/test_1.tsv', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32') select 1, 2, 3 settings hdfs_truncate_on_insert=1;
insert into table function hdfs('hdfs://localhost:12222/test_2.tsv', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32') select 4, 5, 6 settings hdfs_truncate_on_insert=1;
insert into table function hdfs('hdfs://localhost:12222/test_3.tsv', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32') select 7, 8, 9 settings hdfs_truncate_on_insert=1;
select * from hdfs('hdfs://localhost:12222/test_{1,2,3}.tsv') order by c1, c2, c3;
select * from hdfs('hdfs://localhost:12222/test_{1,2,3}.tsv', 'TSV') order by c1, c2, c3;
select * from hdfs('hdfs://localhost:12222/test_{1,2,3}.tsv', 'TSV', 'c1 UInt32, c2 UInt32, c3 UInt32') order by c1, c2, c3;
select * from hdfs('hdfs://localhost:12222/test_{1,2,3}.tsv', 'TSV', 'c1 UInt32, c2 UInt32, c3 UInt32', 'auto') order by c1, c2, c3;
select * from hdfsCluster('test_cluster_two_shards_localhost', 'hdfs://localhost:12222/test_{1,2,3}.tsv') order by c1, c2, c3;
select * from hdfsCluster('test_cluster_two_shards_localhost', 'hdfs://localhost:12222/test_{1,2,3}.tsv', 'TSV') order by c1, c2, c3;
select * from hdfsCluster('test_cluster_two_shards_localhost', 'hdfs://localhost:12222/test_{1,2,3}.tsv', 'TSV', 'c1 UInt32, c2 UInt32, c3 UInt32') order by c1, c2, c3;
select * from hdfsCluster('test_cluster_two_shards_localhost', 'hdfs://localhost:12222/test_{1,2,3}.tsv', 'TSV', 'c1 UInt32, c2 UInt32, c3 UInt32', 'auto') order by c1, c2, c3;
desc hdfs('hdfs://localhost:12222/test_{1,2,3}.tsv');
desc hdfs('hdfs://localhost:12222/test_{1,2,3}.tsv', 'TSV');
desc hdfs('hdfs://localhost:12222/test_{1,2,3}.tsv', 'TSV', 'c1 UInt32, c2 UInt32, c3 UInt32');
desc hdfs('hdfs://localhost:12222/test_{1,2,3}.tsv', 'TSV', 'c1 UInt32, c2 UInt32, c3 UInt32', 'auto');
desc hdfsCluster('test_cluster_two_shards_localhost', 'hdfs://localhost:12222/test_{1,2,3}.tsv');
desc hdfsCluster('test_cluster_two_shards_localhost', 'hdfs://localhost:12222/test_{1,2,3}.tsv', 'TSV');
desc hdfsCluster('test_cluster_two_shards_localhost', 'hdfs://localhost:12222/test_{1,2,3}.tsv', 'TSV', 'c1 UInt32, c2 UInt32, c3 UInt32');
desc hdfsCluster('test_cluster_two_shards_localhost', 'hdfs://localhost:12222/test_{1,2,3}.tsv', 'TSV', 'c1 UInt32, c2 UInt32, c3 UInt32', 'auto');

View File

@ -1,26 +0,0 @@
#!/usr/bin/env bash
# Tags: no-fasttest, no-asan, no-tsan, no-msan, no-ubsan, no-debug
# FIXME https://github.com/ClickHouse/ClickHouse/issues/47207
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
for i in $(seq 1 10);
do
$CLICKHOUSE_CLIENT --query_id="02368_$i" -q "insert into function hdfs('hdfs://localhost:12222/02368_data_$i.jsonl') select range(number % 1000) from numbers(100000) settings hdfs_truncate_on_insert=1, output_format_parallel_formatting=1" 2> /dev/null &
done
sleep 2
$CLICKHOUSE_CLIENT -q "kill query where startsWith(query_id, '02368_') sync" > /dev/null 2>&1
for i in $(seq 1 10);
do
$CLICKHOUSE_CLIENT --query_id="02368_$i" -q "insert into function hdfs('hdfs://localhost:12222/02368_data_$i.jsonl') select range(number % 1000) from numbers(100000) settings hdfs_truncate_on_insert=1, output_format_parallel_formatting=0" 2> /dev/null &
done
sleep 2
$CLICKHOUSE_CLIENT -q "kill query where startsWith(query_id, '02368_') sync" > /dev/null 2>&1

View File

@ -1,5 +0,0 @@
-- Tags: no-fasttest
SELECT * FROM hdfsCluster('test_shard_localhost', '', 'TSV'); -- { serverError BAD_ARGUMENTS }
SELECT * FROM hdfsCluster('test_shard_localhost', ' ', 'TSV'); -- { serverError BAD_ARGUMENTS }
SELECT * FROM hdfsCluster('test_shard_localhost', '/', 'TSV'); -- { serverError BAD_ARGUMENTS }
SELECT * FROM hdfsCluster('test_shard_localhost', 'http/', 'TSV'); -- { serverError BAD_ARGUMENTS }

View File

@ -1,10 +0,0 @@
c1 Nullable(Int64)
c2 Nullable(Int64)
c3 Nullable(Int64)
c1 Nullable(Int64)
c2 Nullable(Int64)
c3 Nullable(Int64)
1 2 3
4 5 6
1 2 3
4 5 6

View File

@ -1,11 +0,0 @@
-- Tags: no-fasttest, no-parallel
-- Tag no-fasttest: Depends on Java
insert into table function hdfs('hdfs://localhost:12222/test_02458_1.tsv', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32') select 1, 2, 3 settings hdfs_truncate_on_insert=1;
insert into table function hdfs('hdfs://localhost:12222/test_02458_2.tsv', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32') select 4, 5, 6 settings hdfs_truncate_on_insert=1;
desc hdfsCluster('test_cluster_one_shard_three_replicas_localhost', 'hdfs://localhost:12222/test_02458_{1,2}.tsv');
desc hdfsCluster('test_cluster_one_shard_three_replicas_localhost', 'hdfs://localhost:12222/test_02458_{1,2}.tsv', 'TSV');
select * from hdfsCluster('test_cluster_one_shard_three_replicas_localhost', 'hdfs://localhost:12222/test_02458_{1,2}.tsv') order by c1, c2, c3;
select * from hdfsCluster('test_cluster_one_shard_three_replicas_localhost', 'hdfs://localhost:12222/test_02458_{1,2}.tsv', 'TSV') order by c1, c2, c3;

View File

@ -1,11 +0,0 @@
-- Tags: no-fasttest, no-parallel
-- Tag no-fasttest: Depends on Java
insert into table function hdfs('hdfs://localhost:12222/test_02536.jsonl', 'TSV') select '{"x" : {"a" : 1, "b" : 2}}' settings hdfs_truncate_on_insert=1;
set input_format_json_try_infer_named_tuples_from_objects=0;
drop table if exists test;
create table test (x Tuple(a UInt32, b UInt32)) engine=Memory();
insert into test select * from hdfsCluster('test_cluster_two_shards_localhost', 'hdfs://localhost:12222/test_02536.jsonl') settings use_structure_from_insertion_table_in_table_functions=0; -- {serverError ILLEGAL_COLUMN}
insert into test select * from hdfsCluster('test_cluster_two_shards_localhost', 'hdfs://localhost:12222/test_02536.jsonl') settings use_structure_from_insertion_table_in_table_functions=1;
select * from test;
drop table test;

View File

@ -1,11 +0,0 @@
Test 1: select from hdfs database
1 2 3
test_hdfs_1
1 2 3
test_hdfs_2
Test 2: check exceptions
BAD_ARGUMENTS
OK
OK
OK
OK

View File

@ -1,72 +0,0 @@
#!/usr/bin/env bash
# Tags: no-fasttest, use-hdfs, no-parallel
CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=none
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
# Prepare data
${CLICKHOUSE_CLIENT} -q "insert into table function hdfs('hdfs://localhost:12222/test_02725_1.tsv', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32') select 1, 2, 3 settings hdfs_truncate_on_insert=1;"
ret=$?
if [ $ret -ne 0 ]; then
echo "Insert failed!"
exit 1
fi
${CLICKHOUSE_CLIENT} -q "insert into table function hdfs('hdfs://localhost:12222/test_02725_2.tsv', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32') select 4, 5, 6 settings hdfs_truncate_on_insert=1;"
ret=$?
if [ $ret -ne 0 ]; then
echo "Insert failed!"
exit 1
fi
#################
echo "Test 1: select from hdfs database"
# Database without specific host
${CLICKHOUSE_CLIENT} --multiline -q """
DROP DATABASE IF EXISTS test_hdfs_1;
CREATE DATABASE test_hdfs_1 ENGINE = HDFS;
USE test_hdfs_1;
SELECT * FROM \"hdfs://localhost:12222/test_02725_1.tsv\"
"""
${CLICKHOUSE_CLIENT} -q "SHOW DATABASES;" | grep test_hdfs_1
# Database with host
${CLICKHOUSE_CLIENT} --multiline -q """
DROP DATABASE IF EXISTS test_hdfs_2;
CREATE DATABASE test_hdfs_2 ENGINE = HDFS('hdfs://localhost:12222');
USE test_hdfs_2;
SELECT * FROM \"test_02725_1.tsv\"
"""
${CLICKHOUSE_CLIENT} -q "SHOW DATABASES;" | grep test_hdfs_2
#################
echo "Test 2: check exceptions"
${CLICKHOUSE_CLIENT} --multiline -q """
DROP DATABASE IF EXISTS test_hdfs_3;
CREATE DATABASE test_hdfs_3 ENGINE = HDFS('abacaba');
""" 2>&1 | tr '\n' ' ' | grep -oF "BAD_ARGUMENTS"
${CLICKHOUSE_CLIENT} --multiline -q """
DROP DATABASE IF EXISTS test_hdfs_4;
CREATE DATABASE test_hdfs_4 ENGINE = HDFS;
USE test_hdfs_4;
SELECT * FROM \"abacaba/file.tsv\"
""" 2>&1 | tr '\n' ' ' | grep -oF "CANNOT_EXTRACT_TABLE_STRUCTURE"
${CLICKHOUSE_CLIENT} -q "SELECT * FROM test_hdfs_4.\`http://localhost:11111/test/a.tsv\`" 2>&1 | tr '\n' ' ' | grep -oF -e "UNKNOWN_TABLE" -e "BAD_ARGUMENTS" > /dev/null && echo "OK" || echo 'FAIL' ||:
${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_hdfs_4.\`hdfs://localhost:12222/file.myext\`" 2>&1 | tr '\n' ' ' | grep -oF -e "UNKNOWN_TABLE" -e "The data format cannot be detected" > /dev/null && echo "OK" || echo 'FAIL' ||:
${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_hdfs_4.\`hdfs://localhost:12222/test_02725_3.tsv\`" 2>&1 | tr '\n' ' ' | grep -oF -e "UNKNOWN_TABLE" -e "The table structure cannot be extracted" > /dev/null && echo "OK" || echo 'FAIL' ||:
${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_hdfs_4.\`hdfs://localhost:12222\`" 2>&1 | tr '\n' ' ' | grep -oF -e "UNKNOWN_TABLE" -e "BAD_ARGUMENTS" > /dev/null && echo "OK" || echo 'FAIL' ||:
# Cleanup
${CLICKHOUSE_CLIENT} --multiline -q """
DROP DATABASE IF EXISTS test_hdfs_1;
DROP DATABASE IF EXISTS test_hdfs_2;
DROP DATABASE IF EXISTS test_hdfs_3;
DROP DATABASE IF EXISTS test_hdfs_4;
"""

View File

@ -1,15 +0,0 @@
#!/usr/bin/env bash
# Tags: no-fasttest, use-hdfs
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -q "insert into table function hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data1.tsv') select 1 settings hdfs_truncate_on_insert=1;"
$CLICKHOUSE_CLIENT -q "insert into table function hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data2.tsv') select 2 settings hdfs_truncate_on_insert=1;"
$CLICKHOUSE_CLIENT -q "insert into table function hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data3.tsv') select 3 settings hdfs_truncate_on_insert=1;"
$CLICKHOUSE_CLIENT --print-profile-events -q "select * from hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data*.tsv', auto, 'x UInt64') where _file like '%data1%' format Null" 2>&1 | grep -F -c "EngineFileLikeReadFiles: 1"
$CLICKHOUSE_CLIENT --print-profile-events -q "select * from hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data*.tsv', auto, 'x UInt64') where _path like '%data1%' format Null" 2>&1 | grep -F -c "EngineFileLikeReadFiles: 1"

View File

@ -1,15 +0,0 @@
#!/usr/bin/env bash
# Tags: no-fasttest, use-hdfs
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -q "insert into table function hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data1.tsv') select 1 settings hdfs_truncate_on_insert=1;"
$CLICKHOUSE_CLIENT -q "insert into table function hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data2.tsv') select 11 settings hdfs_truncate_on_insert=1;"
$CLICKHOUSE_CLIENT -q "insert into table function hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data3.tsv') select 111 settings hdfs_truncate_on_insert=1;"
$CLICKHOUSE_CLIENT -q "select _size from hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data*.tsv', auto, 'x UInt64') order by _size"
$CLICKHOUSE_CLIENT -q "select _size from hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data*.tsv', auto, 'x UInt64') order by _size"

View File

@ -9,7 +9,7 @@ CREATE TABLE url_na_log
ENGINE = MergeTree
PRIMARY KEY SiteId
ORDER BY (SiteId, DateVisit)
SETTINGS index_granularity = 1000, min_bytes_for_wide_part = 0;
SETTINGS index_granularity_bytes = 1000000, index_granularity = 1000, min_bytes_for_wide_part = 0;
CREATE ROW POLICY url_na_log_policy0 ON url_na_log FOR SELECT USING (DateVisit < '2022-08-11') OR (DateVisit > '2022-08-19') TO default;

View File

@ -0,0 +1,10 @@
number UInt64
number UInt64
number UInt64
\'Biba\' String
CAST(dummy, \'Int\') Int32
CAST(dummy, \'String\') String
0
0
55
45

View File

@ -0,0 +1,25 @@
create view paramview as select * from system.numbers where number <= {top:UInt64};
describe paramview; -- { serverError UNSUPPORTED_METHOD }
describe paramview(top = 10);
describe paramview(top = 2 + 2);
create view p2 as select number, {name:String} from system.numbers where number <= {top:UInt64};
describe p2(top = 10); -- { serverError UNKNOWN_QUERY_PARAMETER }
describe p2(name = 'Biba', top = 2);
create view p3 as select CAST(dummy, {t:String});
describe p3(t = 'Int');
describe p3(t = 'String');
describe (SELECT * FROM p3(t = 'Int64') union all SELECT * FROM p3(t = 'UInt64')); -- { serverError NO_COMMON_TYPE }
SELECT * FROM p3(t = 'String');
SELECT * FROM p3(plus(equals(equals(equals(t), equals(toLowCardinality(6) = 6, 2), top)), 'String', 6, 6), materialize(3), equals(p3(globalIn(1), p2(equals(top)), t = 'Int') = top, p3(globalIn(1), p2(equals(top)), t = 'Int'), 'Int', toNullable(toUInt256(3)), 3, 3), t = 'String') SETTINGS allow_experimental_analyzer = 1;
SELECT * FROM p3(plus(equals(equals(equals(t), equals(toLowCardinality(6) = 6, 2), top)), 'String', 6, 6), materialize(3), equals(p3(globalIn(1), p2(equals(top)), t = 'Int') = top, p3(globalIn(1), p2(equals(top)), t = 'Int'), 'Int', toNullable(toUInt256(3)), 3, 3), t = 'String') SETTINGS allow_experimental_analyzer = 0; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
select arrayReduce('sum', (select groupArray(number) from paramview(top=10)));
create view test_pv as select number from numbers({limit:UInt64});
with (select sum(number) from test_pv(limit=10)) as sm select sm;

View File

@ -1,2 +0,0 @@
99999
99999

View File

@ -1,11 +0,0 @@
#!/usr/bin/env bash
# Tags: no-fasttest, no-parallel, use-hdfs
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -q "insert into table function file('test_03243.parquet', 'Parquet') select number as i from numbers(100000) settings output_format_parquet_row_group_size=10000,engine_file_truncate_on_insert=1"
$CLICKHOUSE_CLIENT -q "select max(i) from file('test_03243.parquet', 'Parquet') settings max_threads = 1;"
$CLICKHOUSE_CLIENT -q "insert into table function hdfs('hdfs://localhost:12222/test_03243.parquet', 'Parquet') select number as i from numbers(100000) settings output_format_parquet_row_group_size=10000,hdfs_truncate_on_insert=1;"
$CLICKHOUSE_CLIENT -q "select max(i) from hdfs('hdfs://localhost:12222/test_03243.parquet', 'Parquet') settings max_threads = 1;"

View File

@ -0,0 +1,5 @@
-- Tags: no-parallel
create user u_03254_alter_user;
alter user u_03254_alter_user; -- { clientError SYNTAX_ERROR }
drop user u_03254_alter_user;

View File

@ -0,0 +1,8 @@
1 2
1 3
1 4
1 5
1 6
1 7
1 8
1 9

View File

@ -0,0 +1,7 @@
SELECT
1 AS a,
2 AS b
ORDER BY
a ASC,
1 ASC,
b ASC WITH FILL TO 10;

View File

@ -2,6 +2,8 @@ DROP TABLE IF EXISTS src;
DROP TABLE IF EXISTS dst;
DROP TABLE IF EXISTS matview;
SET use_async_executor_for_materialized_views=1;
CREATE TABLE src (
event_time DateTime,
key UInt64,