mirror of https://github.com/ClickHouse/ClickHouse.git

commit 9bce072ec1
Merge branch 'master' into ldap_fix_search_limit
@@ -286,9 +286,7 @@ if __name__ == "__main__":
         # But right now it should work, since neither hung check, nor 00001_select_1 has GROUP BY.
         "--client-option",
         "max_untracked_memory=1Gi",
-        "--client-option",
         "max_memory_usage_for_user=0",
-        "--client-option",
         "memory_profiler_step=1Gi",
         # Use system database to avoid CREATE/DROP DATABASE queries
         "--database=system",
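The only change in this hunk is dropping the repeated `--client-option` flags: the one remaining flag now carries all three settings. That only works if the argument parser accumulates every token after the flag. A minimal sketch of such parsing behavior, assuming an argparse option declared with `nargs="+"` and `action="extend"` (the real stress-runner flag may be declared differently):

```python
import argparse

parser = argparse.ArgumentParser()
# With nargs="+" every token after the flag is collected, and with
# action="extend" (Python 3.8+) repeated flags append to the same list,
# so one flag with three values and three flags with one value each
# produce the same result.
parser.add_argument("--client-option", nargs="+", action="extend", default=[])

args = parser.parse_args(
    [
        "--client-option",
        "max_untracked_memory=1Gi",
        "max_memory_usage_for_user=0",
        "memory_profiler_step=1Gi",
    ]
)
assert args.client_option == [
    "max_untracked_memory=1Gi",
    "max_memory_usage_for_user=0",
    "memory_profiler_step=1Gi",
]
```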
@@ -101,7 +101,7 @@ SELECT count()
 FROM uk_price_paid
 ```
 
-At the time this query was executed, the dataset had 27,450,499 rows. Let's see what the storage size is of the table in ClickHouse:
+At the time this query was run, the dataset had 27,450,499 rows. Let's see what the storage size is of the table in ClickHouse:
 
 ```sql
 SELECT formatReadableSize(total_bytes)
@@ -342,7 +342,7 @@ The result looks like:
 
 ## Let's Speed Up Queries Using Projections {#speedup-with-projections}
 
-[Projections](../../sql-reference/statements/alter/projection.md) allow you to improve query speeds by storing pre-aggregated data in whatever format you want. In this example, we create a projection that keeps track of the average price, total price, and count of properties grouped by the year, district and town. At execution time, ClickHouse will use your projection if it thinks the projection can improve the performance fo the query (you don't have to do anything special to use the projection - ClickHouse decides for you when the projection will be useful).
+[Projections](../../sql-reference/statements/alter/projection.md) allow you to improve query speeds by storing pre-aggregated data in whatever format you want. In this example, we create a projection that keeps track of the average price, total price, and count of properties grouped by the year, district and town. At query time, ClickHouse will use your projection if it thinks the projection can improve the performance of the query (you don't have to do anything special to use the projection - ClickHouse decides for you when the projection will be useful).
 
 ### Build a Projection {#build-projection}
 
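The corrected paragraph leads into the `### Build a Projection` section that follows. For context, a sketch of the two steps that section performs, written in Python through `clickhouse-driver` to keep one language across these notes; `prj` is a hypothetical projection name, and the docs page itself expresses this as plain SQL:

```python
from clickhouse_driver import Client  # assumption: clickhouse-driver package

client = Client("localhost")

# Declare the projection: pre-aggregated averages, sums, and counts
# grouped by year, district, and town.
client.execute(
    """
    ALTER TABLE uk_price_paid ADD PROJECTION prj (
        SELECT
            toYear(date),
            district,
            town,
            avg(price),
            sum(price),
            count()
        GROUP BY toYear(date), district, town
    )
    """
)

# Materialize the projection for rows already in the table; this is a
# mutation and runs in the background.
client.execute("ALTER TABLE uk_price_paid MATERIALIZE PROJECTION prj")
```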
@@ -178,7 +178,10 @@ public:
             func = std::forward<Function>(func),
             args = std::make_tuple(std::forward<Args>(args)...)]() mutable /// mutable is needed to destroy capture
         {
-            SCOPE_EXIT(state->event.set());
+            SCOPE_EXIT(
+                state->thread_id = std::thread::id();
+                state->event.set();
+            );
 
             state->thread_id = std::this_thread::get_id();
 
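The behavioral change here is subtle: the scope guard used to only signal `state->event`; now it first resets `state->thread_id` to a default-constructed `std::thread::id()` and then signals, so a waiter woken by the event can no longer observe a stale thread id. The same cleanup-ordering idea, expressed as a Python `try/finally` analogy (a hypothetical `Worker` class, not the ClickHouse code):

```python
import threading


class Worker:
    """Analogy for the patched C++: cleanup must reset state, then signal."""

    def __init__(self):
        self.thread_id = None
        self.done = threading.Event()

    def run(self, func):
        self.thread_id = threading.get_ident()
        try:
            func()
        finally:
            # Mirror the widened SCOPE_EXIT: clear the id *before* setting
            # the event, so a waiter never sees the finished thread's id.
            self.thread_id = None
            self.done.set()


w = Worker()
threading.Thread(target=w.run, args=(lambda: None,)).start()
w.done.wait()
assert w.thread_id is None  # guaranteed by the cleanup ordering
```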
@@ -117,7 +117,8 @@ FilterDAGInfoPtr generateFilterActions(
     const StoragePtr & storage,
     const StorageSnapshotPtr & storage_snapshot,
     const StorageMetadataPtr & metadata_snapshot,
-    Names & prerequisite_columns)
+    Names & prerequisite_columns,
+    PreparedSetsPtr prepared_sets)
 {
     auto filter_info = std::make_shared<FilterDAGInfo>();
 
@@ -155,7 +156,7 @@ FilterDAGInfoPtr generateFilterActions(
 
     /// Using separate expression analyzer to prevent any possible alias injection
     auto syntax_result = TreeRewriter(context).analyzeSelect(query_ast, TreeRewriterResult({}, storage, storage_snapshot));
-    SelectQueryExpressionAnalyzer analyzer(query_ast, syntax_result, context, metadata_snapshot);
+    SelectQueryExpressionAnalyzer analyzer(query_ast, syntax_result, context, metadata_snapshot, {}, false, {}, prepared_sets);
     filter_info->actions = analyzer.simpleSelectActions();
 
     filter_info->column_name = expr_list->children.at(0)->getColumnName();
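Both hunks in `generateFilterActions` serve one purpose: the row-policy filter's expression analyzer now shares the interpreter's `PreparedSetsPtr` instead of building subquery sets in a private cache that the main pipeline never executes, which is what made `IN (SELECT ...)` inside a row policy fail. A toy Python sketch of the pattern (all names hypothetical, not the real ClickHouse types):

```python
class PreparedSets:
    """Shared registry of sets built from IN-subqueries."""

    def __init__(self):
        self.subqueries = []

    def register(self, ast):
        self.subqueries.append(ast)


def generate_filter_actions(filter_ast, prepared_sets):
    # The fix in one line: the filter's analyzer registers its subquery
    # in the *caller's* cache rather than a private one, so the main
    # query pipeline later builds and executes the set.
    prepared_sets.register(filter_ast)
    return {"column_name": "row_policy_filter"}


shared = PreparedSets()  # owned by the interpreter, as in the hunks below
generate_filter_actions("x IN (SELECT a FROM table_2)", shared)
assert shared.subqueries == ["x IN (SELECT a FROM table_2)"]
```

The two call sites in `InterpreterSelectQuery` below are the mechanical half of the change: they pass the interpreter's `prepared_sets` through.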
@@ -615,7 +616,8 @@ InterpreterSelectQuery::InterpreterSelectQuery(
         if (row_policy_filter)
         {
             filter_info = generateFilterActions(
-                table_id, row_policy_filter, context, storage, storage_snapshot, metadata_snapshot, required_columns);
+                table_id, row_policy_filter, context, storage, storage_snapshot, metadata_snapshot, required_columns,
+                prepared_sets);
 
             query_info.filter_asts.push_back(row_policy_filter);
         }
@@ -623,7 +625,8 @@ InterpreterSelectQuery::InterpreterSelectQuery(
         if (query_info.additional_filter_ast)
         {
             additional_filter_info = generateFilterActions(
-                table_id, query_info.additional_filter_ast, context, storage, storage_snapshot, metadata_snapshot, required_columns);
+                table_id, query_info.additional_filter_ast, context, storage, storage_snapshot, metadata_snapshot, required_columns,
+                prepared_sets);
 
             additional_filter_info->do_remove_column = true;
 
@@ -867,3 +867,30 @@ def test_policy_on_distributed_table_via_role():
     assert node.query(
         "SELECT * FROM dist_tbl SETTINGS prefer_localhost_replica=0", user="user1"
     ) == TSV([[0], [2], [4], [6], [8], [0], [2], [4], [6], [8]])
+
+
+def test_row_policy_filter_with_subquery():
+    copy_policy_xml("no_filters.xml")
+    assert node.query("SHOW POLICIES") == ""
+
+    node.query("DROP ROW POLICY IF EXISTS filter_1 ON table_1")
+    node.query("DROP TABLE IF EXISTS table_1")
+    node.query("DROP TABLE IF EXISTS table_2")
+
+    node.query(
+        "CREATE TABLE table_1 (x int, y int) ENGINE = MergeTree ORDER BY tuple()"
+    )
+    node.query("INSERT INTO table_1 SELECT number, number * number FROM numbers(10)")
+
+    node.query("CREATE TABLE table_2 (a int) ENGINE=MergeTree ORDER BY tuple()")
+    node.query("INSERT INTO table_2 VALUES (3), (5)")
+
+    node.query(
+        "CREATE ROW POLICY filter_1 ON table_1 USING x IN (SELECT a FROM table_2) TO ALL"
+    )
+
+    assert node.query("SELECT * FROM table_1") == TSV([[3, 9], [5, 25]])
+
+    node.query("DROP ROW POLICY filter_1 ON table_1")
+    node.query("DROP TABLE table_1")
+    node.query("DROP TABLE table_2")
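The new test pins down the user-visible behavior the `prepared_sets` change restores: with `filter_1` active, a plain `SELECT * FROM table_1` returns only the rows whose `x` appears in `table_2`, namely `(3, 9)` and `(5, 25)`. (The initial `DROP ROW POLICY IF EXISTS` line originally said `table1`; corrected here to `table_1` to match the table the policy is created on.)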