Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 15:12:02 +00:00)

Commit 935694480b: Merge branch 'master' into fp16
@@ -162,7 +162,7 @@ if [ -n "${RUN_INITDB_SCRIPTS}" ]; then
tries=${CLICKHOUSE_INIT_TIMEOUT:-1000}
while ! wget --spider --no-check-certificate -T 1 -q "$URL" 2>/dev/null; do
if [ "$tries" -le "0" ]; then
echo >&2 'ClickHouse init process failed.'
echo >&2 'ClickHouse init process timeout.'
exit 1
fi
tries=$(( tries-1 ))
@@ -46,7 +46,7 @@ Detailed table sizes with scale factor 100:
| orders | 150.000.000 | 6.15 GB |
| lineitem | 600.000.000 | 26.69 GB |

(The table sizes in ClickHouse are taken from `system.tables.total_bytes` and based on below table definitions.
(Compressed sizes in ClickHouse are taken from `system.tables.total_bytes` and based on below table definitions.)

Now create tables in ClickHouse.
@@ -7,119 +7,4 @@ toc_hidden: true

# List of Aggregate Functions

Standard aggregate functions:

- [count](../reference/count.md)
- [min](../reference/min.md)
- [max](../reference/max.md)
- [sum](../reference/sum.md)
- [avg](../reference/avg.md)
- [any](../reference/any.md)
- [stddevPop](../reference/stddevpop.md)
- [stddevPopStable](../reference/stddevpopstable.md)
- [stddevSamp](../reference/stddevsamp.md)
- [stddevSampStable](../reference/stddevsampstable.md)
- [varPop](../reference/varpop.md)
- [varSamp](../reference/varsamp.md)
- [corr](../reference/corr.md)
- [corrStable](../reference/corrstable.md)
- [corrMatrix](../reference/corrmatrix.md)
- [covarPop](../reference/covarpop.md)
- [covarPopStable](../reference/covarpopstable.md)
- [covarPopMatrix](../reference/covarpopmatrix.md)
- [covarSamp](../reference/covarsamp.md)
- [covarSampStable](../reference/covarsampstable.md)
- [covarSampMatrix](../reference/covarsampmatrix.md)
- [entropy](../reference/entropy.md)
- [exponentialMovingAverage](../reference/exponentialmovingaverage.md)
- [intervalLengthSum](../reference/intervalLengthSum.md)
- [kolmogorovSmirnovTest](../reference/kolmogorovsmirnovtest.md)
- [mannwhitneyutest](../reference/mannwhitneyutest.md)
- [median](../reference/median.md)
- [rankCorr](../reference/rankCorr.md)
- [sumKahan](../reference/sumkahan.md)
- [studentTTest](../reference/studentttest.md)
- [welchTTest](../reference/welchttest.md)

ClickHouse-specific aggregate functions:

- [aggThrow](../reference/aggthrow.md)
- [analysisOfVariance](../reference/analysis_of_variance.md)
- [any](../reference/any.md)
- [anyHeavy](../reference/anyheavy.md)
- [anyLast](../reference/anylast.md)
- [boundingRatio](../reference/boundrat.md)
- [first_value](../reference/first_value.md)
- [last_value](../reference/last_value.md)
- [argMin](../reference/argmin.md)
- [argMax](../reference/argmax.md)
- [avgWeighted](../reference/avgweighted.md)
- [topK](../reference/topk.md)
- [topKWeighted](../reference/topkweighted.md)
- [deltaSum](../reference/deltasum.md)
- [deltaSumTimestamp](../reference/deltasumtimestamp.md)
- [flameGraph](../reference/flame_graph.md)
- [groupArray](../reference/grouparray.md)
- [groupArrayLast](../reference/grouparraylast.md)
- [groupUniqArray](../reference/groupuniqarray.md)
- [groupArrayInsertAt](../reference/grouparrayinsertat.md)
- [groupArrayMovingAvg](../reference/grouparraymovingavg.md)
- [groupArrayMovingSum](../reference/grouparraymovingsum.md)
- [groupArraySample](../reference/grouparraysample.md)
- [groupArraySorted](../reference/grouparraysorted.md)
- [groupArrayIntersect](../reference/grouparrayintersect.md)
- [groupBitAnd](../reference/groupbitand.md)
- [groupBitOr](../reference/groupbitor.md)
- [groupBitXor](../reference/groupbitxor.md)
- [groupBitmap](../reference/groupbitmap.md)
- [groupBitmapAnd](../reference/groupbitmapand.md)
- [groupBitmapOr](../reference/groupbitmapor.md)
- [groupBitmapXor](../reference/groupbitmapxor.md)
- [sumWithOverflow](../reference/sumwithoverflow.md)
- [sumMap](../reference/summap.md)
- [sumMapWithOverflow](../reference/summapwithoverflow.md)
- [sumMapFiltered](../parametric-functions.md/#summapfiltered)
- [sumMapFilteredWithOverflow](../parametric-functions.md/#summapfilteredwithoverflow)
- [minMap](../reference/minmap.md)
- [maxMap](../reference/maxmap.md)
- [skewSamp](../reference/skewsamp.md)
- [skewPop](../reference/skewpop.md)
- [kurtSamp](../reference/kurtsamp.md)
- [kurtPop](../reference/kurtpop.md)
- [uniq](../reference/uniq.md)
- [uniqExact](../reference/uniqexact.md)
- [uniqCombined](../reference/uniqcombined.md)
- [uniqCombined64](../reference/uniqcombined64.md)
- [uniqHLL12](../reference/uniqhll12.md)
- [uniqTheta](../reference/uniqthetasketch.md)
- [quantile](../reference/quantile.md)
- [quantiles](../reference/quantiles.md)
- [quantileExact](../reference/quantileexact.md)
- [quantileExactLow](../reference/quantileexact.md#quantileexactlow)
- [quantileExactHigh](../reference/quantileexact.md#quantileexacthigh)
- [quantileExactWeighted](../reference/quantileexactweighted.md)
- [quantileTiming](../reference/quantiletiming.md)
- [quantileTimingWeighted](../reference/quantiletimingweighted.md)
- [quantileDeterministic](../reference/quantiledeterministic.md)
- [quantileTDigest](../reference/quantiletdigest.md)
- [quantileTDigestWeighted](../reference/quantiletdigestweighted.md)
- [quantileBFloat16](../reference/quantilebfloat16.md#quantilebfloat16)
- [quantileBFloat16Weighted](../reference/quantilebfloat16.md#quantilebfloat16weighted)
- [quantileDD](../reference/quantileddsketch.md#quantileddsketch)
- [simpleLinearRegression](../reference/simplelinearregression.md)
- [singleValueOrNull](../reference/singlevalueornull.md)
- [stochasticLinearRegression](../reference/stochasticlinearregression.md)
- [stochasticLogisticRegression](../reference/stochasticlogisticregression.md)
- [categoricalInformationValue](../reference/categoricalinformationvalue.md)
- [contingency](../reference/contingency.md)
- [cramersV](../reference/cramersv.md)
- [cramersVBiasCorrected](../reference/cramersvbiascorrected.md)
- [theilsU](../reference/theilsu.md)
- [maxIntersections](../reference/maxintersections.md)
- [maxIntersectionsPosition](../reference/maxintersectionsposition.md)
- [meanZTest](../reference/meanztest.md)
- [quantileGK](../reference/quantileGK.md)
- [quantileInterpolatedWeighted](../reference/quantileinterpolatedweighted.md)
- [sparkBar](../reference/sparkbar.md)
- [sumCount](../reference/sumcount.md)
- [largestTriangleThreeBuckets](../reference/largestTriangleThreeBuckets.md)
ClickHouse supports all standard SQL aggregate functions ([sum](../reference/sum.md), [avg](../reference/avg.md), [min](../reference/min.md), [max](../reference/max.md), [count](../reference/count.md)), as well as a wide range of other aggregate functions.
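For example, standard and ClickHouse-specific aggregates can be mixed freely in one query (an illustrative query; the data comes from the `numbers` table function):

```sql
SELECT count(), avg(x), quantile(0.9)(x), uniq(x)
FROM (SELECT number AS x FROM numbers(1000));
```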
@@ -6,7 +6,9 @@ sidebar_label: AggregateFunction

# AggregateFunction

Aggregate functions can have an implementation-defined intermediate state that can be serialized to an `AggregateFunction(...)` data type and stored in a table, usually, by means of [a materialized view](../../sql-reference/statements/create/view.md). The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix. To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge` suffix.
Aggregate functions have an implementation-defined intermediate state that can be serialized to an `AggregateFunction(...)` data type and stored in a table, usually, by means of [a materialized view](../../sql-reference/statements/create/view.md).
The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix.
To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge` suffix.

`AggregateFunction(name, types_of_arguments...)` — parametric data type.
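As an illustrative sketch of the `-State` / `-Merge` workflow described above (the table and column names are assumptions, not taken from this page):

```sql
-- Store the intermediate state of uniq() per page.
CREATE TABLE page_visits_agg
(
    page     String,
    visitors AggregateFunction(uniq, UInt64)
)
ENGINE = AggregatingMergeTree
ORDER BY page;

-- uniqState() produces the serialized intermediate state.
INSERT INTO page_visits_agg
SELECT page, uniqState(user_id)
FROM page_visits
GROUP BY page;

-- uniqMerge() combines the stored states into the final result.
SELECT page, uniqMerge(visitors)
FROM page_visits_agg
GROUP BY page;
```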
@@ -6,29 +6,8 @@ sidebar_position: 1

# Data Types in ClickHouse

ClickHouse can store various kinds of data in table cells. This section describes the supported data types and special considerations for using and/or implementing them if any.
This section describes the data types supported by ClickHouse, for example [integers](int-uint.md), [floats](float.md) and [strings](string.md).

:::note
You can check whether a data type name is case-sensitive in the [system.data_type_families](../../operations/system-tables/data_type_families.md#system_tables-data_type_families) table.
:::

ClickHouse data types include:

- **Integer types**: [signed and unsigned integers](./int-uint.md) (`UInt8`, `UInt16`, `UInt32`, `UInt64`, `UInt128`, `UInt256`, `Int8`, `Int16`, `Int32`, `Int64`, `Int128`, `Int256`)
- **Floating-point numbers**: [floats](./float.md) (`Float32` and `Float64`) and [`Decimal` values](./decimal.md)
- **Boolean**: ClickHouse has a [`Boolean` type](./boolean.md)
- **Strings**: [`String`](./string.md) and [`FixedString`](./fixedstring.md)
- **Dates**: use [`Date`](./date.md) and [`Date32`](./date32.md) for days, and [`DateTime`](./datetime.md) and [`DateTime64`](./datetime64.md) for instances in time
- **Object**: the [`Object`](./json.md) stores a JSON document in a single column (deprecated)
- **JSON**: the [`JSON` object](./newjson.md) stores a JSON document in a single column
- **UUID**: a performant option for storing [`UUID` values](./uuid.md)
- **Low cardinality types**: use an [`Enum`](./enum.md) when you have a handful of unique values, or use [`LowCardinality`](./lowcardinality.md) when you have up to 10,000 unique values of a column
- **Arrays**: any column can be defined as an [`Array` of values](./array.md)
- **Maps**: use [`Map`](./map.md) for storing key/value pairs
- **Aggregation function types**: use [`SimpleAggregateFunction`](./simpleaggregatefunction.md) and [`AggregateFunction`](./aggregatefunction.md) for storing the intermediate status of aggregate function results
- **Nested data structures**: A [`Nested` data structure](./nested-data-structures/index.md) is like a table inside a cell
- **Tuples**: A [`Tuple` of elements](./tuple.md), each having an individual type.
- **Nullable**: [`Nullable`](./nullable.md) allows you to store a value as `NULL` when a value is "missing" (instead of the column setting its default value for the data type)
- **IP addresses**: use [`IPv4`](./ipv4.md) and [`IPv6`](./ipv6.md) to efficiently store IP addresses
- **Geo types**: for [geographical data](./geo.md), including `Point`, `Ring`, `Polygon` and `MultiPolygon`
- **Special data types**: including [`Expression`](./special-data-types/expression.md), [`Set`](./special-data-types/set.md), [`Nothing`](./special-data-types/nothing.md) and [`Interval`](./special-data-types/interval.md)
System table [system.data_type_families](../../operations/system-tables/data_type_families.md#system_tables-data_type_families) provides an overview of all available data types.
It also shows whether a data type is an alias to another data type and whether its name is case-sensitive (e.g. `bool` vs. `BOOL`).
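The note above can be checked directly (an illustrative query; the exact rows returned depend on the server version):

```sql
-- Which type names are case-insensitive, and what do they alias to?
SELECT name, case_insensitive, alias_to
FROM system.data_type_families
WHERE alias_to != ''
ORDER BY name
LIMIT 10;
```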
@@ -7,7 +7,7 @@ keywords: [object, data type]

# Object Data Type (deprecated)

**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).
**This feature is not production-ready and deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. A new implementation to support JSON objects is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).

<hr />
@@ -5,7 +5,9 @@ sidebar_label: SimpleAggregateFunction
---
# SimpleAggregateFunction

`SimpleAggregateFunction(name, types_of_arguments...)` data type stores current value of the aggregate function, and does not store its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does. This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. This property guarantees that partial aggregation results are enough to compute the combined one, so we do not have to store and process any extra data.
`SimpleAggregateFunction(name, types_of_arguments...)` data type stores current value (intermediate state) of the aggregate function, but not its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does.
This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`.
This property guarantees that partial aggregation results are enough to compute the combined one, so we do not have to store and process any extra data.

The common way to produce an aggregate function value is by calling the aggregate function with the [-SimpleState](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-simplestate) suffix.
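A minimal sketch of how this is typically used (table and column names are assumptions): `sum` satisfies the property above, so only the current value has to be kept.

```sql
CREATE TABLE daily_totals
(
    day   Date,
    total SimpleAggregateFunction(sum, UInt64)
)
ENGINE = AggregatingMergeTree
ORDER BY day;

-- sumSimpleState() produces a value of the matching SimpleAggregateFunction type.
INSERT INTO daily_totals
SELECT day, sumSimpleState(amount)
FROM purchases
GROUP BY day;

-- No -Merge is needed on read: a plain sum() over the stored values is enough.
SELECT day, sum(total)
FROM daily_totals
GROUP BY day;
```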
@@ -5,70 +5,4 @@ sidebar_position: 62
title: "Geo Functions"
---

## Geographical Coordinates Functions

- [greatCircleDistance](./coordinates.md#greatcircledistance)
- [geoDistance](./coordinates.md#geodistance)
- [greatCircleAngle](./coordinates.md#greatcircleangle)
- [pointInEllipses](./coordinates.md#pointinellipses)
- [pointInPolygon](./coordinates.md#pointinpolygon)

## Geohash Functions
- [geohashEncode](./geohash.md#geohashencode)
- [geohashDecode](./geohash.md#geohashdecode)
- [geohashesInBox](./geohash.md#geohashesinbox)

## H3 Indexes Functions

- [h3IsValid](./h3.md#h3isvalid)
- [h3GetResolution](./h3.md#h3getresolution)
- [h3EdgeAngle](./h3.md#h3edgeangle)
- [h3EdgeLengthM](./h3.md#h3edgelengthm)
- [h3EdgeLengthKm](./h3.md#h3edgelengthkm)
- [geoToH3](./h3.md#geotoh3)
- [h3ToGeo](./h3.md#h3togeo)
- [h3ToGeoBoundary](./h3.md#h3togeoboundary)
- [h3kRing](./h3.md#h3kring)
- [h3GetBaseCell](./h3.md#h3getbasecell)
- [h3HexAreaM2](./h3.md#h3hexaream2)
- [h3HexAreaKm2](./h3.md#h3hexareakm2)
- [h3IndexesAreNeighbors](./h3.md#h3indexesareneighbors)
- [h3ToChildren](./h3.md#h3tochildren)
- [h3ToParent](./h3.md#h3toparent)
- [h3ToString](./h3.md#h3tostring)
- [stringToH3](./h3.md#stringtoh3)
- [h3GetResolution](./h3.md#h3getresolution)
- [h3IsResClassIII](./h3.md#h3isresclassiii)
- [h3IsPentagon](./h3.md#h3ispentagon)
- [h3GetFaces](./h3.md#h3getfaces)
- [h3CellAreaM2](./h3.md#h3cellaream2)
- [h3CellAreaRads2](./h3.md#h3cellarearads2)
- [h3ToCenterChild](./h3.md#h3tocenterchild)
- [h3ExactEdgeLengthM](./h3.md#h3exactedgelengthm)
- [h3ExactEdgeLengthKm](./h3.md#h3exactedgelengthkm)
- [h3ExactEdgeLengthRads](./h3.md#h3exactedgelengthrads)
- [h3NumHexagons](./h3.md#h3numhexagons)
- [h3Line](./h3.md#h3line)
- [h3Distance](./h3.md#h3distance)
- [h3HexRing](./h3.md#h3hexring)
- [h3GetUnidirectionalEdge](./h3.md#h3getunidirectionaledge)
- [h3UnidirectionalEdgeIsValid](./h3.md#h3unidirectionaledgeisvalid)
- [h3GetOriginIndexFromUnidirectionalEdge](./h3.md#h3getoriginindexfromunidirectionaledge)
- [h3GetDestinationIndexFromUnidirectionalEdge](./h3.md#h3getdestinationindexfromunidirectionaledge)
- [h3GetIndexesFromUnidirectionalEdge](./h3.md#h3getindexesfromunidirectionaledge)
- [h3GetUnidirectionalEdgesFromHexagon](./h3.md#h3getunidirectionaledgesfromhexagon)
- [h3GetUnidirectionalEdgeBoundary](./h3.md#h3getunidirectionaledgeboundary)

## S2 Index Functions

- [geoToS2](./s2.md#geotos2)
- [s2ToGeo](./s2.md#s2togeo)
- [s2GetNeighbors](./s2.md#s2getneighbors)
- [s2CellsIntersect](./s2.md#s2cellsintersect)
- [s2CapContains](./s2.md#s2capcontains)
- [s2CapUnion](./s2.md#s2capunion)
- [s2RectAdd](./s2.md#s2rectadd)
- [s2RectContains](./s2.md#s2rectcontains)
- [s2RectUnion](./s2.md#s2rectunion)
- [s2RectIntersection](./s2.md#s2rectintersection)
Functions for working with geometric objects, for example [to calculate distances between points on a sphere](./coordinates.md), [compute geohashes](./geohash.md), and work with [h3 indexes](./h3.md).
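For instance (an assumed, minimal example with arbitrary coordinates):

```sql
-- Distance in meters between two (longitude, latitude) points on the Earth's surface.
SELECT greatCircleDistance(-56.73, 37.75, -55.26, 38.92);
```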
@@ -24,7 +24,7 @@ All expressions in a query that have the same AST (the same record or same result of syntactic analysis) are considered

## Types of Results

All functions return a single return as the result (not several values, and not zero values). The type of result is usually defined only by the types of arguments, not by the values. Exceptions are the tupleElement function (the a.N operator), and the toFixedString function.
All functions return a single value as the result (not several values, and not zero values). The type of result is usually defined only by the types of arguments, not by the values. Exceptions are the tupleElement function (the a.N operator), and the toFixedString function.

## Constants
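A small illustration of the exceptions mentioned above (an assumed example, not from the original page):

```sql
-- tupleElement's result type depends on the index value, not only on the argument types.
SELECT toTypeName(tupleElement((1, 'a'), 1));  -- UInt8
SELECT toTypeName(tupleElement((1, 'a'), 2));  -- String
-- toFixedString's result type depends on the length argument's value.
SELECT toTypeName(toFixedString('ab', 5));     -- FixedString(5)
```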
@@ -6,16 +6,4 @@ sidebar_label: CREATE

# CREATE Queries

Create queries make a new entity of one of the following kinds:

- [DATABASE](/docs/en/sql-reference/statements/create/database.md)
- [TABLE](/docs/en/sql-reference/statements/create/table.md)
- [VIEW](/docs/en/sql-reference/statements/create/view.md)
- [DICTIONARY](/docs/en/sql-reference/statements/create/dictionary.md)
- [FUNCTION](/docs/en/sql-reference/statements/create/function.md)
- [USER](/docs/en/sql-reference/statements/create/user.md)
- [ROLE](/docs/en/sql-reference/statements/create/role.md)
- [ROW POLICY](/docs/en/sql-reference/statements/create/row-policy.md)
- [QUOTA](/docs/en/sql-reference/statements/create/quota.md)
- [SETTINGS PROFILE](/docs/en/sql-reference/statements/create/settings-profile.md)
- [NAMED COLLECTION](/docs/en/sql-reference/statements/create/named-collection.md)
CREATE queries create (for example) new [databases](/docs/en/sql-reference/statements/create/database.md), [tables](/docs/en/sql-reference/statements/create/table.md) and [views](/docs/en/sql-reference/statements/create/view.md).
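For example (the database and table names here are purely illustrative):

```sql
CREATE DATABASE IF NOT EXISTS analytics;

CREATE TABLE analytics.events
(
    ts  DateTime,
    msg String
)
ENGINE = MergeTree
ORDER BY ts;
```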
@@ -6,27 +6,4 @@ sidebar_label: List of statements

# ClickHouse SQL Statements

Statements represent various kinds of action you can perform using SQL queries. Each kind of statement has its own syntax and usage details that are described separately:

- [SELECT](/docs/en/sql-reference/statements/select/index.md)
- [INSERT INTO](/docs/en/sql-reference/statements/insert-into.md)
- [CREATE](/docs/en/sql-reference/statements/create/index.md)
- [ALTER](/docs/en/sql-reference/statements/alter/index.md)
- [SYSTEM](/docs/en/sql-reference/statements/system.md)
- [SHOW](/docs/en/sql-reference/statements/show.md)
- [GRANT](/docs/en/sql-reference/statements/grant.md)
- [REVOKE](/docs/en/sql-reference/statements/revoke.md)
- [ATTACH](/docs/en/sql-reference/statements/attach.md)
- [CHECK TABLE](/docs/en/sql-reference/statements/check-table.md)
- [DESCRIBE TABLE](/docs/en/sql-reference/statements/describe-table.md)
- [DETACH](/docs/en/sql-reference/statements/detach.md)
- [DROP](/docs/en/sql-reference/statements/drop.md)
- [EXISTS](/docs/en/sql-reference/statements/exists.md)
- [KILL](/docs/en/sql-reference/statements/kill.md)
- [OPTIMIZE](/docs/en/sql-reference/statements/optimize.md)
- [RENAME](/docs/en/sql-reference/statements/rename.md)
- [SET](/docs/en/sql-reference/statements/set.md)
- [SET ROLE](/docs/en/sql-reference/statements/set-role.md)
- [TRUNCATE](/docs/en/sql-reference/statements/truncate.md)
- [USE](/docs/en/sql-reference/statements/use.md)
- [EXPLAIN](/docs/en/sql-reference/statements/explain.md)
Users interact with ClickHouse using SQL statements. ClickHouse supports common SQL statements like [SELECT](select/index.md) and [CREATE](create/index.md), but it also provides specialized statements like [KILL](kill.md) and [OPTIMIZE](optimize.md).
@ -14,6 +14,7 @@
|
||||
#include <Databases/registerDatabases.h>
|
||||
#include <Databases/DatabaseFilesystem.h>
|
||||
#include <Databases/DatabaseMemory.h>
|
||||
#include <Databases/DatabaseAtomic.h>
|
||||
#include <Databases/DatabasesOverlay.h>
|
||||
#include <Storages/System/attachSystemTables.h>
|
||||
#include <Storages/System/attachInformationSchemaTables.h>
|
||||
@ -257,12 +258,12 @@ static DatabasePtr createMemoryDatabaseIfNotExists(ContextPtr context, const Str
|
||||
return system_database;
|
||||
}
|
||||
|
||||
static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_)
|
||||
static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context)
|
||||
{
|
||||
auto databaseCombiner = std::make_shared<DatabasesOverlay>(name_, context_);
|
||||
databaseCombiner->registerNextDatabase(std::make_shared<DatabaseFilesystem>(name_, "", context_));
|
||||
databaseCombiner->registerNextDatabase(std::make_shared<DatabaseMemory>(name_, context_));
|
||||
return databaseCombiner;
|
||||
auto overlay = std::make_shared<DatabasesOverlay>(name_, context);
|
||||
overlay->registerNextDatabase(std::make_shared<DatabaseAtomic>(name_, fs::weakly_canonical(context->getPath()), UUIDHelpers::generateV4(), context));
|
||||
overlay->registerNextDatabase(std::make_shared<DatabaseFilesystem>(name_, "", context));
|
||||
return overlay;
|
||||
}
|
||||
|
||||
/// If path is specified and not empty, will try to setup server environment and load existing metadata
|
||||
@ -811,7 +812,12 @@ void LocalServer::processConfig()
|
||||
DatabaseCatalog::instance().initializeAndLoadTemporaryDatabase();
|
||||
|
||||
std::string default_database = server_settings[ServerSetting::default_database];
|
||||
DatabaseCatalog::instance().attachDatabase(default_database, createClickHouseLocalDatabaseOverlay(default_database, global_context));
|
||||
{
|
||||
DatabasePtr database = createClickHouseLocalDatabaseOverlay(default_database, global_context);
|
||||
if (UUID uuid = database->getUUID(); uuid != UUIDHelpers::Nil)
|
||||
DatabaseCatalog::instance().addUUIDMapping(uuid);
|
||||
DatabaseCatalog::instance().attachDatabase(default_database, database);
|
||||
}
|
||||
global_context->setCurrentDatabase(default_database);
|
||||
|
||||
if (getClientConfiguration().has("path"))
|
||||
|
@ -1,27 +1,22 @@
|
||||
#include <unistd.h>
|
||||
#include <fcntl.h>
|
||||
#include <base/phdr_cache.h>
|
||||
#include <Common/EnvironmentChecks.h>
|
||||
#include <Common/StringUtils.h>
|
||||
#include <Common/getHashOfLoadedBinary.h>
|
||||
|
||||
#include <new>
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <utility> /// pair
|
||||
|
||||
#include <fmt/format.h>
|
||||
#if defined(SANITIZE_COVERAGE)
|
||||
# include <Common/Coverage.h>
|
||||
#endif
|
||||
|
||||
#include "config.h"
|
||||
#include "config_tools.h"
|
||||
|
||||
#include <Common/EnvironmentChecks.h>
|
||||
#include <Common/Coverage.h>
|
||||
#include <Common/StringUtils.h>
|
||||
#include <Common/getHashOfLoadedBinary.h>
|
||||
#include <Common/IO.h>
|
||||
|
||||
#include <base/phdr_cache.h>
|
||||
#include <base/coverage.h>
|
||||
|
||||
#include <filesystem>
|
||||
#include <iostream>
|
||||
#include <new>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <utility> /// pair
|
||||
#include <vector>
|
||||
|
||||
/// Universal executable for various clickhouse applications
|
||||
int mainEntryClickHouseServer(int argc, char ** argv);
|
||||
@ -238,9 +233,12 @@ int main(int argc_, char ** argv_)
|
||||
/// clickhouse # spawn local
|
||||
/// clickhouse local # spawn local
|
||||
/// clickhouse "select ..." # spawn local
|
||||
/// clickhouse /tmp/repro --enable-analyzer
|
||||
///
|
||||
if (main_func == printHelp && !argv.empty() && (argv.size() == 1 || argv[1][0] == '-'
|
||||
|| std::string_view(argv[1]).contains(' ')))
|
||||
std::error_code ec;
|
||||
if (main_func == printHelp && !argv.empty()
|
||||
&& (argv.size() == 1 || argv[1][0] == '-' || std::string_view(argv[1]).contains(' ')
|
||||
|| std::filesystem::is_regular_file(std::filesystem::path{argv[1]}, ec)))
|
||||
{
|
||||
main_func = mainEntryClickHouseLocal;
|
||||
}
|
||||
|
@ -1,5 +1,7 @@
|
||||
#include <Client/ClientApplicationBase.h>
|
||||
|
||||
#include <filesystem>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
@ -108,6 +110,7 @@ void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_de
|
||||
{
|
||||
/// Two special cases for better usability:
|
||||
/// - if the option contains a whitespace, it might be a query: clickhouse "SELECT 1"
|
||||
/// - if the option is a filesystem file, then it's likely a queries file (clickhouse repro.sql)
|
||||
/// These are relevant for interactive usage - user-friendly, but questionable in general.
|
||||
/// In case of ambiguity or for scripts, prefer using proper options.
|
||||
|
||||
@ -115,8 +118,11 @@ void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_de
|
||||
po::variable_value value(boost::any(op.value), false);
|
||||
|
||||
const char * option;
|
||||
std::error_code ec;
|
||||
if (token.contains(' '))
|
||||
option = "query";
|
||||
else if (std::filesystem::is_regular_file(std::filesystem::path{token}, ec))
|
||||
option = "queries-file";
|
||||
else
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Positional option `{}` is not supported.", token);
|
||||
|
||||
|
@ -2892,6 +2892,9 @@ Possible values:
|
||||
**See Also**
|
||||
|
||||
- [ORDER BY Clause](../../sql-reference/statements/select/order-by.md/#optimize_read_in_order)
|
||||
)", 0) \
|
||||
DECLARE(Bool, read_in_order_use_virtual_row, false, R"(
|
||||
Use virtual row while reading in order of primary key or its monotonic function fashion. It is useful when searching over multiple parts as only relevant ones are touched.
|
||||
)", 0) \
|
||||
DECLARE(Bool, optimize_read_in_window_order, true, R"(
|
||||
Enable ORDER BY optimization in window clause for reading data in corresponding order in MergeTree tables.
|
||||
|
@ -78,6 +78,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
|
||||
{"backup_restore_finish_timeout_after_error_sec", 0, 180, "New setting."},
|
||||
{"parallel_replicas_local_plan", false, true, "Use local plan for local replica in a query with parallel replicas"},
|
||||
{"allow_experimental_bfloat16_type", false, false, "Add new experimental BFloat16 type"},
|
||||
{"read_in_order_use_virtual_row", false, false, "Use virtual row while reading in order of primary key or its monotonic function fashion. It is useful when searching over multiple parts as only relevant ones are touched."},
|
||||
}
|
||||
},
|
||||
{"24.10",
|
||||
|
@ -6,7 +6,6 @@
|
||||
#include <Databases/DatabaseReplicated.h>
|
||||
#include <IO/ReadBufferFromFile.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Interpreters/DDLTask.h>
|
||||
#include <Interpreters/DatabaseCatalog.h>
|
||||
@ -19,6 +18,7 @@
|
||||
#include <Common/filesystemHelpers.h>
|
||||
#include <Core/Settings.h>
|
||||
|
||||
|
||||
namespace fs = std::filesystem;
|
||||
|
||||
namespace DB
|
||||
@ -60,9 +60,6 @@ DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, c
|
||||
, db_uuid(uuid)
|
||||
{
|
||||
assert(db_uuid != UUIDHelpers::Nil);
|
||||
fs::create_directories(fs::path(getContext()->getPath()) / "metadata");
|
||||
fs::create_directories(path_to_table_symlinks);
|
||||
tryCreateMetadataSymlink();
|
||||
}
|
||||
|
||||
DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, ContextPtr context_)
|
||||
@ -70,6 +67,20 @@ DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, C
|
||||
{
|
||||
}
|
||||
|
||||
void DatabaseAtomic::createDirectories()
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
createDirectoriesUnlocked();
|
||||
}
|
||||
|
||||
void DatabaseAtomic::createDirectoriesUnlocked()
|
||||
{
|
||||
DatabaseOnDisk::createDirectoriesUnlocked();
|
||||
fs::create_directories(fs::path(getContext()->getPath()) / "metadata");
|
||||
fs::create_directories(path_to_table_symlinks);
|
||||
tryCreateMetadataSymlink();
|
||||
}
|
||||
|
||||
String DatabaseAtomic::getTableDataPath(const String & table_name) const
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
@ -108,6 +119,7 @@ void DatabaseAtomic::attachTable(ContextPtr /* context_ */, const String & name,
|
||||
assert(relative_table_path != data_path && !relative_table_path.empty());
|
||||
DetachedTables not_in_use;
|
||||
std::lock_guard lock(mutex);
|
||||
createDirectoriesUnlocked();
|
||||
not_in_use = cleanupDetachedTables();
|
||||
auto table_id = table->getStorageID();
|
||||
assertDetachedTableNotInUse(table_id.uuid);
|
||||
@ -208,11 +220,15 @@ void DatabaseAtomic::renameTable(ContextPtr local_context, const String & table_
|
||||
if (exchange && !supportsAtomicRename(&message))
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "RENAME EXCHANGE is not supported because exchanging files is not supported by the OS ({})", message);
|
||||
|
||||
createDirectories();
|
||||
waitDatabaseStarted();
|
||||
|
||||
auto & other_db = dynamic_cast<DatabaseAtomic &>(to_database);
|
||||
bool inside_database = this == &other_db;
|
||||
|
||||
if (!inside_database)
|
||||
other_db.createDirectories();
|
||||
|
||||
String old_metadata_path = getObjectMetadataPath(table_name);
|
||||
String new_metadata_path = to_database.getObjectMetadataPath(to_table_name);
|
||||
|
||||
@ -333,6 +349,7 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora
|
||||
const String & table_metadata_tmp_path, const String & table_metadata_path,
|
||||
ContextPtr query_context)
|
||||
{
|
||||
createDirectories();
|
||||
DetachedTables not_in_use;
|
||||
auto table_data_path = getTableDataPath(query);
|
||||
try
|
||||
@ -469,6 +486,9 @@ void DatabaseAtomic::beforeLoadingMetadata(ContextMutablePtr /*context*/, Loadin
|
||||
if (mode < LoadingStrictnessLevel::FORCE_RESTORE)
|
||||
return;
|
||||
|
||||
if (!fs::exists(path_to_table_symlinks))
|
||||
return;
|
||||
|
||||
/// Recreate symlinks to table data dirs in case of force restore, because some of them may be broken
|
||||
for (const auto & table_path : fs::directory_iterator(path_to_table_symlinks))
|
||||
{
|
||||
@ -611,6 +631,7 @@ void DatabaseAtomic::renameDatabase(ContextPtr query_context, const String & new
|
||||
{
|
||||
/// CREATE, ATTACH, DROP, DETACH and RENAME DATABASE must hold DDLGuard
|
||||
|
||||
createDirectories();
|
||||
waitDatabaseStarted();
|
||||
|
||||
bool check_ref_deps = query_context->getSettingsRef()[Setting::check_referential_table_dependencies];
|
||||
@ -702,4 +723,5 @@ void registerDatabaseAtomic(DatabaseFactory & factory)
|
||||
};
|
||||
factory.registerDatabase("Atomic", create_fn);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -76,6 +76,9 @@ protected:
|
||||
using DetachedTables = std::unordered_map<UUID, StoragePtr>;
|
||||
[[nodiscard]] DetachedTables cleanupDetachedTables() TSA_REQUIRES(mutex);
|
||||
|
||||
void createDirectories();
|
||||
void createDirectoriesUnlocked() TSA_REQUIRES(mutex);
|
||||
|
||||
void tryCreateMetadataSymlink();
|
||||
|
||||
virtual bool allowMoveTableToOtherDatabaseEngine(IDatabase & /*to_database*/) const { return false; }
|
||||
|
@ -47,6 +47,7 @@ DatabaseLazy::DatabaseLazy(const String & name_, const String & metadata_path_,
|
||||
: DatabaseOnDisk(name_, metadata_path_, std::filesystem::path("data") / escapeForFileName(name_) / "", "DatabaseLazy (" + name_ + ")", context_)
|
||||
, expiration_time(expiration_time_)
|
||||
{
|
||||
createDirectories();
|
||||
}
|
||||
|
||||
|
||||
|
@ -180,7 +180,18 @@ DatabaseOnDisk::DatabaseOnDisk(
|
||||
, metadata_path(metadata_path_)
|
||||
, data_path(data_path_)
|
||||
{
|
||||
fs::create_directories(local_context->getPath() + data_path);
|
||||
}
|
||||
|
||||
|
||||
void DatabaseOnDisk::createDirectories()
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
createDirectoriesUnlocked();
|
||||
}
|
||||
|
||||
void DatabaseOnDisk::createDirectoriesUnlocked()
|
||||
{
|
||||
fs::create_directories(std::filesystem::path(getContext()->getPath()) / data_path);
|
||||
fs::create_directories(metadata_path);
|
||||
}
|
||||
|
||||
@ -198,6 +209,8 @@ void DatabaseOnDisk::createTable(
|
||||
const StoragePtr & table,
|
||||
const ASTPtr & query)
|
||||
{
|
||||
createDirectories();
|
||||
|
||||
const auto & settings = local_context->getSettingsRef();
|
||||
const auto & create = query->as<ASTCreateQuery &>();
|
||||
assert(table_name == create.getTable());
|
||||
@ -265,7 +278,6 @@ void DatabaseOnDisk::createTable(
|
||||
}
|
||||
|
||||
commitCreateTable(create, table, table_metadata_tmp_path, table_metadata_path, local_context);
|
||||
|
||||
removeDetachedPermanentlyFlag(local_context, table_name, table_metadata_path, false);
|
||||
}
|
||||
|
||||
@ -293,6 +305,8 @@ void DatabaseOnDisk::commitCreateTable(const ASTCreateQuery & query, const Stora
|
||||
{
|
||||
try
|
||||
{
|
||||
createDirectories();
|
||||
|
||||
/// Add a table to the map of known tables.
|
||||
attachTable(query_context, query.getTable(), table, getTableDataPath(query));
|
||||
|
||||
@ -426,6 +440,7 @@ void DatabaseOnDisk::renameTable(
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Moving tables between databases of different engines is not supported");
|
||||
}
|
||||
|
||||
createDirectories();
|
||||
waitDatabaseStarted();
|
||||
|
||||
auto table_data_relative_path = getTableDataPath(table_name);
|
||||
@ -621,6 +636,9 @@ time_t DatabaseOnDisk::getObjectMetadataModificationTime(const String & object_n
|
||||
|
||||
void DatabaseOnDisk::iterateMetadataFiles(const IteratingFunction & process_metadata_file) const
|
||||
{
|
||||
if (!fs::exists(metadata_path))
|
||||
return;
|
||||
|
||||
auto process_tmp_drop_metadata_file = [&](const String & file_name)
|
||||
{
|
||||
assert(getUUID() == UUIDHelpers::Nil);
|
||||
|
@ -99,6 +99,9 @@ protected:
|
||||
virtual void removeDetachedPermanentlyFlag(ContextPtr context, const String & table_name, const String & table_metadata_path, bool attach);
|
||||
virtual void setDetachedTableNotInUseForce(const UUID & /*uuid*/) {}
|
||||
|
||||
void createDirectories();
|
||||
void createDirectoriesUnlocked() TSA_REQUIRES(mutex);
|
||||
|
||||
const String metadata_path;
|
||||
const String data_path;
|
||||
};
|
||||
|
@ -416,6 +416,7 @@ public:
|
||||
std::lock_guard lock{mutex};
|
||||
return database_name;
|
||||
}
|
||||
|
||||
/// Get UUID of database.
|
||||
virtual UUID getUUID() const { return UUIDHelpers::Nil; }
|
||||
|
||||
|
@ -62,6 +62,7 @@ DatabaseMaterializedMySQL::DatabaseMaterializedMySQL(
|
||||
, settings(std::move(settings_))
|
||||
, materialize_thread(context_, database_name_, mysql_database_name_, std::move(pool_), std::move(client_), binlog_client_, settings.get())
|
||||
{
|
||||
createDirectories();
|
||||
}
|
||||
|
||||
DatabaseMaterializedMySQL::~DatabaseMaterializedMySQL() = default;
|
||||
|
@ -1,5 +1,5 @@
|
||||
#include <Processors/Merges/Algorithms/IMergingAlgorithmWithDelayedChunk.h>
|
||||
#include <Processors/Merges/Algorithms/MergeTreePartLevelInfo.h>
|
||||
#include <Processors/Merges/Algorithms/MergeTreeReadInfo.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
|
@ -1,5 +1,5 @@
|
||||
#include <Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.h>
|
||||
#include <Processors/Merges/Algorithms/MergeTreePartLevelInfo.h>
|
||||
#include <Processors/Merges/Algorithms/MergeTreeReadInfo.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
@ -1,29 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include <Processors/Chunk.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
/// To carry part level if chunk is produced by a merge tree source
|
||||
class MergeTreePartLevelInfo : public ChunkInfoCloneable<MergeTreePartLevelInfo>
|
||||
{
|
||||
public:
|
||||
MergeTreePartLevelInfo() = delete;
|
||||
explicit MergeTreePartLevelInfo(ssize_t part_level)
|
||||
: origin_merge_tree_part_level(part_level)
|
||||
{ }
|
||||
MergeTreePartLevelInfo(const MergeTreePartLevelInfo & other) = default;
|
||||
|
||||
size_t origin_merge_tree_part_level = 0;
|
||||
};
|
||||
|
||||
inline size_t getPartLevelFromChunk(const Chunk & chunk)
|
||||
{
|
||||
const auto part_level_info = chunk.getChunkInfos().get<MergeTreePartLevelInfo>();
|
||||
if (part_level_info)
|
||||
return part_level_info->origin_merge_tree_part_level;
|
||||
return 0;
|
||||
}
|
||||
|
||||
}
|
src/Processors/Merges/Algorithms/MergeTreeReadInfo.h (new file, 86 lines)
@@ -0,0 +1,86 @@
|
||||
#pragma once
|
||||
|
||||
#include <Processors/Chunk.h>
|
||||
#include <Core/Block.h>
|
||||
#include <Interpreters/ExpressionActions.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
/// To carry part level and virtual row if chunk is produced by a merge tree source
|
||||
class MergeTreeReadInfo : public ChunkInfoCloneable<MergeTreeReadInfo>
|
||||
{
|
||||
public:
|
||||
MergeTreeReadInfo() = delete;
|
||||
explicit MergeTreeReadInfo(size_t part_level) :
|
||||
origin_merge_tree_part_level(part_level) {}
|
||||
explicit MergeTreeReadInfo(size_t part_level, const Block & pk_block_, ExpressionActionsPtr virtual_row_conversions_) :
|
||||
origin_merge_tree_part_level(part_level), pk_block(pk_block_), virtual_row_conversions(std::move(virtual_row_conversions_)) {}
|
||||
MergeTreeReadInfo(const MergeTreeReadInfo & other) = default;
|
||||
|
||||
size_t origin_merge_tree_part_level = 0;
|
||||
|
||||
/// If is virtual_row, block should not be empty.
|
||||
Block pk_block;
|
||||
ExpressionActionsPtr virtual_row_conversions;
|
||||
};
|
||||
|
||||
inline size_t getPartLevelFromChunk(const Chunk & chunk)
|
||||
{
|
||||
const auto read_info = chunk.getChunkInfos().get<MergeTreeReadInfo>();
|
||||
if (read_info)
|
||||
return read_info->origin_merge_tree_part_level;
|
||||
return 0;
|
||||
}
|
||||
|
||||
inline bool isVirtualRow(const Chunk & chunk)
|
||||
{
|
||||
const auto read_info = chunk.getChunkInfos().get<MergeTreeReadInfo>();
|
||||
if (read_info)
|
||||
return read_info->pk_block.columns() > 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
inline void setVirtualRow(Chunk & chunk, const Block & header, bool apply_virtual_row_conversions)
|
||||
{
|
||||
auto read_info = chunk.getChunkInfos().get<MergeTreeReadInfo>();
|
||||
chassert(read_info);
|
||||
|
||||
Block & pk_block = read_info->pk_block;
|
||||
|
||||
// std::cerr << apply_virtual_row_conversions << std::endl;
|
||||
// std::cerr << read_info->virtual_row_conversions->dumpActions() << std::endl;
|
||||
|
||||
if (apply_virtual_row_conversions)
|
||||
read_info->virtual_row_conversions->execute(pk_block);
|
||||
|
||||
// std::cerr << "++++" << pk_block.dumpStructure() << std::endl;
|
||||
|
||||
Columns ordered_columns;
|
||||
ordered_columns.reserve(pk_block.columns());
|
||||
|
||||
for (size_t i = 0; i < header.columns(); ++i)
|
||||
{
|
||||
const ColumnWithTypeAndName & col = header.getByPosition(i);
|
||||
if (const auto * pk_col = pk_block.findByName(col.name))
|
||||
{
|
||||
if (!col.type->equals(*pk_col->type))
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Virtual row has different type for {}. Expected {}, got {}",
|
||||
col.name, col.dumpStructure(), pk_col->dumpStructure());
|
||||
|
||||
ordered_columns.push_back(pk_col->column);
|
||||
}
|
||||
else
|
||||
ordered_columns.push_back(col.type->createColumnConstWithDefaultValue(1));
|
||||
}
|
||||
|
||||
chunk.setColumns(ordered_columns, 1);
|
||||
}
|
||||
|
||||
}
|
@ -1,3 +1,4 @@
|
||||
#include <Processors/Merges/Algorithms/MergeTreeReadInfo.h>
|
||||
#include <Processors/Merges/Algorithms/MergingSortedAlgorithm.h>
|
||||
#include <Processors/Transforms/ColumnGathererTransform.h>
|
||||
#include <IO/WriteBuffer.h>
|
||||
@ -16,12 +17,14 @@ MergingSortedAlgorithm::MergingSortedAlgorithm(
|
||||
SortingQueueStrategy sorting_queue_strategy_,
|
||||
UInt64 limit_,
|
||||
WriteBuffer * out_row_sources_buf_,
|
||||
bool use_average_block_sizes)
|
||||
bool use_average_block_sizes,
|
||||
bool apply_virtual_row_conversions_)
|
||||
: header(std::move(header_))
|
||||
, merged_data(use_average_block_sizes, max_block_size_, max_block_size_bytes_)
|
||||
, description(description_)
|
||||
, limit(limit_)
|
||||
, out_row_sources_buf(out_row_sources_buf_)
|
||||
, apply_virtual_row_conversions(apply_virtual_row_conversions_)
|
||||
, current_inputs(num_inputs)
|
||||
, sorting_queue_strategy(sorting_queue_strategy_)
|
||||
, cursors(num_inputs)
|
||||
@ -49,6 +52,15 @@ void MergingSortedAlgorithm::addInput()
|
||||
|
||||
void MergingSortedAlgorithm::initialize(Inputs inputs)
|
||||
{
|
||||
for (auto & input : inputs)
|
||||
{
|
||||
if (!isVirtualRow(input.chunk))
|
||||
continue;
|
||||
|
||||
setVirtualRow(input.chunk, header, apply_virtual_row_conversions);
|
||||
input.skip_last_row = true;
|
||||
}
|
||||
|
||||
removeConstAndSparse(inputs);
|
||||
merged_data.initialize(header, inputs);
|
||||
current_inputs = std::move(inputs);
|
||||
|
@ -22,7 +22,8 @@ public:
|
||||
SortingQueueStrategy sorting_queue_strategy_,
|
||||
UInt64 limit_ = 0,
|
||||
WriteBuffer * out_row_sources_buf_ = nullptr,
|
||||
bool use_average_block_sizes = false);
|
||||
bool use_average_block_sizes = false,
|
||||
bool apply_virtual_row_conversions_ = true);
|
||||
|
||||
void addInput();
|
||||
|
||||
@ -47,6 +48,8 @@ private:
|
||||
/// If it is not nullptr then it should be populated during execution
|
||||
WriteBuffer * out_row_sources_buf = nullptr;
|
||||
|
||||
bool apply_virtual_row_conversions;
|
||||
|
||||
/// Chunks currently being merged.
|
||||
Inputs current_inputs;
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
#include <Processors/Merges/Algorithms/MergeTreeReadInfo.h>
|
||||
#include <Processors/Merges/IMergingTransform.h>
|
||||
|
||||
namespace DB
|
||||
@ -101,11 +102,16 @@ IProcessor::Status IMergingTransformBase::prepareInitializeInputs()
|
||||
/// setNotNeeded after reading first chunk, because in optimistic case
|
||||
/// (e.g. with optimized 'ORDER BY primary_key LIMIT n' and small 'n')
|
||||
/// we won't have to read any chunks anymore;
|
||||
auto chunk = input.pull(limit_hint != 0);
|
||||
if ((limit_hint && chunk.getNumRows() < limit_hint) || always_read_till_end)
|
||||
/// If virtual row exists, let it pass through, so don't read more chunks.
|
||||
auto chunk = input.pull(true);
|
||||
bool virtual_row = isVirtualRow(chunk);
|
||||
if (limit_hint == 0 && !virtual_row)
|
||||
input.setNeeded();
|
||||
|
||||
if (!chunk.hasRows())
|
||||
if (!virtual_row && ((limit_hint && chunk.getNumRows() < limit_hint) || always_read_till_end))
|
||||
input.setNeeded();
|
||||
|
||||
if (!virtual_row && !chunk.hasRows())
|
||||
{
|
||||
if (!input.isFinished())
|
||||
{
|
||||
|
@ -22,6 +22,7 @@ MergingSortedTransform::MergingSortedTransform(
|
||||
bool always_read_till_end_,
|
||||
WriteBuffer * out_row_sources_buf_,
|
||||
bool use_average_block_sizes,
|
||||
bool apply_virtual_row_conversions,
|
||||
bool have_all_inputs_)
|
||||
: IMergingTransform(
|
||||
num_inputs,
|
||||
@ -38,7 +39,8 @@ MergingSortedTransform::MergingSortedTransform(
|
||||
sorting_queue_strategy,
|
||||
limit_,
|
||||
out_row_sources_buf_,
|
||||
use_average_block_sizes)
|
||||
use_average_block_sizes,
|
||||
apply_virtual_row_conversions)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -22,6 +22,7 @@ public:
|
||||
bool always_read_till_end_ = false,
|
||||
WriteBuffer * out_row_sources_buf_ = nullptr,
|
||||
bool use_average_block_sizes = false,
|
||||
bool apply_virtual_row_conversions = true,
|
||||
bool have_all_inputs_ = true);
|
||||
|
||||
String getName() const override { return "MergingSortedTransform"; }
|
||||
|
@ -1,4 +1,5 @@
|
||||
#include <Processors/QueryPlan/BufferChunksTransform.h>
|
||||
#include <Processors/Merges/Algorithms/MergeTreeReadInfo.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -48,14 +49,27 @@ IProcessor::Status BufferChunksTransform::prepare()
|
||||
}
|
||||
else if (input.hasData())
|
||||
{
|
||||
auto chunk = pullChunk();
|
||||
bool virtual_row;
|
||||
auto chunk = pullChunk(virtual_row);
|
||||
output.push(std::move(chunk));
|
||||
if (virtual_row)
|
||||
{
|
||||
input.setNotNeeded();
|
||||
return Status::PortFull;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (input.hasData() && (num_buffered_rows < max_rows_to_buffer || num_buffered_bytes < max_bytes_to_buffer))
|
||||
{
|
||||
auto chunk = pullChunk();
|
||||
bool virtual_row;
|
||||
auto chunk = pullChunk(virtual_row);
|
||||
if (virtual_row)
|
||||
{
|
||||
output.push(std::move(chunk));
|
||||
input.setNotNeeded();
|
||||
return Status::PortFull;
|
||||
}
|
||||
num_buffered_rows += chunk.getNumRows();
|
||||
num_buffered_bytes += chunk.bytes();
|
||||
chunks.push(std::move(chunk));
|
||||
@ -71,10 +85,12 @@ IProcessor::Status BufferChunksTransform::prepare()
|
||||
return Status::NeedData;
|
||||
}
|
||||
|
||||
Chunk BufferChunksTransform::pullChunk()
|
||||
Chunk BufferChunksTransform::pullChunk(bool & virtual_row)
|
||||
{
|
||||
auto chunk = input.pull();
|
||||
num_processed_rows += chunk.getNumRows();
|
||||
virtual_row = isVirtualRow(chunk);
|
||||
if (!virtual_row)
|
||||
num_processed_rows += chunk.getNumRows();
|
||||
|
||||
if (limit && num_processed_rows >= limit)
|
||||
input.close();
|
||||
|
@ -24,7 +24,7 @@ public:
|
||||
String getName() const override { return "BufferChunks"; }
|
||||
|
||||
private:
|
||||
Chunk pullChunk();
|
||||
Chunk pullChunk(bool & virtual_row);
|
||||
|
||||
InputPort & input;
|
||||
OutputPort & output;
|
||||
|
@ -211,6 +211,8 @@ MatchedTrees::Matches matchTrees(const ActionsDAG::NodeRawConstPtrs & inner_dag,
|
||||
MatchedTrees::Monotonicity monotonicity;
|
||||
monotonicity.direction *= info.is_positive ? 1 : -1;
|
||||
monotonicity.strict = info.is_strict;
|
||||
monotonicity.child_match = &child_match;
|
||||
monotonicity.child_node = monotonic_child;
|
||||
|
||||
if (child_match.monotonicity)
|
||||
{
|
||||
|
@ -22,12 +22,16 @@ namespace DB
|
||||
/// DAG for PK does not contain aliases and ambiguous nodes.
|
||||
struct MatchedTrees
|
||||
{
|
||||
struct Match;
|
||||
|
||||
/// Monotonicity is calculated for monotonic functions chain.
|
||||
/// Chain is not strict if there is any non-strict monotonic function.
|
||||
struct Monotonicity
|
||||
{
|
||||
int direction = 1;
|
||||
bool strict = true;
|
||||
const Match * child_match = nullptr;
|
||||
const ActionsDAG::Node * child_node = nullptr;
|
||||
};
|
||||
|
||||
struct Match
|
||||
|
@ -124,7 +124,7 @@ SortingProperty applyOrder(QueryPlan::Node * parent, SortingProperty * propertie
|
||||
auto common_prefix = commonPrefix(properties->sort_description, sorting_step->getSortDescription());
|
||||
if (!common_prefix.empty())
|
||||
/// Buffering is useful for reading from MergeTree, and it is applied in optimizeReadInOrder only.
|
||||
sorting_step->convertToFinishSorting(common_prefix, /*use_buffering*/ false);
|
||||
sorting_step->convertToFinishSorting(common_prefix, /*use_buffering*/ false, false);
|
||||
}
|
||||
|
||||
auto scope = sorting_step->hasPartitions() ? SortingProperty::SortScope::Stream : SortingProperty::SortScope::Global;
|
||||
|
@ -324,12 +324,43 @@ void enrichFixedColumns(const ActionsDAG & dag, FixedColumns & fixed_columns)
|
||||
}
|
||||
}
|
||||
|
||||
const ActionsDAG::Node * addMonotonicChain(ActionsDAG & dag, const ActionsDAG::Node * node, const MatchedTrees::Match * match, const std::string & input_name)
|
||||
{
|
||||
if (!match->monotonicity)
|
||||
return &dag.addInput(input_name, node->result_type);
|
||||
|
||||
if (node->type == ActionsDAG::ActionType::ALIAS)
|
||||
return &dag.addAlias(*addMonotonicChain(dag, node->children.front(), match, input_name), node->result_name);
|
||||
|
||||
ActionsDAG::NodeRawConstPtrs args;
|
||||
args.reserve(node->children.size());
|
||||
for (const auto * child : node->children)
|
||||
{
|
||||
if (child == match->monotonicity->child_node)
|
||||
args.push_back(addMonotonicChain(dag, match->monotonicity->child_node, match->monotonicity->child_match, input_name));
|
||||
else
|
||||
args.push_back(&dag.addColumn({child->column, child->result_type, child->result_name}));
|
||||
}
|
||||
|
||||
return &dag.addFunction(node->function_base, std::move(args), {});
|
||||
}
|
||||
|
||||
struct SortingInputOrder
|
||||
{
|
||||
InputOrderInfoPtr input_order{};
|
||||
/// This is needed for virtual row optimization.
|
||||
/// Convert the PR values to ORDER BY key.
|
||||
/// If empty, the optimization cannot be applied.
|
||||
std::optional<ActionsDAG> virtual_row_conversion{};
|
||||
};
|
||||
|
||||
/// For the case when the order of keys is important (ORDER BY keys).
|
||||
InputOrderInfoPtr buildInputOrderFromSortDescription(
|
||||
SortingInputOrder buildInputOrderFromSortDescription(
|
||||
const FixedColumns & fixed_columns,
|
||||
const std::optional<ActionsDAG> & dag,
|
||||
const SortDescription & description,
|
||||
const KeyDescription & sorting_key,
|
||||
const Names & pk_column_names,
|
||||
size_t limit)
|
||||
{
|
||||
//std::cerr << "------- buildInputOrderInfo " << std::endl;
|
||||
@ -369,6 +400,18 @@ InputOrderInfoPtr buildInputOrderFromSortDescription(
|
||||
size_t next_description_column = 0;
|
||||
size_t next_sort_key = 0;
|
||||
|
||||
bool can_optimize_virtual_row = true;
|
||||
|
||||
struct MatchInfo
|
||||
{
|
||||
const ActionsDAG::Node * source = nullptr;
|
||||
const ActionsDAG::Node * fixed_column = nullptr;
|
||||
const MatchedTrees::Match * monotonic = nullptr;
|
||||
};
|
||||
|
||||
std::vector<MatchInfo> match_infos;
|
||||
match_infos.reserve(description.size());
|
||||
|
||||
while (next_description_column < description.size() && next_sort_key < sorting_key.column_names.size())
|
||||
{
|
||||
const auto & sorting_key_column = sorting_key.column_names[next_sort_key];
|
||||
@ -410,6 +453,7 @@ InputOrderInfoPtr buildInputOrderFromSortDescription(
|
||||
|
||||
//std::cerr << "====== (no dag) Found direct match" << std::endl;
|
||||
|
||||
match_infos.push_back({.source = sort_column_node});
|
||||
++next_description_column;
|
||||
++next_sort_key;
|
||||
}
|
||||
@ -438,24 +482,46 @@ InputOrderInfoPtr buildInputOrderFromSortDescription(
|
||||
{
|
||||
current_direction *= match.monotonicity->direction;
|
||||
strict_monotonic = match.monotonicity->strict;
|
||||
match_infos.push_back({.source = sort_node, .monotonic = &match});
|
||||
}
|
||||
else
|
||||
match_infos.push_back({.source = sort_node});
|
||||
|
||||
++next_description_column;
|
||||
++next_sort_key;
|
||||
}
|
||||
else if (fixed_key_columns.contains(sort_column_node))
|
||||
{
|
||||
|
||||
if (next_sort_key == 0)
|
||||
{
|
||||
// Disable virtual row optimization.
|
||||
// For example, when pk is (a,b), a = 1, order by b, virtual row should be
|
||||
// disabled in the following case:
|
||||
// 1st part (0, 100), (1, 2), (1, 3), (1, 4)
|
||||
// 2nd part (0, 100), (1, 2), (1, 3), (1, 4).
|
||||
|
||||
can_optimize_virtual_row = false;
|
||||
}
|
||||
|
||||
//std::cerr << "+++++++++ Found fixed key by match" << std::endl;
|
||||
++next_sort_key;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
//std::cerr << "====== Check for fixed const : " << bool(sort_node->column) << " fixed : " << fixed_columns.contains(sort_node) << std::endl;
|
||||
bool is_fixed_column = sort_node->column || fixed_columns.contains(sort_node);
|
||||
if (!is_fixed_column)
|
||||
break;
|
||||
|
||||
if (!sort_node->column)
|
||||
/// Virtual row for fixed column from order by is not supported now.
|
||||
/// TODO: we can do it for the simple case,
|
||||
/// But it's better to remove fixed columns from ORDER BY completely, e.g:
|
||||
/// WHERE x = 42 ORDER BY x, y => WHERE x = 42 ORDER BY y
|
||||
can_optimize_virtual_row = false;
|
||||
|
||||
match_infos.push_back({.source = sort_node, .fixed_column = sort_node});
|
||||
order_key_prefix_descr.push_back(sort_column_description);
|
||||
++next_description_column;
|
||||
}
|
||||
@ -477,9 +543,46 @@ InputOrderInfoPtr buildInputOrderFromSortDescription(
|
||||
}
|
||||
|
||||
if (read_direction == 0 || order_key_prefix_descr.empty())
|
||||
return nullptr;
|
||||
return {};
|
||||
|
||||
return std::make_shared<InputOrderInfo>(order_key_prefix_descr, next_sort_key, read_direction, limit);
|
||||
/// If the prefix description is used, we can't restore the full description from PK value.
|
||||
/// TODO: partial sort description can be used as well. Implement support later.
|
||||
if (order_key_prefix_descr.size() < description.size() || pk_column_names.size() < next_sort_key)
|
||||
can_optimize_virtual_row = false;
|
||||
|
||||
auto order_info = std::make_shared<InputOrderInfo>(order_key_prefix_descr, next_sort_key, read_direction, limit);
|
||||
|
||||
std::optional<ActionsDAG> virtual_row_conversion;
|
||||
if (can_optimize_virtual_row)
|
||||
{
|
||||
ActionsDAG virtual_row_dag;
|
||||
virtual_row_dag.getOutputs().reserve(match_infos.size());
|
||||
size_t next_pk_name = 0;
|
||||
for (const auto & info : match_infos)
|
||||
{
|
||||
const ActionsDAG::Node * output;
|
||||
if (info.fixed_column)
|
||||
output = &virtual_row_dag.addColumn({info.fixed_column->column, info.fixed_column->result_type, info.fixed_column->result_name});
|
||||
else
|
||||
{
|
||||
if (info.monotonic)
|
||||
output = addMonotonicChain(virtual_row_dag, info.source, info.monotonic, pk_column_names[next_pk_name]);
|
||||
else
|
||||
{
|
||||
output = &virtual_row_dag.addInput(pk_column_names[next_pk_name], info.source->result_type);
|
||||
if (pk_column_names[next_pk_name] != info.source->result_name)
|
||||
output = &virtual_row_dag.addAlias(*output, info.source->result_name);
|
||||
}
|
||||
|
||||
++next_pk_name;
|
||||
}
|
||||
|
||||
virtual_row_dag.getOutputs().push_back(output);
|
||||
}
|
||||
virtual_row_conversion = std::move(virtual_row_dag);
|
||||
}
|
||||
|
||||
return {std::move(order_info), std::move(virtual_row_conversion)};
|
||||
}
|
||||
|
||||
/// We may need a few different sort descriptions here.
|
||||
@ -689,7 +792,7 @@ InputOrder buildInputOrderFromUnorderedKeys(
|
||||
return { std::move(input_order), std::move(sort_description) }; // std::move(group_by_sort_description) };
|
||||
}
|
||||
|
||||
InputOrderInfoPtr buildInputOrderFromSortDescription(
|
||||
SortingInputOrder buildInputOrderFromSortDescription(
|
||||
const ReadFromMergeTree * reading,
|
||||
const FixedColumns & fixed_columns,
|
||||
const std::optional<ActionsDAG> & dag,
|
||||
@ -697,15 +800,17 @@ InputOrderInfoPtr buildInputOrderFromSortDescription(
|
||||
size_t limit)
|
||||
{
|
||||
const auto & sorting_key = reading->getStorageMetadata()->getSortingKey();
|
||||
const auto & pk_column_names = reading->getStorageMetadata()->getPrimaryKey().column_names;
|
||||
|
||||
return buildInputOrderFromSortDescription(
|
||||
fixed_columns,
|
||||
dag, description,
|
||||
sorting_key,
|
||||
pk_column_names,
|
||||
limit);
|
||||
}
|
||||
|
||||
InputOrderInfoPtr buildInputOrderFromSortDescription(
|
||||
SortingInputOrder buildInputOrderFromSortDescription(
|
||||
ReadFromMerge * merge,
|
||||
const FixedColumns & fixed_columns,
|
||||
const std::optional<ActionsDAG> & dag,
|
||||
@ -714,28 +819,31 @@ InputOrderInfoPtr buildInputOrderFromSortDescription(
|
||||
{
|
||||
const auto & tables = merge->getSelectedTables();
|
||||
|
||||
InputOrderInfoPtr order_info;
|
||||
SortingInputOrder order_info;
|
||||
for (const auto & table : tables)
|
||||
{
|
||||
auto storage = std::get<StoragePtr>(table);
|
||||
const auto & sorting_key = storage->getInMemoryMetadataPtr()->getSortingKey();
|
||||
auto metadata = storage->getInMemoryMetadataPtr();
|
||||
const auto & sorting_key = metadata->getSortingKey();
|
||||
// const auto & pk_column_names = metadata->getPrimaryKey().column_names;
|
||||
|
||||
if (sorting_key.column_names.empty())
|
||||
return nullptr;
|
||||
return {};
|
||||
|
||||
auto table_order_info = buildInputOrderFromSortDescription(
|
||||
fixed_columns,
|
||||
dag, description,
|
||||
sorting_key,
|
||||
{},
|
||||
limit);
|
||||
|
||||
if (!table_order_info)
|
||||
return nullptr;
|
||||
if (!table_order_info.input_order)
|
||||
return {};
|
||||
|
||||
if (!order_info)
|
||||
order_info = table_order_info;
|
||||
else if (*order_info != *table_order_info)
|
||||
return nullptr;
|
||||
if (!order_info.input_order)
|
||||
order_info = std::move(table_order_info);
|
||||
else if (*order_info.input_order != *table_order_info.input_order)
|
||||
return {};
|
||||
}
|
||||
|
||||
return order_info;
|
||||
@ -791,7 +899,7 @@ InputOrder buildInputOrderFromUnorderedKeys(
|
||||
return order_info;
|
||||
}
|
||||
|
||||
InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & node)
|
||||
InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, bool & apply_virtual_row, QueryPlan::Node & node)
|
||||
{
|
||||
QueryPlan::Node * reading_node = findReadingStep(node, /*allow_existing_order=*/ false);
|
||||
if (!reading_node)
|
||||
@ -815,14 +923,21 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n
|
||||
dag, description,
|
||||
limit);
|
||||
|
||||
if (order_info)
|
||||
if (order_info.input_order)
|
||||
{
|
||||
bool can_read = reading->requestReadingInOrder(order_info->used_prefix_of_sorting_key_size, order_info->direction, order_info->limit);
|
||||
apply_virtual_row = order_info.virtual_row_conversion != std::nullopt;
|
||||
|
||||
bool can_read = reading->requestReadingInOrder(
|
||||
order_info.input_order->used_prefix_of_sorting_key_size,
|
||||
order_info.input_order->direction,
|
||||
order_info.input_order->limit,
|
||||
std::move(order_info.virtual_row_conversion));
|
||||
|
||||
if (!can_read)
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return order_info;
|
||||
return order_info.input_order;
|
||||
}
|
||||
if (auto * merge = typeid_cast<ReadFromMerge *>(reading_node->step.get()))
|
||||
{
|
||||
@ -832,14 +947,14 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n
|
||||
dag, description,
|
||||
limit);
|
||||
|
||||
if (order_info)
|
||||
if (order_info.input_order)
|
||||
{
|
||||
bool can_read = merge->requestReadingInOrder(order_info);
|
||||
bool can_read = merge->requestReadingInOrder(order_info.input_order);
|
||||
if (!can_read)
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return order_info;
|
||||
return order_info.input_order;
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
@ -873,7 +988,8 @@ InputOrder buildInputOrderInfo(AggregatingStep & aggregating, QueryPlan::Node &
|
||||
bool can_read = reading->requestReadingInOrder(
|
||||
order_info.input_order->used_prefix_of_sorting_key_size,
|
||||
order_info.input_order->direction,
|
||||
order_info.input_order->limit);
|
||||
order_info.input_order->limit,
|
||||
{});
|
||||
if (!can_read)
|
||||
return {};
|
||||
}
|
||||
@ -962,7 +1078,7 @@ InputOrder buildInputOrderInfo(DistinctStep & distinct, QueryPlan::Node & node)
|
||||
if (!reading->requestReadingInOrder(
|
||||
order_info.input_order->used_prefix_of_sorting_key_size,
|
||||
order_info.input_order->direction,
|
||||
order_info.input_order->limit))
|
||||
order_info.input_order->limit, {}))
|
||||
return {};
|
||||
|
||||
return order_info;
|
||||
@ -1014,6 +1130,8 @@ void optimizeReadInOrder(QueryPlan::Node & node, QueryPlan::Nodes & nodes)
|
||||
if (sorting->getType() != SortingStep::Type::Full)
|
||||
return;
|
||||
|
||||
bool apply_virtual_row = false;
|
||||
|
||||
if (typeid_cast<UnionStep *>(node.children.front()->step.get()))
|
||||
{
|
||||
auto & union_node = node.children.front();
|
||||
@ -1036,7 +1154,7 @@ void optimizeReadInOrder(QueryPlan::Node & node, QueryPlan::Nodes & nodes)
|
||||
|
||||
for (auto * child : union_node->children)
|
||||
{
|
||||
infos.push_back(buildInputOrderInfo(*sorting, *child));
|
||||
infos.push_back(buildInputOrderInfo(*sorting, apply_virtual_row, *child));
|
||||
|
||||
if (infos.back())
|
||||
{
|
||||
@ -1088,13 +1206,13 @@ void optimizeReadInOrder(QueryPlan::Node & node, QueryPlan::Nodes & nodes)
|
||||
}
|
||||
}
|
||||
|
||||
sorting->convertToFinishSorting(*max_sort_descr, use_buffering);
|
||||
sorting->convertToFinishSorting(*max_sort_descr, use_buffering, false);
|
||||
}
|
||||
else if (auto order_info = buildInputOrderInfo(*sorting, *node.children.front()))
|
||||
else if (auto order_info = buildInputOrderInfo(*sorting, apply_virtual_row, *node.children.front()))
|
||||
{
|
||||
/// Use buffering only if have filter or don't have limit.
|
||||
bool use_buffering = order_info->limit == 0;
|
||||
sorting->convertToFinishSorting(order_info->sort_description_for_merging, use_buffering);
|
||||
sorting->convertToFinishSorting(order_info->sort_description_for_merging, use_buffering, apply_virtual_row);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1233,10 +1351,10 @@ size_t tryReuseStorageOrderingForWindowFunctions(QueryPlan::Node * parent_node,
|
||||
|
||||
if (order_info)
|
||||
{
|
||||
bool can_read = read_from_merge_tree->requestReadingInOrder(order_info->used_prefix_of_sorting_key_size, order_info->direction, order_info->limit);
|
||||
bool can_read = read_from_merge_tree->requestReadingInOrder(order_info->used_prefix_of_sorting_key_size, order_info->direction, order_info->limit, {});
|
||||
if (!can_read)
|
||||
return 0;
|
||||
sorting->convertToFinishSorting(order_info->sort_description_for_merging, false);
|
||||
sorting->convertToFinishSorting(order_info->sort_description_for_merging, false, false);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -27,6 +27,7 @@
|
||||
#include <Processors/Transforms/FilterTransform.h>
|
||||
#include <Processors/Transforms/ReverseTransform.h>
|
||||
#include <Processors/Transforms/SelectByIndicesTransform.h>
|
||||
#include <Processors/Transforms/VirtualRowTransform.h>
|
||||
#include <QueryPipeline/QueryPipelineBuilder.h>
|
||||
#include <Storages/MergeTree/MergeTreeDataSelectExecutor.h>
|
||||
#include <Storages/MergeTree/MergeTreeIndexLegacyVectorSimilarity.h>
|
||||
@ -176,6 +177,7 @@ namespace Setting
|
||||
extern const SettingsBool use_skip_indexes_if_final;
|
||||
extern const SettingsBool use_uncompressed_cache;
|
||||
extern const SettingsUInt64 merge_tree_min_read_task_size;
|
||||
extern const SettingsBool read_in_order_use_virtual_row;
|
||||
}
|
||||
|
||||
namespace MergeTreeSetting
|
||||
@ -680,7 +682,34 @@ Pipe ReadFromMergeTree::readInOrder(
|
||||
if (set_total_rows_approx)
|
||||
source->addTotalRowsApprox(total_rows);
|
||||
|
||||
pipes.emplace_back(std::move(source));
|
||||
Pipe pipe(source);
|
||||
|
||||
if (virtual_row_conversion && (read_type == ReadType::InOrder))
|
||||
{
|
||||
const auto & index = part_with_ranges.data_part->getIndex();
|
||||
const auto & primary_key = storage_snapshot->metadata->primary_key;
|
||||
size_t mark_range_begin = part_with_ranges.ranges.front().begin;
|
||||
|
||||
ColumnsWithTypeAndName pk_columns;
|
||||
size_t num_columns = virtual_row_conversion->getRequiredColumnsWithTypes().size();
|
||||
pk_columns.reserve(num_columns);
|
||||
|
||||
for (size_t j = 0; j < num_columns; ++j)
|
||||
{
|
||||
auto column = primary_key.data_types[j]->createColumn()->cloneEmpty();
|
||||
column->insert((*(*index)[j])[mark_range_begin]);
|
||||
pk_columns.push_back({std::move(column), primary_key.data_types[j], primary_key.column_names[j]});
|
||||
}
|
||||
|
||||
Block pk_block(std::move(pk_columns));
|
||||
|
||||
pipe.addSimpleTransform([&](const Block & header)
|
||||
{
|
||||
return std::make_shared<VirtualRowTransform>(header, pk_block, virtual_row_conversion);
|
||||
});
|
||||
}
|
||||
|
||||
pipes.emplace_back(std::move(pipe));
|
||||
}
|
||||
|
||||
auto pipe = Pipe::unitePipes(std::move(pipes));
|
||||
@ -1140,7 +1169,8 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder(
|
||||
if (pipe.numOutputPorts() > 1)
|
||||
{
|
||||
auto transform = std::make_shared<MergingSortedTransform>(
|
||||
pipe.getHeader(), pipe.numOutputPorts(), sort_description, block_size.max_block_size_rows, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch);
|
||||
pipe.getHeader(), pipe.numOutputPorts(), sort_description, block_size.max_block_size_rows, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch,
|
||||
0, false, nullptr, false, /*apply_virtual_row_conversions*/ false);
|
||||
|
||||
pipe.addTransform(std::move(transform));
|
||||
}
|
||||
@ -1799,7 +1829,7 @@ void ReadFromMergeTree::updateSortDescription()
|
||||
enable_vertical_final);
|
||||
}
|
||||
|
||||
bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction, size_t read_limit)
|
||||
bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction, size_t read_limit, std::optional<ActionsDAG> virtual_row_conversion_)
|
||||
{
|
||||
/// if direction is not set, use the current one
|
||||
if (!direction)
|
||||
@ -1822,6 +1852,10 @@ bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction,
|
||||
/// Let prefer in-order optimization over vertical FINAL for now
|
||||
enable_vertical_final = false;
|
||||
|
||||
/// Disable virtual row for FINAL.
|
||||
if (virtual_row_conversion_ && !isQueryWithFinal() && context->getSettingsRef()[Setting::read_in_order_use_virtual_row])
|
||||
virtual_row_conversion = std::make_shared<ExpressionActions>(std::move(*virtual_row_conversion_));
|
||||
|
||||
updateSortDescription();
|
||||
|
||||
return true;
|
||||
@ -2238,6 +2272,12 @@ void ReadFromMergeTree::describeActions(FormatSettings & format_settings) const
|
||||
expression->describeActions(format_settings.out, prefix);
|
||||
}
|
||||
}
|
||||
|
||||
if (virtual_row_conversion)
|
||||
{
|
||||
format_settings.out << prefix << "Virtual row conversions" << '\n';
|
||||
virtual_row_conversion->describeActions(format_settings.out, prefix);
|
||||
}
|
||||
}
|
||||
|
||||
void ReadFromMergeTree::describeActions(JSONBuilder::JSONMap & map) const
|
||||
@ -2277,6 +2317,9 @@ void ReadFromMergeTree::describeActions(JSONBuilder::JSONMap & map) const
|
||||
|
||||
map.add("Prewhere info", std::move(prewhere_info_map));
|
||||
}
|
||||
|
||||
if (virtual_row_conversion)
|
||||
map.add("Virtual row conversions", virtual_row_conversion->toTree());
|
||||
}
|
||||
|
||||
void ReadFromMergeTree::describeIndexes(FormatSettings & format_settings) const
|
||||
|
@ -187,7 +187,7 @@ public:
|
||||
StorageMetadataPtr getStorageMetadata() const { return storage_snapshot->metadata; }
|
||||
|
||||
/// Returns `false` if requested reading cannot be performed.
|
||||
bool requestReadingInOrder(size_t prefix_size, int direction, size_t limit);
|
||||
bool requestReadingInOrder(size_t prefix_size, int direction, size_t limit, std::optional<ActionsDAG> virtual_row_conversion_);
|
||||
bool readsInOrder() const;
|
||||
const InputOrderInfoPtr & getInputOrder() const { return query_info.input_order_info; }
|
||||
const SortDescription & getSortDescription() const override { return result_sort_description; }
|
||||
@ -281,6 +281,9 @@ private:
|
||||
std::optional<MergeTreeReadTaskCallback> read_task_callback;
|
||||
bool enable_vertical_final = false;
|
||||
bool enable_remove_parts_from_snapshot_optimization = true;
|
||||
|
||||
ExpressionActionsPtr virtual_row_conversion;
|
||||
|
||||
std::optional<size_t> number_of_current_replica;
|
||||
};
|
||||
|
||||
|
@ -145,11 +145,12 @@ void SortingStep::updateLimit(size_t limit_)
|
||||
}
|
||||
}
|
||||
|
||||
void SortingStep::convertToFinishSorting(SortDescription prefix_description_, bool use_buffering_)
|
||||
void SortingStep::convertToFinishSorting(SortDescription prefix_description_, bool use_buffering_, bool apply_virtual_row_conversions_)
|
||||
{
|
||||
type = Type::FinishSorting;
|
||||
prefix_description = std::move(prefix_description_);
|
||||
use_buffering = use_buffering_;
|
||||
apply_virtual_row_conversions = apply_virtual_row_conversions_;
|
||||
}
|
||||
|
||||
void SortingStep::scatterByPartitionIfNeeded(QueryPipelineBuilder& pipeline)
|
||||
@ -253,7 +254,10 @@ void SortingStep::mergingSorted(QueryPipelineBuilder & pipeline, const SortDescr
|
||||
/*max_block_size_bytes=*/0,
|
||||
SortingQueueStrategy::Batch,
|
||||
limit_,
|
||||
always_read_till_end);
|
||||
always_read_till_end,
|
||||
nullptr,
|
||||
false,
|
||||
apply_virtual_row_conversions);
|
||||
|
||||
pipeline.addTransform(std::move(transform));
|
||||
}
|
||||
|
@ -84,7 +84,7 @@ public:
|
||||
|
||||
bool isSortingForMergeJoin() const { return is_sorting_for_merge_join; }
|
||||
|
||||
void convertToFinishSorting(SortDescription prefix_description, bool use_buffering_);
|
||||
void convertToFinishSorting(SortDescription prefix_description, bool use_buffering_, bool apply_virtual_row_conversions_);
|
||||
|
||||
Type getType() const { return type; }
|
||||
const Settings & getSettings() const { return sort_settings; }
|
||||
@ -134,6 +134,7 @@ private:
|
||||
UInt64 limit;
|
||||
bool always_read_till_end = false;
|
||||
bool use_buffering = false;
|
||||
bool apply_virtual_row_conversions = false;
|
||||
|
||||
Settings sort_settings;
|
||||
};
|
||||
|
@ -187,6 +187,7 @@ void MergeSortingTransform::consume(Chunk chunk)
|
||||
{
|
||||
bool have_all_inputs = false;
|
||||
bool use_average_block_sizes = false;
|
||||
bool apply_virtual_row = false;
|
||||
|
||||
external_merging_sorted = std::make_shared<MergingSortedTransform>(
|
||||
header_without_constants,
|
||||
@ -199,6 +200,7 @@ void MergeSortingTransform::consume(Chunk chunk)
|
||||
/*always_read_till_end_=*/ false,
|
||||
nullptr,
|
||||
use_average_block_sizes,
|
||||
apply_virtual_row,
|
||||
have_all_inputs);
|
||||
|
||||
processors.emplace_back(external_merging_sorted);
|
||||
|
src/Processors/Transforms/VirtualRowTransform.cpp (new file)
@ -0,0 +1,111 @@
#include <Processors/Transforms/VirtualRowTransform.h>
#include <Processors/Merges/Algorithms/MergeTreeReadInfo.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
}

VirtualRowTransform::VirtualRowTransform(const Block & header_, const Block & pk_block_, ExpressionActionsPtr virtual_row_conversions_)
    : IProcessor({header_}, {header_})
    , input(inputs.front()), output(outputs.front())
    , pk_block(pk_block_)
    , virtual_row_conversions(std::move(virtual_row_conversions_))
{
}

VirtualRowTransform::Status VirtualRowTransform::prepare()
{
    /// Check can output.

    if (output.isFinished())
    {
        input.close();
        return Status::Finished;
    }

    if (!output.canPush())
    {
        input.setNotNeeded();
        return Status::PortFull;
    }

    /// Output if has data.
    if (generated)
    {
        output.push(std::move(current_chunk));
        generated = false;
        return Status::PortFull;
    }

    if (can_generate)
        return Status::Ready;

    /// Check can input.
    if (!has_input)
    {
        if (input.isFinished())
        {
            output.finish();
            return Status::Finished;
        }

        input.setNeeded();

        if (!input.hasData())
            return Status::NeedData;

        /// Set input port NotNeeded after chunk was pulled.
        current_chunk = input.pull(true);
        has_input = true;
    }

    /// Now transform.
    return Status::Ready;
}

void VirtualRowTransform::work()
{
    if (can_generate)
    {
        if (generated)
            throw Exception(ErrorCodes::LOGICAL_ERROR, "VirtualRowTransform cannot consume chunk because it already was generated");

        generated = true;
        can_generate = false;

        if (!is_first)
        {
            if (current_chunk.empty())
                throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't generate chunk in VirtualRowTransform");
            return;
        }

        is_first = false;

        Columns empty_columns;
        const auto & header = getOutputs().front().getHeader();
        empty_columns.reserve(header.columns());
        for (size_t i = 0; i < header.columns(); ++i)
        {
            const ColumnWithTypeAndName & type_and_name = header.getByPosition(i);
            empty_columns.push_back(type_and_name.type->createColumn()->cloneEmpty());
        }

        current_chunk.setColumns(empty_columns, 0);
        current_chunk.getChunkInfos().add(std::make_shared<MergeTreeReadInfo>(0, pk_block, virtual_row_conversions));
    }
    else
    {
        if (!has_input)
            throw Exception(ErrorCodes::LOGICAL_ERROR, "VirtualRowTransform cannot consume chunk because it wasn't read");

        has_input = false;
        can_generate = true;
    }
}

}
|
src/Processors/Transforms/VirtualRowTransform.h (new file)
@ -0,0 +1,35 @@
#pragma once

#include <Processors/IProcessor.h>
#include <Storages/KeyDescription.h>
#include <Storages/MergeTree/IMergeTreeDataPart.h>

namespace DB
{

/// Virtual row is useful for read-in-order optimization when multiple parts exist.
class VirtualRowTransform : public IProcessor
{
public:
    explicit VirtualRowTransform(const Block & header_, const Block & pk_block_, ExpressionActionsPtr virtual_row_conversions_);

    String getName() const override { return "VirtualRowTransform"; }

    Status prepare() override;
    void work() override;

private:
    InputPort & input;
    OutputPort & output;

    Chunk current_chunk;
    bool has_input = false;
    bool generated = false;
    bool can_generate = true;
    bool is_first = true;

    Block pk_block;
    ExpressionActionsPtr virtual_row_conversions;
};

}
|
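For orientation, the ReadFromMergeTree::readInOrder hunk earlier in this commit attaches this processor to each in-order pipe roughly as in the condensed sketch below. This is a summary of that hunk, not an additional change; pk_block stands for the block of primary-key values built from the first mark of the part's range, as in that hunk.

// Condensed from the ReadFromMergeTree::readInOrder hunk above: one VirtualRowTransform
// per in-order pipe, announcing the part's starting primary-key values before any data is read.
Pipe pipe(source);
if (virtual_row_conversion && read_type == ReadType::InOrder)
{
    pipe.addSimpleTransform([&](const Block & header)
    {
        return std::make_shared<VirtualRowTransform>(header, pk_block, virtual_row_conversion);
    });
}
pipes.emplace_back(std::move(pipe));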
@ -1161,6 +1161,7 @@ bool KeyCondition::tryPrepareSetIndex(
|
||||
const RPNBuilderFunctionTreeNode & func,
|
||||
RPNElement & out,
|
||||
size_t & out_key_column_num,
|
||||
bool & allow_constant_transformation,
|
||||
bool & is_constant_transformed)
|
||||
{
|
||||
const auto & left_arg = func.getArgumentAt(0);
|
||||
@ -1187,7 +1188,9 @@ bool KeyCondition::tryPrepareSetIndex(
|
||||
set_transforming_chains.push_back(set_transforming_chain);
|
||||
}
|
||||
// For partition index, checking if set can be transformed to prune any partitions
|
||||
else if (single_point && canSetValuesBeWrappedByFunctions(node, index_mapping.key_index, data_type, set_transforming_chain))
|
||||
else if (
|
||||
single_point && allow_constant_transformation
|
||||
&& canSetValuesBeWrappedByFunctions(node, index_mapping.key_index, data_type, set_transforming_chain))
|
||||
{
|
||||
indexes_mapping.push_back(index_mapping);
|
||||
data_types.push_back(data_type);
|
||||
@ -1957,7 +1960,7 @@ bool KeyCondition::extractAtomFromTree(const RPNBuilderTreeNode & node, RPNEleme
|
||||
|
||||
if (functionIsInOrGlobalInOperator(func_name))
|
||||
{
|
||||
if (tryPrepareSetIndex(func, out, key_column_num, is_constant_transformed))
|
||||
if (tryPrepareSetIndex(func, out, key_column_num, allow_constant_transformation, is_constant_transformed))
|
||||
{
|
||||
key_arg_pos = 0;
|
||||
is_set_const = true;
|
||||
|
@ -312,6 +312,7 @@ private:
|
||||
const RPNBuilderFunctionTreeNode & func,
|
||||
RPNElement & out,
|
||||
size_t & out_key_column_num,
|
||||
bool & allow_constant_transformation,
|
||||
bool & is_constant_transformed);
|
||||
|
||||
/// Checks that the index can not be used.
|
||||
|
@ -6,7 +6,7 @@
|
||||
#include <Common/ElapsedTimeProfileEventIncrement.h>
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Common/typeid_cast.h>
|
||||
#include <Processors/Merges/Algorithms/MergeTreePartLevelInfo.h>
|
||||
#include <Processors/Merges/Algorithms/MergeTreeReadInfo.h>
|
||||
#include <DataTypes/DataTypeUUID.h>
|
||||
#include <DataTypes/DataTypeArray.h>
|
||||
#include <Processors/Chunk.h>
|
||||
@ -203,7 +203,7 @@ ChunkAndProgress MergeTreeSelectProcessor::read()
|
||||
|
||||
auto chunk = Chunk(ordered_columns, res.row_count);
|
||||
if (add_part_level)
|
||||
chunk.getChunkInfos().add(std::make_shared<MergeTreePartLevelInfo>(task->getInfo().data_part->info.level));
|
||||
chunk.getChunkInfos().add(std::make_shared<MergeTreeReadInfo>(task->getInfo().data_part->info.level));
|
||||
|
||||
return ChunkAndProgress{
|
||||
.chunk = std::move(chunk),
|
||||
|
@ -14,9 +14,10 @@
|
||||
#include <Processors/QueryPlan/QueryPlan.h>
|
||||
#include <Processors/QueryPlan/FilterStep.h>
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Processors/Merges/Algorithms/MergeTreePartLevelInfo.h>
|
||||
#include <Processors/Merges/Algorithms/MergeTreeReadInfo.h>
|
||||
#include <Storages/MergeTree/checkDataPart.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
@ -271,7 +272,7 @@ try
|
||||
|
||||
auto result = Chunk(std::move(res_columns), rows_read);
|
||||
if (add_part_level)
|
||||
result.getChunkInfos().add(std::make_shared<MergeTreePartLevelInfo>(data_part->info.level));
|
||||
result.getChunkInfos().add(std::make_shared<MergeTreeReadInfo>(data_part->info.level));
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
@ -1563,7 +1563,7 @@ bool ReadFromMerge::requestReadingInOrder(InputOrderInfoPtr order_info_)
|
||||
auto request_read_in_order = [order_info_](ReadFromMergeTree & read_from_merge_tree)
|
||||
{
|
||||
return read_from_merge_tree.requestReadingInOrder(
|
||||
order_info_->used_prefix_of_sorting_key_size, order_info_->direction, order_info_->limit);
|
||||
order_info_->used_prefix_of_sorting_key_size, order_info_->direction, order_info_->limit, {});
|
||||
};
|
||||
|
||||
bool ok = true;
|
||||
|
@ -34,8 +34,9 @@ from typing import List, Optional
|
||||
|
||||
import __main__
|
||||
|
||||
from ci_buddy import CIBuddy
|
||||
from ci_config import Labels
|
||||
from env_helper import TEMP_PATH
|
||||
from env_helper import IS_CI, TEMP_PATH
|
||||
from get_robot_token import get_best_robot_token
|
||||
from git_helper import GIT_PREFIX, git_runner, is_shallow
|
||||
from github_helper import GitHub, PullRequest, PullRequests, Repository
|
||||
@ -97,7 +98,7 @@ close it.
|
||||
self.pr = pr
|
||||
self.repo = repo
|
||||
|
||||
self.cherrypick_branch = f"cherrypick/{name}/{pr.merge_commit_sha}"
|
||||
self.cherrypick_branch = f"cherrypick/{name}/{pr.number}"
|
||||
self.backport_branch = f"backport/{name}/{pr.number}"
|
||||
self.cherrypick_pr = None # type: Optional[PullRequest]
|
||||
self.backport_pr = None # type: Optional[PullRequest]
|
||||
@ -653,6 +654,14 @@ def main():
|
||||
bp.process_backports()
|
||||
if bp.error is not None:
|
||||
logging.error("Finished successfully, but errors occurred!")
|
||||
if IS_CI:
|
||||
ci_buddy = CIBuddy()
|
||||
ci_buddy.post_job_error(
|
||||
f"The cherry-pick finished with errors: {bp.error}",
|
||||
with_instance_info=True,
|
||||
with_wf_link=True,
|
||||
critical=True,
|
||||
)
|
||||
raise bp.error
|
||||
|
||||
|
||||
|
@ -3,14 +3,13 @@ import json
|
||||
import os
|
||||
from typing import Dict, List, Union
|
||||
|
||||
import boto3
|
||||
import requests
|
||||
from botocore.exceptions import ClientError
|
||||
|
||||
from ci_config import CI
|
||||
from ci_utils import WithIter
|
||||
from commit_status_helper import get_commit_filtered_statuses, get_repo
|
||||
from get_robot_token import get_best_robot_token
|
||||
from get_robot_token import get_best_robot_token, get_parameter_from_ssm
|
||||
from github_helper import GitHub
|
||||
from pr_info import PRInfo
|
||||
|
||||
@ -89,15 +88,9 @@ class CIBuddy:
|
||||
def _get_webhooks():
|
||||
name = "ci_buddy_web_hooks"
|
||||
|
||||
session = boto3.Session(region_name="us-east-1") # Replace with your region
|
||||
ssm_client = session.client("ssm")
|
||||
json_string = None
|
||||
try:
|
||||
response = ssm_client.get_parameter(
|
||||
Name=name,
|
||||
WithDecryption=True, # Set to True if the parameter is a SecureString
|
||||
)
|
||||
json_string = response["Parameter"]["Value"]
|
||||
json_string = get_parameter_from_ssm(name, decrypt=True)
|
||||
except ClientError as e:
|
||||
print(f"An error occurred: {e}")
|
||||
|
||||
|
@ -1000,6 +1000,9 @@ def test_max_set_age(started_cluster):
|
||||
assert "Cannot parse input" in node.query(
|
||||
f"SELECT exception FROM system.s3queue WHERE file_name ilike '%{file_with_error}'"
|
||||
)
|
||||
assert "Cannot parse input" in node.query(
|
||||
f"SELECT exception FROM system.s3queue_log WHERE file_name ilike '%{file_with_error}' ORDER BY processing_end_time DESC LIMIT 1"
|
||||
)
|
||||
|
||||
assert 1 == int(
|
||||
node.query(
|
||||
|
@ -86,11 +86,17 @@
|
||||
ReadType: InOrder
|
||||
Parts: 1
|
||||
Granules: 3
|
||||
Virtual row conversions
|
||||
Actions: INPUT :: 0 -> x UInt32 : 0
|
||||
Positions: 0
|
||||
-----------------
|
||||
ReadFromMergeTree (default.test_index)
|
||||
ReadType: InReverseOrder
|
||||
Parts: 1
|
||||
Granules: 3
|
||||
Virtual row conversions
|
||||
Actions: INPUT :: 0 -> x UInt32 : 0
|
||||
Positions: 0
|
||||
ReadFromMergeTree (default.idx)
|
||||
Indexes:
|
||||
PrimaryKey
|
||||
@ -174,11 +180,19 @@
|
||||
ReadType: InOrder
|
||||
Parts: 1
|
||||
Granules: 3
|
||||
Virtual row conversions
|
||||
Actions: INPUT : 0 -> x UInt32 : 0
|
||||
ALIAS x :: 0 -> __table1.x UInt32 : 1
|
||||
Positions: 1
|
||||
-----------------
|
||||
ReadFromMergeTree (default.test_index)
|
||||
ReadType: InReverseOrder
|
||||
Parts: 1
|
||||
Granules: 3
|
||||
Virtual row conversions
|
||||
Actions: INPUT : 0 -> x UInt32 : 0
|
||||
ALIAS x :: 0 -> __table1.x UInt32 : 1
|
||||
Positions: 1
|
||||
ReadFromMergeTree (default.idx)
|
||||
Indexes:
|
||||
PrimaryKey
|
||||
|
@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
|
||||
for i in $(seq 0 1)
|
||||
do
|
||||
CH_CLIENT="$CLICKHOUSE_CLIENT --optimize_move_to_prewhere=1 --convert_query_to_cnf=0 --optimize_read_in_order=1 --enable_analyzer=$i"
|
||||
CH_CLIENT="$CLICKHOUSE_CLIENT --optimize_move_to_prewhere=1 --convert_query_to_cnf=0 --optimize_read_in_order=1 --read_in_order_use_virtual_row=1 --enable_analyzer=$i"
|
||||
|
||||
$CH_CLIENT -q "drop table if exists test_index"
|
||||
$CH_CLIENT -q "drop table if exists idx"
|
||||
|
@ -172,7 +172,8 @@ ExpressionTransform
|
||||
(Expression)
|
||||
ExpressionTransform
|
||||
(ReadFromMergeTree)
|
||||
MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1
|
||||
VirtualRowTransform
|
||||
MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1
|
||||
2020-10-10 00:00:00 0.01
|
||||
2020-10-10 00:00:00 0.01
|
||||
2020-10-10 00:00:00 0.01
|
||||
@ -186,7 +187,8 @@ ExpressionTransform
|
||||
(Expression)
|
||||
ExpressionTransform
|
||||
(ReadFromMergeTree)
|
||||
MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1
|
||||
VirtualRowTransform
|
||||
MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1
|
||||
2020-10-10 00:00:00 0.01
|
||||
2020-10-10 00:00:00 0.01
|
||||
2020-10-10 00:00:00 0.01
|
||||
|
@ -2,6 +2,7 @@ SET max_threads=0;
|
||||
SET optimize_read_in_order=1;
|
||||
SET optimize_trivial_insert_select = 1;
|
||||
SET read_in_order_two_level_merge_threshold=100;
|
||||
SET read_in_order_use_virtual_row = 1;
|
||||
|
||||
DROP TABLE IF EXISTS t_read_in_order;
|
||||
|
||||
|
@ -0,0 +1,42 @@
|
||||
0
|
||||
1
|
||||
2
|
||||
3
|
||||
16384
|
||||
========
|
||||
16385
|
||||
16386
|
||||
16387
|
||||
16388
|
||||
24576
|
||||
========
|
||||
0
|
||||
1
|
||||
2
|
||||
3
|
||||
16384
|
||||
========
|
||||
16385
|
||||
16386
|
||||
16387
|
||||
16388
|
||||
24576
|
||||
========
|
||||
1 2
|
||||
1 2
|
||||
1 3
|
||||
1 3
|
||||
1 4
|
||||
1 4
|
||||
1 2
|
||||
1 2
|
||||
1 3
|
||||
1 3
|
||||
1 4
|
||||
1 4
|
||||
========
|
||||
1 3
|
||||
1 2
|
||||
1 1
|
||||
-- test distinct ----
|
||||
0
|
@ -0,0 +1,218 @@
|
||||
|
||||
SET read_in_order_use_virtual_row = 1;
|
||||
|
||||
DROP TABLE IF EXISTS t;
|
||||
|
||||
CREATE TABLE t
|
||||
(
|
||||
`x` UInt64,
|
||||
`y` UInt64,
|
||||
`z` UInt64,
|
||||
`k` UInt64
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
ORDER BY (x, y, z)
|
||||
SETTINGS index_granularity = 8192,
|
||||
index_granularity_bytes = 10485760;
|
||||
|
||||
SYSTEM STOP MERGES t;
|
||||
|
||||
INSERT INTO t SELECT
|
||||
number,
|
||||
number,
|
||||
number,
|
||||
number
|
||||
FROM numbers(8192 * 3);
|
||||
|
||||
INSERT INTO t SELECT
|
||||
number + (8192 * 3),
|
||||
number + (8192 * 3),
|
||||
number + (8192 * 3),
|
||||
number
|
||||
FROM numbers(8192 * 3);
|
||||
|
||||
-- Expecting 2 virtual rows + one chunk (8192) for result + one extra chunk for next consumption in merge transform (8192),
|
||||
-- both chunks come from the same part.
|
||||
SELECT x
|
||||
FROM t
|
||||
ORDER BY x ASC
|
||||
LIMIT 4
|
||||
SETTINGS max_block_size = 8192,
|
||||
read_in_order_two_level_merge_threshold = 0, --force preliminary merge
|
||||
max_threads = 1,
|
||||
optimize_read_in_order = 1,
|
||||
log_comment = 'preliminary merge, no filter';
|
||||
|
||||
SYSTEM FLUSH LOGS;
|
||||
|
||||
SELECT read_rows
|
||||
FROM system.query_log
|
||||
WHERE current_database = currentDatabase()
|
||||
AND log_comment = 'preliminary merge, no filter'
|
||||
AND type = 'QueryFinish'
|
||||
ORDER BY query_start_time DESC
|
||||
limit 1;
|
||||
|
||||
SELECT '========';
|
||||
-- Expecting 2 virtual rows + two chunks (8192*2) get filtered out + one chunk for result (8192),
|
||||
-- all chunks come from the same part.
|
||||
SELECT k
|
||||
FROM t
|
||||
WHERE k > 8192 * 2
|
||||
ORDER BY x ASC
|
||||
LIMIT 4
|
||||
SETTINGS max_block_size = 8192,
|
||||
read_in_order_two_level_merge_threshold = 0, --force preliminary merge
|
||||
max_threads = 1,
|
||||
optimize_read_in_order = 1,
|
||||
log_comment = 'preliminary merge with filter';
|
||||
|
||||
SYSTEM FLUSH LOGS;
|
||||
|
||||
SELECT read_rows
|
||||
FROM system.query_log
|
||||
WHERE current_database = currentDatabase()
|
||||
AND log_comment = 'preliminary merge with filter'
|
||||
AND type = 'QueryFinish'
|
||||
ORDER BY query_start_time DESC
|
||||
LIMIT 1;
|
||||
|
||||
SELECT '========';
|
||||
-- Expecting 2 virtual rows + one chunk (8192) for result + one extra chunk for next consumption in merge transform (8192),
|
||||
-- both chunks come from the same part.
|
||||
SELECT x
|
||||
FROM t
|
||||
ORDER BY x ASC
|
||||
LIMIT 4
|
||||
SETTINGS max_block_size = 8192,
|
||||
read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge
|
||||
max_threads = 1,
|
||||
optimize_read_in_order = 1,
|
||||
log_comment = 'no preliminary merge, no filter';
|
||||
|
||||
SYSTEM FLUSH LOGS;
|
||||
|
||||
SELECT read_rows
|
||||
FROM system.query_log
|
||||
WHERE current_database = currentDatabase()
|
||||
AND log_comment = 'no preliminary merge, no filter'
|
||||
AND type = 'QueryFinish'
|
||||
ORDER BY query_start_time DESC
|
||||
LIMIT 1;
|
||||
|
||||
SELECT '========';
|
||||
-- Expecting 2 virtual rows + two chunks (8192*2) get filtered out + one chunk for result (8192),
|
||||
-- all chunks come from the same part.
|
||||
SELECT k
|
||||
FROM t
|
||||
WHERE k > 8192 * 2
|
||||
ORDER BY x ASC
|
||||
LIMIT 4
|
||||
SETTINGS max_block_size = 8192,
|
||||
read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge
|
||||
max_threads = 1,
|
||||
optimize_read_in_order = 1,
|
||||
log_comment = 'no preliminary merge, with filter';
|
||||
|
||||
SYSTEM FLUSH LOGS;
|
||||
|
||||
SELECT read_rows
|
||||
FROM system.query_log
|
||||
WHERE current_database = currentDatabase()
|
||||
AND log_comment = 'no preliminary merge, with filter'
|
||||
AND type = 'QueryFinish'
|
||||
ORDER BY query_start_time DESC
|
||||
LIMIT 1;
|
||||
|
||||
DROP TABLE t;
|
||||
|
||||
SELECT '========';
|
||||
-- from 02149_read_in_order_fixed_prefix
|
||||
DROP TABLE IF EXISTS fixed_prefix;
|
||||
|
||||
CREATE TABLE fixed_prefix(a UInt32, b UInt32)
|
||||
ENGINE = MergeTree ORDER BY (a, b)
|
||||
SETTINGS index_granularity = 3;
|
||||
|
||||
SYSTEM STOP MERGES fixed_prefix;
|
||||
|
||||
INSERT INTO fixed_prefix VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5);
|
||||
INSERT INTO fixed_prefix VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5);
|
||||
|
||||
SELECT a, b
|
||||
FROM fixed_prefix
|
||||
WHERE a = 1
|
||||
ORDER BY b
|
||||
SETTINGS max_threads = 1,
|
||||
optimize_read_in_order = 1,
|
||||
read_in_order_two_level_merge_threshold = 0; --force preliminary merge
|
||||
|
||||
SELECT a, b
|
||||
FROM fixed_prefix
|
||||
WHERE a = 1
|
||||
ORDER BY b
|
||||
SETTINGS max_threads = 1,
|
||||
optimize_read_in_order = 1,
|
||||
read_in_order_two_level_merge_threshold = 5; --avoid preliminary merge
|
||||
|
||||
DROP TABLE fixed_prefix;
|
||||
|
||||
SELECT '========';
|
||||
DROP TABLE IF EXISTS function_pk;
|
||||
|
||||
CREATE TABLE function_pk
|
||||
(
|
||||
`A` Int64,
|
||||
`B` Int64
|
||||
)
|
||||
ENGINE = MergeTree ORDER BY (A, -B)
|
||||
SETTINGS index_granularity = 1;
|
||||
|
||||
SYSTEM STOP MERGES function_pk;
|
||||
|
||||
INSERT INTO function_pk values(1,1);
|
||||
INSERT INTO function_pk values(1,3);
|
||||
INSERT INTO function_pk values(1,2);
|
||||
|
||||
SELECT *
|
||||
FROM function_pk
|
||||
ORDER BY (A,-B) ASC
|
||||
limit 3
|
||||
SETTINGS max_threads = 1,
|
||||
optimize_read_in_order = 1,
|
||||
read_in_order_two_level_merge_threshold = 5; --avoid preliminary merge
|
||||
|
||||
DROP TABLE function_pk;
|
||||
|
||||
-- modified from 02317_distinct_in_order_optimization
|
||||
SELECT '-- test distinct ----';
|
||||
|
||||
DROP TABLE IF EXISTS distinct_in_order SYNC;
|
||||
|
||||
CREATE TABLE distinct_in_order
|
||||
(
|
||||
`a` int,
|
||||
`b` int,
|
||||
`c` int
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
ORDER BY (a, b)
|
||||
SETTINGS index_granularity = 8192,
|
||||
index_granularity_bytes = '10Mi';
|
||||
|
||||
SYSTEM STOP MERGES distinct_in_order;
|
||||
|
||||
INSERT INTO distinct_in_order SELECT
|
||||
number % number,
|
||||
number % 5,
|
||||
number % 10
|
||||
FROM numbers(1, 1000000);
|
||||
|
||||
SELECT DISTINCT a
|
||||
FROM distinct_in_order
|
||||
ORDER BY a ASC
|
||||
SETTINGS read_in_order_two_level_merge_threshold = 0,
|
||||
optimize_read_in_order = 1,
|
||||
max_threads = 2;
|
||||
|
||||
DROP TABLE distinct_in_order;
|
@ -0,0 +1,25 @@
|
||||
(Expression)
|
||||
ExpressionTransform
|
||||
(Sorting)
|
||||
MergingSortedTransform 4 → 1
|
||||
(Expression)
|
||||
ExpressionTransform × 4
|
||||
(ReadFromMergeTree)
|
||||
ExpressionTransform × 5
|
||||
VirtualRowTransform
|
||||
MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1
|
||||
MergingSortedTransform 2 → 1
|
||||
ExpressionTransform × 2
|
||||
VirtualRowTransform
|
||||
MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1
|
||||
VirtualRowTransform
|
||||
MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1
|
||||
MergingSortedTransform 2 → 1
|
||||
ExpressionTransform × 2
|
||||
VirtualRowTransform
|
||||
MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1
|
||||
VirtualRowTransform
|
||||
MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1
|
||||
ExpressionTransform
|
||||
VirtualRowTransform
|
||||
MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1
|
@ -0,0 +1,26 @@
|
||||
-- Tags: no-random-merge-tree-settings, no-object-storage
|
||||
|
||||
SET optimize_read_in_order = 1, merge_tree_min_rows_for_concurrent_read = 1000, read_in_order_use_virtual_row = 1;
|
||||
|
||||
DROP TABLE IF EXISTS tab;
|
||||
|
||||
CREATE TABLE tab
|
||||
(
|
||||
`t` DateTime
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
ORDER BY t
|
||||
SETTINGS index_granularity = 1;
|
||||
|
||||
SYSTEM STOP MERGES tab;
|
||||
|
||||
INSERT INTO tab SELECT toDateTime('2024-01-10') + number FROM numbers(10000);
|
||||
INSERT INTO tab SELECT toDateTime('2024-01-30') + number FROM numbers(10000);
|
||||
INSERT INTO tab SELECT toDateTime('2024-01-20') + number FROM numbers(10000);
|
||||
|
||||
EXPLAIN PIPELINE
|
||||
SELECT *
|
||||
FROM tab
|
||||
ORDER BY t ASC
|
||||
SETTINGS read_in_order_two_level_merge_threshold = 0, max_threads = 4, read_in_order_use_buffering = 0
|
||||
FORMAT tsv;
|
@ -0,0 +1,2 @@
|
||||
dist
|
||||
src
|
@ -0,0 +1,21 @@
|
||||
-- Tags: no-parallel
|
||||
|
||||
-- modified from test_01155_ordinary, to test special optimization path for virtual row
|
||||
DROP DATABASE IF EXISTS test_03031;
|
||||
|
||||
CREATE DATABASE test_03031;
|
||||
|
||||
USE test_03031;
|
||||
|
||||
SET read_in_order_use_virtual_row = 1;
|
||||
|
||||
CREATE TABLE src (s String) ENGINE = MergeTree() ORDER BY s;
|
||||
INSERT INTO src(s) VALUES ('before moving tables');
|
||||
CREATE TABLE dist (s String) ENGINE = Distributed(test_shard_localhost, test_03031, src);
|
||||
|
||||
SET enable_analyzer=0;
|
||||
SELECT _table FROM merge('test_03031', '') ORDER BY _table, s;
|
||||
|
||||
DROP TABLE src;
|
||||
DROP TABLE dist;
|
||||
DROP DATABASE test_03031;
|
@ -0,0 +1,6 @@
|
||||
123
|
||||
Hello
|
||||
['Hello','world']
|
||||
Hello
|
||||
Hello
|
||||
['Hello','world']
|
tests/queries/0_stateless/03199_atomic_clickhouse_local.sh (new executable file)
@ -0,0 +1,24 @@
#!/usr/bin/env bash

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

${CLICKHOUSE_LOCAL} -n "
CREATE TABLE test (x UInt8) ORDER BY x;
INSERT INTO test VALUES (123);
SELECT * FROM test;
CREATE OR REPLACE TABLE test (s String) ORDER BY s;
INSERT INTO test VALUES ('Hello');
SELECT * FROM test;
RENAME TABLE test TO test2;
CREATE OR REPLACE TABLE test (s Array(String)) ORDER BY s;
INSERT INTO test VALUES (['Hello', 'world']);
SELECT * FROM test;
SELECT * FROM test2;
EXCHANGE TABLES test AND test2;
SELECT * FROM test;
SELECT * FROM test2;
DROP TABLE test;
DROP TABLE test2;
"
|
@ -0,0 +1,11 @@
|
||||
Hello from a file
|
||||
Hello from a file
|
||||
Hello from a file
|
||||
Hello from a file
|
||||
Hello from a file
|
||||
Hello from a file
|
||||
Hello from a file
|
||||
Hello from a file
|
||||
Hello from a file
|
||||
max_local_read_bandwidth 1 100
|
||||
max_local_read_bandwidth 1 200
|
tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.sh (new executable file)
@ -0,0 +1,33 @@
#!/usr/bin/env bash
# Tags: no-random-settings

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

FILE=${CLICKHOUSE_TMP}/${CLICKHOUSE_DATABASE}_without_extension
echo "SELECT 'Hello from a file'" > ${FILE}

# Queries can be read from a file.
${CLICKHOUSE_BINARY} --queries-file ${FILE}

# Or from stdin.
${CLICKHOUSE_BINARY} < ${FILE}

# Also the positional argument can be interpreted as a file.
${CLICKHOUSE_BINARY} ${FILE}

${CLICKHOUSE_LOCAL} --queries-file ${FILE}
${CLICKHOUSE_LOCAL} < ${FILE}
${CLICKHOUSE_LOCAL} ${FILE}

${CLICKHOUSE_CLIENT} --queries-file ${FILE}
${CLICKHOUSE_CLIENT} < ${FILE}
${CLICKHOUSE_CLIENT} ${FILE}

# Check that positional arguments work in any place
echo "Select name, changed, value FROM system.settings where name = 'max_local_read_bandwidth'" > ${FILE}
${CLICKHOUSE_BINARY} ${FILE} --max-local-read-bandwidth 100
${CLICKHOUSE_BINARY} --max-local-read-bandwidth 200 ${FILE}

rm ${FILE}
|
@ -0,0 +1,13 @@
|
||||
-- Monotonic function in partition key
|
||||
48
|
||||
48
|
||||
-- Non-monotonic function in partition key
|
||||
48
|
||||
48
|
||||
-- Multiple partition columns
|
||||
50
|
||||
50
|
||||
96
|
||||
96
|
||||
98
|
||||
98
|
tests/queries/0_stateless/03269_partition_key_not_in_set.sql (new file)
@ -0,0 +1,81 @@
|
||||
-- Related to https://github.com/ClickHouse/ClickHouse/issues/69829
|
||||
--
|
||||
-- The main goal of the test is to assert that constant transformation
|
||||
-- for set constant while partition pruning won't be performed
|
||||
-- if it's not allowed (NOT IN operator case)
|
||||
|
||||
DROP TABLE IF EXISTS 03269_filters;
|
||||
CREATE TABLE 03269_filters (
|
||||
id Int32,
|
||||
dt Date
|
||||
)
|
||||
engine = MergeTree
|
||||
order by id;
|
||||
|
||||
INSERT INTO 03269_filters
|
||||
SELECT 6, '2020-01-01'
|
||||
UNION ALL
|
||||
SELECT 38, '2021-01-01';
|
||||
|
||||
SELECT '-- Monotonic function in partition key';
|
||||
|
||||
DROP TABLE IF EXISTS 03269_single_monotonic;
|
||||
CREATE TABLE 03269_single_monotonic(
|
||||
id Int32
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
PARTITION BY intDiv(id, 10)
|
||||
ORDER BY id;
|
||||
|
||||
INSERT INTO 03269_single_monotonic SELECT number FROM numbers(50);
|
||||
|
||||
SELECT count() FROM 03269_single_monotonic WHERE id NOT IN (6, 38);
|
||||
SELECT count() FROM 03269_single_monotonic WHERE id NOT IN (
|
||||
SELECT id FROM 03269_filters
|
||||
);
|
||||
|
||||
DROP TABLE 03269_single_monotonic;
|
||||
|
||||
SELECT '-- Non-monotonic function in partition key';
|
||||
|
||||
DROP TABLE IF EXISTS 03269_single_non_monotonic;
|
||||
CREATE TABLE 03269_single_non_monotonic (
|
||||
id Int32
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
PARTITION BY id % 10
|
||||
ORDER BY id;
|
||||
|
||||
INSERT INTO 03269_single_non_monotonic SELECT number FROM numbers(50);
|
||||
|
||||
SELECT count() FROM 03269_single_non_monotonic WHERE id NOT IN (6, 38);
|
||||
SELECT count() FROM 03269_single_non_monotonic WHERE id NOT IN (SELECT id FROM 03269_filters);
|
||||
|
||||
DROP TABLE 03269_single_non_monotonic;
|
||||
|
||||
SELECT '-- Multiple partition columns';
|
||||
|
||||
DROP TABLE IF EXISTS 03269_multiple_part_cols;
|
||||
CREATE TABLE 03269_multiple_part_cols (
|
||||
id Int32,
|
||||
dt Date,
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
PARTITION BY (dt, intDiv(id, 10))
|
||||
ORDER BY id;
|
||||
|
||||
INSERT INTO 03269_multiple_part_cols
|
||||
SELECT number, '2020-01-01' FROM numbers(50)
|
||||
UNION ALL
|
||||
SELECT number, '2021-01-01' FROM numbers(50);
|
||||
|
||||
SELECT count() FROM 03269_multiple_part_cols WHERE dt NOT IN ('2020-01-01');
|
||||
SELECT count() FROM 03269_multiple_part_cols WHERE dt NOT IN (SELECT dt FROM 03269_filters WHERE dt < '2021-01-01');
|
||||
|
||||
SELECT count() FROM 03269_multiple_part_cols WHERE id NOT IN (6, 38);
|
||||
SELECT count() FROM 03269_multiple_part_cols WHERE id NOT IN (SELECT id FROM 03269_filters);
|
||||
|
||||
SELECT count() FROM 03269_multiple_part_cols WHERE (id, dt) NOT IN ((6, '2020-01-01'), (38, '2021-01-01'));
|
||||
SELECT count() FROM 03269_multiple_part_cols WHERE (id, dt) NOT IN (SELECT id, dt FROM 03269_filters);
|
||||
|
||||
DROP TABLE 03269_multiple_part_cols;
|
@ -1811,6 +1811,7 @@ geocode
|
||||
geohash
|
||||
geohashDecode
|
||||
geohashEncode
|
||||
geohashes
|
||||
geohashesInBox
|
||||
geoip
|
||||
geospatial
|
||||
|