mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-22 15:42:02 +00:00
Merge remote-tracking branch 'rschu1ze/master' into dotProduct-memcpy
This commit is contained in:
commit
939f4b6db0
@ -174,7 +174,14 @@ function fuzz
|
||||
mkdir -p /var/run/clickhouse-server
|
||||
|
||||
# NOTE: we use process substitution here to preserve keep $! as a pid of clickhouse-server
|
||||
clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db > server.log 2>&1 &
|
||||
# server.log -> CH logs
|
||||
# stderr.log -> Process logs (sanitizer)
|
||||
clickhouse-server \
|
||||
--config-file db/config.xml \
|
||||
--pid-file /var/run/clickhouse-server/clickhouse-server.pid \
|
||||
-- --path db \
|
||||
--logger.console=0 \
|
||||
--logger.log=server.log > stderr.log 2>&1 &
|
||||
server_pid=$!
|
||||
|
||||
kill -0 $server_pid
|
||||
@ -303,7 +310,7 @@ quit
|
||||
if [ "$server_died" == 1 ]
|
||||
then
|
||||
# The server has died.
|
||||
if ! rg --text -o 'Received signal.*|Logical error.*|Assertion.*failed|Failed assertion.*|.*runtime error: .*|.*is located.*|(SUMMARY|ERROR): [a-zA-Z]+Sanitizer:.*|.*_LIBCPP_ASSERT.*|.*Child process was terminated by signal 9.*' server.log > description.txt
|
||||
if ! rg --text -o 'Received signal.*|Logical error.*|Assertion.*failed|Failed assertion.*|.*runtime error: .*|.*is located.*|(SUMMARY|ERROR): [a-zA-Z]+Sanitizer:.*|.*_LIBCPP_ASSERT.*|.*Child process was terminated by signal 9.*' server.log stderr.log > description.txt
|
||||
then
|
||||
echo "Lost connection to server. See the logs." > description.txt
|
||||
fi
|
||||
@ -427,6 +434,7 @@ p.links a { padding: 5px; margin: 3px; background: #FFF; line-height: 2; white-s
|
||||
<a href="run.log">run.log</a>
|
||||
<a href="fuzzer.log.zst">fuzzer.log.zst</a>
|
||||
<a href="server.log.zst">server.log.zst</a>
|
||||
<a href="stderr.log">stderr.log</a>
|
||||
<a href="main.log">main.log</a>
|
||||
<a href="dmesg.log">dmesg.log</a>
|
||||
${CORE_LINK}
|
||||
|
@ -61,6 +61,18 @@ if [[ -n "$BUGFIX_VALIDATE_CHECK" ]] && [[ "$BUGFIX_VALIDATE_CHECK" -eq 1 ]]; th
|
||||
rm /etc/clickhouse-server/users.d/s3_cache_new.xml
|
||||
rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml
|
||||
|
||||
#todo: remove these after 24.3 released.
|
||||
sudo cat /etc/clickhouse-server/config.d/azure_storage_conf.xml \
|
||||
| sed "s|<object_storage_type>azure|<object_storage_type>azure_blob_storage|" \
|
||||
> /etc/clickhouse-server/config.d/azure_storage_conf.xml.tmp
|
||||
sudo mv /etc/clickhouse-server/config.d/azure_storage_conf.xml.tmp /etc/clickhouse-server/config.d/azure_storage_conf.xml
|
||||
|
||||
#todo: remove these after 24.3 released.
|
||||
sudo cat /etc/clickhouse-server/config.d/storage_conf.xml \
|
||||
| sed "s|<object_storage_type>local|<object_storage_type>local_blob_storage|" \
|
||||
> /etc/clickhouse-server/config.d/storage_conf.xml.tmp
|
||||
sudo mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml
|
||||
|
||||
function remove_keeper_config()
|
||||
{
|
||||
sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
|
||||
@ -77,7 +89,7 @@ fi
|
||||
if [ "$NUM_TRIES" -gt "1" ]; then
|
||||
export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
|
||||
export THREAD_FUZZER_SLEEP_PROBABILITY=0.1
|
||||
export THREAD_FUZZER_SLEEP_TIME_US=100000
|
||||
export THREAD_FUZZER_SLEEP_TIME_US_MAX=100000
|
||||
|
||||
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY=1
|
||||
export THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY=1
|
||||
@ -88,10 +100,10 @@ if [ "$NUM_TRIES" -gt "1" ]; then
|
||||
export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY=0.001
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY=0.001
|
||||
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US=10000
|
||||
export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US=10000
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US=10000
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US=10000
|
||||
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US_MAX=10000
|
||||
export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US_MAX=10000
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US_MAX=10000
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US_MAX=10000
|
||||
|
||||
mkdir -p /var/run/clickhouse-server
|
||||
# simplest way to forward env variables to server
|
||||
|
@ -215,7 +215,7 @@ function check_server_start()
|
||||
function check_logs_for_critical_errors()
|
||||
{
|
||||
# Sanitizer asserts
|
||||
sed -n '/WARNING:.*anitizer/,/^$/p' >> /test_output/tmp
|
||||
sed -n '/WARNING:.*anitizer/,/^$/p' /var/log/clickhouse-server/stderr.log >> /test_output/tmp
|
||||
rg -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \
|
||||
&& echo -e "Sanitizer assert (in stderr.log)$FAIL$(head_escaped /test_output/tmp)" >> /test_output/test_results.tsv \
|
||||
|| echo -e "No sanitizer asserts$OK" >> /test_output/test_results.tsv
|
||||
|
@ -27,7 +27,7 @@ install_packages package_folder
|
||||
# and find more potential issues.
|
||||
export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
|
||||
export THREAD_FUZZER_SLEEP_PROBABILITY=0.1
|
||||
export THREAD_FUZZER_SLEEP_TIME_US=100000
|
||||
export THREAD_FUZZER_SLEEP_TIME_US_MAX=100000
|
||||
|
||||
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY=1
|
||||
export THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY=1
|
||||
@ -38,11 +38,11 @@ export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY=0.001
|
||||
export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY=0.001
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY=0.001
|
||||
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US=10000
|
||||
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US_MAX=10000
|
||||
|
||||
export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US=10000
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US=10000
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US=10000
|
||||
export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US_MAX=10000
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US_MAX=10000
|
||||
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US_MAX=10000
|
||||
|
||||
export THREAD_FUZZER_EXPLICIT_SLEEP_PROBABILITY=0.01
|
||||
export THREAD_FUZZER_EXPLICIT_MEMORY_EXCEPTION_PROBABILITY=0.01
|
||||
|
293
docs/en/getting-started/example-datasets/tw-weather.md
Normal file
293
docs/en/getting-started/example-datasets/tw-weather.md
Normal file
@ -0,0 +1,293 @@
|
||||
---
|
||||
slug: /en/getting-started/example-datasets/tw-weather
|
||||
sidebar_label: Taiwan Historical Weather Datasets
|
||||
sidebar_position: 1
|
||||
description: 131 million rows of weather observation data for the last 128 yrs
|
||||
---
|
||||
|
||||
# Taiwan Historical Weather Datasets
|
||||
|
||||
This dataset contains historical meteorological observations measurements for the last 128 years. Each row is a measurement for a point in date time and weather station.
|
||||
|
||||
The origin of this dataset is available [here](https://github.com/Raingel/historical_weather) and the list of weather station numbers can be found [here](https://github.com/Raingel/weather_station_list).
|
||||
|
||||
> The sources of meteorological datasets include the meteorological stations that are established by the Central Weather Administration (station code is beginning with C0, C1, and 4) and the agricultural meteorological stations belonging to the Council of Agriculture (station code other than those mentioned above):
|
||||
|
||||
- StationId
|
||||
- MeasuredDate, the observation time
|
||||
- StnPres, the station air pressure
|
||||
- SeaPres, the sea level pressure
|
||||
- Td, the dew point temperature
|
||||
- RH, the relative humidity
|
||||
- Other elements where available
|
||||
|
||||
## Downloading the data
|
||||
|
||||
- A [pre-processed version](#pre-processed-data) of the data for the ClickHouse, which has been cleaned, re-structured, and enriched. This dataset covers the years from 1896 to 2023.
|
||||
- [Download the original raw data](#original-raw-data) and convert to the format required by ClickHouse. Users wanting to add their own columns may wish to explore or complete their approaches.
|
||||
|
||||
### Pre-processed data
|
||||
|
||||
The dataset has also been re-structured from a measurement per line to a row per weather station id and measured date, i.e.
|
||||
|
||||
```csv
|
||||
StationId,MeasuredDate,StnPres,Tx,RH,WS,WD,WSGust,WDGust,Precp,GloblRad,TxSoil0cm,TxSoil5cm,TxSoil20cm,TxSoil50cm,TxSoil100cm,SeaPres,Td,PrecpHour,SunShine,TxSoil10cm,EvapA,Visb,UVI,Cloud Amount,TxSoil30cm,TxSoil200cm,TxSoil300cm,TxSoil500cm,VaporPressure
|
||||
C0X100,2016-01-01 01:00:00,1022.1,16.1,72,1.1,8.0,,,,,,,,,,,,,,,,,,,,,,,
|
||||
C0X100,2016-01-01 02:00:00,1021.6,16.0,73,1.2,358.0,,,,,,,,,,,,,,,,,,,,,,,
|
||||
C0X100,2016-01-01 03:00:00,1021.3,15.8,74,1.5,353.0,,,,,,,,,,,,,,,,,,,,,,,
|
||||
C0X100,2016-01-01 04:00:00,1021.2,15.8,74,1.7,8.0,,,,,,,,,,,,,,,,,,,,,,,
|
||||
```
|
||||
|
||||
It is easy to query and ensure that the resulting table has less sparse and some elements are null because they're not available to be measured in this weather station.
|
||||
|
||||
This dataset is available in the following Google CloudStorage location. Either download the dataset to your local filesystem (and insert them with the ClickHouse client) or insert them directly into the ClickHouse (see [Inserting from URL](#inserting-from-url)).
|
||||
|
||||
To download:
|
||||
|
||||
```bash
|
||||
wget https://storage.googleapis.com/taiwan-weather-observaiton-datasets/preprocessed_weather_daily_1896_2023.tar.gz
|
||||
|
||||
# Option: Validate the checksum
|
||||
md5sum preprocessed_weather_daily_1896_2023.tar.gz
|
||||
# Checksum should be equal to: 11b484f5bd9ddafec5cfb131eb2dd008
|
||||
|
||||
tar -xzvf preprocessed_weather_daily_1896_2023.tar.gz
|
||||
daily_weather_preprocessed_1896_2023.csv
|
||||
|
||||
# Option: Validate the checksum
|
||||
md5sum daily_weather_preprocessed_1896_2023.csv
|
||||
# Checksum should be equal to: 1132248c78195c43d93f843753881754
|
||||
```
|
||||
|
||||
### Original raw data
|
||||
|
||||
The following details are about the steps to download the original raw data to transform and convert as you want.
|
||||
|
||||
#### Download
|
||||
|
||||
To download the original raw data:
|
||||
|
||||
```bash
|
||||
mkdir tw_raw_weather_data && cd tw_raw_weather_data
|
||||
|
||||
wget https://storage.googleapis.com/taiwan-weather-observaiton-datasets/raw_data_weather_daily_1896_2023.tar.gz
|
||||
|
||||
# Option: Validate the checksum
|
||||
md5sum raw_data_weather_daily_1896_2023.tar.gz
|
||||
# Checksum should be equal to: b66b9f137217454d655e3004d7d1b51a
|
||||
|
||||
tar -xzvf raw_data_weather_daily_1896_2023.tar.gz
|
||||
466920_1928.csv
|
||||
466920_1929.csv
|
||||
466920_1930.csv
|
||||
466920_1931.csv
|
||||
...
|
||||
|
||||
# Option: Validate the checksum
|
||||
cat *.csv | md5sum
|
||||
# Checksum should be equal to: b26db404bf84d4063fac42e576464ce1
|
||||
```
|
||||
|
||||
#### Retrieve the Taiwan weather stations
|
||||
|
||||
```bash
|
||||
wget -O weather_sta_list.csv https://github.com/Raingel/weather_station_list/raw/main/data/weather_sta_list.csv
|
||||
|
||||
# Option: Convert the UTF-8-BOM to UTF-8 encoding
|
||||
sed -i '1s/^\xEF\xBB\xBF//' weather_sta_list.csv
|
||||
```
|
||||
|
||||
## Create table schema
|
||||
|
||||
Create the MergeTree table in ClickHouse (from the ClickHouse client).
|
||||
|
||||
```bash
|
||||
CREATE TABLE tw_weather_data (
|
||||
StationId String null,
|
||||
MeasuredDate DateTime64,
|
||||
StnPres Float64 null,
|
||||
SeaPres Float64 null,
|
||||
Tx Float64 null,
|
||||
Td Float64 null,
|
||||
RH Float64 null,
|
||||
WS Float64 null,
|
||||
WD Float64 null,
|
||||
WSGust Float64 null,
|
||||
WDGust Float64 null,
|
||||
Precp Float64 null,
|
||||
PrecpHour Float64 null,
|
||||
SunShine Float64 null,
|
||||
GloblRad Float64 null,
|
||||
TxSoil0cm Float64 null,
|
||||
TxSoil5cm Float64 null,
|
||||
TxSoil10cm Float64 null,
|
||||
TxSoil20cm Float64 null,
|
||||
TxSoil50cm Float64 null,
|
||||
TxSoil100cm Float64 null,
|
||||
TxSoil30cm Float64 null,
|
||||
TxSoil200cm Float64 null,
|
||||
TxSoil300cm Float64 null,
|
||||
TxSoil500cm Float64 null,
|
||||
VaporPressure Float64 null,
|
||||
UVI Float64 null,
|
||||
"Cloud Amount" Float64 null,
|
||||
EvapA Float64 null,
|
||||
Visb Float64 null
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
ORDER BY (MeasuredDate);
|
||||
```
|
||||
|
||||
## Inserting into ClickHouse
|
||||
|
||||
### Inserting from local file
|
||||
|
||||
Data can be inserted from a local file as follows (from the ClickHouse client):
|
||||
|
||||
```sql
|
||||
INSERT INTO tw_weather_data FROM INFILE '/path/to/daily_weather_preprocessed_1896_2023.csv'
|
||||
```
|
||||
|
||||
where `/path/to` represents the specific user path to the local file on the disk.
|
||||
|
||||
And the sample response output is as follows after inserting data into the ClickHouse:
|
||||
|
||||
```response
|
||||
Query id: 90e4b524-6e14-4855-817c-7e6f98fbeabb
|
||||
|
||||
Ok.
|
||||
131985329 rows in set. Elapsed: 71.770 sec. Processed 131.99 million rows, 10.06 GB (1.84 million rows/s., 140.14 MB/s.)
|
||||
Peak memory usage: 583.23 MiB.
|
||||
```
|
||||
|
||||
### Inserting from URL
|
||||
|
||||
```sql
|
||||
INSERT INTO tw_weather_data SELECT *
|
||||
FROM url('https://storage.googleapis.com/taiwan-weather-observaiton-datasets/daily_weather_preprocessed_1896_2023.csv', 'CSVWithNames')
|
||||
|
||||
```
|
||||
To know how to speed this up, please see our blog post on [tuning large data loads](https://clickhouse.com/blog/supercharge-your-clickhouse-data-loads-part2).
|
||||
|
||||
## Check data rows and sizes
|
||||
|
||||
1. Let's see how many rows are inserted:
|
||||
|
||||
```sql
|
||||
SELECT formatReadableQuantity(count())
|
||||
FROM tw_weather_data;
|
||||
```
|
||||
|
||||
```response
|
||||
┌─formatReadableQuantity(count())─┐
|
||||
│ 131.99 million │
|
||||
└─────────────────────────────────┘
|
||||
```
|
||||
|
||||
2. Let's see how much disk space are used for this table:
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
formatReadableSize(sum(bytes)) AS disk_size,
|
||||
formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size
|
||||
FROM system.parts
|
||||
WHERE (`table` = 'tw_weather_data') AND active
|
||||
```
|
||||
|
||||
```response
|
||||
┌─disk_size─┬─uncompressed_size─┐
|
||||
│ 2.13 GiB │ 32.94 GiB │
|
||||
└───────────┴───────────────────┘
|
||||
```
|
||||
|
||||
## Sample queries
|
||||
|
||||
### Q1: Retrieve the highest dew point temperature for each weather station in the specific year
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
StationId,
|
||||
max(Td) AS max_td
|
||||
FROM tw_weather_data
|
||||
WHERE (year(MeasuredDate) = 2023) AND (Td IS NOT NULL)
|
||||
GROUP BY StationId
|
||||
|
||||
┌─StationId─┬─max_td─┐
|
||||
│ 466940 │ 1 │
|
||||
│ 467300 │ 1 │
|
||||
│ 467540 │ 1 │
|
||||
│ 467490 │ 1 │
|
||||
│ 467080 │ 1 │
|
||||
│ 466910 │ 1 │
|
||||
│ 467660 │ 1 │
|
||||
│ 467270 │ 1 │
|
||||
│ 467350 │ 1 │
|
||||
│ 467571 │ 1 │
|
||||
│ 466920 │ 1 │
|
||||
│ 467650 │ 1 │
|
||||
│ 467550 │ 1 │
|
||||
│ 467480 │ 1 │
|
||||
│ 467610 │ 1 │
|
||||
│ 467050 │ 1 │
|
||||
│ 467590 │ 1 │
|
||||
│ 466990 │ 1 │
|
||||
│ 467060 │ 1 │
|
||||
│ 466950 │ 1 │
|
||||
│ 467620 │ 1 │
|
||||
│ 467990 │ 1 │
|
||||
│ 466930 │ 1 │
|
||||
│ 467110 │ 1 │
|
||||
│ 466881 │ 1 │
|
||||
│ 467410 │ 1 │
|
||||
│ 467441 │ 1 │
|
||||
│ 467420 │ 1 │
|
||||
│ 467530 │ 1 │
|
||||
│ 466900 │ 1 │
|
||||
└───────────┴────────┘
|
||||
|
||||
30 rows in set. Elapsed: 0.045 sec. Processed 6.41 million rows, 187.33 MB (143.92 million rows/s., 4.21 GB/s.)
|
||||
```
|
||||
|
||||
### Q2: Raw data fetching with the specific duration time range, fields and weather station
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
StnPres,
|
||||
SeaPres,
|
||||
Tx,
|
||||
Td,
|
||||
RH,
|
||||
WS,
|
||||
WD,
|
||||
WSGust,
|
||||
WDGust,
|
||||
Precp,
|
||||
PrecpHour
|
||||
FROM tw_weather_data
|
||||
WHERE (StationId = 'C0UB10') AND (MeasuredDate >= '2023-12-23') AND (MeasuredDate < '2023-12-24')
|
||||
ORDER BY MeasuredDate ASC
|
||||
LIMIT 10
|
||||
```
|
||||
|
||||
```response
|
||||
┌─StnPres─┬─SeaPres─┬───Tx─┬───Td─┬─RH─┬──WS─┬──WD─┬─WSGust─┬─WDGust─┬─Precp─┬─PrecpHour─┐
|
||||
│ 1029.5 │ ᴺᵁᴸᴸ │ 11.8 │ ᴺᵁᴸᴸ │ 78 │ 2.7 │ 271 │ 5.5 │ 275 │ -99.8 │ -99.8 │
|
||||
│ 1029.8 │ ᴺᵁᴸᴸ │ 12.3 │ ᴺᵁᴸᴸ │ 78 │ 2.7 │ 289 │ 5.5 │ 308 │ -99.8 │ -99.8 │
|
||||
│ 1028.6 │ ᴺᵁᴸᴸ │ 12.3 │ ᴺᵁᴸᴸ │ 79 │ 2.3 │ 251 │ 6.1 │ 289 │ -99.8 │ -99.8 │
|
||||
│ 1028.2 │ ᴺᵁᴸᴸ │ 13 │ ᴺᵁᴸᴸ │ 75 │ 4.3 │ 312 │ 7.5 │ 316 │ -99.8 │ -99.8 │
|
||||
│ 1027.8 │ ᴺᵁᴸᴸ │ 11.1 │ ᴺᵁᴸᴸ │ 89 │ 7.1 │ 310 │ 11.6 │ 322 │ -99.8 │ -99.8 │
|
||||
│ 1027.8 │ ᴺᵁᴸᴸ │ 11.6 │ ᴺᵁᴸᴸ │ 90 │ 3.1 │ 269 │ 10.7 │ 295 │ -99.8 │ -99.8 │
|
||||
│ 1027.9 │ ᴺᵁᴸᴸ │ 12.3 │ ᴺᵁᴸᴸ │ 89 │ 4.7 │ 296 │ 8.1 │ 310 │ -99.8 │ -99.8 │
|
||||
│ 1028.2 │ ᴺᵁᴸᴸ │ 12.2 │ ᴺᵁᴸᴸ │ 94 │ 2.5 │ 246 │ 7.1 │ 283 │ -99.8 │ -99.8 │
|
||||
│ 1028.4 │ ᴺᵁᴸᴸ │ 12.5 │ ᴺᵁᴸᴸ │ 94 │ 3.1 │ 265 │ 4.8 │ 297 │ -99.8 │ -99.8 │
|
||||
│ 1028.3 │ ᴺᵁᴸᴸ │ 13.6 │ ᴺᵁᴸᴸ │ 91 │ 1.2 │ 273 │ 4.4 │ 256 │ -99.8 │ -99.8 │
|
||||
└─────────┴─────────┴──────┴──────┴────┴─────┴─────┴────────┴────────┴───────┴───────────┘
|
||||
|
||||
10 rows in set. Elapsed: 0.009 sec. Processed 91.70 thousand rows, 2.33 MB (9.67 million rows/s., 245.31 MB/s.)
|
||||
```
|
||||
|
||||
## Credits
|
||||
|
||||
We would like to acknowledge the efforts of the Central Weather Administration and Agricultural Meteorological Observation Network (Station) of the Council of Agriculture for preparing, cleaning, and distributing this dataset. We appreciate your efforts.
|
||||
|
||||
Ou, J.-H., Kuo, C.-H., Wu, Y.-F., Lin, G.-C., Lee, M.-H., Chen, R.-K., Chou, H.-P., Wu, H.-Y., Chu, S.-C., Lai, Q.-J., Tsai, Y.-C., Lin, C.-C., Kuo, C.-C., Liao, C.-T., Chen, Y.-N., Chu, Y.-W., Chen, C.-Y., 2023. Application-oriented deep learning model for early warning of rice blast in Taiwan. Ecological Informatics 73, 101950. https://doi.org/10.1016/j.ecoinf.2022.101950 [13/12/2022]
|
@ -178,7 +178,7 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va
|
||||
- `--password` – The password. Default value: empty string.
|
||||
- `--ask-password` - Prompt the user to enter a password.
|
||||
- `--query, -q` – The query to process when using non-interactive mode. `--query` can be specified multiple times, e.g. `--query "SELECT 1" --query "SELECT 2"`. Cannot be used simultaneously with `--queries-file`.
|
||||
- `--queries-file` – file path with queries to execute. `--queries-file` can be specified multiple times, e.g. `--query queries1.sql --query queries2.sql`. Cannot be used simultaneously with `--query`.
|
||||
- `--queries-file` – file path with queries to execute. `--queries-file` can be specified multiple times, e.g. `--queries-file queries1.sql --queries-file queries2.sql`. Cannot be used simultaneously with `--query`.
|
||||
- `--multiquery, -n` – If specified, multiple queries separated by semicolons can be listed after the `--query` option. For convenience, it is also possible to omit `--query` and pass the queries directly after `--multiquery`.
|
||||
- `--multiline, -m` – If specified, allow multiline queries (do not send the query on Enter).
|
||||
- `--database, -d` – Select the current default database. Default value: the current database from the server settings (‘default’ by default).
|
||||
|
@ -4,6 +4,67 @@ sidebar_label: Polygons
|
||||
title: "Functions for Working with Polygons"
|
||||
---
|
||||
|
||||
## WKT
|
||||
|
||||
Returns a WKT (Well Known Text) geometric object from various [Geo Data Types](../../data-types/geo.md). Supported WKT objects are:
|
||||
|
||||
- POINT
|
||||
- POLYGON
|
||||
- MULTIPOLYGON
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
WKT(geo_data)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
`geo_data` can be one of the following [Geo Data Types](../../data-types/geo.md) or their underlying primitive types:
|
||||
|
||||
- [Point](../../data-types/geo.md#point)
|
||||
- [Ring](../../data-types/geo.md#ring)
|
||||
- [Polygon](../../data-types/geo.md#polygon)
|
||||
- [MultiPolygon](../../data-types/geo.md#multipolygon)
|
||||
|
||||
**Returned value**
|
||||
|
||||
- WKT geometric object `POINT` is returned for a Point.
|
||||
- WKT geometric object `POLYGON` is returned for a Polygon
|
||||
- WKT geometric object `MULTIPOLYGON` is returned for a MultiPolygon.
|
||||
|
||||
**Examples**
|
||||
|
||||
POINT from tuple:
|
||||
|
||||
```sql
|
||||
SELECT wkt((0., 0.));
|
||||
```
|
||||
|
||||
```response
|
||||
POINT(0 0)
|
||||
```
|
||||
|
||||
POLYGON from an array of tuples or an array of tuple arrays:
|
||||
|
||||
```sql
|
||||
SELECT wkt([(0., 0.), (10., 0.), (10., 10.), (0., 10.)]);
|
||||
```
|
||||
|
||||
```response
|
||||
POLYGON((0 0,10 0,10 10,0 10))
|
||||
```
|
||||
|
||||
MULTIPOLYGON from an array of multi-dimensional tuple arrays:
|
||||
|
||||
```sql
|
||||
SELECT wkt([[[(0., 0.), (10., 0.), (10., 10.), (0., 10.)], [(4., 4.), (5., 4.), (5., 5.), (4., 5.)]], [[(-10., -10.), (-10., -9.), (-9., 10.)]]]);
|
||||
```
|
||||
|
||||
```response
|
||||
MULTIPOLYGON(((0 0,10 0,10 10,0 10,0 0),(4 4,5 4,5 5,4 5,4 4)),((-10 -10,-10 -9,-9 10,-10 -10)))
|
||||
```
|
||||
|
||||
## readWKTMultiPolygon
|
||||
|
||||
Converts a WKT (Well Known Text) MultiPolygon into a MultiPolygon type.
|
||||
|
@ -1,6 +1,7 @@
|
||||
#include <Backups/BackupCoordinationRemote.h>
|
||||
|
||||
#include <base/hex.h>
|
||||
#include <boost/algorithm/string/split.hpp>
|
||||
|
||||
#include <Access/Common/AccessEntityType.h>
|
||||
#include <Backups/BackupCoordinationReplicatedAccess.h>
|
||||
|
@ -109,6 +109,9 @@ public:
|
||||
|
||||
using Base::Base;
|
||||
|
||||
FixedHashMap() = default;
|
||||
FixedHashMap(size_t ) {} /// NOLINT
|
||||
|
||||
template <typename Func, bool>
|
||||
void ALWAYS_INLINE mergeToViaEmplace(Self & that, Func && func)
|
||||
{
|
||||
|
@ -38,6 +38,7 @@ public:
|
||||
Impl impls[NUM_BUCKETS];
|
||||
|
||||
TwoLevelStringHashTable() = default;
|
||||
TwoLevelStringHashTable(size_t ) {} /// NOLINT
|
||||
|
||||
template <typename Source>
|
||||
explicit TwoLevelStringHashTable(const Source & src)
|
||||
|
@ -17,6 +17,7 @@
|
||||
#include <Common/NamedCollections/NamedCollections.h>
|
||||
#include <Common/NamedCollections/NamedCollectionConfiguration.h>
|
||||
|
||||
#include <filesystem>
|
||||
|
||||
namespace fs = std::filesystem;
|
||||
|
||||
|
@ -86,12 +86,12 @@ static std::atomic<int> num_cpus = 0;
|
||||
static std::atomic<double> NAME##_before_yield_probability = 0; \
|
||||
static std::atomic<double> NAME##_before_migrate_probability = 0; \
|
||||
static std::atomic<double> NAME##_before_sleep_probability = 0; \
|
||||
static std::atomic<double> NAME##_before_sleep_time_us = 0; \
|
||||
static std::atomic<double> NAME##_before_sleep_time_us_max = 0; \
|
||||
\
|
||||
static std::atomic<double> NAME##_after_yield_probability = 0; \
|
||||
static std::atomic<double> NAME##_after_migrate_probability = 0; \
|
||||
static std::atomic<double> NAME##_after_sleep_probability = 0; \
|
||||
static std::atomic<double> NAME##_after_sleep_time_us = 0;
|
||||
static std::atomic<double> NAME##_after_sleep_time_us_max = 0;
|
||||
|
||||
FOR_EACH_WRAPPED_FUNCTION(DEFINE_WRAPPER_PARAMS)
|
||||
|
||||
@ -110,7 +110,7 @@ void ThreadFuzzer::initConfiguration()
|
||||
initFromEnv(yield_probability, "THREAD_FUZZER_YIELD_PROBABILITY");
|
||||
initFromEnv(migrate_probability, "THREAD_FUZZER_MIGRATE_PROBABILITY");
|
||||
initFromEnv(sleep_probability, "THREAD_FUZZER_SLEEP_PROBABILITY");
|
||||
initFromEnv(sleep_time_us, "THREAD_FUZZER_SLEEP_TIME_US");
|
||||
initFromEnv(sleep_time_us_max, "THREAD_FUZZER_SLEEP_TIME_US_MAX");
|
||||
initFromEnv(explicit_sleep_probability, "THREAD_FUZZER_EXPLICIT_SLEEP_PROBABILITY");
|
||||
initFromEnv(explicit_memory_exception_probability, "THREAD_FUZZER_EXPLICIT_MEMORY_EXCEPTION_PROBABILITY");
|
||||
|
||||
@ -119,13 +119,12 @@ void ThreadFuzzer::initConfiguration()
|
||||
initFromEnv(NAME##_before_yield_probability, "THREAD_FUZZER_" #NAME "_BEFORE_YIELD_PROBABILITY"); \
|
||||
initFromEnv(NAME##_before_migrate_probability, "THREAD_FUZZER_" #NAME "_BEFORE_MIGRATE_PROBABILITY"); \
|
||||
initFromEnv(NAME##_before_sleep_probability, "THREAD_FUZZER_" #NAME "_BEFORE_SLEEP_PROBABILITY"); \
|
||||
initFromEnv(NAME##_before_sleep_time_us, "THREAD_FUZZER_" #NAME "_BEFORE_SLEEP_TIME_US"); \
|
||||
initFromEnv(NAME##_before_sleep_time_us_max, "THREAD_FUZZER_" #NAME "_BEFORE_SLEEP_TIME_US_MAX"); \
|
||||
\
|
||||
initFromEnv(NAME##_after_yield_probability, "THREAD_FUZZER_" #NAME "_AFTER_YIELD_PROBABILITY"); \
|
||||
initFromEnv(NAME##_after_migrate_probability, "THREAD_FUZZER_" #NAME "_AFTER_MIGRATE_PROBABILITY"); \
|
||||
initFromEnv(NAME##_after_sleep_probability, "THREAD_FUZZER_" #NAME "_AFTER_SLEEP_PROBABILITY"); \
|
||||
initFromEnv(NAME##_after_sleep_time_us, "THREAD_FUZZER_" #NAME "_AFTER_SLEEP_TIME_US");
|
||||
|
||||
initFromEnv(NAME##_after_sleep_time_us_max, "THREAD_FUZZER_" #NAME "_AFTER_SLEEP_TIME_US_MAX");
|
||||
FOR_EACH_WRAPPED_FUNCTION(INIT_WRAPPER_PARAMS)
|
||||
|
||||
# undef INIT_WRAPPER_PARAMS
|
||||
@ -146,7 +145,7 @@ bool ThreadFuzzer::isEffective() const
|
||||
return true; \
|
||||
if (NAME##_before_sleep_probability.load(std::memory_order_relaxed) > 0.0) \
|
||||
return true; \
|
||||
if (NAME##_before_sleep_time_us.load(std::memory_order_relaxed) > 0.0) \
|
||||
if (NAME##_before_sleep_time_us_max.load(std::memory_order_relaxed) > 0.0) \
|
||||
return true; \
|
||||
\
|
||||
if (NAME##_after_yield_probability.load(std::memory_order_relaxed) > 0.0) \
|
||||
@ -155,7 +154,7 @@ bool ThreadFuzzer::isEffective() const
|
||||
return true; \
|
||||
if (NAME##_after_sleep_probability.load(std::memory_order_relaxed) > 0.0) \
|
||||
return true; \
|
||||
if (NAME##_after_sleep_time_us.load(std::memory_order_relaxed) > 0.0) \
|
||||
if (NAME##_after_sleep_time_us_max.load(std::memory_order_relaxed) > 0.0) \
|
||||
return true;
|
||||
|
||||
FOR_EACH_WRAPPED_FUNCTION(CHECK_WRAPPER_PARAMS)
|
||||
@ -166,7 +165,7 @@ bool ThreadFuzzer::isEffective() const
|
||||
return cpu_time_period_us != 0
|
||||
&& (yield_probability > 0
|
||||
|| migrate_probability > 0
|
||||
|| (sleep_probability > 0 && sleep_time_us > 0));
|
||||
|| (sleep_probability > 0 && sleep_time_us_max > 0));
|
||||
}
|
||||
|
||||
void ThreadFuzzer::stop()
|
||||
@ -190,7 +189,7 @@ static void injectionImpl(
|
||||
double yield_probability,
|
||||
double migrate_probability,
|
||||
double sleep_probability,
|
||||
double sleep_time_us)
|
||||
double sleep_time_us_max)
|
||||
{
|
||||
DENY_ALLOCATIONS_IN_SCOPE;
|
||||
if (!ThreadFuzzer::isStarted())
|
||||
@ -221,10 +220,10 @@ static void injectionImpl(
|
||||
#endif
|
||||
|
||||
if (sleep_probability > 0
|
||||
&& sleep_time_us > 0
|
||||
&& sleep_time_us_max > 0
|
||||
&& std::bernoulli_distribution(sleep_probability)(thread_local_rng))
|
||||
{
|
||||
sleepForNanoseconds(static_cast<uint64_t>(sleep_time_us * 1000));
|
||||
sleepForNanoseconds((thread_local_rng() % static_cast<uint64_t>(sleep_time_us_max)) * 1000); /*may sleep(0)*/
|
||||
}
|
||||
}
|
||||
|
||||
@ -232,19 +231,19 @@ static ALWAYS_INLINE void injection(
|
||||
double yield_probability,
|
||||
double migrate_probability,
|
||||
double sleep_probability,
|
||||
double sleep_time_us)
|
||||
double sleep_time_us_max)
|
||||
{
|
||||
DENY_ALLOCATIONS_IN_SCOPE;
|
||||
if (!ThreadFuzzer::isStarted())
|
||||
return;
|
||||
|
||||
injectionImpl(yield_probability, migrate_probability, sleep_probability, sleep_time_us);
|
||||
injectionImpl(yield_probability, migrate_probability, sleep_probability, sleep_time_us_max);
|
||||
}
|
||||
|
||||
void ThreadFuzzer::maybeInjectSleep()
|
||||
{
|
||||
auto & fuzzer = ThreadFuzzer::instance();
|
||||
injection(fuzzer.yield_probability, fuzzer.migrate_probability, fuzzer.explicit_sleep_probability, fuzzer.sleep_time_us);
|
||||
injection(fuzzer.yield_probability, fuzzer.migrate_probability, fuzzer.explicit_sleep_probability, fuzzer.sleep_time_us_max);
|
||||
}
|
||||
|
||||
/// Sometimes maybeInjectSleep() is not enough and we need to inject an exception.
|
||||
@ -265,7 +264,7 @@ void ThreadFuzzer::signalHandler(int)
|
||||
DENY_ALLOCATIONS_IN_SCOPE;
|
||||
auto saved_errno = errno;
|
||||
auto & fuzzer = ThreadFuzzer::instance();
|
||||
injection(fuzzer.yield_probability, fuzzer.migrate_probability, fuzzer.sleep_probability, fuzzer.sleep_time_us);
|
||||
injection(fuzzer.yield_probability, fuzzer.migrate_probability, fuzzer.sleep_probability, fuzzer.sleep_time_us_max);
|
||||
errno = saved_errno;
|
||||
}
|
||||
|
||||
@ -309,13 +308,13 @@ void ThreadFuzzer::setup() const
|
||||
NAME##_before_yield_probability.load(std::memory_order_relaxed), \
|
||||
NAME##_before_migrate_probability.load(std::memory_order_relaxed), \
|
||||
NAME##_before_sleep_probability.load(std::memory_order_relaxed), \
|
||||
NAME##_before_sleep_time_us.load(std::memory_order_relaxed));
|
||||
NAME##_before_sleep_time_us_max.load(std::memory_order_relaxed));
|
||||
#define INJECTION_AFTER(NAME) \
|
||||
injectionImpl( \
|
||||
NAME##_after_yield_probability.load(std::memory_order_relaxed), \
|
||||
NAME##_after_migrate_probability.load(std::memory_order_relaxed), \
|
||||
NAME##_after_sleep_probability.load(std::memory_order_relaxed), \
|
||||
NAME##_after_sleep_time_us.load(std::memory_order_relaxed));
|
||||
NAME##_after_sleep_time_us_max.load(std::memory_order_relaxed));
|
||||
|
||||
/// ThreadFuzzer intercepts pthread_mutex_lock()/pthread_mutex_unlock().
|
||||
///
|
||||
|
@ -16,7 +16,7 @@ namespace DB
|
||||
* THREAD_FUZZER_YIELD_PROBABILITY - probability to do 'sched_yield'.
|
||||
* THREAD_FUZZER_MIGRATE_PROBABILITY - probability to set CPU affinity to random CPU core.
|
||||
* THREAD_FUZZER_SLEEP_PROBABILITY - probability to sleep.
|
||||
* THREAD_FUZZER_SLEEP_TIME_US - amount of time to sleep in microseconds.
|
||||
* THREAD_FUZZER_SLEEP_TIME_US_MAX - max amount of time to sleep in microseconds, actual sleep time is randomized.
|
||||
*
|
||||
* ThreadFuzzer will do nothing if environment variables are not set accordingly.
|
||||
*
|
||||
@ -33,16 +33,14 @@ namespace DB
|
||||
*
|
||||
* Notes:
|
||||
* - it can be also implemented with instrumentation (example: LLVM Xray) instead of signals.
|
||||
* - we should also make the sleep time random.
|
||||
* - sleep and migration obviously helps, but the effect of yield is unclear.
|
||||
*
|
||||
* In addition, we allow to inject glitches around thread synchronization functions.
|
||||
* Example:
|
||||
*
|
||||
* THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY=0.001
|
||||
* THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US=10000
|
||||
* THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US_MAX=10000
|
||||
* THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001
|
||||
* THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US=10000
|
||||
* THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US_MAX=10000
|
||||
*/
|
||||
class ThreadFuzzer
|
||||
{
|
||||
@ -67,7 +65,8 @@ private:
|
||||
double yield_probability = 0;
|
||||
double migrate_probability = 0;
|
||||
double sleep_probability = 0;
|
||||
double sleep_time_us = 0;
|
||||
double sleep_time_us_max = 0;
|
||||
|
||||
double explicit_sleep_probability = 0;
|
||||
double explicit_memory_exception_probability = 0;
|
||||
|
||||
|
@ -12,7 +12,7 @@
|
||||
/** Proves that ThreadFuzzer helps to find concurrency bugs.
|
||||
*
|
||||
* for i in {1..10}; do ./chaos_sanitizer 1000000; done
|
||||
* for i in {1..10}; do THREAD_FUZZER_CPU_TIME_PERIOD_US=1000 THREAD_FUZZER_SLEEP_PROBABILITY=0.1 THREAD_FUZZER_SLEEP_TIME_US=100000 ./chaos_sanitizer 1000000; done
|
||||
* for i in {1..10}; do THREAD_FUZZER_CPU_TIME_PERIOD_US=1000 THREAD_FUZZER_SLEEP_PROBABILITY=0.1 THREAD_FUZZER_SLEEP_TIME_US_MAX=100000 ./chaos_sanitizer 1000000; done
|
||||
*/
|
||||
int main(int argc, char ** argv)
|
||||
{
|
||||
|
@ -23,6 +23,9 @@
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Common/Macros.h>
|
||||
|
||||
#include <filesystem>
|
||||
|
||||
namespace fs = std::filesystem;
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
@ -190,14 +190,26 @@ public:
|
||||
{
|
||||
if (col_haystack_const && col_needle_const)
|
||||
{
|
||||
const auto is_col_start_pos_const = !column_start_pos || isColumnConst(*column_start_pos);
|
||||
auto column_start_position_arg = column_start_pos;
|
||||
bool is_col_start_pos_const = false;
|
||||
if (column_start_pos)
|
||||
{
|
||||
if (const ColumnConst * const_column_start_pos = typeid_cast<const ColumnConst *>(&*column_start_pos))
|
||||
{
|
||||
is_col_start_pos_const = true;
|
||||
column_start_position_arg = const_column_start_pos->getDataColumnPtr();
|
||||
}
|
||||
}
|
||||
else
|
||||
is_col_start_pos_const = true;
|
||||
|
||||
vec_res.resize(is_col_start_pos_const ? 1 : column_start_pos->size());
|
||||
const auto null_map = create_null_map();
|
||||
|
||||
Impl::constantConstant(
|
||||
col_haystack_const->getValue<String>(),
|
||||
col_needle_const->getValue<String>(),
|
||||
column_start_pos,
|
||||
column_start_position_arg,
|
||||
vec_res,
|
||||
null_map.get());
|
||||
|
||||
|
@ -6,6 +6,7 @@
|
||||
#include <Columns/ColumnVector.h>
|
||||
#include <Columns/ColumnsNumber.h>
|
||||
#include <Columns/ColumnConst.h>
|
||||
#include <Columns/ColumnsNumber.h>
|
||||
#include <Functions/FunctionFactory.h>
|
||||
#include <Functions/IFunction.h>
|
||||
#include <Interpreters/Context.h>
|
||||
|
@ -44,7 +44,7 @@ struct RepeatImpl
|
||||
ColumnString::Offsets & res_offsets,
|
||||
T repeat_time)
|
||||
{
|
||||
repeat_time = repeat_time < 0 ? 0 : repeat_time;
|
||||
repeat_time = repeat_time < 0 ? static_cast<T>(0) : repeat_time;
|
||||
checkRepeatTime(repeat_time);
|
||||
|
||||
UInt64 data_size = 0;
|
||||
@ -76,7 +76,7 @@ struct RepeatImpl
|
||||
res_offsets.assign(offsets);
|
||||
for (UInt64 i = 0; i < col_num.size(); ++i)
|
||||
{
|
||||
T repeat_time = col_num[i] < 0 ? 0 : col_num[i];
|
||||
T repeat_time = col_num[i] < 0 ? static_cast<T>(0) : col_num[i];
|
||||
size_t repeated_size = (offsets[i] - offsets[i - 1] - 1) * repeat_time + 1;
|
||||
checkStringSize(repeated_size);
|
||||
data_size += repeated_size;
|
||||
@ -86,7 +86,7 @@ struct RepeatImpl
|
||||
|
||||
for (UInt64 i = 0; i < col_num.size(); ++i)
|
||||
{
|
||||
T repeat_time = col_num[i] < 0 ? 0 : col_num[i];
|
||||
T repeat_time = col_num[i] < 0 ? static_cast<T>(0) : col_num[i];
|
||||
checkRepeatTime(repeat_time);
|
||||
process(data.data() + offsets[i - 1], res_data.data() + res_offsets[i - 1], offsets[i] - offsets[i - 1], repeat_time);
|
||||
}
|
||||
@ -105,7 +105,7 @@ struct RepeatImpl
|
||||
UInt64 col_size = col_num.size();
|
||||
for (UInt64 i = 0; i < col_size; ++i)
|
||||
{
|
||||
T repeat_time = col_num[i] < 0 ? 0 : col_num[i];
|
||||
T repeat_time = col_num[i] < 0 ? static_cast<T>(0) : col_num[i];
|
||||
size_t repeated_size = str_size * repeat_time + 1;
|
||||
checkStringSize(repeated_size);
|
||||
data_size += repeated_size;
|
||||
@ -114,7 +114,7 @@ struct RepeatImpl
|
||||
res_data.resize(data_size);
|
||||
for (UInt64 i = 0; i < col_size; ++i)
|
||||
{
|
||||
T repeat_time = col_num[i] < 0 ? 0 : col_num[i];
|
||||
T repeat_time = col_num[i] < 0 ? static_cast<T>(0) : col_num[i];
|
||||
checkRepeatTime(repeat_time);
|
||||
process(
|
||||
reinterpret_cast<UInt8 *>(const_cast<char *>(copy_str.data())),
|
||||
@ -169,8 +169,19 @@ class FunctionRepeat : public IFunction
|
||||
template <typename F>
|
||||
static bool castType(const IDataType * type, F && f)
|
||||
{
|
||||
return castTypeToEither<DataTypeInt8, DataTypeInt16, DataTypeInt32, DataTypeInt64,
|
||||
DataTypeUInt8, DataTypeUInt16, DataTypeUInt32, DataTypeUInt64>(type, std::forward<F>(f));
|
||||
return castTypeToEither<
|
||||
DataTypeInt8,
|
||||
DataTypeInt16,
|
||||
DataTypeInt32,
|
||||
DataTypeInt64,
|
||||
DataTypeInt128,
|
||||
DataTypeInt256,
|
||||
DataTypeUInt8,
|
||||
DataTypeUInt16,
|
||||
DataTypeUInt32,
|
||||
DataTypeUInt64,
|
||||
DataTypeUInt128,
|
||||
DataTypeUInt256>(type, std::forward<F>(f));
|
||||
}
|
||||
|
||||
public:
|
||||
@ -208,7 +219,7 @@ public:
|
||||
if (const ColumnConst * col_num_const = checkAndGetColumn<ColumnConst>(col_num.get()))
|
||||
{
|
||||
auto col_res = ColumnString::create();
|
||||
castType(arguments[1].type.get(), [&](const auto & type)
|
||||
auto success = castType(arguments[1].type.get(), [&](const auto & type)
|
||||
{
|
||||
using DataType = std::decay_t<decltype(type)>;
|
||||
using T = typename DataType::FieldType;
|
||||
@ -216,6 +227,11 @@ public:
|
||||
RepeatImpl::vectorStrConstRepeat(col->getChars(), col->getOffsets(), col_res->getChars(), col_res->getOffsets(), times);
|
||||
return true;
|
||||
});
|
||||
|
||||
if (!success)
|
||||
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column type {} of 'n' of function {}",
|
||||
arguments[1].column->getName(), getName());
|
||||
|
||||
return col_res;
|
||||
}
|
||||
else if (castType(arguments[1].type.get(), [&](const auto & type)
|
||||
|
@ -336,8 +336,12 @@ public:
|
||||
|
||||
/// Split ActionsDAG into two DAGs, where first part contains all nodes from split_nodes and their children.
|
||||
/// Execution of first then second parts on block is equivalent to execution of initial DAG.
|
||||
/// First DAG and initial DAG have equal inputs, second DAG and initial DAG has equal outputs.
|
||||
/// Second DAG inputs may contain less inputs then first DAG (but also include other columns).
|
||||
/// Inputs and outputs of original DAG are split between the first and the second DAGs.
|
||||
/// Intermediate result can apper in first outputs and second inputs.
|
||||
/// Example:
|
||||
/// initial DAG : (a, b, c, d, e) -> (w, x, y, z) | 1 a 2 b 3 c 4 d 5 e 6 -> 1 2 3 4 5 6 w x y z
|
||||
/// split (first) : (a, c, d) -> (i, j, k, w, y) | 1 a 2 b 3 c 4 d 5 e 6 -> 1 2 b 3 4 5 e 6 i j k w y
|
||||
/// split (second) : (i, j, k, y, b, e) -> (x, y, z) | 1 2 b 3 4 5 e 6 i j k w y -> 1 2 3 4 5 6 w x y z
|
||||
SplitResult split(std::unordered_set<const Node *> split_nodes, bool create_split_nodes_mapping = false) const;
|
||||
|
||||
/// Splits actions into two parts. Returned first half may be swapped with ARRAY JOIN.
|
||||
|
142
src/Interpreters/AggregatedData.h
Normal file
142
src/Interpreters/AggregatedData.h
Normal file
@ -0,0 +1,142 @@
|
||||
#pragma once
|
||||
#include <AggregateFunctions/IAggregateFunction.h>
|
||||
|
||||
#include <Common/HashTable/FixedHashMap.h>
|
||||
#include <Common/HashTable/StringHashMap.h>
|
||||
#include <Common/HashTable/TwoLevelHashMap.h>
|
||||
#include <Common/HashTable/TwoLevelStringHashMap.h>
|
||||
namespace DB
|
||||
{
|
||||
/** Different data structures that can be used for aggregation
|
||||
* For efficiency, the aggregation data itself is put into the pool.
|
||||
* Data and pool ownership (states of aggregate functions)
|
||||
* is acquired later - in `convertToBlocks` function, by the ColumnAggregateFunction object.
|
||||
*
|
||||
* Most data structures exist in two versions: normal and two-level (TwoLevel).
|
||||
* A two-level hash table works a little slower with a small number of different keys,
|
||||
* but with a large number of different keys scales better, because it allows
|
||||
* parallelize some operations (merging, post-processing) in a natural way.
|
||||
*
|
||||
* To ensure efficient work over a wide range of conditions,
|
||||
* first single-level hash tables are used,
|
||||
* and when the number of different keys is large enough,
|
||||
* they are converted to two-level ones.
|
||||
*
|
||||
* PS. There are many different approaches to the effective implementation of parallel and distributed aggregation,
|
||||
* best suited for different cases, and this approach is just one of them, chosen for a combination of reasons.
|
||||
*/
|
||||
|
||||
using AggregatedDataWithoutKey = AggregateDataPtr;
|
||||
|
||||
using AggregatedDataWithUInt8Key = FixedImplicitZeroHashMapWithCalculatedSize<UInt8, AggregateDataPtr>;
|
||||
using AggregatedDataWithUInt16Key = FixedImplicitZeroHashMap<UInt16, AggregateDataPtr>;
|
||||
|
||||
using AggregatedDataWithUInt32Key = HashMap<UInt32, AggregateDataPtr, HashCRC32<UInt32>>;
|
||||
using AggregatedDataWithUInt64Key = HashMap<UInt64, AggregateDataPtr, HashCRC32<UInt64>>;
|
||||
|
||||
using AggregatedDataWithShortStringKey = StringHashMap<AggregateDataPtr>;
|
||||
|
||||
using AggregatedDataWithStringKey = HashMapWithSavedHash<StringRef, AggregateDataPtr>;
|
||||
|
||||
using AggregatedDataWithKeys128 = HashMap<UInt128, AggregateDataPtr, UInt128HashCRC32>;
|
||||
using AggregatedDataWithKeys256 = HashMap<UInt256, AggregateDataPtr, UInt256HashCRC32>;
|
||||
|
||||
using AggregatedDataWithUInt32KeyTwoLevel = TwoLevelHashMap<UInt32, AggregateDataPtr, HashCRC32<UInt32>>;
|
||||
using AggregatedDataWithUInt64KeyTwoLevel = TwoLevelHashMap<UInt64, AggregateDataPtr, HashCRC32<UInt64>>;
|
||||
|
||||
using AggregatedDataWithShortStringKeyTwoLevel = TwoLevelStringHashMap<AggregateDataPtr>;
|
||||
|
||||
using AggregatedDataWithStringKeyTwoLevel = TwoLevelHashMapWithSavedHash<StringRef, AggregateDataPtr>;
|
||||
|
||||
using AggregatedDataWithKeys128TwoLevel = TwoLevelHashMap<UInt128, AggregateDataPtr, UInt128HashCRC32>;
|
||||
using AggregatedDataWithKeys256TwoLevel = TwoLevelHashMap<UInt256, AggregateDataPtr, UInt256HashCRC32>;
|
||||
|
||||
/** Variants with better hash function, using more than 32 bits for hash.
|
||||
* Using for merging phase of external aggregation, where number of keys may be far greater than 4 billion,
|
||||
* but we keep in memory and merge only sub-partition of them simultaneously.
|
||||
* TODO We need to switch for better hash function not only for external aggregation,
|
||||
* but also for huge aggregation results on machines with terabytes of RAM.
|
||||
*/
|
||||
|
||||
using AggregatedDataWithUInt64KeyHash64 = HashMap<UInt64, AggregateDataPtr, DefaultHash<UInt64>>;
|
||||
using AggregatedDataWithStringKeyHash64 = HashMapWithSavedHash<StringRef, AggregateDataPtr, StringRefHash64>;
|
||||
using AggregatedDataWithKeys128Hash64 = HashMap<UInt128, AggregateDataPtr, UInt128Hash>;
|
||||
using AggregatedDataWithKeys256Hash64 = HashMap<UInt256, AggregateDataPtr, UInt256Hash>;
|
||||
|
||||
template <typename Base>
|
||||
struct AggregationDataWithNullKey : public Base
|
||||
{
|
||||
using Base::Base;
|
||||
|
||||
bool & hasNullKeyData() { return has_null_key_data; }
|
||||
AggregateDataPtr & getNullKeyData() { return null_key_data; }
|
||||
bool hasNullKeyData() const { return has_null_key_data; }
|
||||
const AggregateDataPtr & getNullKeyData() const { return null_key_data; }
|
||||
size_t size() const { return Base::size() + (has_null_key_data ? 1 : 0); }
|
||||
bool empty() const { return Base::empty() && !has_null_key_data; }
|
||||
void clear()
|
||||
{
|
||||
Base::clear();
|
||||
has_null_key_data = false;
|
||||
}
|
||||
void clearAndShrink()
|
||||
{
|
||||
Base::clearAndShrink();
|
||||
has_null_key_data = false;
|
||||
}
|
||||
|
||||
private:
|
||||
bool has_null_key_data = false;
|
||||
AggregateDataPtr null_key_data = nullptr;
|
||||
};
|
||||
|
||||
template <typename Base>
|
||||
struct AggregationDataWithNullKeyTwoLevel : public Base
|
||||
{
|
||||
using Base::Base;
|
||||
using Base::impls;
|
||||
|
||||
AggregationDataWithNullKeyTwoLevel() = default;
|
||||
|
||||
template <typename Other>
|
||||
explicit AggregationDataWithNullKeyTwoLevel(const Other & other) : Base(other)
|
||||
{
|
||||
impls[0].hasNullKeyData() = other.hasNullKeyData();
|
||||
impls[0].getNullKeyData() = other.getNullKeyData();
|
||||
}
|
||||
|
||||
bool & hasNullKeyData() { return impls[0].hasNullKeyData(); }
|
||||
AggregateDataPtr & getNullKeyData() { return impls[0].getNullKeyData(); }
|
||||
bool hasNullKeyData() const { return impls[0].hasNullKeyData(); }
|
||||
const AggregateDataPtr & getNullKeyData() const { return impls[0].getNullKeyData(); }
|
||||
};
|
||||
|
||||
template <typename ... Types>
|
||||
using HashTableWithNullKey = AggregationDataWithNullKey<HashMapTable<Types ...>>;
|
||||
template <typename ... Types>
|
||||
using StringHashTableWithNullKey = AggregationDataWithNullKey<StringHashMap<Types ...>>;
|
||||
|
||||
using AggregatedDataWithNullableUInt8Key = AggregationDataWithNullKey<AggregatedDataWithUInt8Key>;
|
||||
using AggregatedDataWithNullableUInt16Key = AggregationDataWithNullKey<AggregatedDataWithUInt16Key>;
|
||||
using AggregatedDataWithNullableUInt32Key = AggregationDataWithNullKey<AggregatedDataWithUInt32Key>;
|
||||
|
||||
|
||||
using AggregatedDataWithNullableUInt64Key = AggregationDataWithNullKey<AggregatedDataWithUInt64Key>;
|
||||
using AggregatedDataWithNullableStringKey = AggregationDataWithNullKey<AggregatedDataWithStringKey>;
|
||||
using AggregatedDataWithNullableShortStringKey = AggregationDataWithNullKey<AggregatedDataWithShortStringKey>;
|
||||
|
||||
|
||||
using AggregatedDataWithNullableUInt32KeyTwoLevel = AggregationDataWithNullKeyTwoLevel<
|
||||
TwoLevelHashMap<UInt32, AggregateDataPtr, HashCRC32<UInt32>,
|
||||
TwoLevelHashTableGrower<>, HashTableAllocator, HashTableWithNullKey>>;
|
||||
using AggregatedDataWithNullableUInt64KeyTwoLevel = AggregationDataWithNullKeyTwoLevel<
|
||||
TwoLevelHashMap<UInt64, AggregateDataPtr, HashCRC32<UInt64>,
|
||||
TwoLevelHashTableGrower<>, HashTableAllocator, HashTableWithNullKey>>;
|
||||
|
||||
using AggregatedDataWithNullableShortStringKeyTwoLevel = AggregationDataWithNullKeyTwoLevel<
|
||||
TwoLevelStringHashMap<AggregateDataPtr, HashTableAllocator, StringHashTableWithNullKey>>;
|
||||
|
||||
using AggregatedDataWithNullableStringKeyTwoLevel = AggregationDataWithNullKeyTwoLevel<
|
||||
TwoLevelHashMapWithSavedHash<StringRef, AggregateDataPtr, DefaultHash<StringRef>,
|
||||
TwoLevelHashTableGrower<>, HashTableAllocator, HashTableWithNullKey>>;
|
||||
}
|
256
src/Interpreters/AggregatedDataVariants.cpp
Normal file
256
src/Interpreters/AggregatedDataVariants.cpp
Normal file
@ -0,0 +1,256 @@
|
||||
#include <Interpreters/AggregatedDataVariants.h>
|
||||
#include <Interpreters/Aggregator.h>
|
||||
#include <Poco/Logger.h>
|
||||
#include <Common/logger_useful.h>
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event AggregationPreallocatedElementsInHashTables;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int UNKNOWN_AGGREGATED_DATA_VARIANT;
|
||||
extern const int LOGICAL_ERROR;
|
||||
|
||||
}
|
||||
using ColumnsHashing::HashMethodContext;
|
||||
using ColumnsHashing::HashMethodContextPtr;
|
||||
|
||||
AggregatedDataVariants::AggregatedDataVariants() : aggregates_pools(1, std::make_shared<Arena>()), aggregates_pool(aggregates_pools.back().get()) {}
|
||||
|
||||
AggregatedDataVariants::~AggregatedDataVariants()
|
||||
{
|
||||
if (aggregator && !aggregator->all_aggregates_has_trivial_destructor)
|
||||
{
|
||||
try
|
||||
{
|
||||
aggregator->destroyAllAggregateStates(*this);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The std::is_constructible trait isn't suitable here because some classes have template constructors with semantics different from providing size hints.
|
||||
// Also string hash table variants are not supported due to the fact that both local perf tests and tests in CI showed slowdowns for them.
|
||||
template <typename...>
|
||||
struct HasConstructorOfNumberOfElements : std::false_type
|
||||
{
|
||||
};
|
||||
|
||||
template <typename... Ts>
|
||||
struct HasConstructorOfNumberOfElements<HashMapTable<Ts...>> : std::true_type
|
||||
{
|
||||
};
|
||||
|
||||
template <typename Key, typename Cell, typename Hash, typename Grower, typename Allocator, template <typename...> typename ImplTable>
|
||||
struct HasConstructorOfNumberOfElements<TwoLevelHashMapTable<Key, Cell, Hash, Grower, Allocator, ImplTable>> : std::true_type
|
||||
{
|
||||
};
|
||||
|
||||
template <typename... Ts>
|
||||
struct HasConstructorOfNumberOfElements<HashTable<Ts...>> : std::true_type
|
||||
{
|
||||
};
|
||||
|
||||
template <typename... Ts>
|
||||
struct HasConstructorOfNumberOfElements<TwoLevelHashTable<Ts...>> : std::true_type
|
||||
{
|
||||
};
|
||||
|
||||
template <template <typename> typename Method, typename Base>
|
||||
struct HasConstructorOfNumberOfElements<Method<Base>> : HasConstructorOfNumberOfElements<Base>
|
||||
{
|
||||
};
|
||||
|
||||
template <typename Method>
|
||||
auto constructWithReserveIfPossible(size_t size_hint)
|
||||
{
|
||||
if constexpr (HasConstructorOfNumberOfElements<typename Method::Data>::value)
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::AggregationPreallocatedElementsInHashTables, size_hint);
|
||||
return std::make_unique<Method>(size_hint);
|
||||
}
|
||||
else
|
||||
return std::make_unique<Method>();
|
||||
}
|
||||
|
||||
void AggregatedDataVariants::init(Type type_, std::optional<size_t> size_hint)
|
||||
{
|
||||
switch (type_)
|
||||
{
|
||||
case Type::EMPTY:
|
||||
case Type::without_key:
|
||||
break;
|
||||
|
||||
#define M(NAME, IS_TWO_LEVEL) \
|
||||
case Type::NAME: \
|
||||
if (size_hint) \
|
||||
(NAME) = constructWithReserveIfPossible<decltype(NAME)::element_type>(*size_hint); \
|
||||
else \
|
||||
(NAME) = std::make_unique<decltype(NAME)::element_type>(); \
|
||||
break;
|
||||
APPLY_FOR_AGGREGATED_VARIANTS(M)
|
||||
#undef M
|
||||
}
|
||||
|
||||
type = type_;
|
||||
}
|
||||
|
||||
size_t AggregatedDataVariants::size() const
|
||||
{
|
||||
switch (type)
|
||||
{
|
||||
case Type::EMPTY:
|
||||
return 0;
|
||||
case Type::without_key:
|
||||
return 1;
|
||||
|
||||
#define M(NAME, IS_TWO_LEVEL) \
|
||||
case Type::NAME: \
|
||||
return (NAME)->data.size() + (without_key != nullptr);
|
||||
APPLY_FOR_AGGREGATED_VARIANTS(M)
|
||||
#undef M
|
||||
}
|
||||
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
size_t AggregatedDataVariants::sizeWithoutOverflowRow() const
|
||||
{
|
||||
switch (type)
|
||||
{
|
||||
case Type::EMPTY:
|
||||
return 0;
|
||||
case Type::without_key:
|
||||
return 1;
|
||||
|
||||
#define M(NAME, IS_TWO_LEVEL) \
|
||||
case Type::NAME: \
|
||||
return (NAME)->data.size();
|
||||
APPLY_FOR_AGGREGATED_VARIANTS(M)
|
||||
#undef M
|
||||
}
|
||||
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
const char * AggregatedDataVariants::getMethodName() const
|
||||
{
|
||||
switch (type)
|
||||
{
|
||||
case Type::EMPTY:
|
||||
return "EMPTY";
|
||||
case Type::without_key:
|
||||
return "without_key";
|
||||
|
||||
#define M(NAME, IS_TWO_LEVEL) \
|
||||
case Type::NAME: \
|
||||
return #NAME;
|
||||
APPLY_FOR_AGGREGATED_VARIANTS(M)
|
||||
#undef M
|
||||
}
|
||||
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
bool AggregatedDataVariants::isTwoLevel() const
|
||||
{
|
||||
switch (type)
|
||||
{
|
||||
case Type::EMPTY:
|
||||
return false;
|
||||
case Type::without_key:
|
||||
return false;
|
||||
|
||||
#define M(NAME, IS_TWO_LEVEL) \
|
||||
case Type::NAME: \
|
||||
return IS_TWO_LEVEL;
|
||||
APPLY_FOR_AGGREGATED_VARIANTS(M)
|
||||
#undef M
|
||||
}
|
||||
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
bool AggregatedDataVariants::isConvertibleToTwoLevel() const
|
||||
{
|
||||
switch (type)
|
||||
{
|
||||
#define M(NAME) \
|
||||
case Type::NAME: \
|
||||
return true;
|
||||
|
||||
APPLY_FOR_VARIANTS_CONVERTIBLE_TO_TWO_LEVEL(M)
|
||||
|
||||
#undef M
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
void AggregatedDataVariants::convertToTwoLevel()
|
||||
{
|
||||
if (aggregator)
|
||||
LOG_TRACE(aggregator->log, "Converting aggregation data to two-level.");
|
||||
|
||||
switch (type)
|
||||
{
|
||||
#define M(NAME) \
|
||||
case Type::NAME: \
|
||||
NAME ## _two_level = std::make_unique<decltype(NAME ## _two_level)::element_type>(*(NAME)); \
|
||||
(NAME).reset(); \
|
||||
type = Type::NAME ## _two_level; \
|
||||
break;
|
||||
|
||||
APPLY_FOR_VARIANTS_CONVERTIBLE_TO_TWO_LEVEL(M)
|
||||
|
||||
#undef M
|
||||
|
||||
default:
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Wrong data variant passed.");
|
||||
}
|
||||
}
|
||||
|
||||
bool AggregatedDataVariants::isLowCardinality() const
|
||||
{
|
||||
switch (type)
|
||||
{
|
||||
#define M(NAME) \
|
||||
case Type::NAME: \
|
||||
return true;
|
||||
|
||||
APPLY_FOR_LOW_CARDINALITY_VARIANTS(M)
|
||||
#undef M
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
HashMethodContextPtr AggregatedDataVariants::createCache(Type type, const HashMethodContext::Settings & settings)
|
||||
{
|
||||
switch (type)
|
||||
{
|
||||
case Type::without_key:
|
||||
return nullptr;
|
||||
|
||||
#define M(NAME, IS_TWO_LEVEL) \
|
||||
case Type::NAME: { \
|
||||
using TPtr##NAME = decltype(AggregatedDataVariants::NAME); \
|
||||
using T##NAME = typename TPtr##NAME ::element_type; \
|
||||
return T##NAME ::State::createContext(settings); \
|
||||
}
|
||||
|
||||
APPLY_FOR_AGGREGATED_VARIANTS(M)
|
||||
#undef M
|
||||
|
||||
default:
|
||||
throw Exception(ErrorCodes::UNKNOWN_AGGREGATED_DATA_VARIANT, "Unknown aggregated data variant.");
|
||||
}
|
||||
}
|
||||
}
|
320
src/Interpreters/AggregatedDataVariants.h
Normal file
320
src/Interpreters/AggregatedDataVariants.h
Normal file
@ -0,0 +1,320 @@
|
||||
#pragma once

#include <boost/noncopyable.hpp>
#include <memory.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <Common/ColumnsHashing.h>
#include <Interpreters/AggregatedData.h>
#include <Interpreters/AggregationMethod.h>

namespace DB
{
class Arena;
class Aggregator;

struct AggregatedDataVariants : private boost::noncopyable
{
    /** Working with states of aggregate functions in the pool is arranged in the following (inconvenient) way:
      * - when aggregating, states are created in the pool using IAggregateFunction::create (inside - `placement new` of arbitrary structure);
      * - they must then be destroyed using IAggregateFunction::destroy (inside - calling the destructor of arbitrary structure);
      * - if aggregation is complete, then, in the Aggregator::convertToBlocks function, pointers to the states of aggregate functions
      *   are written to ColumnAggregateFunction; ColumnAggregateFunction "acquires ownership" of them, that is - calls `destroy` in its destructor.
      * - if during the aggregation, before call to Aggregator::convertToBlocks, an exception was thrown,
      *   then the states of aggregate functions must still be destroyed,
      *   otherwise, for complex states (eg, AggregateFunctionUniq), there will be memory leaks;
      * - in this case, to destroy states, the destructor calls Aggregator::destroyAggregateStates method,
      *   but only if the variable aggregator (see below) is not nullptr;
      * - that is, until you transfer ownership of the aggregate function states in the ColumnAggregateFunction, set the variable `aggregator`,
      *   so that when an exception occurs, the states are correctly destroyed.
      *
      * PS. This can be corrected by making a pool that knows about which states of aggregate functions and in which order are put in it, and knows how to destroy them.
      * But this can hardly be done simply because it is planned to put variable-length strings into the same pool.
      * In this case, the pool will not be able to know with what offsets objects are stored.
      */
    const Aggregator * aggregator = nullptr;

    size_t keys_size{};  /// Number of keys. NOTE do we need this field?
    Sizes key_sizes;     /// Dimensions of keys, if keys of fixed length

    /// Pools for states of aggregate functions. Ownership will be later transferred to ColumnAggregateFunction.
    using ArenaPtr = std::shared_ptr<Arena>;
    using Arenas = std::vector<ArenaPtr>;
    Arenas aggregates_pools;
    Arena * aggregates_pool{};  /// The pool that is currently used for allocation.

    /** Specialization for the case when there are no keys, and for keys not fitted into max_rows_to_group_by.
      */
    AggregatedDataWithoutKey without_key = nullptr;

    /// Stats of a cache for consecutive keys optimization.
    /// Stats can be used to disable the cache in case of a lot of misses.
    ColumnsHashing::LastElementCacheStats consecutive_keys_cache_stats;

    // Disable consecutive key optimization for Uint8/16, because they use a FixedHashMap
    // and the lookup there is almost free, so we don't need to cache the last lookup result
    std::unique_ptr<AggregationMethodOneNumber<UInt8, AggregatedDataWithUInt8Key, false>> key8;
    std::unique_ptr<AggregationMethodOneNumber<UInt16, AggregatedDataWithUInt16Key, false>> key16;

    std::unique_ptr<AggregationMethodOneNumber<UInt32, AggregatedDataWithUInt64Key>> key32;
    std::unique_ptr<AggregationMethodOneNumber<UInt64, AggregatedDataWithUInt64Key>> key64;
    std::unique_ptr<AggregationMethodStringNoCache<AggregatedDataWithShortStringKey>> key_string;
    std::unique_ptr<AggregationMethodFixedStringNoCache<AggregatedDataWithShortStringKey>> key_fixed_string;
    std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithUInt16Key, false, false, false>> keys16;
    std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithUInt32Key>> keys32;
    std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithUInt64Key>> keys64;
    std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithKeys128>> keys128;
    std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithKeys256>> keys256;
    std::unique_ptr<AggregationMethodSerialized<AggregatedDataWithStringKey>> serialized;
    std::unique_ptr<AggregationMethodNullableSerialized<AggregatedDataWithStringKey>> nullable_serialized;
    std::unique_ptr<AggregationMethodPreallocSerialized<AggregatedDataWithStringKey>> prealloc_serialized;
    std::unique_ptr<AggregationMethodNullablePreallocSerialized<AggregatedDataWithStringKey>> nullable_prealloc_serialized;

    std::unique_ptr<AggregationMethodOneNumber<UInt32, AggregatedDataWithUInt64KeyTwoLevel>> key32_two_level;
    std::unique_ptr<AggregationMethodOneNumber<UInt64, AggregatedDataWithUInt64KeyTwoLevel>> key64_two_level;
    std::unique_ptr<AggregationMethodStringNoCache<AggregatedDataWithShortStringKeyTwoLevel>> key_string_two_level;
    std::unique_ptr<AggregationMethodFixedStringNoCache<AggregatedDataWithShortStringKeyTwoLevel>> key_fixed_string_two_level;
    std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithUInt32KeyTwoLevel>> keys32_two_level;
    std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithUInt64KeyTwoLevel>> keys64_two_level;
    std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithKeys128TwoLevel>> keys128_two_level;
    std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithKeys256TwoLevel>> keys256_two_level;
    std::unique_ptr<AggregationMethodSerialized<AggregatedDataWithStringKeyTwoLevel>> serialized_two_level;
    std::unique_ptr<AggregationMethodNullableSerialized<AggregatedDataWithStringKeyTwoLevel>> nullable_serialized_two_level;
    std::unique_ptr<AggregationMethodPreallocSerialized<AggregatedDataWithStringKeyTwoLevel>> prealloc_serialized_two_level;
    std::unique_ptr<AggregationMethodNullablePreallocSerialized<AggregatedDataWithStringKeyTwoLevel>> nullable_prealloc_serialized_two_level;

    std::unique_ptr<AggregationMethodOneNumber<UInt64, AggregatedDataWithUInt64KeyHash64>> key64_hash64;
    std::unique_ptr<AggregationMethodString<AggregatedDataWithStringKeyHash64>> key_string_hash64;
    std::unique_ptr<AggregationMethodFixedString<AggregatedDataWithStringKeyHash64>> key_fixed_string_hash64;
    std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithKeys128Hash64>> keys128_hash64;
    std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithKeys256Hash64>> keys256_hash64;
    std::unique_ptr<AggregationMethodSerialized<AggregatedDataWithStringKeyHash64>> serialized_hash64;
    std::unique_ptr<AggregationMethodNullableSerialized<AggregatedDataWithStringKeyHash64>> nullable_serialized_hash64;
    std::unique_ptr<AggregationMethodPreallocSerialized<AggregatedDataWithStringKeyHash64>> prealloc_serialized_hash64;
    std::unique_ptr<AggregationMethodNullablePreallocSerialized<AggregatedDataWithStringKeyHash64>> nullable_prealloc_serialized_hash64;

    /// Support for nullable keys.
    std::unique_ptr<AggregationMethodOneNumber<UInt8, AggregatedDataWithNullableUInt8Key, false, true>> nullable_key8;
    std::unique_ptr<AggregationMethodOneNumber<UInt16, AggregatedDataWithNullableUInt16Key, false, true>> nullable_key16;
    std::unique_ptr<AggregationMethodOneNumber<UInt32, AggregatedDataWithNullableUInt32Key, true, true>> nullable_key32;
    std::unique_ptr<AggregationMethodOneNumber<UInt64, AggregatedDataWithNullableUInt64Key, true, true>> nullable_key64;
    std::unique_ptr<AggregationMethodOneNumber<UInt32, AggregatedDataWithNullableUInt32KeyTwoLevel, true, true>> nullable_key32_two_level;
    std::unique_ptr<AggregationMethodOneNumber<UInt64, AggregatedDataWithNullableUInt64KeyTwoLevel, true, true>> nullable_key64_two_level;

    std::unique_ptr<AggregationMethodStringNoCache<AggregatedDataWithNullableShortStringKey, true>> nullable_key_string;
    std::unique_ptr<AggregationMethodFixedStringNoCache<AggregatedDataWithNullableShortStringKey, true>> nullable_key_fixed_string;
    std::unique_ptr<AggregationMethodStringNoCache<AggregatedDataWithNullableShortStringKeyTwoLevel, true>> nullable_key_string_two_level;
    std::unique_ptr<AggregationMethodFixedStringNoCache<AggregatedDataWithNullableShortStringKeyTwoLevel, true>> nullable_key_fixed_string_two_level;

    std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithKeys128, true>> nullable_keys128;
    std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithKeys256, true>> nullable_keys256;
    std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithKeys128TwoLevel, true>> nullable_keys128_two_level;
    std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithKeys256TwoLevel, true>> nullable_keys256_two_level;

    /// Support for low cardinality.
    std::unique_ptr<AggregationMethodSingleLowCardinalityColumn<AggregationMethodOneNumber<UInt8, AggregatedDataWithNullableUInt8Key, false>>> low_cardinality_key8;
    std::unique_ptr<AggregationMethodSingleLowCardinalityColumn<AggregationMethodOneNumber<UInt16, AggregatedDataWithNullableUInt16Key, false>>> low_cardinality_key16;
    std::unique_ptr<AggregationMethodSingleLowCardinalityColumn<AggregationMethodOneNumber<UInt32, AggregatedDataWithNullableUInt64Key>>> low_cardinality_key32;
    std::unique_ptr<AggregationMethodSingleLowCardinalityColumn<AggregationMethodOneNumber<UInt64, AggregatedDataWithNullableUInt64Key>>> low_cardinality_key64;
    std::unique_ptr<AggregationMethodSingleLowCardinalityColumn<AggregationMethodString<AggregatedDataWithNullableStringKey>>> low_cardinality_key_string;
    std::unique_ptr<AggregationMethodSingleLowCardinalityColumn<AggregationMethodFixedString<AggregatedDataWithNullableStringKey>>> low_cardinality_key_fixed_string;

    std::unique_ptr<AggregationMethodSingleLowCardinalityColumn<AggregationMethodOneNumber<UInt32, AggregatedDataWithNullableUInt64KeyTwoLevel>>> low_cardinality_key32_two_level;
    std::unique_ptr<AggregationMethodSingleLowCardinalityColumn<AggregationMethodOneNumber<UInt64, AggregatedDataWithNullableUInt64KeyTwoLevel>>> low_cardinality_key64_two_level;
    std::unique_ptr<AggregationMethodSingleLowCardinalityColumn<AggregationMethodString<AggregatedDataWithNullableStringKeyTwoLevel>>> low_cardinality_key_string_two_level;
    std::unique_ptr<AggregationMethodSingleLowCardinalityColumn<AggregationMethodFixedString<AggregatedDataWithNullableStringKeyTwoLevel>>> low_cardinality_key_fixed_string_two_level;

    std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithKeys128, false, true>> low_cardinality_keys128;
    std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithKeys256, false, true>> low_cardinality_keys256;
    std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithKeys128TwoLevel, false, true>> low_cardinality_keys128_two_level;
    std::unique_ptr<AggregationMethodKeysFixed<AggregatedDataWithKeys256TwoLevel, false, true>> low_cardinality_keys256_two_level;

    /// In this and similar macros, the option without_key is not considered.
    #define APPLY_FOR_AGGREGATED_VARIANTS(M) \
        M(key8, false) \
        M(key16, false) \
        M(key32, false) \
        M(key64, false) \
        M(key_string, false) \
        M(key_fixed_string, false) \
        M(keys16, false) \
        M(keys32, false) \
        M(keys64, false) \
        M(keys128, false) \
        M(keys256, false) \
        M(serialized, false) \
        M(nullable_serialized, false) \
        M(prealloc_serialized, false) \
        M(nullable_prealloc_serialized, false) \
        M(key32_two_level, true) \
        M(key64_two_level, true) \
        M(key_string_two_level, true) \
        M(key_fixed_string_two_level, true) \
        M(keys32_two_level, true) \
        M(keys64_two_level, true) \
        M(keys128_two_level, true) \
        M(keys256_two_level, true) \
        M(serialized_two_level, true) \
        M(nullable_serialized_two_level, true) \
        M(prealloc_serialized_two_level, true) \
        M(nullable_prealloc_serialized_two_level, true) \
        M(key64_hash64, false) \
        M(key_string_hash64, false) \
        M(key_fixed_string_hash64, false) \
        M(keys128_hash64, false) \
        M(keys256_hash64, false) \
        M(serialized_hash64, false) \
        M(nullable_serialized_hash64, false) \
        M(prealloc_serialized_hash64, false) \
        M(nullable_prealloc_serialized_hash64, false) \
        M(nullable_key8, false) \
        M(nullable_key16, false) \
        M(nullable_key32, false) \
        M(nullable_key64, false) \
        M(nullable_key32_two_level, true) \
        M(nullable_key64_two_level, true) \
        M(nullable_key_string, false) \
        M(nullable_key_fixed_string, false) \
        M(nullable_key_string_two_level, true) \
        M(nullable_key_fixed_string_two_level, true) \
        M(nullable_keys128, false) \
        M(nullable_keys256, false) \
        M(nullable_keys128_two_level, true) \
        M(nullable_keys256_two_level, true) \
        M(low_cardinality_key8, false) \
        M(low_cardinality_key16, false) \
        M(low_cardinality_key32, false) \
        M(low_cardinality_key64, false) \
        M(low_cardinality_keys128, false) \
        M(low_cardinality_keys256, false) \
        M(low_cardinality_key_string, false) \
        M(low_cardinality_key_fixed_string, false) \
        M(low_cardinality_key32_two_level, true) \
        M(low_cardinality_key64_two_level, true) \
        M(low_cardinality_keys128_two_level, true) \
        M(low_cardinality_keys256_two_level, true) \
        M(low_cardinality_key_string_two_level, true) \
        M(low_cardinality_key_fixed_string_two_level, true) \

    #define APPLY_FOR_VARIANTS_CONVERTIBLE_TO_TWO_LEVEL(M) \
        M(key32) \
        M(key64) \
        M(key_string) \
        M(key_fixed_string) \
        M(keys32) \
        M(keys64) \
        M(keys128) \
        M(keys256) \
        M(serialized) \
        M(nullable_serialized) \
        M(prealloc_serialized) \
        M(nullable_prealloc_serialized) \
        M(nullable_key32) \
        M(nullable_key64) \
        M(nullable_key_string) \
        M(nullable_key_fixed_string) \
        M(nullable_keys128) \
        M(nullable_keys256) \
        M(low_cardinality_key32) \
        M(low_cardinality_key64) \
        M(low_cardinality_keys128) \
        M(low_cardinality_keys256) \
        M(low_cardinality_key_string) \
        M(low_cardinality_key_fixed_string) \

    /// NOLINTNEXTLINE
    #define APPLY_FOR_VARIANTS_NOT_CONVERTIBLE_TO_TWO_LEVEL(M) \
        M(key8) \
        M(key16) \
        M(nullable_key8) \
        M(nullable_key16) \
        M(keys16) \
        M(key64_hash64) \
        M(key_string_hash64) \
        M(key_fixed_string_hash64) \
        M(keys128_hash64) \
        M(keys256_hash64) \
        M(serialized_hash64) \
        M(nullable_serialized_hash64) \
        M(prealloc_serialized_hash64) \
        M(nullable_prealloc_serialized_hash64) \
        M(low_cardinality_key8) \
        M(low_cardinality_key16) \

    /// NOLINTNEXTLINE
    #define APPLY_FOR_VARIANTS_SINGLE_LEVEL(M) \
        APPLY_FOR_VARIANTS_NOT_CONVERTIBLE_TO_TWO_LEVEL(M) \
        APPLY_FOR_VARIANTS_CONVERTIBLE_TO_TWO_LEVEL(M) \

    /// NOLINTNEXTLINE
    #define APPLY_FOR_VARIANTS_TWO_LEVEL(M) \
        M(key32_two_level) \
        M(key64_two_level) \
        M(key_string_two_level) \
        M(key_fixed_string_two_level) \
        M(keys32_two_level) \
        M(keys64_two_level) \
        M(keys128_two_level) \
        M(keys256_two_level) \
        M(serialized_two_level) \
        M(nullable_serialized_two_level) \
        M(prealloc_serialized_two_level) \
        M(nullable_prealloc_serialized_two_level) \
        M(nullable_key32_two_level) \
        M(nullable_key64_two_level) \
        M(nullable_key_string_two_level) \
        M(nullable_key_fixed_string_two_level) \
        M(nullable_keys128_two_level) \
        M(nullable_keys256_two_level) \
        M(low_cardinality_key32_two_level) \
        M(low_cardinality_key64_two_level) \
        M(low_cardinality_keys128_two_level) \
        M(low_cardinality_keys256_two_level) \
        M(low_cardinality_key_string_two_level) \
        M(low_cardinality_key_fixed_string_two_level) \

    #define APPLY_FOR_LOW_CARDINALITY_VARIANTS(M) \
        M(low_cardinality_key8) \
        M(low_cardinality_key16) \
        M(low_cardinality_key32) \
        M(low_cardinality_key64) \
        M(low_cardinality_keys128) \
        M(low_cardinality_keys256) \
        M(low_cardinality_key_string) \
        M(low_cardinality_key_fixed_string) \
        M(low_cardinality_key32_two_level) \
        M(low_cardinality_key64_two_level) \
        M(low_cardinality_keys128_two_level) \
        M(low_cardinality_keys256_two_level) \
        M(low_cardinality_key_string_two_level) \
        M(low_cardinality_key_fixed_string_two_level)

    enum class Type
    {
        EMPTY = 0,
        without_key,

    #define M(NAME, IS_TWO_LEVEL) NAME,
        APPLY_FOR_AGGREGATED_VARIANTS(M)
    #undef M
    };

    Type type = Type::EMPTY;

    AggregatedDataVariants();
    ~AggregatedDataVariants();

    bool empty() const { return type == Type::EMPTY; }
    void invalidate() { type = Type::EMPTY; }

    void init(Type type_, std::optional<size_t> size_hint = std::nullopt);

    /// Number of rows (different keys).
    size_t size() const;
    size_t sizeWithoutOverflowRow() const;
    const char * getMethodName() const;
    bool isTwoLevel() const;
    bool isConvertibleToTwoLevel() const;
    void convertToTwoLevel();
    bool isLowCardinality() const;

    static ColumnsHashing::HashMethodContextPtr createCache(Type type, const ColumnsHashing::HashMethodContext::Settings & settings);
};

using AggregatedDataVariantsPtr = std::shared_ptr<AggregatedDataVariants>;
using ManyAggregatedDataVariants = std::vector<AggregatedDataVariantsPtr>;
using ManyAggregatedDataVariantsPtr = std::shared_ptr<ManyAggregatedDataVariants>;

}
|
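Note on the header above: the Type enum and the dispatch switches (createCache, init, convertToTwoLevel) are all generated from the same APPLY_FOR_* X-macros, so adding a variant in one place keeps the enum and every switch in sync. A minimal, self-contained sketch of that pattern (generic names, not ClickHouse code) follows:

#include <cstdio>

// Illustration of the X-macro dispatch used in AggregatedDataVariants.h.
#define APPLY_FOR_VARIANTS(M) \
    M(key8)  \
    M(key64) \
    M(serialized)

enum class Type
{
    EMPTY = 0,
#define M(NAME) NAME,
    APPLY_FOR_VARIANTS(M)
#undef M
};

const char * typeName(Type t)
{
    switch (t)
    {
#define M(NAME) case Type::NAME: return #NAME;
        APPLY_FOR_VARIANTS(M)
#undef M
        default: return "EMPTY";
    }
}

int main()
{
    std::printf("%s\n", typeName(Type::key64));  // prints "key64"
}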
src/Interpreters/AggregationMethod.cpp (new file, 215 lines)
@ -0,0 +1,215 @@
#include <Interpreters/AggregationMethod.h>

namespace DB
{

template <typename FieldType, typename TData, bool consecutive_keys_optimization, bool nullable>
void AggregationMethodOneNumber<FieldType, TData, consecutive_keys_optimization, nullable>::insertKeyIntoColumns(
    const AggregationMethodOneNumber::Key & key, std::vector<IColumn *> & key_columns, const Sizes & /*key_sizes*/)
{
    ColumnFixedSizeHelper * column;
    if constexpr (nullable)
    {
        ColumnNullable & nullable_col = assert_cast<ColumnNullable &>(*key_columns[0]);
        ColumnUInt8 * null_map = assert_cast<ColumnUInt8 *>(&nullable_col.getNullMapColumn());
        null_map->insertDefault();
        column = static_cast<ColumnFixedSizeHelper *>(&nullable_col.getNestedColumn());
    }
    else
    {
        column = static_cast<ColumnFixedSizeHelper *>(key_columns[0]);
    }
    static_assert(sizeof(FieldType) <= sizeof(Key));
    const auto * key_holder = reinterpret_cast<const char *>(&key);
    if constexpr (sizeof(FieldType) < sizeof(Key) && std::endian::native == std::endian::big)
        column->insertRawData<sizeof(FieldType)>(key_holder + (sizeof(Key) - sizeof(FieldType)));
    else
        column->insertRawData<sizeof(FieldType)>(key_holder);
}

template struct AggregationMethodOneNumber<UInt8, AggregatedDataWithUInt8Key, false>;
template struct AggregationMethodOneNumber<UInt16, AggregatedDataWithUInt16Key, false>;
template struct AggregationMethodOneNumber<UInt32, AggregatedDataWithUInt64Key>;
template struct AggregationMethodOneNumber<UInt64, AggregatedDataWithUInt64Key>;
template struct AggregationMethodOneNumber<UInt32, AggregatedDataWithUInt64KeyTwoLevel>;
template struct AggregationMethodOneNumber<UInt64, AggregatedDataWithUInt64KeyTwoLevel>;
template struct AggregationMethodOneNumber<UInt64, AggregatedDataWithUInt64KeyHash64>;
template struct AggregationMethodOneNumber<UInt8, AggregatedDataWithNullableUInt8Key, false, true>;
template struct AggregationMethodOneNumber<UInt16, AggregatedDataWithNullableUInt16Key, false, true>;
template struct AggregationMethodOneNumber<UInt32, AggregatedDataWithNullableUInt32Key, true, true>;
template struct AggregationMethodOneNumber<UInt64, AggregatedDataWithNullableUInt64Key, true, true>;
template struct AggregationMethodOneNumber<UInt32, AggregatedDataWithNullableUInt32KeyTwoLevel, true, true>;
template struct AggregationMethodOneNumber<UInt64, AggregatedDataWithNullableUInt64KeyTwoLevel, true, true>;
template struct AggregationMethodOneNumber<UInt8, AggregatedDataWithNullableUInt8Key, false>;
template struct AggregationMethodOneNumber<UInt16, AggregatedDataWithNullableUInt16Key, false>;
template struct AggregationMethodOneNumber<UInt32, AggregatedDataWithNullableUInt64Key>;
template struct AggregationMethodOneNumber<UInt64, AggregatedDataWithNullableUInt64Key>;
template struct AggregationMethodOneNumber<UInt32, AggregatedDataWithNullableUInt64KeyTwoLevel>;
template struct AggregationMethodOneNumber<UInt64, AggregatedDataWithNullableUInt64KeyTwoLevel>;

template <typename TData, bool nullable>
void AggregationMethodStringNoCache<TData, nullable>::insertKeyIntoColumns(StringRef key, std::vector<IColumn *> & key_columns, const Sizes &)
{
    if constexpr (nullable)
    {
        ColumnNullable & column_nullable = assert_cast<ColumnNullable &>(*key_columns[0]);
        assert_cast<ColumnString &>(column_nullable.getNestedColumn()).insertData(key.data, key.size);
        column_nullable.getNullMapData().push_back(0);
    }
    else
    {
        assert_cast<ColumnString &>(*key_columns[0]).insertData(key.data, key.size);
    }
}

template struct AggregationMethodStringNoCache<AggregatedDataWithShortStringKey>;
template struct AggregationMethodStringNoCache<AggregatedDataWithShortStringKeyTwoLevel>;
template struct AggregationMethodStringNoCache<AggregatedDataWithNullableShortStringKey, true>;
template struct AggregationMethodStringNoCache<AggregatedDataWithNullableShortStringKeyTwoLevel, true>;

template <typename TData>
void AggregationMethodFixedString<TData>::insertKeyIntoColumns(StringRef key, std::vector<IColumn *> & key_columns, const Sizes &)
{
    assert_cast<ColumnFixedString &>(*key_columns[0]).insertData(key.data, key.size);
}

template struct AggregationMethodFixedString<AggregatedDataWithStringKeyHash64>;
template struct AggregationMethodFixedString<AggregatedDataWithNullableStringKey>;
template struct AggregationMethodFixedString<AggregatedDataWithNullableStringKeyTwoLevel>;

template <typename TData, bool nullable>
void AggregationMethodFixedStringNoCache<TData, nullable>::insertKeyIntoColumns(StringRef key, std::vector<IColumn *> & key_columns, const Sizes &)
{
    if constexpr (nullable)
        assert_cast<ColumnNullable &>(*key_columns[0]).insertData(key.data, key.size);
    else
        assert_cast<ColumnFixedString &>(*key_columns[0]).insertData(key.data, key.size);
}

template struct AggregationMethodFixedStringNoCache<AggregatedDataWithShortStringKey>;
template struct AggregationMethodFixedStringNoCache<AggregatedDataWithShortStringKeyTwoLevel>;
template struct AggregationMethodFixedStringNoCache<AggregatedDataWithNullableShortStringKey, true>;
template struct AggregationMethodFixedStringNoCache<AggregatedDataWithNullableShortStringKeyTwoLevel, true>;

template <typename SingleColumnMethod>
void AggregationMethodSingleLowCardinalityColumn<SingleColumnMethod>::insertKeyIntoColumns(
    const Key & key, std::vector<IColumn *> & key_columns_low_cardinality, const Sizes & /*key_sizes*/)
{
    auto * col = assert_cast<ColumnLowCardinality *>(key_columns_low_cardinality[0]);

    if constexpr (std::is_same_v<Key, StringRef>)
        col->insertData(key.data, key.size);
    else
        col->insertData(reinterpret_cast<const char *>(&key), sizeof(key));
}

template struct AggregationMethodSingleLowCardinalityColumn<AggregationMethodOneNumber<UInt8, AggregatedDataWithNullableUInt8Key, false>>;
template struct AggregationMethodSingleLowCardinalityColumn<AggregationMethodOneNumber<UInt16, AggregatedDataWithNullableUInt16Key, false>>;
template struct AggregationMethodSingleLowCardinalityColumn<AggregationMethodOneNumber<UInt32, AggregatedDataWithNullableUInt64Key>>;
template struct AggregationMethodSingleLowCardinalityColumn<AggregationMethodOneNumber<UInt64, AggregatedDataWithNullableUInt64Key>>;
template struct AggregationMethodSingleLowCardinalityColumn<AggregationMethodString<AggregatedDataWithNullableStringKey>>;
template struct AggregationMethodSingleLowCardinalityColumn<AggregationMethodFixedString<AggregatedDataWithNullableStringKey>>;
template struct AggregationMethodSingleLowCardinalityColumn<AggregationMethodOneNumber<UInt32, AggregatedDataWithNullableUInt64KeyTwoLevel>>;
template struct AggregationMethodSingleLowCardinalityColumn<AggregationMethodOneNumber<UInt64, AggregatedDataWithNullableUInt64KeyTwoLevel>>;
template struct AggregationMethodSingleLowCardinalityColumn<AggregationMethodString<AggregatedDataWithNullableStringKeyTwoLevel>>;
template struct AggregationMethodSingleLowCardinalityColumn<AggregationMethodFixedString<AggregatedDataWithNullableStringKeyTwoLevel>>;

template <typename TData, bool has_nullable_keys, bool has_low_cardinality, bool consecutive_keys_optimization>
void AggregationMethodKeysFixed<TData, has_nullable_keys, has_low_cardinality, consecutive_keys_optimization>::insertKeyIntoColumns(
    const Key & key, std::vector<IColumn *> & key_columns, const Sizes & key_sizes)
{
    size_t keys_size = key_columns.size();

    static constexpr auto bitmap_size = has_nullable_keys ? std::tuple_size<KeysNullMap<Key>>::value : 0;
    /// In any hash key value, column values to be read start just after the bitmap, if it exists.
    size_t pos = bitmap_size;

    for (size_t i = 0; i < keys_size; ++i)
    {
        IColumn * observed_column;
        ColumnUInt8 * null_map;

        bool column_nullable = false;
        if constexpr (has_nullable_keys)
            column_nullable = isColumnNullable(*key_columns[i]);

        /// If we have a nullable column, get its nested column and its null map.
        if (column_nullable)
        {
            ColumnNullable & nullable_col = assert_cast<ColumnNullable &>(*key_columns[i]);
            observed_column = &nullable_col.getNestedColumn();
            null_map = assert_cast<ColumnUInt8 *>(&nullable_col.getNullMapColumn());
        }
        else
        {
            observed_column = key_columns[i];
            null_map = nullptr;
        }

        bool is_null = false;
        if (column_nullable)
        {
            /// The current column is nullable. Check if the value of the
            /// corresponding key is nullable. Update the null map accordingly.
            size_t bucket = i / 8;
            size_t offset = i % 8;
            UInt8 val = (reinterpret_cast<const UInt8 *>(&key)[bucket] >> offset) & 1;
            null_map->insertValue(val);
            is_null = val == 1;
        }

        if (has_nullable_keys && is_null)
            observed_column->insertDefault();
        else
        {
            size_t size = key_sizes[i];
            size_t offset_to = pos;
            if constexpr (std::endian::native == std::endian::big)
                offset_to = sizeof(Key) - size - pos;
            observed_column->insertData(reinterpret_cast<const char *>(&key) + offset_to, size);
            pos += size;
        }
    }
}

template struct AggregationMethodKeysFixed<AggregatedDataWithUInt16Key, false, false, false>;
template struct AggregationMethodKeysFixed<AggregatedDataWithUInt32Key>;
template struct AggregationMethodKeysFixed<AggregatedDataWithUInt64Key>;
template struct AggregationMethodKeysFixed<AggregatedDataWithKeys128>;
template struct AggregationMethodKeysFixed<AggregatedDataWithKeys256>;
template struct AggregationMethodKeysFixed<AggregatedDataWithUInt32KeyTwoLevel>;
template struct AggregationMethodKeysFixed<AggregatedDataWithUInt64KeyTwoLevel>;
template struct AggregationMethodKeysFixed<AggregatedDataWithKeys128TwoLevel>;
template struct AggregationMethodKeysFixed<AggregatedDataWithKeys256TwoLevel>;
template struct AggregationMethodKeysFixed<AggregatedDataWithKeys128Hash64>;
template struct AggregationMethodKeysFixed<AggregatedDataWithKeys256Hash64>;
template struct AggregationMethodKeysFixed<AggregatedDataWithKeys128, true>;
template struct AggregationMethodKeysFixed<AggregatedDataWithKeys256, true>;
template struct AggregationMethodKeysFixed<AggregatedDataWithKeys128TwoLevel, true>;
template struct AggregationMethodKeysFixed<AggregatedDataWithKeys256TwoLevel, true>;
template struct AggregationMethodKeysFixed<AggregatedDataWithKeys128, false, true>;
template struct AggregationMethodKeysFixed<AggregatedDataWithKeys256, false, true>;
template struct AggregationMethodKeysFixed<AggregatedDataWithKeys128TwoLevel, false, true>;
template struct AggregationMethodKeysFixed<AggregatedDataWithKeys256TwoLevel, false, true>;

template <typename TData, bool nullable, bool prealloc>
void AggregationMethodSerialized<TData, nullable, prealloc>::insertKeyIntoColumns(StringRef key, std::vector<IColumn *> & key_columns, const Sizes &)
{
    const auto * pos = key.data;
    for (auto & column : key_columns)
        pos = column->deserializeAndInsertFromArena(pos);
}

template struct AggregationMethodSerialized<AggregatedDataWithStringKey>;
template struct AggregationMethodSerialized<AggregatedDataWithStringKeyTwoLevel>;
template struct AggregationMethodSerialized<AggregatedDataWithStringKeyHash64>;
// AggregationMethodNullableSerialized
template struct AggregationMethodSerialized<AggregatedDataWithStringKey, true, false>;
template struct AggregationMethodSerialized<AggregatedDataWithStringKeyTwoLevel, true, false>;
template struct AggregationMethodSerialized<AggregatedDataWithStringKeyHash64, true, false>;
// AggregationMethodPreallocSerialized
template struct AggregationMethodSerialized<AggregatedDataWithStringKey, false, true>;
template struct AggregationMethodSerialized<AggregatedDataWithStringKeyTwoLevel, false, true>;
template struct AggregationMethodSerialized<AggregatedDataWithStringKeyHash64, false, true>;
// AggregationMethodNullablePreallocSerialized
template struct AggregationMethodSerialized<AggregatedDataWithStringKey, true, true>;
template struct AggregationMethodSerialized<AggregatedDataWithStringKeyTwoLevel, true, true>;
template struct AggregationMethodSerialized<AggregatedDataWithStringKeyHash64, true, true>;

}
|
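For readers new to AggregationMethodKeysFixed above: the composite key is a single fixed-width value laid out as an optional null bitmap followed by the packed key bytes, and insertKeyIntoColumns walks it back out. A self-contained sketch of that byte layout on a little-endian machine (illustration only, not the ClickHouse helper itself):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main()
{
    uint8_t key[16] = {};          // stand-in for the fixed Key (e.g. a 128-bit value)
    size_t key_sizes[2] = {4, 2};  // one UInt32 key column and one UInt16 key column

    key[0] = 0b10;                 // null bitmap in the leading byte: bit 1 set -> second key is NULL
    uint32_t k0 = 42;
    std::memcpy(key + 1, &k0, sizeof(k0));  // values start right after the bitmap

    size_t pos = 1;                // skip bitmap_size bytes
    for (size_t i = 0; i < 2; ++i)
    {
        bool is_null = (key[i / 8] >> (i % 8)) & 1;   // same bucket/offset arithmetic as above
        if (is_null)
            std::printf("key %zu: NULL\n", i);
        else
        {
            uint64_t v = 0;
            std::memcpy(&v, key + pos, key_sizes[i]); // read key_sizes[i] bytes at the current offset
            std::printf("key %zu: %llu\n", i, (unsigned long long) v);
            pos += key_sizes[i];                      // only non-null values occupy space
        }
    }
}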
src/Interpreters/AggregationMethod.h (new file, 320 lines)
@ -0,0 +1,320 @@
#pragma once

#include <vector>

#include <Common/ColumnsHashing.h>
#include <Interpreters/AggregationCommon.h>
#include <Interpreters/AggregatedData.h>

#include <Columns/ColumnString.h>
#include <Columns/ColumnFixedString.h>
#include <Columns/ColumnAggregateFunction.h>
#include <Columns/ColumnVector.h>
#include <Columns/ColumnNullable.h>
#include <Columns/ColumnLowCardinality.h>

namespace DB
{
class IColumn;

/// For the case where there is one numeric key.
/// FieldType is UInt8/16/32/64 for any type with corresponding bit width.
template <typename FieldType, typename TData,
        bool consecutive_keys_optimization = true, bool nullable = false>
struct AggregationMethodOneNumber
{
    using Data = TData;
    using Key = typename Data::key_type;
    using Mapped = typename Data::mapped_type;

    Data data;

    AggregationMethodOneNumber() = default;

    explicit AggregationMethodOneNumber(size_t size_hint) : data(size_hint) { }

    template <typename Other>
    explicit AggregationMethodOneNumber(const Other & other) : data(other.data)
    {
    }

    /// To use one `Method` in different threads, use different `State`.
    template <bool use_cache>
    using StateImpl = ColumnsHashing::HashMethodOneNumber<
        typename Data::value_type,
        Mapped,
        FieldType,
        use_cache && consecutive_keys_optimization,
        /*need_offset=*/ false,
        nullable>;

    using State = StateImpl<true>;
    using StateNoCache = StateImpl<false>;

    /// Use optimization for low cardinality.
    static const bool low_cardinality_optimization = false;
    static const bool one_key_nullable_optimization = nullable;

    /// Shuffle key columns before `insertKeyIntoColumns` call if needed.
    std::optional<Sizes> shuffleKeyColumns(std::vector<IColumn *> &, const Sizes &) { return {}; }

    // Insert the key from the hash table into columns.
    static void insertKeyIntoColumns(const Key & key, std::vector<IColumn *> & key_columns, const Sizes & /*key_sizes*/);
};

/// For the case where there is one string key.
template <typename TData>
struct AggregationMethodString
{
    using Data = TData;
    using Key = typename Data::key_type;
    using Mapped = typename Data::mapped_type;

    Data data;

    AggregationMethodString() = default;

    template <typename Other>
    explicit AggregationMethodString(const Other & other) : data(other.data)
    {
    }

    explicit AggregationMethodString(size_t size_hint) : data(size_hint) { }

    template <bool use_cache>
    using StateImpl = ColumnsHashing::HashMethodString<typename Data::value_type, Mapped, /*place_string_to_arena=*/ true, use_cache>;

    using State = StateImpl<true>;
    using StateNoCache = StateImpl<false>;

    static const bool low_cardinality_optimization = false;
    static const bool one_key_nullable_optimization = false;

    std::optional<Sizes> shuffleKeyColumns(std::vector<IColumn *> &, const Sizes &) { return {}; }

    static void insertKeyIntoColumns(StringRef key, std::vector<IColumn *> & key_columns, const Sizes &)
    {
        static_cast<ColumnString *>(key_columns[0])->insertData(key.data, key.size);
    }
};

/// Same as above but without cache
template <typename TData, bool nullable = false>
struct AggregationMethodStringNoCache
{
    using Data = TData;
    using Key = typename Data::key_type;
    using Mapped = typename Data::mapped_type;

    Data data;

    AggregationMethodStringNoCache() = default;

    explicit AggregationMethodStringNoCache(size_t size_hint) : data(size_hint) { }

    template <typename Other>
    explicit AggregationMethodStringNoCache(const Other & other) : data(other.data)
    {
    }

    template <bool use_cache>
    using StateImpl = ColumnsHashing::HashMethodString<typename Data::value_type, Mapped, true, false, false, nullable>;

    using State = StateImpl<true>;
    using StateNoCache = StateImpl<false>;

    static const bool low_cardinality_optimization = false;
    static const bool one_key_nullable_optimization = nullable;

    std::optional<Sizes> shuffleKeyColumns(std::vector<IColumn *> &, const Sizes &) { return {}; }

    static void insertKeyIntoColumns(StringRef key, std::vector<IColumn *> & key_columns, const Sizes &);
};

/// For the case where there is one fixed-length string key.
template <typename TData>
struct AggregationMethodFixedString
{
    using Data = TData;
    using Key = typename Data::key_type;
    using Mapped = typename Data::mapped_type;

    Data data;

    AggregationMethodFixedString() = default;

    explicit AggregationMethodFixedString(size_t size_hint) : data(size_hint) { }

    template <typename Other>
    explicit AggregationMethodFixedString(const Other & other) : data(other.data)
    {
    }

    template <bool use_cache>
    using StateImpl = ColumnsHashing::HashMethodFixedString<typename Data::value_type, Mapped, /*place_string_to_arena=*/ true, use_cache>;

    using State = StateImpl<true>;
    using StateNoCache = StateImpl<false>;

    static const bool low_cardinality_optimization = false;
    static const bool one_key_nullable_optimization = false;

    std::optional<Sizes> shuffleKeyColumns(std::vector<IColumn *> &, const Sizes &) { return {}; }

    static void insertKeyIntoColumns(StringRef key, std::vector<IColumn *> & key_columns, const Sizes &);
};

/// Same as above but without cache
template <typename TData, bool nullable = false>
struct AggregationMethodFixedStringNoCache
{
    using Data = TData;
    using Key = typename Data::key_type;
    using Mapped = typename Data::mapped_type;

    Data data;

    AggregationMethodFixedStringNoCache() = default;

    explicit AggregationMethodFixedStringNoCache(size_t size_hint) : data(size_hint) { }

    template <typename Other>
    explicit AggregationMethodFixedStringNoCache(const Other & other) : data(other.data)
    {
    }

    template <bool use_cache>
    using StateImpl = ColumnsHashing::HashMethodFixedString<typename Data::value_type, Mapped, true, false, false, nullable>;

    using State = StateImpl<true>;
    using StateNoCache = StateImpl<false>;

    static const bool low_cardinality_optimization = false;
    static const bool one_key_nullable_optimization = nullable;

    std::optional<Sizes> shuffleKeyColumns(std::vector<IColumn *> &, const Sizes &) { return {}; }

    static void insertKeyIntoColumns(StringRef key, std::vector<IColumn *> & key_columns, const Sizes &);
};

/// Single low cardinality column.
template <typename SingleColumnMethod>
struct AggregationMethodSingleLowCardinalityColumn : public SingleColumnMethod
{
    using Base = SingleColumnMethod;
    using Data = typename Base::Data;
    using Key = typename Base::Key;
    using Mapped = typename Base::Mapped;
    using Base::data;

    template <bool use_cache>
    using BaseStateImpl = typename Base::template StateImpl<use_cache>;

    AggregationMethodSingleLowCardinalityColumn() = default;

    template <typename Other>
    explicit AggregationMethodSingleLowCardinalityColumn(const Other & other) : Base(other) {}

    template <bool use_cache>
    using StateImpl = ColumnsHashing::HashMethodSingleLowCardinalityColumn<BaseStateImpl<use_cache>, Mapped, use_cache>;

    using State = StateImpl<true>;
    using StateNoCache = StateImpl<false>;

    static const bool low_cardinality_optimization = true;

    std::optional<Sizes> shuffleKeyColumns(std::vector<IColumn *> &, const Sizes &) { return {}; }

    static void insertKeyIntoColumns(const Key & key,
        std::vector<IColumn *> & key_columns_low_cardinality, const Sizes & /*key_sizes*/);
};

/// For the case where all keys are of fixed length, and they fit in N (for example, 128) bits.
template <typename TData, bool has_nullable_keys_ = false, bool has_low_cardinality_ = false, bool consecutive_keys_optimization = false>
struct AggregationMethodKeysFixed
{
    using Data = TData;
    using Key = typename Data::key_type;
    using Mapped = typename Data::mapped_type;
    static constexpr bool has_nullable_keys = has_nullable_keys_;
    static constexpr bool has_low_cardinality = has_low_cardinality_;

    Data data;

    AggregationMethodKeysFixed() = default;

    explicit AggregationMethodKeysFixed(size_t size_hint) : data(size_hint) { }

    template <typename Other>
    explicit AggregationMethodKeysFixed(const Other & other) : data(other.data)
    {
    }

    template <bool use_cache>
    using StateImpl = ColumnsHashing::HashMethodKeysFixed<
        typename Data::value_type,
        Key,
        Mapped,
        has_nullable_keys,
        has_low_cardinality,
        use_cache && consecutive_keys_optimization>;

    using State = StateImpl<true>;
    using StateNoCache = StateImpl<false>;

    static const bool low_cardinality_optimization = false;
    static const bool one_key_nullable_optimization = false;

    std::optional<Sizes> shuffleKeyColumns(std::vector<IColumn *> & key_columns, const Sizes & key_sizes)
    {
        return State::shuffleKeyColumns(key_columns, key_sizes);
    }

    static void insertKeyIntoColumns(const Key & key, std::vector<IColumn *> & key_columns, const Sizes & key_sizes);
};

/** Aggregates by concatenating serialized key values.
  * The serialized value differs in that it uniquely allows to deserialize it, having only the position with which it starts.
  * That is, for example, for strings, it contains first the serialized length of the string, and then the bytes.
  * Therefore, when aggregating by several strings, there is no ambiguity.
  */
template <typename TData, bool nullable = false, bool prealloc = false>
struct AggregationMethodSerialized
{
    using Data = TData;
    using Key = typename Data::key_type;
    using Mapped = typename Data::mapped_type;

    Data data;

    AggregationMethodSerialized() = default;

    explicit AggregationMethodSerialized(size_t size_hint) : data(size_hint) { }

    template <typename Other>
    explicit AggregationMethodSerialized(const Other & other) : data(other.data)
    {
    }

    template <bool use_cache>
    using StateImpl = ColumnsHashing::HashMethodSerialized<typename Data::value_type, Mapped, nullable, prealloc>;

    using State = StateImpl<true>;
    using StateNoCache = StateImpl<false>;

    static const bool low_cardinality_optimization = false;
    static const bool one_key_nullable_optimization = false;

    std::optional<Sizes> shuffleKeyColumns(std::vector<IColumn *> &, const Sizes &) { return {}; }

    static void insertKeyIntoColumns(StringRef key, std::vector<IColumn *> & key_columns, const Sizes &);
};

template <typename TData>
using AggregationMethodNullableSerialized = AggregationMethodSerialized<TData, true>;

template <typename TData>
using AggregationMethodPreallocSerialized = AggregationMethodSerialized<TData, false, true>;

template <typename TData>
using AggregationMethodNullablePreallocSerialized = AggregationMethodSerialized<TData, true, true>;

}
|
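The "serialized" methods above rely on the composite key being self-delimiting: each key column value is appended in a form that carries its own length, so the concatenation can be split again without extra metadata. A self-contained sketch of that idea (illustration only; the real code stores the bytes in an Arena and calls deserializeAndInsertFromArena, as shown in AggregationMethod.cpp above):

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <string>

// Append a value in a self-delimiting form: 4-byte length, then the bytes.
static void serialize(std::string & out, const std::string & v)
{
    uint32_t len = static_cast<uint32_t>(v.size());
    out.append(reinterpret_cast<const char *>(&len), sizeof(len));
    out.append(v);
}

// Read one value and return the position of the next one.
static const char * deserialize(const char * pos, std::string & v)
{
    uint32_t len;
    std::memcpy(&len, pos, sizeof(len));
    v.assign(pos + sizeof(len), len);
    return pos + sizeof(len) + len;
}

int main()
{
    std::string key;
    serialize(key, "ab");   // first key column
    serialize(key, "abc");  // second key column: no ambiguity with ("a", "babc"), etc.

    const char * pos = key.data();
    std::string a, b;
    pos = deserialize(pos, a);
    pos = deserialize(pos, b);
    std::printf("%s | %s\n", a.c_str(), b.c_str());  // prints "ab | abc"
}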
@ -48,7 +48,6 @@ namespace ProfileEvents
|
||||
extern const Event ExternalAggregationUncompressedBytes;
|
||||
extern const Event ExternalProcessingCompressedBytesTotal;
|
||||
extern const Event ExternalProcessingUncompressedBytesTotal;
|
||||
extern const Event AggregationPreallocatedElementsInHashTables;
|
||||
extern const Event AggregationHashTablesInitializedAsTwoLevel;
|
||||
extern const Event OverflowThrow;
|
||||
extern const Event OverflowBreak;
|
||||
@ -270,50 +269,6 @@ void updateStatistics(const DB::ManyAggregatedDataVariants & data_variants, cons
|
||||
getHashTablesStatistics().update(sum_of_sizes, *median_size, params);
|
||||
}
|
||||
|
||||
// The std::is_constructible trait isn't suitable here because some classes have template constructors with semantics different from providing size hints.
|
||||
// Also string hash table variants are not supported due to the fact that both local perf tests and tests in CI showed slowdowns for them.
|
||||
template <typename...>
|
||||
struct HasConstructorOfNumberOfElements : std::false_type
|
||||
{
|
||||
};
|
||||
|
||||
template <typename... Ts>
|
||||
struct HasConstructorOfNumberOfElements<HashMapTable<Ts...>> : std::true_type
|
||||
{
|
||||
};
|
||||
|
||||
template <typename Key, typename Cell, typename Hash, typename Grower, typename Allocator, template <typename...> typename ImplTable>
|
||||
struct HasConstructorOfNumberOfElements<TwoLevelHashMapTable<Key, Cell, Hash, Grower, Allocator, ImplTable>> : std::true_type
|
||||
{
|
||||
};
|
||||
|
||||
template <typename... Ts>
|
||||
struct HasConstructorOfNumberOfElements<HashTable<Ts...>> : std::true_type
|
||||
{
|
||||
};
|
||||
|
||||
template <typename... Ts>
|
||||
struct HasConstructorOfNumberOfElements<TwoLevelHashTable<Ts...>> : std::true_type
|
||||
{
|
||||
};
|
||||
|
||||
template <template <typename> typename Method, typename Base>
|
||||
struct HasConstructorOfNumberOfElements<Method<Base>> : HasConstructorOfNumberOfElements<Base>
|
||||
{
|
||||
};
|
||||
|
||||
template <typename Method>
|
||||
auto constructWithReserveIfPossible(size_t size_hint)
|
||||
{
|
||||
if constexpr (HasConstructorOfNumberOfElements<typename Method::Data>::value)
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::AggregationPreallocatedElementsInHashTables, size_hint);
|
||||
return std::make_unique<Method>(size_hint);
|
||||
}
|
||||
else
|
||||
return std::make_unique<Method>();
|
||||
}
|
||||
|
||||
DB::ColumnNumbers calculateKeysPositions(const DB::Block & header, const DB::Aggregator::Params & params)
|
||||
{
|
||||
DB::ColumnNumbers keys_positions(params.keys_size);
|
||||
@ -346,71 +301,11 @@ size_t getMinBytesForPrefetch()
|
||||
namespace DB
|
||||
{
|
||||
|
||||
AggregatedDataVariants::~AggregatedDataVariants()
|
||||
{
|
||||
if (aggregator && !aggregator->all_aggregates_has_trivial_destructor)
|
||||
{
|
||||
try
|
||||
{
|
||||
aggregator->destroyAllAggregateStates(*this);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::optional<HashTablesCacheStatistics> getHashTablesCacheStatistics()
|
||||
{
|
||||
return getHashTablesStatistics().getCacheStats();
|
||||
}
|
||||
|
||||
void AggregatedDataVariants::convertToTwoLevel()
|
||||
{
|
||||
if (aggregator)
|
||||
LOG_TRACE(aggregator->log, "Converting aggregation data to two-level.");
|
||||
|
||||
switch (type)
|
||||
{
|
||||
#define M(NAME) \
|
||||
case Type::NAME: \
|
||||
NAME ## _two_level = std::make_unique<decltype(NAME ## _two_level)::element_type>(*(NAME)); \
|
||||
(NAME).reset(); \
|
||||
type = Type::NAME ## _two_level; \
|
||||
break;
|
||||
|
||||
APPLY_FOR_VARIANTS_CONVERTIBLE_TO_TWO_LEVEL(M)
|
||||
|
||||
#undef M
|
||||
|
||||
default:
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Wrong data variant passed.");
|
||||
}
|
||||
}
|
||||
|
||||
void AggregatedDataVariants::init(Type type_, std::optional<size_t> size_hint)
|
||||
{
|
||||
switch (type_)
|
||||
{
|
||||
case Type::EMPTY:
|
||||
case Type::without_key:
|
||||
break;
|
||||
|
||||
#define M(NAME, IS_TWO_LEVEL) \
|
||||
case Type::NAME: \
|
||||
if (size_hint) \
|
||||
(NAME) = constructWithReserveIfPossible<decltype(NAME)::element_type>(*size_hint); \
|
||||
else \
|
||||
(NAME) = std::make_unique<decltype(NAME)::element_type>(); \
|
||||
break;
|
||||
APPLY_FOR_AGGREGATED_VARIANTS(M)
|
||||
#undef M
|
||||
}
|
||||
|
||||
type = type_;
|
||||
}
|
||||
|
||||
Aggregator::Params::StatsCollectingParams::StatsCollectingParams() = default;
|
||||
|
||||
Aggregator::Params::StatsCollectingParams::StatsCollectingParams(
|
||||
@ -1112,7 +1007,6 @@ void NO_INLINE Aggregator::executeImpl(
|
||||
bool all_keys_are_const,
|
||||
AggregateDataPtr overflow_row) const
|
||||
{
|
||||
bool use_compiled_functions = false;
|
||||
if (!no_more_keys)
|
||||
{
|
||||
/// Prefetching doesn't make sense for small hash tables, because they fit in caches entirely.
|
||||
@ -1120,47 +1014,33 @@ void NO_INLINE Aggregator::executeImpl(
|
||||
&& (method.data.getBufferSizeInBytes() > min_bytes_for_prefetch);
|
||||
|
||||
#if USE_EMBEDDED_COMPILER
|
||||
use_compiled_functions = compiled_aggregate_functions_holder && !hasSparseArguments(aggregate_instructions);
|
||||
#endif
|
||||
if (prefetch)
|
||||
executeImplBatch<false, true>(
|
||||
method,
|
||||
state,
|
||||
aggregates_pool,
|
||||
row_begin,
|
||||
row_end,
|
||||
aggregate_instructions,
|
||||
all_keys_are_const,
|
||||
use_compiled_functions,
|
||||
overflow_row);
|
||||
if (compiled_aggregate_functions_holder && !hasSparseArguments(aggregate_instructions))
|
||||
{
|
||||
if (prefetch)
|
||||
executeImplBatch<true>(
|
||||
method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, false, all_keys_are_const, true, overflow_row);
|
||||
else
|
||||
executeImplBatch<false>(
|
||||
method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, false, all_keys_are_const, true, overflow_row);
|
||||
}
|
||||
else
|
||||
executeImplBatch<false, false>(
|
||||
method,
|
||||
state,
|
||||
aggregates_pool,
|
||||
row_begin,
|
||||
row_end,
|
||||
aggregate_instructions,
|
||||
all_keys_are_const,
|
||||
use_compiled_functions,
|
||||
overflow_row);
|
||||
#endif
|
||||
{
|
||||
if (prefetch)
|
||||
executeImplBatch<true>(
|
||||
method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, false, all_keys_are_const, false, overflow_row);
|
||||
else
|
||||
executeImplBatch<false>(
|
||||
method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, false, all_keys_are_const, false, overflow_row);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
executeImplBatch<true, false>(
|
||||
method,
|
||||
state,
|
||||
aggregates_pool,
|
||||
row_begin,
|
||||
row_end,
|
||||
aggregate_instructions,
|
||||
all_keys_are_const,
|
||||
use_compiled_functions,
|
||||
overflow_row);
|
||||
executeImplBatch<false>(method, state, aggregates_pool, row_begin, row_end, aggregate_instructions, true, all_keys_are_const, false, overflow_row);
|
||||
}
|
||||
}
|
||||
|
||||
template <bool no_more_keys, bool prefetch, typename Method, typename State>
|
||||
template <bool prefetch, typename Method, typename State>
|
||||
void NO_INLINE Aggregator::executeImplBatch(
|
||||
Method & method,
|
||||
State & state,
|
||||
@ -1168,6 +1048,7 @@ void NO_INLINE Aggregator::executeImplBatch(
|
||||
size_t row_begin,
|
||||
size_t row_end,
|
||||
AggregateFunctionInstruction * aggregate_instructions,
|
||||
bool no_more_keys,
|
||||
bool all_keys_are_const,
|
||||
bool use_compiled_functions [[maybe_unused]],
|
||||
AggregateDataPtr overflow_row) const
|
||||
@ -1181,7 +1062,7 @@ void NO_INLINE Aggregator::executeImplBatch(
|
||||
/// Optimization for special case when there are no aggregate functions.
|
||||
if (params.aggregates_size == 0)
|
||||
{
|
||||
if constexpr (no_more_keys)
|
||||
if (no_more_keys)
|
||||
return;
|
||||
|
||||
/// This pointer is unused, but the logic will compare it for nullptr to check if the cell is set.
|
||||
@ -1214,39 +1095,42 @@ void NO_INLINE Aggregator::executeImplBatch(
|
||||
}
|
||||
|
||||
/// Optimization for special case when aggregating by 8bit key.
|
||||
if constexpr (!no_more_keys && std::is_same_v<Method, typename decltype(AggregatedDataVariants::key8)::element_type>)
|
||||
if (!no_more_keys)
|
||||
{
|
||||
/// We use another method if there are aggregate functions with -Array combinator.
|
||||
bool has_arrays = false;
|
||||
for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst)
|
||||
{
|
||||
if (inst->offsets)
|
||||
{
|
||||
has_arrays = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!has_arrays && !hasSparseArguments(aggregate_instructions) && !all_keys_are_const)
|
||||
if constexpr (std::is_same_v<Method, typename decltype(AggregatedDataVariants::key8)::element_type>)
|
||||
{
|
||||
/// We use another method if there are aggregate functions with -Array combinator.
|
||||
bool has_arrays = false;
|
||||
for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst)
|
||||
{
|
||||
inst->batch_that->addBatchLookupTable8(
|
||||
row_begin,
|
||||
row_end,
|
||||
reinterpret_cast<AggregateDataPtr *>(method.data.data()),
|
||||
inst->state_offset,
|
||||
[&](AggregateDataPtr & aggregate_data)
|
||||
{
|
||||
AggregateDataPtr place = aggregates_pool->alignedAlloc(total_size_of_aggregate_states, align_aggregate_states);
|
||||
createAggregateStates(place);
|
||||
aggregate_data = place;
|
||||
},
|
||||
state.getKeyData(),
|
||||
inst->batch_arguments,
|
||||
aggregates_pool);
|
||||
if (inst->offsets)
|
||||
{
|
||||
has_arrays = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!has_arrays && !hasSparseArguments(aggregate_instructions) && !all_keys_are_const)
|
||||
{
|
||||
for (AggregateFunctionInstruction * inst = aggregate_instructions; inst->that; ++inst)
|
||||
{
|
||||
inst->batch_that->addBatchLookupTable8(
|
||||
row_begin,
|
||||
row_end,
|
||||
reinterpret_cast<AggregateDataPtr *>(method.data.data()),
|
||||
inst->state_offset,
|
||||
[&](AggregateDataPtr & aggregate_data)
|
||||
{
|
||||
AggregateDataPtr place = aggregates_pool->alignedAlloc(total_size_of_aggregate_states, align_aggregate_states);
|
||||
createAggregateStates(place);
|
||||
aggregate_data = place;
|
||||
},
|
||||
state.getKeyData(),
|
||||
inst->batch_arguments,
|
||||
aggregates_pool);
|
||||
}
|
||||
return;
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1272,12 +1156,12 @@ void NO_INLINE Aggregator::executeImplBatch(
|
||||
state.resetCache();
|
||||
|
||||
/// For all rows.
|
||||
for (size_t i = key_start; i < key_end; ++i)
|
||||
if (!no_more_keys)
|
||||
{
|
||||
AggregateDataPtr aggregate_data = nullptr;
|
||||
|
||||
if constexpr (!no_more_keys)
|
||||
for (size_t i = key_start; i < key_end; ++i)
|
||||
{
|
||||
AggregateDataPtr aggregate_data = nullptr;
|
||||
|
||||
if constexpr (prefetch && HasPrefetchMemberFunc<decltype(method.data), KeyHolder>)
|
||||
{
|
||||
if (i == key_start + prefetching.iterationsToMeasure())
|
||||
@ -1323,24 +1207,47 @@ void NO_INLINE Aggregator::executeImplBatch(
|
||||
aggregate_data = emplace_result.getMapped();
|
||||
|
||||
assert(aggregate_data != nullptr);
|
||||
places[i] = aggregate_data;
|
||||
}
|
||||
else
|
||||
}
|
||||
else
|
||||
{
|
||||
for (size_t i = key_start; i < key_end; ++i)
|
||||
{
|
||||
AggregateDataPtr aggregate_data = nullptr;
|
||||
/// Add only if the key already exists.
|
||||
auto find_result = state.findKey(method.data, i, *aggregates_pool);
|
||||
if (find_result.isFound())
|
||||
{
|
||||
aggregate_data = find_result.getMapped();
|
||||
}
|
||||
else
|
||||
{
|
||||
aggregate_data = overflow_row;
|
||||
}
|
||||
places[i] = aggregate_data;
|
||||
}
|
||||
|
||||
places[i] = aggregate_data;
|
||||
}
|
||||
|
||||
executeAggregateInstructions(
|
||||
aggregates_pool,
|
||||
row_begin,
|
||||
row_end,
|
||||
aggregate_instructions,
|
||||
places,
|
||||
key_start,
|
||||
state.hasOnlyOneValueSinceLastReset(),
|
||||
all_keys_are_const,
|
||||
use_compiled_functions);
|
||||
}
|
||||
|
||||
void Aggregator::executeAggregateInstructions(
|
||||
Arena * aggregates_pool,
|
||||
size_t row_begin,
|
||||
size_t row_end,
|
||||
AggregateFunctionInstruction * aggregate_instructions,
|
||||
const std::unique_ptr<AggregateDataPtr[]> &places,
|
||||
size_t key_start,
|
||||
bool has_only_one_value_since_last_reset,
|
||||
bool all_keys_are_const,
|
||||
bool use_compiled_functions [[maybe_unused]]) const
|
||||
{
|
||||
#if USE_EMBEDDED_COMPILER
|
||||
if (use_compiled_functions)
|
||||
{
|
||||
@ -1360,7 +1267,7 @@ void NO_INLINE Aggregator::executeImplBatch(
|
||||
columns_data.emplace_back(getColumnData(inst->batch_arguments[argument_index]));
|
||||
}
|
||||
|
||||
if (all_keys_are_const || (can_optimize_equal_keys_ranges && state.hasOnlyOneValueSinceLastReset()))
|
||||
if (all_keys_are_const || (can_optimize_equal_keys_ranges && has_only_one_value_since_last_reset))
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::AggregationOptimizedEqualRangesOfKeys);
|
||||
auto add_into_aggregate_states_function_single_place = compiled_aggregate_functions_holder->compiled_aggregate_functions.add_into_aggregate_states_function_single_place;
|
||||
@ -1384,7 +1291,7 @@ void NO_INLINE Aggregator::executeImplBatch(
|
||||
|
||||
AggregateFunctionInstruction * inst = aggregate_instructions + i;
|
||||
|
||||
if (all_keys_are_const || (inst->can_optimize_equal_keys_ranges && state.hasOnlyOneValueSinceLastReset()))
|
||||
if (all_keys_are_const || (inst->can_optimize_equal_keys_ranges && has_only_one_value_since_last_reset))
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::AggregationOptimizedEqualRangesOfKeys);
|
||||
addBatchSinglePlace(row_begin, row_end, inst, places[key_start] + inst->state_offset, aggregates_pool);
|
||||
@ -1394,6 +1301,7 @@ void NO_INLINE Aggregator::executeImplBatch(
|
||||
addBatch(row_begin, row_end, inst, places.get(), aggregates_pool);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
@ -1825,8 +1733,14 @@ Block Aggregator::convertOneBucketToBlock(
|
||||
{
|
||||
// Used in ConvertingAggregatedToChunksSource -> ConvertingAggregatedToChunksTransform (expects single chunk for each bucket_id).
|
||||
constexpr bool return_single_block = true;
|
||||
Block block = convertToBlockImpl<return_single_block>(
|
||||
method, method.data.impls[bucket], arena, data_variants.aggregates_pools, final, method.data.impls[bucket].size());
|
||||
Block block = std::get<Block>(convertToBlockImpl(
|
||||
method,
|
||||
method.data.impls[bucket],
|
||||
arena,
|
||||
data_variants.aggregates_pools,
|
||||
final,
|
||||
method.data.impls[bucket].size(),
|
||||
return_single_block));
|
||||
|
||||
block.info.bucket_num = static_cast<int>(bucket);
|
||||
return block;
|
||||
@ -1946,29 +1860,27 @@ bool Aggregator::checkLimits(size_t result_size, bool & no_more_keys) const
}


template <bool return_single_block, typename Method, typename Table>
Aggregator::ConvertToBlockRes<return_single_block>
Aggregator::convertToBlockImpl(Method & method, Table & data, Arena * arena, Arenas & aggregates_pools, bool final, size_t rows) const
template <typename Method, typename Table>
Aggregator::ConvertToBlockResVariant
Aggregator::convertToBlockImpl(Method & method, Table & data, Arena * arena, Arenas & aggregates_pools, bool final,size_t rows, bool return_single_block) const
{
if (data.empty())
{
auto && out_cols = prepareOutputBlockColumns(params, aggregate_functions, getHeader(final), aggregates_pools, final, rows);
return {finalizeBlock(params, getHeader(final), std::move(out_cols), final, rows)};
}

ConvertToBlockRes<return_single_block> res;

ConvertToBlockResVariant res;
bool use_compiled_functions = false;
if (final)
{
#if USE_EMBEDDED_COMPILER
use_compiled_functions = compiled_aggregate_functions_holder != nullptr && !Method::low_cardinality_optimization;
#endif
res = convertToBlockImplFinal<Method, return_single_block>(method, data, arena, aggregates_pools, use_compiled_functions, rows);
res = convertToBlockImplFinal<Method>(method, data, arena, aggregates_pools, use_compiled_functions, return_single_block);
}
else
{
res = convertToBlockImplNotFinal<return_single_block>(method, data, aggregates_pools, rows);
res = convertToBlockImplNotFinal(method, data, aggregates_pools, rows, return_single_block);
}

/// In order to release memory early.
@ -2135,19 +2047,24 @@ Block Aggregator::insertResultsIntoColumns(
return finalizeBlock(params, getHeader(/* final */ true), std::move(out_cols), /* final */ true, places.size());
}

template <typename Method, bool return_single_block, typename Table>
Aggregator::ConvertToBlockRes<return_single_block> NO_INLINE Aggregator::convertToBlockImplFinal(
Method & method, Table & data, Arena * arena, Arenas & aggregates_pools, bool use_compiled_functions [[maybe_unused]], size_t) const
template <typename Method, typename Table>
Aggregator::ConvertToBlockResVariant Aggregator::convertToBlockImplFinal(
Method & method,
Table & data,
Arena * arena,
Arenas & aggregates_pools,
bool use_compiled_functions [[maybe_unused]],
bool return_single_block) const
{
/// +1 for nullKeyData, if `data` doesn't have it - not a problem, just some memory for one excessive row will be preallocated
const size_t max_block_size = (return_single_block ? data.size() : std::min(params.max_block_size, data.size())) + 1;
const bool final = true;
ConvertToBlockRes<return_single_block> res;

std::optional<OutputBlockColumns> out_cols;
std::optional<Sizes> shuffled_key_sizes;
PaddedPODArray<AggregateDataPtr> places;
bool has_null_key_data = false;
BlocksList blocks;

auto init_out_cols = [&]()
{
@ -2189,40 +2106,38 @@ Aggregator::ConvertToBlockRes<return_single_block> NO_INLINE Aggregator::convert
/// Mark the cell as destroyed so it will not be destroyed in destructor.
mapped = nullptr;

if constexpr (!return_single_block)
if (!return_single_block && places.size() >= max_block_size)
{
if (places.size() >= max_block_size)
{
res.emplace_back(
insertResultsIntoColumns(places, std::move(out_cols.value()), arena, has_null_key_data, use_compiled_functions));
places.clear();
out_cols.reset();
has_null_key_data = false;
}
blocks.emplace_back(
insertResultsIntoColumns(places, std::move(out_cols.value()), arena, has_null_key_data, use_compiled_functions));
places.clear();
out_cols.reset();
has_null_key_data = false;
}
});

if constexpr (return_single_block)
if (return_single_block)
{
return insertResultsIntoColumns(places, std::move(out_cols.value()), arena, has_null_key_data, use_compiled_functions);
}
else
{
if (out_cols.has_value())
res.emplace_back(
insertResultsIntoColumns(places, std::move(out_cols.value()), arena, has_null_key_data, use_compiled_functions));
return res;
{
blocks.emplace_back(insertResultsIntoColumns(places, std::move(out_cols.value()), arena, has_null_key_data, use_compiled_functions));
}
return blocks;
}
}

template <bool return_single_block, typename Method, typename Table>
Aggregator::ConvertToBlockRes<return_single_block> NO_INLINE
Aggregator::convertToBlockImplNotFinal(Method & method, Table & data, Arenas & aggregates_pools, size_t) const
template <typename Method, typename Table>
Aggregator::ConvertToBlockResVariant NO_INLINE
Aggregator::convertToBlockImplNotFinal(Method & method, Table & data, Arenas & aggregates_pools, size_t, bool return_single_block) const
{
/// +1 for nullKeyData, if `data` doesn't have it - not a problem, just some memory for one excessive row will be preallocated
const size_t max_block_size = (return_single_block ? data.size() : std::min(params.max_block_size, data.size())) + 1;
const bool final = false;
ConvertToBlockRes<return_single_block> res;
BlocksList res_blocks;

std::optional<OutputBlockColumns> out_cols;
std::optional<Sizes> shuffled_key_sizes;
@ -2252,7 +2167,6 @@ Aggregator::convertToBlockImplNotFinal(Method & method, Table & data, Arenas & a

// should be invoked at least once, because null data might be the only content of the `data`
init_out_cols();

data.forEachValue(
[&](const auto & key, auto & mapped)
{
@ -2269,29 +2183,24 @@ Aggregator::convertToBlockImplNotFinal(Method & method, Table & data, Arenas & a
mapped = nullptr;

++rows_in_current_block;

if constexpr (!return_single_block)
if (!return_single_block && rows_in_current_block >= max_block_size)
{
if (rows_in_current_block >= max_block_size)
{
res.emplace_back(finalizeBlock(params, getHeader(final), std::move(out_cols.value()), final, rows_in_current_block));
out_cols.reset();
rows_in_current_block = 0;
}
res_blocks.emplace_back(finalizeBlock(params, getHeader(final), std::move(out_cols.value()), final, rows_in_current_block));
out_cols.reset();
rows_in_current_block = 0;
}
});

if constexpr (return_single_block)
if (return_single_block)
{
return finalizeBlock(params, getHeader(final), std::move(out_cols).value(), final, rows_in_current_block);
}
else
{
if (rows_in_current_block)
res.emplace_back(finalizeBlock(params, getHeader(final), std::move(out_cols).value(), final, rows_in_current_block));
return res;
res_blocks.emplace_back(finalizeBlock(params, getHeader(final), std::move(out_cols).value(), final, rows_in_current_block));
return res_blocks;
}
return res;
}

void Aggregator::addSingleKeyToAggregateColumns(
@ -2397,18 +2306,23 @@ template <bool return_single_block>
Aggregator::ConvertToBlockRes<return_single_block>
Aggregator::prepareBlockAndFillSingleLevel(AggregatedDataVariants & data_variants, bool final) const
{
ConvertToBlockResVariant res_variant;
const size_t rows = data_variants.sizeWithoutOverflowRow();
#define M(NAME) \
else if (data_variants.type == AggregatedDataVariants::Type::NAME) \
{ \
return convertToBlockImpl<return_single_block>( \
*data_variants.NAME, data_variants.NAME->data, data_variants.aggregates_pool, data_variants.aggregates_pools, final, rows); \
res_variant = convertToBlockImpl( \
*data_variants.NAME, data_variants.NAME->data, data_variants.aggregates_pool, data_variants.aggregates_pools, final, rows, return_single_block); \
}

if (false) {} // NOLINT
APPLY_FOR_VARIANTS_SINGLE_LEVEL(M)
#undef M
else throw Exception(ErrorCodes::UNKNOWN_AGGREGATED_DATA_VARIANT, "Unknown aggregated data variant.");
if constexpr (return_single_block)
return std::get<Block>(res_variant);
else
return std::get<BlocksList>(res_variant);
}

@ -2534,7 +2448,7 @@ BlocksList Aggregator::convertToBlocks(AggregatedDataVariants & data_variants, b
if (data_variants.type != AggregatedDataVariants::Type::without_key)
{
if (!data_variants.isTwoLevel())
blocks.splice(blocks.end(), prepareBlockAndFillSingleLevel</* return_single_block */ false>(data_variants, final));
blocks.splice(blocks.end(), prepareBlockAndFillSingleLevel<false>(data_variants, final));
else
blocks.splice(blocks.end(), prepareBlocksAndFillTwoLevel(data_variants, final, thread_pool.get()));
}
@ -2600,9 +2514,9 @@ void NO_INLINE Aggregator::mergeDataNullKey(
}
}

template <typename Method, bool prefetch, typename Table>
void NO_INLINE
Aggregator::mergeDataImpl(Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions [[maybe_unused]]) const
template <typename Method, typename Table>
void NO_INLINE Aggregator::mergeDataImpl(
Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions [[maybe_unused]], bool prefetch) const
{
if constexpr (Method::low_cardinality_optimization || Method::one_key_nullable_optimization)
mergeDataNullKey<Method, Table>(table_dst, table_src, arena);
@ -2625,7 +2539,10 @@ Aggregator::mergeDataImpl(Table & table_dst, Table & table_src, Arena * arena, b
src = nullptr;
};

table_src.template mergeToViaEmplace<decltype(merge), prefetch>(table_dst, std::move(merge));
if (prefetch)
table_src.template mergeToViaEmplace<decltype(merge), true>(table_dst, std::move(merge));
else
table_src.template mergeToViaEmplace<decltype(merge), false>(table_dst, std::move(merge));
table_src.clearAndShrink();

#if USE_EMBEDDED_COMPILER
@ -2779,16 +2696,18 @@ void NO_INLINE Aggregator::mergeSingleLevelDataImpl(

if (!no_more_keys)
{
bool use_compiled_functions = false;
#if USE_EMBEDDED_COMPILER
use_compiled_functions = compiled_aggregate_functions_holder != nullptr;
#endif
if (prefetch)
mergeDataImpl<Method, true>(
getDataVariant<Method>(*res).data, getDataVariant<Method>(current).data, res->aggregates_pool, use_compiled_functions);
if (compiled_aggregate_functions_holder)
{
mergeDataImpl<Method>(
getDataVariant<Method>(*res).data, getDataVariant<Method>(current).data, res->aggregates_pool, true, prefetch);
}
else
mergeDataImpl<Method, false>(
getDataVariant<Method>(*res).data, getDataVariant<Method>(current).data, res->aggregates_pool, use_compiled_functions);
#endif
{
mergeDataImpl<Method>(
getDataVariant<Method>(*res).data, getDataVariant<Method>(current).data, res->aggregates_pool, false, prefetch);
}
}
else if (res->without_key)
{
@ -2833,22 +2752,22 @@ void NO_INLINE Aggregator::mergeBucketImpl(
return;

AggregatedDataVariants & current = *data[result_num];
bool use_compiled_functions = false;
#if USE_EMBEDDED_COMPILER
use_compiled_functions = compiled_aggregate_functions_holder != nullptr;
#endif
if (prefetch)
mergeDataImpl<Method, true>(
getDataVariant<Method>(*res).data.impls[bucket],
getDataVariant<Method>(current).data.impls[bucket],
arena,
use_compiled_functions);
if (compiled_aggregate_functions_holder)
{
mergeDataImpl<Method>(
getDataVariant<Method>(*res).data.impls[bucket], getDataVariant<Method>(current).data.impls[bucket], arena, true, prefetch);
}
else
mergeDataImpl<Method, false>(
#endif
{
mergeDataImpl<Method>(
getDataVariant<Method>(*res).data.impls[bucket],
getDataVariant<Method>(current).data.impls[bucket],
arena,
use_compiled_functions);
false,
prefetch);
}
}
}

@ -3560,7 +3479,4 @@ void Aggregator::destroyAllAggregateStates(AggregatedDataVariants & result) cons
throw Exception(ErrorCodes::UNKNOWN_AGGREGATED_DATA_VARIANT, "Unknown aggregated data variant.");
}


template Aggregator::ConvertToBlockRes<false>
Aggregator::prepareBlockAndFillSingleLevel<false>(AggregatedDataVariants & data_variants, bool final) const;
}
File diff suppressed because it is too large
@ -783,6 +783,17 @@ bool FileCache::tryReserve(
ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::FilesystemCacheReserveMicroseconds);

assertInitialized();

/// A logical race on cache_is_being_resized is still possible,
/// in this case we will try to lock cache with timeout, this is ok, timeout is small
/// and as resizing of cache can take a long time then this small chance of a race is
/// ok compared to the number of cases this check will help.
if (cache_is_being_resized.load(std::memory_order_relaxed))
{
ProfileEvents::increment(ProfileEvents::FilesystemCacheFailToReserveSpaceBecauseOfLockContention);
return false;
}

auto cache_lock = tryLockCache(std::chrono::milliseconds(lock_wait_timeout_milliseconds));
if (!cache_lock)
{
@ -1264,12 +1275,14 @@ std::vector<String> FileCache::tryGetCachePaths(const Key & key)

size_t FileCache::getUsedCacheSize() const
{
return main_priority->getSize(lockCache());
/// We use this method for metrics, so it is ok to get approximate result.
return main_priority->getSizeApprox();
}

size_t FileCache::getFileSegmentsNum() const
{
return main_priority->getElementsCount(lockCache());
/// We use this method for metrics, so it is ok to get approximate result.
return main_priority->getElementsCountApprox();
}

void FileCache::assertCacheCorrectness()
@ -1327,8 +1340,12 @@ void FileCache::applySettingsIfPossible(const FileCacheSettings & new_settings,
if (new_settings.max_size != actual_settings.max_size
|| new_settings.max_elements != actual_settings.max_elements)
{
auto cache_lock = lockCache();
cache_is_being_resized.store(true, std::memory_order_relaxed);
SCOPE_EXIT({
cache_is_being_resized.store(false, std::memory_order_relaxed);
});

auto cache_lock = lockCache();
bool updated = false;
try
{
@ -202,6 +202,7 @@ private:
mutable std::mutex init_mutex;
std::unique_ptr<StatusFile> status_file;
std::atomic<bool> shutdown = false;
std::atomic<bool> cache_is_being_resized = false;

std::mutex apply_settings_mutex;
@ -63,8 +63,12 @@ public:

virtual size_t getSize(const CachePriorityGuard::Lock &) const = 0;

virtual size_t getSizeApprox() const = 0;

virtual size_t getElementsCount(const CachePriorityGuard::Lock &) const = 0;

virtual size_t getElementsCountApprox() const = 0;

/// Throws exception if there is not enough size to fit it.
virtual IteratorPtr add( /// NOLINT
KeyMetadataPtr key_metadata,
@ -28,6 +28,10 @@ public:

size_t getElementsCount(const CachePriorityGuard::Lock &) const override { return state->current_elements_num; }

size_t getSizeApprox() const override { return state->current_size; }

size_t getElementsCountApprox() const override { return state->current_elements_num; }

bool canFit( /// NOLINT
size_t size,
const CachePriorityGuard::Lock &,
@ -44,6 +44,16 @@ size_t SLRUFileCachePriority::getElementsCount(const CachePriorityGuard::Lock &
return protected_queue.getElementsCount(lock) + probationary_queue.getElementsCount(lock);
}

size_t SLRUFileCachePriority::getSizeApprox() const
{
return protected_queue.getSizeApprox() + probationary_queue.getSizeApprox();
}

size_t SLRUFileCachePriority::getElementsCountApprox() const
{
return protected_queue.getElementsCountApprox() + probationary_queue.getElementsCountApprox();
}

bool SLRUFileCachePriority::canFit( /// NOLINT
size_t size,
const CachePriorityGuard::Lock & lock,
@ -25,6 +25,10 @@ public:

size_t getElementsCount(const CachePriorityGuard::Lock &) const override;

size_t getSizeApprox() const override;

size_t getElementsCountApprox() const override;

bool canFit( /// NOLINT
size_t size,
const CachePriorityGuard::Lock &,
@ -26,7 +26,6 @@
|
||||
#include <Common/noexcept_scope.h>
|
||||
#include <Common/checkStackSize.h>
|
||||
|
||||
#include "Interpreters/Context_fwd.h"
|
||||
#include "config.h"
|
||||
|
||||
#if USE_MYSQL
|
||||
|
@ -22,9 +22,6 @@
|
||||
#include <set>
|
||||
#include <unordered_map>
|
||||
#include <unordered_set>
|
||||
#include <filesystem>
|
||||
|
||||
namespace fs = std::filesystem;
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
@ -86,6 +86,7 @@ void replaceStorageInQueryTree(QueryTreeNodePtr & query_tree, const ContextPtr &
|
||||
continue;
|
||||
|
||||
auto replacement_table_expression = std::make_shared<TableNode>(storage, context);
|
||||
replacement_table_expression->setAlias(node->getAlias());
|
||||
|
||||
if (auto table_expression_modifiers = table_node.getTableExpressionModifiers())
|
||||
replacement_table_expression->setTableExpressionModifiers(*table_expression_modifiers);
|
||||
|
@ -52,7 +52,12 @@ BlockIO InterpreterTransactionControlQuery::executeCommit(ContextMutablePtr sess
|
||||
{
|
||||
auto txn = session_context->getCurrentTransaction();
|
||||
if (!txn)
|
||||
throw Exception(ErrorCodes::INVALID_TRANSACTION, "There is no current transaction");
|
||||
{
|
||||
if (session_context->getClientInfo().interface == ClientInfo::Interface::MYSQL)
|
||||
return {};
|
||||
else
|
||||
throw Exception(ErrorCodes::INVALID_TRANSACTION, "There is no current transaction");
|
||||
}
|
||||
if (txn->getState() != MergeTreeTransaction::RUNNING)
|
||||
throw Exception(ErrorCodes::INVALID_TRANSACTION, "Transaction is not in RUNNING state");
|
||||
|
||||
@ -111,7 +116,12 @@ BlockIO InterpreterTransactionControlQuery::executeRollback(ContextMutablePtr se
|
||||
{
|
||||
auto txn = session_context->getCurrentTransaction();
|
||||
if (!txn)
|
||||
throw Exception(ErrorCodes::INVALID_TRANSACTION, "There is no current transaction");
|
||||
{
|
||||
if (session_context->getClientInfo().interface == ClientInfo::Interface::MYSQL)
|
||||
return {};
|
||||
else
|
||||
throw Exception(ErrorCodes::INVALID_TRANSACTION, "There is no current transaction");
|
||||
}
|
||||
if (txn->getState() == MergeTreeTransaction::COMMITTED)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Transaction is in COMMITTED state");
|
||||
if (txn->getState() == MergeTreeTransaction::COMMITTING)
|
||||
|
@ -402,10 +402,17 @@ MutationsInterpreter::MutationsInterpreter(
|
||||
, metadata_snapshot(metadata_snapshot_)
|
||||
, commands(std::move(commands_))
|
||||
, available_columns(std::move(available_columns_))
|
||||
, context(Context::createCopy(context_))
|
||||
, settings(std::move(settings_))
|
||||
, select_limits(SelectQueryOptions().analyze(!settings.can_execute).ignoreLimits())
|
||||
{
|
||||
auto new_context = Context::createCopy(context_);
|
||||
if (new_context->getSettingsRef().allow_experimental_analyzer)
|
||||
{
|
||||
new_context->setSetting("allow_experimental_analyzer", false);
|
||||
LOG_DEBUG(&Poco::Logger::get("MutationsInterpreter"), "Will use old analyzer to prepare mutation");
|
||||
}
|
||||
context = std::move(new_context);
|
||||
|
||||
prepare(!settings.can_execute);
|
||||
}
|
||||
|
||||
|
@ -111,7 +111,7 @@ std::optional<AggregationAnalysisResult> analyzeAggregation(const QueryTreeNodeP
|
||||
continue;
|
||||
|
||||
auto expression_type_after_aggregation = group_by_use_nulls ? makeNullableSafe(expression_dag_node->result_type) : expression_dag_node->result_type;
|
||||
available_columns_after_aggregation.emplace_back(nullptr, expression_type_after_aggregation, expression_dag_node->result_name);
|
||||
available_columns_after_aggregation.emplace_back(expression_dag_node->column, expression_type_after_aggregation, expression_dag_node->result_name);
|
||||
aggregation_keys.push_back(expression_dag_node->result_name);
|
||||
before_aggregation_actions->getOutputs().push_back(expression_dag_node);
|
||||
before_aggregation_actions_output_node_names.insert(expression_dag_node->result_name);
|
||||
@ -161,7 +161,7 @@ std::optional<AggregationAnalysisResult> analyzeAggregation(const QueryTreeNodeP
|
||||
continue;
|
||||
|
||||
auto expression_type_after_aggregation = group_by_use_nulls ? makeNullableSafe(expression_dag_node->result_type) : expression_dag_node->result_type;
|
||||
available_columns_after_aggregation.emplace_back(nullptr, expression_type_after_aggregation, expression_dag_node->result_name);
|
||||
available_columns_after_aggregation.emplace_back(expression_dag_node->column, expression_type_after_aggregation, expression_dag_node->result_name);
|
||||
aggregation_keys.push_back(expression_dag_node->result_name);
|
||||
before_aggregation_actions->getOutputs().push_back(expression_dag_node);
|
||||
before_aggregation_actions_output_node_names.insert(expression_dag_node->result_name);
|
||||
|
@ -29,14 +29,14 @@ const DB::DataStream & getChildOutputStream(DB::QueryPlan::Node & node)
|
||||
namespace DB::QueryPlanOptimizations
|
||||
{
|
||||
|
||||
/// This is a check that output columns does not have the same name
|
||||
/// This is a check that nodes columns does not have the same name
|
||||
/// This is ok for DAG, but may introduce a bug in a SotringStep cause columns are selected by name.
|
||||
static bool areOutputsConvertableToBlock(const ActionsDAG::NodeRawConstPtrs & outputs)
|
||||
static bool areNodesConvertableToBlock(const ActionsDAG::NodeRawConstPtrs & nodes)
|
||||
{
|
||||
std::unordered_set<std::string_view> names;
|
||||
for (const auto & output : outputs)
|
||||
for (const auto & node : nodes)
|
||||
{
|
||||
if (!names.emplace(output->result_name).second)
|
||||
if (!names.emplace(node->result_name).second)
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -72,7 +72,7 @@ size_t tryExecuteFunctionsAfterSorting(QueryPlan::Node * parent_node, QueryPlan:
|
||||
if (unneeded_for_sorting->trivial())
|
||||
return 0;
|
||||
|
||||
if (!areOutputsConvertableToBlock(needed_for_sorting->getOutputs()))
|
||||
if (!areNodesConvertableToBlock(needed_for_sorting->getOutputs()) || !areNodesConvertableToBlock(unneeded_for_sorting->getInputs()))
|
||||
return 0;
|
||||
|
||||
// Sorting (parent_node) -> Expression (child_node)
|
||||
|
@ -487,10 +487,15 @@ std::pair<MergeTreeData::MutableDataPartPtr, scope_guard> Fetcher::fetchSelected
|
||||
creds.setPassword(password);
|
||||
}
|
||||
|
||||
ReadSettings read_settings = context->getReadSettings();
|
||||
/// Disable retries for fetches, this will be done by the engine itself.
|
||||
read_settings.http_max_tries = 1;
|
||||
|
||||
auto in = BuilderRWBufferFromHTTP(uri)
|
||||
.withConnectionGroup(HTTPConnectionGroupType::HTTP)
|
||||
.withMethod(Poco::Net::HTTPRequest::HTTP_POST)
|
||||
.withTimeouts(timeouts)
|
||||
.withSettings(read_settings)
|
||||
.withDelayInit(false)
|
||||
.create(creds);
|
||||
|
||||
|
@ -47,13 +47,22 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare()
|
||||
int32_t metadata_version = metadata_snapshot->getMetadataVersion();
|
||||
const auto storage_settings_ptr = storage.getSettings();
|
||||
|
||||
stopwatch_ptr = std::make_unique<Stopwatch>();
|
||||
auto part_log_writer = [this, stopwatch = *stopwatch_ptr](const ExecutionStatus & execution_status)
|
||||
{
|
||||
auto profile_counters_snapshot = std::make_shared<ProfileEvents::Counters::Snapshot>(profile_counters.getPartiallyAtomicSnapshot());
|
||||
storage.writePartLog(
|
||||
PartLogElement::MERGE_PARTS, execution_status, stopwatch.elapsed(),
|
||||
entry.new_part_name, part, parts, merge_mutate_entry.get(), std::move(profile_counters_snapshot));
|
||||
};
|
||||
|
||||
if (storage_settings_ptr->always_fetch_merged_part)
|
||||
{
|
||||
LOG_INFO(log, "Will fetch part {} because setting 'always_fetch_merged_part' is true", entry.new_part_name);
|
||||
return PrepareResult{
|
||||
.prepared_successfully = false,
|
||||
.need_to_check_missing_part_in_fetch = true,
|
||||
.part_log_writer = {}
|
||||
.part_log_writer = part_log_writer,
|
||||
};
|
||||
}
|
||||
|
||||
@ -68,7 +77,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare()
|
||||
return PrepareResult{
|
||||
.prepared_successfully = false,
|
||||
.need_to_check_missing_part_in_fetch = false,
|
||||
.part_log_writer = {}
|
||||
.part_log_writer = part_log_writer,
|
||||
};
|
||||
}
|
||||
|
||||
@ -88,7 +97,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare()
|
||||
return PrepareResult{
|
||||
.prepared_successfully = false,
|
||||
.need_to_check_missing_part_in_fetch = true,
|
||||
.part_log_writer = {}
|
||||
.part_log_writer = part_log_writer,
|
||||
};
|
||||
}
|
||||
}
|
||||
@ -107,7 +116,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare()
|
||||
return PrepareResult{
|
||||
.prepared_successfully = false,
|
||||
.need_to_check_missing_part_in_fetch = true,
|
||||
.part_log_writer = {}
|
||||
.part_log_writer = part_log_writer,
|
||||
};
|
||||
}
|
||||
|
||||
@ -127,7 +136,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare()
|
||||
return PrepareResult{
|
||||
.prepared_successfully = false,
|
||||
.need_to_check_missing_part_in_fetch = true,
|
||||
.part_log_writer = {}
|
||||
.part_log_writer = part_log_writer,
|
||||
};
|
||||
}
|
||||
|
||||
@ -139,7 +148,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare()
|
||||
return PrepareResult{
|
||||
.prepared_successfully = false,
|
||||
.need_to_check_missing_part_in_fetch = false,
|
||||
.part_log_writer = {}
|
||||
.part_log_writer = part_log_writer,
|
||||
};
|
||||
}
|
||||
|
||||
@ -167,7 +176,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare()
|
||||
return PrepareResult{
|
||||
.prepared_successfully = false,
|
||||
.need_to_check_missing_part_in_fetch = false,
|
||||
.part_log_writer = {}
|
||||
.part_log_writer = part_log_writer,
|
||||
};
|
||||
}
|
||||
}
|
||||
@ -227,7 +236,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare()
|
||||
return PrepareResult{
|
||||
.prepared_successfully = false,
|
||||
.need_to_check_missing_part_in_fetch = false,
|
||||
.part_log_writer = {}
|
||||
.part_log_writer = part_log_writer,
|
||||
};
|
||||
}
|
||||
|
||||
@ -267,7 +276,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare()
|
||||
return PrepareResult{
|
||||
.prepared_successfully = false,
|
||||
.need_to_check_missing_part_in_fetch = false,
|
||||
.part_log_writer = {}
|
||||
.part_log_writer = part_log_writer,
|
||||
};
|
||||
}
|
||||
else if (storage.findReplicaHavingCoveringPart(entry.new_part_name, /* active */ false))
|
||||
@ -284,7 +293,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare()
|
||||
return PrepareResult{
|
||||
.prepared_successfully = false,
|
||||
.need_to_check_missing_part_in_fetch = true,
|
||||
.part_log_writer = {}
|
||||
.part_log_writer = part_log_writer,
|
||||
};
|
||||
}
|
||||
else
|
||||
@ -311,7 +320,6 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare()
|
||||
task_context);
|
||||
|
||||
transaction_ptr = std::make_unique<MergeTreeData::Transaction>(storage, NO_TRANSACTION_RAW);
|
||||
stopwatch_ptr = std::make_unique<Stopwatch>();
|
||||
|
||||
merge_task = storage.merger_mutator.mergePartsToTemporaryPart(
|
||||
future_merged_part,
|
||||
@ -333,13 +341,11 @@ ReplicatedMergeMutateTaskBase::PrepareResult MergeFromLogEntryTask::prepare()
|
||||
for (auto & item : future_merged_part->parts)
|
||||
priority.value += item->getBytesOnDisk();
|
||||
|
||||
return {true, true, [this, stopwatch = *stopwatch_ptr] (const ExecutionStatus & execution_status)
|
||||
{
|
||||
auto profile_counters_snapshot = std::make_shared<ProfileEvents::Counters::Snapshot>(profile_counters.getPartiallyAtomicSnapshot());
|
||||
storage.writePartLog(
|
||||
PartLogElement::MERGE_PARTS, execution_status, stopwatch.elapsed(),
|
||||
entry.new_part_name, part, parts, merge_mutate_entry.get(), std::move(profile_counters_snapshot));
|
||||
}};
|
||||
return PrepareResult{
|
||||
.prepared_successfully = true,
|
||||
.need_to_check_missing_part_in_fetch = true,
|
||||
.part_log_writer = part_log_writer,
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
|
@ -112,9 +112,14 @@ void MergeTreeSink::consume(Chunk chunk)
|
||||
}
|
||||
}
|
||||
|
||||
size_t max_insert_delayed_streams_for_parallel_write = DEFAULT_DELAYED_STREAMS_FOR_PARALLEL_WRITE;
|
||||
if (!support_parallel_write || settings.max_insert_delayed_streams_for_parallel_write.changed)
|
||||
size_t max_insert_delayed_streams_for_parallel_write;
|
||||
|
||||
if (settings.max_insert_delayed_streams_for_parallel_write.changed)
|
||||
max_insert_delayed_streams_for_parallel_write = settings.max_insert_delayed_streams_for_parallel_write;
|
||||
else if (support_parallel_write)
|
||||
max_insert_delayed_streams_for_parallel_write = DEFAULT_DELAYED_STREAMS_FOR_PARALLEL_WRITE;
|
||||
else
|
||||
max_insert_delayed_streams_for_parallel_write = 0;
|
||||
|
||||
/// In case of too much columns/parts in block, flush explicitly.
|
||||
streams += temp_part.streams.size();
|
||||
|
@ -20,6 +20,22 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare()
|
||||
const auto storage_settings_ptr = storage.getSettings();
|
||||
LOG_TRACE(log, "Executing log entry to mutate part {} to {}", source_part_name, entry.new_part_name);
|
||||
|
||||
new_part_info = MergeTreePartInfo::fromPartName(entry.new_part_name, storage.format_version);
|
||||
|
||||
future_mutated_part = std::make_shared<FutureMergedMutatedPart>();
|
||||
future_mutated_part->name = entry.new_part_name;
|
||||
future_mutated_part->uuid = entry.new_part_uuid;
|
||||
future_mutated_part->part_info = new_part_info;
|
||||
|
||||
stopwatch_ptr = std::make_unique<Stopwatch>();
|
||||
auto part_log_writer = [this](const ExecutionStatus & execution_status)
|
||||
{
|
||||
auto profile_counters_snapshot = std::make_shared<ProfileEvents::Counters::Snapshot>(profile_counters.getPartiallyAtomicSnapshot());
|
||||
storage.writePartLog(
|
||||
PartLogElement::MUTATE_PART, execution_status, stopwatch_ptr->elapsed(),
|
||||
entry.new_part_name, new_part, future_mutated_part->parts, merge_mutate_entry.get(), std::move(profile_counters_snapshot));
|
||||
};
|
||||
|
||||
MergeTreeData::DataPartPtr source_part = storage.getActiveContainingPart(source_part_name);
|
||||
if (!source_part)
|
||||
{
|
||||
@ -29,10 +45,13 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare()
|
||||
return PrepareResult{
|
||||
.prepared_successfully = false,
|
||||
.need_to_check_missing_part_in_fetch = true,
|
||||
.part_log_writer = {}
|
||||
.part_log_writer = part_log_writer,
|
||||
};
|
||||
}
|
||||
|
||||
future_mutated_part->parts.push_back(source_part);
|
||||
future_mutated_part->part_format = source_part->getFormat();
|
||||
|
||||
if (source_part->name != source_part_name)
|
||||
{
|
||||
LOG_WARNING(log,
|
||||
@ -44,7 +63,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare()
|
||||
return PrepareResult{
|
||||
.prepared_successfully = false,
|
||||
.need_to_check_missing_part_in_fetch = true,
|
||||
.part_log_writer = {}
|
||||
.part_log_writer = part_log_writer,
|
||||
};
|
||||
}
|
||||
|
||||
@ -63,7 +82,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare()
|
||||
return PrepareResult{
|
||||
.prepared_successfully = false,
|
||||
.need_to_check_missing_part_in_fetch = true,
|
||||
.part_log_writer = {}
|
||||
.part_log_writer = part_log_writer,
|
||||
};
|
||||
}
|
||||
}
|
||||
@ -84,13 +103,12 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare()
|
||||
return PrepareResult{
|
||||
.prepared_successfully = false,
|
||||
.need_to_check_missing_part_in_fetch = true,
|
||||
.part_log_writer = {}
|
||||
.part_log_writer = part_log_writer,
|
||||
};
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
new_part_info = MergeTreePartInfo::fromPartName(entry.new_part_name, storage.format_version);
|
||||
Strings mutation_ids;
|
||||
commands = std::make_shared<MutationCommands>(storage.queue.getMutationCommands(source_part, new_part_info.mutation, mutation_ids));
|
||||
LOG_TRACE(log, "Mutating part {} with mutation commands from {} mutations ({}): {}",
|
||||
@ -99,6 +117,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare()
|
||||
/// Once we mutate part, we must reserve space on the same disk, because mutations can possibly create hardlinks.
|
||||
/// Can throw an exception.
|
||||
reserved_space = storage.reserveSpace(estimated_space_for_result, source_part->getDataPartStorage());
|
||||
future_mutated_part->updatePath(storage, reserved_space.get());
|
||||
|
||||
table_lock_holder = storage.lockForShare(
|
||||
RWLockImpl::NO_QUERY, storage_settings_ptr->lock_acquire_timeout_for_background_operations);
|
||||
@ -106,14 +125,6 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare()
|
||||
|
||||
transaction_ptr = std::make_unique<MergeTreeData::Transaction>(storage, NO_TRANSACTION_RAW);
|
||||
|
||||
future_mutated_part = std::make_shared<FutureMergedMutatedPart>();
|
||||
future_mutated_part->name = entry.new_part_name;
|
||||
future_mutated_part->uuid = entry.new_part_uuid;
|
||||
future_mutated_part->parts.push_back(source_part);
|
||||
future_mutated_part->part_info = new_part_info;
|
||||
future_mutated_part->updatePath(storage, reserved_space.get());
|
||||
future_mutated_part->part_format = source_part->getFormat();
|
||||
|
||||
if (storage_settings_ptr->allow_remote_fs_zero_copy_replication)
|
||||
{
|
||||
if (auto disk = reserved_space->getDisk(); disk->supportZeroCopyReplication())
|
||||
@ -124,7 +135,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare()
|
||||
return PrepareResult{
|
||||
.prepared_successfully = false,
|
||||
.need_to_check_missing_part_in_fetch = true,
|
||||
.part_log_writer = {}
|
||||
.part_log_writer = part_log_writer,
|
||||
};
|
||||
}
|
||||
|
||||
@ -163,7 +174,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare()
|
||||
return PrepareResult{
|
||||
.prepared_successfully = false,
|
||||
.need_to_check_missing_part_in_fetch = false,
|
||||
.part_log_writer = {}
|
||||
.part_log_writer = part_log_writer,
|
||||
};
|
||||
}
|
||||
else if (storage.findReplicaHavingCoveringPart(entry.new_part_name, /* active */ false))
|
||||
@ -182,7 +193,7 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare()
|
||||
return PrepareResult{
|
||||
.prepared_successfully = false,
|
||||
.need_to_check_missing_part_in_fetch = true,
|
||||
.part_log_writer = {}
|
||||
.part_log_writer = part_log_writer,
|
||||
};
|
||||
}
|
||||
else
|
||||
@ -201,8 +212,6 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare()
|
||||
future_mutated_part,
|
||||
task_context);
|
||||
|
||||
stopwatch_ptr = std::make_unique<Stopwatch>();
|
||||
|
||||
mutate_task = storage.merger_mutator.mutatePartToTemporaryPart(
|
||||
future_mutated_part, metadata_snapshot, commands, merge_mutate_entry.get(),
|
||||
entry.create_time, task_context, NO_TRANSACTION_PTR, reserved_space, table_lock_holder);
|
||||
@ -211,13 +220,11 @@ ReplicatedMergeMutateTaskBase::PrepareResult MutateFromLogEntryTask::prepare()
|
||||
for (auto & item : future_mutated_part->parts)
|
||||
priority.value += item->getBytesOnDisk();
|
||||
|
||||
return {true, true, [this] (const ExecutionStatus & execution_status)
|
||||
{
|
||||
auto profile_counters_snapshot = std::make_shared<ProfileEvents::Counters::Snapshot>(profile_counters.getPartiallyAtomicSnapshot());
|
||||
storage.writePartLog(
|
||||
PartLogElement::MUTATE_PART, execution_status, stopwatch_ptr->elapsed(),
|
||||
entry.new_part_name, new_part, future_mutated_part->parts, merge_mutate_entry.get(), std::move(profile_counters_snapshot));
|
||||
}};
|
||||
return PrepareResult{
|
||||
.prepared_successfully = true,
|
||||
.need_to_check_missing_part_in_fetch = true,
|
||||
.part_log_writer = part_log_writer,
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
|
@ -164,8 +164,16 @@ bool ReplicatedMergeMutateTaskBase::executeImpl()
|
||||
|
||||
auto execute_fetch = [&] (bool need_to_check_missing_part) -> bool
|
||||
{
|
||||
if (storage.executeFetch(entry, need_to_check_missing_part))
|
||||
return remove_processed_entry();
|
||||
try
|
||||
{
|
||||
if (storage.executeFetch(entry, need_to_check_missing_part))
|
||||
return remove_processed_entry();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
part_log_writer(ExecutionStatus::fromCurrentException("", true));
|
||||
throw;
|
||||
}
|
||||
|
||||
return false;
|
||||
};
|
||||
@ -205,8 +213,7 @@ bool ReplicatedMergeMutateTaskBase::executeImpl()
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
if (part_log_writer)
|
||||
part_log_writer(ExecutionStatus::fromCurrentException("", true));
|
||||
part_log_writer(ExecutionStatus::fromCurrentException("", true));
|
||||
throw;
|
||||
}
|
||||
|
||||
@ -214,17 +221,8 @@ bool ReplicatedMergeMutateTaskBase::executeImpl()
|
||||
}
|
||||
case State::NEED_FINALIZE :
|
||||
{
|
||||
try
|
||||
{
|
||||
if (!finalize(part_log_writer))
|
||||
return execute_fetch(/* need_to_check_missing = */true);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
if (part_log_writer)
|
||||
part_log_writer(ExecutionStatus::fromCurrentException("", true));
|
||||
throw;
|
||||
}
|
||||
if (!finalize(part_log_writer))
|
||||
return execute_fetch(/* need_to_check_missing = */true);
|
||||
|
||||
return remove_processed_entry();
|
||||
}
|
||||
|
@ -323,6 +323,9 @@ void ReplicatedMergeTreeSinkImpl<async_insert>::consume(Chunk chunk)
|
||||
if (!temp_part.part)
|
||||
continue;
|
||||
|
||||
if (!support_parallel_write && temp_part.part->getDataPartStorage().supportParallelWrite())
|
||||
support_parallel_write = true;
|
||||
|
||||
BlockIDsType block_id;
|
||||
|
||||
if constexpr (async_insert)
|
||||
@ -365,9 +368,13 @@ void ReplicatedMergeTreeSinkImpl<async_insert>::consume(Chunk chunk)
|
||||
profile_events_scope.reset();
|
||||
UInt64 elapsed_ns = watch.elapsed();
|
||||
|
||||
size_t max_insert_delayed_streams_for_parallel_write = DEFAULT_DELAYED_STREAMS_FOR_PARALLEL_WRITE;
|
||||
if (!support_parallel_write || settings.max_insert_delayed_streams_for_parallel_write.changed)
|
||||
size_t max_insert_delayed_streams_for_parallel_write;
|
||||
if (settings.max_insert_delayed_streams_for_parallel_write.changed)
|
||||
max_insert_delayed_streams_for_parallel_write = settings.max_insert_delayed_streams_for_parallel_write;
|
||||
else if (support_parallel_write)
|
||||
max_insert_delayed_streams_for_parallel_write = DEFAULT_DELAYED_STREAMS_FOR_PARALLEL_WRITE;
|
||||
else
|
||||
max_insert_delayed_streams_for_parallel_write = 0;
|
||||
|
||||
/// In case of too much columns/parts in block, flush explicitly.
|
||||
streams += temp_part.streams.size();
|
||||
|
@ -41,6 +41,10 @@
|
||||
#include <Disks/IO/ReadBufferFromAzureBlobStorage.h>
|
||||
#include <Disks/IO/WriteBufferFromAzureBlobStorage.h>
|
||||
|
||||
#include <filesystem>
|
||||
|
||||
namespace fs = std::filesystem;
|
||||
|
||||
using namespace Azure::Storage::Blobs;
|
||||
|
||||
namespace CurrentMetrics
|
||||
|
@ -1,5 +1,6 @@
|
||||
#include <Interpreters/InterpreterInsertQuery.h>
|
||||
#include <Interpreters/InterpreterSelectQuery.h>
|
||||
#include <Interpreters/InterpreterSelectQueryAnalyzer.h>
|
||||
#include <Interpreters/addMissingDefaults.h>
|
||||
#include <Interpreters/castColumn.h>
|
||||
#include <Interpreters/evaluateConstantExpression.h>
|
||||
@ -24,6 +25,7 @@
|
||||
#include <Storages/AlterCommands.h>
|
||||
#include <Storages/StorageBuffer.h>
|
||||
#include <Storages/StorageFactory.h>
|
||||
#include <Storages/StorageValues.h>
|
||||
#include <Storages/checkAndGetLiteralArgument.h>
|
||||
#include <base/getThreadId.h>
|
||||
#include <base/range.h>
|
||||
@ -363,14 +365,31 @@ void StorageBuffer::read(
|
||||
/** If the sources from the table were processed before some non-initial stage of query execution,
|
||||
* then sources from the buffers must also be wrapped in the processing pipeline before the same stage.
|
||||
*/
|
||||
/// TODO: Find a way to support projections for StorageBuffer
|
||||
if (processed_stage > QueryProcessingStage::FetchColumns)
|
||||
{
|
||||
/// TODO: Find a way to support projections for StorageBuffer
|
||||
auto interpreter = InterpreterSelectQuery(
|
||||
query_info.query, local_context, std::move(pipe_from_buffers),
|
||||
SelectQueryOptions(processed_stage));
|
||||
interpreter.addStorageLimits(*query_info.storage_limits);
|
||||
interpreter.buildQueryPlan(buffers_plan);
|
||||
if (local_context->getSettingsRef().allow_experimental_analyzer)
|
||||
{
|
||||
auto storage = std::make_shared<StorageValues>(
|
||||
getStorageID(),
|
||||
storage_snapshot->getAllColumnsDescription(),
|
||||
std::move(pipe_from_buffers),
|
||||
*getVirtualsPtr());
|
||||
|
||||
auto interpreter = InterpreterSelectQueryAnalyzer(
|
||||
query_info.query, local_context, storage,
|
||||
SelectQueryOptions(processed_stage));
|
||||
interpreter.addStorageLimits(*query_info.storage_limits);
|
||||
buffers_plan = std::move(interpreter).extractQueryPlan();
|
||||
}
|
||||
else
|
||||
{
|
||||
auto interpreter = InterpreterSelectQuery(
|
||||
query_info.query, local_context, std::move(pipe_from_buffers),
|
||||
SelectQueryOptions(processed_stage));
|
||||
interpreter.addStorageLimits(*query_info.storage_limits);
|
||||
interpreter.buildQueryPlan(buffers_plan);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -885,6 +885,7 @@ SelectQueryInfo ReadFromMerge::getModifiedQueryInfo(const ContextMutablePtr & mo
|
||||
if (modified_query_info.table_expression)
|
||||
{
|
||||
auto replacement_table_expression = std::make_shared<TableNode>(storage, storage_lock, storage_snapshot_);
|
||||
replacement_table_expression->setAlias(modified_query_info.table_expression->getAlias());
|
||||
if (query_info.table_expression_modifiers)
|
||||
replacement_table_expression->setTableExpressionModifiers(*query_info.table_expression_modifiers);
|
||||
|
||||
@ -1025,7 +1026,7 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources(
|
||||
const auto & [database_name, storage, _, table_name] = storage_with_lock;
|
||||
bool allow_experimental_analyzer = context->getSettingsRef().allow_experimental_analyzer;
|
||||
auto storage_stage
|
||||
= storage->getQueryProcessingStage(context, QueryProcessingStage::Complete, storage_snapshot_, modified_query_info);
|
||||
= storage->getQueryProcessingStage(context, processed_stage, storage_snapshot_, modified_query_info);
|
||||
|
||||
builder = plan.buildQueryPipeline(
|
||||
QueryPlanOptimizationSettings::fromContext(context), BuildQueryPipelineSettings::fromContext(context));
|
||||
@ -1052,40 +1053,80 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources(
|
||||
|
||||
Block pipe_header = builder->getHeader();
|
||||
|
||||
if (has_database_virtual_column && common_header.has("_database") && !pipe_header.has("_database"))
|
||||
if (allow_experimental_analyzer)
|
||||
{
|
||||
ColumnWithTypeAndName column;
|
||||
column.name = "_database";
|
||||
column.type = std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>());
|
||||
column.column = column.type->createColumnConst(0, Field(database_name));
|
||||
String table_alias = modified_query_info.query_tree->as<QueryNode>()->getJoinTree()->as<TableNode>()->getAlias();
|
||||
|
||||
auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column));
|
||||
auto adding_column_actions = std::make_shared<ExpressionActions>(
|
||||
std::move(adding_column_dag), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes));
|
||||
String database_column = table_alias.empty() || processed_stage == QueryProcessingStage::FetchColumns ? "_database" : table_alias + "._database";
|
||||
String table_column = table_alias.empty() || processed_stage == QueryProcessingStage::FetchColumns ? "_table" : table_alias + "._table";
|
||||
|
||||
builder->addSimpleTransform([&](const Block & stream_header)
|
||||
{ return std::make_shared<ExpressionTransform>(stream_header, adding_column_actions); });
|
||||
if (has_database_virtual_column && common_header.has(database_column)
|
||||
&& (storage_stage == QueryProcessingStage::FetchColumns || !pipe_header.has("'" + database_name + "'_String")))
|
||||
{
|
||||
ColumnWithTypeAndName column;
|
||||
column.name = database_column;
|
||||
column.type = std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>());
|
||||
column.column = column.type->createColumnConst(0, Field(database_name));
|
||||
|
||||
auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column));
|
||||
auto adding_column_actions = std::make_shared<ExpressionActions>(
|
||||
std::move(adding_column_dag), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes));
|
||||
|
||||
builder->addSimpleTransform([&](const Block & stream_header)
|
||||
{ return std::make_shared<ExpressionTransform>(stream_header, adding_column_actions); });
|
||||
}
|
||||
|
||||
if (has_table_virtual_column && common_header.has(table_column)
|
||||
&& (storage_stage == QueryProcessingStage::FetchColumns || !pipe_header.has("'" + table_name + "'_String")))
|
||||
{
|
||||
ColumnWithTypeAndName column;
|
||||
column.name = table_column;
|
||||
column.type = std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>());
|
||||
column.column = column.type->createColumnConst(0, Field(table_name));
|
||||
|
||||
auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column));
|
||||
auto adding_column_actions = std::make_shared<ExpressionActions>(
|
||||
std::move(adding_column_dag), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes));
|
||||
|
||||
builder->addSimpleTransform([&](const Block & stream_header)
|
||||
{ return std::make_shared<ExpressionTransform>(stream_header, adding_column_actions); });
|
||||
}
|
||||
}
|
||||
|
||||
if (has_table_virtual_column && common_header.has("_table") && !pipe_header.has("_table"))
|
||||
else
|
||||
{
|
||||
ColumnWithTypeAndName column;
|
||||
column.name = "_table";
|
||||
column.type = std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>());
|
||||
column.column = column.type->createColumnConst(0, Field(table_name));
|
||||
if (has_database_virtual_column && common_header.has("_database") && !pipe_header.has("_database"))
|
||||
{
|
||||
ColumnWithTypeAndName column;
|
||||
column.name = "_database";
|
||||
column.type = std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>());
|
||||
column.column = column.type->createColumnConst(0, Field(database_name));
|
||||
|
||||
auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column));
|
||||
auto adding_column_actions = std::make_shared<ExpressionActions>(
|
||||
std::move(adding_column_dag), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes));
|
||||
auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column));
|
||||
auto adding_column_actions = std::make_shared<ExpressionActions>(
|
||||
std::move(adding_column_dag), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes));
|
||||
builder->addSimpleTransform([&](const Block & stream_header)
|
||||
{ return std::make_shared<ExpressionTransform>(stream_header, adding_column_actions); });
|
||||
}
|
||||
|
||||
builder->addSimpleTransform([&](const Block & stream_header)
|
||||
{ return std::make_shared<ExpressionTransform>(stream_header, adding_column_actions); });
|
||||
if (has_table_virtual_column && common_header.has("_table") && !pipe_header.has("_table"))
|
||||
{
|
||||
ColumnWithTypeAndName column;
|
||||
column.name = "_table";
|
||||
column.type = std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>());
|
||||
column.column = column.type->createColumnConst(0, Field(table_name));
|
||||
|
||||
auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column));
|
||||
auto adding_column_actions = std::make_shared<ExpressionActions>(
|
||||
std::move(adding_column_dag), ExpressionActionsSettings::fromContext(context, CompileExpressions::yes));
|
||||
builder->addSimpleTransform([&](const Block & stream_header)
|
||||
{ return std::make_shared<ExpressionTransform>(stream_header, adding_column_actions); });
|
||||
}
|
||||
}
|
||||
|
||||
/// Subordinary tables could have different but convertible types, like numeric types of different width.
|
||||
/// We must return streams with structure equals to structure of Merge table.
|
||||
convertAndFilterSourceStream(
|
||||
header, modified_query_info, storage_snapshot_, aliases, row_policy_data_opt, context, *builder, processed_stage);
|
||||
header, modified_query_info, storage_snapshot_, aliases, row_policy_data_opt, context, *builder, storage_stage);
|
||||
}
|
||||
|
||||
return builder;
|
||||
@ -1116,13 +1157,13 @@ QueryPlan ReadFromMerge::createPlanForTable(
|
||||
bool allow_experimental_analyzer = modified_context->getSettingsRef().allow_experimental_analyzer;
|
||||
|
||||
auto storage_stage = storage->getQueryProcessingStage(modified_context,
|
||||
QueryProcessingStage::Complete,
|
||||
processed_stage,
|
||||
storage_snapshot_,
|
||||
modified_query_info);
|
||||
|
||||
QueryPlan plan;
|
||||
|
||||
if (processed_stage <= storage_stage || (allow_experimental_analyzer && processed_stage == QueryProcessingStage::FetchColumns))
|
||||
if (processed_stage <= storage_stage)
|
||||
{
|
||||
/// If there are only virtual columns in query, you must request at least one other column.
|
||||
if (real_column_names.empty())
|
||||
@ -1167,7 +1208,7 @@ QueryPlan ReadFromMerge::createPlanForTable(
|
||||
row_policy_data_opt->addStorageFilter(source_step_with_filter);
|
||||
}
|
||||
}
|
||||
else if (processed_stage > storage_stage || (allow_experimental_analyzer && processed_stage != QueryProcessingStage::FetchColumns))
|
||||
else if (processed_stage > storage_stage || allow_experimental_analyzer)
|
||||
{
|
||||
/// Maximum permissible parallelism is streams_num
|
||||
modified_context->setSetting("max_threads", streams_num);
|
||||
|
@ -1479,8 +1479,11 @@ UInt64 StorageMergeTree::getCurrentMutationVersion(
|
||||
|
||||
size_t StorageMergeTree::clearOldMutations(bool truncate)
|
||||
{
|
||||
size_t finished_mutations_to_keep = truncate ? 0 : getSettings()->finished_mutations_to_keep;
|
||||
size_t finished_mutations_to_keep = getSettings()->finished_mutations_to_keep;
|
||||
if (!truncate && !finished_mutations_to_keep)
|
||||
return 0;
|
||||
|
||||
finished_mutations_to_keep = truncate ? 0 : finished_mutations_to_keep;
|
||||
std::vector<MergeTreeMutationEntry> mutations_to_delete;
|
||||
{
|
||||
std::lock_guard lock(currently_processing_in_background_mutex);
|
||||
@ -1899,8 +1902,6 @@ void StorageMergeTree::dropPart(const String & part_name, bool detach, ContextPt
|
||||
}
|
||||
}
|
||||
|
||||
/// Old part objects is needed to be destroyed before clearing them from filesystem.
|
||||
clearOldMutations(true);
|
||||
clearOldPartsFromFilesystem();
|
||||
clearEmptyParts();
|
||||
}
|
||||
@ -1985,8 +1986,6 @@ void StorageMergeTree::dropPartition(const ASTPtr & partition, bool detach, Cont
|
||||
}
|
||||
}
|
||||
|
||||
/// Old parts are needed to be destroyed before clearing them from filesystem.
|
||||
clearOldMutations(true);
|
||||
clearOldPartsFromFilesystem();
|
||||
clearEmptyParts();
|
||||
}
|
||||
|
@ -4,27 +4,28 @@
|
||||
|
||||
#if USE_AWS_S3
|
||||
|
||||
#include <Core/Types.h>
|
||||
|
||||
#include <Compression/CompressionInfo.h>
|
||||
|
||||
#include <Storages/IStorage.h>
|
||||
#include <Storages/StorageS3Settings.h>
|
||||
|
||||
#include <Processors/SourceWithKeyCondition.h>
|
||||
#include <Processors/Executors/PullingPipelineExecutor.h>
|
||||
#include <Processors/Formats/IInputFormat.h>
|
||||
#include <Poco/URI.h>
|
||||
#include <IO/S3/getObjectInfo.h>
|
||||
#include <Core/Types.h>
|
||||
#include <IO/CompressionMethod.h>
|
||||
#include <IO/S3/BlobStorageLogWriter.h>
|
||||
#include <IO/S3/getObjectInfo.h>
|
||||
#include <IO/SeekableReadBuffer.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Common/threadPoolCallbackRunner.h>
|
||||
#include <Processors/Executors/PullingPipelineExecutor.h>
|
||||
#include <Processors/Formats/IInputFormat.h>
|
||||
#include <Processors/SourceWithKeyCondition.h>
|
||||
#include <Storages/Cache/SchemaCache.h>
|
||||
#include <Storages/IStorage.h>
|
||||
#include <Storages/SelectQueryInfo.h>
|
||||
#include <Storages/StorageConfiguration.h>
|
||||
#include <Storages/StorageS3Settings.h>
|
||||
#include <Storages/prepareReadingFromFormat.h>
|
||||
#include <IO/S3/BlobStorageLogWriter.h>
|
||||
#include <Poco/URI.h>
|
||||
#include <Common/threadPoolCallbackRunner.h>
|
||||
|
||||
#include <filesystem>
|
||||
|
||||
namespace fs = std::filesystem;
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
@ -2,6 +2,7 @@
#include <Storages/ColumnsDescription.h>
#include <Storages/StorageValues.h>
#include <Processors/Sources/SourceFromSingleChunk.h>
#include <Processors/Transforms/ExpressionTransform.h>
#include <QueryPipeline/Pipe.h>

@ -21,6 +22,19 @@ StorageValues::StorageValues(
setVirtuals(std::move(virtuals_));
}

StorageValues::StorageValues(
const StorageID & table_id_,
const ColumnsDescription & columns_,
Pipe prepared_pipe_,
VirtualColumnsDescription virtuals_)
: IStorage(table_id_), prepared_pipe(std::move(prepared_pipe_))
{
StorageInMemoryMetadata storage_metadata;
storage_metadata.setColumns(columns_);
setInMemoryMetadata(storage_metadata);
setVirtuals(std::move(virtuals_));
}

Pipe StorageValues::read(
const Names & column_names,
const StorageSnapshotPtr & storage_snapshot,

@ -32,6 +46,25 @@ Pipe StorageValues::read(
{
storage_snapshot->check(column_names);

if (!prepared_pipe.empty())
{
auto dag = std::make_shared<ActionsDAG>(prepared_pipe.getHeader().getColumnsWithTypeAndName());
ActionsDAG::NodeRawConstPtrs outputs;
outputs.reserve(column_names.size());
for (const auto & name : column_names)
outputs.push_back(dag->getOutputs()[prepared_pipe.getHeader().getPositionByName(name)]);

dag->getOutputs().swap(outputs);
auto expression = std::make_shared<ExpressionActions>(dag);

prepared_pipe.addSimpleTransform([&](const Block & header)
{
return std::make_shared<ExpressionTransform>(header, expression);
});

return std::move(prepared_pipe);
}

/// Get only required columns.
Block block;
for (const auto & name : column_names)

@ -20,6 +20,12 @@ public:
const Block & res_block_,
VirtualColumnsDescription virtuals_ = {});

StorageValues(
const StorageID & table_id_,
const ColumnsDescription & columns_,
Pipe prepared_pipe_,
VirtualColumnsDescription virtuals_ = {});

std::string getName() const override { return "Values"; }

Pipe read(

@ -39,6 +45,7 @@ public:

private:
Block res_block;
Pipe prepared_pipe;
};

}
@ -1,10 +1,8 @@
00223_shard_distributed_aggregation_memory_efficient
00717_merge_and_distributed
00725_memory_tracking
01062_pm_all_join_with_block_continuation
01083_expressions_in_engine_arguments
01155_rename_move_materialized_view
01584_distributed_buffer_cannot_find_column
01624_soft_constraints
01747_join_view_filter_dictionary
01925_join_materialized_columns
@ -115,6 +115,7 @@ def main():
"core.zst": workspace_path / "core.zst",
"dmesg.log": workspace_path / "dmesg.log",
"fatal.log": workspace_path / "fatal.log",
"stderr.log": workspace_path / "stderr.log",
}

compressed_server_log_path = workspace_path / "server.log.zst"
@ -2900,7 +2900,7 @@ def get_additional_client_options(args):
if args.client_option:
client_options = " ".join("--" + option for option in args.client_option)
if "CLICKHOUSE_CLIENT_OPT" in os.environ:
return os.environ["CLICKHOUSE_CLIENT_OPT"] + client_options
return os.environ["CLICKHOUSE_CLIENT_OPT"] + " " + client_options
else:
return client_options
else:

@ -3360,7 +3360,6 @@ if __name__ == "__main__":
else:
os.environ["CLICKHOUSE_CLIENT_OPT"] = ""

os.environ["CLICKHOUSE_CLIENT_OPT"] += get_additional_client_options(args)
if args.secure:
os.environ["CLICKHOUSE_CLIENT_OPT"] += " --secure "
@ -1,5 +1,8 @@
<clickhouse>
<remote_servers>
<!-- Remove the default remote server to avoid people depending on its hardcoded config
Use test_shard_localhost instead -->
<default remove="remove"></default>
<test_shard_localhost>
<shard>
<replica>

@ -29,4 +29,7 @@
</access_control_improvements>

<custom_cached_disks_base_directory replace="replace">/</custom_cached_disks_base_directory>

<!-- Remove the default remote server to avoid people depending on its hardcoded config -->
<remote_servers remove="remove"></remote_servers>
</clickhouse>
@ -0,0 +1,12 @@
<clickhouse>
<remote_servers>
<default>
<shard>
<replica>
<host>node1</host>
<port>9000</port>
</replica>
</shard>
</default>
</remote_servers>
</clickhouse>

@ -6,7 +6,10 @@ from helpers.test_tools import TSV
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
"node",
main_configs=["configs/config.d/disable_access_control_improvements.xml"],
main_configs=[
"configs/config.d/disable_access_control_improvements.xml",
"configs/remote_servers.xml",
],
user_configs=[
"configs/users.d/another_user.xml",
],
@ -1,3 +1,4 @@
<clickhouse>
<max_server_memory_usage>1000</max_server_memory_usage>
<page_cache_size>0</page_cache_size>
</clickhouse>
@ -19,10 +19,10 @@ node_no_backoff = cluster.add_instance(
with_zookeeper=True,
)

REPLICATED_POSPONE_MUTATION_LOG = (
REPLICATED_POSTPONE_MUTATION_LOG = (
"According to exponential backoff policy, put aside this log entry"
)
POSPONE_MUTATION_LOG = (
POSTPONE_MUTATION_LOG = (
"According to exponential backoff policy, do not perform mutations for the part"
)

@ -56,21 +56,36 @@ def started_cluster():


@pytest.mark.parametrize(
("node"),
("node, found_in_log"),
[
(node_with_backoff),
(
node_with_backoff,
True,
),
(
node_no_backoff,
False,
),
],
)
def test_exponential_backoff_with_merge_tree(started_cluster, node):
def test_exponential_backoff_with_merge_tree(started_cluster, node, found_in_log):
prepare_cluster(False)

def check_logs():
if found_in_log:
assert node.wait_for_log_line(POSTPONE_MUTATION_LOG)
# Do not rotate the logs when we are checking the absence of a log message
node.rotate_logs()
else:
# Best effort, but when it fails, then the logs for sure contain the problematic message
assert not node.contains_in_log(POSTPONE_MUTATION_LOG)

# Executing incorrect mutation.
node.query(
"ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM notexist_table) SETTINGS allow_nondeterministic_mutations=1"
)

assert node.wait_for_log_line(POSPONE_MUTATION_LOG)
node.rotate_logs()
check_logs()

node.query("KILL MUTATION WHERE table='test_mutations'")
# Check that after kill new parts mutations are postponing.

@ -78,7 +93,7 @@ def test_exponential_backoff_with_merge_tree(started_cluster, node):
"ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM notexist_table) SETTINGS allow_nondeterministic_mutations=1"
)

assert node.wait_for_log_line(POSPONE_MUTATION_LOG)
check_logs()


def test_exponential_backoff_with_replicated_tree(started_cluster):
@ -88,36 +103,37 @@ def test_exponential_backoff_with_replicated_tree(started_cluster):
"ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM notexist_table) SETTINGS allow_nondeterministic_mutations=1"
)

assert node_with_backoff.wait_for_log_line(REPLICATED_POSPONE_MUTATION_LOG)
assert not node_no_backoff.contains_in_log(REPLICATED_POSPONE_MUTATION_LOG)
assert node_with_backoff.wait_for_log_line(REPLICATED_POSTPONE_MUTATION_LOG)
assert not node_no_backoff.contains_in_log(REPLICATED_POSTPONE_MUTATION_LOG)


@pytest.mark.parametrize(
("node"),
[
(node_with_backoff),
],
)
def test_exponential_backoff_create_dependent_table(started_cluster, node):
def test_exponential_backoff_create_dependent_table(started_cluster):
prepare_cluster(False)

# Executing incorrect mutation.
node.query(
node_with_backoff.query(
"ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM dep_table) SETTINGS allow_nondeterministic_mutations=1"
)

# Creating dependent table for mutation.
node.query("CREATE TABLE dep_table(x UInt32) ENGINE MergeTree() ORDER BY x")
node_with_backoff.query(
"CREATE TABLE dep_table(x UInt32) ENGINE MergeTree() ORDER BY x"
)

retry_count = 100
no_unfinished_mutation = False
for _ in range(0, retry_count):
if node.query("SELECT count() FROM system.mutations WHERE is_done=0") == "0\n":
if (
node_with_backoff.query(
"SELECT count() FROM system.mutations WHERE is_done=0"
)
== "0\n"
):
no_unfinished_mutation = True
break

assert no_unfinished_mutation
node.query("DROP TABLE IF EXISTS dep_table SYNC")
node_with_backoff.query("DROP TABLE IF EXISTS dep_table SYNC")


def test_exponential_backoff_setting_override(started_cluster):

@ -133,7 +149,7 @@ def test_exponential_backoff_setting_override(started_cluster):
node.query(
"ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM dep_table) SETTINGS allow_nondeterministic_mutations=1"
)
assert not node.contains_in_log(POSPONE_MUTATION_LOG)
assert not node.contains_in_log(POSTPONE_MUTATION_LOG)


@pytest.mark.parametrize(

@ -152,14 +168,14 @@ def test_backoff_clickhouse_restart(started_cluster, replicated_table):
"ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM dep_table) SETTINGS allow_nondeterministic_mutations=1"
)
assert node.wait_for_log_line(
REPLICATED_POSPONE_MUTATION_LOG if replicated_table else POSPONE_MUTATION_LOG
REPLICATED_POSTPONE_MUTATION_LOG if replicated_table else POSTPONE_MUTATION_LOG
)

node.restart_clickhouse()
node.rotate_logs()

assert node.wait_for_log_line(
REPLICATED_POSPONE_MUTATION_LOG if replicated_table else POSPONE_MUTATION_LOG
REPLICATED_POSTPONE_MUTATION_LOG if replicated_table else POSTPONE_MUTATION_LOG
)


@ -181,7 +197,7 @@ def test_no_backoff_after_killing_mutation(started_cluster, replicated_table):
# Executing correct mutation.
node.query("ALTER TABLE test_mutations DELETE WHERE x=1")
assert node.wait_for_log_line(
REPLICATED_POSPONE_MUTATION_LOG if replicated_table else POSPONE_MUTATION_LOG
REPLICATED_POSTPONE_MUTATION_LOG if replicated_table else POSTPONE_MUTATION_LOG
)
mutation_ids = node.query("select mutation_id from system.mutations").split()

@ -190,5 +206,5 @@ def test_no_backoff_after_killing_mutation(started_cluster, replicated_table):
)
node.rotate_logs()
assert not node.contains_in_log(
REPLICATED_POSPONE_MUTATION_LOG if replicated_table else POSPONE_MUTATION_LOG
REPLICATED_POSTPONE_MUTATION_LOG if replicated_table else POSTPONE_MUTATION_LOG
)
@ -156,15 +156,33 @@ def test_merge_tree_load_parts_corrupted(started_cluster):
node1.query("SYSTEM WAIT LOADING PARTS mt_load_parts_2")

def check_parts_loading(node, partition, loaded, failed, skipped):
# The whole test produces around 6-700 lines, so 2k is plenty enough.
# wait_for_log_line uses tail + grep, so the overhead is negligible
look_behind_lines = 2000
for min_block, max_block in loaded:
part_name = f"{partition}_{min_block}_{max_block}"
assert node.contains_in_log(f"Loading Active part {part_name}")
assert node.contains_in_log(f"Finished loading Active part {part_name}")
assert node.wait_for_log_line(
f"Loading Active part {part_name}", look_behind_lines=look_behind_lines
)
assert node.wait_for_log_line(
f"Finished loading Active part {part_name}",
look_behind_lines=look_behind_lines,
)

failed_part_names = []
# Let's wait until there is some information about all expected parts, and only
# check the absence of not expected log messages after all expected logs are present
for min_block, max_block in failed:
part_name = f"{partition}_{min_block}_{max_block}"
assert node.contains_in_log(f"Loading Active part {part_name}")
assert not node.contains_in_log(f"Finished loading Active part {part_name}")
failed_part_names.append(part_name)
assert node.wait_for_log_line(
f"Loading Active part {part_name}", look_behind_lines=look_behind_lines
)

for failed_part_name in failed_part_names:
assert not node.contains_in_log(
f"Finished loading Active part {failed_part_name}"
)

for min_block, max_block in skipped:
part_name = f"{partition}_{min_block}_{max_block}"
tests/integration/test_quota/configs/remote_servers.xml (new file, 12 lines)
@ -0,0 +1,12 @@
<clickhouse>
<remote_servers>
<default>
<shard>
<replica>
<host>node1</host>
<port>9000</port>
</replica>
</shard>
</default>
</remote_servers>
</clickhouse>

@ -9,6 +9,7 @@ from helpers.test_tools import assert_eq_with_retry, TSV
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance(
"instance",
main_configs=["configs/remote_servers.xml"],
user_configs=[
"configs/users.d/assign_myquota_to_default_user.xml",
"configs/users.d/drop_default_quota.xml",
@ -131,14 +131,13 @@ def test_all_projection_files_are_dropped_when_part_is_dropped(
"""
)

objects_empty_table = list_objects(cluster)

node.query(
"ALTER TABLE test_all_projection_files_are_dropped ADD projection b_order (SELECT a, b ORDER BY b)"
)
node.query(
"ALTER TABLE test_all_projection_files_are_dropped MATERIALIZE projection b_order"
)
objects_empty_table = list_objects(cluster)

node.query(
"""
@ -0,0 +1,12 @@
<clickhouse>
<remote_servers>
<default>
<shard>
<replica>
<host>node1</host>
<port>9000</port>
</replica>
</shard>
</default>
</remote_servers>
</clickhouse>

@ -6,6 +6,7 @@ from helpers.test_tools import TSV
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
"node",
main_configs=["configs/remote_servers.xml"],
user_configs=[
"configs/another_user.xml",
],
@ -5,7 +5,7 @@
11
40

0
40
41

0

@ -10,6 +10,14 @@ select 43 AS z from remote('127.0.0.{2,3}', system.one) group by 42, 43, 44;
select 11 AS z from (SELECT 2 UNION ALL SELECT 3) group by 42, 43, 44;

select 40 as z from (select * from system.numbers limit 3) group by z WITH TOTALS;
-- NOTE: non-analyzer preserves the original header (i.e. 41) for TOTALS in
-- case of remote queries with GROUP BY some_requested_const and there were no
-- aggregate functions, the query above. But everything else works in the same
-- way, i.e.:
--
-- select 41 as z, count() from remote('127.0.0.{2,3}', system.one) group by z WITH TOTALS;
-- select 41 as z from remote('127.0.0.{2,3}', system.one) group by 1 WITH TOTALS;
--
select 41 as z from remote('127.0.0.{2,3}', system.one) group by z WITH TOTALS;
select count(), 42 AS z from remote('127.0.0.{2,3}', system.one) group by z WITH TOTALS;
select 43 AS z from remote('127.0.0.{2,3}', system.one) group by 42, 43, 44 WITH TOTALS;
@ -48,10 +48,10 @@
{
"i0": "0",
"u0": "0",
"ip": "0",
"in": "0",
"up": "0",
"arr": [],
"ip": "9223372036854775807",
"in": "-9223372036854775808",
"up": "18446744073709551615",
"arr": ["0"],
"tuple": ["0","0"]
},

@ -119,7 +119,7 @@
["0", "0", "9223372036854775807", "-9223372036854775808", "18446744073709551615", ["0"], ["0","0"]]
],

"totals": ["0", "0", "0", "0", "0", [], ["0","0"]],
"totals": ["0", "0", "9223372036854775807", "-9223372036854775808", "18446744073709551615", ["0"], ["0","0"]],

"extremes":
{

@ -180,10 +180,10 @@
{
"i0": 0,
"u0": 0,
"ip": 0,
"in": 0,
"up": 0,
"arr": [],
"ip": 9223372036854775807,
"in": -9223372036854775808,
"up": 18446744073709551615,
"arr": [0],
"tuple": [0,0]
},

@ -251,7 +251,7 @@
[0, 0, 9223372036854775807, -9223372036854775808, 18446744073709551615, [0], [0,0]]
],

"totals": [0, 0, 0, 0, 0, [], [0,0]],
"totals": [0, 0, 9223372036854775807, -9223372036854775808, 18446744073709551615, [0], [0,0]],

"extremes":
{
@ -1,9 +1,3 @@
iphone 1

iphone 1
iphone 1

\N 1
a
b
1

@ -1,6 +1,3 @@
select os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS;
select toNullable(os_name) AS os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS;

DROP TABLE IF EXISTS auto_assign_enum;
DROP TABLE IF EXISTS auto_assign_enum1;
DROP TABLE IF EXISTS auto_assign_enum2;

@ -0,0 +1,6 @@
iphone 1

iphone 1
iphone 1

\N 1
tests/queries/0_stateless/00757_enum_defaults_const.sql (new file, 3 lines)
@ -0,0 +1,3 @@
SET allow_experimental_analyzer=0;
select os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS;
select toNullable(os_name) AS os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS;

@ -0,0 +1,6 @@
iphone 1

iphone 1
iphone 1

iphone 1

@ -0,0 +1,3 @@
SET allow_experimental_analyzer=1;
select os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS;
select toNullable(os_name) AS os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS;

@ -1,7 +1,7 @@
11

0
11
12
12
0
13
0
13
@ -36,9 +36,9 @@ FROM
FROM
(
SELECT
1 AS co,
2 AS co2,
3 AS co3
dummy + 1 AS co,
dummy + 2 AS co2,
dummy + 3 AS co3
)
GROUP BY
co,

@ -8,8 +8,8 @@ EXPLAIN SYNTAX SELECT k, v, d, i FROM (SELECT t.1 AS k, t.2 AS v, runningDiffere
SELECT k, v, d, i FROM (SELECT t.1 AS k, t.2 AS v, runningDifference(v) AS d, runningDifference(cityHash64(t.1)) AS i FROM ( SELECT arrayJoin([('a', 1), ('a', 2), ('a', 3), ('b', 11), ('b', 13), ('b', 15)]) AS t)) WHERE i = 0;

-- https://github.com/ClickHouse/ClickHouse/issues/5682
EXPLAIN SYNTAX SELECT co,co2,co3,num FROM ( SELECT co,co2,co3,count() AS num FROM ( SELECT 1 AS co,2 AS co2 ,3 AS co3 ) GROUP BY cube (co,co2,co3) ) WHERE co!=0 AND co2 !=2;
SELECT co,co2,co3,num FROM ( SELECT co,co2,co3,count() AS num FROM ( SELECT 1 AS co,2 AS co2 ,3 AS co3 ) GROUP BY cube (co,co2,co3) ) WHERE co!=0 AND co2 !=2;
EXPLAIN SYNTAX SELECT co,co2,co3,num FROM ( SELECT co,co2,co3,count() AS num FROM (SELECT dummy+1 AS co,dummy+2 AS co2 ,dummy+3 AS co3) GROUP BY cube (co,co2,co3) ) WHERE co!=0 AND co2 !=2;
SELECT co,co2,co3,num FROM ( SELECT co,co2,co3,count() AS num FROM (SELECT dummy+1 AS co,dummy+2 AS co2 ,dummy+3 AS co3) GROUP BY cube (co,co2,co3) ) WHERE co!=0 AND co2 !=2;

-- https://github.com/ClickHouse/ClickHouse/issues/6734
EXPLAIN SYNTAX SELECT name FROM ( SELECT name FROM system.settings ) ANY INNER JOIN ( SELECT name FROM system.settings ) USING (name) WHERE name = 'enable_optimize_predicate_expression';
@ -10,8 +10,9 @@ ENGINE = TinyLog;
INSERT INTO simple_key_simple_attributes_source_table VALUES(0, 'value_0', 'value_second_0');
INSERT INTO simple_key_simple_attributes_source_table VALUES(1, 'value_1', 'value_second_1');
INSERT INTO simple_key_simple_attributes_source_table VALUES(2, 'value_2', 'value_second_2');
INSERT INTO simple_key_simple_attributes_source_table SELECT number + 10 as id, concat('value_', toString(id)), concat('value_second_', toString(id)) FROM numbers_mt(1_000_000);

{% for dictionary_config in ['', 'SHARDS 16'] -%}
{% for dictionary_config in ['', 'SHARDS 16 SHARD_LOAD_QUEUE_BACKLOG 2'] -%}

DROP DICTIONARY IF EXISTS hashed_array_dictionary_simple_key_simple_attributes;
CREATE DICTIONARY hashed_array_dictionary_simple_key_simple_attributes

@ -42,7 +43,7 @@ SELECT dictGetOrDefault('hashed_array_dictionary_simple_key_simple_attributes',
SELECT 'dictHas';
SELECT dictHas('hashed_array_dictionary_simple_key_simple_attributes', number) FROM system.numbers LIMIT 4;
SELECT 'select all values as input stream';
SELECT * FROM hashed_array_dictionary_simple_key_simple_attributes ORDER BY id;
SELECT * FROM hashed_array_dictionary_simple_key_simple_attributes ORDER BY id LIMIT 3;

DROP DICTIONARY hashed_array_dictionary_simple_key_simple_attributes;
{% endfor %}

@ -61,8 +62,9 @@ ENGINE = TinyLog;
INSERT INTO simple_key_complex_attributes_source_table VALUES(0, 'value_0', 'value_second_0');
INSERT INTO simple_key_complex_attributes_source_table VALUES(1, 'value_1', NULL);
INSERT INTO simple_key_complex_attributes_source_table VALUES(2, 'value_2', 'value_second_2');
INSERT INTO simple_key_complex_attributes_source_table SELECT number + 10 as id, concat('value_', toString(id)), concat('value_second_', toString(id)) FROM numbers_mt(1_000_000);

{% for dictionary_config in ['', 'SHARDS 16'] -%}
{% for dictionary_config in ['', 'SHARDS 16 SHARD_LOAD_QUEUE_BACKLOG 2'] -%}

DROP DICTIONARY IF EXISTS hashed_array_dictionary_simple_key_complex_attributes;
CREATE DICTIONARY hashed_array_dictionary_simple_key_complex_attributes

@ -92,7 +94,7 @@ SELECT dictGetOrDefault('hashed_array_dictionary_simple_key_complex_attributes',
SELECT 'dictHas';
SELECT dictHas('hashed_array_dictionary_simple_key_complex_attributes', number) FROM system.numbers LIMIT 4;
SELECT 'select all values as input stream';
SELECT * FROM hashed_array_dictionary_simple_key_complex_attributes ORDER BY id;
SELECT * FROM hashed_array_dictionary_simple_key_complex_attributes ORDER BY id LIMIT 3;

DROP DICTIONARY hashed_array_dictionary_simple_key_complex_attributes;

@ -12,7 +12,9 @@ INSERT INTO complex_key_simple_attributes_source_table VALUES(0, 'id_key_0', 'va
INSERT INTO complex_key_simple_attributes_source_table VALUES(1, 'id_key_1', 'value_1', 'value_second_1');
INSERT INTO complex_key_simple_attributes_source_table VALUES(2, 'id_key_2', 'value_2', 'value_second_2');

{% for dictionary_config in ['', 'SHARDS 16'] -%}
INSERT INTO complex_key_simple_attributes_source_table SELECT number + 10 as id, concat('id_key_', toString(id)), toString(id), toString(id) FROM numbers_mt(1_000_000);

{% for dictionary_config in ['', 'SHARDS 16 SHARD_LOAD_QUEUE_BACKLOG 2'] -%}

DROP DICTIONARY IF EXISTS hashed_array_dictionary_complex_key_simple_attributes;
CREATE DICTIONARY hashed_array_dictionary_complex_key_simple_attributes

@ -43,7 +45,7 @@ SELECT dictGetOrDefault('hashed_array_dictionary_complex_key_simple_attributes',
SELECT 'dictHas';
SELECT dictHas('hashed_array_dictionary_complex_key_simple_attributes', (number, concat('id_key_', toString(number)))) FROM system.numbers LIMIT 4;
SELECT 'select all values as input stream';
SELECT * FROM hashed_array_dictionary_complex_key_simple_attributes ORDER BY (id, id_key);
SELECT * FROM hashed_array_dictionary_complex_key_simple_attributes ORDER BY (id, id_key) LIMIT 3;

DROP DICTIONARY hashed_array_dictionary_complex_key_simple_attributes;

@ -64,8 +66,9 @@ ENGINE = TinyLog;
INSERT INTO complex_key_complex_attributes_source_table VALUES(0, 'id_key_0', 'value_0', 'value_second_0');
INSERT INTO complex_key_complex_attributes_source_table VALUES(1, 'id_key_1', 'value_1', NULL);
INSERT INTO complex_key_complex_attributes_source_table VALUES(2, 'id_key_2', 'value_2', 'value_second_2');
INSERT INTO complex_key_complex_attributes_source_table SELECT number + 10 as id, concat('id_key_', toString(id)), toString(id), toString(id) FROM numbers_mt(1_000_000);

{% for dictionary_config in ['', 'SHARDS 16'] -%}
{% for dictionary_config in ['', 'SHARDS 16 SHARD_LOAD_QUEUE_BACKLOG 2'] -%}

DROP DICTIONARY IF EXISTS hashed_array_dictionary_complex_key_complex_attributes;
CREATE DICTIONARY hashed_array_dictionary_complex_key_complex_attributes

@ -97,7 +100,7 @@ SELECT dictGetOrDefault('hashed_array_dictionary_complex_key_complex_attributes'
SELECT 'dictHas';
SELECT dictHas('hashed_array_dictionary_complex_key_complex_attributes', (number, concat('id_key_', toString(number)))) FROM system.numbers LIMIT 4;
SELECT 'select all values as input stream';
SELECT * FROM hashed_array_dictionary_complex_key_complex_attributes ORDER BY (id, id_key);
SELECT * FROM hashed_array_dictionary_complex_key_complex_attributes ORDER BY (id, id_key) LIMIT 3;

{% endfor %}
tests/queries/0_stateless/02125_lz4_compression_bug.lib (new executable file, 19 lines)
@ -0,0 +1,19 @@
#!/usr/bin/env bash
# Tags: no-fasttest

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

format=$1

${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS file"
${CLICKHOUSE_CLIENT} --query "CREATE TABLE file (x UInt64) ENGINE = File($format, '${CLICKHOUSE_DATABASE}/data.$format.lz4')"
for size in 10000 100000 1000000 2500000
do
    ${CLICKHOUSE_CLIENT} --query "TRUNCATE TABLE file"
    ${CLICKHOUSE_CLIENT} --query "INSERT INTO file SELECT * FROM numbers($size)"
    ${CLICKHOUSE_CLIENT} --query "SELECT max(x) FROM file"
done

${CLICKHOUSE_CLIENT} --query "DROP TABLE file"
@ -1,45 +0,0 @@
Native
9999
99999
999999
2499999
Values
9999
99999
999999
2499999
JSONCompactEachRow
9999
99999
999999
2499999
TSKV
9999
99999
999999
2499999
TSV
9999
99999
999999
2499999
CSV
9999
99999
999999
2499999
JSONEachRow
9999
99999
999999
2499999
JSONCompactEachRow
9999
99999
999999
2499999
JSONStringsEachRow
9999
99999
999999
2499999

@ -1,21 +0,0 @@
#!/usr/bin/env bash
# Tags: no-parallel, no-fasttest

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

for format in Native Values JSONCompactEachRow TSKV TSV CSV JSONEachRow JSONCompactEachRow JSONStringsEachRow
do
    echo $format
    ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS file"
    ${CLICKHOUSE_CLIENT} --query "CREATE TABLE file (x UInt64) ENGINE = File($format, '${CLICKHOUSE_DATABASE}/data.$format.lz4')"
    for size in 10000 100000 1000000 2500000
    do
        ${CLICKHOUSE_CLIENT} --query "TRUNCATE TABLE file"
        ${CLICKHOUSE_CLIENT} --query "INSERT INTO file SELECT * FROM numbers($size)"
        ${CLICKHOUSE_CLIENT} --query "SELECT max(x) FROM file"
    done
done

${CLICKHOUSE_CLIENT} --query "DROP TABLE file"
@ -0,0 +1,4 @@
9999
99999
999999
2499999

tests/queries/0_stateless/02125_lz4_compression_bug_CSV.sh (new executable file, 6 lines)
@ -0,0 +1,6 @@
#!/usr/bin/env bash
# Tags: no-fasttest

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
${CUR_DIR}/02125_lz4_compression_bug.lib CSV

@ -0,0 +1,4 @@
9999
99999
999999
2499999

@ -0,0 +1,6 @@
#!/usr/bin/env bash
# Tags: no-fasttest

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
${CUR_DIR}/02125_lz4_compression_bug.lib JSONCompactEachRow

@ -0,0 +1,4 @@
9999
99999
999999
2499999

tests/queries/0_stateless/02125_lz4_compression_bug_JSONEachRow.sh (new executable file, 6 lines)
@ -0,0 +1,6 @@
#!/usr/bin/env bash
# Tags: no-fasttest

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
${CUR_DIR}/02125_lz4_compression_bug.lib JSONEachRow

@ -0,0 +1,4 @@
9999
99999
999999
2499999

@ -0,0 +1,6 @@
#!/usr/bin/env bash
# Tags: no-fasttest

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
${CUR_DIR}/02125_lz4_compression_bug.lib JSONStringsEachRow

@ -0,0 +1,4 @@
9999
99999
999999
2499999

tests/queries/0_stateless/02125_lz4_compression_bug_Native.sh (new executable file, 6 lines)
@ -0,0 +1,6 @@
#!/usr/bin/env bash
# Tags: no-fasttest

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
${CUR_DIR}/02125_lz4_compression_bug.lib Native

@ -0,0 +1,4 @@
9999
99999
999999
2499999

tests/queries/0_stateless/02125_lz4_compression_bug_TSKV.sh (new executable file, 6 lines)
@ -0,0 +1,6 @@
#!/usr/bin/env bash
# Tags: no-fasttest

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
${CUR_DIR}/02125_lz4_compression_bug.lib TSKV

@ -0,0 +1,4 @@
9999
99999
999999
2499999

tests/queries/0_stateless/02125_lz4_compression_bug_TSV.sh (new executable file, 6 lines)
@ -0,0 +1,6 @@
#!/usr/bin/env bash
# Tags: no-fasttest

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
${CUR_DIR}/02125_lz4_compression_bug.lib TSV

@ -0,0 +1,4 @@
9999
99999
999999
2499999

tests/queries/0_stateless/02125_lz4_compression_bug_Values.sh (new executable file, 6 lines)
@ -0,0 +1,6 @@
#!/usr/bin/env bash
# Tags: no-fasttest

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
${CUR_DIR}/02125_lz4_compression_bug.lib Values
Some files were not shown because too many files have changed in this diff.