Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-22 07:31:57 +00:00

Commit c202691319: Merge branch 'master' into stack-frams-size-limit
.gitmodules (vendored, +6 lines):

@@ -174,3 +174,9 @@
 [submodule "contrib/sentry-native"]
 	path = contrib/sentry-native
 	url = https://github.com/getsentry/sentry-native.git
+[submodule "contrib/gcem"]
+	path = contrib/gcem
+	url = https://github.com/kthohr/gcem.git
+[submodule "contrib/stats"]
+	path = contrib/stats
+	url = https://github.com/kthohr/stats.git
CMakeLists.txt:

@@ -378,6 +378,7 @@ include (cmake/find/avro.cmake)
 include (cmake/find/msgpack.cmake)
 include (cmake/find/cassandra.cmake)
 include (cmake/find/sentry.cmake)
+include (cmake/find/stats.cmake)

 find_contrib_lib(cityhash)
 find_contrib_lib(farmhash)
@@ -49,7 +49,7 @@ public:
     struct Values
     {
         /// Least significant 32 bits from time_t at beginning of the day.
-        /// If the unix timestamp of beginning of the day is negative (example: 1970-01-01 MSK, where time_t == -10800), then value is zero.
+        /// If the unix timestamp of beginning of the day is negative (example: 1970-01-01 MSK, where time_t == -10800), then value will overflow.
         /// Change to time_t; change constants above; and recompile the sources if you need to support time after 2105 year.
         UInt32 date;
ReplxxLineReader.cpp:

@@ -16,6 +16,19 @@ void trim(String & s)
     s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end());
 }

+// Uses a separate replxx::Replxx instance so that the history is not loaded
+// again into the current context (replxx::Replxx::history_load() re-loads the
+// history from the file); otherwise it would overlap with the history of the
+// current session (this makes the behavior compatible with other
+// interpreters, e.g. bash).
+void history_save(const String & history_file_path, const String & line)
+{
+    replxx::Replxx rx_no_overlap;
+    rx_no_overlap.history_load(history_file_path);
+    rx_no_overlap.history_add(line);
+    rx_no_overlap.history_save(history_file_path);
+}
+
 }

 ReplxxLineReader::ReplxxLineReader(
@@ -101,6 +114,10 @@ LineReader::InputStatus ReplxxLineReader::readOneLine(const String & prompt)
 void ReplxxLineReader::addToHistory(const String & line)
 {
     // Lock the history file to prevent inconsistent concurrent changes.
+    //
+    // replxx::Replxx::history_save() already has lockf(),
+    // but replxx::Replxx::history_load() does not,
+    // and that is why flock() is added here.
     bool locked = false;
     if (flock(history_file_fd, LOCK_EX))
         rx.print("Lock of history file failed: %s\n", strerror(errno));
@@ -110,7 +127,7 @@ void ReplxxLineReader::addToHistory(const String & line)
     rx.history_add(line);

     // flush changes to the disk
-    rx.history_save(history_file_path);
+    history_save(history_file_path, line);

     if (locked && 0 != flock(history_file_fd, LOCK_UN))
         rx.print("Unlock of history file failed: %s\n", strerror(errno));
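The same advisory-locking pattern (an exclusive flock held around the read-modify-write of a shared file) can be tried from the shell; a minimal sketch using util-linux flock and a throwaway file, not part of the change itself:

```
(
    flock -x 9                        # exclusive lock, the shell analogue of flock(fd, LOCK_EX)
    echo "SELECT 1;" >> /tmp/demo_history
) 9>>/tmp/demo_history                # fd 9 is closed on subshell exit, releasing the lock (LOCK_UN)
```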
benchmark/omnisci/benchmark.sh (new executable file, +17 lines):

#!/bin/bash

grep -v -P '^#' queries.sql | sed -e 's/{table}/hits/' | while read query; do

    # Drop the OS page cache and restart the server so the first run is cold.
    echo 3 | sudo tee /proc/sys/vm/drop_caches
    sudo systemctl restart omnisci_server
    # Poll until the server accepts queries again.
    for i in {1..1000}; do
        /opt/omnisci/bin/omnisql -t -p HyperInteractive <<< "SELECT 1;" 2>&1 | grep -q '1 rows returned' && break;
        sleep 0.1;
    done
    sleep 10;

    # Run each query three times, keeping only the timing/exception lines.
    echo "$query";
    for i in {1..3}; do
        /opt/omnisci/bin/omnisql -t -p HyperInteractive <<< "$query" 2>&1 | grep -P 'Exception:|Execution time:';
    done;
done;
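A plain capture of this script's output; the exact redirection used to produce the log.txt below is an assumption, not part of the script:

```
./benchmark.sh 2>&1 | tee log.txt
```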
benchmark/omnisci/instruction.md (new file, +332 lines):

# Instructions to run the benchmark for OmniSci on the web-analytics dataset

OmniSci (formerly named "MapD") is an open-source (open-core) in-memory analytical DBMS with support for GPU processing.
It can also run on a CPU, without a GPU. It can show competitive performance on simple queries (like a simple aggregation on a single column).

# How to install

https://docs.omnisci.com/installation-and-configuration/installation/installing-on-ubuntu

# Caveats

- The dataset (at least the needed columns) must fit in memory.
- It does not support data compression (only dictionary encoding for strings).
- The first query execution is very slow, because the uncompressed data is read from disk.
- It does not support indexes for quick range queries.
- It does not support NOT NULL for data types.
- It does not support BLOB.
- No support for UNSIGNED data types (which is fine according to the SQL standard).
- Lack of string processing functions.
- Strings are limited to 32767 bytes.
- GROUP BY on a text data type is supported only if it has dictionary encoding.
  `Exception: Cannot group by string columns which are not dictionary encoded`
- Some aggregate functions are not supported for strings at all.
  `Aggregate on TEXT is not supported yet.`
- Sometimes I hit a bug where a query runs in an infinite loop and does not finish (after a retry it finishes successfully).
- One query took hours to execute, even with retries.
- Sorting is slow and, with default settings, disabled for large result sets.
  `Exception: Sorting the result would be too slow`
  `Cast from dictionary-encoded string to none-encoded would be slow`
- There is an approximate count-distinct function, but its precision is not documented.

To enable sorting of large result sets, see:
https://stackoverflow.com/questions/62977734/omnissci-sorting-the-result-would-be-too-slow

The list of known issues is here:
https://github.com/omnisci/omniscidb/issues?q=is%3Aissue+author%3Aalexey-milovidov

# How to prepare data

Download the 100 million row dataset from here and insert it into ClickHouse:
https://clickhouse.tech/docs/en/getting-started/example-datasets/metrica/

Convert the CREATE TABLE query:

```
clickhouse-client --query "SHOW CREATE TABLE hits_100m" --format TSVRaw |
    tr '`' '"' |
    sed -r -e '
        s/U?Int64/BIGINT/;
        s/U?Int32/INTEGER/;
        s/U?Int16/SMALLINT/;
        s/U?Int8/TINYINT/;
        s/DateTime/TIMESTAMP ENCODING FIXED(32)/;
        s/ Date/ DATE ENCODING DAYS(16)/;
        s/FixedString\(2\)/TEXT ENCODING DICT(16)/;
        s/FixedString\(3\)/TEXT ENCODING DICT/;
        s/FixedString\(\d+\)/TEXT ENCODING DICT/;
        s/String/TEXT ENCODING DICT/;'
```

And cut the `ENGINE` part.
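To sanity-check a rewrite rule, the sed script can be fed a single sample line (the line below is a made-up example built from one real column):

```
echo '"EventTime" DateTime,' | sed -r -e 's/DateTime/TIMESTAMP ENCODING FIXED(32)/'
# prints: "EventTime" TIMESTAMP ENCODING FIXED(32),
```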
The resulting CREATE TABLE query:

```
CREATE TABLE hits
(
    "WatchID" BIGINT,
    "JavaEnable" TINYINT,
    "Title" TEXT ENCODING DICT,
    "GoodEvent" SMALLINT,
    "EventTime" TIMESTAMP ENCODING FIXED(32),
    "EventDate" DATE ENCODING DAYS(16),
    "CounterID" INTEGER,
    "ClientIP" INTEGER,
    "RegionID" INTEGER,
    "UserID" BIGINT,
    "CounterClass" TINYINT,
    "OS" TINYINT,
    "UserAgent" TINYINT,
    "URL" TEXT ENCODING DICT,
    "Referer" TEXT ENCODING DICT,
    "Refresh" TINYINT,
    "RefererCategoryID" SMALLINT,
    "RefererRegionID" INTEGER,
    "URLCategoryID" SMALLINT,
    "URLRegionID" INTEGER,
    "ResolutionWidth" SMALLINT,
    "ResolutionHeight" SMALLINT,
    "ResolutionDepth" TINYINT,
    "FlashMajor" TINYINT,
    "FlashMinor" TINYINT,
    "FlashMinor2" TEXT ENCODING DICT,
    "NetMajor" TINYINT,
    "NetMinor" TINYINT,
    "UserAgentMajor" SMALLINT,
    "UserAgentMinor" TEXT ENCODING DICT(16),
    "CookieEnable" TINYINT,
    "JavascriptEnable" TINYINT,
    "IsMobile" TINYINT,
    "MobilePhone" TINYINT,
    "MobilePhoneModel" TEXT ENCODING DICT,
    "Params" TEXT ENCODING DICT,
    "IPNetworkID" INTEGER,
    "TraficSourceID" TINYINT,
    "SearchEngineID" SMALLINT,
    "SearchPhrase" TEXT ENCODING DICT,
    "AdvEngineID" TINYINT,
    "IsArtifical" TINYINT,
    "WindowClientWidth" SMALLINT,
    "WindowClientHeight" SMALLINT,
    "ClientTimeZone" SMALLINT,
    "ClientEventTime" TIMESTAMP ENCODING FIXED(32),
    "SilverlightVersion1" TINYINT,
    "SilverlightVersion2" TINYINT,
    "SilverlightVersion3" INTEGER,
    "SilverlightVersion4" SMALLINT,
    "PageCharset" TEXT ENCODING DICT,
    "CodeVersion" INTEGER,
    "IsLink" TINYINT,
    "IsDownload" TINYINT,
    "IsNotBounce" TINYINT,
    "FUniqID" BIGINT,
    "OriginalURL" TEXT ENCODING DICT,
    "HID" INTEGER,
    "IsOldCounter" TINYINT,
    "IsEvent" TINYINT,
    "IsParameter" TINYINT,
    "DontCountHits" TINYINT,
    "WithHash" TINYINT,
    "HitColor" TEXT ENCODING DICT(8),
    "LocalEventTime" TIMESTAMP ENCODING FIXED(32),
    "Age" TINYINT,
    "Sex" TINYINT,
    "Income" TINYINT,
    "Interests" SMALLINT,
    "Robotness" TINYINT,
    "RemoteIP" INTEGER,
    "WindowName" INTEGER,
    "OpenerName" INTEGER,
    "HistoryLength" SMALLINT,
    "BrowserLanguage" TEXT ENCODING DICT(16),
    "BrowserCountry" TEXT ENCODING DICT(16),
    "SocialNetwork" TEXT ENCODING DICT,
    "SocialAction" TEXT ENCODING DICT,
    "HTTPError" SMALLINT,
    "SendTiming" INTEGER,
    "DNSTiming" INTEGER,
    "ConnectTiming" INTEGER,
    "ResponseStartTiming" INTEGER,
    "ResponseEndTiming" INTEGER,
    "FetchTiming" INTEGER,
    "SocialSourceNetworkID" TINYINT,
    "SocialSourcePage" TEXT ENCODING DICT,
    "ParamPrice" BIGINT,
    "ParamOrderID" TEXT ENCODING DICT,
    "ParamCurrency" TEXT ENCODING DICT,
    "ParamCurrencyID" SMALLINT,
    "OpenstatServiceName" TEXT ENCODING DICT,
    "OpenstatCampaignID" TEXT ENCODING DICT,
    "OpenstatAdID" TEXT ENCODING DICT,
    "OpenstatSourceID" TEXT ENCODING DICT,
    "UTMSource" TEXT ENCODING DICT,
    "UTMMedium" TEXT ENCODING DICT,
    "UTMCampaign" TEXT ENCODING DICT,
    "UTMContent" TEXT ENCODING DICT,
    "UTMTerm" TEXT ENCODING DICT,
    "FromTag" TEXT ENCODING DICT,
    "HasGCLID" TINYINT,
    "RefererHash" BIGINT,
    "URLHash" BIGINT,
    "CLID" INTEGER
);
```

Convert the dataset and prepare the list of fields for the SELECT:

```
clickhouse-client --query "SHOW CREATE TABLE hits_100m" --format TSVRaw |
    tr '`' '"' |
    sed -r -e '
        s/"(\w+)" U?Int([0-9]+)/toInt\2(\1)/;
        s/"(\w+)" (Fixed)?String(\([0-9]+\))?/toValidUTF8(toString(\1))/;
        s/"(\w+)" \w+/\1/'
```

The resulting SELECT query for data preparation:

```
SELECT
    toInt64(WatchID),
    toInt8(JavaEnable),
    toValidUTF8(toString(Title)),
    toInt16(GoodEvent),
    EventTime,
    EventDate,
    toInt32(CounterID),
    toInt32(ClientIP),
    toInt32(RegionID),
    toInt64(UserID),
    toInt8(CounterClass),
    toInt8(OS),
    toInt8(UserAgent),
    toValidUTF8(toString(URL)),
    toValidUTF8(toString(Referer)),
    toInt8(Refresh),
    toInt16(RefererCategoryID),
    toInt32(RefererRegionID),
    toInt16(URLCategoryID),
    toInt32(URLRegionID),
    toInt16(ResolutionWidth),
    toInt16(ResolutionHeight),
    toInt8(ResolutionDepth),
    toInt8(FlashMajor),
    toInt8(FlashMinor),
    toValidUTF8(toString(FlashMinor2)),
    toInt8(NetMajor),
    toInt8(NetMinor),
    toInt16(UserAgentMajor),
    toValidUTF8(toString(UserAgentMinor)),
    toInt8(CookieEnable),
    toInt8(JavascriptEnable),
    toInt8(IsMobile),
    toInt8(MobilePhone),
    toValidUTF8(toString(MobilePhoneModel)),
    toValidUTF8(toString(Params)),
    toInt32(IPNetworkID),
    toInt8(TraficSourceID),
    toInt16(SearchEngineID),
    toValidUTF8(toString(SearchPhrase)),
    toInt8(AdvEngineID),
    toInt8(IsArtifical),
    toInt16(WindowClientWidth),
    toInt16(WindowClientHeight),
    toInt16(ClientTimeZone),
    ClientEventTime,
    toInt8(SilverlightVersion1),
    toInt8(SilverlightVersion2),
    toInt32(SilverlightVersion3),
    toInt16(SilverlightVersion4),
    toValidUTF8(toString(PageCharset)),
    toInt32(CodeVersion),
    toInt8(IsLink),
    toInt8(IsDownload),
    toInt8(IsNotBounce),
    toInt64(FUniqID),
    toValidUTF8(toString(OriginalURL)),
    toInt32(HID),
    toInt8(IsOldCounter),
    toInt8(IsEvent),
    toInt8(IsParameter),
    toInt8(DontCountHits),
    toInt8(WithHash),
    toValidUTF8(toString(HitColor)),
    LocalEventTime,
    toInt8(Age),
    toInt8(Sex),
    toInt8(Income),
    toInt16(Interests),
    toInt8(Robotness),
    toInt32(RemoteIP),
    toInt32(WindowName),
    toInt32(OpenerName),
    toInt16(HistoryLength),
    toValidUTF8(toString(BrowserLanguage)),
    toValidUTF8(toString(BrowserCountry)),
    toValidUTF8(toString(SocialNetwork)),
    toValidUTF8(toString(SocialAction)),
    toInt16(HTTPError),
    toInt32(SendTiming),
    toInt32(DNSTiming),
    toInt32(ConnectTiming),
    toInt32(ResponseStartTiming),
    toInt32(ResponseEndTiming),
    toInt32(FetchTiming),
    toInt8(SocialSourceNetworkID),
    toValidUTF8(toString(SocialSourcePage)),
    toInt64(ParamPrice),
    toValidUTF8(toString(ParamOrderID)),
    toValidUTF8(toString(ParamCurrency)),
    toInt16(ParamCurrencyID),
    toValidUTF8(toString(OpenstatServiceName)),
    toValidUTF8(toString(OpenstatCampaignID)),
    toValidUTF8(toString(OpenstatAdID)),
    toValidUTF8(toString(OpenstatSourceID)),
    toValidUTF8(toString(UTMSource)),
    toValidUTF8(toString(UTMMedium)),
    toValidUTF8(toString(UTMCampaign)),
    toValidUTF8(toString(UTMContent)),
    toValidUTF8(toString(UTMTerm)),
    toValidUTF8(toString(FromTag)),
    toInt8(HasGCLID),
    toInt64(RefererHash),
    toInt64(URLHash),
    toInt32(CLID)
FROM hits_100m_obfuscated
INTO OUTFILE '/home/milovidov/example_datasets/hits_100m_obfuscated.csv'
FORMAT CSV;
```

Upload the data to OmniSci:

```
/opt/omnisci/bin/omnisql -t -p HyperInteractive
```

Run the CREATE TABLE statement, then run:

```
COPY hits FROM '/home/milovidov/example_datasets/hits_100m_obfuscated.csv' WITH (HEADER = 'false');
```
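A quick sanity check after loading; the expected count is the dataset size (about 100 million rows), though the exact number is not stated in this document:

```
/opt/omnisci/bin/omnisql -t -p HyperInteractive <<< "SELECT count(*) FROM hits;"
```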
Data loading took

```
336639 ms
```

on a server (Linux Ubuntu, Xeon E5-2560v2, 32 logical CPUs, 128 GiB RAM, 8xHDD RAID-5, 40 TB).

Run the benchmark:

```
./benchmark.sh
```

Prepare the result to paste into JSON:

```
grep -oP 'Total time: \d+' log.txt |
    grep -oP '\d+' |
    awk '{
        if (i % 3 == 0) { a = $1 }
        else if (i % 3 == 1) { b = $1 }
        else if (i % 3 == 2) { c = $1; print "[" a / 1000 ", " b / 1000 ", " c / 1000 "]," };
        ++i; }'
```
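For example, the three "Total time" values of the first query in log.txt produce:

```
printf '23471\n43\n35\n' | awk '{
    if (i % 3 == 0) { a = $1 }
    else if (i % 3 == 1) { b = $1 }
    else if (i % 3 == 2) { c = $1; print "[" a / 1000 ", " b / 1000 ", " c / 1000 "]," };
    ++i; }'
# prints: [23.471, 0.043, 0.035],
```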
And fill in `[null, null, null]` for the missing runs.
benchmark/omnisci/log.txt (new file, +210 lines):

3
SELECT count(*) FROM hits;
Execution time: 23471 ms, Total time: 23471 ms
Execution time: 42 ms, Total time: 43 ms
Execution time: 35 ms, Total time: 35 ms
3
SELECT count(*) FROM hits WHERE AdvEngineID != 0;
Execution time: 17328 ms, Total time: 17329 ms
Execution time: 58 ms, Total time: 59 ms
Execution time: 57 ms, Total time: 59 ms
3
SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits;
Execution time: 17309 ms, Total time: 17310 ms
Execution time: 115 ms, Total time: 115 ms
Execution time: 129 ms, Total time: 130 ms
3
SELECT sum(UserID) FROM hits;
Execution time: 26091 ms, Total time: 26091 ms
Execution time: 88 ms, Total time: 89 ms
Execution time: 71 ms, Total time: 72 ms
3
SELECT APPROX_COUNT_DISTINCT(UserID) FROM hits;
Execution time: 21720 ms, Total time: 21720 ms
Execution time: 364 ms, Total time: 364 ms
Execution time: 344 ms, Total time: 345 ms
3
SELECT APPROX_COUNT_DISTINCT(SearchPhrase) FROM hits;
Execution time: 19314 ms, Total time: 19315 ms
Execution time: 385 ms, Total time: 386 ms
Execution time: 382 ms, Total time: 382 ms
3
SELECT min(EventDate), max(EventDate) FROM hits;
Execution time: 19431 ms, Total time: 19432 ms
Execution time: 130 ms, Total time: 131 ms
Execution time: 147 ms, Total time: 148 ms
3
SELECT AdvEngineID, count(*) FROM hits WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count(*) DESC;
Execution time: 20660 ms, Total time: 20661 ms
Execution time: 63 ms, Total time: 64 ms
Execution time: 88 ms, Total time: 89 ms
3
SELECT RegionID, APPROX_COUNT_DISTINCT(UserID) AS u FROM hits GROUP BY RegionID ORDER BY u DESC LIMIT 10;
Execution time: 21364 ms, Total time: 21472 ms
Execution time: 1387 ms, Total time: 1504 ms
Execution time: 1443 ms, Total time: 1505 ms
3
SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), APPROX_COUNT_DISTINCT(UserID) FROM hits GROUP BY RegionID ORDER BY c DESC LIMIT 10;
Execution time: 22205 ms, Total time: 22285 ms
Execution time: 1590 ms, Total time: 1655 ms
Execution time: 1591 ms, Total time: 1658 ms
3
SELECT MobilePhoneModel, APPROX_COUNT_DISTINCT(UserID) AS u FROM hits WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
Execution time: 22343 ms, Total time: 22344 ms
Execution time: 122 ms, Total time: 123 ms
Execution time: 117 ms, Total time: 118 ms
3
SELECT MobilePhone, MobilePhoneModel, APPROX_COUNT_DISTINCT(UserID) AS u FROM hits WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
Execution time: 21681 ms, Total time: 21695 ms
Execution time: 299 ms, Total time: 310 ms
Execution time: 275 ms, Total time: 292 ms
3
SELECT SearchPhrase, count(*) AS c FROM hits WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
Execution time: 23346 ms, Total time: 23360 ms
Execution time: 613 ms, Total time: 631 ms
Execution time: 606 ms, Total time: 624 ms
3
SELECT SearchPhrase, APPROX_COUNT_DISTINCT(UserID) AS u FROM hits WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
Execution time: 66014 ms, Total time: 68618 ms
Execution time: 44309 ms, Total time: 47296 ms
Execution time: 44019 ms, Total time: 46866 ms
3
SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM hits WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
Execution time: 25853 ms, Total time: 25984 ms
Execution time: 2590 ms, Total time: 2728 ms
Execution time: 2652 ms, Total time: 2789 ms
3
SELECT UserID, count(*) FROM hits GROUP BY UserID ORDER BY count(*) DESC LIMIT 10;
Execution time: 26581 ms, Total time: 26953 ms
Execution time: 5843 ms, Total time: 6158 ms
Execution time: 5970 ms, Total time: 6286 ms
3
SELECT UserID, SearchPhrase, count(*) FROM hits GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
Execution time: 33007 ms, Total time: 33581 ms
Execution time: 9943 ms, Total time: 10509 ms
Execution time: 9470 ms, Total time: 10047 ms
3
SELECT UserID, SearchPhrase, count(*) FROM hits GROUP BY UserID, SearchPhrase LIMIT 10;
Execution time: 39009 ms, Total time: 39575 ms
Execution time: 8151 ms, Total time: 8785 ms
Execution time: 8037 ms, Total time: 8665 ms
3
SELECT UserID, extract(minute FROM EventTime) AS m, SearchPhrase, count(*) FROM hits GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
Execution time: 56207 ms, Total time: 57764 ms
Execution time: 26653 ms, Total time: 28199 ms
Execution time: 25614 ms, Total time: 27336 ms
3
SELECT UserID FROM hits WHERE UserID = -6101065172474983726;
Execution time: 18975 ms, Total time: 18976 ms
Execution time: 136 ms, Total time: 136 ms
Execution time: 136 ms, Total time: 136 ms
3
SELECT count(*) FROM hits WHERE URL LIKE '%metrika%';
Execution time: 32444 ms, Total time: 32445 ms
Execution time: 125 ms, Total time: 126 ms
Execution time: 134 ms, Total time: 136 ms
3
SELECT SearchPhrase, min(URL), count(*) AS c FROM hits WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
Exception: Aggregate on TEXT is not supported yet.
Exception: Aggregate on TEXT is not supported yet.
Exception: Aggregate on TEXT is not supported yet.
3
SELECT SearchPhrase, min(URL), min(Title), count(*) AS c, APPROX_COUNT_DISTINCT(UserID) FROM hits WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
Exception: Aggregate on TEXT is not supported yet.
Exception: Aggregate on TEXT is not supported yet.
Exception: Aggregate on TEXT is not supported yet.
3
SELECT * FROM hits WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
Execution time: 96163 ms, Total time: 96166 ms
Execution time: 312 ms, Total time: 314 ms
Execution time: 303 ms, Total time: 305 ms
3
SELECT SearchPhrase FROM hits WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;
Execution time: 27493 ms, Total time: 27494 ms
Execution time: 216 ms, Total time: 216 ms
Execution time: 221 ms, Total time: 222 ms
3
SELECT SearchPhrase FROM hits WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;
Execution time: 38230 ms, Total time: 38308 ms
Execution time: 17175 ms, Total time: 17256 ms
Execution time: 17225 ms, Total time: 17310 ms
3
SELECT SearchPhrase FROM hits WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;
Execution time: 115614 ms, Total time: 115714 ms
Execution time: 95944 ms, Total time: 96041 ms
Execution time: 94274 ms, Total time: 94383 ms
3
SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM hits WHERE URL != '' GROUP BY CounterID HAVING c > 100000 ORDER BY l DESC LIMIT 25;
Execution time: 31775 ms, Total time: 31779 ms
Execution time: 2643 ms, Total time: 2647 ms
Execution time: 2933 ms, Total time: 2937 ms
3
SELECT domainWithoutWWW(Referer) AS key, avg(length(Referer)) AS l, count(*) AS c, min(Referer) FROM hits WHERE Referer != '' GROUP BY key HAVING c > 100000 ORDER BY l DESC LIMIT 25;
Exception: Exception occurred: org.apache.calcite.runtime.CalciteContextException: From line 1, column 8 to line 1, column 36: No match found for function signature domainWithoutWWW(<CHARACTER>)
Exception: Exception occurred: org.apache.calcite.runtime.CalciteContextException: From line 1, column 8 to line 1, column 36: No match found for function signature domainWithoutWWW(<CHARACTER>)
Exception: Exception occurred: org.apache.calcite.runtime.CalciteContextException: From line 1, column 8 to line 1, column 36: No match found for function signature domainWithoutWWW(<CHARACTER>)
3
SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits;
Execution time: 28853 ms, Total time: 28854 ms
Execution time: 5654 ms, Total time: 5655 ms
Execution time: 5579 ms, Total time: 5581 ms
3
SELECT SearchEngineID, ClientIP, count(*) AS c, sum("Refresh"), avg(ResolutionWidth) FROM hits WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
Execution time: 31694 ms, Total time: 31925 ms
Execution time: 3872 ms, Total time: 4142 ms
Execution time: 3928 ms, Total time: 4162 ms
3
SELECT WatchID, ClientIP, count(*) AS c, sum("Refresh"), avg(ResolutionWidth) FROM hits WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
Execution time: 43690 ms, Total time: 44297 ms
Execution time: 8221 ms, Total time: 8825 ms
Execution time: 8115 ms, Total time: 8711 ms
3
SELECT URL, count(*) AS c FROM hits GROUP BY URL ORDER BY c DESC LIMIT 10;
Execution time: 29669 ms, Total time: 29715 ms
Execution time: 1623 ms, Total time: 1669 ms
Execution time: 1534 ms, Total time: 1586 ms
3
SELECT 1, URL, count(*) AS c FROM hits GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
Execution time: 34860 ms, Total time: 35201 ms
Execution time: 7075 ms, Total time: 7414 ms
Execution time: 7164 ms, Total time: 7567 ms
3
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM hits GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
Execution time: 26467 ms, Total time: 26724 ms
Execution time: 5740 ms, Total time: 6026 ms
Execution time: 5667 ms, Total time: 5920 ms
3
SELECT URL, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "Refresh" = 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
Execution time: 31899 ms, Total time: 31908 ms
Execution time: 1141 ms, Total time: 1154 ms
Execution time: 1155 ms, Total time: 1168 ms
3
SELECT Title, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "Refresh" = 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
Execution time: 27991 ms, Total time: 27997 ms
Execution time: 719 ms, Total time: 724 ms
Execution time: 737 ms, Total time: 744 ms
3
SELECT URL, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "Refresh" = 0 AND IsLink != 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
Execution time: 34651 ms, Total time: 34661 ms
Execution time: 1182 ms, Total time: 1200 ms
Execution time: 1142 ms, Total time: 1159 ms
3
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "Refresh" = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
Execution time: 30130 ms, Total time: 30136 ms
Execution time: 461 ms, Total time: 467 ms
Execution time: 445 ms, Total time: 451 ms
3
SELECT URLHash, EventDate, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "Refresh" = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 686716256552154761 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
Execution time: 19989 ms, Total time: 19991 ms
Execution time: 326 ms, Total time: 327 ms
Execution time: 325 ms, Total time: 326 ms
3
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "Refresh" = 0 AND DontCountHits = 0 AND URLHash = 686716256552154761 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
Execution time: 18658 ms, Total time: 18660 ms
Execution time: 265 ms, Total time: 266 ms
Execution time: 254 ms, Total time: 255 ms
3
SELECT DATE_TRUNC(minute, EventTime) AS "Minute", count(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-02' AND "Refresh" = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC(minute, EventTime) ORDER BY DATE_TRUNC(minute, EventTime);
Execution time: 25225 ms, Total time: 25227 ms
Execution time: 210 ms, Total time: 212 ms
Execution time: 199 ms, Total time: 200 ms
benchmark/omnisci/queries.sql (new file, +43 lines):

SELECT count(*) FROM {table};
SELECT count(*) FROM {table} WHERE AdvEngineID != 0;
SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM {table};
SELECT sum(UserID) FROM {table};
SELECT APPROX_COUNT_DISTINCT(UserID) FROM {table};
SELECT APPROX_COUNT_DISTINCT(SearchPhrase) FROM {table};
SELECT min(EventDate), max(EventDate) FROM {table};
SELECT AdvEngineID, count(*) FROM {table} WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count(*) DESC;
SELECT RegionID, APPROX_COUNT_DISTINCT(UserID) AS u FROM {table} GROUP BY RegionID ORDER BY u DESC LIMIT 10;
SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), APPROX_COUNT_DISTINCT(UserID) FROM {table} GROUP BY RegionID ORDER BY c DESC LIMIT 10;
SELECT MobilePhoneModel, APPROX_COUNT_DISTINCT(UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
SELECT MobilePhone, MobilePhoneModel, APPROX_COUNT_DISTINCT(UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
SELECT SearchPhrase, count(*) AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT SearchPhrase, APPROX_COUNT_DISTINCT(UserID) AS u FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT UserID, count(*) FROM {table} GROUP BY UserID ORDER BY count(*) DESC LIMIT 10;
SELECT UserID, SearchPhrase, count(*) FROM {table} GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
SELECT UserID, SearchPhrase, count(*) FROM {table} GROUP BY UserID, SearchPhrase LIMIT 10;
SELECT UserID, extract(minute FROM EventTime) AS m, SearchPhrase, count(*) FROM {table} GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
SELECT UserID FROM {table} WHERE UserID = -6101065172474983726;
SELECT count(*) FROM {table} WHERE URL LIKE '%metrika%';
SELECT SearchPhrase, min(URL), count(*) AS c FROM {table} WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT SearchPhrase, min(URL), min(Title), count(*) AS c, APPROX_COUNT_DISTINCT(UserID) FROM {table} WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
SELECT * FROM {table} WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;
SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM {table} WHERE URL != '' GROUP BY CounterID HAVING c > 100000 ORDER BY l DESC LIMIT 25;
SELECT domainWithoutWWW(Referer) AS key, avg(length(Referer)) AS l, count(*) AS c, min(Referer) FROM {table} WHERE Referer != '' GROUP BY key HAVING c > 100000 ORDER BY l DESC LIMIT 25;
SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM {table};
SELECT SearchEngineID, ClientIP, count(*) AS c, sum("Refresh"), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT WatchID, ClientIP, count(*) AS c, sum("Refresh"), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
#SELECT WatchID, ClientIP, count(*) AS c, sum("Refresh"), avg(ResolutionWidth) FROM {table} GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
SELECT URL, count(*) AS c FROM {table} GROUP BY URL ORDER BY c DESC LIMIT 10;
SELECT 1, URL, count(*) AS c FROM {table} GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM {table} GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "Refresh" = 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "Refresh" = 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "Refresh" = 0 AND IsLink != 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "Refresh" = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT URLHash, EventDate, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "Refresh" = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 686716256552154761 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "Refresh" = 0 AND DontCountHits = 0 AND URLHash = 686716256552154761 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT DATE_TRUNC(minute, EventTime) AS "Minute", count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-02' AND "Refresh" = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC(minute, EventTime) ORDER BY DATE_TRUNC(minute, EventTime);
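benchmark.sh substitutes the `{table}` placeholder and skips the commented-out query; the substitution can be previewed directly:

```
grep -v -P '^#' queries.sql | sed -e 's/{table}/hits/' | head -n 1
# prints: SELECT count(*) FROM hits;
```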
cmake/find/stats.cmake (new file, +20 lines):

option(ENABLE_STATS "Enable the StatsLib library" ${ENABLE_LIBRARIES})

if (ENABLE_STATS)
    if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/stats")
        message (WARNING "submodule contrib/stats is missing. To fix, try: \n git submodule update --init --recursive")
        set (ENABLE_STATS 0)
        set (USE_STATS 0)
    elseif (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/gcem")
        message (WARNING "submodule contrib/gcem is missing. To fix, try: \n git submodule update --init --recursive")
        set (ENABLE_STATS 0)
        set (USE_STATS 0)
    else()
        set(STATS_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/stats/include)
        set(GCEM_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/gcem/include)
        set (USE_STATS 1)
    endif()
endif()

message (STATUS "Using stats=${USE_STATS} : ${STATS_INCLUDE_DIR}")
message (STATUS "Using gcem=${USE_STATS} : ${GCEM_INCLUDE_DIR}")
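A sketch of how the new option might be toggled at configure time (run from a build directory; the flags follow the usual CMake cache convention):

```
cmake -DENABLE_STATS=0 ..   # force-disable even when ENABLE_LIBRARIES is on
cmake -DENABLE_STATS=1 ..   # enable; the script falls back with a warning if a submodule is missing
```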
contrib/CMakeLists.txt (vendored, +4 lines):

@@ -307,3 +307,7 @@ endif()

 add_subdirectory (fmtlib-cmake)

+if (USE_STATS)
+    add_subdirectory (stats-cmake)
+    add_subdirectory (gcem)
+endif()
contrib/gcem (new vendored submodule):

@@ -0,0 +1 @@
+Subproject commit 8d4f1b5d76ea8f6ff12f3f4f34cda45424556b00
contrib/stats (new vendored submodule):

@@ -0,0 +1 @@
+Subproject commit b6dd459c10a88c7ea04693c007e9e35820c5d9ad
contrib/stats-cmake/CMakeLists.txt (new file, +9 lines):

# stats is a header-only library of probability density functions,
# cumulative distribution functions, quantile functions, and random sampling methods.
set(STATS_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/stats/include)
set(GCEM_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/gcem/include)

add_library(stats INTERFACE)

target_include_directories(stats SYSTEM INTERFACE ${STATS_INCLUDE_DIR})
target_include_directories(stats SYSTEM INTERFACE ${GCEM_INCLUDE_DIR})
docker/images.json:

@@ -99,6 +99,9 @@
     "docker/test/integration/resolver": {
         "name": "yandex/clickhouse-python-bottle",
         "dependent": []
+    },
+    "docker/test/integration/helper_container": {
+        "name": "yandex/clickhouse-integration-helper",
+        "dependent": []
     }
 }
@@ -48,8 +48,9 @@ then
     mkdir /output/ch
     git -C /output/ch init --bare
     git -C /output/ch remote add origin /build
-    git -C /output/ch fetch --no-tags --depth 50 origin HEAD
-    git -C /output/ch reset --soft FETCH_HEAD
+    git -C /output/ch fetch --no-tags --depth 50 origin HEAD:pr
+    git -C /output/ch fetch --no-tags --depth 50 origin master:master
+    git -C /output/ch reset --soft pr
     git -C /output/ch log -5
 fi
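The `HEAD:pr` and `master:master` refspecs store the fetched commits under local branch names (instead of only FETCH_HEAD), so the later `reset --soft pr` can name the PR head explicitly. A standalone sketch; the source path is hypothetical:

```
git init --bare /tmp/ch.git
git -C /tmp/ch.git remote add origin /path/to/source/repo   # hypothetical local remote
git -C /tmp/ch.git fetch --no-tags --depth 50 origin HEAD:pr
git -C /tmp/ch.git fetch --no-tags --depth 50 origin master:master
git -C /tmp/ch.git branch                                   # lists both: master, pr
```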
@@ -90,8 +90,45 @@ do
     sleep 0.1
 done

-TESTS_TO_SKIP="parquet avro h3 odbc mysql sha256 _orc_ arrow 01098_temporary_and_external_tables 01083_expressions_in_engine_arguments hdfs 00911_tautological_compare protobuf capnproto java_hash hashing secure 00490_special_line_separators_and_characters_outside_of_bmp 00436_convert_charset 00105_shard_collations 01354_order_by_tuple_collate_const 01292_create_user 01098_msgpack_format 00929_multi_match_edit_distance 00926_multimatch 00834_cancel_http_readonly_queries_on_client_close brotli parallel_alter 00302_http_compression 00417_kill_query 01294_lazy_database_concurrent 01193_metadata_loading base64 01031_mutations_interpreter_and_context json client 01305_replica_create_drop_zookeeper 01092_memory_profiler 01355_ilike 01281_unsucceeded_insert_select_queries_counter live_view limit_memory memory_limit memory_leak 00110_external_sort 00682_empty_parts_merge 00701_rollup 00109_shard_totals_after_having ddl_dictionaries 01251_dict_is_in_infinite_loop 01259_dictionary_custom_settings_ddl 01268_dictionary_direct_layout 01280_ssd_complex_key_dictionary 00652_replicated_mutations_zookeeper"
+TESTS_TO_SKIP="parquet avro h3 odbc mysql sha256 _orc_ arrow 01098_temporary_and_external_tables 01083_expressions_in_engine_arguments hdfs 00911_tautological_compare protobuf capnproto java_hash hashing secure 00490_special_line_separators_and_characters_outside_of_bmp 00436_convert_charset 00105_shard_collations 01354_order_by_tuple_collate_const 01292_create_user 01098_msgpack_format 00929_multi_match_edit_distance 00926_multimatch 00834_cancel_http_readonly_queries_on_client_close brotli parallel_alter 00302_http_compression 00417_kill_query 01294_lazy_database_concurrent 01193_metadata_loading base64 01031_mutations_interpreter_and_context json client 01305_replica_create_drop_zookeeper 01092_memory_profiler 01355_ilike 01281_unsucceeded_insert_select_queries_counter live_view limit_memory memory_limit memory_leak 00110_external_sort 00682_empty_parts_merge 00701_rollup 00109_shard_totals_after_having ddl_dictionaries 01251_dict_is_in_infinite_loop 01259_dictionary_custom_settings_ddl 01268_dictionary_direct_layout 01280_ssd_complex_key_dictionary 00652_replicated_mutations_zookeeper 01411_bayesian_ab_testing"

 clickhouse-test -j 4 --no-long --testname --shard --zookeeper --skip $TESTS_TO_SKIP 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/test_log.txt
+
+kill_clickhouse () {
+    kill `ps ax | grep clickhouse-server | grep -v 'grep' | awk '{print $1}'` 2>/dev/null
+
+    for i in {1..10}
+    do
+        if ! kill -0 `ps ax | grep clickhouse-server | grep -v 'grep' | awk '{print $1}'`; then
+            echo "No clickhouse process"
+            break
+        else
+            echo "Process" `ps ax | grep clickhouse-server | grep -v 'grep' | awk '{print $1}'` "still alive"
+            sleep 10
+        fi
+    done
+}
+
+FAILED_TESTS=`grep 'FAIL\|TIMEOUT\|ERROR' /test_output/test_log.txt | awk 'BEGIN { ORS=" " }; { print substr($3, 1, length($3)-1) }'`
+
+if [[ ! -z "$FAILED_TESTS" ]]; then
+    kill_clickhouse
+
+    clickhouse-server --config /etc/clickhouse-server/config.xml --daemon
+
+    until clickhouse-client --query "SELECT 1"
+    do
+        sleep 0.1
+    done
+
+    echo "Going to run again: $FAILED_TESTS"
+
+    clickhouse-test --no-long --testname --shard --zookeeper $FAILED_TESTS 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a /test_output/test_log.txt
+else
+    echo "No failed tests"
+fi
+
 mv /var/log/clickhouse-server/* /test_output
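The `FAILED_TESTS` pipeline keeps the third whitespace-separated field of every matching line (the test name, after the two `ts` timestamp fields) and strips its trailing colon; a check against a made-up log line:

```
echo '2020-07-15 12:00:00 01234_sample_test:  [ FAIL ] - result differs' |
    grep 'FAIL\|TIMEOUT\|ERROR' |
    awk 'BEGIN { ORS=" " }; { print substr($3, 1, length($3)-1) }'
# prints: 01234_sample_test
```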
docker/test/fuzzer/Dockerfile:

@@ -29,7 +29,9 @@ RUN apt-get update \

 COPY * /

-CMD cd /workspace \
+SHELL ["/bin/bash", "-c"]
+CMD set -o pipefail \
+    && cd /workspace \
     && /run-fuzzer.sh 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee main.log

 # docker run --network=host --volume <workspace>:/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> yandex/clickhouse-fuzzer
docker/test/fuzzer/run-fuzzer.sh:

@@ -72,7 +72,7 @@ function watchdog

 function fuzz
 {
-    ./clickhouse-server --config-file db/config.xml -- --path db 2>&1 | tail -100000 > server.log &
+    ./clickhouse-server --config-file db/config.xml -- --path db 2>&1 | tail -10000 > server.log &
     server_pid=$!
     kill -0 $server_pid
     while ! ./clickhouse-client --query "select 1" && kill -0 $server_pid ; do echo . ; sleep 1 ; done

@@ -83,7 +83,7 @@ function fuzz
     fuzzer_exit_code=0
     ./clickhouse-client --query-fuzzer-runs=1000 \
         < <(for f in $(ls ch/tests/queries/0_stateless/*.sql | sort -R); do cat "$f"; echo ';'; done) \
-        > >(tail -100000 > fuzzer.log) \
+        > >(tail -10000 > fuzzer.log) \
         2>&1 \
         || fuzzer_exit_code=$?

@@ -100,12 +100,6 @@ function fuzz
         sleep 1
     done
     killall -9 clickhouse-server ||:
-
-    if [ "$fuzzer_exit_code" == "143" ]
-    then
-        # Killed by watchdog, meaning, no errors.
-        fuzzer_exit_code=0
-    fi
 }

 case "$stage" in

@@ -122,8 +116,9 @@ case "$stage" in
     # Run the testing script from the repository
     echo Using the testing script from the repository
     export stage=download
+    time ch/docker/test/fuzzer/run-fuzzer.sh
     # Keep the error code
-    time ch/docker/test/fuzzer/run-fuzzer.sh || exit $?
+    exit $?
 fi
 ;&
 "download")

@@ -154,19 +149,31 @@ case "$stage" in
     pstree -aspgT

     # Make files with status and description we'll show for this check on Github
-    if [ "$fuzzer_exit_code" == 0 ]
+    task_exit_code=$fuzzer_exit_code
+    if [ "$fuzzer_exit_code" == 143 ]
     then
-        echo "OK" > description.txt
+        # SIGTERM -- the fuzzer was killed by timeout, which means a normal run.
         echo "success" > status.txt
-    else
-        echo "failure" > status.txt
-        if ! grep -a "received signal \|Logical error" server.log > description.txt
-        then
-            echo "Fuzzer exit code $fuzzer_exit_code. See the logs" > description.txt
-        fi
+        echo "OK" > description.txt
+        task_exit_code=0
+    elif [ "$fuzzer_exit_code" == 210 ]
+    then
+        # Lost connection to the server. This probably means that the server died
+        # with abort.
+        echo "failure" > status.txt
+        if ! grep -a "Received signal \|Logical error" server.log > description.txt
+        then
+            echo "Lost connection to server. See the logs" > description.txt
+        fi
+    else
+        # Something different -- maybe the fuzzer itself died? Don't grep the
+        # server log in this case, because we will find a message about normal
+        # server termination (Received signal 15), which is confusing.
+        echo "failure" > status.txt
+        echo "Fuzzer failed ($fuzzer_exit_code). See the logs" > description.txt
     fi

-    exit $fuzzer_exit_code
+    exit $task_exit_code
     ;&
 esac
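The resulting exit-code triage, restated as a standalone sketch (the codes come from the script above: 143 is SIGTERM from the watchdog, 210 is a lost connection):

```
for code in 143 210 1; do
    case "$code" in
        143) echo "$code -> success: fuzzer killed by the watchdog timeout (a normal run)";;
        210) echo "$code -> failure: lost connection, the server probably died";;
        *)   echo "$code -> failure: the fuzzer itself failed";;
    esac
done
```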
@ -25,7 +25,7 @@ RUN rm -rf \
|
|||||||
RUN apt-get clean
|
RUN apt-get clean
|
||||||
|
|
||||||
# Install MySQL ODBC driver
|
# Install MySQL ODBC driver
|
||||||
RUN curl 'https://cdn.mysql.com//Downloads/Connector-ODBC/8.0/mysql-connector-odbc-8.0.18-linux-glibc2.12-x86-64bit.tar.gz' --output 'mysql-connector.tar.gz' && tar -xzf mysql-connector.tar.gz && cd mysql-connector-odbc-8.0.18-linux-glibc2.12-x86-64bit/lib && mv * /usr/local/lib && ln -s /usr/local/lib/libmyodbc8a.so /usr/lib/x86_64-linux-gnu/odbc/libmyodbc.so
|
RUN curl 'https://cdn.mysql.com//Downloads/Connector-ODBC/8.0/mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit.tar.gz' --output 'mysql-connector.tar.gz' && tar -xzf mysql-connector.tar.gz && cd mysql-connector-odbc-8.0.21-linux-glibc2.12-x86-64bit/lib && mv * /usr/local/lib && ln -s /usr/local/lib/libmyodbc8a.so /usr/lib/x86_64-linux-gnu/odbc/libmyodbc.so
|
||||||
|
|
||||||
ENV TZ=Europe/Moscow
|
ENV TZ=Europe/Moscow
|
||||||
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||||
@@ -1,3 +1,4 @@
+# docker build -t yandex/clickhouse-integration-helper .
 # Helper docker container to run iptables without sudo
 
 FROM alpine
@@ -17,6 +17,7 @@ RUN apt-get update \
     libc6-dbg \
     moreutils \
     ncdu \
+    numactl \
     p7zip-full \
     parallel \
     psmisc \
@@ -317,9 +317,11 @@ create view right_query_log as select *
     '$(cat "right-query-log.tsv.columns")');
 
 create view query_logs as
-    select *, 0 version from left_query_log
+    select 0 version, query_id, ProfileEvents.Names, ProfileEvents.Values,
+        query_duration_ms from left_query_log
     union all
-    select *, 1 version from right_query_log
+    select 1 version, query_id, ProfileEvents.Names, ProfileEvents.Values,
+        query_duration_ms from right_query_log
     ;
 
 -- This is a single source of truth on all metrics we have for query runs. The
@@ -6,7 +6,6 @@ trap 'kill $(jobs -pr) ||:' EXIT
 
 mkdir db0 ||:
 mkdir left ||:
-mkdir right ||:
 
 left_pr=$1
 left_sha=$2
@@ -24,7 +23,7 @@ dataset_paths["values"]="https://clickhouse-datasets.s3.yandex.net/values_with_e
 
 function download
 {
-    # Historically there were various path for the performance test package.
+    # Historically there were various paths for the performance test package.
     # Test all of them.
     for path in "https://clickhouse-builds.s3.yandex.net/$left_pr/$left_sha/"{,clickhouse_build_check/}"performance/performance.tgz"
     do
@@ -34,22 +33,13 @@ function download
         fi
     done
 
-    for path in "https://clickhouse-builds.s3.yandex.net/$right_pr/$right_sha/"{,clickhouse_build_check/}"performance/performance.tgz"
-    do
-        if curl --fail --head "$path"
-        then
-            right_path="$path"
-        fi
-    done
-
-    # might have the same version on left and right
-    if ! [ "$left_path" = "$right_path" ]
+    # Might have the same version on left and right (for testing).
+    if ! [ "$left_sha" = "$right_sha" ]
     then
         wget -nv -nd -c "$left_path" -O- | tar -C left --strip-components=1 -zxv &
-        wget -nv -nd -c "$right_path" -O- | tar -C right --strip-components=1 -zxv &
     else
-        mkdir right ||:
-        wget -nv -nd -c "$left_path" -O- | tar -C left --strip-components=1 -zxv && cp -a left/* right &
+        mkdir left ||:
+        cp -a right/* left &
     fi
 
     for dataset_name in $datasets
@@ -1,38 +1,25 @@
 #!/bin/bash
 set -ex
 
-chown nobody workspace output
-chgrp nogroup workspace output
-chmod 777 workspace output
-
-cd workspace
-
-# Fetch the repository to find and describe the compared revisions.
-rm -rf ch ||:
-time git clone --depth 50 --bare https://github.com/ClickHouse/ClickHouse ch
-git -C ch fetch origin "$SHA_TO_TEST"
+# Use the packaged repository to find the revision we will compare to.
 
 function find_reference_sha
 {
-    # If not master, try to fetch pull/.../{head,merge}
-    if [ "$PR_TO_TEST" != "0" ]
-    then
-        git -C ch fetch origin "refs/pull/$PR_TO_TEST/*:refs/heads/pull/$PR_TO_TEST/*"
-    fi
+    git -C right/ch log -1 origin/master
+    git -C right/ch log -1 pr
 
     # Go back from the revision to be tested, trying to find the closest published
-    # testing release.
-    start_ref="$SHA_TO_TEST"~
-    # If we are testing a PR, and it merges with master successfully, we are
-    # building and testing not the nominal last SHA specified by pull/.../head
-    # and SHA_TO_TEST, but a revision that is merged with recent master, given
-    # by pull/.../merge ref.
-    # Master is the first parent of the pull/.../merge.
-    if git -C ch rev-parse "pull/$PR_TO_TEST/merge"
+    # testing release. The PR branch may be either pull/*/head, which is the
+    # author's branch, or pull/*/merge, which is head merged with some master
+    # automatically by Github. We will use a merge base with master as a reference
+    # for testing (or some older commit). A caveat is that if we're testing the
+    # master, the merge base is the tested commit itself, so we have to step back
+    # once.
+    start_ref=$(git -C right/ch merge-base origin/master pr)
+    if [ "$PR_TO_TEST" == "0" ]
     then
-        start_ref="pull/$PR_TO_TEST/merge~"
+        start_ref=$start_ref~
     fi
 
+    # Loop back to find a commit that actually has a published perf test package.
     while :
     do
         # FIXME the original idea was to compare to a closest testing tag, which
@@ -46,12 +33,12 @@ function find_reference_sha
         echo Reference tag is "$ref_tag"
         # We use annotated tags which have their own shas, so we have to further
         # dereference the tag to get the commit it points to, hence the '~0' thing.
-        REF_SHA=$(git -C ch rev-parse "$ref_tag~0")
+        REF_SHA=$(git -C right/ch rev-parse "$ref_tag~0")
 
-        # FIXME sometimes we have testing tags on commits without published builds --
-        # normally these are documentation commits. Loop to skip them.
-        # Historically there were various path for the performance test package.
-        # Test all of them.
+        # FIXME sometimes we have testing tags on commits without published builds.
+        # Normally these are documentation commits. Loop to skip them.
+        # Historically there were various paths for the performance test package,
+        # test all of them.
         unset found
         for path in "https://clickhouse-builds.s3.yandex.net/0/$REF_SHA/"{,clickhouse_build_check/}"performance/performance.tgz"
         do
@@ -69,6 +56,24 @@ function find_reference_sha
     REF_PR=0
 }
 
+chown nobody workspace output
+chgrp nogroup workspace output
+chmod 777 workspace output
+
+cd workspace
+
+# Download the package for the version we are going to test
+for path in "https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/"{,clickhouse_build_check/}"performance/performance.tgz"
+do
+    if curl --fail --head "$path"
+    then
+        right_path="$path"
+    fi
+done
+
+mkdir right
+wget -nv -nd -c "$right_path" -O- | tar -C right --strip-components=1 -zxv
+
 # Find reference revision if not specified explicitly
 if [ "$REF_SHA" == "" ]; then find_reference_sha; fi
 if [ "$REF_SHA" == "" ]; then echo Reference SHA is not specified ; exit 1 ; fi
@@ -76,17 +81,14 @@ if [ "$REF_PR" == "" ]; then echo Reference PR is not specified ; exit 1 ; fi
 
 # Show what we're testing
 (
-    git -C ch log -1 --decorate "$REF_SHA" ||:
+    git -C right/ch log -1 --decorate "$REF_SHA" ||:
 ) | tee left-commit.txt
 
 (
-    git -C ch log -1 --decorate "$SHA_TO_TEST" ||:
-    if git -C ch rev-parse "pull/$PR_TO_TEST/merge" &> /dev/null
-    then
+    git -C right/ch log -1 --decorate "$SHA_TO_TEST" ||:
     echo
     echo Real tested commit is:
-    git -C ch log -1 --decorate "pull/$PR_TO_TEST/merge"
-    fi
+    git -C right/ch log -1 --decorate "pr"
 ) | tee right-commit.txt
 
 if [ "$PR_TO_TEST" != "0" ]
@@ -94,8 +96,8 @@ then
     # If the PR only changes the tests and nothing else, prepare a list of these
     # tests for use by compare.sh. Compare to merge base, because master might be
     # far in the future and have unrelated test changes.
-    base=$(git -C ch merge-base "$SHA_TO_TEST" master)
-    git -C ch diff --name-only "$base" "$SHA_TO_TEST" | tee changed-tests.txt
+    base=$(git -C right/ch merge-base pr origin/master)
+    git -C right/ch diff --name-only "$base" pr | tee changed-tests.txt
     if grep -vq '^tests/performance' changed-tests.txt
     then
         # Have some other changes besides the tests, so truncate the test list,
docs/_description_templates/template-statement.md (new file, 24 lines)
@@ -0,0 +1,24 @@
+# Statement name (for example, SHOW USER)
+
+Brief description of what the statement does.
+
+Syntax:
+
+```sql
+Syntax of the statement.
+```
+
+## Other necessary sections of the description (Optional)
+
+Examples of descriptions with a complicated structure:
+
+- https://clickhouse.tech/docs/en/sql-reference/statements/grant/
+- https://clickhouse.tech/docs/en/sql-reference/statements/revoke/
+- https://clickhouse.tech/docs/en/sql-reference/statements/select/join/
+
+
+## See Also (Optional)
+
+Links to related topics as a list.
+
+- [link](#)
@@ -7,7 +7,7 @@ toc_title: Yandex.Metrica Data
 
 Dataset consists of two tables containing anonymized data about hits (`hits_v1`) and visits (`visits_v1`) of Yandex.Metrica. You can read more about Yandex.Metrica in [ClickHouse history](../../introduction/history.md) section.
 
-The dataset consists of two tables, either of them can be downloaded as a compressed `tsv.xz` file or as prepared partitions. In addition to that, an extended version of the `hits` table containing 100 million rows is available as TSV at https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits\_100m\_obfuscated\_v1.tsv.xz and as prepared partitions at https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits\_100m\_obfuscated\_v1.tar.xz.
+The dataset consists of two tables, either of them can be downloaded as a compressed `tsv.xz` file or as prepared partitions. In addition to that, an extended version of the `hits` table containing 100 million rows is available as TSV at https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_100m_obfuscated_v1.tsv.xz and as prepared partitions at https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz.
 
 ## Obtaining Tables from Prepared Partitions {#obtaining-tables-from-prepared-partitions}
@@ -19,26 +19,33 @@ toc_title: Adopters
 | <a href="https://www.benocs.com/" class="favicon">Benocs</a> | Network Telemetry and Analytics | Main Product | — | — | [Slides in English, October 2017](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup9/lpm.pdf) |
 | <a href="https://www.bloomberg.com/" class="favicon">Bloomberg</a> | Finance, Media | Monitoring | 102 servers | — | [Slides, May 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) |
 | <a href="https://bloxy.info" class="favicon">Bloxy</a> | Blockchain | Analytics | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) |
-| <a href="https://www.chinatelecomglobal.com/" class="favicon">Dataliance for China Telecom</a> | Telecom | Analytics | — | — | [Slides in Chinese, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) |
+| <a href="https://cardsmobile.ru/" class="favicon">CardsMobile</a> | Finance | Analytics | — | — | [VC.ru](https://vc.ru/s/cardsmobile/143449-rukovoditel-gruppy-analiza-dannyh) |
 | <a href="https://carto.com/" class="favicon">CARTO</a> | Business Intelligence | Geo analytics | — | — | [Geospatial processing with ClickHouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) |
 | <a href="http://public.web.cern.ch/public/" class="favicon">CERN</a> | Research | Experiment | — | — | [Press release, April 2012](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) |
 | <a href="http://cisco.com/" class="favicon">Cisco</a> | Networking | Traffic analysis | — | — | [Lightning talk, October 2019](https://youtu.be/-hI1vDR2oPY?t=5057) |
 | <a href="https://www.citadelsecurities.com/" class="favicon">Citadel Securities</a> | Finance | — | — | — | [Contribution, March 2019](https://github.com/ClickHouse/ClickHouse/pull/4774) |
 | <a href="https://city-mobil.ru" class="favicon">Citymobil</a> | Taxi | Analytics | — | — | [Blog Post in Russian, March 2020](https://habr.com/en/company/citymobil/blog/490660/) |
-| <a href="https://contentsquare.com" class="favicon">ContentSquare</a> | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) |
 | <a href="https://cloudflare.com" class="favicon">Cloudflare</a> | CDN | Traffic analysis | 36 servers | — | [Blog post, May 2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [Blog post, March 2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) |
+| <a href="https://contentsquare.com" class="favicon">ContentSquare</a> | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) |
 | <a href="https://coru.net/" class="favicon">Corunet</a> | Analytics | Main product | — | — | [Slides in English, April 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) |
 | <a href="https://www.creditx.com" class="favicon">CraiditX 氪信</a> | Finance AI | Analysis | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) |
+| <a href="https://crazypanda.ru/en/" class="favicon">Crazypanda</a> | Games | | — | — | Live session on ClickHouse meetup |
 | <a href="https://www.criteo.com/" class="favicon">Criteo</a> | Retail | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/3_storetail.pptx) |
+| <a href="https://www.chinatelecomglobal.com/" class="favicon">Dataliance for China Telecom</a> | Telecom | Analytics | — | — | [Slides in Chinese, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) |
 | <a href="https://db.com" class="favicon">Deutsche Bank</a> | Finance | BI Analytics | — | — | [Slides in English, October 2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) |
 | <a href="https://www.diva-e.com" class="favicon">Diva-e</a> | Digital consulting | Main Product | — | — | [Slides in English, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) |
 | <a href="https://www.exness.com" class="favicon">Exness</a> | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) |
+| <a href="https://www.flipkart.com/" class="favicon">Flipkart</a> | e-Commerce | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=239) |
+| <a href="https://fun.co/rp" class="favicon">FunCorp</a> | Games | | — | — | [Article](https://www.altinity.com/blog/migrating-from-redshift-to-clickhouse) |
 | <a href="https://geniee.co.jp" class="favicon">Geniee</a> | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) |
 | <a href="https://www.huya.com/" class="favicon">HUYA</a> | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) |
 | <a href="https://www.idealista.com" class="favicon">Idealista</a> | Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
 | <a href="https://www.infovista.com/" class="favicon">Infovista</a> | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) |
 | <a href="https://www.innogames.com" class="favicon">InnoGames</a> | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) |
+| <a href="https://www.instana.com" class="favicon">Instana</a> | APM Platform | Main product | — | — | [Twitter post](https://twitter.com/mieldonkers/status/1248884119158882304) |
 | <a href="https://integros.com" class="favicon">Integros</a> | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
+| <a href="https://ippon.tech" class="favicon">Ippon Technologies</a> | Technology Consulting | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=205) |
+| <a href="https://jinshuju.net" class="favicon">Jinshuju 金数据</a> | BI Analytics | Main product | — | — | [Slides in Chinese, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) |
 | <a href="https://www.kodiakdata.com/" class="favicon">Kodiak Data</a> | Clouds | Main product | — | — | [Slides in English, April 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) |
 | <a href="https://kontur.ru" class="favicon">Kontur</a> | Software Development | Metrics | — | — | [Talk in Russian, November 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) |
 | <a href="https://www.lbl.gov" class="favicon">Lawrence Berkeley National Laboratory</a> | Research | Traffic analysis | 1 server | 11.8 TiB | [Slides in English, April 2019](https://www.smitasin.com/presentations/2019-04-17_DOE-NSM.pdf) |
@@ -47,29 +54,34 @@ toc_title: Adopters
 | <a href="https://tech.mymarilyn.ru" class="favicon">Marilyn</a> | Advertising | Statistics | — | — | [Talk in Russian, June 2017](https://www.youtube.com/watch?v=iXlIgx2khwc) |
 | <a href="https://www.messagebird.com" class="favicon">MessageBird</a> | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) |
 | <a href="https://www.mgid.com/" class="favicon">MGID</a> | Ad network | Web-analytics | — | — | [Blog post in Russian, April 2020](http://gs-studio.com/news-about-it/32777----clickhouse---c) |
+| <a href="https://www.nuna.com/" class="favicon">Nuna Inc.</a> | Health Data Analytics | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=170) |
 | <a href="https://www.oneapm.com/" class="favicon">OneAPM</a> | Monitorings and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) |
+| <a href="https://www.percent.cn/" class="favicon">Percent 百分点</a> | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) |
+| <a href="https://plausible.io/" class="favicon">Plausible</a> | Analytics | Main Product | — | — | [Blog post, June 2020](https://twitter.com/PlausibleHQ/status/1273889629087969280) |
+| <a href="https://postmates.com/" class="favicon">Postmates</a> | Delivery | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=188) |
 | <a href="http://www.pragma-innovation.fr/" class="favicon">Pragma Innovation</a> | Telemetry and Big Data Analysis | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/4_pragma_innovation.pdf) |
 | <a href="https://www.qingcloud.com/" class="favicon">QINGCLOUD</a> | Cloud services | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/4.%20Cloud%20%2B%20TSDB%20for%20ClickHouse%20张健%20QingCloud.pdf) |
 | <a href="https://qrator.net" class="favicon">Qrator</a> | DDoS protection | Main product | — | — | [Blog Post, March 2019](https://blog.qrator.net/en/clickhouse-ddos-mitigation_37/) |
-| <a href="https://www.percent.cn/" class="favicon">Percent 百分点</a> | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) |
-| <a href="https://plausible.io/" class="favicon">Plausible</a> | Analytics | Main Product | — | — | [Blog post, June 2020](https://twitter.com/PlausibleHQ/status/1273889629087969280) |
 | <a href="https://rambler.ru" class="favicon">Rambler</a> | Internet services | Analytics | — | — | [Talk in Russian, April 2018](https://medium.com/@ramblertop/разработка-api-clickhouse-для-рамблер-топ-100-f4c7e56f3141) |
-| <a href="https://www.tencent.com" class="favicon">Tencent</a> | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) |
-| <a href="https://trafficstars.com/" class="favicon">Traffic Stars</a> | AD network | — | — | — | [Slides in Russian, May 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) |
 | <a href="https://www.s7.ru" class="favicon">S7 Airlines</a> | Airlines | Metrics, Logging | — | — | [Talk in Russian, March 2019](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) |
-| <a href="https://www.semrush.com/" class="favicon">SEMrush</a> | Marketing | Main product | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) |
 | <a href="https://www.scireum.de/" class="favicon">scireum GmbH</a> | e-Commerce | Main product | — | — | [Talk in German, February 2020](https://www.youtube.com/watch?v=7QWAn5RbyR4) |
+| <a href="https://www.semrush.com/" class="favicon">SEMrush</a> | Marketing | Main product | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) |
 | <a href="https://sentry.io/" class="favicon">Sentry</a> | Software Development | Main product | — | — | [Blog Post in English, May 2019](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) |
-| <a href="http://www.sgk.gov.tr/wps/portal/sgk/tr" class="favicon">SGK</a> | Government Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) |
 | <a href="https://seo.do/" class="favicon">seo.do</a> | Analytics | Main product | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) |
+| <a href="http://www.sgk.gov.tr/wps/portal/sgk/tr" class="favicon">SGK</a> | Government Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) |
 | <a href="http://english.sina.com/index.html" class="favicon">Sina</a> | News | — | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) |
 | <a href="https://smi2.ru/" class="favicon">SMI2</a> | News | Analytics | — | — | [Blog Post in Russian, November 2017](https://habr.com/ru/company/smi2/blog/314558/) |
 | <a href="https://www.splunk.com/" class="favicon">Splunk</a> | Business Analytics | Main product | — | — | [Slides in English, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) |
 | <a href="https://www.spotify.com" class="favicon">Spotify</a> | Music | Experimentation | — | — | [Slides, July 2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) |
 | <a href="https://www.tencent.com" class="favicon">Tencent</a> | Big Data | Data processing | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) |
+| <a href="https://www.tencent.com" class="favicon">Tencent</a> | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) |
+| <a href="https://trafficstars.com/" class="favicon">Traffic Stars</a> | AD network | — | — | — | [Slides in Russian, May 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) |
 | <a href="https://www.uber.com" class="favicon">Uber</a> | Taxi | Logging | — | — | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/uber.pdf) |
 | <a href="https://vk.com" class="favicon">VKontakte</a> | Social Network | Statistics, Logging | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) |
+| <a href="https://www.walmartlabs.com/" class="favicon">Walmart Labs</a> | Internet, Retail | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=144) |
+| <a href="https://wargaming.com/en/" class="favicon">Wargaming</a> | Games | | — | — | [Interview](https://habr.com/en/post/496954/) |
 | <a href="https://wisebits.com/" class="favicon">Wisebits</a> | IT Solutions | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) |
+| <a href="https://www.workato.com/" class="favicon">Workato</a> | Automation Software | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=334) |
 | <a href="http://www.xiaoxintech.cn/" class="favicon">Xiaoxin Tech</a> | Education | Common purpose | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) |
 | <a href="https://www.ximalaya.com/" class="favicon">Ximalaya</a> | Audio sharing | OLAP | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/ximalaya.pdf) |
 | <a href="https://cloud.yandex.ru/services/managed-clickhouse" class="favicon">Yandex Cloud</a> | Public Cloud | Main product | — | — | [Talk in Russian, December 2019](https://www.youtube.com/watch?v=pgnak9e_E0o) |
@@ -78,10 +90,5 @@ toc_title: Adopters
 | <a href="https://metrica.yandex.com" class="favicon">Yandex Metrica</a> | Web analytics | Main product | 360 servers in one cluster, 1862 servers in one department | 66.41 PiB / 5.68 PiB | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/introduction/#13) |
 | <a href="https://htc-cs.ru/" class="favicon">ЦВТ</a> | Software Development | Metrics, Logging | — | — | [Blog Post, March 2019, in Russian](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) |
 | <a href="https://mkb.ru/" class="favicon">МКБ</a> | Bank | Web-system monitoring | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) |
-| <a href="https://jinshuju.net" class="favicon">Jinshuju 金数据</a> | BI Analytics | Main product | — | — | [Slides in Chinese, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) |
-| <a href="https://www.instana.com" class="favicon">Instana</a> | APM Platform | Main product | — | — | [Twitter post](https://twitter.com/mieldonkers/status/1248884119158882304) |
-| <a href="https://wargaming.com/en/" class="favicon">Wargaming</a> | Games | | — | — | [Interview](https://habr.com/en/post/496954/) |
-| <a href="https://crazypanda.ru/en/" class="favicon">Crazypanda</a> | Games | | — | — | Live session on ClickHouse meetup |
-| <a href="https://fun.co/rp" class="favicon">FunCorp</a> | Games | | — | — | [Article](https://www.altinity.com/blog/migrating-from-redshift-to-clickhouse) |
 
 [Original article](https://clickhouse.tech/docs/en/introduction/adopters/) <!--hide-->
@@ -1459,6 +1459,20 @@ Possible values:
 
 Default value: 16.
 
+## parallel_distributed_insert_select {#parallel_distributed_insert_select}
+
+Enables parallel distributed `INSERT ... SELECT` query.
+
+If we execute `INSERT INTO distributed_table_a SELECT ... FROM distributed_table_b` queries and both tables use the same cluster, and both tables are either [replicated](../../engines/table-engines/mergetree-family/replication.md) or non-replicated, then this query is processed locally on every shard.
+
+Possible values:
+
+- 0 — Disabled.
+- 1 — Enabled.
+
+Default value: 0.
+
 ## insert_distributed_sync {#insert_distributed_sync}
 
 Enables or disables synchronous data insertion into a [Distributed](../../engines/table-engines/special/distributed.md#distributed) table.
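As a sketch of how the `parallel_distributed_insert_select` setting added above would be used (the table names are hypothetical and assume two `Distributed` tables over the same cluster):

``` sql
-- Hypothetical Distributed tables sharing one cluster definition.
SET parallel_distributed_insert_select = 1;

-- With the setting enabled, the SELECT and INSERT run locally on each shard
-- instead of pulling all data through the initiating node.
INSERT INTO distributed_table_a SELECT * FROM distributed_table_b;
```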
@@ -1535,7 +1549,7 @@ Default value: 16.
 
 ## validate\_polygons {#validate_polygons}
 
-Enables or disables throwing an exception in the [pointInPolygon](../../sql-reference/functions/geo.md#pointinpolygon) function, if the polygon is self-intersecting or self-tangent.
+Enables or disables throwing an exception in the [pointInPolygon](../../sql-reference/functions/geo/index.md#pointinpolygon) function, if the polygon is self-intersecting or self-tangent.
 
 Possible values:
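For illustration of the `validate_polygons` behavior referenced in this hunk (the coordinates are made up): with the default value of `1`, passing a self-intersecting polygon to `pointInPolygon` throws an exception, while `0` lets the calculation proceed:

``` sql
-- A "bowtie" polygon that intersects itself; with validate_polygons = 1
-- (the default) this query would throw an exception instead.
SET validate_polygons = 0;
SELECT pointInPolygon((1., 1.), [(0., 0.), (4., 4.), (4., 0.), (0., 4.)]);
```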
@@ -1709,4 +1723,21 @@ SELECT * FROM a;
 +---+
 ```
 
+## mutations_sync {#mutations_sync}
+
+Allows executing `ALTER TABLE ... UPDATE|DELETE` queries ([mutations](../../sql-reference/statements/alter/index.md#mutations)) synchronously.
+
+Possible values:
+
+- 0 - Mutations execute asynchronously.
+- 1 - The query waits for all mutations to complete on the current server.
+- 2 - The query waits for all mutations to complete on all replicas (if they exist).
+
+Default value: `0`.
+
+**See Also**
+
+- [Synchronicity of ALTER Queries](../../sql-reference/statements/alter/index.md#synchronicity-of-alter-queries)
+- [Mutations](../../sql-reference/statements/alter/index.md#mutations)
+
 [Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide -->
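A minimal sketch of the `mutations_sync` setting documented above; the table name is hypothetical:

``` sql
-- Block until the DELETE mutation has finished on all replicas.
SET mutations_sync = 2;
ALTER TABLE test_table DELETE WHERE id < 100;
```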
@@ -1,3 +1,36 @@
 ## system.asynchronous\_metric\_log {#system-tables-async-log}
 
-Contains the historical values for `system.asynchronous_log` (see [system.asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics))
+Contains the historical values for `system.asynchronous_metrics`, which are saved once per minute. This feature is enabled by default.
+
+Columns:
+
+- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Event date.
+- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event time.
+- `name` ([String](../../sql-reference/data-types/string.md)) — Metric name.
+- `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value.
+
+**Example**
+
+``` sql
+SELECT * FROM system.asynchronous_metric_log LIMIT 10
+```
+
+``` text
+┌─event_date─┬──────────event_time─┬─name─────────────────────────────────────┬────value─┐
+│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.arenas.all.pmuzzy               │        0 │
+│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.arenas.all.pdirty               │     4214 │
+│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.background_thread.run_intervals │        0 │
+│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.background_thread.num_runs      │        0 │
+│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.retained                        │ 17657856 │
+│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.mapped                          │ 71471104 │
+│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.resident                        │ 61538304 │
+│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.metadata                        │  6199264 │
+│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.allocated                       │ 38074336 │
+│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.epoch                           │        2 │
+└────────────┴─────────────────────┴──────────────────────────────────────────┴──────────┘
+```
+
+**See Also**
+
+- [system.asynchronous\_metrics](../system-tables/asynchronous_metrics.md) — Contains metrics that are calculated periodically in the background.
+- [system.metric_log](../system-tables/metric_log.md) — Contains history of metrics values from tables `system.metrics` and `system.events`, periodically flushed to disk.
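Beyond the `LIMIT 10` sample in the added documentation above, a typical use of this table is tracking a single metric over time; a small sketch, using a metric name taken from the sample output:

``` sql
SELECT event_time, value
FROM system.asynchronous_metric_log
WHERE name = 'jemalloc.resident'
ORDER BY event_time DESC
LIMIT 5;
```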
@@ -1,4 +1,4 @@
-# system.asynchronous\_metrics {#system_tables-asynchronous_metrics}
+# system.asynchronous_metrics {#system_tables-asynchronous_metrics}
 
 Contains metrics that are calculated periodically in the background. For example, the amount of RAM in use.
@@ -49,7 +49,7 @@ CurrentMetric_ReplicatedChecks: 0
 
 **See also**
 
-- [system.asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md#system_tables-asynchronous_metrics) — Contains periodically calculated metrics.
+- [system.asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md) — Contains periodically calculated metrics.
 - [system.events](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that occurred.
-- [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
+- [system.metrics](../../operations/system-tables/metrics.md) — Contains instantly calculated metrics.
 - [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
@@ -23,7 +23,8 @@ For temporary data an unique temporary data directory is created by default. If
 Basic usage:
 
 ``` bash
-$ clickhouse-local --structure "table_structure" --input-format "format_of_incoming_data" -q "query"
+$ clickhouse-local --structure "table_structure" --input-format "format_of_incoming_data" \
+    --query "query"
 ```
 
 Arguments:
@@ -46,7 +47,8 @@ Also there are arguments for each ClickHouse configuration variable which are mo
 ## Examples {#examples}
 
 ``` bash
-$ echo -e "1,2\n3,4" | clickhouse-local -S "a Int64, b Int64" -if "CSV" -q "SELECT * FROM table"
+$ echo -e "1,2\n3,4" | clickhouse-local --structure "a Int64, b Int64" \
+    --input-format "CSV" --query "SELECT * FROM table"
 Read 2 rows, 32.00 B in 0.000 sec., 5182 rows/sec., 80.97 KiB/sec.
 1 2
 3 4
@@ -55,16 +57,37 @@ Read 2 rows, 32.00 B in 0.000 sec., 5182 rows/sec., 80.97 KiB/sec.
 Previous example is the same as:
 
 ``` bash
-$ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin); SELECT a, b FROM table; DROP TABLE table"
+$ echo -e "1,2\n3,4" | clickhouse-local --query "
+    CREATE TABLE table (a Int64, b Int64) ENGINE = File(CSV, stdin);
+    SELECT a, b FROM table;
+    DROP TABLE table"
 Read 2 rows, 32.00 B in 0.000 sec., 4987 rows/sec., 77.93 KiB/sec.
 1 2
 3 4
 ```
 
+You don't have to use `stdin` or the `--file` argument, and can open any number of files using the [`file` table function](../../sql-reference/table-functions/file.md):
+
+``` bash
+$ echo 1 | tee 1.tsv
+1
+
+$ echo 2 | tee 2.tsv
+2
+
+$ clickhouse-local --query "
+    select * from file('1.tsv', TSV, 'a int') t1
+    cross join file('2.tsv', TSV, 'b int') t2"
+1 2
+```
+
 Now let's output memory usage for each Unix user:
 
 ``` bash
-$ ps aux | tail -n +2 | awk '{ printf("%s\t%s\n", $1, $4) }' | clickhouse-local -S "user String, mem Float64" -q "SELECT user, round(sum(mem), 2) as memTotal FROM table GROUP BY user ORDER BY memTotal DESC FORMAT Pretty"
+$ ps aux | tail -n +2 | awk '{ printf("%s\t%s\n", $1, $4) }' \
+    | clickhouse-local --structure "user String, mem Float64" \
+        --query "SELECT user, round(sum(mem), 2) as memTotal
+            FROM table GROUP BY user ORDER BY memTotal DESC FORMAT Pretty"
 ```
 
 ``` text
@@ -13,6 +13,7 @@ ClickHouse also supports:
 - [Parametric aggregate functions](../../sql-reference/aggregate-functions/parametric-functions.md#aggregate_functions_parametric), which accept other parameters in addition to columns.
 - [Combinators](../../sql-reference/aggregate-functions/combinators.md#aggregate_functions_combinators), which change the behavior of aggregate functions.
 
+
 ## NULL Processing {#null-processing}
 
 During aggregation, all `NULL`s are skipped.
@@ -37,9 +38,11 @@ Let's say you need to total the values in the `y` column:
 SELECT sum(y) FROM t_null_big
 ```
 
+```text
 ┌─sum(y)─┐
 │      7 │
 └────────┘
+```
 
 The `sum` function interprets `NULL` as `0`. In particular, this means that if the function receives input of a selection where all the values are `NULL`, then the result will be `0`, not `NULL`.
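A quick sketch of the all-`NULL` case described in that paragraph: summing a column that contains only `NULL`s yields `0` rather than `NULL`, because every `NULL` is skipped and the empty sum is `0`:

``` sql
-- The subquery produces a single all-NULL Nullable column.
SELECT sum(y) FROM (SELECT CAST(NULL, 'Nullable(UInt8)') AS y);
-- Returns 0, not NULL.
```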
@@ -57,4 +60,5 @@ SELECT groupArray(y) FROM t_null_big
 
 `groupArray` does not include `NULL` in the resulting array.
 
+
 [Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/) <!--hide-->
@@ -1,10 +1,10 @@
 ---
 toc_folder_title: Reference
 toc_priority: 36
-toc_title: Reference
+toc_hidden: true
 ---
 
-# Aggregate Function Reference {#aggregate-functions-reference}
+# List of Aggregate Functions {#aggregate-functions-reference}
 
 Standard aggregate functions:

@ -24,97 +24,51 @@ Standard aggregate functions:

ClickHouse-specific aggregate functions:

- [anyHeavy](../../../sql-reference/aggregate-functions/reference/anyheavy.md)
- [anyLast](../../../sql-reference/aggregate-functions/reference/anylast.md)
- [argMin](../../../sql-reference/aggregate-functions/reference/argmin.md)
- [argMax](../../../sql-reference/aggregate-functions/reference/argmax.md)
- [avgWeighted](../../../sql-reference/aggregate-functions/reference/avgweighted.md)
- [topK](../../../sql-reference/aggregate-functions/reference/topk.md)
- [topKWeighted](../../../sql-reference/aggregate-functions/reference/topkweighted.md)
- [groupArray](../../../sql-reference/aggregate-functions/reference/grouparray.md)
- [groupUniqArray](../../../sql-reference/aggregate-functions/reference/groupuniqarray.md)
- [groupArrayInsertAt](../../../sql-reference/aggregate-functions/reference/grouparrayinsertat.md)
- [groupArrayMovingAvg](../../../sql-reference/aggregate-functions/reference/grouparraymovingavg.md)
- [groupArrayMovingSum](../../../sql-reference/aggregate-functions/reference/grouparraymovingsum.md)
- [groupBitAnd](../../../sql-reference/aggregate-functions/reference/groupbitand.md)
- [groupBitOr](../../../sql-reference/aggregate-functions/reference/groupbitor.md)
- [groupBitXor](../../../sql-reference/aggregate-functions/reference/groupbitxor.md)
- [groupBitmap](../../../sql-reference/aggregate-functions/reference/groupbitmap.md)
- [groupBitmapAnd](../../../sql-reference/aggregate-functions/reference/groupbitmapand.md)
- [groupBitmapOr](../../../sql-reference/aggregate-functions/reference/groupbitmapor.md)
- [groupBitmapXor](../../../sql-reference/aggregate-functions/reference/groupbitmapxor.md)
- [sumWithOverflow](../../../sql-reference/aggregate-functions/reference/sumwithoverflow.md)
- [sumMap](../../../sql-reference/aggregate-functions/reference/summap.md)
- [minMap](../../../sql-reference/aggregate-functions/reference/minmap.md)
- [maxMap](../../../sql-reference/aggregate-functions/reference/maxmap.md)
- [skewSamp](../../../sql-reference/aggregate-functions/reference/skewsamp.md)
- [skewPop](../../../sql-reference/aggregate-functions/reference/skewpop.md)
- [kurtSamp](../../../sql-reference/aggregate-functions/reference/kurtsamp.md)
- [kurtPop](../../../sql-reference/aggregate-functions/reference/kurtpop.md)
- [timeSeriesGroupSum](../../../sql-reference/aggregate-functions/reference/timeseriesgroupsum.md)
- [timeSeriesGroupRateSum](../../../sql-reference/aggregate-functions/reference/timeseriesgroupratesum.md)
- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md)
- [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md)
- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md)
- [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md)
- [quantileExact](../../../sql-reference/aggregate-functions/reference/quantileexact.md)
- [quantileExactWeighted](../../../sql-reference/aggregate-functions/reference/quantileexactweighted.md)
- [quantileTiming](../../../sql-reference/aggregate-functions/reference/quantiletiming.md)
- [quantileTimingWeighted](../../../sql-reference/aggregate-functions/reference/quantiletimingweighted.md)
- [quantileDeterministic](../../../sql-reference/aggregate-functions/reference/quantiledeterministic.md)
- [quantileTDigest](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md)
- [quantileTDigestWeighted](../../../sql-reference/aggregate-functions/reference/quantiletdigestweighted.md)
- [simpleLinearRegression](../../../sql-reference/aggregate-functions/reference/simplelinearregression.md)
- [stochasticLinearRegression](../../../sql-reference/aggregate-functions/reference/stochasticlinearregression.md)
- [stochasticLogisticRegression](../../../sql-reference/aggregate-functions/reference/stochasticlogisticregression.md)
- [categoricalInformationValue](../../../sql-reference/aggregate-functions/reference/categoricalinformationvalue.md)

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/) <!--hide-->

@ -15,6 +15,9 @@ The following aggregate functions are supported:

- [`groupBitXor`](../../sql-reference/aggregate-functions/reference/groupbitxor.md#groupbitxor)
- [`groupArrayArray`](../../sql-reference/aggregate-functions/reference/grouparray.md#agg_function-grouparray)
- [`groupUniqArrayArray`](../../sql-reference/aggregate-functions/reference/groupuniqarray.md)
- [`sumMap`](../../sql-reference/aggregate-functions/reference/summap.md#agg_functions-summap)
- [`minMap`](../../sql-reference/aggregate-functions/reference/minmap.md#agg_functions-minmap)
- [`maxMap`](../../sql-reference/aggregate-functions/reference/maxmap.md#agg_functions-maxmap)

Values of the `SimpleAggregateFunction(func, Type)` look and are stored the same way as `Type`, so you do not need to apply functions with `-Merge`/`-State` suffixes. `SimpleAggregateFunction` has better performance than `AggregateFunction` with the same aggregation function.
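
As a minimal sketch (the table and column names here are hypothetical, not from the documentation):

```sql
-- One pre-aggregated row per key; during background merges ClickHouse
-- combines rows with the same key by applying `sum` to `total` directly.
CREATE TABLE simple_agg_example
(
    id UInt64,
    total SimpleAggregateFunction(sum, Float64)
)
ENGINE = AggregatingMergeTree()
ORDER BY id;

-- The column reads back as a plain Float64 -- no -Merge/-State suffix needed.
SELECT id, sum(total) FROM simple_agg_example GROUP BY id;
```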

@ -54,8 +54,6 @@ In this case, ClickHouse can reload the dictionary earlier if the dictionary con

When upgrading the dictionaries, the ClickHouse server applies different logic depending on the type of [source](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) (a definition sketch follows the list below):

- For a text file, it checks the time of modification. If the time differs from the previously recorded time, the dictionary is updated.
- For MyISAM tables, the time of modification is checked using a `SHOW TABLE STATUS` query.
- Dictionaries from other sources are updated every time by default.
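
A dictionary definition that sets such an update interval might be sketched as follows (the dictionary name, file path, and layout are illustrative, not from the original text):

```sql
CREATE DICTIONARY hypothetical_dict
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(FILE(path '/var/lib/clickhouse/user_files/dict.tsv' format 'TabSeparated'))
LAYOUT(FLAT())
-- The server re-checks the source within this interval; for a file source
-- it compares the modification time before reloading.
LIFETIME(MIN 300 MAX 360);
```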

@ -503,3 +503,34 @@ Supported modifiers for Format:

| %% | a % sign | % |

[Original article](https://clickhouse.tech/docs/en/query_language/functions/date_time_functions/) <!--hide-->

## FROM_UNIXTIME

When there is only a single argument of integer type, it acts in the same way as `toDateTime` and returns the [DateTime](../../sql-reference/data-types/datetime.md) type.

For example:

```sql
SELECT FROM_UNIXTIME(423543535)
```

```text
┌─FROM_UNIXTIME(423543535)─┐
│      1983-06-04 10:58:55 │
└──────────────────────────┘
```

When there are two arguments, the first being an integer or DateTime and the second a constant format string, it acts in the same way as `formatDateTime` and returns the `String` type.

For example:

```sql
SELECT FROM_UNIXTIME(1234334543, '%Y-%m-%d %R:%S') AS DateTime
```

```text
┌─DateTime────────────┐
│ 2009-02-11 14:42:23 │
└─────────────────────┘
```

@ -1,554 +0,0 @@

---
toc_priority: 62
toc_title: Geographical Coordinates
---

# Functions for Working with Geographical Coordinates {#functions-for-working-with-geographical-coordinates}

## greatCircleDistance {#greatcircledistance}

Calculates the distance between two points on the Earth’s surface using [the great-circle formula](https://en.wikipedia.org/wiki/Great-circle_distance).

``` sql
greatCircleDistance(lon1Deg, lat1Deg, lon2Deg, lat2Deg)
```

**Input parameters**

- `lon1Deg` — Longitude of the first point in degrees. Range: `[-180°, 180°]`.
- `lat1Deg` — Latitude of the first point in degrees. Range: `[-90°, 90°]`.
- `lon2Deg` — Longitude of the second point in degrees. Range: `[-180°, 180°]`.
- `lat2Deg` — Latitude of the second point in degrees. Range: `[-90°, 90°]`.

Positive values correspond to North latitude and East longitude, and negative values correspond to South latitude and West longitude.

**Returned value**

The distance between two points on the Earth’s surface, in meters.

Generates an exception when the input parameter values fall outside of the range.

**Example**

``` sql
SELECT greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673)
```

``` text
┌─greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673)─┐
│                                                14132374.194975413 │
└───────────────────────────────────────────────────────────────────┘
```

## greatCircleAngle {#greatcircleangle}

Calculates the central angle between two points on the Earth’s surface using [the great-circle formula](https://en.wikipedia.org/wiki/Great-circle_distance).

``` sql
greatCircleAngle(lon1Deg, lat1Deg, lon2Deg, lat2Deg)
```

**Input parameters**

- `lon1Deg` — Longitude of the first point in degrees.
- `lat1Deg` — Latitude of the first point in degrees.
- `lon2Deg` — Longitude of the second point in degrees.
- `lat2Deg` — Latitude of the second point in degrees.

**Returned value**

The central angle between two points in degrees.

**Example**

``` sql
SELECT greatCircleAngle(0, 0, 45, 0) AS arc
```

``` text
┌─arc─┐
│  45 │
└─────┘
```

## pointInEllipses {#pointinellipses}

Checks whether the point belongs to at least one of the ellipses.
Coordinates are geometric in the Cartesian coordinate system.

``` sql
pointInEllipses(x, y, x₀, y₀, a₀, b₀,...,xₙ, yₙ, aₙ, bₙ)
```

**Input parameters**

- `x, y` — Coordinates of a point on the plane.
- `xᵢ, yᵢ` — Coordinates of the center of the `i`-th ellipse.
- `aᵢ, bᵢ` — Axes of the `i`-th ellipse in units of x, y coordinates.

The number of input parameters must be `2+4⋅n`, where `n` is the number of ellipses.

**Returned values**

`1` if the point is inside at least one of the ellipses; `0` if it is not.

**Example**

``` sql
SELECT pointInEllipses(10., 10., 10., 9.1, 1., 0.9999)
```

``` text
┌─pointInEllipses(10., 10., 10., 9.1, 1., 0.9999)─┐
│                                               1 │
└─────────────────────────────────────────────────┘
```

## pointInPolygon {#pointinpolygon}

Checks whether the point belongs to the polygon on the plane.

``` sql
pointInPolygon((x, y), [(a, b), (c, d) ...], ...)
```

**Input values**

- `(x, y)` — Coordinates of a point on the plane. Data type — [Tuple](../../sql-reference/data-types/tuple.md) — A tuple of two numbers.
- `[(a, b), (c, d) ...]` — Polygon vertices. Data type — [Array](../../sql-reference/data-types/array.md). Each vertex is represented by a pair of coordinates `(a, b)`. Vertices should be specified in a clockwise or counterclockwise order. The minimum number of vertices is 3. The polygon must be constant.
- The function also supports polygons with holes (cut out sections). In this case, add polygons that define the cut out sections using additional arguments of the function. The function does not support non-simply-connected polygons.

**Returned values**

`1` if the point is inside the polygon, `0` if it is not.
If the point is on the polygon boundary, the function may return either 0 or 1.

**Example**

``` sql
SELECT pointInPolygon((3., 3.), [(6, 0), (8, 4), (5, 8), (0, 2)]) AS res
```

``` text
┌─res─┐
│   1 │
└─────┘
```

## geohashEncode {#geohashencode}

Encodes latitude and longitude as a geohash-string; please see (http://geohash.org/, https://en.wikipedia.org/wiki/Geohash).

``` sql
geohashEncode(longitude, latitude, [precision])
```

**Input values**

- longitude - longitude part of the coordinate you want to encode. Floating in range `[-180°, 180°]`
- latitude - latitude part of the coordinate you want to encode. Floating in range `[-90°, 90°]`
- precision - Optional, length of the resulting encoded string, defaults to `12`. Integer in range `[1, 12]`. Any value less than `1` or greater than `12` is silently converted to `12`.

**Returned values**

- alphanumeric `String` of encoded coordinate (modified version of the base32-encoding alphabet is used).

**Example**

``` sql
SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res
```

``` text
┌─res──────────┐
│ ezs42d000000 │
└──────────────┘
```

## geohashDecode {#geohashdecode}

Decodes any geohash-encoded string into longitude and latitude.

**Input values**

- encoded string - geohash-encoded string.

**Returned values**

- (longitude, latitude) - 2-tuple of `Float64` values of longitude and latitude.

**Example**

``` sql
SELECT geohashDecode('ezs42') AS res
```

``` text
┌─res─────────────────────────────┐
│ (-5.60302734375,42.60498046875) │
└─────────────────────────────────┘
```

## geoToH3 {#geotoh3}

Returns the [H3](https://uber.github.io/h3/#/documentation/overview/introduction) point index `(lon, lat)` with the specified resolution.

[H3](https://uber.github.io/h3/#/documentation/overview/introduction) is a geographical indexing system where the Earth’s surface is divided into even hexagonal tiles. This system is hierarchical, i.e. each hexagon on the top level can be split into seven even but smaller ones, and so on.

This index is used primarily for bucketing locations and other geospatial manipulations.

**Syntax**

``` sql
geoToH3(lon, lat, resolution)
```

**Parameters**

- `lon` — Longitude. Type: [Float64](../../sql-reference/data-types/float.md).
- `lat` — Latitude. Type: [Float64](../../sql-reference/data-types/float.md).
- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../sql-reference/data-types/int-uint.md).

**Returned values**

- Hexagon index number.
- 0 in case of error.

Type: `UInt64`.

**Example**

Query:

``` sql
SELECT geoToH3(37.79506683, 55.71290588, 15) as h3Index
```

Result:

``` text
┌────────────h3Index─┐
│ 644325524701193974 │
└────────────────────┘
```

## geohashesInBox {#geohashesinbox}

Returns an array of geohash-encoded strings of given precision that fall inside and intersect the boundaries of a given box, basically a 2D grid flattened into an array.

**Input values**

- longitude_min - min longitude, floating value in range `[-180°, 180°]`
- latitude_min - min latitude, floating value in range `[-90°, 90°]`
- longitude_max - max longitude, floating value in range `[-180°, 180°]`
- latitude_max - max latitude, floating value in range `[-90°, 90°]`
- precision - geohash precision, `UInt8` in range `[1, 12]`

Please note that all coordinate parameters should be of the same type: either `Float32` or `Float64`.

**Returned values**

- array of precision-long strings of geohash-boxes covering the provided area; you should not rely on the order of items.
- `[]` - empty array if *min* values of *latitude* and *longitude* aren’t less than the corresponding *max* values.

Please note that the function will throw an exception if the resulting array is over 10’000’000 items long.

**Example**

``` sql
SELECT geohashesInBox(24.48, 40.56, 24.785, 40.81, 4) AS thasos
```

``` text
┌─thasos──────────────────────────────────────┐
│ ['sx1q','sx1r','sx32','sx1w','sx1x','sx38'] │
└─────────────────────────────────────────────┘
```

## h3GetBaseCell {#h3getbasecell}

Returns the base cell number of the H3 index.

**Syntax**

``` sql
h3GetBaseCell(index)
```

**Parameter**

- `index` — Hexagon index number. Type: [UInt64](../../sql-reference/data-types/int-uint.md).

**Returned value**

- Hexagon base cell number.

Type: [UInt8](../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT h3GetBaseCell(612916788725809151) as basecell;
```

Result:

``` text
┌─basecell─┐
│       12 │
└──────────┘
```

## h3HexAreaM2 {#h3hexaream2}

Returns the average hexagon area in square meters at the given resolution.

**Syntax**

``` sql
h3HexAreaM2(resolution)
```

**Parameter**

- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../sql-reference/data-types/int-uint.md).

**Returned value**

- Area in square meters.

Type: [Float64](../../sql-reference/data-types/float.md).

**Example**

Query:

``` sql
SELECT h3HexAreaM2(13) as area;
```

Result:

``` text
┌─area─┐
│ 43.9 │
└──────┘
```

## h3IndexesAreNeighbors {#h3indexesareneighbors}

Returns whether or not the provided H3 indexes are neighbors.

**Syntax**

``` sql
h3IndexesAreNeighbors(index1, index2)
```

**Parameters**

- `index1` — Hexagon index number. Type: [UInt64](../../sql-reference/data-types/int-uint.md).
- `index2` — Hexagon index number. Type: [UInt64](../../sql-reference/data-types/int-uint.md).

**Returned value**

- `1` — Indexes are neighbors.
- `0` — Indexes are not neighbors.

Type: [UInt8](../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT h3IndexesAreNeighbors(617420388351344639, 617420388352655359) AS n;
```

Result:

``` text
┌─n─┐
│ 1 │
└───┘
```

## h3ToChildren {#h3tochildren}

Returns an array of child indexes for the given H3 index.

**Syntax**

``` sql
h3ToChildren(index, resolution)
```

**Parameters**

- `index` — Hexagon index number. Type: [UInt64](../../sql-reference/data-types/int-uint.md).
- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../sql-reference/data-types/int-uint.md).

**Returned values**

- Array of the child H3-indexes.

Type: [Array](../../sql-reference/data-types/array.md)([UInt64](../../sql-reference/data-types/int-uint.md)).

**Example**

Query:

``` sql
SELECT h3ToChildren(599405990164561919, 6) AS children;
```

Result:

``` text
┌─children───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ [603909588852408319,603909588986626047,603909589120843775,603909589255061503,603909589389279231,603909589523496959,603909589657714687] │
└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```

## h3ToParent {#h3toparent}

Returns the parent (coarser) index containing the given H3 index.

**Syntax**

``` sql
h3ToParent(index, resolution)
```

**Parameters**

- `index` — Hexagon index number. Type: [UInt64](../../sql-reference/data-types/int-uint.md).
- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../sql-reference/data-types/int-uint.md).

**Returned value**

- Parent H3 index.

Type: [UInt64](../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT h3ToParent(599405990164561919, 3) as parent;
```

Result:

``` text
┌─────────────parent─┐
│ 590398848891879423 │
└────────────────────┘
```

## h3ToString {#h3tostring}

Converts the `H3Index` representation of the index to the string representation.

``` sql
h3ToString(index)
```

**Parameter**

- `index` — Hexagon index number. Type: [UInt64](../../sql-reference/data-types/int-uint.md).

**Returned value**

- String representation of the H3 index.

Type: [String](../../sql-reference/data-types/string.md).

**Example**

Query:

``` sql
SELECT h3ToString(617420388352917503) as h3_string;
```

Result:

``` text
┌─h3_string───────┐
│ 89184926cdbffff │
└─────────────────┘
```

## stringToH3 {#stringtoh3}

Converts the string representation to the `H3Index` (UInt64) representation.

**Syntax**

``` sql
stringToH3(index_str)
```

**Parameter**

- `index_str` — String representation of the H3 index. Type: [String](../../sql-reference/data-types/string.md).

**Returned value**

- Hexagon index number. Returns 0 on error. Type: [UInt64](../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT stringToH3('89184926cc3ffff') as index;
```

Result:

``` text
┌──────────────index─┐
│ 617420388351344639 │
└────────────────────┘
```

## h3GetResolution {#h3getresolution}

Returns the resolution of the H3 index.

**Syntax**

``` sql
h3GetResolution(index)
```

**Parameter**

- `index` — Hexagon index number. Type: [UInt64](../../sql-reference/data-types/int-uint.md).

**Returned value**

- Index resolution. Range: `[0, 15]`. Type: [UInt8](../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT h3GetResolution(617420388352917503) as res;
```

Result:

``` text
┌─res─┐
│   9 │
└─────┘
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/functions/geo/) <!--hide-->

140
docs/en/sql-reference/functions/geo/coordinates.md
Normal file
@ -0,0 +1,140 @@

---
toc_title: Geographical Coordinates
toc_priority: 62
---

# Functions for Working with Geographical Coordinates {#geographical-coordinates}

## greatCircleDistance {#greatcircledistance}

Calculates the distance between two points on the Earth’s surface using [the great-circle formula](https://en.wikipedia.org/wiki/Great-circle_distance).

``` sql
greatCircleDistance(lon1Deg, lat1Deg, lon2Deg, lat2Deg)
```

**Input parameters**

- `lon1Deg` — Longitude of the first point in degrees. Range: `[-180°, 180°]`.
- `lat1Deg` — Latitude of the first point in degrees. Range: `[-90°, 90°]`.
- `lon2Deg` — Longitude of the second point in degrees. Range: `[-180°, 180°]`.
- `lat2Deg` — Latitude of the second point in degrees. Range: `[-90°, 90°]`.

Positive values correspond to North latitude and East longitude, and negative values correspond to South latitude and West longitude.

**Returned value**

The distance between two points on the Earth’s surface, in meters.

Generates an exception when the input parameter values fall outside of the range.

**Example**

``` sql
SELECT greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673)
```

``` text
┌─greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673)─┐
│                                                14132374.194975413 │
└───────────────────────────────────────────────────────────────────┘
```

## greatCircleAngle {#greatcircleangle}

Calculates the central angle between two points on the Earth’s surface using [the great-circle formula](https://en.wikipedia.org/wiki/Great-circle_distance).

``` sql
greatCircleAngle(lon1Deg, lat1Deg, lon2Deg, lat2Deg)
```

**Input parameters**

- `lon1Deg` — Longitude of the first point in degrees.
- `lat1Deg` — Latitude of the first point in degrees.
- `lon2Deg` — Longitude of the second point in degrees.
- `lat2Deg` — Latitude of the second point in degrees.

**Returned value**

The central angle between two points in degrees.

**Example**

``` sql
SELECT greatCircleAngle(0, 0, 45, 0) AS arc
```

``` text
┌─arc─┐
│  45 │
└─────┘
```

## pointInEllipses {#pointinellipses}

Checks whether the point belongs to at least one of the ellipses.
Coordinates are geometric in the Cartesian coordinate system.

``` sql
pointInEllipses(x, y, x₀, y₀, a₀, b₀,...,xₙ, yₙ, aₙ, bₙ)
```

**Input parameters**

- `x, y` — Coordinates of a point on the plane.
- `xᵢ, yᵢ` — Coordinates of the center of the `i`-th ellipse.
- `aᵢ, bᵢ` — Axes of the `i`-th ellipse in units of x, y coordinates.

The number of input parameters must be `2+4⋅n`, where `n` is the number of ellipses.

**Returned values**

`1` if the point is inside at least one of the ellipses; `0` if it is not.

**Example**

``` sql
SELECT pointInEllipses(10., 10., 10., 9.1, 1., 0.9999)
```

``` text
┌─pointInEllipses(10., 10., 10., 9.1, 1., 0.9999)─┐
│                                               1 │
└─────────────────────────────────────────────────┘
```

## pointInPolygon {#pointinpolygon}

Checks whether the point belongs to the polygon on the plane.

``` sql
pointInPolygon((x, y), [(a, b), (c, d) ...], ...)
```

**Input values**

- `(x, y)` — Coordinates of a point on the plane. Data type — [Tuple](../../../sql-reference/data-types/tuple.md) — A tuple of two numbers.
- `[(a, b), (c, d) ...]` — Polygon vertices. Data type — [Array](../../../sql-reference/data-types/array.md). Each vertex is represented by a pair of coordinates `(a, b)`. Vertices should be specified in a clockwise or counterclockwise order. The minimum number of vertices is 3. The polygon must be constant.
- The function also supports polygons with holes (cut out sections). In this case, add polygons that define the cut out sections using additional arguments of the function; a sketch is shown after the example below. The function does not support non-simply-connected polygons.

**Returned values**

`1` if the point is inside the polygon, `0` if it is not.
If the point is on the polygon boundary, the function may return either 0 or 1.

**Example**

``` sql
SELECT pointInPolygon((3., 3.), [(6, 0), (8, 4), (5, 8), (0, 2)]) AS res
```

``` text
┌─res─┐
│   1 │
└─────┘
```
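
The holes variant can be sketched like this (an illustrative query, not from the original text): the point `(3, 3)` lies inside the outer square but also inside the cut-out section, so the function should return `0`.

``` sql
-- The second array defines a hole cut out of the first polygon;
-- (3, 3) falls inside the hole, so the expected result is 0.
SELECT pointInPolygon((3., 3.), [(0, 0), (10, 0), (10, 10), (0, 10)], [(2, 2), (4, 2), (4, 4), (2, 4)]) AS res
```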

[Original article](https://clickhouse.tech/docs/en/sql-reference/functions/geo/coordinates) <!--hide-->

111
docs/en/sql-reference/functions/geo/geohash.md
Normal file
@ -0,0 +1,111 @@

---
toc_title: Geohash
---

# Functions for Working with Geohash {#geohash}

[Geohash](https://en.wikipedia.org/wiki/Geohash) is a geocode system which subdivides the Earth’s surface into buckets of grid shape and encodes each cell into a short string of letters and digits. It is a hierarchical data structure, so the longer the geohash string is, the more precise the geographic location is.

If you need to manually convert geographic coordinates to geohash strings, you can use [geohash.org](http://geohash.org/).
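
The length/precision trade-off can be sketched with the `geohashEncode` function described below (an illustrative query; the coordinates are reused from its example):

``` sql
-- A longer geohash pins down a smaller cell around the same point.
SELECT
    geohashEncode(-5.60302734375, 42.593994140625, 4) AS coarse,
    geohashEncode(-5.60302734375, 42.593994140625, 10) AS fine
```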

## geohashEncode {#geohashencode}

Encodes latitude and longitude as a [geohash](#geohash)-string.

``` sql
geohashEncode(longitude, latitude, [precision])
```

**Input values**

- longitude - longitude part of the coordinate you want to encode. Floating in range `[-180°, 180°]`
- latitude - latitude part of the coordinate you want to encode. Floating in range `[-90°, 90°]`
- precision - Optional, length of the resulting encoded string, defaults to `12`. Integer in range `[1, 12]`. Any value less than `1` or greater than `12` is silently converted to `12`.

**Returned values**

- alphanumeric `String` of encoded coordinate (modified version of the base32-encoding alphabet is used).

**Example**

``` sql
SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res
```

``` text
┌─res──────────┐
│ ezs42d000000 │
└──────────────┘
```

## geohashDecode {#geohashdecode}

Decodes any [geohash](#geohash)-encoded string into longitude and latitude.

**Input values**

- encoded string - geohash-encoded string.

**Returned values**

- (longitude, latitude) - 2-tuple of `Float64` values of longitude and latitude.

**Example**

``` sql
SELECT geohashDecode('ezs42') AS res
```

``` text
┌─res─────────────────────────────┐
│ (-5.60302734375,42.60498046875) │
└─────────────────────────────────┘
```

## geohashesInBox {#geohashesinbox}

Returns an array of [geohash](#geohash)-encoded strings of given precision that fall inside and intersect the boundaries of a given box, basically a 2D grid flattened into an array.

**Syntax**

``` sql
geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precision)
```

**Parameters**

- `longitude_min` — Minimum longitude. Range: `[-180°, 180°]`. Type: [Float](../../../sql-reference/data-types/float.md).
- `latitude_min` — Minimum latitude. Range: `[-90°, 90°]`. Type: [Float](../../../sql-reference/data-types/float.md).
- `longitude_max` — Maximum longitude. Range: `[-180°, 180°]`. Type: [Float](../../../sql-reference/data-types/float.md).
- `latitude_max` — Maximum latitude. Range: `[-90°, 90°]`. Type: [Float](../../../sql-reference/data-types/float.md).
- `precision` — Geohash precision. Range: `[1, 12]`. Type: [UInt8](../../../sql-reference/data-types/int-uint.md).

!!! info "Note"
    All coordinate parameters must be of the same type: either `Float32` or `Float64`.

**Returned values**

- Array of precision-long strings of geohash-boxes covering the provided area; you should not rely on the order of items.
- `[]` - Empty array if minimum latitude and longitude values aren’t less than the corresponding maximum values.

Type: [Array](../../../sql-reference/data-types/array.md)([String](../../../sql-reference/data-types/string.md)).

!!! info "Note"
    The function throws an exception if the resulting array is over 10’000’000 items long.

**Example**

Query:

``` sql
SELECT geohashesInBox(24.48, 40.56, 24.785, 40.81, 4) AS thasos
```

Result:

``` text
┌─thasos──────────────────────────────────────┐
│ ['sx1q','sx1r','sx32','sx1w','sx1x','sx38'] │
└─────────────────────────────────────────────┘
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/functions/geo/geohash) <!--hide-->

522
docs/en/sql-reference/functions/geo/h3.md
Normal file
@ -0,0 +1,522 @@

---
toc_title: H3 Indexes
---

# Functions for Working with H3 Indexes {#h3index}

[H3](https://eng.uber.com/h3/) is a geographical indexing system where the Earth’s surface is divided into a grid of even hexagonal cells. This system is hierarchical, i.e. each hexagon on the top level ("parent") can be split into seven even but smaller ones ("children"), and so on.

The level of the hierarchy is called `resolution` and can receive a value from `0` to `15`, where `0` is the `base` level with the largest and coarsest cells.

A latitude and longitude pair can be transformed to a 64-bit H3 index, identifying a grid cell.

The H3 index is used primarily for bucketing locations and other geospatial manipulations.

The full description of the H3 system is available at [the Uber Engineering site](https://eng.uber.com/h3/).
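
As a sketch of this hierarchy, the functions described below can be chained to map a coordinate to a fine cell and then climb to its coarser parent (an illustrative query, not from the original text):

``` sql
-- geoToH3 produces a resolution-10 cell; h3ToParent returns the
-- resolution-5 cell that contains it.
SELECT h3ToParent(geoToH3(37.79506683, 55.71290588, 10), 5) AS parent_cell
```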

## h3IsValid {#h3isvalid}

Verifies whether the number is a valid [H3](#h3index) index.

**Syntax**

``` sql
h3IsValid(h3index)
```

**Parameter**

- `h3index` — Hexagon index number. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).

**Returned values**

- 1 — The number is a valid H3 index.
- 0 — The number is not a valid H3 index.

Type: [UInt8](../../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT h3IsValid(630814730351855103) as h3IsValid
```

Result:

``` text
┌─h3IsValid─┐
│         1 │
└───────────┘
```

## h3GetResolution {#h3getresolution}

Returns the resolution of the given [H3](#h3index) index.

**Syntax**

``` sql
h3GetResolution(h3index)
```

**Parameter**

- `h3index` — Hexagon index number. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).

**Returned values**

- Index resolution. Range: `[0, 15]`.
- If the index is not valid, the function returns a random value. Use [h3IsValid](#h3isvalid) to verify the index (see the sketch after the example below).

Type: [UInt8](../../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT h3GetResolution(639821929606596015) as resolution
```

Result:

``` text
┌─resolution─┐
│         14 │
└────────────┘
```
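
Because an invalid index yields a random value, it may help to filter through `h3IsValid` first (a sketch; the table name is hypothetical):

``` sql
-- Only ask for the resolution of indexes that pass validation.
SELECT h3GetResolution(h3index) AS res
FROM hypothetical_h3_table
WHERE h3IsValid(h3index)
```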
|
## h3EdgeAngle {#h3edgeangle}
|
||||||
|
|
||||||
|
Calculates the average length of the [H3](#h3index) hexagon edge in grades.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
h3EdgeAngle(resolution)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameter**
|
||||||
|
|
||||||
|
- `resolution` — Index resolution. Type: [UInt8](../../../sql-reference/data-types/int-uint.md). Range: `[0, 15]`.
|
||||||
|
|
||||||
|
**Returned values**
|
||||||
|
|
||||||
|
- The average length of the [H3](#h3index) hexagon edge in grades. Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT h3EdgeAngle(10) as edgeAngle
|
||||||
|
```
|
||||||
|
Result:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌───────h3EdgeAngle(10)─┐
|
||||||
|
│ 0.0005927224846720883 │
|
||||||
|
└───────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## h3EdgeLengthM {#h3edgelengthm}
|
||||||
|
|
||||||
|
Calculates the average length of the [H3](#h3index) hexagon edge in meters.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
h3EdgeLengthM(resolution)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameter**
|
||||||
|
|
||||||
|
- `resolution` — Index resolution. Type: [UInt8](../../../sql-reference/data-types/int-uint.md). Range: `[0, 15]`.
|
||||||
|
|
||||||
|
**Returned values**
|
||||||
|
|
||||||
|
- The average length of the [H3](#h3index) hexagon edge in meters. Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT h3EdgeLengthM(15) as edgeLengthM
|
||||||
|
```
|
||||||
|
Result:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─edgeLengthM─┐
|
||||||
|
│ 0.509713273 │
|
||||||
|
└─────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## geoToH3 {#geotoh3}
|
||||||
|
|
||||||
|
Returns [H3](#h3index) point index `(lon, lat)` with specified resolution.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
geoToH3(lon, lat, resolution)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameters**
|
||||||
|
|
||||||
|
- `lon` — Longitude. Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||||
|
- `lat` — Latitude. Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||||
|
- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
|
||||||
|
|
||||||
|
**Returned values**
|
||||||
|
|
||||||
|
- Hexagon index number.
|
||||||
|
- 0 in case of error.
|
||||||
|
|
||||||
|
Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT geoToH3(37.79506683, 55.71290588, 15) as h3Index
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌────────────h3Index─┐
|
||||||
|
│ 644325524701193974 │
|
||||||
|
└────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## h3kRing {#h3kring}
|
||||||
|
|
||||||
|
Lists all the [H3](#h3index) hexagons in the raduis of `k` from the given hexagon in random order.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
h3kRing(h3index, k)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameters**
|
||||||
|
|
||||||
|
- `h3index` — Hexagon index number. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||||
|
- `k` — Raduis. Type: [integer](../../../sql-reference/data-types/int-uint.md)
|
||||||
|
|
||||||
|
**Returned values**
|
||||||
|
|
||||||
|
- Array of H3 indexes.
|
||||||
|
|
||||||
|
Type: [Array](../../../sql-reference/data-types/array.md)([UInt64](../../../sql-reference/data-types/int-uint.md)).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT arrayJoin(h3kRing(644325529233966508, 1)) AS h3index
|
||||||
|
```
|
||||||
|
Result:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌────────────h3index─┐
|
||||||
|
│ 644325529233966508 │
|
||||||
|
│ 644325529233966497 │
|
||||||
|
│ 644325529233966510 │
|
||||||
|
│ 644325529233966504 │
|
||||||
|
│ 644325529233966509 │
|
||||||
|
│ 644325529233966355 │
|
||||||
|
│ 644325529233966354 │
|
||||||
|
└────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## h3GetBaseCell {#h3getbasecell}
|
||||||
|
|
||||||
|
Returns the base cell number of the [H3](#h3index) index.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
h3GetBaseCell(index)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameter**
|
||||||
|
|
||||||
|
- `index` — Hexagon index number. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||||
|
|
||||||
|
**Returned value**
|
||||||
|
|
||||||
|
- Hexagon base cell number.
|
||||||
|
|
||||||
|
Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT h3GetBaseCell(612916788725809151) as basecell;
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─basecell─┐
|
||||||
|
│ 12 │
|
||||||
|
└──────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## h3HexAreaM2 {#h3hexaream2}
|
||||||
|
|
||||||
|
Returns average hexagon area in square meters at the given resolution.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
h3HexAreaM2(resolution)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameter**
|
||||||
|
|
||||||
|
- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
|
||||||
|
|
||||||
|
**Returned value**
|
||||||
|
|
||||||
|
- Area in square meters.
|
||||||
|
|
||||||
|
Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT h3HexAreaM2(13) as area;
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─area─┐
|
||||||
|
│ 43.9 │
|
||||||
|
└──────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## h3IndexesAreNeighbors {#h3indexesareneighbors}
|
||||||
|
|
||||||
|
Returns whether or not the provided [H3](#h3index) indexes are neighbors.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
h3IndexesAreNeighbors(index1, index2)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameters**
|
||||||
|
|
||||||
|
- `index1` — Hexagon index number. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||||
|
- `index2` — Hexagon index number. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||||
|
|
||||||
|
**Returned value**
|
||||||
|
|
||||||
|
- `1` — Indexes are neighbours.
|
||||||
|
- `0` — Indexes are not neighbours.
|
||||||
|
|
||||||
|
Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT h3IndexesAreNeighbors(617420388351344639, 617420388352655359) AS n;
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─n─┐
|
||||||
|
│ 1 │
|
||||||
|
└───┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## h3ToChildren {#h3tochildren}
|
||||||
|
|
||||||
|
Returns an array of child indexes for the given [H3](#h3index) index.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
h3ToChildren(index, resolution)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameters**
|
||||||
|
|
||||||
|
- `index` — Hexagon index number. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||||
|
- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
|
||||||
|
|
||||||
|
**Returned values**
|
||||||
|
|
||||||
|
- Array of the child H3-indexes.
|
||||||
|
|
||||||
|
Type: [Array](../../../sql-reference/data-types/array.md)([UInt64](../../../sql-reference/data-types/int-uint.md)).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT h3ToChildren(599405990164561919, 6) AS children;
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─children───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
|
||||||
|
│ [603909588852408319,603909588986626047,603909589120843775,603909589255061503,603909589389279231,603909589523496959,603909589657714687] │
|
||||||
|
└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## h3ToParent {#h3toparent}
|
||||||
|
|
||||||
|
Returns the parent (coarser) index containing the given [H3](#h3index) index.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
h3ToParent(index, resolution)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameters**
|
||||||
|
|
||||||
|
- `index` — Hexagon index number. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||||
|
- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
|
||||||
|
|
||||||
|
**Returned value**
|
||||||
|
|
||||||
|
- Parent H3 index.
|
||||||
|
|
||||||
|
Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT h3ToParent(599405990164561919, 3) as parent;
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─────────────parent─┐
|
||||||
|
│ 590398848891879423 │
|
||||||
|
└────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## h3ToString {#h3tostring}

Converts the `H3Index` representation of the index to the string representation.

**Syntax**

``` sql
h3ToString(index)
```

**Parameter**

- `index` — Hexagon index number. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).

**Returned value**

- String representation of the H3 index.

Type: [String](../../../sql-reference/data-types/string.md).

**Example**

Query:

``` sql
SELECT h3ToString(617420388352917503) as h3_string;
```

Result:

``` text
┌─h3_string───────┐
│ 89184926cdbffff │
└─────────────────┘
```

## stringToH3 {#stringtoh3}

Converts the string representation to the `H3Index` (UInt64) representation.

**Syntax**

``` sql
stringToH3(index_str)
```

**Parameter**

- `index_str` — String representation of the H3 index. Type: [String](../../../sql-reference/data-types/string.md).

**Returned value**

- Hexagon index number. Returns 0 on error. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT stringToH3('89184926cc3ffff') as index;
```

Result:

``` text
┌──────────────index─┐
│ 617420388351344639 │
└────────────────────┘
```

## h3GetResolution {#h3getresolution}

Returns the resolution of the [H3](#h3index) index.

**Syntax**

``` sql
h3GetResolution(index)
```

**Parameter**

- `index` — Hexagon index number. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).

**Returned value**

- Index resolution. Range: `[0, 15]`. Type: [UInt8](../../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT h3GetResolution(617420388352917503) as res;
```

Result:

``` text
┌─res─┐
│   9 │
└─────┘
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/functions/geo/h3) <!--hide-->

8 docs/en/sql-reference/functions/geo/index.md Normal file

@ -0,0 +1,8 @@
---
toc_title: hidden
toc_priority: 62
toc_folder_title: Geo
---

[Original article](https://clickhouse.tech/docs/en/sql-reference/functions/geo/) <!--hide-->

@ -1351,6 +1351,44 @@ len: 30
- [randomPrintableASCII](../../sql-reference/functions/other-functions.md#randomascii)

## randomFixedString {#randomfixedstring}

Generates a binary string of the specified length filled with random bytes (including zero bytes).

**Syntax**

``` sql
randomFixedString(length);
```

**Parameters**

- `length` — String length in bytes. [UInt64](../../sql-reference/data-types/int-uint.md).

**Returned value(s)**

- String filled with random bytes.

Type: [FixedString](../../sql-reference/data-types/fixedstring.md).

**Example**

Query:

```sql
SELECT randomFixedString(13) as rnd, toTypeName(rnd)
```

Result:

```text
┌─rnd──────┬─toTypeName(randomFixedString(13))─┐
│ j▒h㋖HɨZ'▒ │ FixedString(13)                 │
└──────────┴───────────────────────────────────┘
```

## randomStringUTF8 {#randomstringutf8}

Generates a random string of a specified length. The result string contains valid UTF-8 code points. The values of the code points may be outside the range of assigned Unicode.
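
A quick, hedged illustration (the output is random, so the exact string differs between runs):

``` sql
SELECT randomStringUTF8(13) AS str, toTypeName(str);
```
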
48 docs/en/sql-reference/functions/tuple-map-functions.md Normal file

@ -0,0 +1,48 @@
---
toc_priority: 46
toc_title: Working with maps
---

# Functions for maps {#functions-for-working-with-tuple-maps}

## mapAdd(Tuple(Array, Array), Tuple(Array, Array) [, ...]) {#function-mapadd}

Collects all the keys and sums the corresponding values.

Arguments are tuples of two arrays, where the items in the first array represent keys and the second array contains the values for each key.
All key arrays must have the same type, and all value arrays must contain items that are promotable to a single type (Int64, UInt64 or Float64).
The common promoted type is used as the type of the result array.

Returns one tuple, where the first array contains the sorted keys and the second array contains the values.

```sql
SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) as res, toTypeName(res) as type;
```

```text
┌─res───────────┬─type───────────────────────────────┐
│ ([1,2],[2,2]) │ Tuple(Array(UInt8), Array(UInt64)) │
└───────────────┴────────────────────────────────────┘
```

## mapSubtract(Tuple(Array, Array), Tuple(Array, Array) [, ...]) {#function-mapsubtract}

Collects all the keys and subtracts the corresponding values.

Arguments are tuples of two arrays, where the items in the first array represent keys and the second array contains the values for each key.
All key arrays must have the same type, and all value arrays must contain items that are promotable to a single type (Int64, UInt64 or Float64).
The common promoted type is used as the type of the result array.

Returns one tuple, where the first array contains the sorted keys and the second array contains the values.

```sql
SELECT mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt32(2), 1])) as res, toTypeName(res) as type;
```

```text
┌─res────────────┬─type──────────────────────────────┐
│ ([1,2],[-1,0]) │ Tuple(Array(UInt8), Array(Int64)) │
└────────────────┴───────────────────────────────────┘
```

@ -9,7 +9,7 @@ toc_title: DELETE
ALTER TABLE [db.]table [ON CLUSTER cluster] DELETE WHERE filter_expr
```

Allows to delete data matching the specified filtering expression. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations).

!!! note "Note"
    The `ALTER TABLE` prefix makes this syntax different from most other systems supporting SQL. It is intended to signify that unlike similar queries in OLTP databases this is a heavy operation not designed for frequent use.

@ -17,3 +17,11 @@ Allows to delete data matching the specified filtering expression.
The `filter_expr` must be of type `UInt8`. The query deletes rows in the table for which this expression takes a non-zero value.

One query can contain several commands separated by commas.

The synchronicity of the query processing is defined by the [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting. By default, it is asynchronous.

**See also**

- [Mutations](../../../sql-reference/statements/alter/index.md#mutations)
- [Synchronicity of ALTER Queries](../../../sql-reference/statements/alter/index.md#synchronicity-of-alter-queries)
- [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting
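
For instance, a minimal sketch (the `visits` table and `user_id` column are hypothetical):

``` sql
SET mutations_sync = 1;  -- wait for the mutation to finish on the current server
ALTER TABLE visits DELETE WHERE user_id = 12345;
```
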
@ -27,12 +27,6 @@ While these `ALTER` settings modify entities related to role-based access control:
- [ROW POLICY](../../../sql-reference/statements/alter/row-policy.md)
- [SETTINGS PROFILE](../../../sql-reference/statements/alter/settings-profile.md)

## Mutations {#mutations}

`ALTER` queries that are intended to manipulate table data are implemented with a mechanism called “mutations”, most notably [ALTER TABLE … DELETE](../../../sql-reference/statements/alter/delete.md) and [ALTER TABLE … UPDATE](../../../sql-reference/statements/alter/update.md). They are asynchronous background processes, similar to merges in [MergeTree](../../../engines/table-engines/mergetree-family/index.md) tables, that produce new “mutated” versions of parts.

@ -45,4 +39,12 @@ A mutation query returns immediately after the mutation entry is added (in case

Entries for finished mutations are not deleted right away (the number of preserved entries is determined by the `finished_mutations_to_keep` storage engine parameter). Older mutation entries are deleted.

## Synchronicity of ALTER Queries {#synchronicity-of-alter-queries}

For non-replicated tables, all `ALTER` queries are performed synchronously. For replicated tables, the query just adds instructions for the appropriate actions to `ZooKeeper`, and the actions themselves are performed as soon as possible. However, the query can wait for these actions to be completed on all the replicas.

For `ALTER ... ATTACH|DETACH|DROP` queries, you can use the `replication_alter_partitions_sync` setting to set up waiting. Possible values: `0` – do not wait; `1` – only wait for own execution (default); `2` – wait for all.

For `ALTER TABLE ... UPDATE|DELETE` queries the synchronicity is defined by the [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting.

[Original article](https://clickhouse.tech/docs/en/query_language/alter/) <!--hide-->

@ -9,7 +9,7 @@ toc_title: UPDATE
ALTER TABLE [db.]table UPDATE column1 = expr1 [, ...] WHERE filter_expr
```

Allows to manipulate data matching the specified filtering expression. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations).

!!! note "Note"
    The `ALTER TABLE` prefix makes this syntax different from most other systems supporting SQL. It is intended to signify that unlike similar queries in OLTP databases this is a heavy operation not designed for frequent use.

@ -17,3 +17,12 @@ Allows to manipulate data matching the specified filtering expression.
The `filter_expr` must be of type `UInt8`. This query updates values of specified columns to the values of corresponding expressions in rows for which the `filter_expr` takes a non-zero value. Values are casted to the column type using the `CAST` operator. Updating columns that are used in the calculation of the primary or the partition key is not supported.

One query can contain several commands separated by commas.

The synchronicity of the query processing is defined by the [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting. By default, it is asynchronous.

**See also**

- [Mutations](../../../sql-reference/statements/alter/index.md#mutations)
- [Synchronicity of ALTER Queries](../../../sql-reference/statements/alter/index.md#synchronicity-of-alter-queries)
- [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting
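
Again a minimal sketch (the table and columns are hypothetical):

``` sql
SET mutations_sync = 2;  -- wait for the mutation to finish on all replicas
ALTER TABLE visits UPDATE is_active = 0 WHERE last_seen < '2020-01-01';
```
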
@ -1,5 +1,6 @@
---
toc_priority: 42
toc_title: ATTACH
---

# ATTACH Statement {#attach}

@ -1,5 +1,6 @@
---
toc_priority: 43
toc_title: CHECK
---

# CHECK TABLE Statement {#check-table}

@ -1,5 +1,6 @@
---
toc_priority: 44
toc_title: DESCRIBE
---

# DESCRIBE TABLE Statement {#misc-describe-table}

@ -1,5 +1,6 @@
---
toc_priority: 45
toc_title: DETACH
---

# DETACH Statement {#detach}

@ -11,6 +12,5 @@ DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
```

This does not delete the table’s data or metadata. On the next server launch, the server will read the metadata and find out about the table again.

Similarly, a “detached” table can be re-attached using the `ATTACH` query (with the exception of system tables, which do not have metadata stored for them).

@ -1,5 +1,6 @@
---
toc_priority: 46
toc_title: DROP
---

# DROP Statements {#drop}

@ -1,5 +1,6 @@
---
toc_priority: 47
toc_title: EXISTS
---

# EXISTS Statement {#exists-statement}

@ -1,8 +1,9 @@
---
toc_priority: 48
toc_title: KILL
---

# KILL Statements {#kill-statements}

There are two kinds of kill statements: to kill a query and to kill a mutation.
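
For example, a hedged sketch (the `query_id` value is hypothetical):

``` sql
-- Terminate a long-running query without waiting for confirmation;
-- KILL MUTATION works analogously against system.mutations.
KILL QUERY WHERE query_id = '2-857d-4a57-9ee0-327da5d60a90' ASYNC;
```
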
@ -1,5 +1,6 @@
---
toc_priority: 49
toc_title: OPTIMIZE
---

# OPTIMIZE Statement {#misc_operations-optimize}

@ -1,5 +1,6 @@
---
toc_priority: 50
toc_title: RENAME
---

# RENAME Statement {#misc_operations-rename}

@ -1,5 +1,6 @@
---
toc_priority: 52
toc_title: SET ROLE
---

# SET ROLE Statement {#set-role-statement}

@ -1,5 +1,6 @@
---
toc_priority: 51
toc_title: SET
---

# SET Statement {#query-set}

@ -5,6 +5,8 @@ toc_title: SYSTEM

# SYSTEM Statements {#query-language-system}

The list of available `SYSTEM` statements:

- [RELOAD EMBEDDED DICTIONARIES](#query_language-system-reload-emdedded-dictionaries)
- [RELOAD DICTIONARIES](#query_language-system-reload-dictionaries)
- [RELOAD DICTIONARY](#query_language-system-reload-dictionary)

@ -36,7 +38,7 @@ toc_title: SYSTEM
- [RESTART REPLICA](#query_language-system-restart-replica)
- [RESTART REPLICAS](#query_language-system-restart-replicas)

## RELOAD EMBEDDED DICTIONARIES {#query_language-system-reload-emdedded-dictionaries}

Reloads all [internal dictionaries](../../sql-reference/dictionaries/internal-dicts.md).
By default, internal dictionaries are disabled.

@ -48,7 +50,7 @@ Reloads all dictionaries that have been successfully loaded before.
By default, dictionaries are loaded lazily (see [dictionaries_lazy_load](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-dictionaries_lazy_load)), so instead of being loaded automatically at startup, they are initialized on first access through the dictGet function or a SELECT from tables with ENGINE = Dictionary. The `SYSTEM RELOAD DICTIONARIES` query reloads such dictionaries (LOADED).
Always returns `Ok.` regardless of the result of the dictionary update.

## RELOAD DICTIONARY {#query_language-system-reload-dictionary}

Completely reloads the dictionary `dictionary_name`, regardless of the state of the dictionary (LOADED / NOT_LOADED / FAILED).
Always returns `Ok.` regardless of the result of updating the dictionary.
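
A minimal sketch (the dictionary name is hypothetical):

``` sql
SYSTEM RELOAD DICTIONARY my_dictionary;
```
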
@ -1,5 +1,6 @@
---
toc_priority: 53
toc_title: TRUNCATE
---

# TRUNCATE Statement {#truncate-statement}

@ -1,5 +1,6 @@
---
toc_priority: 54
toc_title: USE
---

# USE Statement {#use}

@ -7,6 +7,6 @@ toc_title: "M\xE9todo de codificaci\xF3n de datos:"

# MaterializedView {#materializedview}

Used to implement materialized views (for more information, see [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query)). For storing data, it uses a different engine that was specified when creating the view. When reading from a table, it just uses this engine.

[Original article](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/) <!--hide-->

@ -7,6 +7,6 @@ toc_title: "\u0645\u0627\u062F\u0647 \u0628\u06CC\u0646\u06CC"

# MaterializedView {#materializedview}

Used to implement materialized views (for more information, see [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query)). For storing data, it uses a different engine that was specified when creating the view. When reading from a table, it just uses this engine.

[Original article](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/) <!--hide-->

@ -7,6 +7,6 @@ toc_title: MaterializedView

# MaterializedView {#materializedview}

Used to implement materialized views (for more information, see [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query)). For storing data, it uses a different engine that was specified when creating the view. When reading from a table, it just uses this engine.

[Original article](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/) <!--hide-->

@ -7,6 +7,6 @@ toc_title: "\u30DE\u30C6\u30EA\u30A2\u30E9\u30A4\u30BA\u30C9\u30D3\u30E5\u30FC"

# MaterializedView {#materializedview}

Used to implement materialized views (for more information, see [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query)). For storing data, it uses a different engine that was specified when creating the view. When reading from a table, it just uses this engine.

[Original article](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/) <!--hide-->

@ -693,6 +693,7 @@ auto s = std::string{"Hello"};
## Error messages {#error-messages}

Error messages are the part of a program's user interface that allows the user to:

* notice error situations,
* understand their meaning and causes,
* fix those situations.

@ -700,6 +701,7 @@ auto s = std::string{"Hello"};
The form and content of error messages should serve these goals.

There are two main kinds of errors:

* user or system errors,
* internal program errors.

@ -722,6 +724,7 @@ While processing '(SELECT 2 AS a)'.
The dictionary is configured incorrectly.
```
From this message it is not clear:

- which dictionary?
- what exactly is wrong in its configuration?

@ -735,12 +738,14 @@ The dictionary is configured incorrectly.
The appearance of such an error always indicates a bug in the program. The user cannot fix such an error on their own and must report it to the developers.

There are two main ways to check for such errors:

* An exception with the code `LOGICAL_ERROR`. It can be used for important checks that are performed even in release builds.
* `assert`. Such conditions are not checked in release builds and can be used for heavy or optional checks.

An example of a message that should have the code `LOGICAL_ERROR`:
`Block header is inconsistent with Chunk in ICompicatedProcessor::munge(). It is a bug!`
By what signs can you tell that this message is about an internal program error?

* the message mentions internal entities from the code,
* the message says "it's a bug",
* the user's direct actions cannot fix the error. We expect the user to report it as a bug, and we will fix it in the code.

@ -752,6 +757,7 @@ The dictionary is configured incorrectly.
### How do you add a new error message? {#error-messages-add}

When adding an error message:

1. Describe what happened, in user terms rather than in pieces of code.
2. Add as much context as possible (what it happened with, when, why, etc.).
3. List the typical causes.

@ -26,7 +26,7 @@ toc_priority: 29

During `INSERT` queries, the table is locked, and other queries for reading and writing data wait for the table to be unlocked. If there are no data-writing queries, any number of concurrent data-reading queries can be performed.

- Do not support [mutation](../../../sql-reference/statements/alter.md#mutations) operations.

- Do not support indexes.

@ -601,7 +601,7 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd'
In `MergeTree` tables, data gets to disk in several ways:

- As a result of an insert (an `INSERT` query).
- During background merges and [mutations](../../../sql-reference/statements/alter.md#mutations).
- When downloading from another replica.
- As a result of partition freezing [ALTER TABLE … FREEZE PARTITION](../../../engines/table-engines/mergetree-family/mergetree.md#alter_freeze-partition).

@ -1,5 +1,5 @@
# MaterializedView {#materializedview}

Used to implement materialized views (for details, see the [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query) query). For storing data, it uses a different engine that was specified when the view was created. When reading from a table, it just uses this engine.

[Original article](https://clickhouse.tech/docs/ru/operations/table_engines/materializedview/) <!--hide-->

@ -227,4 +227,4 @@ FROM
```

!!! note "Note"
    See more about the [avg()](../sql-reference/aggregate-functions/reference/avg.md#agg_function-avg) and [log()](../sql-reference/functions/math-functions.md) functions.

@ -1012,15 +1012,15 @@ ClickHouse throws an exception

## count_distinct_implementation {#settings-count_distinct_implementation}

Specifies which of the `uniq*` functions is used to perform the [COUNT(DISTINCT …)](../../sql-reference/aggregate-functions/reference/count.md#agg_function-count) construction.

Possible values:

- [uniq](../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
- [uniqCombined](../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined)
- [uniqCombined64](../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64)
- [uniqHLL12](../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12)
- [uniqExact](../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)

Default value: `uniqExact`.

|
|||||||
|
|
||||||
Значение по умолчанию: 16.
|
Значение по умолчанию: 16.
|
||||||
|
|
||||||
|
## parallel_distributed_insert_select {#parallel_distributed_insert_select}
|
||||||
|
|
||||||
|
Включает параллельную обработку распределённых запросов `INSERT ... SELECT`.
|
||||||
|
|
||||||
|
Если при выполнении запроса `INSERT INTO distributed_table_a SELECT ... FROM distributed_table_b` оказывается, что обе таблицы находятся в одном кластере, то независимо от того [реплицируемые](../../engines/table-engines/mergetree-family/replication.md) они или нет, запрос выполняется локально на каждом шарде.
|
||||||
|
|
||||||
|
Допустимые значения:
|
||||||
|
|
||||||
|
- 0 — выключена.
|
||||||
|
- 1 — включена.
|
||||||
|
|
||||||
|
Значение по умолчанию: 0.
|
||||||
|
|
||||||
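
A minimal sketch (table names follow the example above and are hypothetical):

``` sql
SET parallel_distributed_insert_select = 1;
INSERT INTO distributed_table_a SELECT * FROM distributed_table_b;
```
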
## insert_distributed_sync {#insert_distributed_sync}

Enables or disables synchronous insertion of data into distributed tables (tables with the [Distributed](../../engines/table-engines/special/distributed.md#distributed) engine).

@ -1297,7 +1310,7 @@ Default value: 0.
- [Managing distributed tables](../../sql-reference/statements/system.md#query-language-system-distributed)

## validate_polygons {#validate_polygons}

Enables or disables throwing an exception in the [pointInPolygon](../../sql-reference/functions/geo/index.md#pointinpolygon) function when the polygon is self-intersecting or self-tangent.

Possible values:

|
|||||||
|
|
||||||
- [min_insert_block_size_bytes](#min-insert-block-size-bytes)
|
- [min_insert_block_size_bytes](#min-insert-block-size-bytes)
|
||||||
|
|
||||||
|
## mutations_sync {#mutations_sync}
|
||||||
|
|
||||||
|
Позволяет выполнять запросы `ALTER TABLE ... UPDATE|DELETE` ([мутации](../../sql-reference/statements/alter.md#mutations)) синхронно.
|
||||||
|
|
||||||
|
Возможные значения:
|
||||||
|
|
||||||
|
- 0 - мутации выполняются асинхронно.
|
||||||
|
- 1 - запрос ждет завершения всех мутаций на текущем сервере.
|
||||||
|
- 2 - запрос ждет завершения всех мутаций на всех репликах (если они есть).
|
||||||
|
|
||||||
|
Значение по умолчанию: `0`.
|
||||||
|
|
||||||
|
**См. также**
|
||||||
|
|
||||||
|
- [Синхронность запросов ALTER](../../sql-reference/statements/alter.md#synchronicity-of-alter-queries)
|
||||||
|
- [Мутации](../../sql-reference/statements/alter.md#mutations)
|
||||||
|
|
||||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/settings/) <!--hide-->
|
[Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/settings/) <!--hide-->
|
||||||
|
@ -46,6 +46,41 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10
- [system.events](#system_tables-events) — a table with the number of events that have occurred.
- [system.metric_log](#system_tables-metric_log) — a table that records the history of metric values from `system.metrics` and `system.events`.

## system.asynchronous_metric_log {#system-tables-async-log}

Contains the historical values of the metrics from the `system.asynchronous_metrics` table, saved once per minute. Enabled by default.

Columns:
- `event_date` ([Date](../sql-reference/data-types/date.md)) — event date.
- `event_time` ([DateTime](../sql-reference/data-types/datetime.md)) — event time.
- `name` ([String](../sql-reference/data-types/string.md)) — metric name.
- `value` ([Float64](../sql-reference/data-types/float.md)) — metric value.

**Example**

``` sql
SELECT * FROM system.asynchronous_metric_log LIMIT 10
```

``` text
┌─event_date─┬──────────event_time─┬─name──────────────────────────────────────┬────value─┐
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.arenas.all.pmuzzy                │        0 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.arenas.all.pdirty                │     4214 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.background_thread.run_intervals  │        0 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.background_thread.num_runs       │        0 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.retained                         │ 17657856 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.mapped                           │ 71471104 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.resident                         │ 61538304 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.metadata                         │  6199264 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.allocated                        │ 38074336 │
│ 2020-06-22 │ 2020-06-22 06:57:30 │ jemalloc.epoch                            │        2 │
└────────────┴─────────────────────┴───────────────────────────────────────────┴──────────┘
```

**See also**
- [system.asynchronous_metrics](#system_tables-asynchronous_metrics) — contains metrics that are calculated periodically in the background.
- [system.metric_log](#system_tables-metric_log) — a table that records the history of metric values from `system.metrics` and `system.events`.

## system.clusters {#system-clusters}

Contains information about the clusters available in the configuration file and the servers in them.

@ -1324,7 +1359,7 @@ path: /clickhouse/tables/01-08/visits/replicas

## system.mutations {#system_tables-mutations}

The table contains information about the progress of [mutations](../sql-reference/statements/alter.md#mutations) of MergeTree-family tables. Each mutation command is represented by a single row in the table.

Columns:

@ -1365,7 +1400,7 @@ path: /clickhouse/tables/01-08/visits/replicas

**See also**

- [Mutations](../sql-reference/statements/alter.md#mutations)
- [The MergeTree engine](../engines/table-engines/mergetree-family/mergetree.md)
- [Data replication](../engines/table-engines/mergetree-family/replication.md) (the ReplicatedMergeTree family)

@ -23,7 +23,7 @@

## -State {#state}

If this combinator is applied, the aggregate function returns not the resulting value (such as the number of unique values for the [uniq](reference/uniq.md#agg_function-uniq) function) but an intermediate aggregation state (for `uniq`, the hash table used to calculate the number of unique values). The state has the type `AggregateFunction(...)` and can be used for further processing or saved to a table to finish the aggregation later.
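
For instance, a minimal sketch using `uniqState`/`uniqMerge` (the table and columns are hypothetical):

``` sql
SELECT uniqMerge(state)
FROM (SELECT uniqState(user_id) AS state FROM visits GROUP BY site_id);
```
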
To work with intermediate states, use:

@ -206,7 +206,7 @@ FROM

Let's get the names of the people whose age falls into the intervals `[30,60)` and `[60,75)`. Since we use an integer representation of age, the intervals are `[30, 59]` and `[60,74]`.

To collect the names into an array, we use the [groupArray](../../sql-reference/aggregate-functions/reference/grouparray.md#agg_function-grouparray) aggregate function. It takes one argument; in our case, the `name` column. The `groupArrayResample` function should use the `age` column to aggregate the names by age. To define the required intervals, we pass the arguments `30, 75, 30` to `groupArrayResample`.

``` sql
SELECT groupArrayResample(30, 75, 30)(name, age) from people
```

@ -60,4 +60,4 @@ SELECT groupArray(y) FROM t_null_big

`groupArray` does not include `NULL` in the resulting array.

[Original article](https://clickhouse.tech/docs/ru/sql-reference/aggregate-functions/) <!--hide-->

File diff suppressed because it is too large
15 docs/ru/sql-reference/aggregate-functions/reference/any.md Normal file

@ -0,0 +1,15 @@
---
toc_priority: 6
---

# any {#agg_function-any}

Selects the first encountered value.
The query can be executed in any order, and even in a different order each time, so the result of this function is non-deterministic.
To get a deterministic result, you can use the min or max functions instead of any.

In some cases you can still rely on the order of execution. This applies to cases when SELECT comes from a subquery that uses ORDER BY.

When a `SELECT` query has the `GROUP BY` clause or at least one aggregate function, ClickHouse (in contrast to, for example, MySQL) requires that all expressions in the `SELECT`, `HAVING` and `ORDER BY` clauses be calculated from keys or from aggregate functions. In other words, each column selected from the table must be used either in keys or inside aggregate functions. To get behavior like in MySQL, you can put the other columns into the `any` aggregate function.
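
For example, a MySQL-style grouping sketch (the table and columns are hypothetical):

``` sql
SELECT user_id, any(user_name) FROM visits GROUP BY user_id;
```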

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/any/) <!--hide-->

@ -0,0 +1,32 @@
---
toc_priority: 103
---

# anyHeavy {#anyheavyx}

Selects a frequently occurring value using the [heavy hitters](http://www.cs.umd.edu/~samir/498/karp.pdf) algorithm. If there is a value that occurs more than half the time in each of the query's execution threads, this value is returned. In general, the result is non-deterministic.

``` sql
anyHeavy(column)
```

**Arguments**

- `column` — the column name.

**Example**

Take the [OnTime](../../../getting-started/example-datasets/ontime.md) data set and select any frequently occurring value in the `AirlineID` column.

``` sql
SELECT anyHeavy(AirlineID) AS res
FROM ontime
```

``` text
┌───res─┐
│ 19690 │
└───────┘
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/anyheavy/) <!--hide-->

@ -0,0 +1,10 @@
---
toc_priority: 104
---

# anyLast {#anylastx}

Selects the last encountered value.
The result is just as non-deterministic as for the [any](../../../sql-reference/aggregate-functions/reference/any.md) function.

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/anylast/) <!--hide-->

@ -0,0 +1,11 @@
---
toc_priority: 106
---

# argMax {#agg-function-argmax}

Syntax: `argMax(arg, val)`

Calculates the `arg` value for the maximum `val` value. If there are several different `arg` values for the maximum values of `val`, the first of these values encountered is returned.
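
A hedged sketch using the same hypothetical salary table as in the argMin example below:

``` sql
SELECT argMax(user, salary) FROM salary
```

``` text
┌─argMax(user, salary)─┐
│ director             │
└──────────────────────┘
```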

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/argmax/) <!--hide-->

@ -0,0 +1,31 @@
---
toc_priority: 105
---

# argMin {#agg-function-argmin}

Syntax: `argMin(arg, val)`

Calculates the `arg` value for the minimum `val` value. If there are several different `arg` values for the minimum values of `val`, the first of these values encountered is returned.

**Example:**

``` text
┌─user─────┬─salary─┐
│ director │   5000 │
│ manager  │   3000 │
│ worker   │   1000 │
└──────────┴────────┘
```

``` sql
SELECT argMin(user, salary) FROM salary
```

``` text
┌─argMin(user, salary)─┐
│ worker               │
└──────────────────────┘
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/argmin/) <!--hide-->

11 docs/ru/sql-reference/aggregate-functions/reference/avg.md Normal file

@ -0,0 +1,11 @@
---
toc_priority: 5
---

# avg {#agg_function-avg}

Calculates the average.
Only works for numbers.
The result is always Float64.
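
A quick, hedged illustration using the `values` table function:

``` sql
SELECT avg(x) FROM values('x Int8', 1, 2, 3);  -- returns 2
```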

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/avg/) <!--hide-->

@ -0,0 +1,46 @@
---
toc_priority: 107
---

# avgWeighted {#avgweighted}

Calculates the [weighted arithmetic mean](https://ru.wikipedia.org/wiki/Среднее_арифметическое_взвешенное).

**Syntax**

``` sql
avgWeighted(x, weight)
```

**Parameters**

- `x` — values. [Integers](../../../sql-reference/data-types/int-uint.md) or [floating-point numbers](../../../sql-reference/data-types/float.md).
- `weight` — weights of the individual values. [Integers](../../../sql-reference/data-types/int-uint.md) or [floating-point numbers](../../../sql-reference/data-types/float.md).

The parameter types must match.

**Returned value**

- The weighted arithmetic mean.
- `NaN` if all the weights are equal to 0.

Type: [Float64](../../../sql-reference/data-types/float.md)

**Example**

Query:

``` sql
SELECT avgWeighted(x, w)
FROM values('x Int8, w Int8', (4, 1), (1, 0), (10, 2))
```

Result:

``` text
┌─avgWeighted(x, weight)─┐
│                      8 │
└────────────────────────┘
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/avgweighted/) <!--hide-->

14 docs/ru/sql-reference/aggregate-functions/reference/corr.md Normal file

@ -0,0 +1,14 @@
---
toc_priority: 107
---

# corr {#corrx-y}

Syntax: `corr(x, y)`

Calculates the Pearson correlation coefficient: `Σ((x - x̅)(y - y̅)) / sqrt(Σ((x - x̅)^2) * Σ((y - y̅)^2))`.

!!! note "Note"
    This function uses a numerically unstable algorithm. If you need [numerical stability](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость) in your calculations, use the `corrStable` function. It works more slowly but provides a smaller computational error.

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/corr/) <!--hide-->

72 docs/ru/sql-reference/aggregate-functions/reference/count.md Normal file

@ -0,0 +1,72 @@
---
toc_priority: 1
---

# count {#agg_function-count}

Counts the number of rows or non-NULL values.

ClickHouse supports the following syntaxes for `count`:

- `count(expr)` or `COUNT(DISTINCT expr)`.
- `count()` or `COUNT(*)`. The `count()` syntax is ClickHouse-specific.

**Parameters**

The function can take:

- Zero parameters.
- One [expression](../../syntax.md#syntax-expressions).

**Returned value**

- If the function is called without parameters, it counts the number of rows.
- If an [expression](../../syntax.md#syntax-expressions) is passed, the function counts how many times the expression returned a non-NULL value. If the expression returns a value of the [Nullable](../../../sql-reference/data-types/nullable.md) type, the result of `count` still does not become `Nullable`. The function returns 0 if the expression returned `NULL` for all rows.

In both cases, the type of the returned value is [UInt64](../../../sql-reference/data-types/int-uint.md).

**Details**

ClickHouse supports the `COUNT(DISTINCT ...)` syntax. The behavior of this construction depends on the [count_distinct_implementation](../../../operations/settings/settings.md#settings-count_distinct_implementation) setting. It defines which of the [uniq*](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) functions is used to perform the operation. The default is the [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) function.

The `SELECT count() FROM table` query is not optimized, because the number of entries in the table is not stored separately. It selects a small column from the table and counts the number of values in it.

**Examples**

Example 1:

``` sql
SELECT count() FROM t
```

``` text
┌─count()─┐
│       5 │
└─────────┘
```

Example 2:

``` sql
SELECT name, value FROM system.settings WHERE name = 'count_distinct_implementation'
```

``` text
┌─name──────────────────────────┬─value─────┐
│ count_distinct_implementation │ uniqExact │
└───────────────────────────────┴───────────┘
```

``` sql
SELECT count(DISTINCT num) FROM t
```

``` text
┌─uniqExact(num)─┐
│              3 │
└────────────────┘
```

This example shows that `count(DISTINCT num)` is performed by the `uniqExact` function, in accordance with the value of the `count_distinct_implementation` setting.

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/count/) <!--hide-->

@ -0,0 +1,14 @@
---
toc_priority: 36
---

# covarPop {#covarpop}

Syntax: `covarPop(x, y)`

Computes the value of `Σ((x - x̅)(y - y̅)) / n`.

!!! note "Note"
    This function uses a numerically unstable algorithm. If you need [numerical stability](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость) in your calculations, use the `covarPopStable` function. It works more slowly but provides a lower computational error.
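
A minimal usage sketch (not part of the original page; the table `series` and its numeric columns `x` and `y` are hypothetical):

``` sql
-- Hypothetical table: series(x Float64, y Float64).
-- Returns the population covariance of x and y.
SELECT covarPop(x, y) FROM series
```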

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/covarpop/) <!--hide-->
@ -0,0 +1,16 @@
---
toc_priority: 37
---

# covarSamp {#covarsamp}

Syntax: `covarSamp(x, y)`

Computes the value of `Σ((x - x̅)(y - y̅)) / (n - 1)`.

Returns Float64. If `n <= 1`, +∞ is returned.

!!! note "Note"
    This function uses a numerically unstable algorithm. If you need [numerical stability](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость) in your calculations, use the `covarSampStable` function. It works more slowly but provides a lower computational error.
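
A minimal usage sketch (not part of the original page; the table `series` and its numeric columns `x` and `y` are hypothetical):

``` sql
-- Hypothetical table: series(x Float64, y Float64).
-- Returns the sample covariance of x and y as Float64 (+inf when n <= 1).
SELECT covarSamp(x, y) FROM series
```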

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/covarsamp/) <!--hide-->
@ -0,0 +1,17 @@
---
toc_priority: 110
---

# groupArray {#agg_function-grouparray}

Syntax: `groupArray(x)` or `groupArray(max_size)(x)`

Creates an array of the argument values.
Values can be added to the array in any (indeterminate) order.

The second version (with the `max_size` parameter) limits the size of the resulting array to `max_size` elements.
For example, `groupArray(1)(x)` is equivalent to `[any(x)]`.

In some cases you can still rely on the order of execution. This applies to the cases when `SELECT` comes from a subquery that uses `ORDER BY`.
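
A minimal usage sketch (not part of the original page; the table `visits` and its columns are hypothetical):

``` sql
-- Hypothetical table: visits(page String, user_id UInt64).
-- Collects the user_id values of each page into one array.
SELECT page, groupArray(user_id) FROM visits GROUP BY page
```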

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/grouparray/) <!--hide-->
@ -0,0 +1,93 @@
---
toc_priority: 112
---

# groupArrayInsertAt {#grouparrayinsertat}

Inserts a value into the array at the specified position.

**Syntax**

```sql
groupArrayInsertAt(default_x, size)(x, pos);
```

If several values are inserted into the same position in one query, the function behaves in the following way:

- If a query is executed in a single thread, the first of the inserted values is used.
- If a query is executed in multiple threads, the resulting value is an indeterminate one of the inserted values.

**Parameters**

- `x` — Value to be inserted. [Expression](../../syntax.md#syntax-expressions) resulting in one of the [supported data types](../../../sql-reference/data-types/index.md#data_types).
- `pos` — Position at which the specified element `x` is to be inserted. Index numbering in the array starts from zero. [UInt32](../../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-int8-int16-int32-int64).
- `default_x` — Default value for substituting in empty positions. Optional parameter. [Expression](../../syntax.md#syntax-expressions) resulting in the data type configured for the `x` parameter. If `default_x` is not defined, the [default values](../../../sql-reference/statements/create.md#create-default-values) are used.
- `size` — Length of the resulting array. Optional parameter. When using this parameter, the default value `default_x` must be specified. [UInt32](../../../sql-reference/data-types/int-uint.md#uint-ranges).

**Returned value**

- Array with inserted values.

Type: [Array](../../../sql-reference/data-types/array.md#data-type-array).

**Examples**

Query:

```sql
SELECT groupArrayInsertAt(toString(number), number * 2) FROM numbers(5);
```

Result:

```text
┌─groupArrayInsertAt(toString(number), multiply(number, 2))─┐
│ ['0','','1','','2','','3','','4']                         │
└───────────────────────────────────────────────────────────┘
```

Query:

```sql
SELECT groupArrayInsertAt('-')(toString(number), number * 2) FROM numbers(5);
```

Result:

```text
┌─groupArrayInsertAt('-')(toString(number), multiply(number, 2))─┐
│ ['0','-','1','-','2','-','3','-','4']                          │
└────────────────────────────────────────────────────────────────┘
```

Query:

```sql
SELECT groupArrayInsertAt('-', 5)(toString(number), number * 2) FROM numbers(5);
```

Result:

```text
┌─groupArrayInsertAt('-', 5)(toString(number), multiply(number, 2))─┐
│ ['0','-','1','-','2']                                             │
└───────────────────────────────────────────────────────────────────┘
```

Multi-threaded insertion of elements into one position.

Query:

```sql
SELECT groupArrayInsertAt(number, 0) FROM numbers_mt(10) SETTINGS max_block_size = 1;
```

As a result of this query you get a random integer in the range `[0,9]`. For example:

```text
┌─groupArrayInsertAt(number, 0)─┐
│ [7]                           │
└───────────────────────────────┘
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat/) <!--hide-->
@ -0,0 +1,78 @@
---
toc_priority: 114
---

# groupArrayMovingAvg {#agg_function-grouparraymovingavg}

Calculates the moving average of input values.

    groupArrayMovingAvg(numbers_for_summing)
    groupArrayMovingAvg(window_size)(numbers_for_summing)

The function can take the window size as a parameter. If it is not specified, the function takes a window size equal to the number of rows in the column.

**Parameters**

- `numbers_for_summing` — [Expression](../../syntax.md#syntax-expressions) resulting in a numeric data type value.
- `window_size` — Size of the calculation window.

**Returned values**

- Array of the same size and type as the input data.

The function uses [rounding towards zero](https://ru.wikipedia.org/wiki/Округление#Методы). It truncates the decimal places insignificant for the resulting data type.

**Example**

The sample table:

``` sql
CREATE TABLE t
(
    `int` UInt8,
    `float` Float32,
    `dec` Decimal32(2)
)
ENGINE = TinyLog
```

``` text
┌─int─┬─float─┬──dec─┐
│   1 │   1.1 │ 1.10 │
│   2 │   2.2 │ 2.20 │
│   4 │   4.4 │ 4.40 │
│   7 │  7.77 │ 7.77 │
└─────┴───────┴──────┘
```

The queries:

``` sql
SELECT
    groupArrayMovingAvg(int) AS I,
    groupArrayMovingAvg(float) AS F,
    groupArrayMovingAvg(dec) AS D
FROM t
```

``` text
┌─I─────────┬─F───────────────────────────────────┬─D─────────────────────┐
│ [0,0,1,3] │ [0.275,0.82500005,1.9250001,3.8675] │ [0.27,0.82,1.92,3.86] │
└───────────┴─────────────────────────────────────┴───────────────────────┘
```

``` sql
SELECT
    groupArrayMovingAvg(2)(int) AS I,
    groupArrayMovingAvg(2)(float) AS F,
    groupArrayMovingAvg(2)(dec) AS D
FROM t
```

``` text
┌─I─────────┬─F────────────────────────────────┬─D─────────────────────┐
│ [0,1,3,5] │ [0.55,1.6500001,3.3000002,6.085] │ [0.55,1.65,3.30,6.08] │
└───────────┴──────────────────────────────────┴───────────────────────┘
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg/) <!--hide-->
@ -0,0 +1,78 @@
---
toc_priority: 113
---

# groupArrayMovingSum {#agg_function-grouparraymovingsum}

Calculates the moving sum of input values.

``` sql
groupArrayMovingSum(numbers_for_summing)
groupArrayMovingSum(window_size)(numbers_for_summing)
```

The function can take the window size as a parameter. If it is not specified, the function takes a window size equal to the number of rows in the column.

**Parameters**

- `numbers_for_summing` — [Expression](../../syntax.md#syntax-expressions) resulting in a numeric data type value.
- `window_size` — Size of the calculation window.

**Returned values**

- Array of the same size and type as the input data.

**Example**

The sample table:

``` sql
CREATE TABLE t
(
    `int` UInt8,
    `float` Float32,
    `dec` Decimal32(2)
)
ENGINE = TinyLog
```

``` text
┌─int─┬─float─┬──dec─┐
│   1 │   1.1 │ 1.10 │
│   2 │   2.2 │ 2.20 │
│   4 │   4.4 │ 4.40 │
│   7 │  7.77 │ 7.77 │
└─────┴───────┴──────┘
```

The queries:

``` sql
SELECT
    groupArrayMovingSum(int) AS I,
    groupArrayMovingSum(float) AS F,
    groupArrayMovingSum(dec) AS D
FROM t
```

``` text
┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐
│ [1,3,7,14] │ [1.1,3.3000002,7.7000003,15.47] │ [1.10,3.30,7.70,15.47] │
└────────────┴─────────────────────────────────┴────────────────────────┘
```

``` sql
SELECT
    groupArrayMovingSum(2)(int) AS I,
    groupArrayMovingSum(2)(float) AS F,
    groupArrayMovingSum(2)(dec) AS D
FROM t
```

``` text
┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐
│ [1,3,6,11] │ [1.1,3.3000002,6.6000004,12.17] │ [1.10,3.30,6.60,12.17] │
└────────────┴─────────────────────────────────┴────────────────────────┘
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum/) <!--hide-->
@ -0,0 +1,48 @@
---
toc_priority: 125
---

# groupBitAnd {#groupbitand}

Applies bitwise `AND` to a series of numbers.

``` sql
groupBitAnd(expr)
```

**Parameters**

`expr` – An expression that results in the `UInt*` data type.

**Returned value**

Value of the `UInt*` type.

**Example**

Test data:

``` text
binary     decimal
00101100 = 44
00011100 = 28
00001101 = 13
01010101 = 85
```

Query:

``` sql
SELECT groupBitAnd(num) FROM t
```

Where `num` is the column with the test data.

Result:

``` text
binary     decimal
00000100 = 4
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/groupbitand/) <!--hide-->
@ -0,0 +1,46 @@
---
toc_priority: 128
---

# groupBitmap {#groupbitmap}

Bitmap or aggregate calculations for a column of the `UInt*` data type; returns the cardinality as a value of the UInt64 type. If the -State suffix is added, it returns a [bitmap object](../../../sql-reference/functions/bitmap-functions.md).

``` sql
groupBitmap(expr)
```

**Parameters**

`expr` – An expression that results in the `UInt*` data type.

**Returned value**

Value of the `UInt64` type.

**Example**

Test data:

``` text
UserID
1
1
2
3
```

Query:

``` sql
SELECT groupBitmap(UserID) as num FROM t
```

Result:

``` text
num
3
```
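
A hedged sketch of the `-State` variant mentioned above, using the same test table `t`: the returned bitmap object can be passed to the bitmap functions, such as `bitmapCardinality`.

``` sql
-- groupBitmapState returns a bitmap object instead of the cardinality;
-- bitmapCardinality then extracts the number of distinct UserID values.
SELECT bitmapCardinality(groupBitmapState(UserID)) AS num FROM t
```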

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/groupbitmap/) <!--hide-->
@ -0,0 +1,48 @@
---
toc_priority: 126
---

# groupBitOr {#groupbitor}

Applies bitwise `OR` to a series of numbers.

``` sql
groupBitOr(expr)
```

**Parameters**

`expr` – An expression that results in the `UInt*` data type.

**Returned value**

Value of the `UInt*` type.

**Example**

Test data:

``` text
binary     decimal
00101100 = 44
00011100 = 28
00001101 = 13
01010101 = 85
```

Query:

``` sql
SELECT groupBitOr(num) FROM t
```

Where `num` is the column with the test data.

Result:

``` text
binary     decimal
01111101 = 125
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/groupbitor/) <!--hide-->
@ -0,0 +1,48 @@
---
toc_priority: 127
---

# groupBitXor {#groupbitxor}

Applies bitwise `XOR` to a series of numbers.

``` sql
groupBitXor(expr)
```

**Parameters**

`expr` – An expression that results in the `UInt*` data type.

**Returned value**

Value of the `UInt*` type.

**Example**

Test data:

``` text
binary     decimal
00101100 = 44
00011100 = 28
00001101 = 13
01010101 = 85
```

Query:

``` sql
SELECT groupBitXor(num) FROM t
```

Where `num` is the column with the test data.

Result:

``` text
binary     decimal
01101000 = 104
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/groupbitxor/) <!--hide-->
@ -0,0 +1,13 @@
---
toc_priority: 111
---

# groupUniqArray {#groupuniqarray}

Syntax: `groupUniqArray(x)` or `groupUniqArray(max_size)(x)`

Creates an array from distinct argument values. Memory consumption is the same as for the `uniqExact` function.

The `groupUniqArray(max_size)(x)` function limits the size of the resulting array to `max_size` elements. For example, `groupUniqArray(1)(x)` is equivalent to `[any(x)]`.
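
A minimal usage sketch (not part of the original page; the table `visits` and its column `user_id` are hypothetical):

``` sql
-- Hypothetical table: visits(user_id UInt64).
-- Returns an array of the distinct user_id values.
SELECT groupUniqArray(user_id) FROM visits
```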

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/groupuniqarray/) <!--hide-->
68
docs/ru/sql-reference/aggregate-functions/reference/index.md
Normal file
@ -0,0 +1,68 @@
---
toc_folder_title: "\u0421\u043f\u0440\u0430\u0432\u043e\u0447\u043d\u0438\u043a"
toc_priority: 36
toc_hidden: true
---

# List of Aggregate Functions {#aggregate-functions-list}

Standard aggregate functions:

- [count](../../../sql-reference/aggregate-functions/reference/count.md)
- [min](../../../sql-reference/aggregate-functions/reference/min.md)
- [max](../../../sql-reference/aggregate-functions/reference/max.md)
- [sum](../../../sql-reference/aggregate-functions/reference/sum.md)
- [avg](../../../sql-reference/aggregate-functions/reference/avg.md)
- [any](../../../sql-reference/aggregate-functions/reference/any.md)
- [stddevPop](../../../sql-reference/aggregate-functions/reference/stddevpop.md)
- [stddevSamp](../../../sql-reference/aggregate-functions/reference/stddevsamp.md)
- [varPop](../../../sql-reference/aggregate-functions/reference/varpop.md)
- [varSamp](../../../sql-reference/aggregate-functions/reference/varsamp.md)
- [covarPop](../../../sql-reference/aggregate-functions/reference/covarpop.md)
- [covarSamp](../../../sql-reference/aggregate-functions/reference/covarsamp.md)

ClickHouse-specific aggregate functions:

- [anyHeavy](../../../sql-reference/aggregate-functions/reference/anyheavy.md)
- [anyLast](../../../sql-reference/aggregate-functions/reference/anylast.md)
- [argMin](../../../sql-reference/aggregate-functions/reference/argmin.md)
- [argMax](../../../sql-reference/aggregate-functions/reference/argmax.md)
- [avgWeighted](../../../sql-reference/aggregate-functions/reference/avgweighted.md)
- [topK](../../../sql-reference/aggregate-functions/reference/topk.md)
- [topKWeighted](../../../sql-reference/aggregate-functions/reference/topkweighted.md)
- [groupArray](../../../sql-reference/aggregate-functions/reference/grouparray.md)
- [groupUniqArray](../../../sql-reference/aggregate-functions/reference/groupuniqarray.md)
- [groupArrayInsertAt](../../../sql-reference/aggregate-functions/reference/grouparrayinsertat.md)
- [groupArrayMovingAvg](../../../sql-reference/aggregate-functions/reference/grouparraymovingavg.md)
- [groupArrayMovingSum](../../../sql-reference/aggregate-functions/reference/grouparraymovingsum.md)
- [groupBitAnd](../../../sql-reference/aggregate-functions/reference/groupbitand.md)
- [groupBitOr](../../../sql-reference/aggregate-functions/reference/groupbitor.md)
- [groupBitXor](../../../sql-reference/aggregate-functions/reference/groupbitxor.md)
- [groupBitmap](../../../sql-reference/aggregate-functions/reference/groupbitmap.md)
- [sumWithOverflow](../../../sql-reference/aggregate-functions/reference/sumwithoverflow.md)
- [sumMap](../../../sql-reference/aggregate-functions/reference/summap.md)
- [skewSamp](../../../sql-reference/aggregate-functions/reference/skewsamp.md)
- [skewPop](../../../sql-reference/aggregate-functions/reference/skewpop.md)
- [kurtSamp](../../../sql-reference/aggregate-functions/reference/kurtsamp.md)
- [kurtPop](../../../sql-reference/aggregate-functions/reference/kurtpop.md)
- [timeSeriesGroupSum](../../../sql-reference/aggregate-functions/reference/timeseriesgroupsum.md)
- [timeSeriesGroupRateSum](../../../sql-reference/aggregate-functions/reference/timeseriesgroupratesum.md)
- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md)
- [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md)
- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md)
- [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md)
- [quantileExact](../../../sql-reference/aggregate-functions/reference/quantileexact.md)
- [quantileExactWeighted](../../../sql-reference/aggregate-functions/reference/quantileexactweighted.md)
- [quantileTiming](../../../sql-reference/aggregate-functions/reference/quantiletiming.md)
- [quantileTimingWeighted](../../../sql-reference/aggregate-functions/reference/quantiletimingweighted.md)
- [quantileDeterministic](../../../sql-reference/aggregate-functions/reference/quantiledeterministic.md)
- [quantileTDigest](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md)
- [quantileTDigestWeighted](../../../sql-reference/aggregate-functions/reference/quantiletdigestweighted.md)
- [simpleLinearRegression](../../../sql-reference/aggregate-functions/reference/simplelinearregression.md)
- [stochasticLinearRegression](../../../sql-reference/aggregate-functions/reference/stochasticlinearregression.md)
- [stochasticLogisticRegression](../../../sql-reference/aggregate-functions/reference/stochasticlogisticregression.md)

[Original article](https://clickhouse.tech/docs/ru/sql-reference/aggregate-functions/reference) <!--hide-->
@ -0,0 +1,27 @@
---
toc_priority: 153
---

# kurtPop {#kurtpop}

Computes the [kurtosis](https://ru.wikipedia.org/wiki/Коэффициент_эксцесса) of a sequence.

``` sql
kurtPop(expr)
```

**Parameters**

`expr` — [Expression](../../syntax.md#syntax-expressions) returning a number.

**Returned value**

The kurtosis of the given distribution. Type — [Float64](../../../sql-reference/data-types/float.md).

**Example**

``` sql
SELECT kurtPop(value) FROM series_with_value_column
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/kurtpop/) <!--hide-->
@ -0,0 +1,29 @@
---
toc_priority: 154
---

# kurtSamp {#kurtsamp}

Computes the [sample kurtosis](https://ru.wikipedia.org/wiki/Статистика_(функция_выборки)) of a sequence.

It represents an unbiased estimate of the kurtosis of a random variable if the passed values form its sample.

``` sql
kurtSamp(expr)
```

**Parameters**

`expr` — [Expression](../../syntax.md#syntax-expressions) returning a number.

**Returned value**

The kurtosis of the given distribution. Type — [Float64](../../../sql-reference/data-types/float.md). If `n <= 1` (`n` is the size of the sample), the function returns `nan`.

**Example**

``` sql
SELECT kurtSamp(value) FROM series_with_value_column
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/kurtsamp/) <!--hide-->
@ -0,0 +1,9 @@
---
toc_priority: 3
---

# max {#agg_function-max}

Calculates the maximum.
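
A minimal usage sketch (not part of the original page; the table `t` and its column `x` are hypothetical):

``` sql
-- Hypothetical table: t(x Int64).
-- Returns the maximum value of x across all rows.
SELECT max(x) FROM t
```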

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/max/) <!--hide-->
@ -0,0 +1,43 @@
# median {#median}

The `median*` functions are aliases for the corresponding `quantile*` functions. They calculate the median of a numeric sequence.

Functions:

- `median` — Alias for [quantile](#quantile).
- `medianDeterministic` — Alias for [quantileDeterministic](#quantiledeterministic).
- `medianExact` — Alias for [quantileExact](#quantileexact).
- `medianExactWeighted` — Alias for [quantileExactWeighted](#quantileexactweighted).
- `medianTiming` — Alias for [quantileTiming](#quantiletiming).
- `medianTimingWeighted` — Alias for [quantileTimingWeighted](#quantiletimingweighted).
- `medianTDigest` — Alias for [quantileTDigest](#quantiletdigest).
- `medianTDigestWeighted` — Alias for [quantileTDigestWeighted](#quantiletdigestweighted).

**Example**

Input table:

``` text
┌─val─┐
│   1 │
│   1 │
│   2 │
│   3 │
└─────┘
```

Query:

``` sql
SELECT medianDeterministic(val, 1) FROM t
```

Result:

``` text
┌─medianDeterministic(val, 1)─┐
│                         1.5 │
└─────────────────────────────┘
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/median/) <!--hide-->
Some files were not shown because too many files have changed in this diff