diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1153f418056..e137c2267f0 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -378,6 +378,7 @@ include (cmake/find/avro.cmake)
include (cmake/find/msgpack.cmake)
include (cmake/find/cassandra.cmake)
include (cmake/find/sentry.cmake)
+include (cmake/find/stats.cmake)
find_contrib_lib(cityhash)
find_contrib_lib(farmhash)
diff --git a/base/common/ReplxxLineReader.cpp b/base/common/ReplxxLineReader.cpp
index 251170ab5c1..cc8b963958c 100644
--- a/base/common/ReplxxLineReader.cpp
+++ b/base/common/ReplxxLineReader.cpp
@@ -16,6 +16,19 @@ void trim(String & s)
s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end());
}
+// Uses a separate replxx::Replxx instance to avoid re-loading the history in
+// the current context (replxx::Replxx::history_load() re-loads the history
+// from the file), since the entries would then overlap with the history of
+// the current session (this makes the behavior compatible with other
+// interpreters, e.g. bash).
+void history_save(const String & history_file_path, const String & line)
+{
+    replxx::Replxx rx_no_overlap;
+    rx_no_overlap.history_load(history_file_path);
+    rx_no_overlap.history_add(line);
+    rx_no_overlap.history_save(history_file_path);
+}
+
}
ReplxxLineReader::ReplxxLineReader(
@@ -101,6 +114,10 @@ LineReader::InputStatus ReplxxLineReader::readOneLine(const String & prompt)
void ReplxxLineReader::addToHistory(const String & line)
{
// locking history file to prevent from inconsistent concurrent changes
+    //
+    // replxx::Replxx::history_save() already has lockf(),
+    // but replxx::Replxx::history_load() does not,
+    // and that is why flock() is added here.
bool locked = false;
if (flock(history_file_fd, LOCK_EX))
rx.print("Lock of history file failed: %s\n", strerror(errno));
@@ -110,7 +127,7 @@ void ReplxxLineReader::addToHistory(const String & line)
rx.history_add(line);
// flush changes to the disk
-    rx.history_save(history_file_path);
+    history_save(history_file_path, line);
if (locked && 0 != flock(history_file_fd, LOCK_UN))
rx.print("Unlock of history file failed: %s\n", strerror(errno));
diff --git a/benchmark/omnisci/benchmark.sh b/benchmark/omnisci/benchmark.sh
new file mode 100755
index 00000000000..2e4b10fab73
--- /dev/null
+++ b/benchmark/omnisci/benchmark.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+grep -v -P '^#' queries.sql | sed -e 's/{table}/hits/' | while read query; do
+
+    echo 3 | sudo tee /proc/sys/vm/drop_caches
+    sudo systemctl restart omnisci_server
+    for i in {1..1000}; do
+        /opt/omnisci/bin/omnisql -t -p HyperInteractive <<< "SELECT 1;" 2>&1 | grep -q '1 rows returned' && break;
+        sleep 0.1;
+    done
+    sleep 10;
+
+    echo "$query";
+    for i in {1..3}; do
+        /opt/omnisci/bin/omnisql -t -p HyperInteractive <<< "$query" 2>&1 | grep -P 'Exception:|Execution time:';
+    done;
+done;
diff --git a/benchmark/omnisci/instruction.md b/benchmark/omnisci/instruction.md
new file mode 100644
index 00000000000..81b45241277
--- /dev/null
+++ b/benchmark/omnisci/instruction.md
@@ -0,0 +1,332 @@
+# Instructions for running the OmniSci benchmark on the web-analytics dataset
+
+OmniSci (formerly named "MapD") is an open-source (open-core) in-memory analytical DBMS with support for GPU processing. It can also run on CPU, without a GPU. It can show competitive performance on simple queries (e.g. a simple aggregation on a single column).
+
+# How to install
+
+https://docs.omnisci.com/installation-and-configuration/installation/installing-on-ubuntu
+
+# Caveats
+
+- Dataset (at least the needed columns) must fit in memory.
+- It does not support data compression (only dictionary encoding for strings).
+- First query execution is very slow because uncompressed data is read from disk.
+- It does not support indexes for quick range queries.
+- It does not support NOT NULL for data types.
+- It does not support BLOB.
+- No support for UNSIGNED data types (which is OK according to the SQL standard).
+- Lack of string processing functions.
+- Strings are limited to 32767 bytes.
+- GROUP BY on a text data type is supported only if it has dictionary encoding.
+`Exception: Cannot group by string columns which are not dictionary encoded`
+- Some aggregate functions are not supported for strings at all.
+`Aggregate on TEXT is not supported yet.`
+- Sometimes I hit a bug when a query runs in an infinite loop and does not finish (after a retry it finishes successfully).
+- One query took hours to execute, even with retries.
+- Sorting is slow and, with default settings, disabled for large result sets.
+`Exception: Sorting the result would be too slow`
+`Cast from dictionary-encoded string to none-encoded would be slow`
+- There is an approximate count-distinct function, but its precision is not documented.
+
+To enable sorting of large result sets, see:
+https://stackoverflow.com/questions/62977734/omnissci-sorting-the-result-would-be-too-slow
+
+The list of known issues is here:
+https://github.com/omnisci/omniscidb/issues?q=is%3Aissue+author%3Aalexey-milovidov
+
+# How to prepare data
+
+Download the 100-million-row dataset from here and insert it into ClickHouse:
+https://clickhouse.tech/docs/en/getting-started/example-datasets/metrica/
+
+Convert the CREATE TABLE query:
+
+```
+clickhouse-client --query "SHOW CREATE TABLE hits_100m" --format TSVRaw |
+    tr '`' '"' |
+    sed -r -e '
+        s/U?Int64/BIGINT/;
+        s/U?Int32/INTEGER/;
+        s/U?Int16/SMALLINT/;
+        s/U?Int8/TINYINT/;
+        s/DateTime/TIMESTAMP ENCODING FIXED(32)/;
+        s/ Date/ DATE ENCODING DAYS(16)/;
+        s/FixedString\(2\)/TEXT ENCODING DICT(16)/;
+        s/FixedString\(3\)/TEXT ENCODING DICT/;
+        s/FixedString\(\d+\)/TEXT ENCODING DICT/;
+        s/String/TEXT ENCODING DICT/;'
+```
+Then cut off the `ENGINE` part. A single-column example of this transformation is shown below.
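+
+For illustration, applying `tr` and the `sed` substitutions above to one column definition (a sketch; `CounterID` is just one sample column of the table) turns the ClickHouse DDL line
+```
+`CounterID` UInt32,
+```
+into the OmniSci DDL line
+```
+"CounterID" INTEGER,
+```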
+
+The resulting CREATE TABLE query:
+```
+CREATE TABLE hits
+(
+    "WatchID" BIGINT,
+    "JavaEnable" TINYINT,
+    "Title" TEXT ENCODING DICT,
+    "GoodEvent" SMALLINT,
+    "EventTime" TIMESTAMP ENCODING FIXED(32),
+    "EventDate" DATE ENCODING DAYS(16),
+    "CounterID" INTEGER,
+    "ClientIP" INTEGER,
+    "RegionID" INTEGER,
+    "UserID" BIGINT,
+    "CounterClass" TINYINT,
+    "OS" TINYINT,
+    "UserAgent" TINYINT,
+    "URL" TEXT ENCODING DICT,
+    "Referer" TEXT ENCODING DICT,
+    "Refresh" TINYINT,
+    "RefererCategoryID" SMALLINT,
+    "RefererRegionID" INTEGER,
+    "URLCategoryID" SMALLINT,
+    "URLRegionID" INTEGER,
+    "ResolutionWidth" SMALLINT,
+    "ResolutionHeight" SMALLINT,
+    "ResolutionDepth" TINYINT,
+    "FlashMajor" TINYINT,
+    "FlashMinor" TINYINT,
+    "FlashMinor2" TEXT ENCODING DICT,
+    "NetMajor" TINYINT,
+    "NetMinor" TINYINT,
+    "UserAgentMajor" SMALLINT,
+    "UserAgentMinor" TEXT ENCODING DICT(16),
+    "CookieEnable" TINYINT,
+    "JavascriptEnable" TINYINT,
+    "IsMobile" TINYINT,
+    "MobilePhone" TINYINT,
+    "MobilePhoneModel" TEXT ENCODING DICT,
+    "Params" TEXT ENCODING DICT,
+    "IPNetworkID" INTEGER,
+    "TraficSourceID" TINYINT,
+    "SearchEngineID" SMALLINT,
+    "SearchPhrase" TEXT ENCODING DICT,
+    "AdvEngineID" TINYINT,
+    "IsArtifical" TINYINT,
+    "WindowClientWidth" SMALLINT,
+    "WindowClientHeight" SMALLINT,
+    "ClientTimeZone" SMALLINT,
+    "ClientEventTime" TIMESTAMP ENCODING FIXED(32),
+    "SilverlightVersion1" TINYINT,
+    "SilverlightVersion2" TINYINT,
+    "SilverlightVersion3" INTEGER,
+    "SilverlightVersion4" SMALLINT,
+    "PageCharset" TEXT ENCODING DICT,
+    "CodeVersion" INTEGER,
+    "IsLink" TINYINT,
+    "IsDownload" TINYINT,
+    "IsNotBounce" TINYINT,
+    "FUniqID" BIGINT,
+    "OriginalURL" TEXT ENCODING DICT,
+    "HID" INTEGER,
+    "IsOldCounter" TINYINT,
+    "IsEvent" TINYINT,
+    "IsParameter" TINYINT,
+    "DontCountHits" TINYINT,
+    "WithHash" TINYINT,
+    "HitColor" TEXT ENCODING DICT(8),
+    "LocalEventTime" TIMESTAMP ENCODING FIXED(32),
+    "Age" TINYINT,
+    "Sex" TINYINT,
+    "Income" TINYINT,
+    "Interests" SMALLINT,
+    "Robotness" TINYINT,
+    "RemoteIP" INTEGER,
+    "WindowName" INTEGER,
+    "OpenerName" INTEGER,
+    "HistoryLength" SMALLINT,
+    "BrowserLanguage" TEXT ENCODING DICT(16),
+    "BrowserCountry" TEXT ENCODING DICT(16),
+    "SocialNetwork" TEXT ENCODING DICT,
+    "SocialAction" TEXT ENCODING DICT,
+    "HTTPError" SMALLINT,
+    "SendTiming" INTEGER,
+    "DNSTiming" INTEGER,
+    "ConnectTiming" INTEGER,
+    "ResponseStartTiming" INTEGER,
+    "ResponseEndTiming" INTEGER,
+    "FetchTiming" INTEGER,
+    "SocialSourceNetworkID" TINYINT,
+    "SocialSourcePage" TEXT ENCODING DICT,
+    "ParamPrice" BIGINT,
+    "ParamOrderID" TEXT ENCODING DICT,
+    "ParamCurrency" TEXT ENCODING DICT,
+    "ParamCurrencyID" SMALLINT,
+    "OpenstatServiceName" TEXT ENCODING DICT,
+    "OpenstatCampaignID" TEXT ENCODING DICT,
+    "OpenstatAdID" TEXT ENCODING DICT,
+    "OpenstatSourceID" TEXT ENCODING DICT,
+    "UTMSource" TEXT ENCODING DICT,
+    "UTMMedium" TEXT ENCODING DICT,
+    "UTMCampaign" TEXT ENCODING DICT,
+    "UTMContent" TEXT ENCODING DICT,
+    "UTMTerm" TEXT ENCODING DICT,
+    "FromTag" TEXT ENCODING DICT,
+    "HasGCLID" TINYINT,
+    "RefererHash" BIGINT,
+    "URLHash" BIGINT,
+    "CLID" INTEGER
+);
+```
+
+Convert the dataset and prepare the list of fields for the SELECT:
+
+```
+clickhouse-client --query "SHOW CREATE TABLE hits_100m" --format TSVRaw |
+    tr '`' '"' |
+    sed -r -e '
+        s/"(\w+)" U?Int([0-9]+)/toInt\2(\1)/;
+        s/"(\w+)" (Fixed)?String(\([0-9]+\))?/toValidUTF8(toString(\1))/;
+        s/"(\w+)" \w+/\1/'
+```
+
+The resulting SELECT query for data preparation:
+
+```
+SELECT
+    toInt64(WatchID),
+    toInt8(JavaEnable),
+
toValidUTF8(toString(Title)), + toInt16(GoodEvent), + EventTime, + EventDate, + toInt32(CounterID), + toInt32(ClientIP), + toInt32(RegionID), + toInt64(UserID), + toInt8(CounterClass), + toInt8(OS), + toInt8(UserAgent), + toValidUTF8(toString(URL)), + toValidUTF8(toString(Referer)), + toInt8(Refresh), + toInt16(RefererCategoryID), + toInt32(RefererRegionID), + toInt16(URLCategoryID), + toInt32(URLRegionID), + toInt16(ResolutionWidth), + toInt16(ResolutionHeight), + toInt8(ResolutionDepth), + toInt8(FlashMajor), + toInt8(FlashMinor), + toValidUTF8(toString(FlashMinor2)), + toInt8(NetMajor), + toInt8(NetMinor), + toInt16(UserAgentMajor), + toValidUTF8(toString(UserAgentMinor)), + toInt8(CookieEnable), + toInt8(JavascriptEnable), + toInt8(IsMobile), + toInt8(MobilePhone), + toValidUTF8(toString(MobilePhoneModel)), + toValidUTF8(toString(Params)), + toInt32(IPNetworkID), + toInt8(TraficSourceID), + toInt16(SearchEngineID), + toValidUTF8(toString(SearchPhrase)), + toInt8(AdvEngineID), + toInt8(IsArtifical), + toInt16(WindowClientWidth), + toInt16(WindowClientHeight), + toInt16(ClientTimeZone), + ClientEventTime, + toInt8(SilverlightVersion1), + toInt8(SilverlightVersion2), + toInt32(SilverlightVersion3), + toInt16(SilverlightVersion4), + toValidUTF8(toString(PageCharset)), + toInt32(CodeVersion), + toInt8(IsLink), + toInt8(IsDownload), + toInt8(IsNotBounce), + toInt64(FUniqID), + toValidUTF8(toString(OriginalURL)), + toInt32(HID), + toInt8(IsOldCounter), + toInt8(IsEvent), + toInt8(IsParameter), + toInt8(DontCountHits), + toInt8(WithHash), + toValidUTF8(toString(HitColor)), + LocalEventTime, + toInt8(Age), + toInt8(Sex), + toInt8(Income), + toInt16(Interests), + toInt8(Robotness), + toInt32(RemoteIP), + toInt32(WindowName), + toInt32(OpenerName), + toInt16(HistoryLength), + toValidUTF8(toString(BrowserLanguage)), + toValidUTF8(toString(BrowserCountry)), + toValidUTF8(toString(SocialNetwork)), + toValidUTF8(toString(SocialAction)), + toInt16(HTTPError), + toInt32(SendTiming), + toInt32(DNSTiming), + toInt32(ConnectTiming), + toInt32(ResponseStartTiming), + toInt32(ResponseEndTiming), + toInt32(FetchTiming), + toInt8(SocialSourceNetworkID), + toValidUTF8(toString(SocialSourcePage)), + toInt64(ParamPrice), + toValidUTF8(toString(ParamOrderID)), + toValidUTF8(toString(ParamCurrency)), + toInt16(ParamCurrencyID), + toValidUTF8(toString(OpenstatServiceName)), + toValidUTF8(toString(OpenstatCampaignID)), + toValidUTF8(toString(OpenstatAdID)), + toValidUTF8(toString(OpenstatSourceID)), + toValidUTF8(toString(UTMSource)), + toValidUTF8(toString(UTMMedium)), + toValidUTF8(toString(UTMCampaign)), + toValidUTF8(toString(UTMContent)), + toValidUTF8(toString(UTMTerm)), + toValidUTF8(toString(FromTag)), + toInt8(HasGCLID), + toInt64(RefererHash), + toInt64(URLHash), + toInt32(CLID) +FROM hits_100m_obfuscated +INTO OUTFILE '/home/milovidov/example_datasets/hits_100m_obfuscated.csv' +FORMAT CSV; +``` + +Upload data to OmniSci: +``` +/opt/omnisci/bin/omnisql -t -p HyperInteractive +``` +Run CREATE TABLE statement, then run: +``` +COPY hits FROM '/home/milovidov/example_datasets/hits_100m_obfuscated.csv' WITH (HEADER = 'false'); +``` + +Data loading took +``` +336639 ms +``` +on a server (Linux Ubuntu, Xeon E5-2560v2, 32 logical CPU, 128 GiB RAM, 8xHDD RAID-5, 40 TB). 
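+
+That is about 100 million rows in roughly 337 seconds, or roughly 300 thousand rows per second.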
+ +Run benchmark: + +``` +./benchmark.sh +``` + +Prepare the result to paste into JSON: + +``` +grep -oP 'Total time: \d+' log.txt | + grep -oP '\d+' | + awk '{ + if (i % 3 == 0) { a = $1 } + else if (i % 3 == 1) { b = $1 } + else if (i % 3 == 2) { c = $1; print "[" a / 1000 ", " b / 1000 ", " c / 1000 "]," }; + ++i; }' +``` + +And fill out `[null, null, null]` for missing runs. diff --git a/benchmark/omnisci/log.txt b/benchmark/omnisci/log.txt new file mode 100644 index 00000000000..986f00d2096 --- /dev/null +++ b/benchmark/omnisci/log.txt @@ -0,0 +1,210 @@ +3 +SELECT count(*) FROM hits; +Execution time: 23471 ms, Total time: 23471 ms +Execution time: 42 ms, Total time: 43 ms +Execution time: 35 ms, Total time: 35 ms +3 +SELECT count(*) FROM hits WHERE AdvEngineID != 0; +Execution time: 17328 ms, Total time: 17329 ms +Execution time: 58 ms, Total time: 59 ms +Execution time: 57 ms, Total time: 59 ms +3 +SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits; +Execution time: 17309 ms, Total time: 17310 ms +Execution time: 115 ms, Total time: 115 ms +Execution time: 129 ms, Total time: 130 ms +3 +SELECT sum(UserID) FROM hits; +Execution time: 26091 ms, Total time: 26091 ms +Execution time: 88 ms, Total time: 89 ms +Execution time: 71 ms, Total time: 72 ms +3 +SELECT APPROX_COUNT_DISTINCT(UserID) FROM hits; +Execution time: 21720 ms, Total time: 21720 ms +Execution time: 364 ms, Total time: 364 ms +Execution time: 344 ms, Total time: 345 ms +3 +SELECT APPROX_COUNT_DISTINCT(SearchPhrase) FROM hits; +Execution time: 19314 ms, Total time: 19315 ms +Execution time: 385 ms, Total time: 386 ms +Execution time: 382 ms, Total time: 382 ms +3 +SELECT min(EventDate), max(EventDate) FROM hits; +Execution time: 19431 ms, Total time: 19432 ms +Execution time: 130 ms, Total time: 131 ms +Execution time: 147 ms, Total time: 148 ms +3 +SELECT AdvEngineID, count(*) FROM hits WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count(*) DESC; +Execution time: 20660 ms, Total time: 20661 ms +Execution time: 63 ms, Total time: 64 ms +Execution time: 88 ms, Total time: 89 ms +3 +SELECT RegionID, APPROX_COUNT_DISTINCT(UserID) AS u FROM hits GROUP BY RegionID ORDER BY u DESC LIMIT 10; +Execution time: 21364 ms, Total time: 21472 ms +Execution time: 1387 ms, Total time: 1504 ms +Execution time: 1443 ms, Total time: 1505 ms +3 +SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), APPROX_COUNT_DISTINCT(UserID) FROM hits GROUP BY RegionID ORDER BY c DESC LIMIT 10; +Execution time: 22205 ms, Total time: 22285 ms +Execution time: 1590 ms, Total time: 1655 ms +Execution time: 1591 ms, Total time: 1658 ms +3 +SELECT MobilePhoneModel, APPROX_COUNT_DISTINCT(UserID) AS u FROM hits WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10; +Execution time: 22343 ms, Total time: 22344 ms +Execution time: 122 ms, Total time: 123 ms +Execution time: 117 ms, Total time: 118 ms +3 +SELECT MobilePhone, MobilePhoneModel, APPROX_COUNT_DISTINCT(UserID) AS u FROM hits WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10; +Execution time: 21681 ms, Total time: 21695 ms +Execution time: 299 ms, Total time: 310 ms +Execution time: 275 ms, Total time: 292 ms +3 +SELECT SearchPhrase, count(*) AS c FROM hits WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10; +Execution time: 23346 ms, Total time: 23360 ms +Execution time: 613 ms, Total time: 631 ms +Execution time: 606 ms, Total time: 624 ms +3 +SELECT SearchPhrase, 
APPROX_COUNT_DISTINCT(UserID) AS u FROM hits WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10; +Execution time: 66014 ms, Total time: 68618 ms +Execution time: 44309 ms, Total time: 47296 ms +Execution time: 44019 ms, Total time: 46866 ms +3 +SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM hits WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10; +Execution time: 25853 ms, Total time: 25984 ms +Execution time: 2590 ms, Total time: 2728 ms +Execution time: 2652 ms, Total time: 2789 ms +3 +SELECT UserID, count(*) FROM hits GROUP BY UserID ORDER BY count(*) DESC LIMIT 10; +Execution time: 26581 ms, Total time: 26953 ms +Execution time: 5843 ms, Total time: 6158 ms +Execution time: 5970 ms, Total time: 6286 ms +3 +SELECT UserID, SearchPhrase, count(*) FROM hits GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10; +Execution time: 33007 ms, Total time: 33581 ms +Execution time: 9943 ms, Total time: 10509 ms +Execution time: 9470 ms, Total time: 10047 ms +3 +SELECT UserID, SearchPhrase, count(*) FROM hits GROUP BY UserID, SearchPhrase LIMIT 10; +Execution time: 39009 ms, Total time: 39575 ms +Execution time: 8151 ms, Total time: 8785 ms +Execution time: 8037 ms, Total time: 8665 ms +3 +SELECT UserID, extract(minute FROM EventTime) AS m, SearchPhrase, count(*) FROM hits GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10; +Execution time: 56207 ms, Total time: 57764 ms +Execution time: 26653 ms, Total time: 28199 ms +Execution time: 25614 ms, Total time: 27336 ms +3 +SELECT UserID FROM hits WHERE UserID = -6101065172474983726; +Execution time: 18975 ms, Total time: 18976 ms +Execution time: 136 ms, Total time: 136 ms +Execution time: 136 ms, Total time: 136 ms +3 +SELECT count(*) FROM hits WHERE URL LIKE '%metrika%'; +Execution time: 32444 ms, Total time: 32445 ms +Execution time: 125 ms, Total time: 126 ms +Execution time: 134 ms, Total time: 136 ms +3 +SELECT SearchPhrase, min(URL), count(*) AS c FROM hits WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10; +Exception: Aggregate on TEXT is not supported yet. +Exception: Aggregate on TEXT is not supported yet. +Exception: Aggregate on TEXT is not supported yet. +3 +SELECT SearchPhrase, min(URL), min(Title), count(*) AS c, APPROX_COUNT_DISTINCT(UserID) FROM hits WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10; +Exception: Aggregate on TEXT is not supported yet. +Exception: Aggregate on TEXT is not supported yet. +Exception: Aggregate on TEXT is not supported yet. 
+3 +SELECT * FROM hits WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10; +Execution time: 96163 ms, Total time: 96166 ms +Execution time: 312 ms, Total time: 314 ms +Execution time: 303 ms, Total time: 305 ms +3 +SELECT SearchPhrase FROM hits WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10; +Execution time: 27493 ms, Total time: 27494 ms +Execution time: 216 ms, Total time: 216 ms +Execution time: 221 ms, Total time: 222 ms +3 +SELECT SearchPhrase FROM hits WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10; +Execution time: 38230 ms, Total time: 38308 ms +Execution time: 17175 ms, Total time: 17256 ms +Execution time: 17225 ms, Total time: 17310 ms +3 +SELECT SearchPhrase FROM hits WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10; +Execution time: 115614 ms, Total time: 115714 ms +Execution time: 95944 ms, Total time: 96041 ms +Execution time: 94274 ms, Total time: 94383 ms +3 +SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM hits WHERE URL != '' GROUP BY CounterID HAVING c > 100000 ORDER BY l DESC LIMIT 25; +Execution time: 31775 ms, Total time: 31779 ms +Execution time: 2643 ms, Total time: 2647 ms +Execution time: 2933 ms, Total time: 2937 ms +3 +SELECT domainWithoutWWW(Referer) AS key, avg(length(Referer)) AS l, count(*) AS c, min(Referer) FROM hits WHERE Referer != '' GROUP BY key HAVING c > 100000 ORDER BY l DESC LIMIT 25; +Exception: Exception occurred: org.apache.calcite.runtime.CalciteContextException: From line 1, column 8 to line 1, column 36: No match found for function signature domainWithoutWWW() +Exception: Exception occurred: org.apache.calcite.runtime.CalciteContextException: From line 1, column 8 to line 1, column 36: No match found for function signature domainWithoutWWW() +Exception: Exception occurred: org.apache.calcite.runtime.CalciteContextException: From line 1, column 8 to line 1, column 36: No match found for function signature domainWithoutWWW() +3 +SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), 
sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits; +Execution time: 28853 ms, Total time: 28854 ms +Execution time: 5654 ms, Total time: 5655 ms +Execution time: 5579 ms, Total time: 5581 ms +3 +SELECT SearchEngineID, ClientIP, count(*) AS c, sum("Refresh"), avg(ResolutionWidth) FROM hits WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10; +Execution time: 31694 ms, Total time: 31925 ms +Execution time: 3872 ms, Total time: 4142 ms +Execution time: 3928 ms, Total time: 4162 ms +3 +SELECT WatchID, ClientIP, count(*) AS c, sum("Refresh"), avg(ResolutionWidth) FROM hits WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10; +Execution time: 43690 ms, Total time: 44297 ms +Execution time: 8221 ms, Total time: 8825 ms +Execution time: 8115 ms, Total time: 8711 ms +3 +SELECT URL, count(*) AS c FROM hits GROUP BY URL ORDER BY c DESC LIMIT 10; +Execution time: 29669 ms, Total time: 29715 ms +Execution time: 1623 ms, Total time: 1669 ms +Execution time: 1534 ms, Total time: 1586 ms +3 +SELECT 1, URL, count(*) AS c FROM hits GROUP BY 1, URL ORDER BY c DESC LIMIT 10; +Execution time: 34860 ms, Total time: 35201 ms +Execution time: 7075 ms, Total time: 7414 ms +Execution time: 7164 ms, Total time: 7567 ms +3 +SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM hits GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10; +Execution time: 26467 ms, Total time: 26724 ms +Execution time: 5740 ms, Total time: 6026 ms +Execution time: 5667 ms, Total time: 5920 ms +3 +SELECT URL, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "Refresh" = 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10; +Execution time: 31899 ms, Total time: 31908 ms +Execution time: 1141 ms, Total time: 1154 ms +Execution time: 1155 ms, Total time: 1168 ms +3 +SELECT Title, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "Refresh" = 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10; +Execution time: 27991 ms, Total time: 27997 ms +Execution time: 719 ms, Total time: 724 ms +Execution time: 737 ms, Total time: 744 ms +3 +SELECT URL, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "Refresh" = 0 AND IsLink != 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000; +Execution time: 34651 ms, Total time: 34661 ms +Execution time: 1182 ms, Total time: 1200 ms +Execution time: 1142 ms, Total time: 1159 ms +3 +SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE 
WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "Refresh" = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000; +Execution time: 30130 ms, Total time: 30136 ms +Execution time: 461 ms, Total time: 467 ms +Execution time: 445 ms, Total time: 451 ms +3 +SELECT URLHash, EventDate, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "Refresh" = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 686716256552154761 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100; +Execution time: 19989 ms, Total time: 19991 ms +Execution time: 326 ms, Total time: 327 ms +Execution time: 325 ms, Total time: 326 ms +3 +SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "Refresh" = 0 AND DontCountHits = 0 AND URLHash = 686716256552154761 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000; +Execution time: 18658 ms, Total time: 18660 ms +Execution time: 265 ms, Total time: 266 ms +Execution time: 254 ms, Total time: 255 ms +3 +SELECT DATE_TRUNC(minute, EventTime) AS "Minute", count(*) AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-02' AND "Refresh" = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC(minute, EventTime) ORDER BY DATE_TRUNC(minute, EventTime); +Execution time: 25225 ms, Total time: 25227 ms +Execution time: 210 ms, Total time: 212 ms +Execution time: 199 ms, Total time: 200 ms diff --git a/benchmark/omnisci/queries.sql b/benchmark/omnisci/queries.sql new file mode 100644 index 00000000000..342de72db8a --- /dev/null +++ b/benchmark/omnisci/queries.sql @@ -0,0 +1,43 @@ +SELECT count(*) FROM {table}; +SELECT count(*) FROM {table} WHERE AdvEngineID != 0; +SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM {table}; +SELECT sum(UserID) FROM {table}; +SELECT APPROX_COUNT_DISTINCT(UserID) FROM {table}; +SELECT APPROX_COUNT_DISTINCT(SearchPhrase) FROM {table}; +SELECT min(EventDate), max(EventDate) FROM {table}; +SELECT AdvEngineID, count(*) FROM {table} WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count(*) DESC; +SELECT RegionID, APPROX_COUNT_DISTINCT(UserID) AS u FROM {table} GROUP BY RegionID ORDER BY u DESC LIMIT 10; +SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), APPROX_COUNT_DISTINCT(UserID) FROM {table} GROUP BY RegionID ORDER BY c DESC LIMIT 10; +SELECT MobilePhoneModel, APPROX_COUNT_DISTINCT(UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10; +SELECT MobilePhone, MobilePhoneModel, APPROX_COUNT_DISTINCT(UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10; +SELECT SearchPhrase, count(*) AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10; +SELECT SearchPhrase, APPROX_COUNT_DISTINCT(UserID) AS u FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10; +SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10; +SELECT UserID, count(*) FROM {table} GROUP BY UserID ORDER BY count(*) DESC LIMIT 10; +SELECT UserID, 
SearchPhrase, count(*) FROM {table} GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10; +SELECT UserID, SearchPhrase, count(*) FROM {table} GROUP BY UserID, SearchPhrase LIMIT 10; +SELECT UserID, extract(minute FROM EventTime) AS m, SearchPhrase, count(*) FROM {table} GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10; +SELECT UserID FROM {table} WHERE UserID = -6101065172474983726; +SELECT count(*) FROM {table} WHERE URL LIKE '%metrika%'; +SELECT SearchPhrase, min(URL), count(*) AS c FROM {table} WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10; +SELECT SearchPhrase, min(URL), min(Title), count(*) AS c, APPROX_COUNT_DISTINCT(UserID) FROM {table} WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10; +SELECT * FROM {table} WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10; +SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10; +SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10; +SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10; +SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM {table} WHERE URL != '' GROUP BY CounterID HAVING c > 100000 ORDER BY l DESC LIMIT 25; +SELECT domainWithoutWWW(Referer) AS key, avg(length(Referer)) AS l, count(*) AS c, min(Referer) FROM {table} WHERE Referer != '' GROUP BY key HAVING c > 100000 ORDER BY l DESC LIMIT 25; +SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), 
sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM {table};
+SELECT SearchEngineID, ClientIP, count(*) AS c, sum("Refresh"), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
+SELECT WatchID, ClientIP, count(*) AS c, sum("Refresh"), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
+#SELECT WatchID, ClientIP, count(*) AS c, sum("Refresh"), avg(ResolutionWidth) FROM {table} GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
+SELECT URL, count(*) AS c FROM {table} GROUP BY URL ORDER BY c DESC LIMIT 10;
+SELECT 1, URL, count(*) AS c FROM {table} GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
+SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM {table} GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
+SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "Refresh" = 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
+SELECT Title, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "Refresh" = 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
+SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "Refresh" = 0 AND IsLink != 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
+SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "Refresh" = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
+SELECT URLHash, EventDate, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "Refresh" = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 686716256552154761 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
+SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "Refresh" = 0 AND DontCountHits = 0 AND URLHash = 686716256552154761 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
+SELECT DATE_TRUNC(minute, EventTime) AS "Minute", count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-02' AND "Refresh" = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC(minute, EventTime) ORDER BY DATE_TRUNC(minute, EventTime);
diff --git a/cmake/find/stats.cmake b/cmake/find/stats.cmake
new file mode 100644
index 00000000000..ef5b1a73659
--- /dev/null
+++ b/cmake/find/stats.cmake
@@ -0,0 +1,20 @@
+option(ENABLE_STATS "Enable StatsLib library" ${ENABLE_LIBRARIES})
+
+if (ENABLE_STATS)
+    if (NOT EXISTS
"${ClickHouse_SOURCE_DIR}/contrib/stats") + message (WARNING "submodule contrib/stats is missing. to fix try run: \n git submodule update --init --recursive") + set (ENABLE_STATS 0) + set (USE_STATS 0) + elseif (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/gcem") + message (WARNING "submodule contrib/gcem is missing. to fix try run: \n git submodule update --init --recursive") + set (ENABLE_STATS 0) + set (USE_STATS 0) + else() + set(STATS_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/stats/include) + set(GCEM_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/gcem/include) + set (USE_STATS 1) + endif() +endif() + +message (STATUS "Using stats=${USE_STATS} : ${STATS_INCLUDE_DIR}") +message (STATUS "Using gcem=${USE_STATS}: ${GCEM_INCLUDE_DIR}") diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index dc0d37f5af5..b4ac383004b 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -306,6 +306,8 @@ if (USE_SENTRY) endif() add_subdirectory (fmtlib-cmake) -add_subdirectory (stats-cmake) -add_subdirectory (gcem) +if (USE_STATS) + add_subdirectory (stats-cmake) + add_subdirectory (gcem) +endif() diff --git a/contrib/stats-cmake/CMakeLists.txt b/contrib/stats-cmake/CMakeLists.txt index e6ee2c85b29..a159e85a0e3 100644 --- a/contrib/stats-cmake/CMakeLists.txt +++ b/contrib/stats-cmake/CMakeLists.txt @@ -1,6 +1,5 @@ -# The stats is a header-only library of probability density functions, +# The stats is a header-only library of probability density functions, # cumulative distribution functions, quantile functions, and random sampling methods. - set(STATS_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/stats/include) set(GCEM_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/gcem/include) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 5da4e8d1160..7036aa8bf36 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -90,8 +90,45 @@ do sleep 0.1 done -TESTS_TO_SKIP="parquet avro h3 odbc mysql sha256 _orc_ arrow 01098_temporary_and_external_tables 01083_expressions_in_engine_arguments hdfs 00911_tautological_compare protobuf capnproto java_hash hashing secure 00490_special_line_separators_and_characters_outside_of_bmp 00436_convert_charset 00105_shard_collations 01354_order_by_tuple_collate_const 01292_create_user 01098_msgpack_format 00929_multi_match_edit_distance 00926_multimatch 00834_cancel_http_readonly_queries_on_client_close brotli parallel_alter 00302_http_compression 00417_kill_query 01294_lazy_database_concurrent 01193_metadata_loading base64 01031_mutations_interpreter_and_context json client 01305_replica_create_drop_zookeeper 01092_memory_profiler 01355_ilike 01281_unsucceeded_insert_select_queries_counter live_view limit_memory memory_limit memory_leak 00110_external_sort 00682_empty_parts_merge 00701_rollup 00109_shard_totals_after_having ddl_dictionaries 01251_dict_is_in_infinite_loop 01259_dictionary_custom_settings_ddl 01268_dictionary_direct_layout 01280_ssd_complex_key_dictionary 00652_replicated_mutations_zookeeper" +TESTS_TO_SKIP="parquet avro h3 odbc mysql sha256 _orc_ arrow 01098_temporary_and_external_tables 01083_expressions_in_engine_arguments hdfs 00911_tautological_compare protobuf capnproto java_hash hashing secure 00490_special_line_separators_and_characters_outside_of_bmp 00436_convert_charset 00105_shard_collations 01354_order_by_tuple_collate_const 01292_create_user 01098_msgpack_format 00929_multi_match_edit_distance 00926_multimatch 00834_cancel_http_readonly_queries_on_client_close brotli parallel_alter 
00302_http_compression 00417_kill_query 01294_lazy_database_concurrent 01193_metadata_loading base64 01031_mutations_interpreter_and_context json client 01305_replica_create_drop_zookeeper 01092_memory_profiler 01355_ilike 01281_unsucceeded_insert_select_queries_counter live_view limit_memory memory_limit memory_leak 00110_external_sort 00682_empty_parts_merge 00701_rollup 00109_shard_totals_after_having ddl_dictionaries 01251_dict_is_in_infinite_loop 01259_dictionary_custom_settings_ddl 01268_dictionary_direct_layout 01280_ssd_complex_key_dictionary 00652_replicated_mutations_zookeeper 01411_bayesian_ab_testing" clickhouse-test -j 4 --no-long --testname --shard --zookeeper --skip $TESTS_TO_SKIP 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/test_log.txt + +kill_clickhouse () { + kill `ps ax | grep clickhouse-server | grep -v 'grep' | awk '{print $1}'` 2>/dev/null + + for i in {1..10} + do + if ! kill -0 `ps ax | grep clickhouse-server | grep -v 'grep' | awk '{print $1}'`; then + echo "No clickhouse process" + break + else + echo "Process" `ps ax | grep clickhouse-server | grep -v 'grep' | awk '{print $1}'` "still alive" + sleep 10 + fi + done +} + + +FAILED_TESTS=`grep 'FAIL\|TIMEOUT\|ERROR' /test_output/test_log.txt | awk 'BEGIN { ORS=" " }; { print substr($3, 1, length($3)-1) }'` + + +if [[ ! -z "$FAILED_TESTS" ]]; then + kill_clickhouse + + clickhouse-server --config /etc/clickhouse-server/config.xml --daemon + + until clickhouse-client --query "SELECT 1" + do + sleep 0.1 + done + + echo "Going to run again: $FAILED_TESTS" + + clickhouse-test --no-long --testname --shard --zookeeper $FAILED_TESTS 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a /test_output/test_log.txt +else + echo "No failed tests" +fi + mv /var/log/clickhouse-server/* /test_output diff --git a/docker/test/fuzzer/Dockerfile b/docker/test/fuzzer/Dockerfile index e6f8053f5f6..04d533eb601 100644 --- a/docker/test/fuzzer/Dockerfile +++ b/docker/test/fuzzer/Dockerfile @@ -29,7 +29,9 @@ RUN apt-get update \ COPY * / -CMD cd /workspace \ +SHELL ["/bin/bash", "-c"] +CMD set -o pipefail \ + && cd /workspace \ && /run-fuzzer.sh 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee main.log # docker run --network=host --volume :/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> yandex/clickhouse-fuzzer diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 61ca36a7ff5..a9905d274f2 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -100,12 +100,6 @@ function fuzz sleep 1 done killall -9 clickhouse-server ||: - - if [ "$fuzzer_exit_code" == "143" ] - then - # Killed by watchdog, meaning, no errors. - fuzzer_exit_code=0 - fi } case "$stage" in @@ -122,8 +116,9 @@ case "$stage" in # Run the testing script from the repository echo Using the testing script from the repository export stage=download + time ch/docker/test/fuzzer/run-fuzzer.sh # Keep the error code - time ch/docker/test/fuzzer/run-fuzzer.sh || exit $? + exit $? fi ;& "download") @@ -154,19 +149,31 @@ case "$stage" in pstree -aspgT # Make files with status and description we'll show for this check on Github - if [ "$fuzzer_exit_code" == 0 ] + task_exit_code=$fuzzer_exit_code + if [ "$fuzzer_exit_code" == 143 ] then - echo "OK" > description.txt + # SIGTERM -- the fuzzer was killed by timeout, which means a normal run. echo "success" > status.txt - else + echo "OK" > description.txt + task_exit_code=0 + elif [ "$fuzzer_exit_code" == 210 ] + then + # Lost connection to the server. 
This probably means that the server died
+        # with abort.
echo "failure" > status.txt
if ! grep -a "Received signal \|Logical error" server.log > description.txt
then
-            echo "Fuzzer exit code $fuzzer_exit_code. See the logs" > description.txt
+            echo "Lost connection to server. See the logs" > description.txt
fi
+    else
+        # Something different -- maybe the fuzzer itself died? Don't grep the
+        # server log in this case, because we will find a message about normal
+        # server termination (Received signal 15), which is confusing.
+        echo "failure" > status.txt
+        echo "Fuzzer failed ($fuzzer_exit_code). See the logs" > description.txt
fi
-    exit $fuzzer_exit_code
+    exit $task_exit_code
;&
esac
diff --git a/docker/test/performance-comparison/entrypoint.sh b/docker/test/performance-comparison/entrypoint.sh
index f9b90fce863..be087eb956c 100755
--- a/docker/test/performance-comparison/entrypoint.sh
+++ b/docker/test/performance-comparison/entrypoint.sh
@@ -4,6 +4,8 @@ set -ex
# Use the packaged repository to find the revision we will compare to.
function find_reference_sha
{
+    git -C right/ch log -1 origin/master
+    git -C right/ch log -1 pr
# Go back from the revision to be tested, trying to find the closest published
# testing release. The PR branch may be either pull/*/head which is the
# author's branch, or pull/*/merge, which is head merged with some master
diff --git a/docs/en/getting-started/example-datasets/metrica.md b/docs/en/getting-started/example-datasets/metrica.md
index a9cf80716ce..4131dca78fe 100644
--- a/docs/en/getting-started/example-datasets/metrica.md
+++ b/docs/en/getting-started/example-datasets/metrica.md
@@ -7,7 +7,7 @@ toc_title: Yandex.Metrica Data
Dataset consists of two tables containing anonymized data about hits (`hits_v1`) and visits (`visits_v1`) of Yandex.Metrica. You can read more about Yandex.Metrica in [ClickHouse history](../../introduction/history.md) section.
-The dataset consists of two tables, either of them can be downloaded as a compressed `tsv.xz` file or as prepared partitions. In addition to that, an extended version of the `hits` table containing 100 million rows is available as TSV at https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits\_100m\_obfuscated\_v1.tsv.xz and as prepared partitions at https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits\_100m\_obfuscated\_v1.tar.xz.
+The dataset consists of two tables, each of which can be downloaded as a compressed `tsv.xz` file or as prepared partitions. In addition to that, an extended version of the `hits` table containing 100 million rows is available as TSV at https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_100m_obfuscated_v1.tsv.xz and as prepared partitions at https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz.
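+
+For example, once the extended table is imported you can sanity-check the row count (a sketch; `hits_100m_obfuscated` is the table name used by the OmniSci benchmark instructions, adjust it to however you named the table):
+
+``` sql
+SELECT count(*) FROM hits_100m_obfuscated;
+```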
## Obtaining Tables from Prepared Partitions {#obtaining-tables-from-prepared-partitions} diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md index d14c7c770ac..6351e34b3a3 100644 --- a/docs/en/introduction/adopters.md +++ b/docs/en/introduction/adopters.md @@ -19,27 +19,33 @@ toc_title: Adopters | Benocs | Network Telemetry and Analytics | Main Product | — | — | [Slides in English, October 2017](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup9/lpm.pdf) | | Bloomberg | Finance, Media | Monitoring | 102 servers | — | [Slides, May 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) | | Bloxy | Blockchain | Analytics | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) | -| Dataliance for China Telecom | Telecom | Analytics | — | — | [Slides in Chinese, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) | | CardsMobile | Finance | Analytics | — | — | [VC.ru](https://vc.ru/s/cardsmobile/143449-rukovoditel-gruppy-analiza-dannyh) | | CARTO | Business Intelligence | Geo analytics | — | — | [Geospatial processing with ClickHouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) | | CERN | Research | Experiment | — | — | [Press release, April 2012](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) | | Cisco | Networking | Traffic analysis | — | — | [Lightning talk, October 2019](https://youtu.be/-hI1vDR2oPY?t=5057) | | Citadel Securities | Finance | — | — | — | [Contribution, March 2019](https://github.com/ClickHouse/ClickHouse/pull/4774) | | Citymobil | Taxi | Analytics | — | — | [Blog Post in Russian, March 2020](https://habr.com/en/company/citymobil/blog/490660/) | -| ContentSquare | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | | Cloudflare | CDN | Traffic analysis | 36 servers | — | [Blog post, May 2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [Blog post, March 2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) | +| ContentSquare | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) | | Corunet | Analytics | Main product | — | — | [Slides in English, April 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) | | CraiditX 氪信 | Finance AI | Analysis | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) | +| Crazypanda | Games | | — | — | Live session on ClickHouse meetup | | Criteo | Retail | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/3_storetail.pptx) | +| Dataliance for China Telecom | Telecom | Analytics | — | — | [Slides in Chinese, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/telecom.pdf) | | Deutsche Bank | Finance | BI Analytics | — | — | [Slides in English, October 
2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) | | Diva-e | Digital consulting | Main Product | — | — | [Slides in English, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) | | Exness | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) | +| Flipkart | e-Commerce | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=239) | +| FunCorp | Games | | — | — | [Article](https://www.altinity.com/blog/migrating-from-redshift-to-clickhouse) | | Geniee | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) | | HUYA | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) | | Idealista | Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) | | Infovista | Networks | Analytics | — | — | [Slides in English, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup30/infovista.pdf) | | InnoGames | Games | Metrics, Logging | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/graphite_and_clickHouse.pdf) | +| Instana | APM Platform | Main product | — | — | [Twitter post](https://twitter.com/mieldonkers/status/1248884119158882304) | | Integros | Platform for video services | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | +| Ippon Technologies | Technology Consulting | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=205) | +| Jinshuju 金数据 | BI Analytics | Main product | — | — | [Slides in Chinese, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) | | Kodiak Data | Clouds | Main product | — | — | [Slides in Engish, April 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) | | Kontur | Software Development | Metrics | — | — | [Talk in Russian, November 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) | | Lawrence Berkeley National Laboratory | Research | Traffic analysis | 1 server | 11.8 TiB | [Slides in English, April 2019](https://www.smitasin.com/presentations/2019-04-17_DOE-NSM.pdf) | @@ -48,29 +54,34 @@ toc_title: Adopters | Marilyn | Advertising | Statistics | — | — | [Talk in Russian, June 2017](https://www.youtube.com/watch?v=iXlIgx2khwc) | | MessageBird | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) | | MGID | Ad network | Web-analytics | — | — | [Blog post in Russian, April 2020](http://gs-studio.com/news-about-it/32777----clickhouse---c) | +| Nuna Inc. 
| Health Data Analytics | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=170) |
| OneAPM | Monitorings and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) |
+| Percent 百分点 | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) |
+| Plausible | Analytics | Main Product | — | — | [Blog post, June 2020](https://twitter.com/PlausibleHQ/status/1273889629087969280) |
+| Postmates | Delivery | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=188) |
| Pragma Innovation | Telemetry and Big Data Analysis | Main product | — | — | [Slides in English, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup18/4_pragma_innovation.pdf) |
| QINGCLOUD | Cloud services | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/4.%20Cloud%20%2B%20TSDB%20for%20ClickHouse%20张健%20QingCloud.pdf) |
| Qrator | DDoS protection | Main product | — | — | [Blog Post, March 2019](https://blog.qrator.net/en/clickhouse-ddos-mitigation_37/) |
-| Percent 百分点 | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) |
-| Plausible | Analytics | Main Product | — | — | [Blog post, June 2020](https://twitter.com/PlausibleHQ/status/1273889629087969280) |
| Rambler | Internet services | Analytics | — | — | [Talk in Russian, April 2018](https://medium.com/@ramblertop/разработка-api-clickhouse-для-рамблер-топ-100-f4c7e56f3141) |
-| Tencent | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) |
-| Traffic Stars | AD network | — | — | — | [Slides in Russian, May 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) |
| S7 Airlines | Airlines | Metrics, Logging | — | — | [Talk in Russian, March 2019](https://www.youtube.com/watch?v=nwG68klRpPg&t=15s) |
-| SEMrush | Marketing | Main product | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) |
| scireum GmbH | e-Commerce | Main product | — | — | [Talk in German, February 2020](https://www.youtube.com/watch?v=7QWAn5RbyR4) |
+| SEMrush | Marketing | Main product | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/5_semrush.pdf) |
| Sentry | Software Development | Main product | — | — | [Blog Post in English, May 2019](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) |
-| SGK | Goverment Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) |
| seo.do | Analytics | Main product | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) |
+| SGK | Government Social Security | Analytics | — | — | [Slides in English, November
2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) | | Sina | News | — | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) | | SMI2 | News | Analytics | — | — | [Blog Post in Russian, November 2017](https://habr.com/ru/company/smi2/blog/314558/) | | Splunk | Business Analytics | Main product | — | — | [Slides in English, January 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup12/splunk.pdf) | | Spotify | Music | Experimentation | — | — | [Slides, July 2018](https://www.slideshare.net/glebus/using-clickhouse-for-experimentation-104247173) | | Tencent | Big Data | Data processing | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) | +| Tencent | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) | +| Traffic Stars | AD network | — | — | — | [Slides in Russian, May 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) | | Uber | Taxi | Logging | — | — | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/uber.pdf) | | VKontakte | Social Network | Statistics, Logging | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) | +| Walmart Labs | Internet, Retail | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=144) | +| Wargaming | Games | | — | — | [Interview](https://habr.com/en/post/496954/) | | Wisebits | IT Solutions | Analytics | — | — | [Slides in Russian, May 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup22/strategies.pdf) | +| Workato | Automation Software | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=334) | | Xiaoxin Tech | Education | Common purpose | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/sync-clickhouse-with-mysql-mongodb.pptx) | | Ximalaya | Audio sharing | OLAP | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/ximalaya.pdf) | | Yandex Cloud | Public Cloud | Main product | — | — | [Talk in Russian, December 2019](https://www.youtube.com/watch?v=pgnak9e_E0o) | @@ -79,10 +90,5 @@ toc_title: Adopters | Yandex Metrica | Web analytics | Main product | 360 servers in one cluster, 1862 servers in one department | 66.41 PiB / 5.68 PiB | [Slides, February 2020](https://presentations.clickhouse.tech/meetup40/introduction/#13) | | ЦВТ | Software Development | Metrics, Logging | — | — | [Blog Post, March 2019, in Russian](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) | | МКБ | Bank | Web-system monitoring | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) | -| Jinshuju 金数据 | BI Analytics | Main product | — | — | [Slides in Chinese, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) | -| Instana | APM Platform | Main product | — | — | [Twitter post](https://twitter.com/mieldonkers/status/1248884119158882304) | -| Wargaming | Games | | — | — | [Interview](https://habr.com/en/post/496954/) | -| 
Crazypanda | Games | | — | — | Live session on ClickHouse meetup | -| FunCorp | Games | | — | — | [Article](https://www.altinity.com/blog/migrating-from-redshift-to-clickhouse) | [Original article](https://clickhouse.tech/docs/en/introduction/adopters/) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 829fedc8deb..aaca694dc06 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -1459,6 +1459,20 @@ Possible values: Default value: 16. +## parallel_distributed_insert_select {#parallel_distributed_insert_select} + +Enables parallel processing of distributed `INSERT ... SELECT` queries. + +If you execute an `INSERT INTO distributed_table_a SELECT ... FROM distributed_table_b` query where both tables use the same cluster, and both tables are either [replicated](../../engines/table-engines/mergetree-family/replication.md) or non-replicated, then the query is processed locally on every shard. + + +Possible values: + +- 0 — Disabled. +- 1 — Enabled. + +Default value: 0. + ## insert_distributed_sync {#insert_distributed_sync} Enables or disables synchronous data insertion into a [Distributed](../../engines/table-engines/special/distributed.md#distributed) table. @@ -1709,4 +1723,21 @@ SELECT * FROM a; +---+ ``` +## mutations_sync {#mutations_sync} + +Allows executing `ALTER TABLE ... UPDATE|DELETE` queries ([mutations](../../sql-reference/statements/alter/index.md#mutations)) synchronously. + +Possible values: + +- 0 — Mutations execute asynchronously. +- 1 — The query waits for all mutations to complete on the current server. +- 2 — The query waits for all mutations to complete on all replicas (if they exist). + +Default value: `0`. + +**See also** + +- [Synchronicity of ALTER Queries](../../sql-reference/statements/alter/index.md#synchronicity-of-alter-queries) +- [Mutations](../../sql-reference/statements/alter/index.md#mutations) + [Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) diff --git a/docs/en/operations/system-tables/metric_log.md b/docs/en/operations/system-tables/metric_log.md index 91d8553683e..1e38eb7e247 100644 --- a/docs/en/operations/system-tables/metric_log.md +++ b/docs/en/operations/system-tables/metric_log.md @@ -50,6 +50,6 @@ CurrentMetric_ReplicatedChecks: 0 **See also** - [system.asynchronous\_metrics](../../operations/system-tables/asynchronous_metrics.md) — Contains periodically calculated metrics. -- [system.events](../../operations/system-tables/events.md) — Contains a number of events that occurred. +- [system.events](../../operations/system-tables/events.md#system_tables-events) — Contains a number of events that occurred. - [system.metrics](../../operations/system-tables/metrics.md) — Contains instantly calculated metrics. - [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring. diff --git a/docs/en/sql-reference/aggregate-functions/index.md b/docs/en/sql-reference/aggregate-functions/index.md index a75cbc6cac5..270b7d8db39 100644 --- a/docs/en/sql-reference/aggregate-functions/index.md +++ b/docs/en/sql-reference/aggregate-functions/index.md @@ -13,6 +13,7 @@ ClickHouse also supports: - [Parametric aggregate functions](../../sql-reference/aggregate-functions/parametric-functions.md#aggregate_functions_parametric), which accept other parameters in addition to columns. - [Combinators](../../sql-reference/aggregate-functions/combinators.md#aggregate_functions_combinators), which change the behavior of aggregate functions.
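+For instance, the `-If` combinator turns any aggregate function into its conditional counterpart. A quick sketch (it assumes the `t_null_big` table with a numeric column `y` that appears in the examples below): + +```sql +SELECT sumIf(y, y > 1) FROM t_null_big -- sums only the rows where y > 1; t_null_big is the sample table used below +```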
+ ## NULL Processing {#null-processing} During aggregation, all `NULL`s are skipped. @@ -37,9 +38,11 @@ Let’s say you need to total the values in the `y` column: SELECT sum(y) FROM t_null_big ``` - ┌─sum(y)─┐ - │ 7 │ - └────────┘ +```text +┌─sum(y)─┐ +│ 7 │ +└────────┘ +``` The `sum` function interprets `NULL` as `0`. In particular, this means that if the function receives input of a selection where all the values are `NULL`, then the result will be `0`, not `NULL`. @@ -57,4 +60,5 @@ SELECT groupArray(y) FROM t_null_big `groupArray` does not include `NULL` in the resulting array. + [Original article](https://clickhouse.tech/docs/en/query_language/agg_functions/) diff --git a/docs/en/sql-reference/aggregate-functions/reference/index.md b/docs/en/sql-reference/aggregate-functions/reference/index.md index 0fce4c1d8b3..860f1283f8d 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/index.md +++ b/docs/en/sql-reference/aggregate-functions/reference/index.md @@ -1,10 +1,10 @@ --- toc_folder_title: Reference toc_priority: 36 -toc_title: Reference +toc_hidden: true --- -# Aggregate Function Reference {#aggregate-functions-reference} +# List of Aggregate Functions {#aggregate-functions-reference} Standard aggregate functions: @@ -24,97 +24,51 @@ Standard aggregate functions: ClickHouse-specific aggregate functions: - [anyHeavy](../../../sql-reference/aggregate-functions/reference/anyheavy.md) - - [anyLast](../../../sql-reference/aggregate-functions/reference/anylast.md) - - [argMin](../../../sql-reference/aggregate-functions/reference/argmin.md) - - [argMax](../../../sql-reference/aggregate-functions/reference/argmax.md) - - [avgWeighted](../../../sql-reference/aggregate-functions/reference/avgweighted.md) - - [topK](../../../sql-reference/aggregate-functions/reference/topk.md) - - [topKWeighted](../../../sql-reference/aggregate-functions/reference/topkweighted.md) - - [groupArray](../../../sql-reference/aggregate-functions/reference/grouparray.md) - - [groupUniqArray](../../../sql-reference/aggregate-functions/reference/groupuniqarray.md) - - [groupArrayInsertAt](../../../sql-reference/aggregate-functions/reference/grouparrayinsertat.md) - - [groupArrayMovingAvg](../../../sql-reference/aggregate-functions/reference/grouparraymovingavg.md) - - [groupArrayMovingSum](../../../sql-reference/aggregate-functions/reference/grouparraymovingsum.md) - - [groupBitAnd](../../../sql-reference/aggregate-functions/reference/groupbitand.md) - - [groupBitOr](../../../sql-reference/aggregate-functions/reference/groupbitor.md) - - [groupBitXor](../../../sql-reference/aggregate-functions/reference/groupbitxor.md) - - [groupBitmap](../../../sql-reference/aggregate-functions/reference/groupbitmap.md) - - [groupBitmapAnd](../../../sql-reference/aggregate-functions/reference/groupbitmapand.md) - - [groupBitmapOr](../../../sql-reference/aggregate-functions/reference/groupbitmapor.md) - - [groupBitmapXor](../../../sql-reference/aggregate-functions/reference/groupbitmapxor.md) - - [sumWithOverflow](../../../sql-reference/aggregate-functions/reference/sumwithoverflow.md) - - [sumMap](../../../sql-reference/aggregate-functions/reference/summap.md) - - [minMap](../../../sql-reference/aggregate-functions/reference/minmap.md) - - [maxMap](../../../sql-reference/aggregate-functions/reference/maxmap.md) - - [skewSamp](../../../sql-reference/aggregate-functions/reference/skewsamp.md) - - [skewPop](../../../sql-reference/aggregate-functions/reference/skewpop.md) - -
[kurtSamp](../../../sql-reference/aggregate-functions/reference/kurtsamp.md) - - [kurtPop](../../../sql-reference/aggregate-functions/reference/kurtpop.md) - - [timeSeriesGroupSum](../../../sql-reference/aggregate-functions/reference/timeseriesgroupsum.md) - - [timeSeriesGroupRateSum](../../../sql-reference/aggregate-functions/reference/timeseriesgroupratesum.md) - - [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md) - - [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md) - - [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md) - - [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md) - - [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md) - - [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md) - - [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md) - - [quantileExact](../../../sql-reference/aggregate-functions/reference/quantileexact.md) - - [quantileExactWeighted](../../../sql-reference/aggregate-functions/reference/quantileexactweighted.md) - - [quantileTiming](../../../sql-reference/aggregate-functions/reference/quantiletiming.md) - - [quantileTimingWeighted](../../../sql-reference/aggregate-functions/reference/quantiletimingweighted.md) - - [quantileDeterministic](../../../sql-reference/aggregate-functions/reference/quantiledeterministic.md) - - [quantileTDigest](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md) - - [quantileTDigestWeighted](../../../sql-reference/aggregate-functions/reference/quantiletdigestweighted.md) - - [simpleLinearRegression](../../../sql-reference/aggregate-functions/reference/simplelinearregression.md) - - [stochasticLinearRegression](../../../sql-reference/aggregate-functions/reference/stochasticlinearregression.md) - - [stochasticLogisticRegression](../../../sql-reference/aggregate-functions/reference/stochasticlogisticregression.md) - - [categoricalInformationValue](../../../sql-reference/aggregate-functions/reference/categoricalinformationvalue.md) [Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/) diff --git a/docs/en/sql-reference/functions/tuple-map-functions.md b/docs/en/sql-reference/functions/tuple-map-functions.md new file mode 100644 index 00000000000..343f45135eb --- /dev/null +++ b/docs/en/sql-reference/functions/tuple-map-functions.md @@ -0,0 +1,48 @@ +--- +toc_priority: 46 +toc_title: Working with maps +--- + +# Functions for maps {#functions-for-working-with-tuple-maps} + +## mapAdd(Tuple(Array, Array), Tuple(Array, Array) [, ...]) {#function-mapadd} + +Collects all the keys and sums the corresponding values. + +Arguments are tuples of two arrays, where items in the first array represent keys, and the second array +contains values for each key. +All key arrays should have the same type, and all value arrays should contain items which are promotable to one type (Int64, UInt64 or Float64). +The common promoted type is used as the type for the result array. + +Returns one tuple, where the first array contains the sorted keys and the second array contains the corresponding values.
+ +```sql +SELECT mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) as res, toTypeName(res) as type; +``` + +```text +┌─res───────────┬─type───────────────────────────────┐ +│ ([1,2],[2,2]) │ Tuple(Array(UInt8), Array(UInt64)) │ +└───────────────┴────────────────────────────────────┘ +``` + +## mapSubtract(Tuple(Array, Array), Tuple(Array, Array) [, ...]) {#function-mapsubtract} + +Collects all the keys and subtracts the corresponding values. + +Arguments are tuples of two arrays, where items in the first array represent keys, and the second array +contains values for each key. +All key arrays should have the same type, and all value arrays should contain items which are promotable to one type (Int64, UInt64 or Float64). +The common promoted type is used as the type for the result array. + +Returns one tuple, where the first array contains the sorted keys and the second array contains the corresponding values. + +```sql +SELECT mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt32(2), 1])) as res, toTypeName(res) as type; +``` + +```text +┌─res────────────┬─type──────────────────────────────┐ +│ ([1,2],[-1,0]) │ Tuple(Array(UInt8), Array(Int64)) │ +└────────────────┴───────────────────────────────────┘ +``` diff --git a/docs/en/sql-reference/statements/alter/delete.md b/docs/en/sql-reference/statements/alter/delete.md index 0195ac7fb64..23a7bf0e8f1 100644 --- a/docs/en/sql-reference/statements/alter/delete.md +++ b/docs/en/sql-reference/statements/alter/delete.md @@ -9,7 +9,7 @@ toc_title: DELETE ALTER TABLE [db.]table [ON CLUSTER cluster] DELETE WHERE filter_expr ``` -Allows to asynchronously delete data matching the specified filtering expression. Implemented as a [mutation](../../../sql-reference/statements/index.md#mutations). +Allows deleting data matching the specified filtering expression. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). !!! note "Note" The `ALTER TABLE` prefix makes this syntax different from most other systems supporting SQL. It is intended to signify that unlike similar queries in OLTP databases this is a heavy operation not designed for frequent use. @@ -17,3 +17,11 @@ Allows to delete data matching the specified filtering expression The `filter_expr` must be of type `UInt8`. The query deletes rows in the table for which this expression takes a non-zero value. One query can contain several commands separated by commas. + +The synchronicity of the query processing is defined by the [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting. By default, it is asynchronous.
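+ +For example, a minimal sketch of a synchronous delete (the `visits` table and the filter here are hypothetical): + +``` sql +SET mutations_sync = 2; -- wait until the mutation completes on all replicas +ALTER TABLE visits DELETE WHERE UserID = 123; -- returns only after the mutation has finished +```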
+ +**See also** + +- [Mutations](../../../sql-reference/statements/alter/index.md#mutations) +- [Synchronicity of ALTER Queries](../../../sql-reference/statements/alter/index.md#synchronicity-of-alter-queries) +- [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting diff --git a/docs/en/sql-reference/statements/alter/index.md b/docs/en/sql-reference/statements/alter/index.md index 2d1d3bb5200..60b7375f76d 100644 --- a/docs/en/sql-reference/statements/alter/index.md +++ b/docs/en/sql-reference/statements/alter/index.md @@ -27,12 +27,6 @@ While these `ALTER` settings modify entities related to role-based access contro - [ROW POLICY](../../../sql-reference/statements/alter/row-policy.md) - [SETTINGS PROFILE](../../../sql-reference/statements/alter/settings-profile.md) -## Synchronicity of ALTER Queries {#synchronicity-of-alter-queries} - -For non-replicated tables, all `ALTER` queries are performed synchronously. For replicated tables, the query just adds instructions for the appropriate actions to `ZooKeeper`, and the actions themselves are performed as soon as possible. However, the query can wait for these actions to be completed on all the replicas. - -For `ALTER ... ATTACH|DETACH|DROP` queries, you can use the `replication_alter_partitions_sync` setting to set up waiting. Possible values: `0` – do not wait; `1` – only wait for own execution (default); `2` – wait for all. - ## Mutations {#mutations} `ALTER` queries that are intended to manipulate table data are implemented with a mechanism called “mutations”, most notably [ALTER TABLE … DELETE](../../../sql-reference/statements/alter/delete.md) and [ALTER TABLE … UPDATE](../../../sql-reference/statements/alter/update.md). They are asynchronous background processes similar to merges in [MergeTree](../../../engines/table-engines/mergetree-family/index.md) tables that produce new “mutated” versions of parts. @@ -45,4 +39,12 @@ A mutation query returns immediately after the mutation entry is added (in case Entries for finished mutations are not deleted right away (the number of preserved entries is determined by the `finished_mutations_to_keep` storage engine parameter). Older mutation entries are deleted. +## Synchronicity of ALTER Queries {#synchronicity-of-alter-queries} + +For non-replicated tables, all `ALTER` queries are performed synchronously. For replicated tables, the query just adds instructions for the appropriate actions to `ZooKeeper`, and the actions themselves are performed as soon as possible. However, the query can wait for these actions to be completed on all the replicas. + +For `ALTER ... ATTACH|DETACH|DROP` queries, you can use the `replication_alter_partitions_sync` setting to set up waiting. Possible values: `0` – do not wait; `1` – only wait for own execution (default); `2` – wait for all. + +For `ALTER TABLE ... UPDATE|DELETE` queries, the synchronicity is defined by the [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting. + [Original article](https://clickhouse.tech/docs/en/query_language/alter/) diff --git a/docs/en/sql-reference/statements/alter/update.md b/docs/en/sql-reference/statements/alter/update.md index 6945d1f3961..45e00236974 100644 --- a/docs/en/sql-reference/statements/alter/update.md +++ b/docs/en/sql-reference/statements/alter/update.md @@ -9,7 +9,7 @@ toc_title: UPDATE ALTER TABLE [db.]table UPDATE column1 = expr1 [, ...] WHERE filter_expr ``` -Allows to asynchronously manipulate data matching the specified filtering expression.
Implemented as a [mutation](../../../sql-reference/statements/index.md#mutations). +Allows manipulating data matching the specified filtering expression. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). !!! note "Note" The `ALTER TABLE` prefix makes this syntax different from most other systems supporting SQL. It is intended to signify that unlike similar queries in OLTP databases this is a heavy operation not designed for frequent use. @@ -17,3 +17,12 @@ Allows to manipulate data matching the specified filtering expres The `filter_expr` must be of type `UInt8`. This query updates values of specified columns to the values of corresponding expressions in rows for which the `filter_expr` takes a non-zero value. Values are casted to the column type using the `CAST` operator. Updating columns that are used in the calculation of the primary or the partition key is not supported. One query can contain several commands separated by commas. + +The synchronicity of the query processing is defined by the [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting. By default, it is asynchronous. + +**See also** + +- [Mutations](../../../sql-reference/statements/alter/index.md#mutations) +- [Synchronicity of ALTER Queries](../../../sql-reference/statements/alter/index.md#synchronicity-of-alter-queries) +- [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting + diff --git a/docs/es/engines/table-engines/special/materializedview.md b/docs/es/engines/table-engines/special/materializedview.md index 8b130ab141e..87e5218eb6a 100644 --- a/docs/es/engines/table-engines/special/materializedview.md +++ b/docs/es/engines/table-engines/special/materializedview.md @@ -7,6 +7,6 @@ toc_title: "M\xE9todo de codificaci\xF3n de datos:" # Método de codificación de datos: {#materializedview} -Se utiliza para implementar vistas materializadas (para obtener más información, consulte [CREATE TABLE](../../../sql-reference/statements/create.md)). Para almacenar datos, utiliza un motor diferente que se especificó al crear la vista. Al leer desde una tabla, solo usa este motor. +Se utiliza para implementar vistas materializadas (para obtener más información, consulte [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query)). Para almacenar datos, utiliza un motor diferente que se especificó al crear la vista. Al leer desde una tabla, solo usa este motor. [Artículo Original](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/) diff --git a/docs/fa/engines/table-engines/special/materializedview.md b/docs/fa/engines/table-engines/special/materializedview.md index 34a4d17848f..01b02766c9d 100644 --- a/docs/fa/engines/table-engines/special/materializedview.md +++ b/docs/fa/engines/table-engines/special/materializedview.md @@ -7,6 +7,6 @@ toc_title: "\u0645\u0627\u062F\u0647 \u0628\u06CC\u0646\u06CC" # ماده بینی {#materializedview} -مورد استفاده برای اجرای نمایش محقق (برای اطلاعات بیشتر, دیدن [CREATE TABLE](../../../sql-reference/statements/create.md)). برای ذخیره سازی داده ها از یک موتور مختلف استفاده می کند که هنگام ایجاد دیدگاه مشخص شده است. هنگام خواندن از یک جدول, فقط با استفاده از این موتور. +مورد استفاده برای اجرای نمایش محقق (برای اطلاعات بیشتر, دیدن [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query)). برای ذخیره سازی داده ها از یک موتور مختلف استفاده می کند که هنگام ایجاد دیدگاه مشخص شده است. هنگام خواندن از یک جدول, فقط با استفاده از این موتور.
[مقاله اصلی](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/) diff --git a/docs/fr/engines/table-engines/special/materializedview.md b/docs/fr/engines/table-engines/special/materializedview.md index e1b4d4a708d..700a95b4d65 100644 --- a/docs/fr/engines/table-engines/special/materializedview.md +++ b/docs/fr/engines/table-engines/special/materializedview.md @@ -7,6 +7,6 @@ toc_title: MaterializedView # Materializedview {#materializedview} -Utilisé pour implémenter des vues matérialisées (pour plus d'informations, voir [CREATE TABLE](../../../sql-reference/statements/create.md)). Pour stocker des données, il utilise un moteur différent qui a été spécifié lors de la création de la vue. Lors de la lecture d'une table, il utilise juste ce moteur. +Utilisé pour implémenter des vues matérialisées (pour plus d'informations, voir [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query)). Pour stocker des données, il utilise un moteur différent qui a été spécifié lors de la création de la vue. Lors de la lecture d'une table, il utilise juste ce moteur. [Article Original](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/) diff --git a/docs/ja/engines/table-engines/special/materializedview.md b/docs/ja/engines/table-engines/special/materializedview.md index 2935e0492ce..64b13600bbb 100644 --- a/docs/ja/engines/table-engines/special/materializedview.md +++ b/docs/ja/engines/table-engines/special/materializedview.md @@ -7,6 +7,6 @@ toc_title: "\u30DE\u30C6\u30EA\u30A2\u30E9\u30A4\u30BA\u30C9\u30D3\u30E5\u30FC" # マテリアライズドビュー {#materializedview} -マテリアライズドビューの実装に使用されます(詳細については、 [CREATE TABLE](../../../sql-reference/statements/create.md)). データを格納するために、ビューの作成時に指定された別のエンジンを使用します。 読み込み時にテーブルから、使用してこのエンジンです。 +マテリアライズドビューの実装に使用されます(詳細については、 [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query)). データを格納するために、ビューの作成時に指定された別のエンジンを使用します。 読み込み時にテーブルから、使用してこのエンジンです。 [元の記事](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/) diff --git a/docs/ru/engines/table-engines/log-family/index.md b/docs/ru/engines/table-engines/log-family/index.md index 866cee7d17e..4aa7540c0a8 100644 --- a/docs/ru/engines/table-engines/log-family/index.md +++ b/docs/ru/engines/table-engines/log-family/index.md @@ -26,7 +26,7 @@ toc_priority: 29 Во время запросов `INSERT` таблица блокируется, а другие запросы на чтение и запись ожидают разблокировки таблицы. Если запросов на запись данных нет, то можно выполнять любое количество конкуретных запросов на чтение. -- Не поддерживают операции [мутации](../../../engines/table-engines/log-family/index.md#alter-mutations). +- Не поддерживают операции [мутации](../../../sql-reference/statements/alter.md#mutations). - Не поддерживают индексы. diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md index e8acdccb8d3..5eed6ddd3b4 100644 --- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md @@ -601,7 +601,7 @@ SETTINGS storage_policy = 'moving_from_ssd_to_hdd' В таблицах `MergeTree` данные попадают на диск несколькими способами: - В результате вставки (запрос `INSERT`). -- В фоновых операциях слияний и [мутаций](../../../engines/table-engines/mergetree-family/mergetree.md#alter-mutations). +- В фоновых операциях слияний и [мутаций](../../../sql-reference/statements/alter.md#mutations). - При скачивании данных с другой реплики. 
- В результате заморозки партиций [ALTER TABLE … FREEZE PARTITION](../../../engines/table-engines/mergetree-family/mergetree.md#alter_freeze-partition). diff --git a/docs/ru/engines/table-engines/special/materializedview.md b/docs/ru/engines/table-engines/special/materializedview.md index 08a2b4a3c07..6eb5853615c 100644 --- a/docs/ru/engines/table-engines/special/materializedview.md +++ b/docs/ru/engines/table-engines/special/materializedview.md @@ -1,5 +1,5 @@ # MaterializedView {#materializedview} -Используется для реализации материализованных представлений (подробнее см. запрос [CREATE TABLE](../../../engines/table-engines/special/materializedview.md)). Для хранения данных, использует другой движок, который был указан при создании представления. При чтении из таблицы, просто использует этот движок. +Используется для реализации материализованных представлений (подробнее см. запрос [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query)). Для хранения данных, использует другой движок, который был указан при создании представления. При чтении из таблицы, просто использует этот движок. [Оригинальная статья](https://clickhouse.tech/docs/ru/operations/table_engines/materializedview/) diff --git a/docs/ru/guides/apply-catboost-model.md b/docs/ru/guides/apply-catboost-model.md index 1de8f013db8..3515d2731c2 100644 --- a/docs/ru/guides/apply-catboost-model.md +++ b/docs/ru/guides/apply-catboost-model.md @@ -227,4 +227,4 @@ FROM ``` !!! note "Примечание" - Подробнее про функции [avg()](../sql-reference/aggregate-functions/reference.md#agg_function-avg), [log()](../sql-reference/functions/math-functions.md). + Подробнее про функции [avg()](../sql-reference/aggregate-functions/reference/avg.md#agg_function-avg), [log()](../sql-reference/functions/math-functions.md). diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index b8b64ae65c4..55fa93284fa 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -1012,15 +1012,15 @@ ClickHouse генерирует исключение ## count\_distinct\_implementation {#settings-count_distinct_implementation} -Задаёт, какая из функций `uniq*` используется при выполнении конструкции [COUNT(DISTINCT …)](../../sql-reference/aggregate-functions/reference.md#agg_function-count). +Задаёт, какая из функций `uniq*` используется при выполнении конструкции [COUNT(DISTINCT …)](../../sql-reference/aggregate-functions/reference/count.md#agg_function-count). Возможные значения: -- [uniq](../../sql-reference/aggregate-functions/reference.md#agg_function-uniq) -- [uniqCombined](../../sql-reference/aggregate-functions/reference.md#agg_function-uniqcombined) -- [uniqCombined64](../../sql-reference/aggregate-functions/reference.md#agg_function-uniqcombined64) -- [uniqHLL12](../../sql-reference/aggregate-functions/reference.md#agg_function-uniqhll12) -- [uniqExact](../../sql-reference/aggregate-functions/reference.md#agg_function-uniqexact) +- [uniq](../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) +- [uniqCombined](../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined) +- [uniqCombined64](../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64) +- [uniqHLL12](../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12) +- [uniqExact](../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) Значение по умолчанию: `uniqExact`. 
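+ +Небольшой набросок использования (таблица `t` и столбец `num` здесь условные): + +``` sql +SET count_distinct_implementation = 'uniq'; -- теперь COUNT(DISTINCT ...) будет выполняться функцией uniq +SELECT COUNT(DISTINCT num) FROM t; +```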
@@ -1278,6 +1278,19 @@ Default value: 0. Значение по умолчанию: 16. +## parallel_distributed_insert_select {#parallel_distributed_insert_select} + +Включает параллельную обработку распределённых запросов `INSERT ... SELECT`. + +Если при выполнении запроса `INSERT INTO distributed_table_a SELECT ... FROM distributed_table_b` оказывается, что обе таблицы находятся в одном кластере, то независимо от того [реплицируемые](../../engines/table-engines/mergetree-family/replication.md) они или нет, запрос выполняется локально на каждом шарде. + +Допустимые значения: + +- 0 — выключена. +- 1 — включена. + +Значение по умолчанию: 0. + ## insert_distributed_sync {#insert_distributed_sync} Включает или отключает режим синхронного добавления данных в распределенные таблицы (таблицы с движком [Distributed](../../engines/table-engines/special/distributed.md#distributed)). @@ -1467,4 +1480,21 @@ SELECT idx, i FROM null_in WHERE i IN (1, NULL) SETTINGS transform_null_in = 1; - [min_insert_block_size_bytes](#min-insert-block-size-bytes) +## mutations_sync {#mutations_sync} + +Позволяет выполнять запросы `ALTER TABLE ... UPDATE|DELETE` ([мутации](../../sql-reference/statements/alter.md#mutations)) синхронно. + +Возможные значения: + +- 0 - мутации выполняются асинхронно. +- 1 - запрос ждет завершения всех мутаций на текущем сервере. +- 2 - запрос ждет завершения всех мутаций на всех репликах (если они есть). + +Значение по умолчанию: `0`. + +**См. также** + +- [Синхронность запросов ALTER](../../sql-reference/statements/alter.md#synchronicity-of-alter-queries) +- [Мутации](../../sql-reference/statements/alter.md#mutations) + [Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/settings/) diff --git a/docs/ru/operations/system-tables.md b/docs/ru/operations/system-tables.md index 5dd8fc3461a..b6b1dc3e35a 100644 --- a/docs/ru/operations/system-tables.md +++ b/docs/ru/operations/system-tables.md @@ -1359,7 +1359,7 @@ path: /clickhouse/tables/01-08/visits/replicas ## system.mutations {#system_tables-mutations} -Таблица содержит информацию о ходе выполнения [мутаций](../sql-reference/statements/alter.md#alter-mutations) таблиц семейства MergeTree. Каждой команде мутации соответствует одна строка таблицы. +Таблица содержит информацию о ходе выполнения [мутаций](../sql-reference/statements/alter.md#mutations) таблиц семейства MergeTree. Каждой команде мутации соответствует одна строка таблицы. Столбцы: @@ -1400,7 +1400,7 @@ path: /clickhouse/tables/01-08/visits/replicas **См. 
также** -- [Мутации](../sql-reference/statements/alter.md#alter-mutations) +- [Мутации](../sql-reference/statements/alter.md#mutations) - [Движок MergeTree](../engines/table-engines/mergetree-family/mergetree.md) - [Репликация данных](../engines/table-engines/mergetree-family/replication.md) (семейство ReplicatedMergeTree) diff --git a/docs/ru/sql-reference/aggregate-functions/combinators.md b/docs/ru/sql-reference/aggregate-functions/combinators.md index ec325d62b02..ca65f6ac615 100644 --- a/docs/ru/sql-reference/aggregate-functions/combinators.md +++ b/docs/ru/sql-reference/aggregate-functions/combinators.md @@ -23,7 +23,7 @@ ## -State {#state} -В случае применения этого комбинатора, агрегатная функция возвращает не готовое значение (например, в случае функции [uniq](reference.md#agg_function-uniq) — количество уникальных значений), а промежуточное состояние агрегации (например, в случае функции `uniq` — хэш-таблицу для расчёта количества уникальных значений), которое имеет тип `AggregateFunction(...)` и может использоваться для дальнейшей обработки или может быть сохранено в таблицу для последующей доагрегации. +В случае применения этого комбинатора, агрегатная функция возвращает не готовое значение (например, в случае функции [uniq](reference/uniq.md#agg_function-uniq) — количество уникальных значений), а промежуточное состояние агрегации (например, в случае функции `uniq` — хэш-таблицу для расчёта количества уникальных значений), которое имеет тип `AggregateFunction(...)` и может использоваться для дальнейшей обработки или может быть сохранено в таблицу для последующей доагрегации. Для работы с промежуточными состояниями предназначены: @@ -206,7 +206,7 @@ FROM Получим имена людей, чей возраст находится в интервалах `[30,60)` и `[60,75)`. Поскольку мы используем целочисленное представление возраста, то интервалы будут выглядеть как `[30, 59]` и `[60,74]`. -Чтобы собрать имена в массив, возьмём агрегатную функцию [groupArray](reference.md#agg_function-grouparray). Она принимает один аргумент. В нашем случае, это столбец `name`. Функция `groupArrayResample` должна использовать столбец `age` для агрегирования имён по возрасту. Чтобы определить необходимые интервалы, передадим в функцию `groupArrayResample` аргументы `30, 75, 30`. +Чтобы собрать имена в массив, возьмём агрегатную функцию [groupArray](../../sql-reference/aggregate-functions/reference/grouparray.md#agg_function-grouparray). Она принимает один аргумент. В нашем случае, это столбец `name`. Функция `groupArrayResample` должна использовать столбец `age` для агрегирования имён по возрасту. Чтобы определить необходимые интервалы, передадим в функцию `groupArrayResample` аргументы `30, 75, 30`. ``` sql SELECT groupArrayResample(30, 75, 30)(name, age) from people diff --git a/docs/ru/sql-reference/aggregate-functions/index.md b/docs/ru/sql-reference/aggregate-functions/index.md index 6bdb2a7808d..e7f6acee738 100644 --- a/docs/ru/sql-reference/aggregate-functions/index.md +++ b/docs/ru/sql-reference/aggregate-functions/index.md @@ -60,4 +60,4 @@ SELECT groupArray(y) FROM t_null_big `groupArray` не включает `NULL` в результирующий массив. 
-[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/agg_functions/) +[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/aggregate-functions/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference.md b/docs/ru/sql-reference/aggregate-functions/reference.md deleted file mode 100644 index 612d073c484..00000000000 --- a/docs/ru/sql-reference/aggregate-functions/reference.md +++ /dev/null @@ -1,1835 +0,0 @@ -# Справочник функций {#aggregate-functions-reference} - -## count {#agg_function-count} - -Вычисляет количество строк или не NULL значений . - -ClickHouse поддерживает следующие виды синтаксиса для `count`: - -- `count(expr)` или `COUNT(DISTINCT expr)`. -- `count()` или `COUNT(*)`. Синтаксис `count()` специфичен для ClickHouse. - -**Параметры** - -Функция может принимать: - -- Ноль параметров. -- Одно [выражение](../syntax.md#syntax-expressions). - -**Возвращаемое значение** - -- Если функция вызывается без параметров, она вычисляет количество строк. -- Если передаётся [выражение](../syntax.md#syntax-expressions) , то функция вычисляет количество раз, когда выражение возвращает не NULL. Если выражение возвращает значение типа [Nullable](../../sql-reference/data-types/nullable.md), то результат `count` не становится `Nullable`. Функция возвращает 0, если выражение возвращает `NULL` для всех строк. - -В обоих случаях тип возвращаемого значения [UInt64](../../sql-reference/data-types/int-uint.md). - -**Подробности** - -ClickHouse поддерживает синтаксис `COUNT(DISTINCT ...)`. Поведение этой конструкции зависит от настройки [count\_distinct\_implementation](../../operations/settings/settings.md#settings-count_distinct_implementation). Она определяет, какая из функций [uniq\*](#agg_function-uniq) используется для выполнения операции. По умолчанию — функция [uniqExact](#agg_function-uniqexact). - -Запрос `SELECT count() FROM table` не оптимизирован, поскольку количество записей в таблице не хранится отдельно. Он выбирает небольшой столбец из таблицы и подсчитывает количество значений в нём. - -**Примеры** - -Пример 1: - -``` sql -SELECT count() FROM t -``` - -``` text -┌─count()─┐ -│ 5 │ -└─────────┘ -``` - -Пример 2: - -``` sql -SELECT name, value FROM system.settings WHERE name = 'count_distinct_implementation' -``` - -``` text -┌─name──────────────────────────┬─value─────┐ -│ count_distinct_implementation │ uniqExact │ -└───────────────────────────────┴───────────┘ -``` - -``` sql -SELECT count(DISTINCT num) FROM t -``` - -``` text -┌─uniqExact(num)─┐ -│ 3 │ -└────────────────┘ -``` - -Этот пример показывает, что `count(DISTINCT num)` выполняется с помощью функции `uniqExact` в соответствии со значением настройки `count_distinct_implementation`. - -## any(x) {#agg_function-any} - -Выбирает первое попавшееся значение. -Порядок выполнения запроса может быть произвольным и даже каждый раз разным, поэтому результат данной функции недетерминирован. -Для получения детерминированного результата, можно использовать функции min или max вместо any. - -В некоторых случаях, вы всё-таки можете рассчитывать на порядок выполнения запроса. Это - случаи, когда SELECT идёт из подзапроса, в котором используется ORDER BY. - -При наличии в запросе `SELECT` секции `GROUP BY` или хотя бы одной агрегатной функции, ClickHouse (в отличие от, например, MySQL) требует, чтобы все выражения в секциях `SELECT`, `HAVING`, `ORDER BY` вычислялись из ключей или из агрегатных функций. 
То есть, каждый выбираемый из таблицы столбец, должен использоваться либо в ключах, либо внутри агрегатных функций. Чтобы получить поведение, как в MySQL, вы можете поместить остальные столбцы в агрегатную функцию `any`. - -## anyHeavy(x) {#agg_function-anyheavy} - -Выбирает часто встречающееся значение с помощью алгоритма «[heavy hitters](http://www.cs.umd.edu/~samir/498/karp.pdf)». Если существует значение, которое встречается чаще, чем в половине случаев, в каждом потоке выполнения запроса, то возвращается данное значение. В общем случае, результат недетерминирован. - -``` sql -anyHeavy(column) -``` - -**Аргументы** - -- `column` — имя столбца. - -**Пример** - -Возьмём набор данных [OnTime](../../getting-started/example-datasets/ontime.md) и выберем произвольное часто встречающееся значение в столбце `AirlineID`. - -``` sql -SELECT anyHeavy(AirlineID) AS res -FROM ontime -``` - -``` text -┌───res─┐ -│ 19690 │ -└───────┘ -``` - -## anyLast(x) {#anylastx} - -Выбирает последнее попавшееся значение. -Результат так же недетерминирован, как и для функции `any`. - -## groupBitAnd {#groupbitand} - -Применяет побитовое `И` для последовательности чисел. - -``` sql -groupBitAnd(expr) -``` - -**Параметры** - -`expr` – выражение, результат которого имеет тип данных `UInt*`. - -**Возвращаемое значение** - -Значение типа `UInt*`. - -**Пример** - -Тестовые данные: - -``` text -binary decimal -00101100 = 44 -00011100 = 28 -00001101 = 13 -01010101 = 85 -``` - -Запрос: - -``` sql -SELECT groupBitAnd(num) FROM t -``` - -Где `num` — столбец с тестовыми данными. - -Результат: - -``` text -binary decimal -00000100 = 4 -``` - -## groupBitOr {#groupbitor} - -Применяет побитовое `ИЛИ` для последовательности чисел. - -``` sql -groupBitOr(expr) -``` - -**Параметры** - -`expr` – выражение, результат которого имеет тип данных `UInt*`. - -**Возвращаемое значение** - -Значение типа `UInt*`. - -**Пример** - -Тестовые данные: - -``` text -binary decimal -00101100 = 44 -00011100 = 28 -00001101 = 13 -01010101 = 85 -``` - -Запрос: - -``` sql -SELECT groupBitOr(num) FROM t -``` - -Где `num` — столбец с тестовыми данными. - -Результат: - -``` text -binary decimal -01111101 = 125 -``` - -## groupBitXor {#groupbitxor} - -Применяет побитовое `ИСКЛЮЧАЮЩЕЕ ИЛИ` для последовательности чисел. - -``` sql -groupBitXor(expr) -``` - -**Параметры** - -`expr` – выражение, результат которого имеет тип данных `UInt*`. - -**Возвращаемое значение** - -Значение типа `UInt*`. - -**Пример** - -Тестовые данные: - -``` text -binary decimal -00101100 = 44 -00011100 = 28 -00001101 = 13 -01010101 = 85 -``` - -Запрос: - -``` sql -SELECT groupBitXor(num) FROM t -``` - -Где `num` — столбец с тестовыми данными. - -Результат: - -``` text -binary decimal -01101000 = 104 -``` - -## groupBitmap {#groupbitmap} - -Bitmap или агрегатные вычисления для столбца с типом данных `UInt*`, возвращают кардинальность в виде значения типа UInt64, если добавить суффикс -State, то возвращают [объект bitmap](../../sql-reference/aggregate-functions/reference.md). - -``` sql -groupBitmap(expr) -``` - -**Параметры** - -`expr` – выражение, результат которого имеет тип данных `UInt*`. - -**Возвращаемое значение** - -Значение типа `UInt64`. - -**Пример** - -Тестовые данные: - -``` text -UserID -1 -1 -2 -3 -``` - -Запрос: - -``` sql -SELECT groupBitmap(UserID) as num FROM t -``` - -Результат: - -``` text -num -3 -``` - -## min(x) {#agg_function-min} - -Вычисляет минимум. - -## max(x) {#agg_function-max} - -Вычисляет максимум. 
- -## argMin(arg, val) {#agg-function-argmin} - -Вычисляет значение arg при минимальном значении val. Если есть несколько разных значений arg для минимальных значений val, то выдаётся первое попавшееся из таких значений. - -**Пример:** - -``` text -┌─user─────┬─salary─┐ -│ director │ 5000 │ -│ manager │ 3000 │ -│ worker │ 1000 │ -└──────────┴────────┘ -``` - -``` sql -SELECT argMin(user, salary) FROM salary -``` - -``` text -┌─argMin(user, salary)─┐ -│ worker │ -└──────────────────────┘ -``` - -## argMax(arg, val) {#agg-function-argmax} - -Вычисляет значение arg при максимальном значении val. Если есть несколько разных значений arg для максимальных значений val, то выдаётся первое попавшееся из таких значений. - -## sum(x) {#agg_function-sum} - -Вычисляет сумму. -Работает только для чисел. - -## sumWithOverflow(x) {#agg_function-sumwithoverflow} - -Вычисляет сумму чисел, используя для результата тот же тип данных, что и для входных параметров. Если сумма выйдет за максимальное значение для заданного типа данных, то функция вернёт ошибку. - -Работает только для чисел. - -## sumMap(key, value) {#summapkey-value} - -Производит суммирование массива ‘value’ по соответствующим ключам заданным в массиве ‘key’. -Количество элементов в ‘key’ и ‘value’ должно быть одинаковым для каждой строки, для которой происходит суммирование. -Возвращает кортеж из двух массивов - ключи в отсортированном порядке и значения, просуммированные по соответствующим ключам. - -Пример: - -``` sql -CREATE TABLE sum_map( - date Date, - timeslot DateTime, - statusMap Nested( - status UInt16, - requests UInt64 - ) -) ENGINE = Log; -INSERT INTO sum_map VALUES - ('2000-01-01', '2000-01-01 00:00:00', [1, 2, 3], [10, 10, 10]), - ('2000-01-01', '2000-01-01 00:00:00', [3, 4, 5], [10, 10, 10]), - ('2000-01-01', '2000-01-01 00:01:00', [4, 5, 6], [10, 10, 10]), - ('2000-01-01', '2000-01-01 00:01:00', [6, 7, 8], [10, 10, 10]); -SELECT - timeslot, - sumMap(statusMap.status, statusMap.requests) -FROM sum_map -GROUP BY timeslot -``` - -``` text -┌────────────timeslot─┬─sumMap(statusMap.status, statusMap.requests)─┐ -│ 2000-01-01 00:00:00 │ ([1,2,3,4,5],[10,10,20,10,10]) │ -│ 2000-01-01 00:01:00 │ ([4,5,6,7,8],[10,10,20,10,10]) │ -└─────────────────────┴──────────────────────────────────────────────┘ -``` - -## skewPop {#skewpop} - -Вычисляет [коэффициент асимметрии](https://ru.wikipedia.org/wiki/Коэффициент_асимметрии) для последовательности. - -``` sql -skewPop(expr) -``` - -**Параметры** - -`expr` — [Выражение](../syntax.md#syntax-expressions), возвращающее число. - -**Возвращаемое значение** - -Коэффициент асимметрии заданного распределения. Тип — [Float64](../../sql-reference/aggregate-functions/reference.md) - -**Пример** - -``` sql -SELECT skewPop(value) FROM series_with_value_column -``` - -## skewSamp {#skewsamp} - -Вычисляет [выборочный коэффициент асимметрии](https://ru.wikipedia.org/wiki/Статистика_(функция_выборки)) для последовательности. - -Он представляет собой несмещенную оценку асимметрии случайной величины, если переданные значения образуют ее выборку. - -``` sql -skewSamp(expr) -``` - -**Параметры** - -`expr` — [Выражение](../syntax.md#syntax-expressions), возвращающее число. - -**Возвращаемое значение** - -Коэффициент асимметрии заданного распределения. Тип — [Float64](../../sql-reference/aggregate-functions/reference.md). Если `n <= 1` (`n` — размер выборки), тогда функция возвращает `nan`. 
- -**Пример** - -``` sql -SELECT skewSamp(value) FROM series_with_value_column -``` - -## kurtPop {#kurtpop} - -Вычисляет [коэффициент эксцесса](https://ru.wikipedia.org/wiki/Коэффициент_эксцесса) последовательности. - -``` sql -kurtPop(expr) -``` - -**Параметры** - -`expr` — [Выражение](../syntax.md#syntax-expressions), возвращающее число. - -**Возвращаемое значение** - -Коэффициент эксцесса заданного распределения. Тип — [Float64](../../sql-reference/aggregate-functions/reference.md) - -**Пример** - -``` sql -SELECT kurtPop(value) FROM series_with_value_column -``` - -## kurtSamp {#kurtsamp} - -Вычисляет [выборочный коэффициент эксцесса](https://ru.wikipedia.org/wiki/Статистика_(функция_выборки)) для последовательности. - -Он представляет собой несмещенную оценку эксцесса случайной величины, если переданные значения образуют ее выборку. - -``` sql -kurtSamp(expr) -``` - -**Параметры** - -`expr` — [Выражение](../syntax.md#syntax-expressions), возвращающее число. - -**Возвращаемое значение** - -Коэффициент эксцесса заданного распределения. Тип — [Float64](../../sql-reference/aggregate-functions/reference.md). Если `n <= 1` (`n` — размер выборки), тогда функция возвращает `nan`. - -**Пример** - -``` sql -SELECT kurtSamp(value) FROM series_with_value_column -``` - -## timeSeriesGroupSum(uid, timestamp, value) {#agg-function-timeseriesgroupsum} - -`timeSeriesGroupSum` агрегирует временные ряды в которых не совпадают моменты. -Функция использует линейную интерполяцию между двумя значениями времени, а затем суммирует значения для одного и того же момента (как измеренные так и интерполированные) по всем рядам. - -- `uid` уникальный идентификатор временного ряда, `UInt64`. -- `timestamp` имеет тип `Int64` чтобы можно было учитывать милли и микросекунды. -- `value` представляет собой значение метрики. - -Функция возвращает массив кортежей с парами `(timestamp, aggregated_value)`. - -Временные ряды должны быть отсортированы по возрастанию `timestamp`. - -Пример: - -``` text -┌─uid─┬─timestamp─┬─value─┐ -│ 1 │ 2 │ 0.2 │ -│ 1 │ 7 │ 0.7 │ -│ 1 │ 12 │ 1.2 │ -│ 1 │ 17 │ 1.7 │ -│ 1 │ 25 │ 2.5 │ -│ 2 │ 3 │ 0.6 │ -│ 2 │ 8 │ 1.6 │ -│ 2 │ 12 │ 2.4 │ -│ 2 │ 18 │ 3.6 │ -│ 2 │ 24 │ 4.8 │ -└─────┴───────────┴───────┘ -``` - -``` sql -CREATE TABLE time_series( - uid UInt64, - timestamp Int64, - value Float64 -) ENGINE = Memory; -INSERT INTO time_series VALUES - (1,2,0.2),(1,7,0.7),(1,12,1.2),(1,17,1.7),(1,25,2.5), - (2,3,0.6),(2,8,1.6),(2,12,2.4),(2,18,3.6),(2,24,4.8); - -SELECT timeSeriesGroupSum(uid, timestamp, value) -FROM ( - SELECT * FROM time_series order by timestamp ASC -); -``` - -И результат будет: - -``` text -[(2,0.2),(3,0.9),(7,2.1),(8,2.4),(12,3.6),(17,5.1),(18,5.4),(24,7.2),(25,2.5)] -``` - -## timeSeriesGroupRateSum(uid, ts, val) {#agg-function-timeseriesgroupratesum} - -Аналогично timeSeriesGroupSum, timeSeriesGroupRateSum будет вычислять производные по timestamp для рядов, а затем суммировать полученные производные для всех рядов для одного значения timestamp. -Также ряды должны быть отсортированы по возрастанию timestamp. - -Для пример из описания timeSeriesGroupSum результат будет следующим: - -``` text -[(2,0),(3,0.1),(7,0.3),(8,0.3),(12,0.3),(17,0.3),(18,0.3),(24,0.3),(25,0.1)] -``` - -## avg(x) {#agg_function-avg} - -Вычисляет среднее. -Работает только для чисел. -Результат всегда Float64. - -## avgWeighted {#avgweighted} - -Вычисляет [среднее арифметическое взвешенное](https://ru.wikipedia.org/wiki/Среднее_арифметическое_взвешенное). 
- -**Синтаксис** - -``` sql -avgWeighted(x, weight) -``` - -**Параметры** - -- `x` — Значения. [Целые числа](../../sql-reference/aggregate-functions/reference.md) или [числа с плавающей запятой](../../sql-reference/aggregate-functions/reference.md). -- `weight` — Веса отдельных значений. [Целые числа](../../sql-reference/aggregate-functions/reference.md) или [числа с плавающей запятой](../../sql-reference/aggregate-functions/reference.md). - -Типы параметров должны совпадать. - -**Возвращаемое значение** - -- Среднее арифметическое взвешенное. -- `NaN`, если все веса равны 0. - -Тип: [Float64](../../sql-reference/aggregate-functions/reference.md) - -**Пример** - -Запрос: - -``` sql -SELECT avgWeighted(x, w) -FROM values('x Int8, w Int8', (4, 1), (1, 0), (10, 2)) -``` - -Результат: - -``` text -┌─avgWeighted(x, weight)─┐ -│ 8 │ -└────────────────────────┘ -``` - -## uniq {#agg_function-uniq} - -Приближённо вычисляет количество различных значений аргумента. - -``` sql -uniq(x[, ...]) -``` - -**Параметры** - -Функция принимает переменное число входных параметров. Параметры могут быть числовых типов, а также `Tuple`, `Array`, `Date`, `DateTime`, `String`. - -**Возвращаемое значение** - -- Значение с типом данных [UInt64](../../sql-reference/aggregate-functions/reference.md). - -**Детали реализации** - -Функция: - -- Вычисляет хэш для всех параметров агрегации, а затем использует его в вычислениях. - -- Использует адаптивный алгоритм выборки. В качестве состояния вычисления функция использует выборку хэш-значений элементов размером до 65536. - - Этот алгоритм очень точен и очень эффективен по использованию CPU. Если запрос содержит небольшое количество этих функций, использование `uniq` почти так же эффективно, как и использование других агрегатных функций. - -- Результат детерминирован (не зависит от порядка выполнения запроса). - -Эту функцию рекомендуется использовать практически во всех сценариях. - -**Смотрите также** - -- [uniqCombined](#agg_function-uniqcombined) -- [uniqCombined64](#agg_function-uniqcombined64) -- [uniqHLL12](#agg_function-uniqhll12) -- [uniqExact](#agg_function-uniqexact) - -## uniqCombined {#agg_function-uniqcombined} - -Приближённо вычисляет количество различных значений аргумента. - -``` sql -uniqCombined(HLL_precision)(x[, ...]) -``` - -Функция `uniqCombined` — это хороший выбор для вычисления количества различных значений. - -**Параметры** - -Функция принимает переменное число входных параметров. Параметры могут быть числовых типов, а также `Tuple`, `Array`, `Date`, `DateTime`, `String`. - -`HLL_precision` — это логарифм по основанию 2 от числа ячеек в [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog). Необязательный, можно использовать функцию как `uniqCombined (x [,...])`. Для `HLL_precision` значение по умолчанию — 17, что фактически составляет 96 КБ пространства (2^17 ячеек, 6 бит каждая). - -**Возвращаемое значение** - -- Число типа [UInt64](../../sql-reference/aggregate-functions/reference.md). - -**Детали реализации** - -Функция: - -- Вычисляет хэш (64-битный для `String` и 32-битный для всех остальных типов) для всех параметров агрегации, а затем использует его в вычислениях. - -- Используется комбинация трёх алгоритмов: массив, хэш-таблица и HyperLogLog с таблицей коррекции погрешности. - - Для небольшого количества различных значений используется массив. Если размер набора больше, используется хэш-таблица. При дальнейшем увеличении количества значений, используется структура HyperLogLog, имеющая фиксированный размер в памяти. 
- -- Результат детерминирован (не зависит от порядка выполнения запроса). - -!!! note "Note" - Так как используется 32-битный хэш для не-`String` типов, результат будет иметь очень очень большую ошибку для количества разичных элементов существенно больше `UINT_MAX` (ошибка быстро растёт начиная с нескольких десятков миллиардов различных значений), таким образом в этом случае нужно использовать [uniqCombined64](#agg_function-uniqcombined64) - -По сравнению с функцией [uniq](#agg_function-uniq), `uniqCombined`: - -- Потребляет в несколько раз меньше памяти. -- Вычисляет с в несколько раз более высокой точностью. -- Обычно имеет немного более низкую производительность. В некоторых сценариях `uniqCombined` может показывать более высокую производительность, чем `uniq`, например, в случае распределенных запросов, при которых по сети передаётся большое количество состояний агрегации. - -**Смотрите также** - -- [uniq](#agg_function-uniq) -- [uniqCombined64](#agg_function-uniqcombined64) -- [uniqHLL12](#agg_function-uniqhll12) -- [uniqExact](#agg_function-uniqexact) - -## uniqCombined64 {#agg_function-uniqcombined64} - -Использует 64-битный хэш для всех типов, в отличие от [uniqCombined](#agg_function-uniqcombined). - -## uniqHLL12 {#agg_function-uniqhll12} - -Вычисляет приблизительное число различных значений аргументов, используя алгоритм [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog). - -``` sql -uniqHLL12(x[, ...]) -``` - -**Параметры** - -Функция принимает переменное число входных параметров. Параметры могут быть числовых типов, а также `Tuple`, `Array`, `Date`, `DateTime`, `String`. - -**Возвращаемое значение** - -- Значение хэша с типом данных [UInt64](../../sql-reference/aggregate-functions/reference.md). - -**Детали реализации** - -Функция: - -- Вычисляет хэш для всех параметров агрегации, а затем использует его в вычислениях. - -- Использует алгоритм HyperLogLog для аппроксимации числа различных значений аргументов. - - Используется 212 5-битовых ячеек. Размер состояния чуть больше 2.5 КБ. Результат не точный (ошибка до ~10%) для небольших множеств (<10K элементов). Однако для множеств большой кардинальности (10K - 100M) результат довольно точен (ошибка до ~1.6%). Начиная с 100M ошибка оценки будет только расти и для множеств огромной кардинальности (1B+ элементов) функция возвращает результат с очень большой неточностью. - -- Результат детерминирован (не зависит от порядка выполнения запроса). - -Мы не рекомендуем использовать эту функцию. В большинстве случаев используйте функцию [uniq](#agg_function-uniq) или [uniqCombined](#agg_function-uniqcombined). - -**Смотрите также** - -- [uniq](#agg_function-uniq) -- [uniqCombined](#agg_function-uniqcombined) -- [uniqExact](#agg_function-uniqexact) - -## uniqExact {#agg_function-uniqexact} - -Вычисляет точное количество различных значений аргументов. - -``` sql -uniqExact(x[, ...]) -``` - -Функцию `uniqExact` следует использовать, если вам обязательно нужен точный результат. В противном случае используйте функцию [uniq](#agg_function-uniq). - -Функция `uniqExact` расходует больше оперативной памяти, чем функция `uniq`, так как размер состояния неограниченно растёт по мере роста количества различных значений. - -**Параметры** - -Функция принимает переменное число входных параметров. Параметры могут быть числовых типов, а также `Tuple`, `Array`, `Date`, `DateTime`, `String`. 
- -**Смотрите также** - -- [uniq](#agg_function-uniq) -- [uniqCombined](#agg_function-uniqcombined) -- [uniqHLL12](#agg_function-uniqhll12) - -## groupArray(x), groupArray(max\_size)(x) {#agg_function-grouparray} - -Составляет массив из значений аргумента. -Значения в массив могут быть добавлены в любом (недетерминированном) порядке. - -Вторая версия (с параметром `max_size`) ограничивает размер результирующего массива `max_size` элементами. -Например, `groupArray(1)(x)` эквивалентно `[any(x)]`. - -В некоторых случаях, вы всё же можете рассчитывать на порядок выполнения запроса. Это — случаи, когда `SELECT` идёт из подзапроса, в котором используется `ORDER BY`. - -## groupArrayInsertAt {#grouparrayinsertat} - -Вставляет значение в заданную позицию массива. - -**Синтаксис** - -```sql -groupArrayInsertAt(default_x, size)(x, pos); -``` - -Если запрос вставляет вставляется несколько значений в одну и ту же позицию, то функция ведет себя следующим образом: - -- Если запрос выполняется в одном потоке, то используется первое из вставляемых значений. -- Если запрос выполняется в нескольких потоках, то в результирующем массиве может оказаться любое из вставляемых значений. - -**Параметры** - -- `x` — Значение, которое будет вставлено. [Выражение](../syntax.md#syntax-expressions), возвращающее значение одного из [поддерживаемых типов данных](../../sql-reference/data-types/index.md#data_types). -- `pos` — Позиция, в которую вставляется заданный элемент `x`. Нумерация индексов в массиве начинается с нуля. [UInt32](../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-int8-int16-int32-int64). -- `default_x` — Значение по умолчанию для подстановки на пустые позиции. Опциональный параметр. [Выражение](../syntax.md#syntax-expressions), возвращающее значение с типом параметра `x`. Если `default_x` не определен, используются [значения по умолчанию](../../sql-reference/statements/create.md#create-default-values). -- `size`— Длина результирующего массива. Опциональный параметр. При использовании этого параметра должно быть указано значение по умолчанию `default_x`. [UInt32](../../sql-reference/data-types/int-uint.md#uint-ranges). - -**Возвращаемое значение** - -- Массив со вставленными значениями. - -Тип: [Array](../../sql-reference/data-types/array.md#data-type-array). - -**Примеры** - -Запрос: - -```sql -SELECT groupArrayInsertAt(toString(number), number * 2) FROM numbers(5); -``` - -Результат: - -```text -┌─groupArrayInsertAt(toString(number), multiply(number, 2))─┐ -│ ['0','','1','','2','','3','','4'] │ -└───────────────────────────────────────────────────────────┘ -``` - -Запрос: - -```sql -SELECT groupArrayInsertAt('-')(toString(number), number * 2) FROM numbers(5); -``` - -Результат: - -```text -┌─groupArrayInsertAt('-')(toString(number), multiply(number, 2))─┐ -│ ['0','-','1','-','2','-','3','-','4'] │ -└────────────────────────────────────────────────────────────────┘ -``` - -Запрос: - -```sql -SELECT groupArrayInsertAt('-', 5)(toString(number), number * 2) FROM numbers(5); -``` - -Результат: - -```text -┌─groupArrayInsertAt('-', 5)(toString(number), multiply(number, 2))─┐ -│ ['0','-','1','-','2'] │ -└───────────────────────────────────────────────────────────────────┘ -``` - -Многопоточная вставка элементов в одну позицию. - -Запрос: - -```sql -SELECT groupArrayInsertAt(number, 0) FROM numbers_mt(10) SETTINGS max_block_size = 1; -``` - -В результат этого запроса мы получите случайное целое число в диапазоне `[0,9]`. 
Например: - -```text -┌─groupArrayInsertAt(number, 0)─┐ -│ [7] │ -└───────────────────────────────┘ -``` - - -## groupArrayMovingSum {#agg_function-grouparraymovingsum} - -Вычисляет скользящую сумму входных значений. - -``` sql -groupArrayMovingSum(numbers_for_summing) -groupArrayMovingSum(window_size)(numbers_for_summing) -``` - -Функция может принимать размер окна в качестве параметра. Если окно не указано, то функция использует размер окна, равный количеству строк в столбце. - -**Параметры** - -- `numbers_for_summing` — [выражение](../syntax.md#syntax-expressions), возвращающее значение числового типа. -- `window_size` — размер окна. - -**Возвращаемые значения** - -- Массив того же размера и типа, что и входные данные. - -**Пример** - -Таблица с исходными данными: - -``` sql -CREATE TABLE t -( - `int` UInt8, - `float` Float32, - `dec` Decimal32(2) -) -ENGINE = TinyLog -``` - -``` text -┌─int─┬─float─┬──dec─┐ -│ 1 │ 1.1 │ 1.10 │ -│ 2 │ 2.2 │ 2.20 │ -│ 4 │ 4.4 │ 4.40 │ -│ 7 │ 7.77 │ 7.77 │ -└─────┴───────┴──────┘ -``` - -Запросы: - -``` sql -SELECT - groupArrayMovingSum(int) AS I, - groupArrayMovingSum(float) AS F, - groupArrayMovingSum(dec) AS D -FROM t -``` - -``` text -┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐ -│ [1,3,7,14] │ [1.1,3.3000002,7.7000003,15.47] │ [1.10,3.30,7.70,15.47] │ -└────────────┴─────────────────────────────────┴────────────────────────┘ -``` - -``` sql -SELECT - groupArrayMovingSum(2)(int) AS I, - groupArrayMovingSum(2)(float) AS F, - groupArrayMovingSum(2)(dec) AS D -FROM t -``` - -``` text -┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐ -│ [1,3,6,11] │ [1.1,3.3000002,6.6000004,12.17] │ [1.10,3.30,6.60,12.17] │ -└────────────┴─────────────────────────────────┴────────────────────────┘ -``` - -## groupArrayMovingAvg {#agg_function-grouparraymovingavg} - -Вычисляет скользящее среднее для входных значений. - - groupArrayMovingAvg(numbers_for_summing) - groupArrayMovingAvg(window_size)(numbers_for_summing) - -Функция может принимать размер окна в качестве параметра. Если окно не указано, то функция использует размер окна, равный количеству строк в столбце. - -**Параметры** - -- `numbers_for_summing` — [выражение](../syntax.md#syntax-expressions), возвращающее значение числового типа. -- `window_size` — размер окна. - -**Возвращаемые значения** - -- Массив того же размера и типа, что и входные данные. - -Функция использует [округление к меньшему по модулю](https://ru.wikipedia.org/wiki/Округление#Методы). Оно усекает десятичные разряды, незначимые для результирующего типа данных. 
-
-**Пример**
-
-Таблица с исходными данными:
-
-``` sql
-CREATE TABLE t
-(
-    `int` UInt8,
-    `float` Float32,
-    `dec` Decimal32(2)
-)
-ENGINE = TinyLog
-```
-
-``` text
-┌─int─┬─float─┬──dec─┐
-│   1 │   1.1 │ 1.10 │
-│   2 │   2.2 │ 2.20 │
-│   4 │   4.4 │ 4.40 │
-│   7 │  7.77 │ 7.77 │
-└─────┴───────┴──────┘
-```
-
-Запросы:
-
-``` sql
-SELECT
-    groupArrayMovingAvg(int) AS I,
-    groupArrayMovingAvg(float) AS F,
-    groupArrayMovingAvg(dec) AS D
-FROM t
-```
-
-``` text
-┌─I─────────┬─F───────────────────────────────────┬─D─────────────────────┐
-│ [0,0,1,3] │ [0.275,0.82500005,1.9250001,3.8675] │ [0.27,0.82,1.92,3.86] │
-└───────────┴─────────────────────────────────────┴───────────────────────┘
-```
-
-``` sql
-SELECT
-    groupArrayMovingAvg(2)(int) AS I,
-    groupArrayMovingAvg(2)(float) AS F,
-    groupArrayMovingAvg(2)(dec) AS D
-FROM t
-```
-
-``` text
-┌─I─────────┬─F────────────────────────────────┬─D─────────────────────┐
-│ [0,1,3,5] │ [0.55,1.6500001,3.3000002,6.085] │ [0.55,1.65,3.30,6.08] │
-└───────────┴──────────────────────────────────┴───────────────────────┘
-```
-
-## groupUniqArray(x), groupUniqArray(max\_size)(x) {#groupuniqarrayx-groupuniqarraymax-sizex}
-
-Составляет массив из различных значений аргумента. Расход оперативной памяти такой же, как у функции `uniqExact`.
-
-Функция `groupUniqArray(max_size)(x)` ограничивает размер результирующего массива до `max_size` элементов. Например, `groupUniqArray(1)(x)` равнозначно `[any(x)]`.
-
-## quantile {#quantile}
-
-Приблизительно вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности.
-
-Функция использует алгоритм [reservoir sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) с размером резервуара до 8192 и генератором случайных чисел для сэмплирования. Результат не детерминирован. Чтобы получить точную квантиль, используйте функцию [quantileExact](#quantileexact).
-
-Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса.
-
-**Синтаксис**
-
-``` sql
-quantile(level)(expr)
-```
-
-Алиас: `median`.
-
-**Параметры**
-
-- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)).
-- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../sql-reference/aggregate-functions/reference.md#data_types) или типов [Date](../../sql-reference/aggregate-functions/reference.md), [DateTime](../../sql-reference/aggregate-functions/reference.md).
-
-**Возвращаемое значение**
-
-- Приблизительный квантиль заданного уровня.
-
-Тип:
-
-- [Float64](../../sql-reference/aggregate-functions/reference.md) для входных данных числового типа.
-- [Date](../../sql-reference/aggregate-functions/reference.md), если входные значения имеют тип `Date`.
-- [DateTime](../../sql-reference/aggregate-functions/reference.md), если входные значения имеют тип `DateTime`.
- -**Пример** - -Входная таблица: - -``` text -┌─val─┐ -│ 1 │ -│ 1 │ -│ 2 │ -│ 3 │ -└─────┘ -``` - -Запрос: - -``` sql -SELECT quantile(val) FROM t -``` - -Результат: - -``` text -┌─quantile(val)─┐ -│ 1.5 │ -└───────────────┘ -``` - -**Смотрите также** - -- [median](#median) -- [quantiles](#quantiles) - -## quantileDeterministic {#quantiledeterministic} - -Приблизительно вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности. - -Функция использует алгоритм [reservoir sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) с размером резервуара до 8192 и детерминированным алгоритмом сэмплирования. Результат детерминирован. Чтобы получить точную квантиль используйте функцию [quantileExact](#quantileexact). - -Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса. - -**Синтаксис** - -``` sql -quantileDeterministic(level)(expr, determinator) -``` - -Алиас: `medianDeterministic`. - -**Параметры** - -- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)). -- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../sql-reference/aggregate-functions/reference.md#data_types) или типов [Date](../../sql-reference/aggregate-functions/reference.md), [DateTime](../../sql-reference/aggregate-functions/reference.md). -- `determinator` — Число, хэш которого используется при сэмплировании в алгоритме reservoir sampling, чтобы сделать результат детерминированным. В качестве детерминатора можно использовать любое определённое положительное число, например, идентификатор пользователя или события. Если одно и то же значение детерминатора попадается в выборке слишком часто, то функция выдаёт некорректный результат. - -**Возвращаемое значение** - -- Приблизительный квантиль заданного уровня. - -Тип: - -- [Float64](../../sql-reference/aggregate-functions/reference.md) для входных данных числового типа. -- [Date](../../sql-reference/aggregate-functions/reference.md) если входные значения имеют тип `Date`. -- [DateTime](../../sql-reference/aggregate-functions/reference.md) если входные значения имеют тип `DateTime`. - -**Пример** - -Входная таблица: - -``` text -┌─val─┐ -│ 1 │ -│ 1 │ -│ 2 │ -│ 3 │ -└─────┘ -``` - -Запрос: - -``` sql -SELECT quantileDeterministic(val, 1) FROM t -``` - -Результат: - -``` text -┌─quantileDeterministic(val, 1)─┐ -│ 1.5 │ -└───────────────────────────────┘ -``` - -**Смотрите также** - -- [median](#median) -- [quantiles](#quantiles) - -## quantileExact {#quantileexact} - -Точно вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности. - -Чтобы получить точный результат, все переданные значения собираются в массив, который затем частично сортируется. Таким образом, функция потребляет объем памяти `O(n)`, где `n` — количество переданных значений. Для небольшого числа значений эта функция эффективна. - -Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса. 
-
-**Синтаксис**
-
-``` sql
-quantileExact(level)(expr)
-```
-
-Алиас: `medianExact`.
-
-**Параметры**
-
-- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)).
-- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../sql-reference/aggregate-functions/reference.md#data_types) или типов [Date](../../sql-reference/aggregate-functions/reference.md), [DateTime](../../sql-reference/aggregate-functions/reference.md).
-
-**Возвращаемое значение**
-
-- Квантиль заданного уровня.
-
-Тип:
-
-- [Float64](../../sql-reference/aggregate-functions/reference.md) для входных данных числового типа.
-- [Date](../../sql-reference/aggregate-functions/reference.md), если входные значения имеют тип `Date`.
-- [DateTime](../../sql-reference/aggregate-functions/reference.md), если входные значения имеют тип `DateTime`.
-
-**Пример**
-
-Запрос:
-
-``` sql
-SELECT quantileExact(number) FROM numbers(10)
-```
-
-Результат:
-
-``` text
-┌─quantileExact(number)─┐
-│                     5 │
-└───────────────────────┘
-```
-
-**Смотрите также**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileExactWeighted {#quantileexactweighted}
-
-Точно вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности, учитывая вес каждого её элемента.
-
-Чтобы получить точный результат, все переданные значения собираются в массив, который затем частично сортируется. Для каждого значения учитывается его вес (количество значений в выборке). В алгоритме используется хэш-таблица. Таким образом, если переданные значения часто повторяются, функция потребляет меньше оперативной памяти, чем [quantileExact](#quantileexact). Эту функцию можно использовать вместо `quantileExact`, если указать вес 1.
-
-Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса.
-
-**Синтаксис**
-
-``` sql
-quantileExactWeighted(level)(expr, weight)
-```
-
-Алиас: `medianExactWeighted`.
-
-**Параметры**
-
-- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)).
-- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../sql-reference/aggregate-functions/reference.md#data_types) или типов [Date](../../sql-reference/aggregate-functions/reference.md), [DateTime](../../sql-reference/aggregate-functions/reference.md).
-- `weight` — Столбец с весами элементов последовательности. Вес — это количество повторений элемента в последовательности.
-
-**Возвращаемое значение**
-
-- Квантиль заданного уровня.
-
-Тип:
-
-- [Float64](../../sql-reference/aggregate-functions/reference.md) для входных данных числового типа.
-- [Date](../../sql-reference/aggregate-functions/reference.md), если входные значения имеют тип `Date`.
-- [DateTime](../../sql-reference/aggregate-functions/reference.md), если входные значения имеют тип `DateTime`.
-
-**Пример**
-
-Входная таблица:
-
-``` text
-┌─n─┬─val─┐
-│ 0 │   3 │
-│ 1 │   2 │
-│ 2 │   1 │
-│ 5 │   4 │
-└───┴─────┘
-```
-
-Запрос:
-
-``` sql
-SELECT quantileExactWeighted(n, val) FROM t
-```
-
-Результат:
-
-``` text
-┌─quantileExactWeighted(n, val)─┐
-│                             1 │
-└───────────────────────────────┘
-```
-
-**Смотрите также**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileTiming {#quantiletiming}
-
-Вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности с детерминированной точностью.
-
-Результат детерминирован (не зависит от порядка обработки запроса). Функция оптимизирована для работы с последовательностями, описывающими такие распределения, как время загрузки веб-страниц или время отклика бэкенда.
-
-Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса.
-
-**Синтаксис**
-
-``` sql
-quantileTiming(level)(expr)
-```
-
-Алиас: `medianTiming`.
-
-**Параметры**
-
-- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)).
-
-- `expr` — [Выражение](../syntax.md#syntax-expressions) над значениями столбца, возвращающее данные типа [Float\*](../../sql-reference/aggregate-functions/reference.md).
-
-    - Если в функцию передать отрицательные значения, то её поведение не определено.
-    - Если значение больше, чем 30 000 (например, время загрузки страницы превышает 30 секунд), то оно приравнивается к 30 000.
-
-**Точность**
-
-Вычисления точны при соблюдении следующих условий:
-
-- Размер выборки не превышает 5670 элементов.
-- Размер выборки превышает 5670 элементов, но значение каждого элемента не больше 1024.
-
-В противном случае результат вычисления округляется до ближайшего числа, кратного 16.
-
-!!! note "Примечание"
-    Для указанного типа последовательностей функция производительнее и точнее, чем [quantile](#quantile).
-
-**Возвращаемое значение**
-
-- Квантиль заданного уровня.
-
-Тип: `Float32`.
-
-!!! note "Примечания"
-    Если в функцию не передать значений (например, при использовании `quantileTimingIf`), то вернётся [NaN](../../sql-reference/aggregate-functions/reference.md#data_type-float-nan-inf). Это необходимо для отделения подобных случаев от случаев, когда результат 0. Подробности про сортировку `NaN` смотрите в разделе [Секция ORDER BY](../../sql-reference/statements/select/order-by.md#select-order-by).
-
-**Пример**
-
-Входная таблица:
-
-``` text
-┌─response_time─┐
-│            72 │
-│           112 │
-│           126 │
-│           145 │
-│           104 │
-│           242 │
-│           313 │
-│           168 │
-│           108 │
-└───────────────┘
-```
-
-Запрос:
-
-``` sql
-SELECT quantileTiming(response_time) FROM t
-```
-
-Результат:
-
-``` text
-┌─quantileTiming(response_time)─┐
-│                           126 │
-└───────────────────────────────┘
-```
-
-**Смотрите также**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileTimingWeighted {#quantiletimingweighted}
-
-С детерминированной точностью вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности, учитывая вес каждого элемента.
-
-Результат детерминирован (не зависит от порядка обработки запроса). Функция оптимизирована для работы с последовательностями, описывающими такие распределения, как время загрузки веб-страниц или время отклика бэкенда.
-
-Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса.
-
-**Синтаксис**
-
-``` sql
-quantileTimingWeighted(level)(expr, weight)
-```
-
-Алиас: `medianTimingWeighted`.
-
-**Параметры**
-
-- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)).
-
-- `expr` — [Выражение](../syntax.md#syntax-expressions) над значениями столбца, возвращающее данные типа [Float\*](../../sql-reference/aggregate-functions/reference.md).
-
-    - Если в функцию передать отрицательные значения, то её поведение не определено.
-    - Если значение больше, чем 30 000 (например, время загрузки страницы превышает 30 секунд), то оно приравнивается к 30 000.
-
-- `weight` — Столбец с весами элементов последовательности. Вес — это количество повторений элемента в последовательности.
-
-**Точность**
-
-Вычисления точны при соблюдении следующих условий:
-
-- Размер выборки не превышает 5670 элементов.
-- Размер выборки превышает 5670 элементов, но значение каждого элемента не больше 1024.
-
-В противном случае результат вычисления округляется до ближайшего числа, кратного 16.
-
-!!! note "Примечание"
-    Для указанного типа последовательностей функция производительнее и точнее, чем [quantile](#quantile).
-
-**Возвращаемое значение**
-
-- Квантиль заданного уровня.
-
-Тип: `Float32`.
-
-!!! note "Примечания"
-    Если в функцию не передать значений (например, при использовании `quantileTimingIf`), то вернётся [NaN](../../sql-reference/aggregate-functions/reference.md#data_type-float-nan-inf). Это необходимо для отделения подобных случаев от случаев, когда результат 0. Подробности про сортировку `NaN` смотрите в разделе [Секция ORDER BY](../../sql-reference/statements/select/order-by.md#select-order-by).
-
-**Пример**
-
-Входная таблица:
-
-``` text
-┌─response_time─┬─weight─┐
-│            68 │      1 │
-│           104 │      2 │
-│           112 │      3 │
-│           126 │      2 │
-│           138 │      1 │
-│           162 │      1 │
-└───────────────┴────────┘
-```
-
-Запрос:
-
-``` sql
-SELECT quantileTimingWeighted(response_time, weight) FROM t
-```
-
-Результат:
-
-``` text
-┌─quantileTimingWeighted(response_time, weight)─┐
-│                                           112 │
-└───────────────────────────────────────────────┘
-```
-
-**Смотрите также**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileTDigest {#quantiletdigest}
-
-Приблизительно вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности, используя алгоритм [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf).
-
-Максимальная ошибка 1%. Потребление памяти — `log(n)`, где `n` — число значений. Результат не детерминирован и зависит от порядка выполнения запроса.
-
-Производительность функции ниже, чем производительность функции [quantile](#quantile) или [quantileTiming](#quantiletiming). По соотношению размера состояния к точности вычисления эта функция значительно превосходит `quantile`.
-
-Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса.
-
-**Синтаксис**
-
-``` sql
-quantileTDigest(level)(expr)
-```
-
-Алиас: `medianTDigest`.
-
-**Параметры**
-
-- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)).
-- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../sql-reference/aggregate-functions/reference.md#data_types) или типов [Date](../../sql-reference/aggregate-functions/reference.md), [DateTime](../../sql-reference/aggregate-functions/reference.md).
-
-**Возвращаемое значение**
-
-- Приблизительный квантиль заданного уровня.
-
-Тип:
-
-- [Float64](../../sql-reference/aggregate-functions/reference.md) для входных данных числового типа.
-- [Date](../../sql-reference/aggregate-functions/reference.md), если входные значения имеют тип `Date`.
-- [DateTime](../../sql-reference/aggregate-functions/reference.md), если входные значения имеют тип `DateTime`.
-
-**Пример**
-
-Запрос:
-
-``` sql
-SELECT quantileTDigest(number) FROM numbers(10)
-```
-
-Результат:
-
-``` text
-┌─quantileTDigest(number)─┐
-│                     4.5 │
-└─────────────────────────┘
-```
-
-**Смотрите также**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## quantileTDigestWeighted {#quantiletdigestweighted}
-
-Приблизительно вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности, используя алгоритм [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf). Функция учитывает вес каждого элемента последовательности.
-
-Максимальная ошибка 1%. Потребление памяти — `log(n)`, где `n` — число значений. Результат не детерминирован и зависит от порядка выполнения запроса.
-
-Производительность функции ниже, чем производительность функции [quantile](#quantile) или [quantileTiming](#quantiletiming). По соотношению размера состояния к точности вычисления эта функция значительно превосходит `quantile`.
-
-Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса.
-
-**Синтаксис**
-
-``` sql
-quantileTDigestWeighted(level)(expr, weight)
-```
-
-Алиас: `medianTDigestWeighted`.
-
-**Параметры**
-
-- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)).
-- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../sql-reference/aggregate-functions/reference.md#data_types) или типов [Date](../../sql-reference/aggregate-functions/reference.md), [DateTime](../../sql-reference/aggregate-functions/reference.md).
-- `weight` — Столбец с весами элементов последовательности. Вес — это количество повторений элемента в последовательности.
-
-**Возвращаемое значение**
-
-- Приблизительный квантиль заданного уровня.
-
-Тип:
-
-- [Float64](../../sql-reference/aggregate-functions/reference.md) для входных данных числового типа.
-- [Date](../../sql-reference/aggregate-functions/reference.md), если входные значения имеют тип `Date`.
-- [DateTime](../../sql-reference/aggregate-functions/reference.md), если входные значения имеют тип `DateTime`.
-
-**Пример**
-
-Запрос:
-
-``` sql
-SELECT quantileTDigestWeighted(number, 1) FROM numbers(10)
-```
-
-Результат:
-
-``` text
-┌─quantileTDigestWeighted(number, 1)─┐
-│                                4.5 │
-└────────────────────────────────────┘
-```
-
-**Смотрите также**
-
-- [median](#median)
-- [quantiles](#quantiles)
-
-## median {#median}
-
-Функции `median*` — алиасы для соответствующих функций `quantile*`. Они вычисляют медиану числовой последовательности.
-
-Функции:
-
-- `median` — алиас [quantile](#quantile).
-- `medianDeterministic` — алиас [quantileDeterministic](#quantiledeterministic).
-- `medianExact` — алиас [quantileExact](#quantileexact).
-- `medianExactWeighted` — алиас [quantileExactWeighted](#quantileexactweighted).
-- `medianTiming` — алиас [quantileTiming](#quantiletiming).
-- `medianTimingWeighted` — алиас [quantileTimingWeighted](#quantiletimingweighted).
-- `medianTDigest` — алиас [quantileTDigest](#quantiletdigest).
-- `medianTDigestWeighted` — алиас [quantileTDigestWeighted](#quantiletdigestweighted).
-
-**Пример**
-
-Входная таблица:
-
-``` text
-┌─val─┐
-│   1 │
-│   1 │
-│   2 │
-│   3 │
-└─────┘
-```
-
-Запрос:
-
-``` sql
-SELECT medianDeterministic(val, 1) FROM t
-```
-
-Результат:
-
-``` text
-┌─medianDeterministic(val, 1)─┐
-│                         1.5 │
-└─────────────────────────────┘
-```
-
-## quantiles(level1, level2, …)(x) {#quantiles}
-
-Для всех quantile-функций также присутствуют соответствующие quantiles-функции: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`. Эти функции за один проход вычисляют все квантили перечисленных уровней и возвращают массив вычисленных значений.
-
-## varSamp(x) {#varsampx}
-
-Вычисляет величину `Σ((x - x̅)^2) / (n - 1)`, где `n` — размер выборки, `x̅` — среднее значение `x`.
-
-Она представляет собой несмещённую оценку дисперсии случайной величины, если переданные в функцию значения являются выборкой этой случайной величины.
-
-Возвращает `Float64`. В случае, когда `n <= 1`, возвращается `+∞`.
-
-!!! note "Примечание"
-    Функция использует вычислительно неустойчивый алгоритм. Если для ваших расчётов необходима [вычислительная устойчивость](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость), используйте функцию `varSampStable`. Она работает медленнее, но обеспечивает меньшую вычислительную ошибку.
-
-## varPop(x) {#varpopx}
-
-Вычисляет величину `Σ((x - x̅)^2) / n`, где `n` — размер выборки, `x̅` — среднее значение `x`.
-
-То есть, дисперсию для множества значений. Возвращает `Float64`.
-
-!!! note "Примечание"
-    Функция использует вычислительно неустойчивый алгоритм. Если для ваших расчётов необходима [вычислительная устойчивость](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость), используйте функцию `varPopStable`. Она работает медленнее, но обеспечивает меньшую вычислительную ошибку.
-
-## stddevSamp(x) {#stddevsampx}
-
-Результат равен квадратному корню от `varSamp(x)`.
-
-!!! note "Примечание"
-    Функция использует вычислительно неустойчивый алгоритм. Если для ваших расчётов необходима [вычислительная устойчивость](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость), используйте функцию `stddevSampStable`. Она работает медленнее, но обеспечивает меньшую вычислительную ошибку.
-
-## stddevPop(x) {#stddevpopx}
-
-Результат равен квадратному корню от `varPop(x)`.
-
-!!! note "Примечание"
-    Функция использует вычислительно неустойчивый алгоритм. Если для ваших расчётов необходима [вычислительная устойчивость](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость), используйте функцию `stddevPopStable`. Она работает медленнее, но обеспечивает меньшую вычислительную ошибку.
-
-## topK(N)(column) {#topkncolumn}
-
-Возвращает массив наиболее часто встречающихся значений в указанном столбце. Результирующий массив упорядочен по убыванию частоты значения (не по самим значениям).
-
-Реализует алгоритм [Filtered Space-Saving](http://www.l2f.inesc-id.pt/~fmmb/wiki/uploads/Work/misnis.ref0a.pdf) для анализа TopK на основе reduce-and-combine алгоритма из методики [Parallel Space Saving](https://arxiv.org/pdf/1401.0702.pdf).
-
-``` sql
-topK(N)(column)
-```
-
-Функция не дает гарантированного результата. В некоторых ситуациях могут возникать ошибки, и функция возвращает частые, но не наиболее частые значения.
-
-Рекомендуем использовать значения `N < 10`, при больших `N` снижается производительность. Максимально возможное значение `N = 65536`.
-
-**Аргументы**
-
-- `N` — Количество значений.
-- `column` — Столбец.
-
-**Пример**
-
-Возьмём набор данных [OnTime](../../getting-started/example-datasets/ontime.md) и выберем 3 наиболее часто встречающихся значения в столбце `AirlineID`.
-
-``` sql
-SELECT topK(3)(AirlineID) AS res
-FROM ontime
-```
-
-``` text
-┌─res─────────────────┐
-│ [19393,19790,19805] │
-└─────────────────────┘
-```
-
-## topKWeighted {#topkweighted}
-
-Аналогична `topK`, но дополнительно принимает положительный целочисленный аргумент `weight`. Каждое значение учитывается `weight` раз при расчёте частоты.
-
-**Синтаксис**
-
-``` sql
-topKWeighted(N)(x, weight)
-```
-
-**Параметры**
-
-- `N` — Количество элементов для выдачи.
-
-**Аргументы**
-
-- `x` — значение.
-- `weight` — вес. [UInt8](../../sql-reference/aggregate-functions/reference.md).
-
-**Возвращаемое значение**
-
-Возвращает массив значений с максимально приближенной суммой весов.
-
-**Пример**
-
-Запрос:
-
-``` sql
-SELECT topKWeighted(10)(number, number) FROM numbers(1000)
-```
-
-Результат:
-
-``` text
-┌─topKWeighted(10)(number, number)──────────┐
-│ [999,998,997,996,995,994,993,992,991,990] │
-└───────────────────────────────────────────┘
-```
-
-## covarSamp(x, y) {#covarsampx-y}
-
-Вычисляет величину `Σ((x - x̅)(y - y̅)) / (n - 1)`.
-
-Возвращает Float64. В случае, когда `n <= 1`, возвращается +∞.
-
-!!! note "Примечание"
-    Функция использует вычислительно неустойчивый алгоритм. Если для ваших расчётов необходима [вычислительная устойчивость](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость), используйте функцию `covarSampStable`. Она работает медленнее, но обеспечивает меньшую вычислительную ошибку.
-
-## covarPop(x, y) {#covarpopx-y}
-
-Вычисляет величину `Σ((x - x̅)(y - y̅)) / n`.
-
-!!! note "Примечание"
-    Функция использует вычислительно неустойчивый алгоритм. Если для ваших расчётов необходима [вычислительная устойчивость](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость), используйте функцию `covarPopStable`. Она работает медленнее, но обеспечивает меньшую вычислительную ошибку.
-
-## corr(x, y) {#corrx-y}
-
-Вычисляет коэффициент корреляции Пирсона: `Σ((x - x̅)(y - y̅)) / sqrt(Σ((x - x̅)^2) * Σ((y - y̅)^2))`.
-
-!!! note "Примечание"
-    Функция использует вычислительно неустойчивый алгоритм. Если для ваших расчётов необходима [вычислительная устойчивость](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость), используйте функцию `corrStable`. Она работает медленнее, но обеспечивает меньшую вычислительную ошибку.
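-
-Иллюстративный запрос (добавлен для наглядности): для точек, лежащих на одной прямой, коэффициент корреляции Пирсона равен 1; из-за вычислительной неустойчивости алгоритма последние знаки результата могут отличаться.
-
-``` sql
-SELECT corr(x, y)
-FROM values('x Float64, y Float64', (1, 2), (2, 4), (3, 6))
-```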
-
-## simpleLinearRegression {#simplelinearregression}
-
-Выполняет простую (одномерную) линейную регрессию.
-
-``` sql
-simpleLinearRegression(x, y)
-```
-
-Параметры:
-
-- `x` — столбец со значениями объясняющей (независимой) переменной.
-- `y` — столбец со значениями зависимой переменной.
-
-Возвращаемые значения:
-
-Константы `(a, b)` результирующей прямой `y = a*x + b`.
-
-**Примеры**
-
-``` sql
-SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])
-```
-
-``` text
-┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])─┐
-│ (1,0)                                                             │
-└───────────────────────────────────────────────────────────────────┘
-```
-
-``` sql
-SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])
-```
-
-``` text
-┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])─┐
-│ (1,3)                                                             │
-└───────────────────────────────────────────────────────────────────┘
-```
-
-## stochasticLinearRegression {#agg_functions-stochasticlinearregression}
-
-Функция реализует стохастическую линейную регрессию. Поддерживает пользовательские параметры для скорости обучения, коэффициента регуляризации L2, размера mini-batch и имеет несколько методов обновления весов ([Adam](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (по умолчанию), [simple SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [Momentum](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)).
-
-### Параметры {#agg_functions-stochasticlinearregression-parameters}
-
-Есть 4 настраиваемых параметра. Они передаются в функцию последовательно, при этом указывать все не обязательно: для пропущенных используются значения по умолчанию. Однако хорошая модель требует некоторой настройки параметров.
-
-``` text
-stochasticLinearRegression(1.0, 1.0, 10, 'SGD')
-```
-
-1.  Скорость обучения — коэффициент длины шага при выполнении градиентного спуска. Слишком большая скорость обучения может привести к бесконечным весам модели. По умолчанию `0.00001`.
-2.  Коэффициент регуляризации l2. Помогает предотвратить переобучение. По умолчанию `0.1`.
-3.  Размер mini-batch задаёт количество элементов, чьи градиенты будут вычислены и просуммированы при выполнении одного шага градиентного спуска. Чистый стохастический спуск использует один элемент, однако использование mini-batch (около 10 элементов) делает градиентные шаги более стабильными. По умолчанию `15`.
-4.  Метод обновления весов, можно выбрать один из следующих: `Adam` (по умолчанию), `SGD`, `Momentum`, `Nesterov`. `Momentum` и `Nesterov` несколько более требовательны к вычислительным ресурсам и памяти, однако обеспечивают более высокую скорость сходимости и устойчивость методов стохастического градиента.
-
-### Использование {#agg_functions-stochasticlinearregression-usage}
-
-`stochasticLinearRegression` используется на двух этапах: построение модели и предсказание новых данных. Чтобы построить модель и сохранить её состояние для дальнейшего использования, мы используем комбинатор `-State`.
-Для прогнозирования мы используем функцию [evalMLMethod](../functions/machine-learning-functions.md#machine_learning_methods-evalmlmethod), которая принимает в качестве аргументов состояние и свойства для прогнозирования.
-
-**1.** Построение модели
-
-Пример запроса:
-
-``` sql
-CREATE TABLE IF NOT EXISTS train_data
-(
-    param1 Float64,
-    param2 Float64,
-    target Float64
-) ENGINE = Memory;
-
-CREATE TABLE your_model ENGINE = Memory AS SELECT
-stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2)
-AS state FROM train_data;
-```
-
-Здесь нам также нужно вставить данные в таблицу `train_data`. Количество параметров не фиксировано, оно зависит только от количества аргументов, переданных в `stochasticLinearRegressionState`. Все они должны быть числовыми значениями.
-Обратите внимание, что столбец с целевым значением (которое мы хотели бы научиться предсказывать) вставляется в качестве первого аргумента.
-
-**2.** Прогнозирование
-
-После сохранения состояния в таблице мы можем использовать его несколько раз для прогнозирования или объединить с другими состояниями и создать новые, улучшенные модели.
-
-``` sql
-WITH (SELECT state FROM your_model) AS model SELECT
-evalMLMethod(model, param1, param2) FROM test_data
-```
-
-Запрос возвращает столбец прогнозируемых значений. Обратите внимание, что первый аргумент `evalMLMethod` — это объект `AggregateFunctionState`, далее идут столбцы свойств.
-
-`test_data` — это таблица, подобная `train_data`, но при этом может не содержать целевое значение.
-
-### Примечания {#agg_functions-stochasticlinearregression-notes}
-
-1.  Объединить две модели можно следующим запросом:
-
-    ``` sql
-    SELECT state1 + state2 FROM your_models
-    ```
-
-    где таблица `your_models` содержит обе модели. Запрос вернёт новый объект `AggregateFunctionState`.
-
-2.  Пользователь может получать веса созданной модели для своих целей без сохранения модели, если не использовать комбинатор `-State`.
-
-    ``` sql
-    SELECT stochasticLinearRegression(0.01)(target, param1, param2) FROM train_data
-    ```
-
-    Подобный запрос строит модель и возвращает её веса: сначала веса, соответствующие параметрам модели, затем смещение. Таким образом, в приведенном выше примере запрос вернет столбец с тремя значениями.
-
-**Смотрите также**
-
-- [stochasticLogisticRegression](#agg_functions-stochasticlogisticregression)
-- [Отличие линейной от логистической регрессии.](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)
-
-## stochasticLogisticRegression {#agg_functions-stochasticlogisticregression}
-
-Функция реализует стохастическую логистическую регрессию. Её можно использовать для задачи бинарной классификации, функция поддерживает те же пользовательские параметры, что и stochasticLinearRegression, и работает таким же образом.
-
-### Параметры {#agg_functions-stochasticlogisticregression-parameters}
-
-Параметры те же, что и в stochasticLinearRegression:
-`learning rate`, `l2 regularization coefficient`, `mini-batch size`, `method for updating weights`.
-Смотрите раздел [parameters](#agg_functions-stochasticlinearregression-parameters).
-
-``` text
-stochasticLogisticRegression(1.0, 1.0, 10, 'SGD')
-```
-
-1.  Построение модели
-
-    Смотрите раздел `Построение модели` в описании [stochasticLinearRegression](#stochasticlinearregression-usage-fitting).
-
-    Прогнозируемые метки должны быть в диапазоне \[-1, 1\].
-
-2.  Прогнозирование
-
-    Используя сохраненное состояние, можно предсказать вероятность наличия у объекта метки `1`.
-
-    ``` sql
-    WITH (SELECT state FROM your_model) AS model SELECT
-    evalMLMethod(model, param1, param2) FROM test_data
-    ```
-
-Запрос возвращает столбец вероятностей.
Обратите внимание, что первый аргумент `evalMLMethod` — это объект `AggregateFunctionState`, далее идут столбцы свойств.
-
-Мы также можем установить границу вероятности, которая присваивает элементам различные метки.
-
-    ``` sql
-    SELECT ans < 1.1 AND ans > 0.5 FROM
-    (WITH (SELECT state FROM your_model) AS model SELECT
-    evalMLMethod(model, param1, param2) AS ans FROM test_data)
-    ```
-
-Тогда результатом будут метки.
-
-`test_data` — это таблица, подобная `train_data`, но при этом может не содержать целевое значение.
-
-**Смотрите также**
-
-- [stochasticLinearRegression](#agg_functions-stochasticlinearregression)
-- [Отличие линейной от логистической регрессии](https://moredez.ru/q/51225972/)
-
-[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/agg_functions/reference/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/any.md b/docs/ru/sql-reference/aggregate-functions/reference/any.md
new file mode 100644
index 00000000000..38c412813ab
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/any.md
@@ -0,0 +1,15 @@
+---
+toc_priority: 6
+---
+
+# any {#agg_function-any}
+
+Выбирает первое попавшееся значение.
+Порядок выполнения запроса может быть произвольным и даже каждый раз разным, поэтому результат данной функции недетерминирован.
+Для получения детерминированного результата можно использовать функции `min` или `max` вместо `any`.
+
+В некоторых случаях вы всё-таки можете рассчитывать на порядок выполнения запроса. Это — случаи, когда `SELECT` идёт из подзапроса, в котором используется `ORDER BY`.
+
+При наличии в запросе `SELECT` секции `GROUP BY` или хотя бы одной агрегатной функции, ClickHouse (в отличие от, например, MySQL) требует, чтобы все выражения в секциях `SELECT`, `HAVING`, `ORDER BY` вычислялись из ключей или из агрегатных функций. То есть, каждый выбираемый из таблицы столбец должен использоваться либо в ключах, либо внутри агрегатных функций. Чтобы получить поведение, как в MySQL, вы можете поместить остальные столбцы в агрегатную функцию `any`.
+
+[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/any/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/anyheavy.md b/docs/ru/sql-reference/aggregate-functions/reference/anyheavy.md
new file mode 100644
index 00000000000..19fda7f64b7
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/anyheavy.md
@@ -0,0 +1,32 @@
+---
+toc_priority: 103
+---
+
+# anyHeavy {#anyheavyx}
+
+Выбирает часто встречающееся значение с помощью алгоритма «[heavy hitters](http://www.cs.umd.edu/~samir/498/karp.pdf)». Если существует значение, которое встречается чаще, чем в половине случаев, в каждом потоке выполнения запроса, то возвращается данное значение. В общем случае результат недетерминирован.
+
+``` sql
+anyHeavy(column)
+```
+
+**Аргументы**
+
+- `column` — имя столбца.
+
+**Пример**
+
+Возьмём набор данных [OnTime](../../../getting-started/example-datasets/ontime.md) и выберем произвольное часто встречающееся значение в столбце `AirlineID`.
+
+``` sql
+SELECT anyHeavy(AirlineID) AS res
+FROM ontime
+```
+
+``` text
+┌───res─┐
+│ 19690 │
+└───────┘
+```
+
+[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/anyheavy/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/anylast.md b/docs/ru/sql-reference/aggregate-functions/reference/anylast.md
new file mode 100644
index 00000000000..da68c926d43
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/anylast.md
@@ -0,0 +1,10 @@
+---
+toc_priority: 104
+---
+
+# anyLast {#anylastx}
+
+Выбирает последнее попавшееся значение.
+Результат так же недетерминирован, как и для функции [any](../../../sql-reference/aggregate-functions/reference/any.md).
+
+[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/anylast/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/argmax.md b/docs/ru/sql-reference/aggregate-functions/reference/argmax.md
new file mode 100644
index 00000000000..97edd5773c8
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/argmax.md
@@ -0,0 +1,11 @@
+---
+toc_priority: 106
+---
+
+# argMax {#agg-function-argmax}
+
+Синтаксис: `argMax(arg, val)`
+
+Вычисляет значение arg при максимальном значении val. Если есть несколько разных значений arg для максимальных значений val, то выдаётся первое попавшееся из таких значений.
+
+[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/argmax/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/argmin.md b/docs/ru/sql-reference/aggregate-functions/reference/argmin.md
new file mode 100644
index 00000000000..58161cd226a
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/argmin.md
@@ -0,0 +1,31 @@
+---
+toc_priority: 105
+---
+
+# argMin {#agg-function-argmin}
+
+Синтаксис: `argMin(arg, val)`
+
+Вычисляет значение arg при минимальном значении val. Если есть несколько разных значений arg для минимальных значений val, то выдаётся первое попавшееся из таких значений.
+
+**Пример:**
+
+``` text
+┌─user─────┬─salary─┐
+│ director │   5000 │
+│ manager  │   3000 │
+│ worker   │   1000 │
+└──────────┴────────┘
+```
+
+``` sql
+SELECT argMin(user, salary) FROM salary
+```
+
+``` text
+┌─argMin(user, salary)─┐
+│ worker               │
+└──────────────────────┘
+```
+
+[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/argmin/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/avg.md b/docs/ru/sql-reference/aggregate-functions/reference/avg.md
new file mode 100644
index 00000000000..b0bee64ec66
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/avg.md
@@ -0,0 +1,11 @@
+---
+toc_priority: 5
+---
+
+# avg {#agg_function-avg}
+
+Вычисляет среднее.
+Работает только для чисел.
+Результат всегда Float64.
+
+[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/avg/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/avgweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/avgweighted.md
new file mode 100644
index 00000000000..72e6ca5c88c
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/avgweighted.md
@@ -0,0 +1,46 @@
+---
+toc_priority: 107
+---
+
+# avgWeighted {#avgweighted}
+
+Вычисляет [среднее арифметическое взвешенное](https://ru.wikipedia.org/wiki/Среднее_арифметическое_взвешенное).
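+
+Иными словами, вычисляется величина `Σ(x * weight) / Σ(weight)` (общепринятое определение взвешенного среднего; формула приведена здесь для пояснения).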
+
+**Синтаксис**
+
+``` sql
+avgWeighted(x, weight)
+```
+
+**Параметры**
+
+- `x` — Значения. [Целые числа](../../../sql-reference/data-types/int-uint.md) или [числа с плавающей запятой](../../../sql-reference/data-types/float.md).
+- `weight` — Веса отдельных значений. [Целые числа](../../../sql-reference/data-types/int-uint.md) или [числа с плавающей запятой](../../../sql-reference/data-types/float.md).
+
+Типы параметров должны совпадать.
+
+**Возвращаемое значение**
+
+- Среднее арифметическое взвешенное.
+- `NaN`, если все веса равны 0.
+
+Тип: [Float64](../../../sql-reference/data-types/float.md).
+
+**Пример**
+
+Запрос:
+
+``` sql
+SELECT avgWeighted(x, w)
+FROM values('x Int8, w Int8', (4, 1), (1, 0), (10, 2))
+```
+
+Результат:
+
+``` text
+┌─avgWeighted(x, w)─┐
+│                 8 │
+└───────────────────┘
+```
+
+[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/avgweighted/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/corr.md b/docs/ru/sql-reference/aggregate-functions/reference/corr.md
new file mode 100644
index 00000000000..6d631241f6a
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/corr.md
@@ -0,0 +1,14 @@
+---
+toc_priority: 107
+---
+
+# corr {#corrx-y}
+
+Синтаксис: `corr(x, y)`
+
+Вычисляет коэффициент корреляции Пирсона: `Σ((x - x̅)(y - y̅)) / sqrt(Σ((x - x̅)^2) * Σ((y - y̅)^2))`.
+
+!!! note "Примечание"
+    Функция использует вычислительно неустойчивый алгоритм. Если для ваших расчётов необходима [вычислительная устойчивость](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость), используйте функцию `corrStable`. Она работает медленнее, но обеспечивает меньшую вычислительную ошибку.
+
+[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/corr/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/count.md b/docs/ru/sql-reference/aggregate-functions/reference/count.md
new file mode 100644
index 00000000000..68f7d3dcc6b
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/count.md
@@ -0,0 +1,72 @@
+---
+toc_priority: 1
+---
+
+# count {#agg_function-count}
+
+Вычисляет количество строк или не-NULL значений.
+
+ClickHouse поддерживает следующие виды синтаксиса для `count`:
+
+- `count(expr)` или `COUNT(DISTINCT expr)`.
+- `count()` или `COUNT(*)`. Синтаксис `count()` специфичен для ClickHouse.
+
+**Параметры**
+
+Функция может принимать:
+
+- Ноль параметров.
+- Одно [выражение](../../syntax.md#syntax-expressions).
+
+**Возвращаемое значение**
+
+- Если функция вызывается без параметров, она вычисляет количество строк.
+- Если передаётся [выражение](../../syntax.md#syntax-expressions), то функция вычисляет количество раз, когда выражение возвращает не NULL. Если выражение возвращает значение типа [Nullable](../../../sql-reference/data-types/nullable.md), то результат `count` не становится `Nullable`. Функция возвращает 0, если выражение возвращает `NULL` для всех строк.
+
+В обоих случаях тип возвращаемого значения [UInt64](../../../sql-reference/data-types/int-uint.md).
+
+**Подробности**
+
+ClickHouse поддерживает синтаксис `COUNT(DISTINCT ...)`. Поведение этой конструкции зависит от настройки [count\_distinct\_implementation](../../../operations/settings/settings.md#settings-count_distinct_implementation). Она определяет, какая из функций [uniq\*](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) используется для выполнения операции.
По умолчанию — функция [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact). + +Запрос `SELECT count() FROM table` не оптимизирован, поскольку количество записей в таблице не хранится отдельно. Он выбирает небольшой столбец из таблицы и подсчитывает количество значений в нём. + +**Примеры** + +Пример 1: + +``` sql +SELECT count() FROM t +``` + +``` text +┌─count()─┐ +│ 5 │ +└─────────┘ +``` + +Пример 2: + +``` sql +SELECT name, value FROM system.settings WHERE name = 'count_distinct_implementation' +``` + +``` text +┌─name──────────────────────────┬─value─────┐ +│ count_distinct_implementation │ uniqExact │ +└───────────────────────────────┴───────────┘ +``` + +``` sql +SELECT count(DISTINCT num) FROM t +``` + +``` text +┌─uniqExact(num)─┐ +│ 3 │ +└────────────────┘ +``` + +Этот пример показывает, что `count(DISTINCT num)` выполняется с помощью функции `uniqExact` в соответствии со значением настройки `count_distinct_implementation`. + +[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/count/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/covarpop.md b/docs/ru/sql-reference/aggregate-functions/reference/covarpop.md new file mode 100644 index 00000000000..e30b19924f9 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/covarpop.md @@ -0,0 +1,14 @@ +--- +toc_priority: 36 +--- + +# covarPop {#covarpop} + +Синтаксис: `covarPop(x, y)` + +Вычисляет величину `Σ((x - x̅)(y - y̅)) / n`. + +!!! note "Примечание" + Функция использует вычислительно неустойчивый алгоритм. Если для ваших расчётов необходима [вычислительная устойчивость](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость), используйте функцию `covarPopStable`. Она работает медленнее, но обеспечивает меньшую вычислительную ошибку. + +[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/covarpop/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/covarsamp.md b/docs/ru/sql-reference/aggregate-functions/reference/covarsamp.md new file mode 100644 index 00000000000..7fa9a1d3f2c --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/covarsamp.md @@ -0,0 +1,16 @@ +--- +toc_priority: 37 +--- + +# covarSamp {#covarsamp} + +Синтаксис: `covarSamp(x, y)` + +Вычисляет величину `Σ((x - x̅)(y - y̅)) / (n - 1)`. + +Возвращает Float64. В случае, когда `n <= 1`, возвращается +∞. + +!!! note "Примечание" + Функция использует вычислительно неустойчивый алгоритм. Если для ваших расчётов необходима [вычислительная устойчивость](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость), используйте функцию `covarSampStable`. Она работает медленнее, но обеспечивает меньшую вычислительную ошибку. + +[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/covarsamp/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/grouparray.md b/docs/ru/sql-reference/aggregate-functions/reference/grouparray.md new file mode 100644 index 00000000000..7640795fc51 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/grouparray.md @@ -0,0 +1,17 @@ +--- +toc_priority: 110 +--- + +# groupArray {#agg_function-grouparray} + +Синтаксис: `groupArray(x)` или `groupArray(max_size)(x)` + +Составляет массив из значений аргумента. +Значения в массив могут быть добавлены в любом (недетерминированном) порядке. 
+
+Вторая версия (с параметром `max_size`) ограничивает размер результирующего массива `max_size` элементами.
+Например, `groupArray(1)(x)` эквивалентно `[any(x)]`.
+
+В некоторых случаях, вы всё же можете рассчитывать на порядок выполнения запроса. Это — случаи, когда `SELECT` идёт из подзапроса, в котором используется `ORDER BY`.
+
+[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/grouparray/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/grouparrayinsertat.md b/docs/ru/sql-reference/aggregate-functions/reference/grouparrayinsertat.md
new file mode 100644
index 00000000000..d13c056f5c0
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/grouparrayinsertat.md
@@ -0,0 +1,93 @@
+---
+toc_priority: 112
+---
+
+# groupArrayInsertAt {#grouparrayinsertat}
+
+Вставляет значение в заданную позицию массива.
+
+**Синтаксис**
+
+```sql
+groupArrayInsertAt(default_x, size)(x, pos);
+```
+
+Если в одном запросе в одну и ту же позицию вставляется несколько значений, то функция ведет себя следующим образом:
+
+- Если запрос выполняется в одном потоке, то используется первое из вставляемых значений.
+- Если запрос выполняется в нескольких потоках, то в результирующем массиве может оказаться любое из вставляемых значений.
+
+**Параметры**
+
+- `x` — Значение, которое будет вставлено. [Выражение](../../syntax.md#syntax-expressions), возвращающее значение одного из [поддерживаемых типов данных](../../../sql-reference/data-types/index.md#data_types).
+- `pos` — Позиция, в которую вставляется заданный элемент `x`. Нумерация индексов в массиве начинается с нуля. [UInt32](../../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-int8-int16-int32-int64).
+- `default_x` — Значение по умолчанию для подстановки на пустые позиции. Опциональный параметр. [Выражение](../../syntax.md#syntax-expressions), возвращающее значение с типом параметра `x`. Если `default_x` не определен, используются [значения по умолчанию](../../../sql-reference/statements/create.md#create-default-values).
+- `size` — Длина результирующего массива. Опциональный параметр. При использовании этого параметра должно быть указано значение по умолчанию `default_x`. [UInt32](../../../sql-reference/data-types/int-uint.md#uint-ranges).
+
+**Возвращаемое значение**
+
+- Массив со вставленными значениями.
+
+Тип: [Array](../../../sql-reference/data-types/array.md#data-type-array).
+
+**Примеры**
+
+Запрос:
+
+```sql
+SELECT groupArrayInsertAt(toString(number), number * 2) FROM numbers(5);
+```
+
+Результат:
+
+```text
+┌─groupArrayInsertAt(toString(number), multiply(number, 2))─┐
+│ ['0','','1','','2','','3','','4']                         │
+└───────────────────────────────────────────────────────────┘
+```
+
+Запрос:
+
+```sql
+SELECT groupArrayInsertAt('-')(toString(number), number * 2) FROM numbers(5);
+```
+
+Результат:
+
+```text
+┌─groupArrayInsertAt('-')(toString(number), multiply(number, 2))─┐
+│ ['0','-','1','-','2','-','3','-','4']                          │
+└────────────────────────────────────────────────────────────────┘
+```
+
+Запрос:
+
+```sql
+SELECT groupArrayInsertAt('-', 5)(toString(number), number * 2) FROM numbers(5);
+```
+
+Результат:
+
+```text
+┌─groupArrayInsertAt('-', 5)(toString(number), multiply(number, 2))─┐
+│ ['0','-','1','-','2']                                             │
+└───────────────────────────────────────────────────────────────────┘
+```
+
+Многопоточная вставка элементов в одну позицию.
+
+Запрос:
+
+```sql
+SELECT groupArrayInsertAt(number, 0) FROM numbers_mt(10) SETTINGS max_block_size = 1;
+```
+
+В результате этого запроса вы получите случайное целое число из диапазона `[0,9]`. Например:
+
+```text
+┌─groupArrayInsertAt(number, 0)─┐
+│ [7]                           │
+└───────────────────────────────┘
+```
+
+[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingavg.md b/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingavg.md
new file mode 100644
index 00000000000..6307189c440
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingavg.md
@@ -0,0 +1,78 @@
+---
+toc_priority: 114
+---
+
+# groupArrayMovingAvg {#agg_function-grouparraymovingavg}
+
+Вычисляет скользящее среднее для входных значений.
+
+    groupArrayMovingAvg(numbers_for_summing)
+    groupArrayMovingAvg(window_size)(numbers_for_summing)
+
+Функция может принимать размер окна в качестве параметра. Если окно не указано, то функция использует размер окна, равный количеству строк в столбце.
+
+**Параметры**
+
+- `numbers_for_summing` — [выражение](../../syntax.md#syntax-expressions), возвращающее значение числового типа.
+- `window_size` — размер окна.
+
+**Возвращаемые значения**
+
+- Массив того же размера и типа, что и входные данные.
+
+Функция использует [округление к меньшему по модулю](https://ru.wikipedia.org/wiki/Округление#Методы). Оно усекает десятичные разряды, незначимые для результирующего типа данных.
+
+**Пример**
+
+Таблица с исходными данными:
+
+``` sql
+CREATE TABLE t
+(
+    `int` UInt8,
+    `float` Float32,
+    `dec` Decimal32(2)
+)
+ENGINE = TinyLog
+```
+
+``` text
+┌─int─┬─float─┬──dec─┐
+│   1 │   1.1 │ 1.10 │
+│   2 │   2.2 │ 2.20 │
+│   4 │   4.4 │ 4.40 │
+│   7 │  7.77 │ 7.77 │
+└─────┴───────┴──────┘
+```
+
+Запросы:
+
+``` sql
+SELECT
+    groupArrayMovingAvg(int) AS I,
+    groupArrayMovingAvg(float) AS F,
+    groupArrayMovingAvg(dec) AS D
+FROM t
+```
+
+``` text
+┌─I─────────┬─F───────────────────────────────────┬─D─────────────────────┐
+│ [0,0,1,3] │ [0.275,0.82500005,1.9250001,3.8675] │ [0.27,0.82,1.92,3.86] │
+└───────────┴─────────────────────────────────────┴───────────────────────┘
+```
+
+``` sql
+SELECT
+    groupArrayMovingAvg(2)(int) AS I,
+    groupArrayMovingAvg(2)(float) AS F,
+    groupArrayMovingAvg(2)(dec) AS D
+FROM t
+```
+
+``` text
+┌─I─────────┬─F────────────────────────────────┬─D─────────────────────┐
+│ [0,1,3,5] │ [0.55,1.6500001,3.3000002,6.085] │ [0.55,1.65,3.30,6.08] │
+└───────────┴──────────────────────────────────┴───────────────────────┘
+```
+
+[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingsum.md b/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingsum.md
new file mode 100644
index 00000000000..c95f1b0b0eb
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/grouparraymovingsum.md
@@ -0,0 +1,78 @@
+---
+toc_priority: 113
+---
+
+# groupArrayMovingSum {#agg_function-grouparraymovingsum}
+
+Вычисляет скользящую сумму входных значений.
+
+``` sql
+groupArrayMovingSum(numbers_for_summing)
+groupArrayMovingSum(window_size)(numbers_for_summing)
+```
+
+Функция может принимать размер окна в качестве параметра. Если окно не указано, то функция использует размер окна, равный количеству строк в столбце.
+
**Параметры**

- `numbers_for_summing` — [выражение](../../syntax.md#syntax-expressions), возвращающее значение числового типа.
- `window_size` — размер окна.

**Возвращаемые значения**

- Массив того же размера и типа, что и входные данные.

**Пример**

Таблица с исходными данными:

``` sql
CREATE TABLE t
(
    `int` UInt8,
    `float` Float32,
    `dec` Decimal32(2)
)
ENGINE = TinyLog
```

``` text
┌─int─┬─float─┬──dec─┐
│ 1 │ 1.1 │ 1.10 │
│ 2 │ 2.2 │ 2.20 │
│ 4 │ 4.4 │ 4.40 │
│ 7 │ 7.77 │ 7.77 │
└─────┴───────┴──────┘
```

Запросы:

``` sql
SELECT
    groupArrayMovingSum(int) AS I,
    groupArrayMovingSum(float) AS F,
    groupArrayMovingSum(dec) AS D
FROM t
```

``` text
┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐
│ [1,3,7,14] │ [1.1,3.3000002,7.7000003,15.47] │ [1.10,3.30,7.70,15.47] │
└────────────┴─────────────────────────────────┴────────────────────────┘
```

``` sql
SELECT
    groupArrayMovingSum(2)(int) AS I,
    groupArrayMovingSum(2)(float) AS F,
    groupArrayMovingSum(2)(dec) AS D
FROM t
```

``` text
┌─I──────────┬─F───────────────────────────────┬─D──────────────────────┐
│ [1,3,6,11] │ [1.1,3.3000002,6.6000004,12.17] │ [1.10,3.30,6.60,12.17] │
└────────────┴─────────────────────────────────┴────────────────────────┘
```

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitand.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitand.md new file mode 100644 index 00000000000..03aff64fecf --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupbitand.md @@ -0,0 +1,48 @@ +--- +toc_priority: 125 +---

# groupBitAnd {#groupbitand}

Применяет побитовое `И` для последовательности чисел.

``` sql
groupBitAnd(expr)
```

**Параметры**

`expr` – выражение, результат которого имеет тип данных `UInt*`.

**Возвращаемое значение**

Значение типа `UInt*`.

**Пример**

Тестовые данные:

``` text
binary decimal
00101100 = 44
00011100 = 28
00001101 = 13
01010101 = 85
```

Запрос:

``` sql
SELECT groupBitAnd(num) FROM t
```

Где `num` — столбец с тестовыми данными.

Результат:

``` text
binary decimal
00000100 = 4
```

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/groupbitand/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitmap.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitmap.md new file mode 100644 index 00000000000..c01636e155d --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupbitmap.md @@ -0,0 +1,46 @@ +--- +toc_priority: 128 +---

# groupBitmap {#groupbitmap}

Bitmap-вычисления или агрегатные вычисления для столбца с типом данных `UInt*`. Возвращают кардинальность в виде значения типа `UInt64`; если добавить суффикс `-State`, то возвращают [объект bitmap](../../../sql-reference/functions/bitmap-functions.md).

``` sql
groupBitmap(expr)
```

**Параметры**

`expr` – выражение, результат которого имеет тип данных `UInt*`.

**Возвращаемое значение**

Значение типа `UInt64`.
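
Набросок, иллюстрирующий упомянутый выше суффикс `-State`; сочетание с функцией [bitmapCardinality](../../../sql-reference/functions/bitmap-functions.md) приведено как предположение, а не как единственный способ использования состояния:

``` sql
-- groupBitmapState возвращает объект bitmap, который можно
-- передавать в функции для работы с битовыми массивами
SELECT bitmapCardinality(groupBitmapState(UserID)) FROM t;
```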
+ +**Пример** + +Тестовые данные: + +``` text +UserID +1 +1 +2 +3 +``` + +Запрос: + +``` sql +SELECT groupBitmap(UserID) as num FROM t +``` + +Результат: + +``` text +num +3 +``` + +[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/groupbitmap/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitor.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitor.md new file mode 100644 index 00000000000..e1afced014f --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupbitor.md @@ -0,0 +1,48 @@ +--- +toc_priority: 126 +--- + +# groupBitOr {#groupbitor} + +Применяет побитовое `ИЛИ` для последовательности чисел. + +``` sql +groupBitOr(expr) +``` + +**Параметры** + +`expr` – выражение, результат которого имеет тип данных `UInt*`. + +**Возвращаемое значение** + +Значение типа `UInt*`. + +**Пример** + +Тестовые данные: + +``` text +binary decimal +00101100 = 44 +00011100 = 28 +00001101 = 13 +01010101 = 85 +``` + +Запрос: + +``` sql +SELECT groupBitOr(num) FROM t +``` + +Где `num` — столбец с тестовыми данными. + +Результат: + +``` text +binary decimal +01111101 = 125 +``` + +[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/groupbitor/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupbitxor.md b/docs/ru/sql-reference/aggregate-functions/reference/groupbitxor.md new file mode 100644 index 00000000000..a80f86b2a5f --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupbitxor.md @@ -0,0 +1,48 @@ +--- +toc_priority: 127 +--- + +# groupBitXor {#groupbitxor} + +Применяет побитовое `ИСКЛЮЧАЮЩЕЕ ИЛИ` для последовательности чисел. + +``` sql +groupBitXor(expr) +``` + +**Параметры** + +`expr` – выражение, результат которого имеет тип данных `UInt*`. + +**Возвращаемое значение** + +Значение типа `UInt*`. + +**Пример** + +Тестовые данные: + +``` text +binary decimal +00101100 = 44 +00011100 = 28 +00001101 = 13 +01010101 = 85 +``` + +Запрос: + +``` sql +SELECT groupBitXor(num) FROM t +``` + +Где `num` — столбец с тестовыми данными. + +Результат: + +``` text +binary decimal +01101000 = 104 +``` + +[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/groupbitxor/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/groupuniqarray.md b/docs/ru/sql-reference/aggregate-functions/reference/groupuniqarray.md new file mode 100644 index 00000000000..cecc63aef22 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/groupuniqarray.md @@ -0,0 +1,13 @@ +--- +toc_priority: 111 +--- + +# groupUniqArray {#groupuniqarray} + +Синтаксис: `groupUniqArray(x)` или `groupUniqArray(max_size)(x)` + +Составляет массив из различных значений аргумента. Расход оперативной памяти такой же, как у функции `uniqExact`. + +Функция `groupUniqArray(max_size)(x)` ограничивает размер результирующего массива до `max_size` элементов. Например, `groupUniqArray(1)(x)` равнозначно `[any(x)]`. 
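
Небольшой набросок для иллюстрации (порядок элементов в результирующем массиве не определён):

``` sql
-- из четырёх значений с одним дубликатом останется три различных
SELECT groupUniqArray(x) FROM (SELECT arrayJoin([1, 1, 2, 3]) AS x);
```

Запрос вернёт массив из трёх элементов, например `[3,2,1]`.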
+ +[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/groupuniqarray/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/index.md b/docs/ru/sql-reference/aggregate-functions/reference/index.md new file mode 100644 index 00000000000..e621e68e8f2 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/index.md @@ -0,0 +1,68 @@ +--- +toc_folder_title: "\u0421\u043f\u0440\u0430\u0432\u043e\u0447\u043d\u0438\u043a" +toc_priority: 36 +toc_hidden: true +--- + +# Перечень агрегатных функций {#aggregate-functions-list} + +Стандартные агрегатные функции: + +- [count](../../../sql-reference/aggregate-functions/reference/count.md) +- [min](../../../sql-reference/aggregate-functions/reference/min.md) +- [max](../../../sql-reference/aggregate-functions/reference/max.md) +- [sum](../../../sql-reference/aggregate-functions/reference/sum.md) +- [avg](../../../sql-reference/aggregate-functions/reference/avg.md) +- [any](../../../sql-reference/aggregate-functions/reference/any.md) +- [stddevPop](../../../sql-reference/aggregate-functions/reference/stddevpop.md) +- [stddevSamp](../../../sql-reference/aggregate-functions/reference/stddevsamp.md) +- [varPop](../../../sql-reference/aggregate-functions/reference/varpop.md) +- [varSamp](../../../sql-reference/aggregate-functions/reference/varsamp.md) +- [covarPop](../../../sql-reference/aggregate-functions/reference/covarpop.md) +- [covarSamp](../../../sql-reference/aggregate-functions/reference/covarsamp.md) + +Агрегатные функции, специфичные для ClickHouse: + +- [anyHeavy](../../../sql-reference/aggregate-functions/reference/anyheavy.md) +- [anyLast](../../../sql-reference/aggregate-functions/reference/anylast.md) +- [argMin](../../../sql-reference/aggregate-functions/reference/argmin.md) +- [argMax](../../../sql-reference/aggregate-functions/reference/argmax.md) +- [avgWeighted](../../../sql-reference/aggregate-functions/reference/avgweighted.md) +- [topK](../../../sql-reference/aggregate-functions/reference/topk.md) +- [topKWeighted](../../../sql-reference/aggregate-functions/reference/topkweighted.md) +- [groupArray](../../../sql-reference/aggregate-functions/reference/grouparray.md) +- [groupUniqArray](../../../sql-reference/aggregate-functions/reference/groupuniqarray.md) +- [groupArrayInsertAt](../../../sql-reference/aggregate-functions/reference/grouparrayinsertat.md) +- [groupArrayMovingAvg](../../../sql-reference/aggregate-functions/reference/grouparraymovingavg.md) +- [groupArrayMovingSum](../../../sql-reference/aggregate-functions/reference/grouparraymovingsum.md) +- [groupBitAnd](../../../sql-reference/aggregate-functions/reference/groupbitand.md) +- [groupBitOr](../../../sql-reference/aggregate-functions/reference/groupbitor.md) +- [groupBitXor](../../../sql-reference/aggregate-functions/reference/groupbitxor.md) +- [groupBitmap](../../../sql-reference/aggregate-functions/reference/groupbitmap.md) +- [sumWithOverflow](../../../sql-reference/aggregate-functions/reference/sumwithoverflow.md) +- [sumMap](../../../sql-reference/aggregate-functions/reference/summap.md) +- [skewSamp](../../../sql-reference/aggregate-functions/reference/skewsamp.md) +- [skewPop](../../../sql-reference/aggregate-functions/reference/skewpop.md) +- [kurtSamp](../../../sql-reference/aggregate-functions/reference/kurtsamp.md) +- [kurtPop](../../../sql-reference/aggregate-functions/reference/kurtpop.md) +- [timeSeriesGroupSum](../../../sql-reference/aggregate-functions/reference/timeseriesgroupsum.md) 
+- [timeSeriesGroupRateSum](../../../sql-reference/aggregate-functions/reference/timeseriesgroupratesum.md) +- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md) +- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md) +- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md) +- [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md) +- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md) +- [quantile](../../../sql-reference/aggregate-functions/reference/quantile.md) +- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md) +- [quantileExact](../../../sql-reference/aggregate-functions/reference/quantileexact.md) +- [quantileExactWeighted](../../../sql-reference/aggregate-functions/reference/quantileexactweighted.md) +- [quantileTiming](../../../sql-reference/aggregate-functions/reference/quantiletiming.md) +- [quantileTimingWeighted](../../../sql-reference/aggregate-functions/reference/quantiletimingweighted.md) +- [quantileDeterministic](../../../sql-reference/aggregate-functions/reference/quantiledeterministic.md) +- [quantileTDigest](../../../sql-reference/aggregate-functions/reference/quantiletdigest.md) +- [quantileTDigestWeighted](../../../sql-reference/aggregate-functions/reference/quantiletdigestweighted.md) +- [simpleLinearRegression](../../../sql-reference/aggregate-functions/reference/simplelinearregression.md) +- [stochasticLinearRegression](../../../sql-reference/aggregate-functions/reference/stochasticlinearregression.md) +- [stochasticLogisticRegression](../../../sql-reference/aggregate-functions/reference/stochasticlogisticregression.md) + +[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/aggregate-functions/reference) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/kurtpop.md b/docs/ru/sql-reference/aggregate-functions/reference/kurtpop.md new file mode 100644 index 00000000000..a00dae51ed6 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/kurtpop.md @@ -0,0 +1,27 @@ +--- +toc_priority: 153 +--- + +# kurtPop {#kurtpop} + +Вычисляет [коэффициент эксцесса](https://ru.wikipedia.org/wiki/Коэффициент_эксцесса) последовательности. + +``` sql +kurtPop(expr) +``` + +**Параметры** + +`expr` — [Выражение](../../syntax.md#syntax-expressions), возвращающее число. + +**Возвращаемое значение** + +Коэффициент эксцесса заданного распределения. Тип — [Float64](../../../sql-reference/data-types/float.md) + +**Пример** + +``` sql +SELECT kurtPop(value) FROM series_with_value_column +``` + +[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/kurtpop/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/kurtsamp.md b/docs/ru/sql-reference/aggregate-functions/reference/kurtsamp.md new file mode 100644 index 00000000000..379d74ec0c3 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/kurtsamp.md @@ -0,0 +1,29 @@ +--- +toc_priority: 154 +--- + +# kurtSamp {#kurtsamp} + +Вычисляет [выборочный коэффициент эксцесса](https://ru.wikipedia.org/wiki/Статистика_(функция_выборки)) для последовательности. + +Он представляет собой несмещенную оценку эксцесса случайной величины, если переданные значения образуют ее выборку. + +``` sql +kurtSamp(expr) +``` + +**Параметры** + +`expr` — [Выражение](../../syntax.md#syntax-expressions), возвращающее число. 
+
**Возвращаемое значение**

Коэффициент эксцесса заданного распределения. Тип — [Float64](../../../sql-reference/data-types/float.md). Если `n <= 1` (`n` — размер выборки), тогда функция возвращает `nan`.

**Пример**

``` sql
SELECT kurtSamp(value) FROM series_with_value_column
```

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/kurtsamp/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/max.md b/docs/ru/sql-reference/aggregate-functions/reference/max.md new file mode 100644 index 00000000000..4ee577471ea --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/max.md @@ -0,0 +1,9 @@ +--- +toc_priority: 3 +---

# max {#agg_function-max}

Вычисляет максимум.

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/max/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/median.md b/docs/ru/sql-reference/aggregate-functions/reference/median.md new file mode 100644 index 00000000000..803b2309665 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/median.md @@ -0,0 +1,43 @@ +# median {#median}

Функции `median*` — алиасы для соответствующих функций `quantile*`. Они вычисляют медиану числовой последовательности.

Функции:

- `median` — алиас [quantile](#quantile).
- `medianDeterministic` — алиас [quantileDeterministic](#quantiledeterministic).
- `medianExact` — алиас [quantileExact](#quantileexact).
- `medianExactWeighted` — алиас [quantileExactWeighted](#quantileexactweighted).
- `medianTiming` — алиас [quantileTiming](#quantiletiming).
- `medianTimingWeighted` — алиас [quantileTimingWeighted](#quantiletimingweighted).
- `medianTDigest` — алиас [quantileTDigest](#quantiletdigest).
- `medianTDigestWeighted` — алиас [quantileTDigestWeighted](#quantiletdigestweighted).

**Пример**

Входная таблица:

``` text
┌─val─┐
│ 1 │
│ 1 │
│ 2 │
│ 3 │
└─────┘
```

Запрос:

``` sql
SELECT medianDeterministic(val, 1) FROM t
```

Результат:

``` text
┌─medianDeterministic(val, 1)─┐
│ 1.5 │
└─────────────────────────────┘
```

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/median/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/min.md b/docs/ru/sql-reference/aggregate-functions/reference/min.md new file mode 100644 index 00000000000..7b56de3aed4 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/min.md @@ -0,0 +1,9 @@ +--- +toc_priority: 2 +---

# min {#agg_function-min}

Вычисляет минимум.

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/min/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantile.md b/docs/ru/sql-reference/aggregate-functions/reference/quantile.md new file mode 100644 index 00000000000..10fec16ab94 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantile.md @@ -0,0 +1,68 @@ +--- +toc_priority: 200 +---

# quantile {#quantile}

Приблизительно вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности.

Функция использует алгоритм [reservoir sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) с размером резервуара до 8192 и генератором случайных чисел для сэмплирования. Результат не детерминирован. Чтобы получить точную квантиль, используйте функцию [quantileExact](#quantileexact).
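
Набросок для сравнения приближённого и точного вычисления; на выборке, превышающей размер резервуара, результат `quantile` может меняться от запуска к запуску:

``` sql
-- приближённое значение (недетерминировано)
SELECT quantile(0.9)(number) FROM numbers(1000000);

-- точное значение той же квантили
SELECT quantileExact(0.9)(number) FROM numbers(1000000);
```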
+
Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса.

**Синтаксис**

``` sql
quantile(level)(expr)
```

Алиас: `median`.

**Параметры**

- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)).
- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md).

**Возвращаемое значение**

- Приблизительный квантиль заданного уровня.

Тип:

- [Float64](../../../sql-reference/data-types/float.md) для входных данных числового типа.
- [Date](../../../sql-reference/data-types/date.md), если входные значения имеют тип `Date`.
- [DateTime](../../../sql-reference/data-types/datetime.md), если входные значения имеют тип `DateTime`.

**Пример**

Входная таблица:

``` text
┌─val─┐
│ 1 │
│ 1 │
│ 2 │
│ 3 │
└─────┘
```

Запрос:

``` sql
SELECT quantile(val) FROM t
```

Результат:

``` text
┌─quantile(val)─┐
│ 1.5 │
└───────────────┘
```

**Смотрите также**

- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/quantile/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiledeterministic.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiledeterministic.md new file mode 100644 index 00000000000..fdbcda821f6 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiledeterministic.md @@ -0,0 +1,68 @@ +--- +toc_priority: 206 +---

# quantileDeterministic {#quantiledeterministic}

Приблизительно вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности.

Функция использует алгоритм [reservoir sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) с размером резервуара до 8192 и детерминированным алгоритмом сэмплирования. Результат детерминирован. Чтобы получить точную квантиль, используйте функцию [quantileExact](#quantileexact).

Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса.

**Синтаксис**

``` sql
quantileDeterministic(level)(expr, determinator)
```

Алиас: `medianDeterministic`.

**Параметры**

- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)).
- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md).
- `determinator` — Число, хэш которого используется при сэмплировании в алгоритме reservoir sampling, чтобы сделать результат детерминированным. В качестве детерминатора можно использовать любое определённое положительное число, например, идентификатор пользователя или события. Если одно и то же значение детерминатора попадается в выборке слишком часто, то функция выдаёт некорректный результат.

**Возвращаемое значение**

- Приблизительный квантиль заданного уровня.

Тип:

- [Float64](../../../sql-reference/data-types/float.md) для входных данных числового типа.
- [Date](../../../sql-reference/data-types/date.md), если входные значения имеют тип `Date`.
- [DateTime](../../../sql-reference/data-types/datetime.md), если входные значения имеют тип `DateTime`.

**Пример**

Входная таблица:

``` text
┌─val─┐
│ 1 │
│ 1 │
│ 2 │
│ 3 │
└─────┘
```

Запрос:

``` sql
SELECT quantileDeterministic(val, 1) FROM t
```

Результат:

``` text
┌─quantileDeterministic(val, 1)─┐
│ 1.5 │
└───────────────────────────────┘
```

**Смотрите также**

- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/quantiledeterministic/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md new file mode 100644 index 00000000000..49415d96af9 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantileexact.md @@ -0,0 +1,56 @@ +--- +toc_priority: 202 +---

# quantileExact {#quantileexact}

Точно вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности.

Чтобы получить точный результат, все переданные значения собираются в массив, который затем частично сортируется. Таким образом, функция потребляет объем памяти `O(n)`, где `n` — количество переданных значений. Для небольшого числа значений эта функция эффективна.

Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса.

**Синтаксис**

``` sql
quantileExact(level)(expr)
```

Алиас: `medianExact`.

**Параметры**

- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)).
- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md).

**Возвращаемое значение**

- Квантиль заданного уровня.

Тип:

- [Float64](../../../sql-reference/data-types/float.md) для входных данных числового типа.
- [Date](../../../sql-reference/data-types/date.md), если входные значения имеют тип `Date`.
- [DateTime](../../../sql-reference/data-types/datetime.md), если входные значения имеют тип `DateTime`.

**Пример**

Запрос:

``` sql
SELECT quantileExact(number) FROM numbers(10)
```

Результат:

``` text
┌─quantileExact(number)─┐
│ 5 │
└───────────────────────┘
```

**Смотрите также**

- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/quantileexact/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantileexactweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/quantileexactweighted.md new file mode 100644 index 00000000000..f6982d4566f --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantileexactweighted.md @@ -0,0 +1,69 @@ +--- +toc_priority: 203 +---

# quantileExactWeighted {#quantileexactweighted}

Точно вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности, учитывая вес каждого её элемента.

Чтобы получить точный результат, все переданные значения собираются в массив, который затем частично сортируется. Для каждого значения учитывается его вес (количество значений в выборке). В алгоритме используется хэш-таблица. Таким образом, если переданные значения часто повторяются, функция потребляет меньше оперативной памяти, чем [quantileExact](#quantileexact). Эту функцию можно использовать вместо `quantileExact`, если указать вес 1 (см. иллюстрацию ниже).

Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса.

**Синтаксис**

``` sql
quantileExactWeighted(level)(expr, weight)
```

Алиас: `medianExactWeighted`.

**Параметры**

- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)).
- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md).
- `weight` — Столбец с весами элементов последовательности. Вес — это количество повторений элемента в последовательности.

**Возвращаемое значение**

- Квантиль заданного уровня.

Тип:

- [Float64](../../../sql-reference/data-types/float.md) для входных данных числового типа.
- [Date](../../../sql-reference/data-types/date.md), если входные значения имеют тип `Date`.
- [DateTime](../../../sql-reference/data-types/datetime.md), если входные значения имеют тип `DateTime`.
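
Иллюстрация упомянутой выше замены `quantileExact` при весе 1 (набросок):

``` sql
-- все значения получают вес 1, результат совпадает с quantileExact(number)
SELECT quantileExactWeighted(number, 1) FROM numbers(10);
```

Запрос вернёт `5`, то есть то же значение, что и `quantileExact(number)` на той же выборке.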
+
**Пример**

Входная таблица:

``` text
┌─n─┬─val─┐
│ 0 │ 3 │
│ 1 │ 2 │
│ 2 │ 1 │
│ 5 │ 4 │
└───┴─────┘
```

Запрос:

``` sql
SELECT quantileExactWeighted(n, val) FROM t
```

Результат:

``` text
┌─quantileExactWeighted(n, val)─┐
│ 1 │
└───────────────────────────────┘
```

**Смотрите также**

- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/quantileexactweighted/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md new file mode 100644 index 00000000000..82e806b67fa --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiles.md @@ -0,0 +1,11 @@ +--- +toc_priority: 201 +---

# quantiles {#quantiles}

Синтаксис: `quantiles(level1, level2, …)(x)`

Все функции для вычисления квантилей имеют соответствующие функции для вычисления нескольких квантилей: `quantiles`, `quantilesDeterministic`, `quantilesTiming`, `quantilesTimingWeighted`, `quantilesExact`, `quantilesExactWeighted`, `quantilesTDigest`. Эти функции за один проход вычисляют все квантили перечисленных уровней и возвращают массив полученных значений.

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/quantiles/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigest.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigest.md new file mode 100644 index 00000000000..f372e308e73 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigest.md @@ -0,0 +1,59 @@ +--- +toc_priority: 207 +---

# quantileTDigest {#quantiletdigest}

Приблизительно вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности, используя алгоритм [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf).

Максимальная ошибка 1%. Потребление памяти — `log(n)`, где `n` — число значений. Результат не детерминирован и зависит от порядка выполнения запроса.

Производительность функции ниже, чем производительность функции [quantile](#quantile) или [quantileTiming](#quantiletiming). По соотношению размера состояния к точности вычисления эта функция значительно превосходит `quantile`.

Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса.

**Синтаксис**

``` sql
quantileTDigest(level)(expr)
```

Алиас: `medianTDigest`.

**Параметры**

- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)).
- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md).

**Возвращаемое значение**

- Приблизительный квантиль заданного уровня.
+
Тип:

- [Float64](../../../sql-reference/data-types/float.md) для входных данных числового типа.
- [Date](../../../sql-reference/data-types/date.md), если входные значения имеют тип `Date`.
- [DateTime](../../../sql-reference/data-types/datetime.md), если входные значения имеют тип `DateTime`.

**Пример**

Запрос:

``` sql
SELECT quantileTDigest(number) FROM numbers(10)
```

Результат:

``` text
┌─quantileTDigest(number)─┐
│ 4.5 │
└─────────────────────────┘
```

**Смотрите также**

- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md new file mode 100644 index 00000000000..b6dd846967b --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md @@ -0,0 +1,60 @@ +--- +toc_priority: 208 +---

# quantileTDigestWeighted {#quantiletdigestweighted}

Приблизительно вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности, используя алгоритм [t-digest](https://github.com/tdunning/t-digest/blob/master/docs/t-digest-paper/histo.pdf). Функция учитывает вес каждого элемента последовательности.

Максимальная ошибка 1%. Потребление памяти — `log(n)`, где `n` — число значений. Результат не детерминирован и зависит от порядка выполнения запроса.

Производительность функции ниже, чем производительность функции [quantile](#quantile) или [quantileTiming](#quantiletiming). По соотношению размера состояния к точности вычисления эта функция значительно превосходит `quantile`.

Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса.

**Синтаксис**

``` sql
quantileTDigestWeighted(level)(expr, weight)
```

Алиас: `medianTDigestWeighted`.

**Параметры**

- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)).
- `expr` — Выражение над значениями столбца, которое возвращает данные [числовых типов](../../../sql-reference/data-types/index.md#data_types) или типов [Date](../../../sql-reference/data-types/date.md), [DateTime](../../../sql-reference/data-types/datetime.md).
- `weight` — Столбец с весами элементов последовательности. Вес — это количество повторений элемента в последовательности.

**Возвращаемое значение**

- Приблизительный квантиль заданного уровня.

Тип:

- [Float64](../../../sql-reference/data-types/float.md) для входных данных числового типа.
- [Date](../../../sql-reference/data-types/date.md), если входные значения имеют тип `Date`.
- [DateTime](../../../sql-reference/data-types/datetime.md), если входные значения имеют тип `DateTime`.
+
**Пример**

Запрос:

``` sql
SELECT quantileTDigestWeighted(number, 1) FROM numbers(10)
```

Результат:

``` text
┌─quantileTDigestWeighted(number, 1)─┐
│ 4.5 │
└────────────────────────────────────┘
```

**Смотрите также**

- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletiming.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletiming.md new file mode 100644 index 00000000000..32e5e6ce31b --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletiming.md @@ -0,0 +1,88 @@ +--- +toc_priority: 204 +---

# quantileTiming {#quantiletiming}

Вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности с детерминированной точностью.

Результат детерминирован (не зависит от порядка обработки запроса). Функция оптимизирована для работы с последовательностями, описывающими такие распределения, как время загрузки веб-страниц или время отклика бэкенда.

Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса.

**Синтаксис**

``` sql
quantileTiming(level)(expr)
```

Алиас: `medianTiming`.

**Параметры**

- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)).

- `expr` — [Выражение](../../syntax.md#syntax-expressions) над значениями столбца, возвращающее данные типа [Float\*](../../../sql-reference/data-types/float.md).

    - Если в функцию передать отрицательные значения, то её поведение не определено.
    - Если значение больше, чем 30 000 (например, время загрузки страницы превышает 30 секунд), то оно приравнивается к 30 000.

**Точность**

Вычисления точны при соблюдении следующих условий:

- Размер выборки не превышает 5670 элементов.
- Размер выборки превышает 5670 элементов, но значение каждого элемента не больше 1024.

В противном случае результат вычисления округляется до ближайшего числа, кратного 16.

!!! note "Примечание"
    Для указанного типа последовательностей функция производительнее и точнее, чем [quantile](#quantile).

**Возвращаемое значение**

- Квантиль заданного уровня.

Тип: `Float32`.

!!! note "Примечания"
    Если в функцию не передать значений (при использовании `quantileTimingIf`), то вернётся [NaN](../../../sql-reference/data-types/float.md#data_type-float-nan-inf). Это необходимо для отделения подобных случаев от случаев, когда результат 0. Подробности про сортировку `NaN` смотрите в разделе [Секция ORDER BY](../../../sql-reference/statements/select/order-by.md#select-order-by).
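
Набросок, иллюстрирующий описанное выше ограничение в 30 000 (`arrayJoin` разворачивает массив в строки):

``` sql
-- каждое значение больше 30 000 приравнивается к 30 000
SELECT quantileTiming(0.5)(arrayJoin([120000, 120000, 120000]));
```

Запрос вернёт `30000`.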
+
**Пример**

Входная таблица:

``` text
┌─response_time─┐
│ 72 │
│ 112 │
│ 126 │
│ 145 │
│ 104 │
│ 242 │
│ 313 │
│ 168 │
│ 108 │
└───────────────┘
```

Запрос:

``` sql
SELECT quantileTiming(response_time) FROM t
```

Результат:

``` text
┌─quantileTiming(response_time)─┐
│ 126 │
└───────────────────────────────┘
```

**Смотрите также**

- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/quantiletiming/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/quantiletimingweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/quantiletimingweighted.md new file mode 100644 index 00000000000..4a7fcc666d5 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/quantiletimingweighted.md @@ -0,0 +1,87 @@ +--- +toc_priority: 205 +---

# quantileTimingWeighted {#quantiletimingweighted}

С детерминированной точностью вычисляет [квантиль](https://ru.wikipedia.org/wiki/Квантиль) числовой последовательности, учитывая вес каждого элемента.

Результат детерминирован (не зависит от порядка обработки запроса). Функция оптимизирована для работы с последовательностями, описывающими такие распределения, как время загрузки веб-страниц или время отклика бэкенда.

Внутренние состояния функций `quantile*` не объединяются, если они используются в одном запросе. Если вам необходимо вычислить квантили нескольких уровней, используйте функцию [quantiles](#quantiles), это повысит эффективность запроса.

**Синтаксис**

``` sql
quantileTimingWeighted(level)(expr, weight)
```

Алиас: `medianTimingWeighted`.

**Параметры**

- `level` — Уровень квантили. Опционально. Константное значение с плавающей запятой от 0 до 1. Мы рекомендуем использовать значение `level` из диапазона `[0.01, 0.99]`. Значение по умолчанию: 0.5. При `level=0.5` функция вычисляет [медиану](https://ru.wikipedia.org/wiki/Медиана_(статистика)).

- `expr` — [Выражение](../../syntax.md#syntax-expressions) над значениями столбца, возвращающее данные типа [Float\*](../../../sql-reference/data-types/float.md).

    - Если в функцию передать отрицательные значения, то её поведение не определено.
    - Если значение больше, чем 30 000 (например, время загрузки страницы превышает 30 секунд), то оно приравнивается к 30 000.

- `weight` — Столбец с весами элементов последовательности. Вес — это количество повторений элемента в последовательности.

**Точность**

Вычисления точны при соблюдении следующих условий:

- Размер выборки не превышает 5670 элементов.
- Размер выборки превышает 5670 элементов, но значение каждого элемента не больше 1024.

В противном случае результат вычисления округляется до ближайшего числа, кратного 16.

!!! note "Примечание"
    Для указанного типа последовательностей функция производительнее и точнее, чем [quantile](#quantile).

**Возвращаемое значение**

- Квантиль заданного уровня.

Тип: `Float32`.

!!! note "Примечания"
    Если в функцию не передать значений (при использовании `quantileTimingIf`), то вернётся [NaN](../../../sql-reference/data-types/float.md#data_type-float-nan-inf). Это необходимо для отделения подобных случаев от случаев, когда результат 0. Подробности про сортировку `NaN` смотрите в разделе [Секция ORDER BY](../../../sql-reference/statements/select/order-by.md#select-order-by).
+
**Пример**

Входная таблица:

``` text
┌─response_time─┬─weight─┐
│ 68 │ 1 │
│ 104 │ 2 │
│ 112 │ 3 │
│ 126 │ 2 │
│ 138 │ 1 │
│ 162 │ 1 │
└───────────────┴────────┘
```

Запрос:

``` sql
SELECT quantileTimingWeighted(response_time, weight) FROM t
```

Результат:

``` text
┌─quantileTimingWeighted(response_time, weight)─┐
│ 112 │
└───────────────────────────────────────────────┘
```

**Смотрите также**

- [median](../../../sql-reference/aggregate-functions/reference/median.md#median)
- [quantiles](../../../sql-reference/aggregate-functions/reference/quantiles.md#quantiles)

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/simplelinearregression.md b/docs/ru/sql-reference/aggregate-functions/reference/simplelinearregression.md new file mode 100644 index 00000000000..370b1bde8d2 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/simplelinearregression.md @@ -0,0 +1,44 @@ +--- +toc_priority: 220 +---

# simpleLinearRegression {#simplelinearregression}

Выполняет простую (одномерную) линейную регрессию.

``` sql
simpleLinearRegression(x, y)
```

Параметры:

- `x` — столбец со значениями объясняющей (независимой) переменной.
- `y` — столбец со значениями зависимой переменной.

Возвращаемые значения:

Константы `(a, b)` результирующей прямой `y = a*x + b`.

**Примеры**

``` sql
SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])
```

``` text
┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [0, 1, 2, 3])─┐
│ (1,0) │
└───────────────────────────────────────────────────────────────────┘
```

``` sql
SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])
```

``` text
┌─arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6])─┐
│ (1,3) │
└───────────────────────────────────────────────────────────────────┘
```

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/simplelinearregression/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/skewpop.md b/docs/ru/sql-reference/aggregate-functions/reference/skewpop.md new file mode 100644 index 00000000000..a6dee5dc5ef --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/skewpop.md @@ -0,0 +1,27 @@ +--- +toc_priority: 150 +---

# skewPop {#skewpop}

Вычисляет [коэффициент асимметрии](https://ru.wikipedia.org/wiki/Коэффициент_асимметрии) для последовательности.

``` sql
skewPop(expr)
```

**Параметры**

`expr` — [Выражение](../../syntax.md#syntax-expressions), возвращающее число.

**Возвращаемое значение**

Коэффициент асимметрии заданного распределения. Тип — [Float64](../../../sql-reference/data-types/float.md).

**Пример**

``` sql
SELECT skewPop(value) FROM series_with_value_column
```

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/skewpop/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/skewsamp.md b/docs/ru/sql-reference/aggregate-functions/reference/skewsamp.md new file mode 100644 index 00000000000..171eb5e304a --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/skewsamp.md @@ -0,0 +1,29 @@ +--- +toc_priority: 151 +---

# skewSamp {#skewsamp}

Вычисляет [выборочный коэффициент асимметрии](https://ru.wikipedia.org/wiki/Статистика_(функция_выборки)) для последовательности.
+ +Он представляет собой несмещенную оценку асимметрии случайной величины, если переданные значения образуют ее выборку. + +``` sql +skewSamp(expr) +``` + +**Параметры** + +`expr` — [Выражение](../../syntax.md#syntax-expressions), возвращающее число. + +**Возвращаемое значение** + +Коэффициент асимметрии заданного распределения. Тип — [Float64](../../../sql-reference/data-types/float.md). Если `n <= 1` (`n` — размер выборки), тогда функция возвращает `nan`. + +**Пример** + +``` sql +SELECT skewSamp(value) FROM series_with_value_column +``` + +[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/skewsamp/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/stddevpop.md b/docs/ru/sql-reference/aggregate-functions/reference/stddevpop.md new file mode 100644 index 00000000000..ada8b8884cd --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/stddevpop.md @@ -0,0 +1,12 @@ +--- +toc_priority: 30 +--- + +# stddevPop {#stddevpop} + +Результат равен квадратному корню от `varPop(x)`. + +!!! note "Примечание" + Функция использует вычислительно неустойчивый алгоритм. Если для ваших расчётов необходима [вычислительная устойчивость](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость), используйте функцию `stddevPopStable`. Она работает медленнее, но обеспечивает меньшую вычислительную ошибку. + +[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/stddevpop/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/stddevsamp.md b/docs/ru/sql-reference/aggregate-functions/reference/stddevsamp.md new file mode 100644 index 00000000000..952b6bcde68 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/stddevsamp.md @@ -0,0 +1,12 @@ +--- +toc_priority: 31 +--- + +# stddevSamp {#stddevsamp} + +Результат равен квадратному корню от `varSamp(x)`. + +!!! note "Примечание" + Функция использует вычислительно неустойчивый алгоритм. Если для ваших расчётов необходима [вычислительная устойчивость](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость), используйте функцию `stddevSampStable`. Она работает медленнее, но обеспечивает меньшую вычислительную ошибку. + +[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/stddevsamp/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/stochasticlinearregression.md b/docs/ru/sql-reference/aggregate-functions/reference/stochasticlinearregression.md new file mode 100644 index 00000000000..0b268e9ea1b --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/stochasticlinearregression.md @@ -0,0 +1,89 @@ +--- +toc_priority: 221 +--- + +# stochasticLinearRegression {#agg_functions-stochasticlinearregression} + +Функция реализует стохастическую линейную регрессию. Поддерживает пользовательские параметры для скорости обучения, коэффициента регуляризации L2, размера mini-batch и имеет несколько методов обновления весов ([Adam](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (по умолчанию), [simple SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [Momentum](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)). + +### Параметры {#agg_functions-stochasticlinearregression-parameters} + +Есть 4 настраиваемых параметра. 
Они передаются в функцию последовательно, при этом указывать все четыре не обязательно: для пропущенных параметров используются значения по умолчанию. Однако для построения хорошей модели обычно требуется настройка этих параметров.

``` text
stochasticLinearRegression(1.0, 1.0, 10, 'SGD')
```

1. Скорость обучения — коэффициент длины шага при выполнении градиентного спуска. Слишком большая скорость обучения может привести к бесконечным весам модели. По умолчанию `0.00001`.
2. Коэффициент регуляризации L2. Помогает предотвратить переобучение. По умолчанию `0.1`.
3. Размер mini-batch задаёт количество элементов, чьи градиенты будут вычислены и просуммированы при выполнении одного шага градиентного спуска. Чистый стохастический спуск использует один элемент, однако использование mini-batch (около 10 элементов) делает градиентные шаги более стабильными. По умолчанию `15`.
4. Метод обновления весов, можно выбрать один из следующих: `Adam` (по умолчанию), `SGD`, `Momentum`, `Nesterov`. `Momentum` и `Nesterov` требуют больше вычислительных ресурсов и памяти, однако отличаются более высокой скоростью сходимости и большей устойчивостью среди методов стохастического градиента.

### Использование {#agg_functions-stochasticlinearregression-usage}

`stochasticLinearRegression` используется на двух этапах: построение модели и предсказание новых данных. Чтобы построить модель и сохранить её состояние для дальнейшего использования, мы используем комбинатор `-State`.
Для прогнозирования мы используем функцию [evalMLMethod](../../functions/machine-learning-functions.md#machine_learning_methods-evalmlmethod), которая принимает в качестве аргументов состояние и свойства для прогнозирования.



**1.** Построение модели

Пример запроса:

``` sql
CREATE TABLE IF NOT EXISTS train_data
(
    param1 Float64,
    param2 Float64,
    target Float64
) ENGINE = Memory;

CREATE TABLE your_model ENGINE = Memory AS SELECT
stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2)
AS state FROM train_data;
```

Здесь нам также нужно вставить данные в таблицу `train_data`. Количество параметров не фиксировано, оно зависит только от количества аргументов, переданных в `linearRegressionState`. Все они должны быть числовыми значениями.
Обратите внимание, что столбец с целевым значением (которое мы хотели бы научиться предсказывать) вставляется в качестве первого аргумента.

**2.** Прогнозирование

После сохранения состояния в таблице мы можем использовать его несколько раз для прогнозирования или объединить с другими состояниями и создать новые, улучшенные модели.

``` sql
WITH (SELECT state FROM your_model) AS model SELECT
evalMLMethod(model, param1, param2) FROM test_data
```

Запрос возвращает столбец прогнозируемых значений. Обратите внимание, что первый аргумент `evalMLMethod` — это объект `AggregateFunctionState`, далее идут столбцы свойств.

`test_data` — это таблица, подобная `train_data`, но при этом может не содержать целевое значение.

### Примечания {#agg_functions-stochasticlinearregression-notes}

1. Объединить две модели можно следующим запросом:



    ``` sql
    SELECT state1 + state2 FROM your_models
    ```

где таблица `your_models` содержит обе модели. Запрос вернёт новый объект `AggregateFunctionState`.

2. Пользователь может получать веса созданной модели для своих целей без сохранения модели, если не использовать комбинатор `-State`.
+


    ``` sql
    SELECT stochasticLinearRegression(0.01)(target, param1, param2) FROM train_data
    ```

Подобный запрос обучит модель и вернёт её веса: сначала коэффициенты при параметрах модели, затем смещение. Таким образом, в приведенном выше примере запрос вернёт столбец с тремя значениями.

**Смотрите также**

- [stochasticLogisticRegression](../../../sql-reference/aggregate-functions/reference/stochasticlinearregression.md#agg_functions-stochasticlogisticregression)
- [Отличие линейной от логистической регрессии.](https://stackoverflow.com/questions/12146914/what-is-the-difference-between-linear-regression-and-logistic-regression)

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md b/docs/ru/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md new file mode 100644 index 00000000000..01d3a0797bd --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md @@ -0,0 +1,57 @@ +--- +toc_priority: 222 +---

# stochasticLogisticRegression {#agg_functions-stochasticlogisticregression}

Функция реализует стохастическую логистическую регрессию. Её можно использовать для задачи бинарной классификации. Функция поддерживает те же пользовательские параметры, что и `stochasticLinearRegression`, и работает таким же образом.

### Параметры {#agg_functions-stochasticlogisticregression-parameters}

Параметры те же, что и в `stochasticLinearRegression`:
`learning rate`, `l2 regularization coefficient`, `mini-batch size`, `method for updating weights`.
Смотрите раздел [parameters](../../../sql-reference/aggregate-functions/reference/stochasticlinearregression.md#agg_functions-stochasticlinearregression-parameters).

``` text
stochasticLogisticRegression(1.0, 1.0, 10, 'SGD')
```

1. Построение модели



Смотрите раздел `Построение модели` в описании [stochasticLinearRegression](../../../sql-reference/aggregate-functions/reference/stochasticlinearregression.md#stochasticlinearregression-usage-fitting).

    Прогнозируемые метки должны быть в диапазоне \[-1, 1\].

2. Прогнозирование



Используя сохраненное состояние, можно предсказать вероятность наличия у объекта метки `1`.

    ``` sql
    WITH (SELECT state FROM your_model) AS model SELECT
    evalMLMethod(model, param1, param2) FROM test_data
    ```

Запрос возвращает столбец вероятностей. Обратите внимание, что первый аргумент `evalMLMethod` — это объект `AggregateFunctionState`, далее идут столбцы свойств.

Мы также можем задать порог вероятности, по которому элементам присваиваются разные метки.

    ``` sql
    SELECT ans < 1.1 AND ans > 0.5 FROM
    (WITH (SELECT state FROM your_model) AS model SELECT
    evalMLMethod(model, param1, param2) AS ans FROM test_data)
    ```

Тогда результатом будут метки.

`test_data` — это таблица, подобная `train_data`, но при этом может не содержать целевое значение.
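
Для полноты ниже приведён набросок этапа построения модели по аналогии с `stochasticLinearRegression` (имена таблицы и столбцов условные):

``` sql
-- обучение модели и сохранение её состояния с помощью комбинатора -State
CREATE TABLE your_model ENGINE = Memory AS SELECT
stochasticLogisticRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2)
AS state FROM train_data;
```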
+
**Смотрите также**

- [stochasticLinearRegression](../../../sql-reference/aggregate-functions/reference/stochasticlinearregression.md#agg_functions-stochasticlinearregression)
- [Отличие линейной от логистической регрессии](https://moredez.ru/q/51225972/)

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/sum.md b/docs/ru/sql-reference/aggregate-functions/reference/sum.md new file mode 100644 index 00000000000..5fa769f3479 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/sum.md @@ -0,0 +1,10 @@ +--- +toc_priority: 4 +---

# sum {#agg_function-sum}

Вычисляет сумму.
Работает только для чисел.

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/sum/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/summap.md b/docs/ru/sql-reference/aggregate-functions/reference/summap.md new file mode 100644 index 00000000000..d127d7df491 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/summap.md @@ -0,0 +1,43 @@ +--- +toc_priority: 141 +---

# sumMap {#agg_functions-summap}

Синтаксис: `sumMap(key, value)` или `sumMap(Tuple(key, value))`

Производит суммирование массива `value` по соответствующим ключам, заданным в массиве `key`.
Количество элементов в `key` и `value` должно быть одинаковым для каждой строки, для которой происходит суммирование.
Возвращает кортеж из двух массивов: ключи в отсортированном порядке и значения, просуммированные по соответствующим ключам.

Пример:

``` sql
CREATE TABLE sum_map(
    date Date,
    timeslot DateTime,
    statusMap Nested(
        status UInt16,
        requests UInt64
    )
) ENGINE = Log;
INSERT INTO sum_map VALUES
    ('2000-01-01', '2000-01-01 00:00:00', [1, 2, 3], [10, 10, 10]),
    ('2000-01-01', '2000-01-01 00:00:00', [3, 4, 5], [10, 10, 10]),
    ('2000-01-01', '2000-01-01 00:01:00', [4, 5, 6], [10, 10, 10]),
    ('2000-01-01', '2000-01-01 00:01:00', [6, 7, 8], [10, 10, 10]);
SELECT
    timeslot,
    sumMap(statusMap.status, statusMap.requests)
FROM sum_map
GROUP BY timeslot
```

``` text
┌────────────timeslot─┬─sumMap(statusMap.status, statusMap.requests)─┐
│ 2000-01-01 00:00:00 │ ([1,2,3,4,5],[10,10,20,10,10]) │
│ 2000-01-01 00:01:00 │ ([4,5,6,7,8],[10,10,20,10,10]) │
└─────────────────────┴──────────────────────────────────────────────┘
```

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/summap/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/sumwithoverflow.md b/docs/ru/sql-reference/aggregate-functions/reference/sumwithoverflow.md new file mode 100644 index 00000000000..845adc510f2 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/sumwithoverflow.md @@ -0,0 +1,11 @@ +--- +toc_priority: 140 +---

# sumWithOverflow {#sumwithoverflowx}

Вычисляет сумму чисел, используя для результата тот же тип данных, что и для входных параметров. Если сумма превысит максимальное значение для заданного типа данных, то она будет вычислена с переполнением.

Работает только для чисел.
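
Набросок, иллюстрирующий вычисление с переполнением:

``` sql
-- три значения UInt8 по 100: сумма 300 не помещается в UInt8
-- и вычисляется с переполнением: 300 - 256 = 44
SELECT sumWithOverflow(toUInt8(100)) FROM numbers(3);
```

Для сравнения: обычная функция `sum` расширила бы тип результата до `UInt64` и вернула `300`.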
+
[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/sumwithoverflow/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/timeseriesgroupratesum.md b/docs/ru/sql-reference/aggregate-functions/reference/timeseriesgroupratesum.md new file mode 100644 index 00000000000..da5935c8f61 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/timeseriesgroupratesum.md @@ -0,0 +1,18 @@ +--- +toc_priority: 171 +---

# timeSeriesGroupRateSum {#agg-function-timeseriesgroupratesum}

Синтаксис: `timeSeriesGroupRateSum(uid, ts, val)`

Аналогично `timeSeriesGroupSum`, `timeSeriesGroupRateSum` вычисляет производные по timestamp для каждого ряда, а затем суммирует полученные производные по всем рядам для одного и того же значения timestamp.
Ряды также должны быть отсортированы по возрастанию timestamp.

Для примера из описания `timeSeriesGroupSum` результат будет следующим:

``` text
[(2,0),(3,0.1),(7,0.3),(8,0.3),(12,0.3),(17,0.3),(18,0.3),(24,0.3),(25,0.1)]
```

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/timeseriesgroupratesum/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/timeseriesgroupsum.md b/docs/ru/sql-reference/aggregate-functions/reference/timeseriesgroupsum.md new file mode 100644 index 00000000000..6a34c08c8c5 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/timeseriesgroupsum.md @@ -0,0 +1,59 @@ +--- +toc_priority: 170 +---

# timeSeriesGroupSum {#agg-function-timeseriesgroupsum}

Синтаксис: `timeSeriesGroupSum(uid, timestamp, value)`

`timeSeriesGroupSum` агрегирует временные ряды, в которых не совпадают моменты времени.
Функция использует линейную интерполяцию между двумя значениями времени, а затем суммирует значения для одного и того же момента (как измеренные, так и интерполированные) по всем рядам.

- `uid` — уникальный идентификатор временного ряда, `UInt64`.
- `timestamp` — имеет тип `Int64`, чтобы можно было учитывать милли- и микросекунды.
- `value` — значение метрики.

Функция возвращает массив кортежей с парами `(timestamp, aggregated_value)`.

Временные ряды должны быть отсортированы по возрастанию `timestamp`.

Пример:

``` text
┌─uid─┬─timestamp─┬─value─┐
│ 1 │ 2 │ 0.2 │
│ 1 │ 7 │ 0.7 │
│ 1 │ 12 │ 1.2 │
│ 1 │ 17 │ 1.7 │
│ 1 │ 25 │ 2.5 │
│ 2 │ 3 │ 0.6 │
│ 2 │ 8 │ 1.6 │
│ 2 │ 12 │ 2.4 │
│ 2 │ 18 │ 3.6 │
│ 2 │ 24 │ 4.8 │
└─────┴───────────┴───────┘
```

``` sql
CREATE TABLE time_series(
    uid UInt64,
    timestamp Int64,
    value Float64
) ENGINE = Memory;
INSERT INTO time_series VALUES
    (1,2,0.2),(1,7,0.7),(1,12,1.2),(1,17,1.7),(1,25,2.5),
    (2,3,0.6),(2,8,1.6),(2,12,2.4),(2,18,3.6),(2,24,4.8);

SELECT timeSeriesGroupSum(uid, timestamp, value)
FROM (
    SELECT * FROM time_series ORDER BY timestamp ASC
);
```

Результат:

``` text
[(2,0.2),(3,0.9),(7,2.1),(8,2.4),(12,3.6),(17,5.1),(18,5.4),(24,7.2),(25,2.5)]
```

[Оригинальная статья](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/timeseriesgroupsum/) diff --git a/docs/ru/sql-reference/aggregate-functions/reference/topk.md b/docs/ru/sql-reference/aggregate-functions/reference/topk.md new file mode 100644 index 00000000000..6aefd38bf34 --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/topk.md @@ -0,0 +1,39 @@ +--- +toc_priority: 108 +---

# topK {#topk}

Возвращает массив наиболее часто встречающихся значений в указанном столбце.
The resulting array is sorted in descending order of the frequency of values (not by the values themselves).
+
+Implements the [Filtered Space-Saving](http://www.l2f.inesc-id.pt/~fmmb/wiki/uploads/Work/misnis.ref0a.pdf) algorithm for TopK analysis, based on the reduce-and-combine algorithm from [Parallel Space Saving](https://arxiv.org/pdf/1401.0702.pdf).
+
+``` sql
+topK(N)(column)
+```
+
+This function does not provide a guaranteed result. In certain situations, errors might occur, and the function returns frequent values that are not the most frequent ones.
+
+We recommend using `N < 10`; performance degrades for large `N` values. The maximum possible value is `N = 65536`.
+
+**Arguments**
+
+- ‘N’ - the number of values to return.
+- ‘x’ – the column.
+
+**Example**
+
+Take the [OnTime](../../../getting-started/example-datasets/ontime.md) dataset and select the three most frequent values in the `AirlineID` column.
+
+``` sql
+SELECT topK(3)(AirlineID) AS res
+FROM ontime
+```
+
+``` text
+┌─res─────────────────┐
+│ [19393,19790,19805] │
+└─────────────────────┘
+```
+
+[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/topk/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/topkweighted.md b/docs/ru/sql-reference/aggregate-functions/reference/topkweighted.md
new file mode 100644
index 00000000000..20bd3ee85ff
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/topkweighted.md
@@ -0,0 +1,44 @@
+---
+toc_priority: 109
+---
+
+# topKWeighted {#topkweighted}
+
+Similar to `topK`, but additionally takes a positive integer parameter `weight`. Every value is accounted `weight` times for the frequency calculation.
+
+**Syntax**
+
+``` sql
+topKWeighted(N)(x, weight)
+```
+
+**Parameters**
+
+- `N` - the number of elements to return.
+
+**Arguments**
+
+- `x` – the value.
+- `weight` - the weight. [UInt64](../../../sql-reference/data-types/int-uint.md).
+
+**Returned value**
+
+Returns an array of the values with the maximum approximate sum of weights.
+
+**Example**
+
+Query:
+
+``` sql
+SELECT topKWeighted(10)(number, number) FROM numbers(1000)
+```
+
+Result:
+
+``` text
+┌─topKWeighted(10)(number, number)──────────┐
+│ [999,998,997,996,995,994,993,992,991,990] │
+└───────────────────────────────────────────┘
+```
+
+[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/topkweighted/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniq.md b/docs/ru/sql-reference/aggregate-functions/reference/uniq.md
new file mode 100644
index 00000000000..f5f3f198139
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/uniq.md
@@ -0,0 +1,42 @@
+---
+toc_priority: 190
+---
+
+# uniq {#agg_function-uniq}
+
+Calculates the approximate number of different values of the argument.
+
+``` sql
+uniq(x[, ...])
+```
+
+**Parameters**
+
+The function takes a variable number of parameters. Parameters can be numeric types, as well as `Tuple`, `Array`, `Date`, `DateTime`, and `String`.
+
+**Returned value**
+
+- A number of the [UInt64](../../../sql-reference/data-types/int-uint.md) type.
+
+**Implementation details**
+
+The function:
+
+- Calculates a hash for all parameters in the aggregate, then uses it in calculations.
+
+- Uses an adaptive sampling algorithm. As the calculation state, the function uses a sample of element hash values of up to 65536 elements.
+
+    This algorithm is very accurate and very efficient in CPU usage.
When a query contains a small number of these functions, using `uniq` is almost as efficient as using other aggregate functions.
+
+- The result is deterministic (it does not depend on the query processing order).
+
+This function is recommended for use in almost all scenarios.
+
+**See Also**
+
+- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined)
+- [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64)
+- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12)
+- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)
+
+[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/uniq/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined.md b/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined.md
new file mode 100644
index 00000000000..751dc1a8c98
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined.md
@@ -0,0 +1,53 @@
+---
+toc_priority: 192
+---
+
+# uniqCombined {#agg_function-uniqcombined}
+
+Calculates the approximate number of different values of the argument.
+
+``` sql
+uniqCombined(HLL_precision)(x[, ...])
+```
+
+The `uniqCombined` function is a good choice for calculating the number of different values.
+
+**Parameters**
+
+The function takes a variable number of parameters. Parameters can be numeric types, as well as `Tuple`, `Array`, `Date`, `DateTime`, and `String`.
+
+`HLL_precision` is the base-2 logarithm of the number of cells in [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog). It is optional: the function can be used as `uniqCombined(x[, ...])`. The default value of `HLL_precision` is 17, which effectively amounts to 96 KiB of space (2^17 cells, 6 bits each).
+
+**Returned value**
+
+- A number of the [UInt64](../../../sql-reference/data-types/int-uint.md) type.
+
+**Implementation details**
+
+The function:
+
+- Calculates a hash (64-bit for `String` and 32-bit for all other types) for all parameters in the aggregate, then uses it in calculations.
+
+- Uses a combination of three algorithms: array, hash table, and HyperLogLog with an error-correction table.
+
+    For a small number of distinct values, the array is used. When the set is larger, the hash table is used. For an even larger number of values, the HyperLogLog structure is used, which occupies a fixed amount of memory.
+
+- The result is deterministic (it does not depend on the query processing order).
+
+!!! note "Note"
+    Since a 32-bit hash is used for non-`String` types, the result will have a very high error for cardinalities significantly larger than `UINT_MAX` (the error grows quickly beyond several tens of billions of distinct values); in this case, use [uniqCombined64](#agg_function-uniqcombined64).
+
+Compared to the [uniq](#agg_function-uniq) function, `uniqCombined`:
+
+- Consumes several times less memory.
+- Calculates with several times higher accuracy.
+- Usually has slightly lower performance. In some scenarios, `uniqCombined` can perform better than `uniq`, for example, with distributed queries that transmit a large number of aggregation states over the network.
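+
+To get a feel for the trade-off, an illustrative query (hypothetical, not from the original page; the approximate counts vary slightly between versions and runs):
+
+``` sql
+SELECT
+    uniqExact(number)    AS exact,    -- exactly 1000000, state grows without bound
+    uniq(number)         AS sampled,  -- adaptive-sampling estimate, close to 1000000
+    uniqCombined(number) AS combined  -- array/hash-table/HyperLogLog estimate
+FROM numbers(1000000)
+```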
+
+**See Also**
+
+- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
+- [uniqCombined64](../../../sql-reference/aggregate-functions/reference/uniqcombined64.md#agg_function-uniqcombined64)
+- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12)
+- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)
+
+[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/uniqcombined/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined64.md b/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined64.md
new file mode 100644
index 00000000000..5db27fb301d
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/uniqcombined64.md
@@ -0,0 +1,9 @@
+---
+toc_priority: 193
+---
+
+# uniqCombined64 {#agg_function-uniqcombined64}
+
+Uses a 64-bit hash for all types, unlike [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined).
+
+[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/uniqcombined64/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniqexact.md b/docs/ru/sql-reference/aggregate-functions/reference/uniqexact.md
new file mode 100644
index 00000000000..3dd22b2b4bc
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/uniqexact.md
@@ -0,0 +1,27 @@
+---
+toc_priority: 191
+---
+
+# uniqExact {#agg_function-uniqexact}
+
+Calculates the exact number of different values of the arguments.
+
+``` sql
+uniqExact(x[, ...])
+```
+
+Use the `uniqExact` function if you absolutely need an exact result. Otherwise, use the [uniq](#agg_function-uniq) function.
+
+The `uniqExact` function uses more memory than `uniq`, because the size of the state grows without bound as the number of different values increases.
+
+**Parameters**
+
+The function takes a variable number of parameters. Parameters can be numeric types, as well as `Tuple`, `Array`, `Date`, `DateTime`, and `String`.
+
+**See Also**
+
+- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
+- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined)
+- [uniqHLL12](../../../sql-reference/aggregate-functions/reference/uniqhll12.md#agg_function-uniqhll12)
+
+[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/uniqexact/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md b/docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md
new file mode 100644
index 00000000000..09e52ac6833
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/uniqhll12.md
@@ -0,0 +1,41 @@
+---
+toc_priority: 194
+---
+
+# uniqHLL12 {#agg_function-uniqhll12}
+
+Calculates the approximate number of different values of the arguments, using the [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) algorithm.
+
+``` sql
+uniqHLL12(x[, ...])
+```
+
+**Parameters**
+
+The function takes a variable number of parameters. Parameters can be numeric types, as well as `Tuple`, `Array`, `Date`, `DateTime`, and `String`.
+
+**Returned value**
+
+- A number of the [UInt64](../../../sql-reference/data-types/int-uint.md) type.
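+
+A hypothetical illustration (the estimate deviates from the exact count within the error bounds described in the implementation details below):
+
+``` sql
+SELECT
+    uniqHLL12(number) AS estimate,  -- HyperLogLog approximation
+    uniqExact(number) AS exact      -- 1000000
+FROM numbers(1000000)
+```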
+
+**Implementation details**
+
+The function:
+
+- Calculates a hash for all parameters in the aggregate, then uses it in calculations.
+
+- Uses the HyperLogLog algorithm to approximate the number of different values of the arguments.
+
+    2^12 five-bit cells are used. The size of the state is slightly more than 2.5 KB. The result is not very accurate (error up to ~10%) for small data sets (<10K elements). However, for data sets of high cardinality (10K to 100M), the result is fairly accurate (error up to ~1.6%). Starting from 100M, the estimation error only grows, and for data sets of huge cardinality (1B+ elements) the function returns a result with very high inaccuracy.
+
+- The result is deterministic (it does not depend on the query processing order).
+
+We do not recommend using this function. In most cases, use the [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) or [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined) function.
+
+**See Also**
+
+- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq)
+- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md#agg_function-uniqcombined)
+- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact)
+
+[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/uniqhll12/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/varpop.md b/docs/ru/sql-reference/aggregate-functions/reference/varpop.md
new file mode 100644
index 00000000000..9615e03673b
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/varpop.md
@@ -0,0 +1,14 @@
+---
+toc_priority: 32
+---
+
+# varPop(x) {#varpopx}
+
+Calculates the amount `Σ((x - x̅)^2) / n`, where `n` is the sample size and `x̅` is the average value of `x`.
+
+In other words, the variance of a set of values. Returns `Float64`.
+
+!!! note "Note"
+    This function uses a numerically unstable algorithm. If you need [numerical stability](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость) in calculations, use the `varPopStable` function. It works more slowly but provides a lower computational error.
+
+[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/varpop/)
diff --git a/docs/ru/sql-reference/aggregate-functions/reference/varsamp.md b/docs/ru/sql-reference/aggregate-functions/reference/varsamp.md
new file mode 100644
index 00000000000..31aaac68e7b
--- /dev/null
+++ b/docs/ru/sql-reference/aggregate-functions/reference/varsamp.md
@@ -0,0 +1,16 @@
+---
+toc_priority: 33
+---
+
+# varSamp {#varsamp}
+
+Calculates the amount `Σ((x - x̅)^2) / (n - 1)`, where `n` is the sample size and `x̅` is the average value of `x`.
+
+It represents an unbiased estimate of the variance of a random variable if the values passed to the function form a sample of that variable.
+
+Returns `Float64`. When `n <= 1`, it returns `+∞`.
+
+!!! note "Note"
+    This function uses a numerically unstable algorithm. If you need [numerical stability](https://ru.wikipedia.org/wiki/Вычислительная_устойчивость) in calculations, use the `varSampStable` function. It works more slowly but provides a lower computational error.
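+
+A small worked check (hypothetical data): for `[1, 2, 3, 4]` the mean is 2.5 and the sum of squared deviations is 5, so `varPop` divides by `n = 4` and `varSamp` by `n - 1 = 3`.
+
+``` sql
+SELECT
+    varPop(x),  -- 5 / 4 = 1.25
+    varSamp(x)  -- 5 / 3 ≈ 1.6667
+FROM (SELECT arrayJoin([1, 2, 3, 4]) AS x)
+```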
+
+[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/varsamp/)
diff --git a/docs/ru/sql-reference/data-types/aggregatefunction.md b/docs/ru/sql-reference/data-types/aggregatefunction.md
index c6680197966..073e392c8da 100644
--- a/docs/ru/sql-reference/data-types/aggregatefunction.md
+++ b/docs/ru/sql-reference/data-types/aggregatefunction.md
@@ -23,7 +23,7 @@ CREATE TABLE t
 ) ENGINE = ...
 ```
 
-[uniq](../../sql-reference/data-types/aggregatefunction.md#agg_function-uniq), anyIf ([any](../../sql-reference/data-types/aggregatefunction.md#agg_function-any)+[If](../../sql-reference/data-types/aggregatefunction.md#agg-functions-combinator-if)) and [quantiles](../../sql-reference/data-types/aggregatefunction.md) are aggregate functions supported in ClickHouse.
+[uniq](../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq), anyIf ([any](../../sql-reference/aggregate-functions/reference/any.md#agg_function-any)+[If](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-if)) and [quantiles](../../sql-reference/aggregate-functions/reference/quantiles.md) are aggregate functions supported in ClickHouse.
 
 ## Usage features {#osobennosti-ispolzovaniia}
diff --git a/docs/ru/sql-reference/data-types/simpleaggregatefunction.md b/docs/ru/sql-reference/data-types/simpleaggregatefunction.md
index dc3286d035a..d36dc87e8ba 100644
--- a/docs/ru/sql-reference/data-types/simpleaggregatefunction.md
+++ b/docs/ru/sql-reference/data-types/simpleaggregatefunction.md
@@ -4,16 +4,16 @@
 
 The following aggregate functions are supported:
 
-- [`any`](../../sql-reference/aggregate-functions/reference.md#agg_function-any)
-- [`anyLast`](../../sql-reference/aggregate-functions/reference.md#anylastx)
-- [`min`](../../sql-reference/aggregate-functions/reference.md#agg_function-min)
-- [`max`](../../sql-reference/aggregate-functions/reference.md#agg_function-max)
-- [`sum`](../../sql-reference/aggregate-functions/reference.md#agg_function-sum)
-- [`groupBitAnd`](../../sql-reference/aggregate-functions/reference.md#groupbitand)
-- [`groupBitOr`](../../sql-reference/aggregate-functions/reference.md#groupbitor)
-- [`groupBitXor`](../../sql-reference/aggregate-functions/reference.md#groupbitxor)
-- [`groupArrayArray`](../../sql-reference/aggregate-functions/reference.md#agg_function-grouparray)
-- [`groupUniqArrayArray`](../../sql-reference/aggregate-functions/reference.md#groupuniqarrayx-groupuniqarraymax-sizex)
+- [`any`](../../sql-reference/aggregate-functions/reference/any.md#agg_function-any)
+- [`anyLast`](../../sql-reference/aggregate-functions/reference/anylast.md#anylastx)
+- [`min`](../../sql-reference/aggregate-functions/reference/min.md#agg_function-min)
+- [`max`](../../sql-reference/aggregate-functions/reference/max.md#agg_function-max)
+- [`sum`](../../sql-reference/aggregate-functions/reference/sum.md#agg_function-sum)
+- [`groupBitAnd`](../../sql-reference/aggregate-functions/reference/groupbitand.md#groupbitand)
+- [`groupBitOr`](../../sql-reference/aggregate-functions/reference/groupbitor.md#groupbitor)
+- [`groupBitXor`](../../sql-reference/aggregate-functions/reference/groupbitxor.md#groupbitxor)
+- [`groupArrayArray`](../../sql-reference/aggregate-functions/reference/grouparray.md#agg_function-grouparray)
+- [`groupUniqArrayArray`](../../sql-reference/aggregate-functions/reference/groupuniqarray.md#groupuniqarray)
 
 Values of the `SimpleAggregateFunction(func, Type)` look and are stored the same way as `Type`, so you do
not need to apply functions with `-Merge`/`-State` suffixes. `SimpleAggregateFunction` has better performance than `AggregateFunction` with the same aggregation function.
diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md
new file mode 100644
index 00000000000..81fe87745ce
--- /dev/null
+++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon.md
@@ -0,0 +1,86 @@
+# Polygon dictionaries {#slovari-polygonov}
+
+Polygon dictionaries allow you to efficiently search, among a set of polygons, for the polygon containing a given point.
+For example: determining a city district by geographic coordinates.
+
+Example configuration:
+
+``` xml
+<dictionary>
+    <structure>
+        <key>
+            <name>key</name>
+            <type>Array(Array(Array(Array(Float64))))</type>
+        </key>
+
+        <attribute>
+            <name>name</name>
+            <type>String</type>
+            <null_value></null_value>
+        </attribute>
+
+        <attribute>
+            <name>value</name>
+            <type>UInt64</type>
+            <null_value>0</null_value>
+        </attribute>
+    </structure>
+
+    <layout>
+        <polygon/>
+    </layout>
+</dictionary>
+```
+
+The corresponding [DDL query](../../../sql-reference/statements/create.md#create-dictionary-query):
+``` sql
+CREATE DICTIONARY polygon_dict_name (
+    key Array(Array(Array(Array(Float64)))),
+    name String,
+    value UInt64
+)
+PRIMARY KEY key
+LAYOUT(POLYGON())
+...
+```
+
+When configuring a polygon dictionary, the key must have one of two types:
+- A simple polygon. It is an array of points.
+- A multipolygon. It is an array of polygons. Each polygon is specified as a two-dimensional array of points: the first element of this array defines the outer boundary of the polygon,
+and the subsequent elements can define holes cut out of it.
+
+Points can be specified as an array or as a tuple of their coordinates. The current implementation supports only two-dimensional points.
+
+The user can [upload their own data](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) in all formats supported by ClickHouse.
+
+Three types of [in-memory storage](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md) are available:
+
+- POLYGON_SIMPLE. This is a naive implementation where every query makes a linear pass over all polygons and checks each one for containment, without any additional indexes.
+
+- POLYGON_INDEX_EACH. A separate index is built for each polygon, which allows containment to be checked quickly in most cases (optimized for geographic regions).
+A grid is also laid over the area under consideration, which significantly narrows down the number of polygons to consider.
+The grid is built by recursively dividing a cell into 16 equal parts and is configured with two parameters.
+Division stops when the recursion depth reaches MAX_DEPTH, or when a cell is crossed by no more than MIN_INTERSECTIONS polygons.
+To answer a query, the matching cell is located, and the indexes of the polygons stored in it are consulted one by one.
+
+- POLYGON_INDEX_CELL. This placement also builds the grid described above, with the same parameters available. For each leaf cell, an index is built over all the pieces of polygons falling into it, which allows a query to be answered quickly.
+
+- POLYGON. A synonym for POLYGON_INDEX_CELL.
+
+Dictionary queries are performed using the standard [functions](../../../sql-reference/functions/ext-dict-functions.md) for working with external dictionaries.
+An important difference is that here the keys are the points for which you want to find the polygon containing them.
+
+An example of working with the dictionary defined above:
+``` sql
+CREATE TABLE points (
+    x Float64,
+    y Float64
+)
+...
+SELECT tuple(x, y) AS key, dictGet(dict_name, 'name', key), dictGet(dict_name, 'value', key) FROM points ORDER BY x, y;
+```
+
+As a result of running the last command, for each point in the `points` table, the polygon of minimum area containing that point is found, and the requested attributes are returned.
diff --git a/docs/ru/sql-reference/functions/other-functions.md b/docs/ru/sql-reference/functions/other-functions.md
index f477482428d..468e15e7d57 100644
--- a/docs/ru/sql-reference/functions/other-functions.md
+++ b/docs/ru/sql-reference/functions/other-functions.md
@@ -1087,7 +1087,7 @@ SELECT k, runningAccumulate(sum_k) AS res FROM (SELECT number as k, sumState(k)
 └───┴─────┘
 ```
 
-The subquery generates `sumState` for every number from `0` to `9`. `sumState` returns the state of the [sum](../../sql-reference/aggregate-functions/reference.md#agg_function-sum) function, which contains the sum of a single number.
+The subquery generates `sumState` for every number from `0` to `9`. `sumState` returns the state of the [sum](../../sql-reference/aggregate-functions/reference/sum.md#agg_function-sum) function, which contains the sum of a single number.
 
 The whole query does the following:
diff --git a/docs/ru/sql-reference/statements/alter.md b/docs/ru/sql-reference/statements/alter.md
index 1cf061c174f..a706c8e23b6 100644
--- a/docs/ru/sql-reference/statements/alter.md
+++ b/docs/ru/sql-reference/statements/alter.md
@@ -461,14 +461,7 @@ OPTIMIZE TABLE table_not_partitioned PARTITION tuple() FINAL;
 ALTER TABLE table-name MODIFY TTL ttl-expression
 ```
 
-### Synchronicity of ALTER queries {#sinkhronnost-zaprosov-alter}
-
-For non-replicated tables, all `ALTER` queries are executed synchronously. For replicated tables, the query merely adds instructions for the corresponding actions to `ZooKeeper`, and the actions themselves are performed as soon as possible. However, the query can wait for these actions to be completed on all replicas.
-
-For `ALTER ... ATTACH|DETACH|DROP` queries, you can configure waiting with the `replication_alter_partitions_sync` setting.
-Possible values: `0` - do not wait, `1` - wait only for own execution (default), `2` - wait for all.
-
-### Mutations {#alter-mutations}
+### Mutations {#mutations}
 
 Mutations are a kind of ALTER query that allows changing or deleting data in a table. Unlike the standard `DELETE` and `UPDATE` queries intended for point changes of data, mutations are meant for heavy changes that affect many rows in a table. They are supported for table engines of the `MergeTree` family, including engines with replication.
@@ -504,6 +497,15 @@ ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name
 
 Entries for the most recently executed mutations are not deleted right away (the number of retained mutations is determined by the `finished_mutations_to_keep` table engine parameter). Older entries are deleted.
 
+### Synchronicity of ALTER queries {#synchronicity-of-alter-queries}
+
+For non-replicated tables, all `ALTER` queries are executed synchronously. For replicated tables, the query merely adds instructions for the corresponding actions to `ZooKeeper`, and the actions themselves are performed as soon as possible. However, the query can wait for these actions to be completed on all replicas.
+
+For `ALTER ... ATTACH|DETACH|DROP` queries, you can configure waiting with the `replication_alter_partitions_sync` setting.
+Possible values: `0` - do not wait, `1` - wait only for own execution (default), `2` - wait for all.
+
+For `ALTER TABLE ... UPDATE|DELETE` queries, the synchronicity of execution is defined by the [mutations_sync](../../operations/settings/settings.md#mutations_sync) setting.
+
 ## ALTER USER {#alter-user-statement}
 
 Changes a ClickHouse user account.
diff --git a/docs/ru/sql-reference/statements/misc.md b/docs/ru/sql-reference/statements/misc.md
index 77f9570ae47..e5d30b0e226 100644
--- a/docs/ru/sql-reference/statements/misc.md
+++ b/docs/ru/sql-reference/statements/misc.md
@@ -212,7 +212,7 @@ KILL MUTATION [ON CLUSTER cluster]
   [FORMAT format]
 ```
 
-Tries to stop the currently executing [mutations](alter.md#alter-mutations). The mutations to stop are selected from the [`system.mutations`](../../operations/system-tables.md#system_tables-mutations) table using the condition specified in the `WHERE` clause of the `KILL` query.
+Tries to stop the currently executing [mutations](alter.md#mutations). The mutations to stop are selected from the [`system.mutations`](../../operations/system-tables.md#system_tables-mutations) table using the condition specified in the `WHERE` clause of the `KILL` query.
 
 The test version of the query (`TEST`) only checks the user's permissions and displays the list of queries to stop.
diff --git a/docs/tools/requirements.txt b/docs/tools/requirements.txt
index d87375acc6a..7ace18b639c 100644
--- a/docs/tools/requirements.txt
+++ b/docs/tools/requirements.txt
@@ -21,7 +21,7 @@ mkdocs-htmlproofer-plugin==0.0.3
 mkdocs-macros-plugin==0.4.9
 nltk==3.5
 nose==1.3.7
-protobuf==3.12.2
+protobuf==3.12.4
 numpy==1.19.1
 Pygments==2.5.2
 pymdown-extensions==7.1
diff --git a/docs/tr/engines/table-engines/special/materializedview.md b/docs/tr/engines/table-engines/special/materializedview.md
index 485b5ae7d14..f947bf822ff 100644
--- a/docs/tr/engines/table-engines/special/materializedview.md
+++ b/docs/tr/engines/table-engines/special/materializedview.md
@@ -7,6 +7,6 @@ toc_title: MaterializedView
 
 # Materializedview {#materializedview}
 
-Used to implement materialized views (for more information, see [CREATE TABLE](../../../sql-reference/statements/create.md)). For storing data, it uses a different engine that is specified when creating the view. When reading from a table, it just uses that engine.
+Used to implement materialized views (for more information, see [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query)). For storing data, it uses a different engine that is specified when creating the view. When reading from a table, it just uses that engine.
[Orijinal makale](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/) diff --git a/docs/zh/engines/table-engines/special/materializedview.md b/docs/zh/engines/table-engines/special/materializedview.md index c634baafbc6..21c4008a158 100644 --- a/docs/zh/engines/table-engines/special/materializedview.md +++ b/docs/zh/engines/table-engines/special/materializedview.md @@ -1,5 +1,5 @@ # MaterializedView {#materializedview} -物化视图的使用(更多信息请参阅 [CREATE TABLE](../../../engines/table-engines/special/materializedview.md) )。它需要使用一个不同的引擎来存储数据,这个引擎要在创建物化视图时指定。当从表中读取时,它就会使用该引擎。 +物化视图的使用(更多信息请参阅 [CREATE TABLE](../../../sql-reference/statements/create.md#create-table-query) )。它需要使用一个不同的引擎来存储数据,这个引擎要在创建物化视图时指定。当从表中读取时,它就会使用该引擎。 [来源文章](https://clickhouse.tech/docs/en/operations/table_engines/materializedview/) diff --git a/src/Access/IAccessStorage.cpp b/src/Access/IAccessStorage.cpp index 6813b5eb558..ffedfb038a4 100644 --- a/src/Access/IAccessStorage.cpp +++ b/src/Access/IAccessStorage.cpp @@ -1,6 +1,4 @@ #include -#include -#include #include #include #include @@ -39,110 +37,71 @@ namespace } - template > - ResultType doTry(const Func & func) + template + bool tryCall(const Func & function) { try { - return func(); + function(); + return true; } - catch (Exception &) + catch (...) { - return {}; + return false; } } - template , - typename ResultType = std::conditional_t, void, std::vector>> - ResultType applyToMultipleEntities( - const std::vector & multiple_entities, - const ApplyFunc & apply_function, - const char * error_message_format [[maybe_unused]] = nullptr, - const GetNameFunc & get_name_function [[maybe_unused]] = nullptr) + class ErrorsTracker { - std::optional exception; - std::vector success; + public: + explicit ErrorsTracker(size_t count_) { succeed.reserve(count_); } - auto helper = [&](const auto & apply_and_store_result_function) + template + bool tryCall(const Func & func) { - for (size_t i = 0; i != multiple_entities.size(); ++i) + try { - try - { - apply_and_store_result_function(multiple_entities[i]); - if constexpr (!ignore_errors) - success[i] = true; - } - catch (Exception & e) - { - if (!ignore_errors && !exception) - exception.emplace(e); - } - catch (Poco::Exception & e) - { - if (!ignore_errors && !exception) - exception.emplace(Exception::CreateFromPocoTag{}, e); - } - catch (std::exception & e) - { - if (!ignore_errors && !exception) - exception.emplace(Exception::CreateFromSTDTag{}, e); - } + func(); } - }; - - if constexpr (std::is_same_v) - { - if (multiple_entities.empty()) - return; - - if (multiple_entities.size() == 1) + catch (Exception & e) { - apply_function(multiple_entities.front()); - return; + if (!exception) + exception.emplace(e); + succeed.push_back(false); + return false; } - - if constexpr (!ignore_errors) - success.resize(multiple_entities.size(), false); - - helper(apply_function); - - if (ignore_errors || !exception) - return; - } - else - { - ResultType result; - if (multiple_entities.empty()) - return result; - - if (multiple_entities.size() == 1) + catch (Poco::Exception & e) { - result.emplace_back(apply_function(multiple_entities.front())); - return result; + if (!exception) + exception.emplace(Exception::CreateFromPocoTag{}, e); + succeed.push_back(false); + return false; } - - result.reserve(multiple_entities.size()); - if constexpr (!ignore_errors) - success.resize(multiple_entities.size(), false); - - helper([&](const T & entity) { result.emplace_back(apply_function(entity)); }); - - if (ignore_errors || !exception) - 
return result; + catch (std::exception & e) + { + if (!exception) + exception.emplace(Exception::CreateFromSTDTag{}, e); + succeed.push_back(false); + return false; + } + succeed.push_back(true); + return true; } - if constexpr (!ignore_errors) + bool errors() const { return exception.has_value(); } + + void showErrors(const char * format, const std::function & get_name_function) { + if (!exception) + return; + Strings succeeded_names_list; Strings failed_names_list; - for (size_t i = 0; i != multiple_entities.size(); ++i) + for (size_t i = 0; i != succeed.size(); ++i) { - const auto & entity = multiple_entities[i]; - String name = get_name_function(entity); - if (success[i]) + String name = get_name_function(i); + if (succeed[i]) succeeded_names_list.emplace_back(name); else failed_names_list.emplace_back(name); @@ -152,14 +111,17 @@ namespace if (succeeded_names.empty()) succeeded_names = "none"; - String error_message = error_message_format; + String error_message = format; boost::replace_all(error_message, "{succeeded_names}", succeeded_names); boost::replace_all(error_message, "{failed_names}", failed_names); exception->addMessage(error_message); exception->rethrow(); } - __builtin_unreachable(); - } + + private: + std::vector succeed; + std::optional exception; + }; } @@ -216,7 +178,11 @@ bool IAccessStorage::exists(const UUID & id) const AccessEntityPtr IAccessStorage::tryReadBase(const UUID & id) const { - return doTry([&] { return readImpl(id); }); + AccessEntityPtr entity; + auto func = [&] { entity = readImpl(id); }; + if (!tryCall(func)) + return nullptr; + return entity; } @@ -228,7 +194,11 @@ String IAccessStorage::readName(const UUID & id) const std::optional IAccessStorage::tryReadName(const UUID & id) const { - return doTry([&] { return std::optional{readNameImpl(id)}; }); + String name; + auto func = [&] { name = readNameImpl(id); }; + if (!tryCall(func)) + return {}; + return name; } @@ -240,41 +210,77 @@ UUID IAccessStorage::insert(const AccessEntityPtr & entity) std::vector IAccessStorage::insert(const std::vector & multiple_entities) { - return applyToMultipleEntities( - multiple_entities, - [this](const AccessEntityPtr & entity) { return insertImpl(entity, /* replace_if_exists = */ false); }, - "Couldn't insert {failed_names}. Successfully inserted: {succeeded_names}", - [](const AccessEntityPtr & entity) { return entity->outputTypeAndName(); }); + ErrorsTracker tracker(multiple_entities.size()); + + std::vector ids; + for (const auto & entity : multiple_entities) + { + UUID id; + auto func = [&] { id = insertImpl(entity, /* replace_if_exists = */ false); }; + if (tracker.tryCall(func)) + ids.push_back(id); + } + + if (tracker.errors()) + { + auto get_name_function = [&](size_t i) { return multiple_entities[i]->outputTypeAndName(); }; + tracker.showErrors("Couldn't insert {failed_names}. 
Successfully inserted: {succeeded_names}", get_name_function); + } + + return ids; } std::optional IAccessStorage::tryInsert(const AccessEntityPtr & entity) { - return doTry([&] { return std::optional{insertImpl(entity, false)}; }); + UUID id; + auto func = [&] { id = insertImpl(entity, /* replace_if_exists = */ false); }; + if (!tryCall(func)) + return {}; + return id; } std::vector IAccessStorage::tryInsert(const std::vector & multiple_entities) { - return applyToMultipleEntities( - multiple_entities, - [this](const AccessEntityPtr & entity) { return insertImpl(entity, /* replace_if_exists = */ false); }); + std::vector ids; + for (const auto & entity : multiple_entities) + { + UUID id; + auto func = [&] { id = insertImpl(entity, /* replace_if_exists = */ false); }; + if (tryCall(func)) + ids.push_back(id); + } + return ids; } UUID IAccessStorage::insertOrReplace(const AccessEntityPtr & entity) { - return insertImpl(entity, true); + return insertImpl(entity, /* replace_if_exists = */ true); } std::vector IAccessStorage::insertOrReplace(const std::vector & multiple_entities) { - return applyToMultipleEntities( - multiple_entities, - [this](const AccessEntityPtr & entity) { return insertImpl(entity, /* replace_if_exists = */ true); }, - "Couldn't insert {failed_names}. Successfully inserted: {succeeded_names}", - [](const AccessEntityPtr & entity) -> String { return entity->outputTypeAndName(); }); + ErrorsTracker tracker(multiple_entities.size()); + + std::vector ids; + for (const auto & entity : multiple_entities) + { + UUID id; + auto func = [&] { id = insertImpl(entity, /* replace_if_exists = */ true); }; + if (tracker.tryCall(func)) + ids.push_back(id); + } + + if (tracker.errors()) + { + auto get_name_function = [&](size_t i) { return multiple_entities[i]->outputTypeAndName(); }; + tracker.showErrors("Couldn't insert {failed_names}. Successfully inserted: {succeeded_names}", get_name_function); + } + + return ids; } @@ -286,25 +292,39 @@ void IAccessStorage::remove(const UUID & id) void IAccessStorage::remove(const std::vector & ids) { - applyToMultipleEntities( - ids, - [this](const UUID & id) { removeImpl(id); }, - "Couldn't remove {failed_names}. Successfully removed: {succeeded_names}", - [this](const UUID & id) { return outputTypeAndNameOrID(*this, id); }); + ErrorsTracker tracker(ids.size()); + + for (const auto & id : ids) + { + auto func = [&] { removeImpl(id); }; + tracker.tryCall(func); + } + + if (tracker.errors()) + { + auto get_name_function = [&](size_t i) { return outputTypeAndNameOrID(*this, ids[i]); }; + tracker.showErrors("Couldn't remove {failed_names}. Successfully removed: {succeeded_names}", get_name_function); + } } bool IAccessStorage::tryRemove(const UUID & id) { - return doTry([&] { removeImpl(id); return true; }); + auto func = [&] { removeImpl(id); }; + return tryCall(func); } std::vector IAccessStorage::tryRemove(const std::vector & ids) { - return applyToMultipleEntities( - ids, - [this](const UUID & id) { removeImpl(id); return id; }); + std::vector removed_ids; + for (const auto & id : ids) + { + auto func = [&] { removeImpl(id); }; + if (tryCall(func)) + removed_ids.push_back(id); + } + return removed_ids; } @@ -316,25 +336,39 @@ void IAccessStorage::update(const UUID & id, const UpdateFunc & update_func) void IAccessStorage::update(const std::vector & ids, const UpdateFunc & update_func) { - applyToMultipleEntities( - ids, - [this, &update_func](const UUID & id) { updateImpl(id, update_func); }, - "Couldn't update {failed_names}. 
Successfully updated: {succeeded_names}", - [this](const UUID & id) { return outputTypeAndNameOrID(*this, id); }); + ErrorsTracker tracker(ids.size()); + + for (const auto & id : ids) + { + auto func = [&] { updateImpl(id, update_func); }; + tracker.tryCall(func); + } + + if (tracker.errors()) + { + auto get_name_function = [&](size_t i) { return outputTypeAndNameOrID(*this, ids[i]); }; + tracker.showErrors("Couldn't update {failed_names}. Successfully updated: {succeeded_names}", get_name_function); + } } bool IAccessStorage::tryUpdate(const UUID & id, const UpdateFunc & update_func) { - return doTry([&] { updateImpl(id, update_func); return true; }); + auto func = [&] { updateImpl(id, update_func); }; + return tryCall(func); } std::vector IAccessStorage::tryUpdate(const std::vector & ids, const UpdateFunc & update_func) { - return applyToMultipleEntities( - ids, - [this, &update_func](const UUID & id) { updateImpl(id, update_func); return id; }); + std::vector updated_ids; + for (const auto & id : ids) + { + auto func = [&] { updateImpl(id, update_func); }; + if (tryCall(func)) + updated_ids.push_back(id); + } + return updated_ids; } diff --git a/src/Columns/ColumnTuple.cpp b/src/Columns/ColumnTuple.cpp index 4ce5ab7b2a3..87e5e37db51 100644 --- a/src/Columns/ColumnTuple.cpp +++ b/src/Columns/ColumnTuple.cpp @@ -349,7 +349,7 @@ void ColumnTuple::updatePermutation(bool reverse, size_t limit, int nan_directio for (const auto& column : columns) { column->updatePermutation(reverse, limit, nan_direction_hint, res, equal_range); - while (limit && limit <= equal_range.back().first) + while (limit && !equal_range.empty() && limit <= equal_range.back().first) equal_range.pop_back(); if (equal_range.empty()) diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index ce7f5cf3942..59c99a61371 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -175,7 +175,6 @@ namespace ErrorCodes extern const int TOO_BIG_AST = 168; extern const int BAD_TYPE_OF_FIELD = 169; extern const int BAD_GET = 170; - extern const int BLOCKS_HAVE_DIFFERENT_STRUCTURE = 171; extern const int CANNOT_CREATE_DIRECTORY = 172; extern const int CANNOT_ALLOCATE_MEMORY = 173; extern const int CYCLIC_ALIASES = 174; diff --git a/src/Common/config.h.in b/src/Common/config.h.in index ff4e5e8c6b3..3a67b2a8f2c 100644 --- a/src/Common/config.h.in +++ b/src/Common/config.h.in @@ -12,4 +12,5 @@ #cmakedefine01 USE_CASSANDRA #cmakedefine01 USE_SENTRY #cmakedefine01 USE_GRPC +#cmakedefine01 USE_STATS #cmakedefine01 CLICKHOUSE_SPLIT_BINARY diff --git a/src/Core/Block.cpp b/src/Core/Block.cpp index da8bfa5451b..453d4b7e4ce 100644 --- a/src/Core/Block.cpp +++ b/src/Core/Block.cpp @@ -24,7 +24,6 @@ namespace ErrorCodes extern const int POSITION_OUT_OF_BOUND; extern const int NOT_FOUND_COLUMN_IN_BLOCK; extern const int SIZES_OF_COLUMNS_DOESNT_MATCH; - extern const int BLOCKS_HAVE_DIFFERENT_STRUCTURE; } @@ -477,7 +476,7 @@ static ReturnType checkBlockStructure(const Block & lhs, const Block & rhs, cons size_t columns = rhs.columns(); if (lhs.columns() != columns) return on_error("Block structure mismatch in " + context_description + " stream: different number of columns:\n" - + lhs.dumpStructure() + "\n" + rhs.dumpStructure(), ErrorCodes::BLOCKS_HAVE_DIFFERENT_STRUCTURE); + + lhs.dumpStructure() + "\n" + rhs.dumpStructure(), ErrorCodes::LOGICAL_ERROR); for (size_t i = 0; i < columns; ++i) { @@ -486,18 +485,18 @@ static ReturnType checkBlockStructure(const Block & lhs, const Block & rhs, cons if (actual.name != 
expected.name) return on_error("Block structure mismatch in " + context_description + " stream: different names of columns:\n" - + lhs.dumpStructure() + "\n" + rhs.dumpStructure(), ErrorCodes::BLOCKS_HAVE_DIFFERENT_STRUCTURE); + + lhs.dumpStructure() + "\n" + rhs.dumpStructure(), ErrorCodes::LOGICAL_ERROR); if (!actual.type->equals(*expected.type)) return on_error("Block structure mismatch in " + context_description + " stream: different types:\n" - + lhs.dumpStructure() + "\n" + rhs.dumpStructure(), ErrorCodes::BLOCKS_HAVE_DIFFERENT_STRUCTURE); + + lhs.dumpStructure() + "\n" + rhs.dumpStructure(), ErrorCodes::LOGICAL_ERROR); if (!actual.column || !expected.column) continue; if (actual.column->getName() != expected.column->getName()) return on_error("Block structure mismatch in " + context_description + " stream: different columns:\n" - + lhs.dumpStructure() + "\n" + rhs.dumpStructure(), ErrorCodes::BLOCKS_HAVE_DIFFERENT_STRUCTURE); + + lhs.dumpStructure() + "\n" + rhs.dumpStructure(), ErrorCodes::LOGICAL_ERROR); if (isColumnConst(*actual.column) && isColumnConst(*expected.column)) { @@ -507,7 +506,7 @@ static ReturnType checkBlockStructure(const Block & lhs, const Block & rhs, cons if (actual_value != expected_value) return on_error("Block structure mismatch in " + context_description + " stream: different values of constants, actual: " + applyVisitor(FieldVisitorToString(), actual_value) + ", expected: " + applyVisitor(FieldVisitorToString(), expected_value), - ErrorCodes::BLOCKS_HAVE_DIFFERENT_STRUCTURE); + ErrorCodes::LOGICAL_ERROR); } } diff --git a/src/Core/Settings.h b/src/Core/Settings.h index ad03fa706d1..ac325538923 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -393,6 +393,7 @@ struct Settings : public SettingsCollection M(SettingBool, allow_experimental_geo_types, false, "Allow geo data types such as Point, Ring, Polygon, MultiPolygon", 0) \ M(SettingBool, data_type_default_nullable, false, "Data types without NULL or NOT NULL will make Nullable", 0) \ M(SettingBool, cast_keep_nullable, false, "CAST operator keep Nullable for result data type", 0) \ + M(SettingBool, alter_partition_verbose_result, false, "Output information about affected parts. Currently works only for FREEZE and ATTACH commands.", 0) \ \ /** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. 
*/ \ \ diff --git a/src/DataStreams/ConvertingBlockInputStream.cpp b/src/DataStreams/ConvertingBlockInputStream.cpp index 368ee7083b1..826e802296e 100644 --- a/src/DataStreams/ConvertingBlockInputStream.cpp +++ b/src/DataStreams/ConvertingBlockInputStream.cpp @@ -12,7 +12,7 @@ namespace DB namespace ErrorCodes { extern const int THERE_IS_NO_COLUMN; - extern const int BLOCKS_HAVE_DIFFERENT_STRUCTURE; + extern const int ILLEGAL_COLUMN; extern const int NUMBER_OF_COLUMNS_DOESNT_MATCH; } @@ -75,11 +75,11 @@ ConvertingBlockInputStream::ConvertingBlockInputStream( if (!isColumnConst(*src_elem.column)) throw Exception("Cannot convert column " + backQuoteIfNeed(res_elem.name) + " because it is non constant in source stream but must be constant in result", - ErrorCodes::BLOCKS_HAVE_DIFFERENT_STRUCTURE); + ErrorCodes::ILLEGAL_COLUMN); else if (assert_cast(*src_elem.column).getField() != assert_cast(*res_elem.column).getField()) throw Exception("Cannot convert column " + backQuoteIfNeed(res_elem.name) + " because it is constant but values of constants are different in source and result", - ErrorCodes::BLOCKS_HAVE_DIFFERENT_STRUCTURE); + ErrorCodes::ILLEGAL_COLUMN); } /// Check conversion by dry run CAST function. diff --git a/src/Dictionaries/PolygonDictionary.cpp b/src/Dictionaries/PolygonDictionary.cpp index 01c9522e952..eb6f0c6387c 100644 --- a/src/Dictionaries/PolygonDictionary.cpp +++ b/src/Dictionaries/PolygonDictionary.cpp @@ -1,12 +1,11 @@ -#include -#include -#include -#include -#include #include "PolygonDictionary.h" #include "DictionaryBlockInputStream.h" #include "DictionaryFactory.h" +#include +#include +#include + #include namespace DB @@ -191,7 +190,7 @@ void IPolygonDictionary::createAttributes() } } -void IPolygonDictionary::blockToAttributes(const DB::Block &block) +void IPolygonDictionary::blockToAttributes(const DB::Block & block) { const auto rows = block.rows(); element_count += rows; @@ -222,8 +221,31 @@ void IPolygonDictionary::loadData() blockToAttributes(block); stream->readSuffix(); - for (auto & polygon : polygons) + std::vector areas; + areas.reserve(polygons.size()); + + std::vector> polygon_ids; + polygon_ids.reserve(polygons.size()); + for (size_t i = 0; i < polygons.size(); ++i) + { + auto & polygon = polygons[i]; bg::correct(polygon); + areas.push_back(bg::area(polygon)); + polygon_ids.emplace_back(polygon, i); + } + sort(polygon_ids.begin(), polygon_ids.end(), [& areas](const auto & lhs, const auto & rhs) + { + return areas[lhs.second] < areas[rhs.second]; + }); + std::vector correct_ids; + correct_ids.reserve(polygon_ids.size()); + for (size_t i = 0; i < polygon_ids.size(); ++i) + { + auto & polygon = polygon_ids[i]; + correct_ids.emplace_back(ids[polygon.second]); + polygons[i] = polygon.first; + } + ids = correct_ids; } void IPolygonDictionary::calculateBytesAllocated() @@ -233,7 +255,7 @@ void IPolygonDictionary::calculateBytesAllocated() bytes_allocated += column->allocatedBytes(); } -std::vector IPolygonDictionary::extractPoints(const Columns &key_columns) +std::vector IPolygonDictionary::extractPoints(const Columns & key_columns) { if (key_columns.size() != 2) throw Exception{"Expected two columns of coordinates", ErrorCodes::BAD_ARGUMENTS}; @@ -249,7 +271,7 @@ std::vector IPolygonDictionary::extractPoints(const C return result; } -void IPolygonDictionary::has(const Columns &key_columns, const DataTypes &, PaddedPODArray &out) const +void IPolygonDictionary::has(const Columns & key_columns, const DataTypes &, PaddedPODArray & out) const { size_t row = 0; for 
(const auto & pt : extractPoints(key_columns)) @@ -505,7 +527,7 @@ struct Data ids.push_back((ids.empty() ? 0 : ids.back() + new_multi_polygon)); } - void addPoint(Float64 x, Float64 y) + void addPoint(IPolygonDictionary::Coord x, IPolygonDictionary::Coord y) { auto & last_polygon = dest.back(); auto & last_ring = (last_polygon.inners().empty() ? last_polygon.outer() : last_polygon.inners().back()); @@ -513,7 +535,7 @@ struct Data } }; -void addNewPoint(Float64 x, Float64 y, Data & data, Offset & offset) +void addNewPoint(IPolygonDictionary::Coord x, IPolygonDictionary::Coord y, Data & data, Offset & offset) { if (offset.atLastPointOfRing()) { @@ -600,7 +622,7 @@ void handlePointsReprByTuples(const IColumn * column, Data & data, Offset & offs } -void IPolygonDictionary::extractPolygons(const ColumnPtr &column) +void IPolygonDictionary::extractPolygons(const ColumnPtr & column) { Data data = {polygons, ids}; Offset offset; @@ -634,114 +656,5 @@ void IPolygonDictionary::extractPolygons(const ColumnPtr &column) } } -SimplePolygonDictionary::SimplePolygonDictionary( - const std::string & database_, - const std::string & name_, - const DictionaryStructure & dict_struct_, - DictionarySourcePtr source_ptr_, - const DictionaryLifetime dict_lifetime_, - InputType input_type_, - PointType point_type_) - : IPolygonDictionary(database_, name_, dict_struct_, std::move(source_ptr_), dict_lifetime_, input_type_, point_type_) -{ -} - -std::shared_ptr SimplePolygonDictionary::clone() const -{ - return std::make_shared( - this->database, - this->name, - this->dict_struct, - this->source_ptr->clone(), - this->dict_lifetime, - this->input_type, - this->point_type); -} - -bool SimplePolygonDictionary::find(const Point &point, size_t & id) const -{ - bool found = false; - double area = 0; - for (size_t i = 0; i < (this->polygons).size(); ++i) - { - if (bg::covered_by(point, (this->polygons)[i])) - { - double new_area = bg::area((this->polygons)[i]); - if (!found || new_area < area) - { - found = true; - id = i; - area = new_area; - } - } - } - return found; -} - -void registerDictionaryPolygon(DictionaryFactory & factory) -{ - auto create_layout = [=](const std::string &, - const DictionaryStructure & dict_struct, - const Poco::Util::AbstractConfiguration & config, - const std::string & config_prefix, - DictionarySourcePtr source_ptr) -> DictionaryPtr - { - const String database = config.getString(config_prefix + ".database", ""); - const String name = config.getString(config_prefix + ".name"); - - if (!dict_struct.key) - throw Exception{"'key' is required for a dictionary of layout 'polygon'", ErrorCodes::BAD_ARGUMENTS}; - if (dict_struct.key->size() != 1) - throw Exception{"The 'key' should consist of a single attribute for a dictionary of layout 'polygon'", - ErrorCodes::BAD_ARGUMENTS}; - IPolygonDictionary::InputType input_type; - IPolygonDictionary::PointType point_type; - const auto key_type = (*dict_struct.key)[0].type; - const auto f64 = std::make_shared(); - const auto multi_polygon_array = DataTypeArray(std::make_shared(std::make_shared(std::make_shared(f64)))); - const auto multi_polygon_tuple = DataTypeArray(std::make_shared(std::make_shared(std::make_shared(std::vector{f64, f64})))); - const auto simple_polygon_array = DataTypeArray(std::make_shared(f64)); - const auto simple_polygon_tuple = DataTypeArray(std::make_shared(std::vector{f64, f64})); - if (key_type->equals(multi_polygon_array)) - { - input_type = IPolygonDictionary::InputType::MultiPolygon; - point_type = 
IPolygonDictionary::PointType::Array; - } - else if (key_type->equals(multi_polygon_tuple)) - { - input_type = IPolygonDictionary::InputType::MultiPolygon; - point_type = IPolygonDictionary::PointType::Tuple; - } - else if (key_type->equals(simple_polygon_array)) - { - input_type = IPolygonDictionary::InputType::SimplePolygon; - point_type = IPolygonDictionary::PointType::Array; - } - else if (key_type->equals(simple_polygon_tuple)) - { - input_type = IPolygonDictionary::InputType::SimplePolygon; - point_type = IPolygonDictionary::PointType::Tuple; - } - else - throw Exception{"The key type " + key_type->getName() + - " is not one of the following allowed types for a dictionary of layout 'polygon': " + - multi_polygon_array.getName() + " " + - multi_polygon_tuple.getName() + " " + - simple_polygon_array.getName() + " " + - simple_polygon_tuple.getName() + " ", - ErrorCodes::BAD_ARGUMENTS}; - - if (dict_struct.range_min || dict_struct.range_max) - throw Exception{name - + ": elements range_min and range_max should be defined only " - "for a dictionary of layout 'range_hashed'", - ErrorCodes::BAD_ARGUMENTS}; - - const DictionaryLifetime dict_lifetime{config, config_prefix + ".lifetime"}; - return std::make_unique(database, name, dict_struct, std::move(source_ptr), dict_lifetime, input_type, point_type); - }; - factory.registerLayout("polygon", create_layout, true); -} - } diff --git a/src/Dictionaries/PolygonDictionary.h b/src/Dictionaries/PolygonDictionary.h index fc85e339231..ce420463605 100644 --- a/src/Dictionaries/PolygonDictionary.h +++ b/src/Dictionaries/PolygonDictionary.h @@ -19,10 +19,10 @@ namespace DB namespace bg = boost::geometry; /** An interface for polygon dictionaries. - * Polygons are read and stored as multi_polygons from boost::geometry in Euclidean coordinates. - * An implementation should inherit from this base class and preprocess the data upon construction if needed. - * It must override the find method of this class which retrieves the polygon containing a single point. - */ + * Polygons are read and stored as multi_polygons from boost::geometry in Euclidean coordinates. + * An implementation should inherit from this base class and preprocess the data upon construction if needed. + * It must override the find method of this class which retrieves the polygon containing a single point. + */ class IPolygonDictionary : public IDictionaryBase { public: @@ -41,8 +41,8 @@ public: SimplePolygon }; /** Controls the different types allowed for providing the coordinates of points. - * Right now a point can be represented by either an array or a tuple of two Float64 values. - */ + * Right now a point can be represented by either an array or a tuple of two Float64 values. + */ enum class PointType { Array, @@ -178,10 +178,14 @@ public: // TODO: Refactor the whole dictionary design to perform stronger checks, i.e. make this an override. void has(const Columns & key_columns, const DataTypes & key_types, PaddedPODArray & out) const; + /** Single coordinate type. */ + using Coord = Float32; /** A two-dimensional point in Euclidean coordinates. */ - using Point = bg::model::point; + using Point = bg::model::d2::point_xy; /** A polygon in boost is a an outer ring of points with zero or more cut out inner rings. */ using Polygon = bg::model::polygon; + /** A ring in boost used for describing the polygons. */ + using Ring = bg::model::ring; protected: /** Returns true if the given point can be found in the polygon dictionary. 
@@ -266,28 +270,5 @@ private: static std::vector extractPoints(const Columns &key_columns); }; -/** Simple implementation of the polygon dictionary. Doesn't generate anything during its construction. - * Iterates over all stored polygons for each query, checking each of them in linear time. - * Retrieves the polygon with the smallest area containing the given point. If there is more than one any such polygon - * may be returned. - */ -class SimplePolygonDictionary : public IPolygonDictionary -{ -public: - SimplePolygonDictionary( - const std::string & database_, - const std::string & name_, - const DictionaryStructure & dict_struct_, - DictionarySourcePtr source_ptr_, - DictionaryLifetime dict_lifetime_, - InputType input_type_, - PointType point_type_); - - std::shared_ptr clone() const override; - -private: - bool find(const Point & point, size_t & id) const override; -}; - } diff --git a/src/Dictionaries/PolygonDictionaryImplementations.cpp b/src/Dictionaries/PolygonDictionaryImplementations.cpp new file mode 100644 index 00000000000..4dd42ac8b6e --- /dev/null +++ b/src/Dictionaries/PolygonDictionaryImplementations.cpp @@ -0,0 +1,255 @@ +#include "PolygonDictionaryImplementations.h" +#include "DictionaryFactory.h" + +#include +#include +#include + +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; +} + +PolygonDictionarySimple::PolygonDictionarySimple( + const std::string & database_, + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + InputType input_type_, + PointType point_type_): + IPolygonDictionary(database_, name_, dict_struct_, std::move(source_ptr_), dict_lifetime_, input_type_, point_type_) +{ +} + +std::shared_ptr PolygonDictionarySimple::clone() const +{ + return std::make_shared( + this->database, + this->name, + this->dict_struct, + this->source_ptr->clone(), + this->dict_lifetime, + this->input_type, + this->point_type); +} + +bool PolygonDictionarySimple::find(const Point & point, size_t & id) const +{ + bool found = false; + for (size_t i = 0; i < polygons.size(); ++i) + { + if (bg::covered_by(point, polygons[i])) + { + id = i; + found = true; + break; + } + } + return found; +} + +PolygonDictionaryIndexEach::PolygonDictionaryIndexEach( + const std::string & database_, + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + InputType input_type_, + PointType point_type_, + int min_intersections_, + int max_depth_) + : IPolygonDictionary(database_, name_, dict_struct_, std::move(source_ptr_), dict_lifetime_, input_type_, point_type_), + grid(min_intersections_, max_depth_, polygons), + min_intersections(min_intersections_), + max_depth(max_depth_) +{ + buckets.reserve(polygons.size()); + for (const auto & polygon : polygons) + { + std::vector single; + single.emplace_back(polygon); + buckets.emplace_back(single); + } +} + +std::shared_ptr PolygonDictionaryIndexEach::clone() const +{ + return std::make_shared( + this->database, + this->name, + this->dict_struct, + this->source_ptr->clone(), + this->dict_lifetime, + this->input_type, + this->point_type, + this->min_intersections, + this->max_depth); +} + +bool PolygonDictionaryIndexEach::find(const Point & point, size_t & id) const +{ + const auto * cell = grid.find(point.x(), point.y()); + if (cell) + { + for (const auto & candidate : cell->polygon_ids) + { + size_t unused; + 
if (buckets[candidate].find(point, unused)) + { + id = candidate; + return true; + } + } + if (cell->first_covered != FinalCell::kNone) + { + id = cell->first_covered; + return true; + } + } + return false; +} + +PolygonDictionaryIndexCell::PolygonDictionaryIndexCell( + const std::string & database_, + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + InputType input_type_, + PointType point_type_, + size_t min_intersections_, + size_t max_depth_) + : IPolygonDictionary(database_, name_, dict_struct_, std::move(source_ptr_), dict_lifetime_, input_type_, point_type_), + index(min_intersections_, max_depth_, polygons), + min_intersections(min_intersections_), + max_depth(max_depth_) +{ +} + +std::shared_ptr PolygonDictionaryIndexCell::clone() const +{ + return std::make_shared( + this->database, + this->name, + this->dict_struct, + this->source_ptr->clone(), + this->dict_lifetime, + this->input_type, + this->point_type, + this->min_intersections, + this->max_depth); +} + +bool PolygonDictionaryIndexCell::find(const Point & point, size_t & id) const +{ + const auto * cell = index.find(point.x(), point.y()); + if (cell) + { + if (!(cell->corresponding_ids).empty() && cell->index.find(point, id)) + { + id = cell->corresponding_ids[id]; + return true; + } + if (cell->first_covered != FinalCellWithSlabs::kNone) + { + id = cell->first_covered; + return true; + } + } + return false; +} + +template +DictionaryPtr createLayout(const std::string & , + const DictionaryStructure & dict_struct, + const Poco::Util::AbstractConfiguration & config, + const std::string & config_prefix, + DictionarySourcePtr source_ptr) +{ + const String database = config.getString(config_prefix + ".database", ""); + const String name = config.getString(config_prefix + ".name"); + + if (!dict_struct.key) + throw Exception{"'key' is required for a polygon dictionary", ErrorCodes::BAD_ARGUMENTS}; + if (dict_struct.key->size() != 1) + throw Exception{"The 'key' should consist of a single attribute for a polygon dictionary", + ErrorCodes::BAD_ARGUMENTS}; + + IPolygonDictionary::InputType input_type; + IPolygonDictionary::PointType point_type; + const auto key_type = (*dict_struct.key)[0].type; + const auto f64 = std::make_shared(); + const auto multi_polygon_array = DataTypeArray(std::make_shared(std::make_shared(std::make_shared(f64)))); + const auto multi_polygon_tuple = DataTypeArray(std::make_shared(std::make_shared(std::make_shared(std::vector{f64, f64})))); + const auto simple_polygon_array = DataTypeArray(std::make_shared(f64)); + const auto simple_polygon_tuple = DataTypeArray(std::make_shared(std::vector{f64, f64})); + if (key_type->equals(multi_polygon_array)) + { + input_type = IPolygonDictionary::InputType::MultiPolygon; + point_type = IPolygonDictionary::PointType::Array; + } + else if (key_type->equals(multi_polygon_tuple)) + { + input_type = IPolygonDictionary::InputType::MultiPolygon; + point_type = IPolygonDictionary::PointType::Tuple; + } + else if (key_type->equals(simple_polygon_array)) + { + input_type = IPolygonDictionary::InputType::SimplePolygon; + point_type = IPolygonDictionary::PointType::Array; + } + else if (key_type->equals(simple_polygon_tuple)) + { + input_type = IPolygonDictionary::InputType::SimplePolygon; + point_type = IPolygonDictionary::PointType::Tuple; + } + else + throw Exception{"The key type " + key_type->getName() + + " is not one of the following allowed types for a polygon dictionary: 
" + + multi_polygon_array.getName() + " " + + multi_polygon_tuple.getName() + " " + + simple_polygon_array.getName() + " " + + simple_polygon_tuple.getName() + " ", + ErrorCodes::BAD_ARGUMENTS}; + + if (dict_struct.range_min || dict_struct.range_max) + throw Exception{name + + ": elements range_min and range_max should be defined only " + "for a dictionary of layout 'range_hashed'", + ErrorCodes::BAD_ARGUMENTS}; + + const DictionaryLifetime dict_lifetime{config, config_prefix + ".lifetime"}; + + if constexpr (std::is_same_v || std::is_same_v) + { + const auto & layout_prefix = config_prefix + ".layout"; + Poco::Util::AbstractConfiguration::Keys keys; + config.keys(layout_prefix, keys); + const auto & dict_prefix = layout_prefix + "." + keys.front(); + size_t max_depth = config.getUInt(dict_prefix + ".max_depth", PolygonDictionary::kMaxDepthDefault); + size_t min_intersections = config.getUInt(dict_prefix + ".min_intersections", PolygonDictionary::kMinIntersectionsDefault); + return std::make_unique(database, name, dict_struct, std::move(source_ptr), dict_lifetime, input_type, point_type, min_intersections, max_depth); + } + else + return std::make_unique(database, name, dict_struct, std::move(source_ptr), dict_lifetime, input_type, point_type); +} + +void registerDictionaryPolygon(DictionaryFactory & factory) +{ + factory.registerLayout("polygon_simple", createLayout, true); + factory.registerLayout("polygon_index_each", createLayout, true); + factory.registerLayout("polygon_index_cell", createLayout, true); + + /// Alias to the most performant dictionary type - polygon_index_cell + factory.registerLayout("polygon", createLayout, true); +} + +} diff --git a/src/Dictionaries/PolygonDictionaryImplementations.h b/src/Dictionaries/PolygonDictionaryImplementations.h new file mode 100644 index 00000000000..285569b6829 --- /dev/null +++ b/src/Dictionaries/PolygonDictionaryImplementations.h @@ -0,0 +1,99 @@ +#pragma once + +#include "PolygonDictionary.h" +#include "PolygonDictionaryUtils.h" + +#include + +namespace DB +{ + +/** Simple implementation of the polygon dictionary. Doesn't generate anything during its construction. + * Iterates over all stored polygons for each query, checking each of them in linear time. + * Retrieves the polygon with the smallest area containing the given point. + * If there is more than one any such polygon may be returned. + */ +class PolygonDictionarySimple : public IPolygonDictionary +{ +public: + PolygonDictionarySimple( + const std::string & database_, + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + DictionaryLifetime dict_lifetime_, + InputType input_type_, + PointType point_type_); + + std::shared_ptr clone() const override; + +private: + bool find(const Point & point, size_t & id) const override; +}; + +/** A polygon dictionary which generates a recursive grid in order to efficiently cut the number + * of polygons to be checked for a given point. + * For more detail see the GridRoot and FinalCell classes. + * Separately, a slab index is built for each individual polygon. This allows to check the + * candidates more efficiently. 
+ */ +class PolygonDictionaryIndexEach : public IPolygonDictionary +{ +public: + PolygonDictionaryIndexEach( + const std::string & database_, + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + DictionaryLifetime dict_lifetime_, + InputType input_type_, + PointType point_type_, + int min_intersections_, + int max_depth_); + + std::shared_ptr clone() const override; + + static constexpr size_t kMinIntersectionsDefault = 1; + static constexpr size_t kMaxDepthDefault = 5; + +private: + bool find(const Point & point, size_t & id) const override; + + std::vector buckets; + GridRoot grid; + + const size_t min_intersections; + const size_t max_depth; +}; + +/** Uses single SlabsPolygonIndex for all queries. */ +class PolygonDictionaryIndexCell : public IPolygonDictionary +{ +public: + PolygonDictionaryIndexCell( + const std::string & database_, + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + DictionaryLifetime dict_lifetime_, + InputType input_type_, + PointType point_type_, + size_t min_intersections_, + size_t max_depth_); + + std::shared_ptr clone() const override; + + static constexpr size_t kMinIntersectionsDefault = 1; + static constexpr size_t kMaxDepthDefault = 5; + +private: + bool find(const Point & point, size_t & id) const override; + + GridRoot index; + + const size_t min_intersections; + const size_t max_depth; +}; + +} + diff --git a/src/Dictionaries/PolygonDictionaryUtils.cpp b/src/Dictionaries/PolygonDictionaryUtils.cpp new file mode 100644 index 00000000000..176711d9002 --- /dev/null +++ b/src/Dictionaries/PolygonDictionaryUtils.cpp @@ -0,0 +1,320 @@ +#include "PolygonDictionaryUtils.h" + +#include + +#include + +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +FinalCell::FinalCell(const std::vector & polygon_ids_, const std::vector &, const Box &, bool is_last_covered_): +polygon_ids(polygon_ids_) +{ + if (is_last_covered_) + { + first_covered = polygon_ids.back(); + polygon_ids.pop_back(); + } +} + +const FinalCell * FinalCell::find(Coord, Coord) const +{ + return this; +} + +inline void shift(Point & point, Coord val) +{ + point.x(point.x() + val); + point.y(point.y() + val); +} + +FinalCellWithSlabs::FinalCellWithSlabs(const std::vector & polygon_ids_, const std::vector & polygons_, const Box & box_, bool is_last_covered_) +{ + auto extended = box_; + shift(extended.min_corner(), -GridRoot::kEps); + shift(extended.max_corner(), GridRoot::kEps); + Polygon tmp_poly; + bg::convert(extended, tmp_poly); + std::vector intersections; + if (is_last_covered_) + first_covered = polygon_ids_.back(); + for (size_t i = 0; i + is_last_covered_ < polygon_ids_.size(); ++i) + { + std::vector intersection; + bg::intersection(tmp_poly, polygons_[polygon_ids_[i]], intersection); + for (auto & polygon : intersection) + intersections.emplace_back(std::move(polygon)); + while (corresponding_ids.size() < intersections.size()) + corresponding_ids.push_back(polygon_ids_[i]); + } + if (!intersections.empty()) + index = SlabsPolygonIndex{intersections}; +} + +const FinalCellWithSlabs * FinalCellWithSlabs::find(Coord, Coord) const +{ + return this; +} + +SlabsPolygonIndex::SlabsPolygonIndex( + const std::vector & polygons) + : log(&Poco::Logger::get("SlabsPolygonIndex")), + sorted_x(uniqueX(polygons)) +{ + indexBuild(polygons); +} + +std::vector SlabsPolygonIndex::uniqueX(const std::vector & polygons) +{ + std::vector 
all_x; + for (const auto & poly : polygons) + { + for (const auto & point : poly.outer()) + all_x.push_back(point.x()); + + for (const auto & inner : poly.inners()) + for (const auto & point : inner) + all_x.push_back(point.x()); + } + + /** Making all_x sorted and distinct */ + std::sort(all_x.begin(), all_x.end()); + all_x.erase(std::unique(all_x.begin(), all_x.end()), all_x.end()); + + LOG_TRACE(log, "Found {} unique x coordinates", all_x.size()); + return all_x; +} + +void SlabsPolygonIndex::indexBuild(const std::vector & polygons) +{ + for (size_t i = 0; i < polygons.size(); ++i) + { + indexAddRing(polygons[i].outer(), i); + + for (const auto & inner : polygons[i].inners()) + indexAddRing(inner, i); + } + + /** Sorting edges of (left_point, right_point, polygon_id) in that order */ + std::sort(all_edges.begin(), all_edges.end(), Edge::compareByLeftPoint); + for (size_t i = 0; i != all_edges.size(); ++i) + all_edges[i].edge_id = i; + + /** Total number of edges */ + size_t m = all_edges.size(); + + LOG_TRACE(log, "Just sorted {} edges from all {} polygons", all_edges.size(), polygons.size()); + + /** Using custom comparator for fetching edges in right_point order, like in scanline */ + auto cmp = [](const Edge & a, const Edge & b) + { + return Edge::compareByRightPoint(a, b); + }; + std::set interesting_edges(cmp); + + /** Size of index (number of different x coordinates) */ + size_t n = 0; + if (!sorted_x.empty()) + { + n = sorted_x.size() - 1; + } + edges_index_tree.resize(2 * n); + + /** Map of interesting edge ids to the index of left x, the index of right x */ + std::vector edge_left(m, n), edge_right(m, n); + + size_t total_index_edges = 0; + size_t edges_it = 0; + for (size_t l = 0, r = 1; r < sorted_x.size(); ++l, ++r) + { + const Coord lx = sorted_x[l]; + const Coord rx = sorted_x[r]; + + /** Removing edges where right_point.x <= lx */ + while (!interesting_edges.empty() && interesting_edges.begin()->r.x() <= lx) + { + edge_right[interesting_edges.begin()->edge_id] = l; + interesting_edges.erase(interesting_edges.begin()); + } + + /** Adding edges where left_point.x < rx */ + for (; edges_it < all_edges.size() && all_edges[edges_it].l.x() < rx; ++edges_it) + { + interesting_edges.insert(all_edges[edges_it]); + edge_left[all_edges[edges_it].edge_id] = l; + } + } + + for (size_t i = 0; i != all_edges.size(); i++) + { + size_t l = edge_left[i]; + size_t r = edge_right[i]; + if (l == n || sorted_x[l] != all_edges[i].l.x() || sorted_x[r] != all_edges[i].r.x()) + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Error occurred while building polygon index. Edge {} is [{}, {}] but found [{}, {}]. 
l = {}, r = {}", + i, all_edges[i].l.x(), all_edges[i].r.x(), sorted_x[l], sorted_x[r], l, r); + } + + /** Adding [l, r) to the segment tree */ + for (l += n, r += n; l < r; l >>= 1, r >>= 1) + { + if (l & 1) + { + edges_index_tree[l++].emplace_back(all_edges[i]); + ++total_index_edges; + } + if (r & 1) + { + edges_index_tree[--r].emplace_back(all_edges[i]); + ++total_index_edges; + } + } + } + + LOG_TRACE(log, "Polygon index is built, total_index_edges = {}", total_index_edges); +} + +void SlabsPolygonIndex::indexAddRing(const Ring & ring, size_t polygon_id) +{ + for (size_t i = 0, prev = ring.size() - 1; i < ring.size(); prev = i, ++i) + { + Point a = ring[prev]; + Point b = ring[i]; + + /** Making a.x <= b.x */ + if (a.x() > b.x()) + std::swap(a, b); + + if (a.x() == b.x() && a.y() > b.y()) + std::swap(a, b); + + if (a.x() == b.x()) + { + /** Vertical edge found, skipping for now */ + continue; + } + + all_edges.emplace_back(a, b, polygon_id, 0); + } +} + +SlabsPolygonIndex::Edge::Edge( + const Point & l_, + const Point & r_, + size_t polygon_id_, + size_t edge_id_) + : l(l_), + r(r_), + polygon_id(polygon_id_), + edge_id(edge_id_) +{ + /** Calculating arguments of line equation. + * Original equation of this edge is: + * f(x) = l.y() + (r.y() - l.y()) / (r.x() - l.x()) * (x - l.x()) + */ + k = (r.y() - l.y()) / (r.x() - l.x()); + b = l.y() - k * l.x(); +} + +bool SlabsPolygonIndex::Edge::compareByLeftPoint(const Edge & a, const Edge & b) +{ + /** Comparing left point */ + if (a.l.x() != b.l.x()) + return a.l.x() < b.l.x(); + if (a.l.y() != b.l.y()) + return a.l.y() < b.l.y(); + + /** Comparing right point */ + if (a.r.x() != b.r.x()) + return a.r.x() < b.r.x(); + if (a.r.y() != b.r.y()) + return a.r.y() < b.r.y(); + + return a.polygon_id < b.polygon_id; +} + +bool SlabsPolygonIndex::Edge::compareByRightPoint(const Edge & a, const Edge & b) +{ + /** Comparing right point */ + if (a.r.x() != b.r.x()) + return a.r.x() < b.r.x(); + if (a.r.y() != b.r.y()) + return a.r.y() < b.r.y(); + + /** Comparing left point */ + if (a.l.x() != b.l.x()) + return a.l.x() < b.l.x(); + if (a.l.y() != b.l.y()) + return a.l.y() < b.l.y(); + + if (a.polygon_id != b.polygon_id) + return a.polygon_id < b.polygon_id; + + return a.edge_id < b.edge_id; +} + +bool SlabsPolygonIndex::find(const Point & point, size_t & id) const +{ + /** Vertical line or nothing at all, no match here */ + if (sorted_x.size() < 2) + return false; + + Coord x = point.x(); + Coord y = point.y(); + + /** Not in bounding box */ + if (x < sorted_x[0] || x > sorted_x.back()) + return false; + + bool found = false; + + /** Point is considered inside when ray down from point crosses odd number of edges. + * This vector will contain polygon ids of all crosses. Smallest id with odd number of + * occurrences is the answer. 
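+ * (Illustrative example, not in the original comment.) If the downward ray crosses polygon 0 once + * (the point is inside it) and polygon 3 twice (the ray enters and leaves it below the point), + * intersections sorts to {0, 3, 3}; 0 is the smallest id with an odd count, so id = 0 is returned.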
+ */ + std::vector intersections; + intersections.reserve(10); + + /** Find position of the slab with binary search by sorted_x */ + size_t pos = std::upper_bound(sorted_x.begin() + 1, sorted_x.end() - 1, x) - sorted_x.begin() - 1; + + /** Jump to the leaf in segment tree */ + pos += edges_index_tree.size() / 2; + do + { + /** Iterating over interesting edges */ + for (const auto & edge : edges_index_tree[pos]) + { + /** Check if point lies above the edge */ + if (x * edge.k + edge.b <= y) + intersections.emplace_back(edge.polygon_id); + } + pos >>= 1; + } while (pos != 0); + + /** Sort all ids and find smallest with odd occurrences */ + std::sort(intersections.begin(), intersections.end()); + for (size_t i = 0; i < intersections.size(); i += 2) + { + if (i + 1 == intersections.size() || intersections[i] != intersections[i + 1]) + { + found = true; + id = intersections[i]; + break; + } + } + + return found; +} + +} diff --git a/src/Dictionaries/PolygonDictionaryUtils.h b/src/Dictionaries/PolygonDictionaryUtils.h new file mode 100644 index 00000000000..83ce2c26944 --- /dev/null +++ b/src/Dictionaries/PolygonDictionaryUtils.h @@ -0,0 +1,291 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include + +#include "PolygonDictionary.h" + +#include + +namespace DB +{ + +namespace bg = boost::geometry; + +using Coord = IPolygonDictionary::Coord; +using Point = IPolygonDictionary::Point; +using Polygon = IPolygonDictionary::Polygon; +using Ring = IPolygonDictionary::Ring; +using Box = bg::model::box; + +/** SlabsPolygonIndex builds index based on shooting ray down from point. + * When this ray crosses odd number of edges in single polygon, point is considered inside. + * + * SlabsPolygonIndex divides plane into vertical slabs, separated by vertical lines going through all points. + * For each slab, all edges falling in that slab are effectively stored. + * For each find query, required slab is found with binary search, and result is computed + * by iterating over all edges in that slab. + */ +class SlabsPolygonIndex +{ +public: + SlabsPolygonIndex() = default; + + /** Builds an index by splitting all edges with all points x coordinates. */ + SlabsPolygonIndex(const std::vector & polygons); + + /** Finds polygon id the same way as IPolygonIndex. */ + bool find(const Point & point, size_t & id) const; + + /** Edge describes edge (adjacent points) of any polygon, and contains polygon's id. + * Invariant here is first point has x not greater than second point. + */ + struct Edge + { + Point l; + Point r; + size_t polygon_id; + size_t edge_id; + + Coord k; + Coord b; + + Edge(const Point & l, const Point & r, size_t polygon_id, size_t edge_id); + + static bool compareByLeftPoint(const Edge & a, const Edge & b); + static bool compareByRightPoint(const Edge & a, const Edge & b); + }; + + /** EdgeLine is optimized version of Edge. 
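+ * (Clarifying addition, not in the original comment.) It keeps just the precomputed line y = k * x + b + * and the owning polygon_id, which is all SlabsPolygonIndex::find needs to test whether an edge + * passes below a query point.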
*/ + struct EdgeLine + { + explicit EdgeLine(const Edge & e): k(e.k), b(e.b), polygon_id(e.polygon_id) {} + Coord k; + Coord b; + size_t polygon_id; + }; + +private: + /** Returns unique x coordinates among all points */ + std::vector uniqueX(const std::vector & polygons); + + /** Builds index described above */ + void indexBuild(const std::vector & polygons); + + /** Auxiliary function for adding ring to the index */ + void indexAddRing(const Ring & ring, size_t polygon_id); + + Poco::Logger * log; + + /** Sorted distinct coordinates of all vertexes */ + std::vector sorted_x; + std::vector all_edges; + + /** This edges_index_tree stores all slabs with edges efficiently, using segment tree algorithm. + * edges_index_tree[i] node combines segments from edges_index_tree[i*2] and edges_index_tree[i*2+1]. + * Every polygon's edge covers a segment of x coordinates, and can be added to this tree by + * placing it into O(log n) nodes of this tree. + */ + std::vector> edges_index_tree; +}; + +template +class ICell +{ +public: + virtual ~ICell() = default; + [[nodiscard]] virtual const ReturnCell * find(Coord x, Coord y) const = 0; +}; + +/** This leaf cell implementation simply stores the indexes of the intersections. + * As an additional optimization, if a polygon covers the cell completely its index is stored in + * the first_covered field and all following polygon indexes are discarded, + * since they won't ever be useful. + */ +class FinalCell : public ICell +{ +public: + explicit FinalCell(const std::vector & polygon_ids_, const std::vector &, const Box &, bool is_last_covered_); + std::vector polygon_ids; + size_t first_covered = kNone; + + static constexpr size_t kNone = -1; + +private: + [[nodiscard]] const FinalCell * find(Coord x, Coord y) const override; +}; + +/** This leaf cell implementation intersects the given polygons with the cell's box and builds a + * slab index for the result. + * Since the intersections can produce multiple polygons a vector of corresponding ids is stored. + * If the slab index returned the id x for a query the correct polygon id is corresponding_ids[x]. + * As an additional optimization, if a polygon covers the cell completely its index stored in the + * first_covered field and all following polygons are not used for building the slab index. + */ +class FinalCellWithSlabs : public ICell +{ +public: + explicit FinalCellWithSlabs(const std::vector & polygon_ids_, const std::vector & polygons_, const Box & box_, bool is_last_covered_); + + SlabsPolygonIndex index; + std::vector corresponding_ids; + size_t first_covered = kNone; + + static constexpr size_t kNone = -1; + +private: + [[nodiscard]] const FinalCellWithSlabs * find(Coord x, Coord y) const override; +}; + +template +class DividedCell : public ICell +{ +public: + explicit DividedCell(std::vector>> children_): children(std::move(children_)) {} + + [[nodiscard]] const ReturnCell * find(Coord x, Coord y) const override + { + auto x_ratio = x * kSplit; + auto y_ratio = y * kSplit; + auto x_bin = static_cast(x_ratio); + auto y_bin = static_cast(y_ratio); + return children[y_bin + x_bin * kSplit]->find(x_ratio - x_bin, y_ratio - y_bin); + } + + /** When a cell is split every side is split into kSplit pieces producing kSplit * kSplit equal smaller cells. */ + static constexpr size_t kSplit = 4; + +private: + std::vector>> children; +}; + +/** A recursively built grid containing information about polygons intersecting each cell. 
+ * The starting cell is the bounding box of the given polygons which are passed by reference. + * For every cell a vector of indices of intersecting polygons is calculated, in the order originally provided upon + * construction. A cell is recursively split into kSplit * kSplit equal cells up to the point where the cell + * intersects a small enough number of polygons or the maximum allowed depth is exceeded. + * Both of these parameters are set in the constructor. + * Once these conditions are fulfilled some index is built and stored in the leaf cells. + * The ReturnCell class passed in the template parameter is responsible for this. + */ +template +class GridRoot : public ICell +{ +public: + GridRoot(size_t min_intersections_, size_t max_depth_, const std::vector & polygons_): + kMinIntersections(min_intersections_), kMaxDepth(max_depth_), polygons(polygons_) + { + setBoundingBox(); + std::vector order(polygons.size()); + std::iota(order.begin(), order.end(), 0); + root = makeCell(min_x, min_y, max_x, max_y, order); + } + + /** Retrieves the cell containing a given point. + * A null pointer is returned when the point falls outside the grid. + */ + [[nodiscard]] const ReturnCell * find(Coord x, Coord y) const override + { + if (x < min_x || x >= max_x) + return nullptr; + if (y < min_y || y >= max_y) + return nullptr; + return root->find((x - min_x) / (max_x - min_x), (y - min_y) / (max_y - min_y)); + } + + /** Until this depth is reached each row of cells is calculated concurrently in a new thread. */ + static constexpr size_t kMultiProcessingDepth = 2; + + /** A constant used to avoid errors with points falling on the boundaries of cells. */ + static constexpr Coord kEps = 1e-4; + +private: + std::unique_ptr> root = nullptr; + Coord min_x = 0, min_y = 0; + Coord max_x = 0, max_y = 0; + const size_t kMinIntersections; + const size_t kMaxDepth; + + const std::vector & polygons; + + std::unique_ptr> makeCell(Coord current_min_x, Coord current_min_y, Coord current_max_x, Coord current_max_y, std::vector possible_ids, size_t depth = 0) + { + auto current_box = Box(Point(current_min_x, current_min_y), Point(current_max_x, current_max_y)); + Polygon tmp_poly; + bg::convert(current_box, tmp_poly); + possible_ids.erase(std::remove_if(possible_ids.begin(), possible_ids.end(), [&](const auto id) + { + return !bg::intersects(current_box, polygons[id]); + }), possible_ids.end()); + int covered = 0; +#ifndef __clang_analyzer__ /// Triggers a warning in boost geometry. 
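+ /// (Clarifying note, not in the original patch.) If some polygon fully covers this cell, + /// any polygon listed after it can never be the answer inside the cell, so the tail of + /// possible_ids is dropped and `covered` records that the last remaining id covers the + /// whole cell (consumed by the leaf cell as is_last_covered_).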
+ auto it = std::find_if(possible_ids.begin(), possible_ids.end(), [&](const auto id) + { + return bg::covered_by(tmp_poly, polygons[id]); + }); + if (it != possible_ids.end()) + { + possible_ids.erase(it + 1, possible_ids.end()); + covered = 1; + } +#endif + size_t intersections = possible_ids.size() - covered; + if (intersections <= kMinIntersections || depth++ == kMaxDepth) + return std::make_unique(possible_ids, polygons, current_box, covered); + auto x_shift = (current_max_x - current_min_x) / DividedCell::kSplit; + auto y_shift = (current_max_y - current_min_y) / DividedCell::kSplit; + std::vector>> children; + children.resize(DividedCell::kSplit * DividedCell::kSplit); + std::vector threads{}; + for (size_t i = 0; i < DividedCell::kSplit; current_min_x += x_shift, ++i) + { + auto handle_row = [this, &children, &y_shift, &x_shift, &possible_ids, &depth, i](Coord x, Coord y) + { + for (size_t j = 0; j < DividedCell::kSplit; y += y_shift, ++j) + { + children[i * DividedCell::kSplit + j] = makeCell(x, y, x + x_shift, y + y_shift, possible_ids, depth); + } + }; + if (depth <= kMultiProcessingDepth) + threads.emplace_back(handle_row, current_min_x, current_min_y); + else + handle_row(current_min_x, current_min_y); + } + for (auto & thread : threads) + thread.join(); + return std::make_unique>(std::move(children)); + } + + void setBoundingBox() + { + bool first = true; + std::for_each(polygons.begin(), polygons.end(), [&](const auto & polygon) + { + bg::for_each_point(polygon, [&](const Point & point) + { + auto x = point.x(); + auto y = point.y(); + if (first || x < min_x) + min_x = x; + if (first || x > max_x) + max_x = x; + if (first || y < min_y) + min_y = y; + if (first || y > max_y) + max_y = y; + if (first) + first = false; + }); + }); + max_x += kEps; + max_y += kEps; + } +}; + +} diff --git a/src/Dictionaries/ya.make b/src/Dictionaries/ya.make index 3aaecb0b97e..89cdcb1f79a 100644 --- a/src/Dictionaries/ya.make +++ b/src/Dictionaries/ya.make @@ -56,6 +56,8 @@ SRCS( MongoDBDictionarySource.cpp MySQLDictionarySource.cpp PolygonDictionary.cpp + PolygonDictionaryUtils.cpp + PolygonDictionaryImplementations.cpp RangeHashedDictionary.cpp readInvalidateQuery.cpp RedisBlockInputStream.cpp @@ -65,7 +67,6 @@ SRCS( SSDComplexKeyCacheDictionary.cpp writeParenthesisedString.cpp XDBCDictionarySource.cpp - ) END() diff --git a/src/Functions/CMakeLists.txt b/src/Functions/CMakeLists.txt index 993376b1e50..a37fb386d4c 100644 --- a/src/Functions/CMakeLists.txt +++ b/src/Functions/CMakeLists.txt @@ -112,4 +112,6 @@ target_link_libraries(clickhouse_functions PRIVATE clickhouse_functions_url) add_subdirectory(array) target_link_libraries(clickhouse_functions PRIVATE clickhouse_functions_array) -target_link_libraries(clickhouse_functions PRIVATE stats) +if (USE_STATS) + target_link_libraries(clickhouse_functions PRIVATE stats) +endif() diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 2327bce5e45..c2a7f3f3cd2 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -1931,7 +1931,7 @@ private: } template - WrapperType createEnumWrapper(const DataTypePtr & from_type, const DataTypeEnum * to_type) const + WrapperType createEnumWrapper(const DataTypePtr & from_type, const DataTypeEnum * to_type, bool source_is_nullable) const { using EnumType = DataTypeEnum; using Function = typename FunctionTo::Type; @@ -1942,9 +1942,9 @@ private: checkEnumToEnumConversion(from_enum16, to_type); if (checkAndGetDataType(from_type.get())) - 
return createStringToEnumWrapper(); + return createStringToEnumWrapper(source_is_nullable); else if (checkAndGetDataType(from_type.get())) - return createStringToEnumWrapper(); + return createStringToEnumWrapper(source_is_nullable); else if (isNativeNumber(from_type) || isEnum(from_type)) { auto function = Function::create(); @@ -1987,17 +1987,32 @@ private: } template - WrapperType createStringToEnumWrapper() const + WrapperType createStringToEnumWrapper(bool source_is_nullable) const { const char * function_name = name; - return [function_name] (Block & block, const ColumnNumbers & arguments, const size_t result, size_t /*input_rows_count*/) + return [function_name, source_is_nullable] (Block & block, const ColumnNumbers & arguments, const size_t result, size_t /*input_rows_count*/) { const auto first_col = block.getByPosition(arguments.front()).column.get(); auto & col_with_type_and_name = block.getByPosition(result); const auto & result_type = typeid_cast(*col_with_type_and_name.type); - if (const auto col = typeid_cast(first_col)) + const ColumnStringType * col = typeid_cast(first_col); + const ColumnNullable * nullable_col = nullptr; + if (source_is_nullable) + { + if (block.columns() <= arguments.front() + 1) + throw Exception("Not enough columns", ErrorCodes::LOGICAL_ERROR); + + size_t nullable_pos = block.columns() - 1; + nullable_col = typeid_cast(block.getByPosition(nullable_pos).column.get()); + if (!nullable_col) + throw Exception("Last column should be ColumnNullable", ErrorCodes::LOGICAL_ERROR); + if (col && nullable_col->size() != col->size()) + throw Exception("ColumnNullable is not compatible with original", ErrorCodes::LOGICAL_ERROR); + } + + if (col) { const auto size = col->size(); @@ -2005,8 +2020,19 @@ private: auto & out_data = static_cast(*res).getData(); out_data.resize(size); - for (const auto i : ext::range(0, size)) - out_data[i] = result_type.getValue(col->getDataAt(i)); + if (nullable_col) + { + for (const auto i : ext::range(0, size)) + { + if (!nullable_col->isNullAt(i)) + out_data[i] = result_type.getValue(col->getDataAt(i)); + } + } + else + { + for (const auto i : ext::range(0, size)) + out_data[i] = result_type.getValue(col->getDataAt(i)); + } col_with_type_and_name.column = std::move(res); } @@ -2141,7 +2167,7 @@ private: bool source_is_nullable = from_type->isNullable(); bool result_is_nullable = to_type->isNullable(); - auto wrapper = prepareImpl(removeNullable(from_type), removeNullable(to_type), result_is_nullable); + auto wrapper = prepareImpl(removeNullable(from_type), removeNullable(to_type), result_is_nullable, source_is_nullable); if (result_is_nullable) { @@ -2162,6 +2188,13 @@ private: size_t tmp_res_index = block.columns(); tmp_block.insert({nullptr, nested_type, ""}); + /// Add original ColumnNullable for createStringToEnumWrapper() + if (source_is_nullable) + { + if (arguments.size() != 1) + throw Exception("Invalid number of arguments", ErrorCodes::LOGICAL_ERROR); + tmp_block.insert(block.getByPosition(arguments.front())); + } /// Perform the requested conversion. wrapper(tmp_block, arguments, tmp_res_index, input_rows_count); @@ -2208,7 +2241,7 @@ private: /// 'from_type' and 'to_type' are nested types in case of Nullable. /// 'requested_result_is_nullable' is true if CAST to Nullable type is requested. 
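+ /// (Clarifying note, not part of the patch.) 'source_is_nullable' is threaded through to + /// createStringToEnumWrapper(): the nullable wrapper appends the original ColumnNullable as the + /// last column of the temporary block, and rows that are NULL in the source are skipped instead + /// of being looked up in the enum, where their default-filled string would not be a valid value.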
- WrapperType prepareImpl(const DataTypePtr & from_type, const DataTypePtr & to_type, bool requested_result_is_nullable) const + WrapperType prepareImpl(const DataTypePtr & from_type, const DataTypePtr & to_type, bool requested_result_is_nullable, bool source_is_nullable) const { if (from_type->equals(*to_type)) return createIdentityWrapper(from_type); @@ -2243,7 +2276,7 @@ private: std::is_same_v || std::is_same_v) { - ret = createEnumWrapper(from_type, checkAndGetDataType(to_type.get())); + ret = createEnumWrapper(from_type, checkAndGetDataType(to_type.get()), source_is_nullable); return true; } if constexpr ( diff --git a/src/Functions/FunctionsExternalDictionaries.h b/src/Functions/FunctionsExternalDictionaries.h index c7194335ff3..e2cdc6d5e57 100644 --- a/src/Functions/FunctionsExternalDictionaries.h +++ b/src/Functions/FunctionsExternalDictionaries.h @@ -38,7 +38,7 @@ #include #include #include -#include +#include #include #include @@ -194,7 +194,9 @@ private: #if !defined(ARCADIA_BUILD) !executeDispatchComplex(block, arguments, result, dict) && #endif - !executeDispatchComplex(block, arguments, result, dict)) + !executeDispatchComplex(block, arguments, result, dict) && + !executeDispatchComplex(block, arguments, result, dict) && + !executeDispatchComplex(block, arguments, result, dict)) throw Exception{"Unsupported dictionary type " + dict->getTypeName(), ErrorCodes::UNKNOWN_TYPE}; } @@ -350,7 +352,9 @@ private: #if !defined(ARCADIA_BUILD) !executeDispatchComplex(block, arguments, result, dict) && #endif - !executeDispatchComplex(block, arguments, result, dict) && + !executeDispatchComplex(block, arguments, result, dict) && + !executeDispatchComplex(block, arguments, result, dict) && + !executeDispatchComplex(block, arguments, result, dict) && !executeDispatchRange(block, arguments, result, dict)) throw Exception{"Unsupported dictionary type " + dict->getTypeName(), ErrorCodes::UNKNOWN_TYPE}; } @@ -534,7 +538,9 @@ private: #if !defined(ARCADIA_BUILD) !executeDispatchComplex(block, arguments, result, dict) && #endif - !executeDispatchComplex(block, arguments, result, dict)) + !executeDispatchComplex(block, arguments, result, dict) && + !executeDispatchComplex(block, arguments, result, dict) && + !executeDispatchComplex(block, arguments, result, dict)) throw Exception{"Unsupported dictionary type " + dict->getTypeName(), ErrorCodes::UNKNOWN_TYPE}; } @@ -874,7 +880,9 @@ private: #if !defined(ARCADIA_BUILD) !executeDispatchComplex(block, arguments, result, dict) && #endif - !executeDispatchComplex(block, arguments, result, dict) && + !executeDispatchComplex(block, arguments, result, dict) && + !executeDispatchComplex(block, arguments, result, dict) && + !executeDispatchComplex(block, arguments, result, dict) && !executeDispatchRange(block, arguments, result, dict)) throw Exception{"Unsupported dictionary type " + dict->getTypeName(), ErrorCodes::UNKNOWN_TYPE}; } @@ -1135,7 +1143,9 @@ private: #if !defined(ARCADIA_BUILD) !executeDispatchComplex(block, arguments, result, dict) && #endif - !executeDispatchComplex(block, arguments, result, dict)) + !executeDispatchComplex(block, arguments, result, dict) && + !executeDispatchComplex(block, arguments, result, dict) && + !executeDispatchComplex(block, arguments, result, dict)) throw Exception{"Unsupported dictionary type " + dict->getTypeName(), ErrorCodes::UNKNOWN_TYPE}; } diff --git a/src/Functions/abtesting.cpp b/src/Functions/abtesting.cpp index 0152e403fb8..040630b36ac 100644 --- a/src/Functions/abtesting.cpp +++ 
b/src/Functions/abtesting.cpp @@ -1,4 +1,6 @@ -#if !defined(ARCADIA_BUILD) +#include + +#if !defined(ARCADIA_BUILD) && USE_STATS #include #include @@ -9,7 +11,6 @@ #include #include #include -#include #include #include @@ -304,4 +305,17 @@ void registerFunctionBayesAB(FunctionFactory & factory) } +#else + +namespace DB +{ + +class FunctionFactory; + +void registerFunctionBayesAB(FunctionFactory & /* factory */) +{ +} + +} + #endif diff --git a/src/Functions/abtesting.h b/src/Functions/abtesting.h index c39cbb71d7b..1e9b9747505 100644 --- a/src/Functions/abtesting.h +++ b/src/Functions/abtesting.h @@ -1,6 +1,10 @@ -#if !defined(ARCADIA_BUILD) #pragma once +#include + +#if !defined(ARCADIA_BUILD) && USE_STATS + + #include #include #include @@ -28,4 +32,5 @@ Variants bayesian_ab_test(String distribution, PODArray & xs, PODArray< String convertToJson(const PODArray & variant_names, const Variants & variants); } + #endif diff --git a/src/Functions/array/mapOp.cpp b/src/Functions/array/mapOp.cpp new file mode 100644 index 00000000000..0ab17d39de7 --- /dev/null +++ b/src/Functions/array/mapOp.cpp @@ -0,0 +1,331 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include "Core/ColumnWithTypeAndName.h" + +namespace DB +{ +namespace ErrorCodes +{ + extern const int ILLEGAL_COLUMN; + extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; +} + +namespace +{ + +struct TupArg +{ + const IColumn & key_column; + const IColumn & val_column; + const IColumn::Offsets & key_offsets; + const IColumn::Offsets & val_offsets; + bool is_const; +}; +using TupleMaps = std::vector; + +namespace OpTypes +{ + extern const int ADD = 0; + extern const int SUBTRACT = 1; +} + +template +class FunctionMapOp : public IFunction +{ +public: + static constexpr auto name = (op_type == OpTypes::ADD) ? 
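+ // (Illustrative note, not in the original patch.) The two instantiations merge maps given as + // (keys, values) array tuples; e.g. something like SELECT mapAdd(([1, 2], [10, 10]), ([1, 3], [5, 5])) + // should yield ([1, 2, 3], [15, 10, 5]): values are summed per key, and with mapSubtract a key + // seen only in a later argument contributes its negated value.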
"mapAdd" : "mapSubtract"; + static FunctionPtr create(const Context &) { return std::make_shared(); } + +private: + String getName() const override { return name; } + + size_t getNumberOfArguments() const override { return 0; } + bool isVariadic() const override { return true; } + bool useDefaultImplementationForConstants() const override { return true; } + + DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override + { + bool is_float = false; + DataTypePtr key_type, val_type, res; + + if (arguments.size() < 2) + throw Exception{getName() + " accepts at least two map tuples", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH}; + + for (const auto & tup_arg : arguments) + { + const DataTypeTuple * tup = checkAndGetDataType(tup_arg.get()); + if (!tup) + throw Exception{getName() + " accepts at least two map tuples", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH}; + + auto elems = tup->getElements(); + if (elems.size() != 2) + throw Exception( + "Each tuple in " + getName() + " arguments should consist of two arrays", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + const DataTypeArray * k = checkAndGetDataType(elems[0].get()); + const DataTypeArray * v = checkAndGetDataType(elems[1].get()); + + if (!k || !v) + throw Exception( + "Each tuple in " + getName() + " arguments should consist of two arrays", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + auto result_type = v->getNestedType(); + if (!result_type->canBePromoted()) + throw Exception{"Values to be summed are expected to be Numeric, Float or Decimal.", + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT}; + + WhichDataType which_val(result_type); + + auto promoted_type = result_type->promoteNumericType(); + if (!key_type) + { + key_type = k->getNestedType(); + val_type = promoted_type; + is_float = which_val.isFloat(); + } + else + { + if (!(k->getNestedType()->equals(*key_type))) + throw Exception( + "All key types in " + getName() + " should be same: " + key_type->getName(), + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + if (is_float != which_val.isFloat()) + throw Exception( + "All value types in " + getName() + " should be or float or integer", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + if (!(promoted_type->equals(*val_type))) + { + throw Exception( + "All value types in " + getName() + " should be promotable to " + val_type->getName() + ", got " + + promoted_type->getName(), + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + } + } + + if (!res) + { + res = std::make_shared( + DataTypes{std::make_shared(k->getNestedType()), std::make_shared(promoted_type)}); + } + } + + return res; + } + + template + void execute2(Block & block, const size_t result, size_t row_count, TupleMaps & args, const DataTypeTuple & res_type) const + { + MutableColumnPtr res_tuple = res_type.createColumn(); + + auto * to_tuple = assert_cast(res_tuple.get()); + auto & to_keys_arr = assert_cast(to_tuple->getColumn(0)); + auto & to_keys_data = to_keys_arr.getData(); + auto & to_keys_offset = to_keys_arr.getOffsets(); + + auto & to_vals_arr = assert_cast(to_tuple->getColumn(1)); + auto & to_vals_data = to_vals_arr.getData(); + + size_t res_offset = 0; + std::map summing_map; + + for (size_t i = 0; i < row_count; i++) + { + [[maybe_unused]] bool first = true; + for (auto & arg : args) + { + size_t offset = 0, len = arg.key_offsets[0]; + + if (!arg.is_const) + { + offset = i > 0 ? 
arg.key_offsets[i - 1] : 0; + len = arg.key_offsets[i] - offset; + + if (arg.val_offsets[i] != arg.key_offsets[i]) + throw Exception( + "Key and value array should have same amount of elements", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + } + + for (size_t j = 0; j < len; j++) + { + KeyType key; + if constexpr (is_str_key) + { + // have to use Field structs to get strings + key = arg.key_column.operator[](offset + j).get(); + } + else + { + key = assert_cast &>(arg.key_column).getData()[offset + j]; + } + + auto value = arg.val_column.operator[](offset + j).get(); + + if constexpr (op_type == OpTypes::ADD) + { + const auto [it, inserted] = summing_map.insert({key, value}); + if (!inserted) + it->second += value; + } + else + { + static_assert(op_type == OpTypes::SUBTRACT); + const auto [it, inserted] = summing_map.insert({key, first ? value : -value}); + if (!inserted) + it->second -= value; + } + } + + first = false; + } + + for (const auto & elem : summing_map) + { + res_offset++; + to_keys_data.insert(elem.first); + to_vals_data.insert(elem.second); + } + to_keys_offset.push_back(res_offset); + summing_map.clear(); + } + + // same offsets as in keys + to_vals_arr.getOffsets().insert(to_keys_offset.begin(), to_keys_offset.end()); + + block.getByPosition(result).column = std::move(res_tuple); + } + + template + void execute1(Block & block, const size_t result, size_t row_count, const DataTypeTuple & res_type, TupleMaps & args) const + { + const auto & promoted_type = (assert_cast(res_type.getElements()[1].get()))->getNestedType(); +#define MATCH_EXECUTE(is_str) \ + switch (promoted_type->getTypeId()) { \ + case TypeIndex::Int64: execute2(block, result, row_count, args, res_type); break; \ + case TypeIndex::UInt64: execute2(block, result, row_count, args, res_type); break; \ + case TypeIndex::Float64: execute2(block, result, row_count, args, res_type); break; \ + default: \ + throw Exception{"Illegal columns in arguments of function " + getName(), ErrorCodes::ILLEGAL_COLUMN}; \ + } + + if constexpr (is_str_key) + { + MATCH_EXECUTE(true) + } + else + { + MATCH_EXECUTE(false) + } +#undef MATCH_EXECUTE + } + + void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result, size_t) const override + { + const DataTypeTuple * tup_type = checkAndGetDataType((block.safeGetByPosition(arguments[0])).type.get()); + const DataTypeArray * key_array_type = checkAndGetDataType(tup_type->getElements()[0].get()); + const DataTypeArray * val_array_type = checkAndGetDataType(tup_type->getElements()[1].get()); + + /* determine output type */ + const DataTypeTuple & res_type + = DataTypeTuple(DataTypes{std::make_shared(key_array_type->getNestedType()), + std::make_shared(val_array_type->getNestedType()->promoteNumericType())}); + + TupleMaps args{}; + args.reserve(arguments.size()); + + //prepare columns, extract data columns for direct access and put them to the vector + for (auto arg : arguments) + { + auto & col = block.getByPosition(arg); + const ColumnTuple * tup; + bool is_const = isColumnConst(*col.column); + if (is_const) + { + const auto * c = assert_cast(col.column.get()); + tup = assert_cast(c->getDataColumnPtr().get()); + } + else + tup = assert_cast(col.column.get()); + + const auto & arr1 = assert_cast(tup->getColumn(0)); + const auto & arr2 = assert_cast(tup->getColumn(1)); + + const auto & key_offsets = arr1.getOffsets(); + const auto & key_column = arr1.getData(); + + const auto & val_offsets = arr2.getOffsets(); + const auto & val_column = arr2.getData(); + + // we can 
check const columns before any processing + if (is_const) + { + if (val_offsets[0] != key_offsets[0]) + throw Exception( + "Key and value array should have same amount of elements", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + } + + args.push_back({key_column, val_column, key_offsets, val_offsets, is_const}); + } + + size_t row_count = block.getByPosition(arguments[0]).column->size(); + auto key_type_id = key_array_type->getNestedType()->getTypeId(); + + switch (key_type_id) + { + case TypeIndex::Enum8: + case TypeIndex::Int8: + execute1(block, result, row_count, res_type, args); + break; + case TypeIndex::Enum16: + case TypeIndex::Int16: + execute1(block, result, row_count, res_type, args); + break; + case TypeIndex::Int32: + execute1(block, result, row_count, res_type, args); + break; + case TypeIndex::Int64: + execute1(block, result, row_count, res_type, args); + break; + case TypeIndex::UInt8: + execute1(block, result, row_count, res_type, args); + break; + case TypeIndex::Date: + case TypeIndex::UInt16: + execute1(block, result, row_count, res_type, args); + break; + case TypeIndex::DateTime: + case TypeIndex::UInt32: + execute1(block, result, row_count, res_type, args); + break; + case TypeIndex::UInt64: + execute1(block, result, row_count, res_type, args); + break; + case TypeIndex::UUID: + execute1(block, result, row_count, res_type, args); + break; + case TypeIndex::FixedString: + case TypeIndex::String: + execute1(block, result, row_count, res_type, args); + break; + default: + throw Exception{"Illegal columns in arguments of function " + getName(), ErrorCodes::ILLEGAL_COLUMN}; + } + } +}; + +} + +void registerFunctionMapOp(FunctionFactory & factory) +{ + factory.registerFunction>(); + factory.registerFunction>(); +} + +} diff --git a/src/Functions/array/registerFunctionsArray.cpp b/src/Functions/array/registerFunctionsArray.cpp index a4e74dcf5eb..d10b65f77fd 100644 --- a/src/Functions/array/registerFunctionsArray.cpp +++ b/src/Functions/array/registerFunctionsArray.cpp @@ -35,6 +35,7 @@ void registerFunctionArrayWithConstant(FunctionFactory &); void registerFunctionArrayZip(FunctionFactory &); void registerFunctionArrayAUC(FunctionFactory &); void registerFunctionArrayReduceInRanges(FunctionFactory &); +void registerFunctionMapOp(FunctionFactory &); void registerFunctionsArray(FunctionFactory & factory) { @@ -71,6 +72,7 @@ void registerFunctionsArray(FunctionFactory & factory) registerFunctionArrayWithConstant(factory); registerFunctionArrayZip(factory); registerFunctionArrayAUC(factory); + registerFunctionMapOp(factory); } } diff --git a/src/Functions/tests/abtesting.cpp b/src/Functions/tests/abtesting.cpp index b9c65fde728..3ac57042db4 100644 --- a/src/Functions/tests/abtesting.cpp +++ b/src/Functions/tests/abtesting.cpp @@ -63,7 +63,7 @@ Variants test_bayesab(std::string dist, PODArray xs, PODArray int main(int, char **) { - size_t max, min; + size_t max = 0, min = 0; auto variants = test_bayesab("beta", {10000, 1000, 900}, {600, 110, 90}, max, min); if (max != 1) exit(1); diff --git a/src/Functions/ya.make b/src/Functions/ya.make index 72ac36f880a..54f5ff24990 100644 --- a/src/Functions/ya.make +++ b/src/Functions/ya.make @@ -96,6 +96,7 @@ SRCS( array/hasSubstr.cpp array/indexOf.cpp array/length.cpp + array/mapOp.cpp array/range.cpp array/registerFunctionsArray.cpp asin.cpp diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp index 32e3000a65d..0e1d0c51704 100644 --- a/src/Interpreters/ExpressionActions.cpp +++ 
b/src/Interpreters/ExpressionActions.cpp @@ -1309,7 +1309,7 @@ void ExpressionActionsChain::finalize() } } -std::string ExpressionActionsChain::dumpChain() +std::string ExpressionActionsChain::dumpChain() const { std::stringstream ss; diff --git a/src/Interpreters/ExpressionActions.h b/src/Interpreters/ExpressionActions.h index 1aae3f5e021..49da9a5f810 100644 --- a/src/Interpreters/ExpressionActions.h +++ b/src/Interpreters/ExpressionActions.h @@ -347,7 +347,7 @@ struct ExpressionActionsChain return steps.back(); } - std::string dumpChain(); + std::string dumpChain() const; }; } diff --git a/src/Interpreters/InterpreterAlterQuery.cpp b/src/Interpreters/InterpreterAlterQuery.cpp index cd35e619d37..2f9e93d0eee 100644 --- a/src/Interpreters/InterpreterAlterQuery.cpp +++ b/src/Interpreters/InterpreterAlterQuery.cpp @@ -34,6 +34,7 @@ InterpreterAlterQuery::InterpreterAlterQuery(const ASTPtr & query_ptr_, const Co BlockIO InterpreterAlterQuery::execute() { + BlockIO res; const auto & alter = query_ptr->as(); if (!alter.cluster.empty()) @@ -86,7 +87,9 @@ BlockIO InterpreterAlterQuery::execute() if (!partition_commands.empty()) { table->checkAlterPartitionIsPossible(partition_commands, metadata_snapshot, context.getSettingsRef()); - table->alterPartition(query_ptr, metadata_snapshot, partition_commands, context); + auto partition_commands_pipes = table->alterPartition(query_ptr, metadata_snapshot, partition_commands, context); + if (!partition_commands_pipes.empty()) + res.pipeline.init(std::move(partition_commands_pipes)); } if (!live_view_commands.empty()) @@ -113,7 +116,7 @@ BlockIO InterpreterAlterQuery::execute() table->alter(alter_commands, context, alter_lock); } - return {}; + return res; } diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 94740ae0bd4..759543950de 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -764,4 +764,23 @@ std::optional MutationsInterpreter::getStorageSortDescriptionIf return sort_description; } +bool MutationsInterpreter::Stage::isAffectingAllColumns(const Names & storage_columns) const +{ + /// is subset + for (const auto & storage_column : storage_columns) + if (!output_columns.count(storage_column)) + return false; + + return true; +} + +bool MutationsInterpreter::isAffectingAllColumns() const +{ + auto storage_columns = metadata_snapshot->getColumns().getNamesOfPhysical(); + if (stages.empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Mutation interpreter has no stages"); + + return stages.back().isAffectingAllColumns(storage_columns); +} + } diff --git a/src/Interpreters/MutationsInterpreter.h b/src/Interpreters/MutationsInterpreter.h index 894d135a099..c9130ad6613 100644 --- a/src/Interpreters/MutationsInterpreter.h +++ b/src/Interpreters/MutationsInterpreter.h @@ -42,6 +42,9 @@ public: /// Only changed columns. const Block & getUpdatedHeader() const; + /// Latest mutation stage affects all columns in storage + bool isAffectingAllColumns() const; + private: ASTPtr prepare(bool dry_run); @@ -86,8 +89,8 @@ private: ASTs filters; std::unordered_map column_to_updated; - /// Contains columns that are changed by this stage, - /// columns changed by the previous stages and also columns needed by the next stages. + /// Contains columns that are changed by this stage, columns changed by + /// the previous stages and also columns needed by the next stages. 
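+ /// (Clarifying note, not part of the patch.) This accumulated set is what the new + /// isAffectingAllColumns() inspects: the mutation rewrites every column exactly when the final + /// stage's output_columns contain all physical columns of the storage.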
NameSet output_columns; std::unique_ptr analyzer; @@ -97,6 +100,9 @@ private: /// then there is (possibly) an UPDATE step, and finally a projection step. ExpressionActionsChain expressions_chain; Names filter_column_names; + + /// Check that stage affects all storage columns + bool isAffectingAllColumns(const Names & storage_columns) const; }; std::unique_ptr updated_header; diff --git a/src/Processors/Chunk.cpp b/src/Processors/Chunk.cpp index 340cd4a43d7..4800bfca2ce 100644 --- a/src/Processors/Chunk.cpp +++ b/src/Processors/Chunk.cpp @@ -65,10 +65,13 @@ void Chunk::setColumns(MutableColumns columns_, UInt64 num_rows_) void Chunk::checkNumRowsIsConsistent() { - for (auto & column : columns) + for (size_t i = 0; i < columns.size(); ++i) + { + auto & column = columns[i]; if (column->size() != num_rows) - throw Exception("Invalid number of rows in Chunk column " + column->getName()+ ": expected " + + throw Exception("Invalid number of rows in Chunk column " + column->getName()+ " position " + toString(i) + ": expected " + toString(num_rows) + ", got " + toString(column->size()), ErrorCodes::LOGICAL_ERROR); + } } MutableColumns Chunk::mutateColumns() diff --git a/src/Processors/QueryPipeline.cpp b/src/Processors/QueryPipeline.cpp index f3635ac5408..f063b82e1b2 100644 --- a/src/Processors/QueryPipeline.cpp +++ b/src/Processors/QueryPipeline.cpp @@ -654,7 +654,7 @@ void QueryPipeline::unitePipelines( if (extremes.size() == 1) extremes_port = extremes.back(); else - extremes_port = uniteExtremes(extremes, current_header, processors); + extremes_port = uniteExtremes(extremes, common_header, processors); } if (!totals.empty()) @@ -662,7 +662,7 @@ void QueryPipeline::unitePipelines( if (totals.size() == 1) totals_having_port = totals.back(); else - totals_having_port = uniteTotals(totals, current_header, processors); + totals_having_port = uniteTotals(totals, common_header, processors); } current_header = common_header; diff --git a/src/Processors/Transforms/ConvertingTransform.cpp b/src/Processors/Transforms/ConvertingTransform.cpp index a0e9626b302..95830f2a1c3 100644 --- a/src/Processors/Transforms/ConvertingTransform.cpp +++ b/src/Processors/Transforms/ConvertingTransform.cpp @@ -12,7 +12,7 @@ namespace DB namespace ErrorCodes { extern const int THERE_IS_NO_COLUMN; - extern const int BLOCKS_HAVE_DIFFERENT_STRUCTURE; + extern const int ILLEGAL_COLUMN; extern const int NUMBER_OF_COLUMNS_DOESNT_MATCH; } @@ -82,12 +82,12 @@ ConvertingTransform::ConvertingTransform( if (res_const->getField() != src_const->getField()) throw Exception("Cannot convert column " + backQuoteIfNeed(res_elem.name) + " because " "it is constant but values of constants are different in source and result", - ErrorCodes::BLOCKS_HAVE_DIFFERENT_STRUCTURE); + ErrorCodes::ILLEGAL_COLUMN); } else throw Exception("Cannot convert column " + backQuoteIfNeed(res_elem.name) + " because " "it is non constant in source stream but must be constant in result", - ErrorCodes::BLOCKS_HAVE_DIFFERENT_STRUCTURE); + ErrorCodes::ILLEGAL_COLUMN); } /// Check conversion by dry run CAST function. diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index e6a1a8e1298..1c75a661339 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -355,7 +355,7 @@ public: /** ALTER tables with regard to its partitions. * Should handle locks for each command on its own. 
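+ * (Clarifying note, not part of the original comment.) Returning Pipes instead of void lets + * commands such as FREEZE stream back per-part results (see PartitionCommandsResultInfo), which + * InterpreterAlterQuery exposes as the ALTER query's result pipeline.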
*/ - virtual void alterPartition(const ASTPtr & /* query */, const StorageMetadataPtr & /* metadata_snapshot */, const PartitionCommands & /* commands */, const Context & /* context */) + virtual Pipes alterPartition(const ASTPtr & /* query */, const StorageMetadataPtr & /* metadata_snapshot */, const PartitionCommands & /* commands */, const Context & /* context */) { throw Exception("Partition operations are not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED); } diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 1d04118643f..d5b59f3d7b6 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -552,6 +552,30 @@ void IMergeTreeDataPart::loadRowsCount() auto buf = openForReading(volume->getDisk(), path); readIntText(rows_count, *buf); assertEOF(*buf); + +#ifndef NDEBUG + /// columns have to be loaded + for (const auto & column : getColumns()) + { + /// Most trivial types + if (column.type->isValueRepresentedByNumber() && !column.type->haveSubtypes()) + { + auto size = getColumnSize(column.name, *column.type); + + if (size.data_uncompressed == 0) + continue; + + size_t rows_in_column = size.data_uncompressed / column.type->getSizeOfValueInMemory(); + if (rows_in_column != rows_count) + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Column {} has rows count {} according to size in memory " + "and size of single value, but data part {} has {} rows", backQuote(column.name), rows_in_column, name, rows_count); + } + } + } +#endif } else { diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 57d9c86f102..6847a09b85c 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -1089,7 +1089,7 @@ BoolMask KeyCondition::checkInRange( /* std::cerr << "Hyperrectangle: "; for (size_t i = 0, size = key_ranges.size(); i != size; ++i) std::cerr << (i != 0 ? " x " : "") << key_ranges[i].toString(); - std::cerr << ": " << res << "\n";*/ + std::cerr << ": " << res.can_be_true << "\n";*/ return res; }); @@ -1112,10 +1112,20 @@ std::optional KeyCondition::applyMonotonicFunctionsChainToRange( return {}; } + /// If we apply function to open interval, we can get empty intervals in result. + /// E.g. for ('2020-01-03', '2020-01-20') after applying 'toYYYYMM' we will get ('202001', '202001'). + /// To avoid this we make range left and right included. 
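+ /// (Worked example, not in the original comment.) Taken as an open interval, ('202001', '202001') + /// matches nothing, so the whole range would be pruned even though dates 2020-01-04 .. 2020-01-19 + /// map to 202001 and must match; closing both ends gives ['202001', '202001'], which covers the month.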
if (!key_range.left.isNull()) + { key_range.left = applyFunction(func, current_type, key_range.left); + key_range.left_included = true; + } + if (!key_range.right.isNull()) + { key_range.right = applyFunction(func, current_type, key_range.right); + key_range.right_included = true; + } current_type = func->getReturnType(); diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index c71172850ba..740d44605ee 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1662,9 +1662,9 @@ void MergeTreeData::changeSettings( } } -void MergeTreeData::freezeAll(const String & with_name, const StorageMetadataPtr & metadata_snapshot, const Context & context, TableLockHolder &) +PartitionCommandsResultInfo MergeTreeData::freezeAll(const String & with_name, const StorageMetadataPtr & metadata_snapshot, const Context & context, TableLockHolder &) { - freezePartitionsByMatcher([] (const DataPartPtr &){ return true; }, metadata_snapshot, with_name, context); + return freezePartitionsByMatcher([] (const DataPartPtr &) { return true; }, metadata_snapshot, with_name, context); } void MergeTreeData::PartsTemporaryRename::addPart(const String & old_name, const String & new_name) @@ -2408,11 +2408,6 @@ static void loadPartAndFixMetadataImpl(MergeTreeData::MutableDataPartPtr part) auto disk = part->volume->getDisk(); String full_part_path = part->getFullRelativePath(); - /// Earlier the list of columns was written incorrectly. Delete it and re-create. - /// But in compact parts we can't get list of columns without this file. - if (isWidePart(part)) - disk->removeIfExists(full_part_path + "columns.txt"); - part->loadColumnsChecksumsIndexes(false, true); part->modification_time = disk->getLastModified(full_part_path).epochTime(); } @@ -2468,7 +2463,7 @@ void MergeTreeData::removePartContributionToColumnSizes(const DataPartPtr & part } -void MergeTreeData::freezePartition(const ASTPtr & partition_ast, const StorageMetadataPtr & metadata_snapshot, const String & with_name, const Context & context, TableLockHolder &) +PartitionCommandsResultInfo MergeTreeData::freezePartition(const ASTPtr & partition_ast, const StorageMetadataPtr & metadata_snapshot, const String & with_name, const Context & context, TableLockHolder &) { std::optional prefix; String partition_id; @@ -2492,7 +2487,7 @@ void MergeTreeData::freezePartition(const ASTPtr & partition_ast, const StorageM LOG_DEBUG(log, "Freezing parts with partition ID {}", partition_id); - freezePartitionsByMatcher( + return freezePartitionsByMatcher( [&prefix, &partition_id](const DataPartPtr & part) { if (prefix) @@ -3319,7 +3314,7 @@ MergeTreeData::PathsWithDisks MergeTreeData::getRelativeDataPathsWithDisks() con return res; } -void MergeTreeData::freezePartitionsByMatcher(MatcherFn matcher, const StorageMetadataPtr & metadata_snapshot, const String & with_name, const Context & context) +PartitionCommandsResultInfo MergeTreeData::freezePartitionsByMatcher(MatcherFn matcher, const StorageMetadataPtr & metadata_snapshot, const String & with_name, const Context & context) { String clickhouse_path = Poco::Path(context.getPath()).makeAbsolute().toString(); String default_shadow_path = clickhouse_path + "shadow/"; @@ -3331,6 +3326,10 @@ void MergeTreeData::freezePartitionsByMatcher(MatcherFn matcher, const StorageMe /// Acquire a snapshot of active data parts to prevent removing while doing backup. const auto data_parts = getDataParts(); + String backup_name = (!with_name.empty() ? 
escapeForFileName(with_name) : toString(increment)); + + PartitionCommandsResultInfo result; + size_t parts_processed = 0; for (const auto & part : data_parts) { @@ -3339,11 +3338,7 @@ void MergeTreeData::freezePartitionsByMatcher(MatcherFn matcher, const StorageMe part->volume->getDisk()->createDirectories(shadow_path); - String backup_path = shadow_path - + (!with_name.empty() - ? escapeForFileName(with_name) - : toString(increment)) - + "/"; + String backup_path = shadow_path + backup_name + "/"; LOG_DEBUG(log, "Freezing part {} snapshot will be placed at {}", part->name, backup_path); @@ -3356,10 +3351,17 @@ void MergeTreeData::freezePartitionsByMatcher(MatcherFn matcher, const StorageMe part->volume->getDisk()->removeIfExists(backup_part_path + "/" + DELETE_ON_DESTROY_MARKER_PATH); part->is_frozen.store(true, std::memory_order_relaxed); + result.push_back(PartitionCommandResultInfo{ + .partition_id = part->info.partition_id, + .part_name = part->name, + .backup_path = backup_path, + .backup_name = backup_name, + }); ++parts_processed; } LOG_DEBUG(log, "Freezed {} parts", parts_processed); + return result; } bool MergeTreeData::canReplacePartition(const DataPartPtr & src_part) const diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 2c53b3287cd..a50ea2a939f 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -515,7 +515,7 @@ public: TableLockHolder & table_lock_holder); /// Freezes all parts. - void freezeAll( + PartitionCommandsResultInfo freezeAll( const String & with_name, const StorageMetadataPtr & metadata_snapshot, const Context & context, @@ -541,7 +541,7 @@ public: * Backup is created in directory clickhouse_dir/shadow/i/, where i - incremental number, * or if 'with_name' is specified - backup is created in directory with specified name. */ - void freezePartition(const ASTPtr & partition, const StorageMetadataPtr & metadata_snapshot, const String & with_name, const Context & context, TableLockHolder & table_lock_holder); + PartitionCommandsResultInfo freezePartition(const ASTPtr & partition, const StorageMetadataPtr & metadata_snapshot, const String & with_name, const Context & context, TableLockHolder & table_lock_holder); public: @@ -836,7 +836,7 @@ protected: /// Common part for |freezePartition()| and |freezeAll()|. using MatcherFn = std::function; - void freezePartitionsByMatcher(MatcherFn matcher, const StorageMetadataPtr & metadata_snapshot, const String & with_name, const Context & context); + PartitionCommandsResultInfo freezePartitionsByMatcher(MatcherFn matcher, const StorageMetadataPtr & metadata_snapshot, const String & with_name, const Context & context); bool canReplacePartition(const DataPartPtr & src_part) const; diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index b59070ca070..a0e0f35bc15 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -30,13 +30,15 @@ #include #include #include -#include -#include -#include +#include +#include +#include +#include #include + namespace ProfileEvents { extern const Event MergedRows; @@ -173,8 +175,12 @@ UInt64 MergeTreeDataMergerMutator::getMaxSourcePartsSizeForMerge(size_t pool_siz size_t free_entries = pool_size - pool_used; const auto data_settings = data.getSettings(); + /// Always allow maximum size if one or less pool entries is busy. 
+ /// One entry is probably the entry where this function is executed. + /// This will protect from bad settings. + UInt64 max_size = 0; - if (free_entries >= data_settings->number_of_free_entries_in_pool_to_lower_max_size_of_merge) + if (pool_used <= 1 || free_entries >= data_settings->number_of_free_entries_in_pool_to_lower_max_size_of_merge) max_size = data_settings->max_bytes_to_merge_at_max_space_in_pool; else max_size = interpolateExponential( @@ -195,7 +201,8 @@ UInt64 MergeTreeDataMergerMutator::getMaxSourcePartSizeForMutation() UInt64 disk_space = data.getStoragePolicy()->getMaxUnreservedFreeSpace(); /// Allow mutations only if there are enough threads, leave free threads for merges else - if (background_pool_size - busy_threads_in_pool >= data_settings->number_of_free_entries_in_pool_to_execute_mutation) + if (busy_threads_in_pool <= 1 + || background_pool_size - busy_threads_in_pool >= data_settings->number_of_free_entries_in_pool_to_execute_mutation) return static_cast(disk_space / DISK_USAGE_COEFFICIENT_TO_RESERVE); return 0; @@ -219,14 +226,13 @@ bool MergeTreeDataMergerMutator::selectPartsToMerge( return false; } - time_t current_time = time(nullptr); + time_t current_time = std::time(nullptr); IMergeSelector::Partitions partitions; const String * prev_partition_id = nullptr; /// Previous part only in boundaries of partition frame const MergeTreeData::DataPartPtr * prev_part = nullptr; - bool has_part_with_expired_ttl = false; for (const MergeTreeData::DataPartPtr & part : data_parts) { /// Check predicate only for first part in each partition. @@ -258,11 +264,6 @@ bool MergeTreeDataMergerMutator::selectPartsToMerge( part_info.min_ttl = part->ttl_infos.part_min_ttl; part_info.max_ttl = part->ttl_infos.part_max_ttl; - time_t ttl = data_settings->ttl_only_drop_parts ? part_info.max_ttl : part_info.min_ttl; - - if (ttl && ttl <= current_time) - has_part_with_expired_ttl = true; - partitions.back().emplace_back(part_info); /// Check for consistency of data parts. If assertion is failed, it requires immediate investigation. @@ -275,38 +276,38 @@ bool MergeTreeDataMergerMutator::selectPartsToMerge( prev_part = ∂ } - std::unique_ptr merge_selector; + IMergeSelector::PartsInPartition parts_to_merge; - SimpleMergeSelector::Settings merge_settings; - if (aggressive) - merge_settings.base = 1; - - bool can_merge_with_ttl = - (current_time - last_merge_with_ttl > data_settings->merge_with_ttl_timeout); - - /// NOTE Could allow selection of different merge strategy. 
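For context on the gate introduced above: the limit stays at its maximum while the pool is mostly free and otherwise shrinks exponentially with pool usage; letting `pool_used <= 1` through protects the single running entry from overly strict settings. Below is a hedged model of that policy, with a local re-implementation of interpolateExponential and invented parameter values; it sketches the formula, it is not the actual ClickHouse helper:

```
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <iostream>

// Local illustration of exponential interpolation between min and max.
static double interpolateExponential(double min, double max, double ratio)
{
    return min * std::pow(max / min, ratio);
}

// Simplified model of the pool-based merge size limit described above.
static std::uint64_t maxSourcePartsSize(std::size_t pool_size, std::size_t pool_used,
                                        std::uint64_t max_bytes, std::uint64_t lowered_bytes,
                                        std::size_t free_entries_threshold)
{
    std::size_t free_entries = pool_size - pool_used;
    // With at most one busy entry (likely the caller itself), always allow
    // the maximum size; this guards against bad settings.
    if (pool_used <= 1 || free_entries >= free_entries_threshold)
        return max_bytes;
    return static_cast<std::uint64_t>(interpolateExponential(
        static_cast<double>(lowered_bytes), static_cast<double>(max_bytes),
        static_cast<double>(free_entries) / static_cast<double>(free_entries_threshold)));
}

int main()
{
    std::cout << maxSourcePartsSize(16, 1, 150ULL << 30, 1ULL << 20, 8) << '\n';  // pool almost idle: full size
    std::cout << maxSourcePartsSize(16, 12, 150ULL << 30, 1ULL << 20, 8) << '\n'; // pool busy: sharply reduced
}
```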
- if (can_merge_with_ttl && has_part_with_expired_ttl && !ttl_merges_blocker.isCancelled()) + if (!ttl_merges_blocker.isCancelled()) { - merge_selector = std::make_unique(current_time, data_settings->ttl_only_drop_parts); - last_merge_with_ttl = current_time; + TTLMergeSelector merge_selector( + next_ttl_merge_times_by_partition, + current_time, + data_settings->merge_with_ttl_timeout, + data_settings->ttl_only_drop_parts); + parts_to_merge = merge_selector.select(partitions, max_total_size_to_merge); } - else - merge_selector = std::make_unique(merge_settings); - - IMergeSelector::PartsInPartition parts_to_merge = merge_selector->select( - partitions, - max_total_size_to_merge); if (parts_to_merge.empty()) { - if (out_disable_reason) - *out_disable_reason = "There is no need to merge parts according to merge selector algorithm"; - return false; - } + SimpleMergeSelector::Settings merge_settings; + if (aggressive) + merge_settings.base = 1; - /// Allow to "merge" part with itself if we need remove some values with expired ttl - if (parts_to_merge.size() == 1 && !has_part_with_expired_ttl) - throw Exception("Logical error: merge selector returned only one part to merge", ErrorCodes::LOGICAL_ERROR); + parts_to_merge = SimpleMergeSelector(merge_settings) + .select(partitions, max_total_size_to_merge); + + /// Do not allow to "merge" part with itself for regular merges, unless it is a TTL-merge where it is ok to remove some values with expired ttl + if (parts_to_merge.size() == 1) + throw Exception("Logical error: merge selector returned only one part to merge", ErrorCodes::LOGICAL_ERROR); + + if (parts_to_merge.empty()) + { + if (out_disable_reason) + *out_disable_reason = "There is no need to merge parts according to merge selector algorithm"; + return false; + } + } MergeTreeData::DataPartsVector parts; parts.reserve(parts_to_merge.size()); @@ -1092,7 +1093,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor need_remove_expired_values = true; /// All columns from part are changed and may be some more that were missing before in part - if (!isWidePart(source_part) || source_part->getColumns().isSubsetOf(updated_header.getNamesAndTypesList())) + if (!isWidePart(source_part) || (interpreter && interpreter->isAffectingAllColumns())) { auto part_indices = getIndicesForNewDataPart(metadata_snapshot->getSecondaryIndices(), for_file_renames); mutateAllPartColumns( @@ -1478,13 +1479,14 @@ NamesAndTypesList MergeTreeDataMergerMutator::getColumnsForNewDataPart( return updated_header.getNamesAndTypesList(); NameSet removed_columns; - NameToNameMap renamed_columns; + NameToNameMap renamed_columns_to_from; + /// All commands are validated in AlterCommand so we don't care about order for (const auto & command : commands_for_removes) { if (command.type == MutationCommand::DROP_COLUMN) removed_columns.insert(command.column_name); if (command.type == MutationCommand::RENAME_COLUMN) - renamed_columns.emplace(command.rename_to, command.column_name); + renamed_columns_to_from.emplace(command.rename_to, command.column_name); } Names source_column_names = source_part->getColumns().getNames(); NameSet source_columns_name_set(source_column_names.begin(), source_column_names.end()); @@ -1497,17 +1499,49 @@ NamesAndTypesList MergeTreeDataMergerMutator::getColumnsForNewDataPart( it->type = updated_type; ++it; } - else if (source_columns_name_set.count(it->name) && !removed_columns.count(it->name)) - { - ++it; - } - else if (renamed_columns.count(it->name) && 
source_columns_name_set.count(renamed_columns[it->name]))
-        {
-            ++it;
-        }
         else
         {
-            it = storage_columns.erase(it);
+            if (!source_columns_name_set.count(it->name))
+            {
+                /// Source part doesn't have the column, but some other column
+                /// was renamed to its name.
+                auto renamed_it = renamed_columns_to_from.find(it->name);
+                if (renamed_it != renamed_columns_to_from.end()
+                    && source_columns_name_set.count(renamed_it->second))
+                    ++it;
+                else
+                    it = storage_columns.erase(it);
+            }
+            else
+            {
+                bool was_renamed = false;
+                bool was_removed = removed_columns.count(it->name);
+
+                /// Check that this column was renamed to some other name
+                for (const auto & [rename_to, rename_from] : renamed_columns_to_from)
+                {
+                    if (rename_from == it->name)
+                    {
+                        was_renamed = true;
+                        break;
+                    }
+                }
+
+                /// If we want to rename this column to some other name, then
+                /// its previous version should be dropped or removed
+                if (renamed_columns_to_from.count(it->name) && !was_renamed && !was_removed)
+                    throw Exception(
+                        ErrorCodes::LOGICAL_ERROR,
+                        "Incorrect mutation commands, trying to rename column {} to {}, but part {} already has column {}", renamed_columns_to_from[it->name], it->name, source_part->name, it->name);
+
+                /// Column was renamed and no other column was renamed to its
+                /// name, or the column was dropped.
+                if (!renamed_columns_to_from.count(it->name) && (was_renamed || was_removed))
+                    it = storage_columns.erase(it);
+                else
+                    ++it;
+            }
         }
     }
diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h
index 121cc770d51..3c7fcd99f95 100644
--- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h
+++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include <Storages/MergeTree/TTLMergeSelector.h>
 
 namespace DB
 
@@ -242,8 +243,10 @@ private:
     /// When the last time you wrote to the log that the disk space was running out (not to write about this too often).
time_t disk_space_warning_time = 0; - /// Last time when TTLMergeSelector has been used - time_t last_merge_with_ttl = 0; + /// Stores the next TTL merge due time for each partition (used only by TTLMergeSelector) + TTLMergeSelector::PartitionIdToTTLs next_ttl_merge_times_by_partition; + /// Performing TTL merges independently for each partition guarantees that + /// there is only a limited number of TTL merges and no partition stores data, that is too stale }; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp index b33a3d4645d..f133d438866 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp @@ -13,6 +13,7 @@ namespace ErrorCodes extern const int CANNOT_READ_ALL_DATA; extern const int NO_FILE_IN_DATA_PART; extern const int BAD_SIZE_OF_FILE_IN_DATA_PART; + extern const int LOGICAL_ERROR; } @@ -237,6 +238,21 @@ void MergeTreeDataPartWide::calculateEachColumnSizes(ColumnSizeByName & each_col ColumnSize size = getColumnSizeImpl(column.name, *column.type, &processed_substreams); each_columns_size[column.name] = size; total_size.add(size); + +#ifndef NDEBUG + /// Most trivial types + if (rows_count != 0 && column.type->isValueRepresentedByNumber() && !column.type->haveSubtypes()) + { + size_t rows_in_column = size.data_uncompressed / column.type->getSizeOfValueInMemory(); + if (rows_in_column != rows_count) + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Column {} has rows count {} according to size in memory " + "and size of single value, but data part {} has {} rows", backQuote(column.name), rows_in_column, name, rows_count); + } + } +#endif } } diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index b0ac33b4fc1..35f38cd5965 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -567,6 +567,7 @@ Pipes MergeTreeDataSelectExecutor::readFromParts( RangesInDataParts parts_with_ranges(parts.size()); size_t sum_marks = 0; + std::atomic sum_marks_pk = 0; size_t sum_ranges = 0; /// Let's find what range to read from each part. 
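Both debug-build checks added above rest on the same arithmetic: for a fixed-width column, the uncompressed byte size divided by the per-value size must equal the part's row count. A self-contained sketch of that invariant, with simplified names and a standard exception type in place of ClickHouse's Exception:

```
#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <string>

// Mirrors the NDEBUG consistency check: derive the row count of a trivial
// fixed-width column from its uncompressed size and compare with the part.
static void checkColumnRows(const std::string & column_name,
                            std::size_t data_uncompressed, std::size_t value_size,
                            std::size_t rows_count)
{
    if (data_uncompressed == 0)
        return; // column not materialized in this part, nothing to validate
    std::size_t rows_in_column = data_uncompressed / value_size;
    if (rows_in_column != rows_count)
        throw std::logic_error(
            "Column " + column_name + " has " + std::to_string(rows_in_column)
            + " rows according to its uncompressed size, but the part has "
            + std::to_string(rows_count) + " rows");
}

int main()
{
    // A Date column is stored as UInt16, so 100 rows occupy 200 bytes.
    checkColumnRows("EventDate", 100 * sizeof(std::uint16_t), sizeof(std::uint16_t), 100);
}
```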
@@ -590,6 +591,8 @@ Pipes MergeTreeDataSelectExecutor::readFromParts(
             }
         }
 
+        sum_marks_pk.fetch_add(ranges.getMarksCount(), std::memory_order_relaxed);
+
         for (const auto & index_and_condition : useful_indices)
             ranges.ranges = filterMarksUsingIndex(
                 index_and_condition.first, index_and_condition.second, part, ranges.ranges, settings, reader_settings, log);
@@ -636,7 +639,7 @@ Pipes MergeTreeDataSelectExecutor::readFromParts(
         parts_with_ranges.resize(next_part);
     }
 
-    LOG_DEBUG(log, "Selected {} parts by date, {} parts by key, {} marks to read from {} ranges", parts.size(), parts_with_ranges.size(), sum_marks, sum_ranges);
+    LOG_DEBUG(log, "Selected {} parts by date, {} parts by key, {} marks by primary key, {} marks to read from {} ranges", parts.size(), parts_with_ranges.size(), sum_marks_pk.load(std::memory_order_relaxed), sum_marks, sum_ranges);
 
     if (parts_with_ranges.empty())
         return {};
@@ -1552,6 +1555,7 @@ MarkRanges MergeTreeDataSelectExecutor::filterMarksUsingIndex(
         part->index_granularity_info.index_granularity_bytes);
 
     size_t granules_dropped = 0;
+    size_t total_granules = 0;
 
     size_t marks_count = part->getMarksCount();
     size_t final_mark = part->index_granularity.hasFinalMark();
@@ -1578,6 +1582,8 @@ MarkRanges MergeTreeDataSelectExecutor::filterMarksUsingIndex(
         if (last_index_mark != index_range.begin || !granule)
             reader.seek(index_range.begin);
 
+        total_granules += index_range.end - index_range.begin;
+
         for (size_t index_mark = index_range.begin; index_mark < index_range.end; ++index_mark)
         {
             if (index_mark != index_range.begin || !granule || last_index_mark != index_range.begin)
@@ -1602,7 +1608,7 @@ MarkRanges MergeTreeDataSelectExecutor::filterMarksUsingIndex(
         last_index_mark = index_range.end - 1;
     }
 
-    LOG_DEBUG(log, "Index {} has dropped {} granules.", backQuote(index_helper->index.name), granules_dropped);
+    LOG_DEBUG(log, "Index {} has dropped {} / {} granules.", backQuote(index_helper->index.name), granules_dropped, total_granules);
 
     return res;
 }
diff --git a/src/Storages/MergeTree/MergeTreeMutationStatus.cpp b/src/Storages/MergeTree/MergeTreeMutationStatus.cpp
new file mode 100644
index 00000000000..4819cf9b2a9
--- /dev/null
+++ b/src/Storages/MergeTree/MergeTreeMutationStatus.cpp
@@ -0,0 +1,34 @@
+#include <Storages/MergeTree/MergeTreeMutationStatus.h>
+
+#include <boost/algorithm/string/join.hpp>
+#include <Common/Exception.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int UNFINISHED;
+}
+
+void checkMutationStatus(std::optional<MergeTreeMutationStatus> & status, const Strings & mutation_ids)
+{
+    if (!status)
+    {
+        assert(mutation_ids.size() == 1);
+        throw Exception(ErrorCodes::UNFINISHED, "Mutation {} was killed", mutation_ids[0]);
+    }
+    else if (!status->is_done && !status->latest_fail_reason.empty())
+    {
+        throw Exception(
+            ErrorCodes::UNFINISHED,
+            "Exception happened during execution of mutation{} '{}' with part '{}' reason: '{}'. This error may be retryable or not. "
+            "In case of an unretryable error, the mutation can be killed with the KILL MUTATION query",
+            mutation_ids.size() > 1 ?
"s" : "", + boost::algorithm::join(mutation_ids, ", "), + status->latest_failed_part, + status->latest_fail_reason); + } +} + +} diff --git a/src/Storages/MergeTree/MergeTreeMutationStatus.h b/src/Storages/MergeTree/MergeTreeMutationStatus.h index 3a9ecf30eb1..62d7c1bbbcd 100644 --- a/src/Storages/MergeTree/MergeTreeMutationStatus.h +++ b/src/Storages/MergeTree/MergeTreeMutationStatus.h @@ -1,12 +1,16 @@ #pragma once #include +#include +#include #include +#include namespace DB { + struct MergeTreeMutationStatus { String id; @@ -25,4 +29,10 @@ struct MergeTreeMutationStatus String latest_fail_reason; }; +/// Check mutation status and throw exception in case of error during mutation +/// (latest_fail_reason not empty) or if mutation was killed (status empty +/// optional). mutation_ids passed separately, because status may be empty and +/// we can execute multiple mutations at once +void checkMutationStatus(std::optional & status, const Strings & mutation_ids); + } diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index e3371d25822..833425ff592 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -57,8 +57,8 @@ struct MergeTreeSettings : public SettingsCollection /** Replication settings. */ \ M(SettingUInt64, replicated_deduplication_window, 100, "How many last blocks of hashes should be kept in ZooKeeper (old blocks will be deleted).", 0) \ M(SettingUInt64, replicated_deduplication_window_seconds, 7 * 24 * 60 * 60 /* one week */, "Similar to \"replicated_deduplication_window\", but determines old blocks by their lifetime. Hash of an inserted block will be deleted (and the block will not be deduplicated after) if it outside of one \"window\". You can set very big replicated_deduplication_window to avoid duplicating INSERTs during that period of time.", 0) \ - M(SettingUInt64, max_replicated_logs_to_keep, 10000, "How many records may be in log, if there is inactive replica.", 0) \ - M(SettingUInt64, min_replicated_logs_to_keep, 100, "Keep about this number of last records in ZooKeeper log, even if they are obsolete. It doesn't affect work of tables: used only to diagnose ZooKeeper log before cleaning.", 0) \ + M(SettingUInt64, max_replicated_logs_to_keep, 100, "How many records may be in log, if there is inactive replica.", 0) \ + M(SettingUInt64, min_replicated_logs_to_keep, 10, "Keep about this number of last records in ZooKeeper log, even if they are obsolete. It doesn't affect work of tables: used only to diagnose ZooKeeper log before cleaning.", 0) \ M(SettingSeconds, prefer_fetch_merged_part_time_threshold, 3600, "If time passed after replication log entry creation exceeds this threshold and sum size of parts is greater than \"prefer_fetch_merged_part_size_threshold\", prefer fetching merged part from replica instead of doing merge locally. To speed up very long merges.", 0) \ M(SettingUInt64, prefer_fetch_merged_part_size_threshold, 10ULL * 1024 * 1024 * 1024, "If sum size of parts exceeds this threshold and time passed after replication log entry creation is greater than \"prefer_fetch_merged_part_time_threshold\", prefer fetching merged part from replica instead of doing merge locally. 
To speed up very long merges.", 0) \ M(SettingBool, always_fetch_merged_part, 0, "If true, replica never merge parts and always download merged parts from other replicas.", 0) \ diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index 3deb61bf8db..4ff63f9c2de 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -1538,6 +1538,43 @@ void ReplicatedMergeTreeQueue::getInsertTimes(time_t & out_min_unprocessed_inser } +std::optional ReplicatedMergeTreeQueue::getIncompleteMutationsStatus(const String & znode_name, Strings * mutation_ids) const +{ + + std::lock_guard lock(state_mutex); + auto current_mutation_it = mutations_by_znode.find(znode_name); + /// killed + if (current_mutation_it == mutations_by_znode.end()) + return {}; + + const MutationStatus & status = current_mutation_it->second; + MergeTreeMutationStatus result + { + .is_done = status.is_done, + .latest_failed_part = status.latest_failed_part, + .latest_fail_time = status.latest_fail_time, + .latest_fail_reason = status.latest_fail_reason, + }; + + if (mutation_ids && !status.latest_fail_reason.empty()) + { + const auto & latest_failed_part_info = status.latest_failed_part_info; + auto in_partition = mutations_by_partition.find(latest_failed_part_info.partition_id); + if (in_partition != mutations_by_partition.end()) + { + const auto & version_to_status = in_partition->second; + auto begin_it = version_to_status.upper_bound(latest_failed_part_info.getDataVersion()); + for (auto it = begin_it; it != version_to_status.end(); ++it) + { + /// All mutations with the same failure + if (!it->second->is_done && it->second->latest_fail_reason == status.latest_fail_reason) + mutation_ids->push_back(it->second->entry->znode_name); + } + } + } + return result; +} + std::vector ReplicatedMergeTreeQueue::getMutationsStatus() const { std::lock_guard lock(state_mutex); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h index cd155214cac..c330631d9dd 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h @@ -399,6 +399,13 @@ public: /// Get information about the insertion times. void getInsertTimes(time_t & out_min_unprocessed_insert_time, time_t & out_max_processed_insert_time) const; + + /// Return empty optional if mutation was killed. Otherwise return partially + /// filled mutation status with information about error (latest_fail*) and + /// is_done. mutation_ids filled with all mutations with same errors, because + /// they may be executed simultaneously as one mutation. 
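To show how mutation_ids can pick up several entries for the declaration that follows: mutations whose versions lie above the failed part's data version and that share the same failure reason are reported together. A sketch over a plain std::map, with simplified stand-ins for the real queue state:

```
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Simplified stand-in for a queued mutation's state.
struct Status
{
    bool is_done = false;
    std::string fail_reason;
    std::string znode_name;
};

int main()
{
    // Mutations keyed by their version (block number), as in the queue.
    std::map<std::int64_t, Status> by_version = {
        {3, {false, "Cannot parse input", "0000000003"}},
        {5, {false, "Cannot parse input", "0000000005"}},
        {7, {false, "Different error", "0000000007"}},
    };

    std::int64_t failed_data_version = 2;       // data version of the failed part
    std::string reason = "Cannot parse input";  // failure to group by

    // Collect every unfinished mutation above the failed version that shares
    // the failure reason: they may have been executed as one combined mutation.
    std::vector<std::string> mutation_ids;
    for (auto it = by_version.upper_bound(failed_data_version); it != by_version.end(); ++it)
        if (!it->second.is_done && it->second.fail_reason == reason)
            mutation_ids.push_back(it->second.znode_name);

    for (const auto & id : mutation_ids)
        std::cout << id << '\n'; // prints 0000000003 and 0000000005
}
```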
+ std::optional getIncompleteMutationsStatus(const String & znode_name, Strings * mutation_ids = nullptr) const; + std::vector getMutationsStatus() const; void removeCurrentPartsFromMutations(); diff --git a/src/Storages/MergeTree/TTLMergeSelector.cpp b/src/Storages/MergeTree/TTLMergeSelector.cpp index 0ba341fca64..1966f2a4f0a 100644 --- a/src/Storages/MergeTree/TTLMergeSelector.cpp +++ b/src/Storages/MergeTree/TTLMergeSelector.cpp @@ -1,12 +1,20 @@ #include +#include -#include #include +#include namespace DB { +const String & getPartitionIdForPart(const TTLMergeSelector::Part & part_info) +{ + const MergeTreeData::DataPartPtr & part = *static_cast(part_info.data); + return part->info.partition_id; +} + + IMergeSelector::PartsInPartition TTLMergeSelector::select( const Partitions & partitions, const size_t max_total_size_to_merge) @@ -18,15 +26,24 @@ IMergeSelector::PartsInPartition TTLMergeSelector::select( for (size_t i = 0; i < partitions.size(); ++i) { - for (auto it = partitions[i].begin(); it != partitions[i].end(); ++it) + const auto & mergeable_parts_in_partition = partitions[i]; + if (mergeable_parts_in_partition.empty()) + continue; + + const auto & partition_id = getPartitionIdForPart(mergeable_parts_in_partition.front()); + const auto & next_merge_time_for_partition = merge_due_times[partition_id]; + if (next_merge_time_for_partition > current_time) + continue; + + for (Iterator part_it = mergeable_parts_in_partition.cbegin(); part_it != mergeable_parts_in_partition.cend(); ++part_it) { - time_t ttl = only_drop_parts ? it->max_ttl : it->min_ttl; + time_t ttl = only_drop_parts ? part_it->max_ttl : part_it->min_ttl; if (ttl && (partition_to_merge_index == -1 || ttl < partition_to_merge_min_ttl)) { partition_to_merge_min_ttl = ttl; partition_to_merge_index = i; - best_begin = it; + best_begin = part_it; } } } @@ -68,6 +85,9 @@ IMergeSelector::PartsInPartition TTLMergeSelector::select( ++best_end; } + const auto & best_partition_id = getPartitionIdForPart(best_partition.front()); + merge_due_times[best_partition_id] = current_time + merge_cooldown_time; + return PartsInPartition(best_begin, best_end); } diff --git a/src/Storages/MergeTree/TTLMergeSelector.h b/src/Storages/MergeTree/TTLMergeSelector.h index 2f03d5b9feb..5b7361d2d2b 100644 --- a/src/Storages/MergeTree/TTLMergeSelector.h +++ b/src/Storages/MergeTree/TTLMergeSelector.h @@ -1,7 +1,10 @@ #pragma once +#include #include +#include + namespace DB { @@ -10,17 +13,27 @@ namespace DB * It selects parts to merge by greedy algorithm: * 1. Finds part with the most earliest expired ttl and includes it to result. * 2. Tries to find the longest range of parts with expired ttl, that includes part from step 1. 
+ * Finally, merge selector updates TTL merge timer for the selected partition */ class TTLMergeSelector : public IMergeSelector { public: - explicit TTLMergeSelector(time_t current_time_, bool only_drop_parts_) : current_time(current_time_), only_drop_parts(only_drop_parts_) {} + using PartitionIdToTTLs = std::map; + + explicit TTLMergeSelector(PartitionIdToTTLs & merge_due_times_, time_t current_time_, Int64 merge_cooldown_time_, bool only_drop_parts_) + : merge_due_times(merge_due_times_), + current_time(current_time_), + merge_cooldown_time(merge_cooldown_time_), + only_drop_parts(only_drop_parts_) {} PartsInPartition select( const Partitions & partitions, const size_t max_total_size_to_merge) override; + private: + PartitionIdToTTLs & merge_due_times; time_t current_time; + Int64 merge_cooldown_time; bool only_drop_parts; }; diff --git a/src/Storages/PartitionCommands.cpp b/src/Storages/PartitionCommands.cpp index c3bf00187af..e3f542695cb 100644 --- a/src/Storages/PartitionCommands.cpp +++ b/src/Storages/PartitionCommands.cpp @@ -3,6 +3,11 @@ #include #include #include +#include +#include +#include +#include +#include namespace DB @@ -97,4 +102,89 @@ std::optional PartitionCommand::parse(const ASTAlterCommand * return {}; } +std::string PartitionCommand::typeToString() const +{ + switch (type) + { + case PartitionCommand::Type::ATTACH_PARTITION: + if (part) + return "ATTACH PART"; + else + return "ATTACH PARTITION"; + case PartitionCommand::Type::MOVE_PARTITION: + return "MOVE PARTITION"; + case PartitionCommand::Type::DROP_PARTITION: + if (detach) + return "DETACH PARTITION"; + else + return "DROP PARTITION"; + case PartitionCommand::Type::DROP_DETACHED_PARTITION: + if (part) + return "DROP DETACHED PART"; + else + return "DROP DETACHED PARTITION"; + case PartitionCommand::Type::FETCH_PARTITION: + return "FETCH PARTITION"; + case PartitionCommand::Type::FREEZE_ALL_PARTITIONS: + return "FREEZE ALL"; + case PartitionCommand::Type::FREEZE_PARTITION: + return "FREEZE PARTITION"; + case PartitionCommand::Type::REPLACE_PARTITION: + return "REPLACE PARTITION"; + } + __builtin_unreachable(); +} + +Pipes convertCommandsResultToSource(const PartitionCommandsResultInfo & commands_result) +{ + Block header { + ColumnWithTypeAndName(std::make_shared(), "command_type"), + ColumnWithTypeAndName(std::make_shared(), "partition_id"), + ColumnWithTypeAndName(std::make_shared(), "part_name"), + }; + + for (const auto & command_result : commands_result) + { + if (!command_result.old_part_name.empty() && !header.has("old_part_name")) + header.insert(ColumnWithTypeAndName(std::make_shared(), "old_part_name")); + + if (!command_result.backup_name.empty() && !header.has("backup_name")) + header.insert(ColumnWithTypeAndName(std::make_shared(), "backup_name")); + + if (!command_result.backup_path.empty() && !header.has("backup_path")) + header.insert(ColumnWithTypeAndName(std::make_shared(), "backup_path")); + } + + MutableColumns res_columns = header.cloneEmptyColumns(); + + for (const auto & command_result : commands_result) + { + res_columns[0]->insert(command_result.command_type); + res_columns[1]->insert(command_result.partition_id); + res_columns[2]->insert(command_result.part_name); + if (header.has("old_part_name")) + { + size_t pos = header.getPositionByName("old_part_name"); + res_columns[pos]->insert(command_result.old_part_name); + } + if (header.has("backup_name")) + { + size_t pos = header.getPositionByName("backup_name"); + res_columns[pos]->insert(command_result.backup_name); + } + if 
(header.has("backup_path"))
+        {
+            size_t pos = header.getPositionByName("backup_path");
+            res_columns[pos]->insert(command_result.backup_path);
+        }
+    }
+
+    Chunk chunk(std::move(res_columns), commands_result.size());
+
+    Pipe pipe(std::make_shared<SourceFromSingleChunk>(std::move(header), std::move(chunk)));
+    Pipes result;
+    result.emplace_back(std::move(pipe));
+    return result;
+}
+
 }
diff --git a/src/Storages/PartitionCommands.h b/src/Storages/PartitionCommands.h
index 74c25e26a0c..9f5540fdbdf 100644
--- a/src/Storages/PartitionCommands.h
+++ b/src/Storages/PartitionCommands.h
@@ -14,6 +14,9 @@ namespace DB
 
 class ASTAlterCommand;
 
+class Pipe;
+using Pipes = std::vector<Pipe>;
+
 struct PartitionCommand
 {
     enum Type
@@ -66,9 +69,39 @@ struct PartitionCommand
     String move_destination_name;
 
     static std::optional<PartitionCommand> parse(const ASTAlterCommand * command);
+    /// Convert type of the command to string (use not only type, but also
+    /// different flags)
+    std::string typeToString() const;
 };
 
 using PartitionCommands = std::vector<PartitionCommand>;
 
+/// Result of execution of a single partition command. Partition commands are
+/// quite different, so some fields will be empty for some commands. Currently
+/// used in ATTACH and FREEZE commands.
+struct PartitionCommandResultInfo
+{
+    /// Command type, always filled
+    String command_type;
+    /// Partition id, always filled
+    String partition_id;
+    /// Part name, always filled
+    String part_name;
+    /// Part name in /detached directory, filled in ATTACH
+    String old_part_name;
+    /// Path to backup directory, filled in FREEZE
+    String backup_path;
+    /// Name of the backup (specified by user or increment value), filled in
+    /// FREEZE
+    String backup_name;
+};
+
+using PartitionCommandsResultInfo = std::vector<PartitionCommandResultInfo>;
+
+/// Convert partition commands result to Source from a single Chunk, which will
+/// be used to print info to the user. Tries to create the narrowest table for
+/// the given results. For example, if all commands were FREEZE commands, then
+/// the old_part_name column will be absent.
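A rough illustration of the "narrowest table" rule just described, using plain structs instead of the real Block and Chunk machinery; field values are invented:

```
#include <iostream>
#include <string>
#include <vector>

// Stand-in for PartitionCommandResultInfo with only the fields used here.
struct ResultInfo
{
    std::string command_type, partition_id, part_name;
    std::string old_part_name, backup_path, backup_name;
};

// Optional columns appear in the header only if some command filled them.
static std::vector<std::string> buildHeader(const std::vector<ResultInfo> & results)
{
    std::vector<std::string> header = {"command_type", "partition_id", "part_name"};
    bool any_old = false, any_backup = false, any_path = false;
    for (const auto & r : results)
    {
        any_old = any_old || !r.old_part_name.empty();
        any_backup = any_backup || !r.backup_name.empty();
        any_path = any_path || !r.backup_path.empty();
    }
    if (any_old)
        header.push_back("old_part_name");
    if (any_backup)
        header.push_back("backup_name");
    if (any_path)
        header.push_back("backup_path");
    return header;
}

int main()
{
    // FREEZE-only results: old_part_name stays empty, so its column is absent.
    std::vector<ResultInfo> results = {
        {"FREEZE PARTITION", "202001", "202001_1_1_0", "", "/var/lib/clickhouse/shadow/1/", "1"}};
    for (const auto & name : buildHeader(results))
        std::cout << name << '\n';
}
```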
+Pipes convertCommandsResultToSource(const PartitionCommandsResultInfo & commands_result); } diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index 9cbe06a3701..5fd1e7792e4 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -250,11 +250,11 @@ void StorageMaterializedView::checkAlterIsPossible(const AlterCommands & command } } -void StorageMaterializedView::alterPartition( +Pipes StorageMaterializedView::alterPartition( const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, const PartitionCommands & commands, const Context & context) { checkStatementCanBeForwarded(); - getTargetTable()->alterPartition(query, metadata_snapshot, commands, context); + return getTargetTable()->alterPartition(query, metadata_snapshot, commands, context); } void StorageMaterializedView::checkAlterPartitionIsPossible( diff --git a/src/Storages/StorageMaterializedView.h b/src/Storages/StorageMaterializedView.h index 2692880eb2c..315d4cf01a2 100644 --- a/src/Storages/StorageMaterializedView.h +++ b/src/Storages/StorageMaterializedView.h @@ -51,7 +51,7 @@ public: void checkAlterIsPossible(const AlterCommands & commands, const Settings & settings) const override; - void alterPartition(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, const PartitionCommands & commands, const Context & context) override; + Pipes alterPartition(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, const PartitionCommands & commands, const Context & context) override; void checkAlterPartitionIsPossible(const PartitionCommands & commands, const StorageMetadataPtr & metadata_snapshot, const Settings & settings) const override; diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 06aef21542f..c9858969579 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -35,7 +35,6 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; extern const int ILLEGAL_PREWHERE; extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; - extern const int BLOCKS_HAVE_DIFFERENT_STRUCTURE; extern const int SAMPLING_NOT_SUPPORTED; } @@ -480,7 +479,7 @@ void StorageMerge::convertingSourceStream( { if (required_column == header_column.name) throw Exception("Block structure mismatch in Merge Storage: different types:\n" + before_block_header.dumpStructure() - + "\n" + header.dumpStructure(), ErrorCodes::BLOCKS_HAVE_DIFFERENT_STRUCTURE); + + "\n" + header.dumpStructure(), ErrorCodes::LOGICAL_ERROR); } } diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index eecde28b7bb..9ef89d62919 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -134,7 +134,7 @@ void StorageMergeTree::shutdown() /// Unlock all waiting mutations { - std::lock_guard lock(mutation_wait_mutex); + std::lock_guard lock(mutation_wait_mutex); mutation_wait_event.notify_all(); } @@ -284,9 +284,6 @@ struct CurrentlyMergingPartsTagger FutureMergedMutatedPart future_part; ReservationPtr reserved_space; - bool is_successful = false; - String exception_message; - StorageMergeTree & storage; public: @@ -339,40 +336,6 @@ public: storage.currently_merging_mutating_parts.erase(part); } - /// Update the information about failed parts in the system.mutations table. 
- - Int64 sources_data_version = future_part.parts.at(0)->info.getDataVersion(); - Int64 result_data_version = future_part.part_info.getDataVersion(); - auto mutations_begin_it = storage.current_mutations_by_version.end(); - auto mutations_end_it = storage.current_mutations_by_version.end(); - if (sources_data_version != result_data_version) - { - mutations_begin_it = storage.current_mutations_by_version.upper_bound(sources_data_version); - mutations_end_it = storage.current_mutations_by_version.upper_bound(result_data_version); - } - - for (auto it = mutations_begin_it; it != mutations_end_it; ++it) - { - MergeTreeMutationEntry & entry = it->second; - if (is_successful) - { - if (!entry.latest_failed_part.empty() && future_part.part_info.contains(entry.latest_failed_part_info)) - { - entry.latest_failed_part.clear(); - entry.latest_failed_part_info = MergeTreePartInfo(); - entry.latest_fail_time = 0; - entry.latest_fail_reason.clear(); - } - } - else - { - entry.latest_failed_part = future_part.parts.at(0)->name; - entry.latest_failed_part_info = future_part.parts.at(0)->info; - entry.latest_fail_time = time(nullptr); - entry.latest_fail_reason = exception_message; - } - } - storage.currently_processing_in_background_condition.notify_all(); } }; @@ -398,12 +361,66 @@ Int64 StorageMergeTree::startMutation(const MutationCommands & commands, String return version; } + +void StorageMergeTree::updateMutationEntriesErrors(FutureMergedMutatedPart result_part, bool is_successful, const String & exception_message) +{ + /// Update the information about failed parts in the system.mutations table. + + Int64 sources_data_version = result_part.parts.at(0)->info.getDataVersion(); + Int64 result_data_version = result_part.part_info.getDataVersion(); + if (sources_data_version != result_data_version) + { + std::lock_guard lock(currently_processing_in_background_mutex); + auto mutations_begin_it = current_mutations_by_version.upper_bound(sources_data_version); + auto mutations_end_it = current_mutations_by_version.upper_bound(result_data_version); + + for (auto it = mutations_begin_it; it != mutations_end_it; ++it) + { + MergeTreeMutationEntry & entry = it->second; + if (is_successful) + { + if (!entry.latest_failed_part.empty() && result_part.part_info.contains(entry.latest_failed_part_info)) + { + entry.latest_failed_part.clear(); + entry.latest_failed_part_info = MergeTreePartInfo(); + entry.latest_fail_time = 0; + entry.latest_fail_reason.clear(); + } + } + else + { + entry.latest_failed_part = result_part.parts.at(0)->name; + entry.latest_failed_part_info = result_part.parts.at(0)->info; + entry.latest_fail_time = time(nullptr); + entry.latest_fail_reason = exception_message; + } + } + } + + std::unique_lock lock(mutation_wait_mutex); + mutation_wait_event.notify_all(); +} + void StorageMergeTree::waitForMutation(Int64 version, const String & file_name) { LOG_INFO(log, "Waiting mutation: {}", file_name); - auto check = [version, this]() { return shutdown_called || isMutationDone(version); }; - std::unique_lock lock(mutation_wait_mutex); - mutation_wait_event.wait(lock, check); + { + auto check = [version, this]() + { + if (shutdown_called) + return true; + auto mutation_status = getIncompleteMutationsStatus(version); + return !mutation_status || mutation_status->is_done || !mutation_status->latest_fail_reason.empty(); + }; + + std::unique_lock lock(mutation_wait_mutex); + mutation_wait_event.wait(lock, check); + } + + Strings mutation_ids; + auto mutation_status = getIncompleteMutationsStatus(version, 
&mutation_ids); + checkMutationStatus(mutation_status, mutation_ids); + LOG_INFO(log, "Mutation {} done", file_name); } @@ -432,20 +449,50 @@ bool comparator(const PartVersionWithName & f, const PartVersionWithName & s) } - -bool StorageMergeTree::isMutationDone(Int64 mutation_version) const +std::optional StorageMergeTree::getIncompleteMutationsStatus(Int64 mutation_version, Strings * mutation_ids) const { std::lock_guard lock(currently_processing_in_background_mutex); + auto current_mutation_it = current_mutations_by_version.find(mutation_version); /// Killed - if (!current_mutations_by_version.count(mutation_version)) - return true; + if (current_mutation_it == current_mutations_by_version.end()) + return {}; + + MergeTreeMutationStatus result{.is_done = false}; + + const auto & mutation_entry = current_mutation_it->second; auto data_parts = getDataPartsVector(); for (const auto & data_part : data_parts) + { if (data_part->info.getDataVersion() < mutation_version) - return false; - return true; + { + + if (!mutation_entry.latest_fail_reason.empty()) + { + result.latest_failed_part = mutation_entry.latest_failed_part; + result.latest_fail_reason = mutation_entry.latest_fail_reason; + result.latest_fail_time = mutation_entry.latest_fail_time; + + /// Fill all mutations which failed with the same error + /// (we can execute several mutations together) + if (mutation_ids) + { + auto mutations_begin_it = current_mutations_by_version.upper_bound(data_part->info.getDataVersion()); + + for (auto it = mutations_begin_it; it != current_mutations_by_version.end(); ++it) + /// All mutations with the same failure + if (it->second.latest_fail_reason == result.latest_fail_reason) + mutation_ids->push_back(it->second.file_name); + } + } + + return result; + } + } + + result.is_done = true; + return result; } @@ -474,7 +521,6 @@ std::vector StorageMergeTree::getMutationsStatus() cons std::vector result; for (const auto & kv : current_mutations_by_version) { - Int64 mutation_version = kv.first; const MergeTreeMutationEntry & entry = kv.second; const PartVersionWithName needle{mutation_version, ""}; @@ -500,7 +546,7 @@ std::vector StorageMergeTree::getMutationsStatus() cons entry.create_time, block_numbers_map, parts_to_do_names, - parts_to_do_names.empty(), + /* is_done = */parts_to_do_names.empty(), entry.latest_failed_part, entry.latest_fail_time, entry.latest_fail_reason, @@ -686,12 +732,10 @@ bool StorageMergeTree::merge( merging_tagger->reserved_space, deduplicate, force_ttl); merger_mutator.renameMergedTemporaryPart(new_part, future_part.parts, nullptr); - merging_tagger->is_successful = true; write_part_log({}); } catch (...) { - merging_tagger->exception_message = getCurrentExceptionMessage(false); write_part_log(ExecutionStatus::fromCurrentException()); throw; } @@ -750,8 +794,15 @@ bool StorageMergeTree::tryMutatePart() if (mutations_begin_it == mutations_end_it) continue; - if (merger_mutator.getMaxSourcePartSizeForMutation() < part->getBytesOnDisk()) + size_t max_source_part_size = merger_mutator.getMaxSourcePartSizeForMutation(); + if (max_source_part_size < part->getBytesOnDisk()) + { + LOG_DEBUG(log, "Current max source part size for mutation is {} but part size {}. Will not mutate part {}. 
" + "Max size depends not only on available space, but also on settings " + "'number_of_free_entries_in_pool_to_execute_mutation' and 'background_pool_size'", + max_source_part_size, part->getBytesOnDisk(), part->name); continue; + } size_t current_ast_elements = 0; for (auto it = mutations_begin_it; it != mutations_end_it; ++it) @@ -828,18 +879,12 @@ bool StorageMergeTree::tryMutatePart() renameTempPartAndReplace(new_part); - tagger->is_successful = true; + updateMutationEntriesErrors(future_part, true, ""); write_part_log({}); - - /// Notify all, who wait for this or previous mutations - { - std::lock_guard lock(mutation_wait_mutex); - mutation_wait_event.notify_all(); - } } catch (...) { - tagger->exception_message = getCurrentExceptionMessage(false); + updateMutationEntriesErrors(future_part, false, getCurrentExceptionMessage(false)); write_part_log(ExecutionStatus::fromCurrentException()); throw; } @@ -1007,24 +1052,29 @@ bool StorageMergeTree::optimize( return true; } -void StorageMergeTree::alterPartition( - const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, const PartitionCommands & commands, const Context & context) +Pipes StorageMergeTree::alterPartition( + const ASTPtr & query, + const StorageMetadataPtr & metadata_snapshot, + const PartitionCommands & commands, + const Context & query_context) { + PartitionCommandsResultInfo result; for (const PartitionCommand & command : commands) { + PartitionCommandsResultInfo current_command_results; switch (command.type) { case PartitionCommand::DROP_PARTITION: checkPartitionCanBeDropped(command.partition); - dropPartition(command.partition, command.detach, context); + dropPartition(command.partition, command.detach, query_context); break; case PartitionCommand::DROP_DETACHED_PARTITION: - dropDetached(command.partition, command.part, context); + dropDetached(command.partition, command.part, query_context); break; case PartitionCommand::ATTACH_PARTITION: - attachPartition(command.partition, command.part, context); + current_command_results = attachPartition(command.partition, command.part, query_context); break; case PartitionCommand::MOVE_PARTITION: @@ -1032,18 +1082,18 @@ void StorageMergeTree::alterPartition( switch (*command.move_destination_type) { case PartitionCommand::MoveDestinationType::DISK: - movePartitionToDisk(command.partition, command.move_destination_name, command.part, context); + movePartitionToDisk(command.partition, command.move_destination_name, command.part, query_context); break; case PartitionCommand::MoveDestinationType::VOLUME: - movePartitionToVolume(command.partition, command.move_destination_name, command.part, context); + movePartitionToVolume(command.partition, command.move_destination_name, command.part, query_context); break; case PartitionCommand::MoveDestinationType::TABLE: checkPartitionCanBeDropped(command.partition); - String dest_database = context.resolveDatabase(command.to_database); - auto dest_storage = DatabaseCatalog::instance().getTable({dest_database, command.to_table}, context); - movePartitionToTable(dest_storage, command.partition, context); + String dest_database = query_context.resolveDatabase(command.to_database); + auto dest_storage = DatabaseCatalog::instance().getTable({dest_database, command.to_table}, query_context); + movePartitionToTable(dest_storage, command.partition, query_context); break; } @@ -1053,30 +1103,39 @@ void StorageMergeTree::alterPartition( case PartitionCommand::REPLACE_PARTITION: { checkPartitionCanBeDropped(command.partition); - String 
from_database = context.resolveDatabase(command.from_database); - auto from_storage = DatabaseCatalog::instance().getTable({from_database, command.from_table}, context); - replacePartitionFrom(from_storage, command.partition, command.replace, context); + String from_database = query_context.resolveDatabase(command.from_database); + auto from_storage = DatabaseCatalog::instance().getTable({from_database, command.from_table}, query_context); + replacePartitionFrom(from_storage, command.partition, command.replace, query_context); } break; case PartitionCommand::FREEZE_PARTITION: { - auto lock = lockForShare(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); - freezePartition(command.partition, metadata_snapshot, command.with_name, context, lock); + auto lock = lockForShare(query_context.getCurrentQueryId(), query_context.getSettingsRef().lock_acquire_timeout); + current_command_results = freezePartition(command.partition, metadata_snapshot, command.with_name, query_context, lock); } break; case PartitionCommand::FREEZE_ALL_PARTITIONS: { - auto lock = lockForShare(context.getCurrentQueryId(), context.getSettingsRef().lock_acquire_timeout); - freezeAll(command.with_name, metadata_snapshot, context, lock); + auto lock = lockForShare(query_context.getCurrentQueryId(), query_context.getSettingsRef().lock_acquire_timeout); + current_command_results = freezeAll(command.with_name, metadata_snapshot, query_context, lock); } break; default: - IStorage::alterPartition(query, metadata_snapshot, commands, context); // should throw an exception. + IStorage::alterPartition(query, metadata_snapshot, commands, query_context); // should throw an exception. } + + for (auto & command_result : current_command_results) + command_result.command_type = command.typeToString(); + result.insert(result.end(), current_command_results.begin(), current_command_results.end()); } + + if (query_context.getSettingsRef().alter_partition_verbose_result) + return convertCommandsResultToSource(result); + + return { }; } void StorageMergeTree::dropPartition(const ASTPtr & partition, bool detach, const Context & context) @@ -1114,24 +1173,32 @@ void StorageMergeTree::dropPartition(const ASTPtr & partition, bool detach, cons } -void StorageMergeTree::attachPartition( +PartitionCommandsResultInfo StorageMergeTree::attachPartition( const ASTPtr & partition, bool attach_part, const Context & context) { - // TODO: should get some locks to prevent race with 'alter … modify column' - + PartitionCommandsResultInfo results; PartsTemporaryRename renamed_parts(*this, "detached/"); MutableDataPartsVector loaded_parts = tryLoadPartsToAttach(partition, attach_part, context, renamed_parts); for (size_t i = 0; i < loaded_parts.size(); ++i) { LOG_INFO(log, "Attaching part {} from {}", loaded_parts[i]->name, renamed_parts.old_and_new_names[i].second); + String old_name = renamed_parts.old_and_new_names[i].first; renameTempPartAndAdd(loaded_parts[i], &increment); renamed_parts.old_and_new_names[i].first.clear(); + + results.push_back(PartitionCommandResultInfo{ + .partition_id = loaded_parts[i]->info.partition_id, + .part_name = loaded_parts[i]->name, + .old_part_name = old_name, + }); + LOG_INFO(log, "Finished attaching part"); } /// New parts with other data may appear in place of deleted parts. 
context.dropCaches();
+    return results;
 }
 
 void StorageMergeTree::replacePartitionFrom(const StoragePtr & source_table, const ASTPtr & partition, bool replace, const Context & context)
diff --git a/src/Storages/StorageMergeTree.h b/src/Storages/StorageMergeTree.h
index c80c9f44377..e12e646f04e 100644
--- a/src/Storages/StorageMergeTree.h
+++ b/src/Storages/StorageMergeTree.h
@@ -61,7 +61,7 @@ public:
         bool deduplicate,
         const Context & context) override;
 
-    void alterPartition(
+    Pipes alterPartition(
         const ASTPtr & query,
         const StorageMetadataPtr & /* metadata_snapshot */,
         const PartitionCommands & commands,
@@ -149,13 +149,21 @@ private:
 
     // Partition helpers
     void dropPartition(const ASTPtr & partition, bool detach, const Context & context);
-    void attachPartition(const ASTPtr & partition, bool part, const Context & context);
+    PartitionCommandsResultInfo attachPartition(const ASTPtr & partition, bool part, const Context & context);
+
     void replacePartitionFrom(const StoragePtr & source_table, const ASTPtr & partition, bool replace, const Context & context);
     void movePartitionToTable(const StoragePtr & dest_table, const ASTPtr & partition, const Context & context);
     bool partIsAssignedToBackgroundOperation(const DataPartPtr & part) const override;
+    /// Update mutation entries after part mutation execution. May reset old
+    /// errors if mutation was successful. Otherwise update last_failed* fields
+    /// in mutation entries.
+    void updateMutationEntriesErrors(FutureMergedMutatedPart result_part, bool is_successful, const String & exception_message);
 
-    /// Just checks versions of each active data part
-    bool isMutationDone(Int64 mutation_version) const;
+    /// Return empty optional if mutation was killed. Otherwise return partially
+    /// filled mutation status with information about error (latest_fail*) and
+    /// is_done. mutation_ids filled with mutations with the same errors, because we
+    /// can execute several mutations at once
+    std::optional<MergeTreeMutationStatus> getIncompleteMutationsStatus(Int64 mutation_version, Strings * mutation_ids = nullptr) const;
 
     void startBackgroundMovesIfNeeded() override;
diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp
index c6dc3e67b80..e2a76f45b82 100644
--- a/src/Storages/StorageReplicatedMergeTree.cpp
+++ b/src/Storages/StorageReplicatedMergeTree.cpp
@@ -348,7 +348,6 @@ void StorageReplicatedMergeTree::waitMutationToFinishOnReplicas(
     std::set<String> inactive_replicas;
     for (const String & replica : replicas)
     {
-
         LOG_DEBUG(log, "Waiting for {} to apply mutation {}", replica, mutation_id);
 
         while (!partial_shutdown_called)
@@ -358,8 +357,7 @@ void StorageReplicatedMergeTree::waitMutationToFinishOnReplicas(
             Coordination::Stat exists_stat;
             if (!getZooKeeper()->exists(zookeeper_path + "/mutations/" + mutation_id, &exists_stat, wait_event))
             {
-                LOG_WARNING(log, "Mutation {} was killed or manually removed. Nothing to wait.", mutation_id);
-                return;
+                throw Exception(ErrorCodes::UNFINISHED, "Mutation {} was killed, manually removed or table was dropped", mutation_id);
             }
 
             auto zookeeper = getZooKeeper();
@@ -387,8 +385,23 @@ void StorageReplicatedMergeTree::waitMutationToFinishOnReplicas(
             /// Replica can become inactive, so wait with timeout and recheck it
             if (wait_event->tryWait(1000))
                 break;
+
+            auto mutation_status = queue.getIncompleteMutationsStatus(mutation_id);
+            if (!mutation_status || !mutation_status->latest_fail_reason.empty())
+                break;
         }
 
+        /// It may already be removed from zk, but the local in-memory mutation
+        /// state was not updated.
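The wait loop above has three exits: the ZooKeeper node disappears (killed or dropped), the replica catches up, or the local queue status records a failure. A hedged sketch with callback stand-ins for the ZooKeeper and queue checks; names and timings are illustrative only:

```
#include <chrono>
#include <functional>
#include <stdexcept>
#include <string>
#include <thread>

// Wait with a timeout so inactive replicas are noticed, and stop early when
// the mutation disappears (killed) or its local status records a failure.
static void waitOnReplicaSketch(
    const std::string & mutation_id,
    const std::function<bool()> & znode_exists,
    const std::function<bool()> & done_on_replica,
    const std::function<bool()> & killed_or_failed)
{
    while (true)
    {
        if (!znode_exists())
            throw std::runtime_error(
                "Mutation " + mutation_id + " was killed, manually removed or table was dropped");
        if (done_on_replica())
            return;

        // Stands in for wait_event->tryWait(1000) in the real loop.
        std::this_thread::sleep_for(std::chrono::milliseconds(100));

        if (killed_or_failed())
            return; // the caller re-reads the status and reports a precise error
    }
}

int main()
{
    int polls = 0;
    waitOnReplicaSketch(
        "0000000042",
        [] { return true; },                 // znode still present
        [&polls] { return ++polls >= 3; },   // pretend the replica finishes after three polls
        [] { return false; });               // no failure recorded
}
```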
+ if (!getZooKeeper()->exists(zookeeper_path + "/mutations/" + mutation_id)) + { + throw Exception(ErrorCodes::UNFINISHED, "Mutation {} was killed, manually removed or table was dropped", mutation_id); + } + + Strings mutation_ids; + auto mutation_status = queue.getIncompleteMutationsStatus(mutation_id, &mutation_ids); + checkMutationStatus(mutation_status, mutation_ids); + if (partial_shutdown_called) throw Exception("Mutation is not finished because table shutdown was called. It will be done after table restart.", ErrorCodes::UNFINISHED); @@ -3851,14 +3864,16 @@ void StorageReplicatedMergeTree::alter( } } -void StorageReplicatedMergeTree::alterPartition( +Pipes StorageReplicatedMergeTree::alterPartition( const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, const PartitionCommands & commands, const Context & query_context) { + PartitionCommandsResultInfo result; for (const PartitionCommand & command : commands) { + PartitionCommandsResultInfo current_command_results; switch (command.type) { case PartitionCommand::DROP_PARTITION: @@ -3871,7 +3886,7 @@ void StorageReplicatedMergeTree::alterPartition( break; case PartitionCommand::ATTACH_PARTITION: - attachPartition(command.partition, metadata_snapshot, command.part, query_context); + current_command_results = attachPartition(command.partition, metadata_snapshot, command.part, query_context); break; case PartitionCommand::MOVE_PARTITION: { @@ -3911,18 +3926,26 @@ void StorageReplicatedMergeTree::alterPartition( case PartitionCommand::FREEZE_PARTITION: { auto lock = lockForShare(query_context.getCurrentQueryId(), query_context.getSettingsRef().lock_acquire_timeout); - freezePartition(command.partition, metadata_snapshot, command.with_name, query_context, lock); + current_command_results = freezePartition(command.partition, metadata_snapshot, command.with_name, query_context, lock); } break; case PartitionCommand::FREEZE_ALL_PARTITIONS: { auto lock = lockForShare(query_context.getCurrentQueryId(), query_context.getSettingsRef().lock_acquire_timeout); - freezeAll(command.with_name, metadata_snapshot, query_context, lock); + current_command_results = freezeAll(command.with_name, metadata_snapshot, query_context, lock); } break; } + for (auto & command_result : current_command_results) + command_result.command_type = command.typeToString(); + result.insert(result.end(), current_command_results.begin(), current_command_results.end()); } + + if (query_context.getSettingsRef().alter_partition_verbose_result) + return convertCommandsResultToSource(result); + + return {}; } @@ -4028,12 +4051,15 @@ void StorageReplicatedMergeTree::truncate( } -void StorageReplicatedMergeTree::attachPartition(const ASTPtr & partition, const StorageMetadataPtr & metadata_snapshot, bool attach_part, const Context & query_context) +PartitionCommandsResultInfo StorageReplicatedMergeTree::attachPartition( + const ASTPtr & partition, + const StorageMetadataPtr & metadata_snapshot, + bool attach_part, + const Context & query_context) { - // TODO: should get some locks to prevent race with 'alter … modify column' - assertNotReadonly(); + PartitionCommandsResultInfo results; PartsTemporaryRename renamed_parts(*this, "detached/"); MutableDataPartsVector loaded_parts = tryLoadPartsToAttach(partition, attach_part, query_context, renamed_parts); @@ -4044,7 +4070,13 @@ void StorageReplicatedMergeTree::attachPartition(const ASTPtr & partition, const output.writeExistingPart(loaded_parts[i]); renamed_parts.old_and_new_names[i].first.clear(); LOG_DEBUG(log, "Attached part 
{} as {}", old_name, loaded_parts[i]->name); + results.push_back(PartitionCommandResultInfo{ + .partition_id = loaded_parts[i]->info.partition_id, + .part_name = loaded_parts[i]->name, + .old_part_name = old_name, + }); } + return results; } diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index f6690051ad6..84fbae73fa5 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -111,7 +111,7 @@ public: void alter(const AlterCommands & params, const Context & query_context, TableLockHolder & table_lock_holder) override; - void alterPartition( + Pipes alterPartition( const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, const PartitionCommands & commands, @@ -534,7 +534,7 @@ private: // Partition helpers void dropPartition(const ASTPtr & query, const ASTPtr & partition, bool detach, const Context & query_context); - void attachPartition(const ASTPtr & partition, const StorageMetadataPtr & metadata_snapshot, bool part, const Context & query_context); + PartitionCommandsResultInfo attachPartition(const ASTPtr & partition, const StorageMetadataPtr & metadata_snapshot, bool part, const Context & query_context); void replacePartitionFrom(const StoragePtr & source_table, const ASTPtr & partition, bool replace, const Context & query_context); void movePartitionToTable(const StoragePtr & dest_table, const ASTPtr & partition, const Context & query_context); void fetchPartition(const ASTPtr & partition, const StorageMetadataPtr & metadata_snapshot, const String & from, const Context & query_context); diff --git a/src/Storages/ya.make b/src/Storages/ya.make index 632434e1dfc..5f5ec87ba91 100644 --- a/src/Storages/ya.make +++ b/src/Storages/ya.make @@ -87,6 +87,7 @@ SRCS( MergeTree/MergeTreeDataPartWriterOnDisk.cpp MergeTree/MergeTreeReaderInMemory.cpp MergeTree/MergeTreeWriteAheadLog.cpp + MergeTree/MergeTreeMutationStatus.cpp System/attachSystemTables.cpp System/StorageSystemAggregateFunctionCombinators.cpp System/StorageSystemAsynchronousMetrics.cpp diff --git a/tests/integration/helpers/network.py b/tests/integration/helpers/network.py index 5d738126f07..f5c2b4f8d19 100644 --- a/tests/integration/helpers/network.py +++ b/tests/integration/helpers/network.py @@ -155,8 +155,6 @@ class _NetworkManager: def __init__( self, - image_name='clickhouse_tests_helper', - image_path=p.join(CLICKHOUSE_ROOT_DIR, 'docker', 'test', 'integration', 'helper_container'), container_expire_timeout=50, container_exit_timeout=60): self.container_expire_timeout = container_expire_timeout @@ -164,14 +162,6 @@ class _NetworkManager: self._docker_client = docker.from_env(version=os.environ.get("DOCKER_API_VERSION")) - try: - self._image = self._docker_client.images.get(image_name) - except docker.errors.ImageNotFound: - # Use docker console client instead of python API to work around https://github.com/docker/docker-py/issues/1397 - subprocess.check_call( - ['docker', 'build', '--force-rm', '--tag', image_name, '--network', 'host', image_path]) - self._image = self._docker_client.images.get(image_name) - self._container = None self._ensure_container() @@ -185,15 +175,11 @@ class _NetworkManager: except docker.errors.NotFound: pass - # Work around https://github.com/docker/docker-py/issues/1477 - host_config = self._docker_client.api.create_host_config(network_mode='host', auto_remove=True) - container_id = self._docker_client.api.create_container( - self._image.id, command=('sleep %s' % self.container_exit_timeout), - 
detach=True, host_config=host_config)['Id'] - + self._container = self._docker_client.containers.run('yandex/clickhouse-integration-helper', auto_remove=True, + command=('sleep %s' % self.container_exit_timeout), + detach=True, network_mode='host') + container_id = self._container.id self._container_expire_time = time.time() + self.container_expire_timeout - self._docker_client.api.start(container_id) - self._container = self._docker_client.containers.get(container_id) return self._container diff --git a/tests/integration/test_backup_with_other_granularity/__init__.py b/tests/integration/test_backup_with_other_granularity/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_backup_with_other_granularity/test.py b/tests/integration/test_backup_with_other_granularity/test.py new file mode 100644 index 00000000000..d4ca9bd1bac --- /dev/null +++ b/tests/integration/test_backup_with_other_granularity/test.py @@ -0,0 +1,152 @@ +import pytest + + +from helpers.cluster import ClickHouseCluster +cluster = ClickHouseCluster(__file__) + + +node1 = cluster.add_instance('node1', with_zookeeper=True, image='yandex/clickhouse-server:19.4.5.35', stay_alive=True, with_installed_binary=True) +node2 = cluster.add_instance('node2', with_zookeeper=True, image='yandex/clickhouse-server:19.4.5.35', stay_alive=True, with_installed_binary=True) +node3 = cluster.add_instance('node3', with_zookeeper=True, image='yandex/clickhouse-server:19.4.5.35', stay_alive=True, with_installed_binary=True) +node4 = cluster.add_instance('node4') + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def test_backup_from_old_version(started_cluster): + node1.query("CREATE TABLE source_table(A Int64, B String) Engine = MergeTree order by tuple()") + + node1.query("INSERT INTO source_table VALUES(1, '1')") + + assert node1.query("SELECT COUNT() FROM source_table") == "1\n" + + node1.query("ALTER TABLE source_table ADD COLUMN Y String") + + node1.query("ALTER TABLE source_table FREEZE PARTITION tuple();") + + node1.restart_with_latest_version() + + node1.query("CREATE TABLE dest_table (A Int64, B String, Y String) ENGINE = ReplicatedMergeTree('/test/dest_table1', '1') ORDER BY tuple()") + + node1.query("INSERT INTO dest_table VALUES(2, '2', 'Hello')") + + assert node1.query("SELECT COUNT() FROM dest_table") == "1\n" + + node1.exec_in_container(['bash', '-c', 'cp -r /var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/ /var/lib/clickhouse/data/default/dest_table/detached']) + + assert node1.query("SELECT COUNT() FROM dest_table") == "1\n" + + node1.query("ALTER TABLE dest_table ATTACH PARTITION tuple()") + + assert node1.query("SELECT sum(A) FROM dest_table") == "3\n" + + node1.query("ALTER TABLE dest_table DETACH PARTITION tuple()") + + node1.query("ALTER TABLE dest_table ATTACH PARTITION tuple()") + + assert node1.query("SELECT sum(A) FROM dest_table") == "3\n" + + assert node1.query("CHECK TABLE dest_table") == "1\n" + + +def test_backup_from_old_version_setting(started_cluster): + node2.query("CREATE TABLE source_table(A Int64, B String) Engine = MergeTree order by tuple()") + + node2.query("INSERT INTO source_table VALUES(1, '1')") + + assert node2.query("SELECT COUNT() FROM source_table") == "1\n" + + node2.query("ALTER TABLE source_table ADD COLUMN Y String") + + node2.query("ALTER TABLE source_table FREEZE PARTITION tuple();") + + node2.restart_with_latest_version() + + 
node2.query("CREATE TABLE dest_table (A Int64, B String, Y String) ENGINE = ReplicatedMergeTree('/test/dest_table2', '1') ORDER BY tuple() SETTINGS enable_mixed_granularity_parts = 1") + + node2.query("INSERT INTO dest_table VALUES(2, '2', 'Hello')") + + assert node2.query("SELECT COUNT() FROM dest_table") == "1\n" + + node2.exec_in_container(['bash', '-c', 'cp -r /var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/ /var/lib/clickhouse/data/default/dest_table/detached']) + + assert node2.query("SELECT COUNT() FROM dest_table") == "1\n" + + node2.query("ALTER TABLE dest_table ATTACH PARTITION tuple()") + + assert node2.query("SELECT sum(A) FROM dest_table") == "3\n" + + node2.query("ALTER TABLE dest_table DETACH PARTITION tuple()") + + node2.query("ALTER TABLE dest_table ATTACH PARTITION tuple()") + + assert node2.query("SELECT sum(A) FROM dest_table") == "3\n" + + assert node1.query("CHECK TABLE dest_table") == "1\n" + + +def test_backup_from_old_version_config(started_cluster): + node3.query("CREATE TABLE source_table(A Int64, B String) Engine = MergeTree order by tuple()") + + node3.query("INSERT INTO source_table VALUES(1, '1')") + + assert node3.query("SELECT COUNT() FROM source_table") == "1\n" + + node3.query("ALTER TABLE source_table ADD COLUMN Y String") + + node3.query("ALTER TABLE source_table FREEZE PARTITION tuple();") + + def callback(n): + n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml", "1") + + node3.restart_with_latest_version(callback_onstop=callback) + + node3.query("CREATE TABLE dest_table (A Int64, B String, Y String) ENGINE = ReplicatedMergeTree('/test/dest_table3', '1') ORDER BY tuple() SETTINGS enable_mixed_granularity_parts = 1") + + node3.query("INSERT INTO dest_table VALUES(2, '2', 'Hello')") + + assert node3.query("SELECT COUNT() FROM dest_table") == "1\n" + + node3.exec_in_container(['bash', '-c', 'cp -r /var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/ /var/lib/clickhouse/data/default/dest_table/detached']) + + assert node3.query("SELECT COUNT() FROM dest_table") == "1\n" + + node3.query("ALTER TABLE dest_table ATTACH PARTITION tuple()") + + assert node3.query("SELECT sum(A) FROM dest_table") == "3\n" + + node3.query("ALTER TABLE dest_table DETACH PARTITION tuple()") + + node3.query("ALTER TABLE dest_table ATTACH PARTITION tuple()") + + assert node3.query("SELECT sum(A) FROM dest_table") == "3\n" + + assert node1.query("CHECK TABLE dest_table") == "1\n" + + +def test_backup_and_alter(started_cluster): + node4.query("CREATE TABLE backup_table(A Int64, B String, C Date) Engine = MergeTree order by tuple()") + + node4.query("INSERT INTO backup_table VALUES(2, '2', toDate('2019-10-01'))") + + node4.query("ALTER TABLE backup_table FREEZE PARTITION tuple();") + + node4.query("ALTER TABLE backup_table DROP COLUMN C") + + node4.query("ALTER TABLE backup_table MODIFY COLUMN B UInt64") + + node4.query("ALTER TABLE backup_table DROP PARTITION tuple()") + + node4.exec_in_container(['bash', '-c', 'cp -r /var/lib/clickhouse/shadow/1/data/default/backup_table/all_1_1_0/ /var/lib/clickhouse/data/default/backup_table/detached']) + + node4.query("ALTER TABLE backup_table ATTACH PARTITION tuple()") + + assert node4.query("SELECT sum(A) FROM backup_table") == "2\n" + assert node4.query("SELECT B + 2 FROM backup_table") == "4\n" diff --git a/tests/integration/test_mutations_hardlinks/__init__.py b/tests/integration/test_mutations_hardlinks/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git 
a/tests/integration/test_mutations_hardlinks/test.py b/tests/integration/test_mutations_hardlinks/test.py new file mode 100644 index 00000000000..56852f572ff --- /dev/null +++ b/tests/integration/test_mutations_hardlinks/test.py @@ -0,0 +1,133 @@ +import pytest + +import os +import time +from helpers.cluster import ClickHouseCluster +from helpers.test_tools import assert_eq_with_retry +from multiprocessing.dummy import Pool + + +cluster = ClickHouseCluster(__file__) + +node1 = cluster.add_instance('node1') + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def check_hardlinks(table, part_path, column_file, count): + column_path = os.path.join("/var/lib/clickhouse/data/default", table, part_path, column_file) + script = """ + export INODE=`ls -i {column_path} | awk '{{print $1}}'` + export COUNT=`find /var/lib/clickhouse -inum $INODE | wc -l` + test $COUNT = {count} + """.format(column_path=column_path, count=count) + + node1.exec_in_container(["bash", "-c", script]) + + +def check_exists(table, part_path, column_file): + column_path = os.path.join("/var/lib/clickhouse/data/default", table, part_path, column_file) + + node1.exec_in_container(["bash", "-c", "test -f {}".format(column_path)]) + + +def test_update_mutation(started_cluster): + node1.query("CREATE TABLE table_for_update(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()") + + node1.query("INSERT INTO table_for_update SELECT number, number, toString(number) from numbers(100)") + + assert int(node1.query("SELECT sum(value1) FROM table_for_update").strip()) == sum(range(100)) + + node1.query("ALTER TABLE table_for_update UPDATE value1 = value1 * value1 WHERE 1", settings={"mutations_sync" : "2"}) + assert int(node1.query("SELECT sum(value1) FROM table_for_update").strip()) == sum(i * i for i in range(100)) + + check_hardlinks("table_for_update", "all_1_1_0_2", "key.bin", 2) + check_hardlinks("table_for_update", "all_1_1_0_2", "value2.bin", 2) + check_hardlinks("table_for_update", "all_1_1_0_2", "value1.bin", 1) + + node1.query("ALTER TABLE table_for_update UPDATE key=key, value1=value1, value2=value2 WHERE 1", settings={"mutations_sync": "2"}) + + assert int(node1.query("SELECT sum(value1) FROM table_for_update").strip()) == sum(i * i for i in range(100)) + + check_hardlinks("table_for_update", "all_1_1_0_3", "key.bin", 1) + check_hardlinks("table_for_update", "all_1_1_0_3", "value1.bin", 1) + check_hardlinks("table_for_update", "all_1_1_0_3", "value2.bin", 1) + + +def test_modify_mutation(started_cluster): + node1.query("CREATE TABLE table_for_modify(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()") + + node1.query("INSERT INTO table_for_modify SELECT number, number, toString(number) from numbers(100)") + + assert int(node1.query("SELECT sum(value1) FROM table_for_modify").strip()) == sum(range(100)) + + node1.query("ALTER TABLE table_for_modify MODIFY COLUMN value2 UInt64", settings={"mutations_sync" : "2"}) + + assert int(node1.query("SELECT sum(value2) FROM table_for_modify").strip()) == sum(range(100)) + + check_hardlinks("table_for_modify", "all_1_1_0_2", "key.bin", 2) + check_hardlinks("table_for_modify", "all_1_1_0_2", "value1.bin", 2) + check_hardlinks("table_for_modify", "all_1_1_0_2", "value2.bin", 1) + + +def test_drop_mutation(started_cluster): + node1.query("CREATE TABLE table_for_drop(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()") + + 
node1.query("INSERT INTO table_for_drop SELECT number, number, toString(number) from numbers(100)") + + assert int(node1.query("SELECT sum(value1) FROM table_for_drop").strip()) == sum(range(100)) + + node1.query("ALTER TABLE table_for_drop DROP COLUMN value2", settings={"mutations_sync": "2"}) + + check_hardlinks("table_for_drop", "all_1_1_0_2", "key.bin", 2) + check_hardlinks("table_for_drop", "all_1_1_0_2", "value1.bin", 2) + + with pytest.raises(Exception): + check_exists("table_for_drop", "all_1_1_0_2", "value2.bin") + with pytest.raises(Exception): + check_exists("table_for_drop", "all_1_1_0_2", "value2.mrk") + + +def test_delete_and_drop_mutation(started_cluster): + node1.query("CREATE TABLE table_for_delete_and_drop(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()") + + node1.query("INSERT INTO table_for_delete_and_drop SELECT number, number, toString(number) from numbers(100)") + + assert int(node1.query("SELECT sum(value1) FROM table_for_delete_and_drop").strip()) == sum(range(100)) + + node1.query("SYSTEM STOP MERGES") + + def mutate(): + node1.query("ALTER TABLE table_for_delete_and_drop DELETE WHERE key % 2 == 0, DROP COLUMN value2") + + p = Pool(2) + p.apply_async(mutate) + + for _ in range(1, 100): + result = node1.query("SELECT COUNT() FROM system.mutations WHERE table = 'table_for_delete_and_drop' and is_done=0") + try: + if int(result.strip()) == 2: + break + except: + print "Result", result + pass + + time.sleep(0.5) + + node1.query("SYSTEM START MERGES") + + assert_eq_with_retry(node1, "SELECT COUNT() FROM table_for_delete_and_drop", str(sum(1 for i in range(100) if i % 2 != 0))) + + check_hardlinks("table_for_delete_and_drop", "all_1_1_0_3", "key.bin", 1) + check_hardlinks("table_for_delete_and_drop", "all_1_1_0_3", "value1.bin", 1) + + with pytest.raises(Exception): + check_exists("table_for_delete_and_drop", "all_1_1_0_3", "value2.bin") + with pytest.raises(Exception): + check_exists("table_for_delete_and_drop", "all_1_1_0_3", "value2.mrk") diff --git a/tests/integration/test_ttl_replicated/test.py b/tests/integration/test_ttl_replicated/test.py index a458db07a23..0f201f569b3 100644 --- a/tests/integration/test_ttl_replicated/test.py +++ b/tests/integration/test_ttl_replicated/test.py @@ -5,10 +5,12 @@ import helpers.client as client from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV + cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance('node1', with_zookeeper=True) node2 = cluster.add_instance('node2', with_zookeeper=True) + @pytest.fixture(scope="module") def started_cluster(): try: @@ -22,11 +24,13 @@ def started_cluster(): finally: cluster.shutdown() + def drop_table(nodes, table_name): for node in nodes: node.query("DROP TABLE IF EXISTS {} NO DELAY".format(table_name)) time.sleep(1) + def test_ttl_columns(started_cluster): drop_table([node1, node2], "test_ttl") for node in [node1, node2]: @@ -47,6 +51,40 @@ def test_ttl_columns(started_cluster): assert TSV(node2.query("SELECT id, a, b FROM test_ttl ORDER BY id")) == TSV(expected) +def test_merge_with_ttl_timeout(started_cluster): + table = "test_merge_with_ttl_timeout" + drop_table([node1, node2], table) + for node in [node1, node2]: + node.query( + ''' + CREATE TABLE {table}(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}', '{replica}') + ORDER BY id PARTITION BY toDayOfMonth(date); + '''.format(replica=node.name, 
table=table)) + + node1.query("SYSTEM STOP TTL MERGES {table}".format(table=table)) + node2.query("SYSTEM STOP TTL MERGES {table}".format(table=table)) + + for i in range(1, 4): + node1.query("INSERT INTO {table} VALUES (toDateTime('2000-10-{day:02d} 10:00:00'), 1, 2, 3)".format(day=i, table=table)) + + assert node1.query("SELECT countIf(a = 0) FROM {table}".format(table=table)) == "0\n" + assert node2.query("SELECT countIf(a = 0) FROM {table}".format(table=table)) == "0\n" + + node1.query("SYSTEM START TTL MERGES {table}".format(table=table)) + node2.query("SYSTEM START TTL MERGES {table}".format(table=table)) + + time.sleep(15) # TTL merges shall happen. + + for i in range(1, 4): + node1.query("INSERT INTO {table} VALUES (toDateTime('2000-10-{day:02d} 10:00:00'), 1, 2, 3)".format(day=i, table=table)) + + time.sleep(15) # TTL merges shall not happen. + + assert node1.query("SELECT countIf(a = 0) FROM {table}".format(table=table)) == "3\n" + assert node2.query("SELECT countIf(a = 0) FROM {table}".format(table=table)) == "3\n" + + def test_ttl_many_columns(started_cluster): drop_table([node1, node2], "test_ttl_2") for node in [node1, node2]: diff --git a/tests/queries/0_stateless/00834_kill_mutation.reference b/tests/queries/0_stateless/00834_kill_mutation.reference index 1e4a67b66ea..aa0bbdcdfee 100644 --- a/tests/queries/0_stateless/00834_kill_mutation.reference +++ b/tests/queries/0_stateless/00834_kill_mutation.reference @@ -2,6 +2,7 @@ 1 waiting test kill_mutation mutation_3.txt DELETE WHERE toUInt32(s) = 1 *** Create and kill invalid mutation that blocks another mutation *** +happened during execution of mutations 'mutation_4.txt, mutation_5.txt' 1 waiting test kill_mutation mutation_4.txt DELETE WHERE toUInt32(s) = 1 2001-01-01 2 b diff --git a/tests/queries/0_stateless/00834_kill_mutation.sh b/tests/queries/0_stateless/00834_kill_mutation.sh index a00e52fc23c..8dbc75be90c 100755 --- a/tests/queries/0_stateless/00834_kill_mutation.sh +++ b/tests/queries/0_stateless/00834_kill_mutation.sh @@ -14,7 +14,7 @@ ${CLICKHOUSE_CLIENT} --query="INSERT INTO test.kill_mutation VALUES ('2001-01-01 ${CLICKHOUSE_CLIENT} --query="SELECT '*** Create and kill a single invalid mutation ***'" -${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation DELETE WHERE toUInt32(s) = 1 SETTINGS mutations_sync = 1" & +${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation DELETE WHERE toUInt32(s) = 1 SETTINGS mutations_sync = 1" 2>/dev/null & check_query1="SELECT count() FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation' AND is_done = 0" @@ -41,7 +41,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT mutation_id FROM system.mutations WHERE dat ${CLICKHOUSE_CLIENT} --query="SELECT '*** Create and kill invalid mutation that blocks another mutation ***'" ${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation DELETE WHERE toUInt32(s) = 1" -${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation DELETE WHERE x = 1 SETTINGS mutations_sync = 1" & +${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation DELETE WHERE x = 1 SETTINGS mutations_sync = 1" 2>&1 | grep -o "happened during execution of mutations 'mutation_4.txt, mutation_5.txt'" | head -n 1 & check_query2="SELECT count() FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation' AND mutation_id = 'mutation_4.txt'" diff --git a/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.reference b/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.reference index 
d6a82e48836..aaefdaeda56 100644 --- a/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.reference +++ b/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.reference @@ -1,6 +1,6 @@ *** Create and kill a single invalid mutation *** +happened during execution of mutation '0000000000' 1 -Mutation 0000000000 was killed waiting test kill_mutation_r1 0000000000 DELETE WHERE toUInt32(s) = 1 0 *** Create and kill invalid mutation that blocks another mutation *** diff --git a/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh b/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh index 2aea2e7cfb0..4778c7f5889 100755 --- a/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh +++ b/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh @@ -18,7 +18,7 @@ ${CLICKHOUSE_CLIENT} --query="INSERT INTO test.kill_mutation_r1 VALUES ('2001-01 ${CLICKHOUSE_CLIENT} --query="SELECT '*** Create and kill a single invalid mutation ***'" # wrong mutation -${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation_r1 DELETE WHERE toUInt32(s) = 1 SETTINGS mutations_sync=2" 2>&1 | grep -o "Mutation 0000000000 was killed" & +${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation_r1 DELETE WHERE toUInt32(s) = 1 SETTINGS mutations_sync=2" 2>&1 | grep -o "happened during execution of mutation '0000000000'" | head -n 1 check_query1="SELECT count() FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation_r1' AND is_done = 0" @@ -51,6 +51,16 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.replication_queue WHERE table ${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation_r1 DELETE WHERE toUInt32(s) = 1" +check_query1="SELECT count() FROM system.mutations WHERE database = 'test' AND table = 'kill_mutation_r1' AND is_done = 0" + +query_result=`$CLICKHOUSE_CLIENT --query="$check_query1" 2>&1` + +while [ "$query_result" == "0" ] +do + query_result=`$CLICKHOUSE_CLIENT --query="$check_query1" 2>&1` + sleep 0.5 +done + # good mutation, but blocked with wrong mutation ${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.kill_mutation_r1 DELETE WHERE x = 1 SETTINGS mutations_sync=2" & diff --git a/tests/queries/0_stateless/01037_polygon_dict_simple_polygons.reference b/tests/queries/0_stateless/01037_polygon_dict_simple_polygons.reference deleted file mode 100644 index bc40521f692..00000000000 --- a/tests/queries/0_stateless/01037_polygon_dict_simple_polygons.reference +++ /dev/null @@ -1,142 +0,0 @@ -dictGet test_01037.dict_array (-100,-42) qqq 101 -dictGet test_01037.dict_array (-1,0) Click South 423 -dictGet test_01037.dict_array (-0.1,0) Click South 423 -dictGet test_01037.dict_array (0,-2) Click West 424 -dictGet test_01037.dict_array (0,-1.1) Click West 424 -dictGet test_01037.dict_array (0,1.1) Click North 422 -dictGet test_01037.dict_array (0,2) Click North 422 -dictGet test_01037.dict_array (0.1,0) Click East 421 -dictGet test_01037.dict_array (0.99,2.99) Click North 422 -dictGet test_01037.dict_array (1,0) Click East 421 -dictGet test_01037.dict_array (2,4) House 523 -dictGet test_01037.dict_array (2,4.1) qqq 101 -dictGet test_01037.dict_array (3,3) House 523 -dictGet test_01037.dict_array (4,4) House 523 -dictGet test_01037.dict_array (5,6) qqq 101 -dictGet test_01037.dict_array (7.01,7.01) qqq 101 -dictGetOrDefault test_01037.dict_array (-100,-42) www 1234 -dictGetOrDefault test_01037.dict_array (-1,0) Click South 423 -dictGetOrDefault test_01037.dict_array (-0.1,0) Click South 423 
-dictGetOrDefault test_01037.dict_array (0,-2) Click West 424 -dictGetOrDefault test_01037.dict_array (0,-1.1) Click West 424 -dictGetOrDefault test_01037.dict_array (0,1.1) Click North 422 -dictGetOrDefault test_01037.dict_array (0,2) Click North 422 -dictGetOrDefault test_01037.dict_array (0.1,0) Click East 421 -dictGetOrDefault test_01037.dict_array (0.99,2.99) Click North 422 -dictGetOrDefault test_01037.dict_array (1,0) Click East 421 -dictGetOrDefault test_01037.dict_array (2,4) House 523 -dictGetOrDefault test_01037.dict_array (2,4.1) www 1234 -dictGetOrDefault test_01037.dict_array (3,3) House 523 -dictGetOrDefault test_01037.dict_array (4,4) House 523 -dictGetOrDefault test_01037.dict_array (5,6) www 1234 -dictGetOrDefault test_01037.dict_array (7.01,7.01) www 1234 -dictGetOrDefault test_01037.dict_array (-100,-42) dd 44 -dictGetOrDefault test_01037.dict_array (-1,0) Click South 423 -dictGetOrDefault test_01037.dict_array (-0.1,0) Click South 423 -dictGetOrDefault test_01037.dict_array (0,-2) Click West 424 -dictGetOrDefault test_01037.dict_array (0,-1.1) Click West 424 -dictGetOrDefault test_01037.dict_array (0,1.1) Click North 422 -dictGetOrDefault test_01037.dict_array (0,2) Click North 422 -dictGetOrDefault test_01037.dict_array (0.1,0) Click East 421 -dictGetOrDefault test_01037.dict_array (0.99,2.99) Click North 422 -dictGetOrDefault test_01037.dict_array (1,0) Click East 421 -dictGetOrDefault test_01037.dict_array (2,4) House 523 -dictGetOrDefault test_01037.dict_array (2,4.1) gac 803 -dictGetOrDefault test_01037.dict_array (3,3) House 523 -dictGetOrDefault test_01037.dict_array (4,4) House 523 -dictGetOrDefault test_01037.dict_array (5,6) cc 33 -dictGetOrDefault test_01037.dict_array (7.01,7.01) ee 55 -dictGet test_01037.dict_tuple (-100,-42) qqq 101 -dictGet test_01037.dict_tuple (-1,0) Click South 423 -dictGet test_01037.dict_tuple (-0.1,0) Click South 423 -dictGet test_01037.dict_tuple (0,-2) Click West 424 -dictGet test_01037.dict_tuple (0,-1.1) Click West 424 -dictGet test_01037.dict_tuple (0,1.1) Click North 422 -dictGet test_01037.dict_tuple (0,2) Click North 422 -dictGet test_01037.dict_tuple (0.1,0) Click East 421 -dictGet test_01037.dict_tuple (0.99,2.99) Click North 422 -dictGet test_01037.dict_tuple (1,0) Click East 421 -dictGet test_01037.dict_tuple (2,4) House 523 -dictGet test_01037.dict_tuple (2,4.1) qqq 101 -dictGet test_01037.dict_tuple (3,3) House 523 -dictGet test_01037.dict_tuple (4,4) House 523 -dictGet test_01037.dict_tuple (5,6) qqq 101 -dictGet test_01037.dict_tuple (7.01,7.01) qqq 101 -dictGetOrDefault test_01037.dict_tuple (-100,-42) www 1234 -dictGetOrDefault test_01037.dict_tuple (-1,0) Click South 423 -dictGetOrDefault test_01037.dict_tuple (-0.1,0) Click South 423 -dictGetOrDefault test_01037.dict_tuple (0,-2) Click West 424 -dictGetOrDefault test_01037.dict_tuple (0,-1.1) Click West 424 -dictGetOrDefault test_01037.dict_tuple (0,1.1) Click North 422 -dictGetOrDefault test_01037.dict_tuple (0,2) Click North 422 -dictGetOrDefault test_01037.dict_tuple (0.1,0) Click East 421 -dictGetOrDefault test_01037.dict_tuple (0.99,2.99) Click North 422 -dictGetOrDefault test_01037.dict_tuple (1,0) Click East 421 -dictGetOrDefault test_01037.dict_tuple (2,4) House 523 -dictGetOrDefault test_01037.dict_tuple (2,4.1) www 1234 -dictGetOrDefault test_01037.dict_tuple (3,3) House 523 -dictGetOrDefault test_01037.dict_tuple (4,4) House 523 -dictGetOrDefault test_01037.dict_tuple (5,6) www 1234 -dictGetOrDefault test_01037.dict_tuple (7.01,7.01) www 1234 
-dictGetOrDefault test_01037.dict_tuple (-100,-42) dd 44 -dictGetOrDefault test_01037.dict_tuple (-1,0) Click South 423 -dictGetOrDefault test_01037.dict_tuple (-0.1,0) Click South 423 -dictGetOrDefault test_01037.dict_tuple (0,-2) Click West 424 -dictGetOrDefault test_01037.dict_tuple (0,-1.1) Click West 424 -dictGetOrDefault test_01037.dict_tuple (0,1.1) Click North 422 -dictGetOrDefault test_01037.dict_tuple (0,2) Click North 422 -dictGetOrDefault test_01037.dict_tuple (0.1,0) Click East 421 -dictGetOrDefault test_01037.dict_tuple (0.99,2.99) Click North 422 -dictGetOrDefault test_01037.dict_tuple (1,0) Click East 421 -dictGetOrDefault test_01037.dict_tuple (2,4) House 523 -dictGetOrDefault test_01037.dict_tuple (2,4.1) gac 803 -dictGetOrDefault test_01037.dict_tuple (3,3) House 523 -dictGetOrDefault test_01037.dict_tuple (4,4) House 523 -dictGetOrDefault test_01037.dict_tuple (5,6) cc 33 -dictGetOrDefault test_01037.dict_tuple (7.01,7.01) ee 55 -dictHas test_01037.dict_array (-100,-42) 0 -dictHas test_01037.dict_array (-1,0) 1 -dictHas test_01037.dict_array (-0.1,0) 1 -dictHas test_01037.dict_array (0,-2) 1 -dictHas test_01037.dict_array (0,-1.1) 1 -dictHas test_01037.dict_array (0,-1) 1 -dictHas test_01037.dict_array (0,0) 1 -dictHas test_01037.dict_array (0,1) 1 -dictHas test_01037.dict_array (0,1.1) 1 -dictHas test_01037.dict_array (0,2) 1 -dictHas test_01037.dict_array (0.1,0) 1 -dictHas test_01037.dict_array (0.99,2.99) 1 -dictHas test_01037.dict_array (1,0) 1 -dictHas test_01037.dict_array (1,1) 1 -dictHas test_01037.dict_array (1,3) 1 -dictHas test_01037.dict_array (2,4) 1 -dictHas test_01037.dict_array (2,4.1) 0 -dictHas test_01037.dict_array (3,3) 1 -dictHas test_01037.dict_array (4,4) 1 -dictHas test_01037.dict_array (5,1) 1 -dictHas test_01037.dict_array (5,5) 1 -dictHas test_01037.dict_array (5,6) 0 -dictHas test_01037.dict_array (7.01,7.01) 0 -dictHas test_01037.dict_tuple (-100,-42) 0 -dictHas test_01037.dict_tuple (-1,0) 1 -dictHas test_01037.dict_tuple (-0.1,0) 1 -dictHas test_01037.dict_tuple (0,-2) 1 -dictHas test_01037.dict_tuple (0,-1.1) 1 -dictHas test_01037.dict_tuple (0,-1) 1 -dictHas test_01037.dict_tuple (0,0) 1 -dictHas test_01037.dict_tuple (0,1) 1 -dictHas test_01037.dict_tuple (0,1.1) 1 -dictHas test_01037.dict_tuple (0,2) 1 -dictHas test_01037.dict_tuple (0.1,0) 1 -dictHas test_01037.dict_tuple (0.99,2.99) 1 -dictHas test_01037.dict_tuple (1,0) 1 -dictHas test_01037.dict_tuple (1,1) 1 -dictHas test_01037.dict_tuple (1,3) 1 -dictHas test_01037.dict_tuple (2,4) 1 -dictHas test_01037.dict_tuple (2,4.1) 0 -dictHas test_01037.dict_tuple (3,3) 1 -dictHas test_01037.dict_tuple (4,4) 1 -dictHas test_01037.dict_tuple (5,1) 1 -dictHas test_01037.dict_tuple (5,5) 1 -dictHas test_01037.dict_tuple (5,6) 0 -dictHas test_01037.dict_tuple (7.01,7.01) 0 diff --git a/tests/queries/0_stateless/01037_polygon_dict_simple_polygons.sql b/tests/queries/0_stateless/01037_polygon_dict_simple_polygons.sql deleted file mode 100644 index 9ffbac78bbb..00000000000 --- a/tests/queries/0_stateless/01037_polygon_dict_simple_polygons.sql +++ /dev/null @@ -1,108 +0,0 @@ -SET send_logs_level = 'fatal'; - -DROP DATABASE IF EXISTS test_01037; - -CREATE DATABASE test_01037 Engine = Ordinary; - -DROP DICTIONARY IF EXISTS test_01037.dict_array; -DROP TABLE IF EXISTS test_01037.polygons_array; - -CREATE TABLE test_01037.polygons_array (key Array(Array(Float64)), name String, value UInt64) ENGINE = Memory; -INSERT INTO test_01037.polygons_array VALUES ([[3, 1], [0, 1], [0, -1], [3, -1]], 'Click 
East', 421); -INSERT INTO test_01037.polygons_array VALUES ([[-1, 1], [1, 1], [1, 3], [-1, 3]], 'Click North', 422); -INSERT INTO test_01037.polygons_array VALUES ([[-3, 1], [-3, -1], [0, -1], [0, 1]], 'Click South', 423); -INSERT INTO test_01037.polygons_array VALUES ([[-1, -1], [1, -1], [1, -3], [-1, -3]], 'Click West', 424); -INSERT INTO test_01037.polygons_array VALUES ([[1, 1], [1, 3], [3, 5], [5, 5], [5, 1]], 'House', 523); - -CREATE DICTIONARY test_01037.dict_array -( - key Array(Array(Float64)), - name String DEFAULT 'qqq', - value UInt64 DEFAULT 101 -) -PRIMARY KEY key -SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'polygons_array' PASSWORD '' DB 'test_01037')) -LIFETIME(MIN 1 MAX 10) -LAYOUT(POLYGON()); - -DROP DICTIONARY IF EXISTS test_01037.dict_tuple; -DROP TABLE IF EXISTS test_01037.polygons_tuple; - -CREATE TABLE test_01037.polygons_tuple (key Array(Tuple(Float64, Float64)), name String, value UInt64) ENGINE = Memory; -INSERT INTO test_01037.polygons_tuple VALUES ([(3.0, 1.0), (0.0, 1.0), (0.0, -1.0), (3.0, -1.0)], 'Click East', 421); -INSERT INTO test_01037.polygons_tuple VALUES ([(-1, 1), (1, 1), (1, 3), (-1, 3)], 'Click North', 422); -INSERT INTO test_01037.polygons_tuple VALUES ([(-3, 1), (-3, -1), (0, -1), (0, 1)], 'Click South', 423); -INSERT INTO test_01037.polygons_tuple VALUES ([(-1, -1), (1, -1), (1, -3), (-1, -3)], 'Click West', 424); -INSERT INTO test_01037.polygons_tuple VALUES ([(1, 1), (1, 3), (3, 5), (5, 5), (5, 1)], 'House', 523); - -CREATE DICTIONARY test_01037.dict_tuple -( - key Array(Tuple(Float64, Float64)), - name String DEFAULT 'qqq', - value UInt64 DEFAULT 101 -) -PRIMARY KEY key -SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'polygons_tuple' PASSWORD '' DB 'test_01037')) -LIFETIME(MIN 1 MAX 10) -LAYOUT(POLYGON()); - -DROP TABLE IF EXISTS test_01037.points; - -CREATE TABLE test_01037.points (x Float64, y Float64, def_i UInt64, def_s String) ENGINE = Memory; -INSERT INTO test_01037.points VALUES (0.1, 0.0, 112, 'aax'); -INSERT INTO test_01037.points VALUES (-0.1, 0.0, 113, 'aay'); -INSERT INTO test_01037.points VALUES (0.0, 1.1, 114, 'aaz'); -INSERT INTO test_01037.points VALUES (0.0, -1.1, 115, 'aat'); -INSERT INTO test_01037.points VALUES (3.0, 3.0, 22, 'bb'); -INSERT INTO test_01037.points VALUES (5.0, 6.0, 33, 'cc'); -INSERT INTO test_01037.points VALUES (-100.0, -42.0, 44, 'dd'); -INSERT INTO test_01037.points VALUES (7.01, 7.01, 55, 'ee') -INSERT INTO test_01037.points VALUES (0.99, 2.99, 66, 'ee'); -INSERT INTO test_01037.points VALUES (1.0, 0.0, 771, 'ffa'); -INSERT INTO test_01037.points VALUES (-1.0, 0.0, 772, 'ffb'); -INSERT INTO test_01037.points VALUES (0.0, 2.0, 773, 'ffc'); -INSERT INTO test_01037.points VALUES (0.0, -2.0, 774, 'ffd'); -INSERT INTO test_01037.points VALUES (2.0, 4.0, 801, 'gaa') -INSERT INTO test_01037.points VALUES (4.0, 4.0, 802, 'gab') -INSERT INTO test_01037.points VALUES (2.0, 4.1, 803, 'gac') - -select 'dictGet', 'test_01037.dict_array' as dict_name, tuple(x, y) as key, - dictGet(dict_name, 'name', key), - dictGet(dict_name, 'value', key) from test_01037.points order by x, y; -select 'dictGetOrDefault', 'test_01037.dict_array' as dict_name, tuple(x, y) as key, - dictGetOrDefault(dict_name, 'name', key, 'www'), - dictGetOrDefault(dict_name, 'value', key, toUInt64(1234)) from test_01037.points order by x, y; -select 'dictGetOrDefault', 'test_01037.dict_array' as dict_name, tuple(x, y) as key, - dictGetOrDefault(dict_name, 'name', key, def_s), - dictGetOrDefault(dict_name, 
'value', key, def_i) from test_01037.points order by x, y; - -select 'dictGet', 'test_01037.dict_tuple' as dict_name, tuple(x, y) as key, - dictGet(dict_name, 'name', key), - dictGet(dict_name, 'value', key) from test_01037.points order by x, y; -select 'dictGetOrDefault', 'test_01037.dict_tuple' as dict_name, tuple(x, y) as key, - dictGetOrDefault(dict_name, 'name', key, 'www'), - dictGetOrDefault(dict_name, 'value', key, toUInt64(1234)) from test_01037.points order by x, y; -select 'dictGetOrDefault', 'test_01037.dict_tuple' as dict_name, tuple(x, y) as key, - dictGetOrDefault(dict_name, 'name', key, def_s), - dictGetOrDefault(dict_name, 'value', key, def_i) from test_01037.points order by x, y; - -INSERT INTO test_01037.points VALUES (5.0, 5.0, 0, ''); -INSERT INTO test_01037.points VALUES (5.0, 1.0, 0, ''); -INSERT INTO test_01037.points VALUES (1.0, 3.0, 0, ''); -INSERT INTO test_01037.points VALUES (0.0, 0.0, 0, ''); -INSERT INTO test_01037.points VALUES (0.0, 1.0, 0, ''); -INSERT INTO test_01037.points VALUES (0.0, -1.0, 0, ''); -INSERT INTO test_01037.points VALUES (1.0, 1.0, 0, ''); - -select 'dictHas', 'test_01037.dict_array' as dict_name, tuple(x, y) as key, - dictHas(dict_name, key) from test_01037.points order by x, y; - -select 'dictHas', 'test_01037.dict_tuple' as dict_name, tuple(x, y) as key, - dictHas(dict_name, key) from test_01037.points order by x, y; - -DROP DICTIONARY test_01037.dict_array; -DROP DICTIONARY test_01037.dict_tuple; -DROP TABLE test_01037.polygons_array; -DROP TABLE test_01037.polygons_tuple; -DROP TABLE test_01037.points; -DROP DATABASE test_01037; diff --git a/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.ans b/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.ans new file mode 100644 index 00000000000..6e31edbdd40 --- /dev/null +++ b/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.ans @@ -0,0 +1,1000 @@ +dictGet test_01037.dict_array (29.5699,2.50068) 101 +dictGet test_01037.dict_array (29.5796,1.55456) 101 +dictGet test_01037.dict_array (29.5796,2.36864) 101 +dictGet test_01037.dict_array (29.5844,1.59626) 101 +dictGet test_01037.dict_array (29.5886,4.03321) 101 +dictGet test_01037.dict_array (29.5914,3.02628) 101 +dictGet test_01037.dict_array (29.5926,-0.0965169) 101 +dictGet test_01037.dict_array (29.5968,2.37773) 101 +dictGet test_01037.dict_array (29.5984,0.755853) 101 +dictGet test_01037.dict_array (29.6066,3.47173) 101 +dictGet test_01037.dict_array (29.6085,-1.26007) 101 +dictGet test_01037.dict_array (29.6131,0.246565) 101 +dictGet test_01037.dict_array (29.6157,-0.266687) 101 +dictGet test_01037.dict_array (29.6164,2.94674) 101 +dictGet test_01037.dict_array (29.6195,-0.591941) 101 +dictGet test_01037.dict_array (29.6231,1.54818) 101 +dictGet test_01037.dict_array (29.6379,0.764114) 101 +dictGet test_01037.dict_array (29.6462,-0.772059) 934570 +dictGet test_01037.dict_array (29.6579,-1.07336) 101 +dictGet test_01037.dict_array (29.6618,-0.271842) 101 +dictGet test_01037.dict_array (29.6629,-0.303602) 101 +dictGet test_01037.dict_array (29.6659,-0.782823) 934570 +dictGet test_01037.dict_array (29.6736,-0.113832) 101 +dictGet test_01037.dict_array (29.6759,3.02905) 101 +dictGet test_01037.dict_array (29.6778,3.71898) 101 +dictGet test_01037.dict_array (29.6796,1.10433) 101 +dictGet test_01037.dict_array (29.6809,2.13677) 101 +dictGet test_01037.dict_array (29.6935,4.11894) 101 +dictGet test_01037.dict_array (29.6991,-1.4458199999999999) 101 +dictGet test_01037.dict_array (29.6997,3.17297) 101 
+dictGet test_01037.dict_array (29.7043,3.6145899999999997) 101 +dictGet test_01037.dict_array (29.7065,3.24885) 101 +dictGet test_01037.dict_array (29.7126,0.28108) 101 +dictGet test_01037.dict_array (29.7192,0.174273) 101 +dictGet test_01037.dict_array (29.7217,-0.523481) 934570 +dictGet test_01037.dict_array (29.7271,1.67967) 101 +dictGet test_01037.dict_array (29.7311,4.12444) 101 +dictGet test_01037.dict_array (29.7347,1.88378) 101 +dictGet test_01037.dict_array (29.7358,0.67944) 101 +dictGet test_01037.dict_array (29.7366,-0.2973) 101 +dictGet test_01037.dict_array (29.7446,0.646536) 101 +dictGet test_01037.dict_array (29.7453,-0.567963) 101 +dictGet test_01037.dict_array (29.764,4.04217) 101 +dictGet test_01037.dict_array (29.7655,1.51372) 101 +dictGet test_01037.dict_array (29.7744,1.12435) 101 +dictGet test_01037.dict_array (29.7774,-0.0681196) 101 +dictGet test_01037.dict_array (29.7784,1.54864) 101 +dictGet test_01037.dict_array (29.7785,2.24139) 101 +dictGet test_01037.dict_array (29.7922,0.220808) 101 +dictGet test_01037.dict_array (29.7936,2.37709) 101 +dictGet test_01037.dict_array (29.8008,0.948536) 101 +dictGet test_01037.dict_array (29.8115,0.201227) 101 +dictGet test_01037.dict_array (29.814,0.149601) 101 +dictGet test_01037.dict_array (29.8193,-1.35858) 101 +dictGet test_01037.dict_array (29.8201,0.965518) 101 +dictGet test_01037.dict_array (29.8265,-0.727286) 101 +dictGet test_01037.dict_array (29.8277,-0.531746) 101 +dictGet test_01037.dict_array (29.8289,3.63009) 101 +dictGet test_01037.dict_array (29.8548,0.838047) 101 +dictGet test_01037.dict_array (29.8641,-0.845265) 101 +dictGet test_01037.dict_array (29.8649,0.0562212) 101 +dictGet test_01037.dict_array (29.8701,-1.02045) 101 +dictGet test_01037.dict_array (29.8733,2.76654) 101 +dictGet test_01037.dict_array (29.876,0.555475) 101 +dictGet test_01037.dict_array (29.8794,-0.800108) 101 +dictGet test_01037.dict_array (29.8813,2.7426399999999997) 101 +dictGet test_01037.dict_array (29.897100000000002,2.66193) 101 +dictGet test_01037.dict_array (29.908,4.01339) 101 +dictGet test_01037.dict_array (29.9165,-1.08246) 101 +dictGet test_01037.dict_array (29.9201,-0.420861) 101 +dictGet test_01037.dict_array (29.9217,3.03778) 101 +dictGet test_01037.dict_array (29.9355,0.773833) 101 +dictGet test_01037.dict_array (29.947,3.76517) 101 +dictGet test_01037.dict_array (29.9518,-0.60557) 101 +dictGet test_01037.dict_array (29.9564,-0.600163) 101 +dictGet test_01037.dict_array (29.959600000000002,4.16591) 101 +dictGet test_01037.dict_array (29.9615,-1.33708) 101 +dictGet test_01037.dict_array (29.9699,-0.392375) 101 +dictGet test_01037.dict_array (29.9776,1.04552) 101 +dictGet test_01037.dict_array (29.9784,4.02756) 101 +dictGet test_01037.dict_array (29.9819,4.00597) 101 +dictGet test_01037.dict_array (29.9826,1.2816100000000001) 101 +dictGet test_01037.dict_array (30.0026,2.76257) 101 +dictGet test_01037.dict_array (30.0126,3.68255) 101 +dictGet test_01037.dict_array (30.0131,0.796576) 101 +dictGet test_01037.dict_array (30.018,1.16523) 101 +dictGet test_01037.dict_array (30.0261,-0.210653) 101 +dictGet test_01037.dict_array (30.0472,-1.11007) 101 +dictGet test_01037.dict_array (30.0542,-0.479585) 101 +dictGet test_01037.dict_array (30.0613,1.6278000000000001) 101 +dictGet test_01037.dict_array (30.0617,-0.0551152) 101 +dictGet test_01037.dict_array (30.0637,2.62066) 101 +dictGet test_01037.dict_array (30.0721,1.6424400000000001) 101 +dictGet test_01037.dict_array (30.0769,-0.402636) 101 +dictGet test_01037.dict_array 
(30.0791,-0.277435) 101 +dictGet test_01037.dict_array (30.0931,0.0327512) 101 +dictGet test_01037.dict_array (30.1059,3.52623) 101 +dictGet test_01037.dict_array (30.1103,0.865466) 101 +dictGet test_01037.dict_array (30.1115,2.95243) 101 +dictGet test_01037.dict_array (30.1144,1.71029) 101 +dictGet test_01037.dict_array (30.1311,-0.864751) 101 +dictGet test_01037.dict_array (30.1336,-0.851386) 101 +dictGet test_01037.dict_array (30.1393,3.89901) 101 +dictGet test_01037.dict_array (30.1456,-0.531898) 101 +dictGet test_01037.dict_array (30.1492,2.07833) 101 +dictGet test_01037.dict_array (30.1575,2.43856) 101 +dictGet test_01037.dict_array (30.1682,1.19771) 101 +dictGet test_01037.dict_array (30.1716,3.9853300000000003) 101 +dictGet test_01037.dict_array (30.1849,2.78374) 101 +dictGet test_01037.dict_array (30.1866,0.65658) 101 +dictGet test_01037.dict_array (30.1885,1.56943) 101 +dictGet test_01037.dict_array (30.1959,-1.38202) 101 +dictGet test_01037.dict_array (30.1999,1.58413) 101 +dictGet test_01037.dict_array (30.2024,0.713081) 101 +dictGet test_01037.dict_array (30.2054,0.620143) 101 +dictGet test_01037.dict_array (30.2091,1.51641) 101 +dictGet test_01037.dict_array (30.2124,-0.331782) 101 +dictGet test_01037.dict_array (30.226,3.03527) 101 +dictGet test_01037.dict_array (30.2261,3.18486) 101 +dictGet test_01037.dict_array (30.2288,2.48407) 101 +dictGet test_01037.dict_array (30.2345,3.7462400000000002) 101 +dictGet test_01037.dict_array (30.2375,0.62046) 101 +dictGet test_01037.dict_array (30.2425,-0.472914) 101 +dictGet test_01037.dict_array (30.247,3.95863) 101 +dictGet test_01037.dict_array (30.2494,-0.305093) 101 +dictGet test_01037.dict_array (30.2499,2.54337) 101 +dictGet test_01037.dict_array (30.2606,2.16644) 101 +dictGet test_01037.dict_array (30.2672,3.94847) 101 +dictGet test_01037.dict_array (30.2709,-0.136264) 101 +dictGet test_01037.dict_array (30.2764,1.18654) 101 +dictGet test_01037.dict_array (30.2765,1.20383) 101 +dictGet test_01037.dict_array (30.2839,1.05762) 101 +dictGet test_01037.dict_array (30.286,0.469327) 101 +dictGet test_01037.dict_array (30.2927,3.1693) 101 +dictGet test_01037.dict_array (30.2935,3.49854) 101 +dictGet test_01037.dict_array (30.307,0.312338) 101 +dictGet test_01037.dict_array (30.3085,1.07791) 101 +dictGet test_01037.dict_array (30.3139,2.77248) 101 +dictGet test_01037.dict_array (30.314,0.822823) 101 +dictGet test_01037.dict_array (30.3227,-0.587351) 101 +dictGet test_01037.dict_array (30.332,1.00174) 101 +dictGet test_01037.dict_array (30.3388,0.844148) 101 +dictGet test_01037.dict_array (30.3485,0.561902) 101 +dictGet test_01037.dict_array (30.3497,0.180362) 101 +dictGet test_01037.dict_array (30.361,4.13016) 101 +dictGet test_01037.dict_array (30.3623,-0.0484027) 101 +dictGet test_01037.dict_array (30.3638,3.9845800000000002) 101 +dictGet test_01037.dict_array (30.3853,3.16051) 101 +dictGet test_01037.dict_array (30.3974,2.6617800000000003) 101 +dictGet test_01037.dict_array (30.4002,-1.15886) 101 +dictGet test_01037.dict_array (30.4008,-0.387015) 101 +dictGet test_01037.dict_array (30.4018,1.86493) 101 +dictGet test_01037.dict_array (30.4239,1.16818) 101 +dictGet test_01037.dict_array (30.4363,3.63938) 101 +dictGet test_01037.dict_array (30.4377,-0.81315) 101 +dictGet test_01037.dict_array (30.4391,3.54703) 101 +dictGet test_01037.dict_array (30.4424,-1.39435) 101 +dictGet test_01037.dict_array (30.4441,2.8463000000000003) 101 +dictGet test_01037.dict_array (30.4517,3.28117) 101 +dictGet test_01037.dict_array (30.4658,2.6928) 101 
+dictGet test_01037.dict_array (30.4734,2.66161) 101 +dictGet test_01037.dict_array (30.4799,-1.07578) 101 +dictGet test_01037.dict_array (30.4837,-1.02486) 101 +dictGet test_01037.dict_array (30.485,1.06326) 101 +dictGet test_01037.dict_array (30.495,1.12306) 101 +dictGet test_01037.dict_array (30.501,2.27264) 101 +dictGet test_01037.dict_array (30.5027,1.99382) 101 +dictGet test_01037.dict_array (30.5194,-1.03943) 101 +dictGet test_01037.dict_array (30.5239,1.04328) 101 +dictGet test_01037.dict_array (30.528,3.82041) 101 +dictGet test_01037.dict_array (30.5299,-0.715248) 101 +dictGet test_01037.dict_array (30.5331,1.19603) 101 +dictGet test_01037.dict_array (30.535800000000002,2.71485) 101 +dictGet test_01037.dict_array (30.5405,0.804694) 101 +dictGet test_01037.dict_array (30.542,1.23739) 101 +dictGet test_01037.dict_array (30.5432,4.04189) 101 +dictGet test_01037.dict_array (30.5457,-0.956121) 101 +dictGet test_01037.dict_array (30.5506,3.07443) 101 +dictGet test_01037.dict_array (30.5539,3.87084) 101 +dictGet test_01037.dict_array (30.5578,3.78837) 101 +dictGet test_01037.dict_array (30.5588,0.966135) 101 +dictGet test_01037.dict_array (30.5637,2.5605) 101 +dictGet test_01037.dict_array (30.5647,-1.27328) 101 +dictGet test_01037.dict_array (30.5656,-0.0581332) 101 +dictGet test_01037.dict_array (30.5715,0.65755) 101 +dictGet test_01037.dict_array (30.5727,3.01604) 101 +dictGet test_01037.dict_array (30.5729,-0.976857) 101 +dictGet test_01037.dict_array (30.5751,0.60204) 101 +dictGet test_01037.dict_array (30.5854,3.02473) 101 +dictGet test_01037.dict_array (30.5866,0.174099) 101 +dictGet test_01037.dict_array (30.5947,0.875193) 101 +dictGet test_01037.dict_array (30.5992,-0.403901) 101 +dictGet test_01037.dict_array (30.6002,4.18891) 101 +dictGet test_01037.dict_array (30.6025,0.217712) 101 +dictGet test_01037.dict_array (30.6054,0.927203) 101 +dictGet test_01037.dict_array (30.6075,3.79359) 101 +dictGet test_01037.dict_array (30.6159,3.82773) 101 +dictGet test_01037.dict_array (30.627,3.84039) 101 +dictGet test_01037.dict_array (30.6308,0.77517) 101 +dictGet test_01037.dict_array (30.6338,0.179565) 101 +dictGet test_01037.dict_array (30.6461,1.3293599999999999) 101 +dictGet test_01037.dict_array (30.6674,-0.424547) 101 +dictGet test_01037.dict_array (30.669,1.76539) 101 +dictGet test_01037.dict_array (30.6788,4.01239) 101 +dictGet test_01037.dict_array (30.6864,3.59158) 101 +dictGet test_01037.dict_array (30.7049,-0.875413) 101 +dictGet test_01037.dict_array (30.705,1.3307) 101 +dictGet test_01037.dict_array (30.7063,-0.473192) 101 +dictGet test_01037.dict_array (30.7075,-1.1958199999999999) 101 +dictGet test_01037.dict_array (30.7101,-0.367562) 101 +dictGet test_01037.dict_array (30.7203,2.98725) 101 +dictGet test_01037.dict_array (30.7213,2.2745699999999998) 101 +dictGet test_01037.dict_array (30.7446,-0.334144) 101 +dictGet test_01037.dict_array (30.7468,3.82967) 101 +dictGet test_01037.dict_array (30.747,-0.384779) 101 +dictGet test_01037.dict_array (30.7681,0.904198) 101 +dictGet test_01037.dict_array (30.7757,1.78743) 101 +dictGet test_01037.dict_array (30.8021,-0.479212) 101 +dictGet test_01037.dict_array (30.8079,-1.40869) 101 +dictGet test_01037.dict_array (30.8206,-0.0608489) 101 +dictGet test_01037.dict_array (30.8218,0.43909) 101 +dictGet test_01037.dict_array (30.8239,0.10014) 101 +dictGet test_01037.dict_array (30.8282,4.15409) 101 +dictGet test_01037.dict_array (30.8288,-0.709528) 101 +dictGet test_01037.dict_array (30.8326,0.156011) 101 +dictGet test_01037.dict_array 
(30.8328,-1.03704) 101 +dictGet test_01037.dict_array (30.839,2.15528) 101 +dictGet test_01037.dict_array (30.8452,0.219377) 101 +dictGet test_01037.dict_array (30.8463,0.0515355) 101 +dictGet test_01037.dict_array (30.8526,2.06614) 101 +dictGet test_01037.dict_array (30.8566,0.517876) 101 +dictGet test_01037.dict_array (30.8588,-1.31738) 101 +dictGet test_01037.dict_array (30.8681,0.44207) 101 +dictGet test_01037.dict_array (30.8914,1.0072) 101 +dictGet test_01037.dict_array (30.897,0.483425) 101 +dictGet test_01037.dict_array (30.905,2.8731999999999998) 101 +dictGet test_01037.dict_array (30.9051,2.21956) 101 +dictGet test_01037.dict_array (30.9115,4.00663) 101 +dictGet test_01037.dict_array (30.9167,-0.834462) 101 +dictGet test_01037.dict_array (30.9252,-1.3289900000000001) 101 +dictGet test_01037.dict_array (30.9314,1.85384) 101 +dictGet test_01037.dict_array (30.9392,2.53236) 101 +dictGet test_01037.dict_array (30.9569,2.82038) 101 +dictGet test_01037.dict_array (30.9598,-0.641011) 101 +dictGet test_01037.dict_array (30.9601,-0.254928) 101 +dictGet test_01037.dict_array (30.9623,-1.3886) 101 +dictGet test_01037.dict_array (30.9707,0.888854) 101 +dictGet test_01037.dict_array (30.9766,2.81957) 101 +dictGet test_01037.dict_array (30.9775,2.69273) 101 +dictGet test_01037.dict_array (30.9821,0.587715) 101 +dictGet test_01037.dict_array (30.9887,4.0233) 101 +dictGet test_01037.dict_array (30.9914,0.259542) 101 +dictGet test_01037.dict_array (30.9986,-1.36832) 101 +dictGet test_01037.dict_array (31.008,0.628999) 101 +dictGet test_01037.dict_array (31.0168,-1.17462) 101 +dictGet test_01037.dict_array (31.0237,3.52547) 101 +dictGet test_01037.dict_array (31.0306,3.78522) 101 +dictGet test_01037.dict_array (31.0308,-0.72453) 101 +dictGet test_01037.dict_array (31.0463,2.41997) 101 +dictGet test_01037.dict_array (31.047,0.624184) 101 +dictGet test_01037.dict_array (31.0569,0.0706393) 5994232 +dictGet test_01037.dict_array (31.0583,1.3244099999999999) 101 +dictGet test_01037.dict_array (31.063,3.23861) 101 +dictGet test_01037.dict_array (31.068,0.695575) 101 +dictGet test_01037.dict_array (31.0687,1.85675) 101 +dictGet test_01037.dict_array (31.0692,0.254793) 101 +dictGet test_01037.dict_array (31.0766,0.828128) 101 +dictGet test_01037.dict_array (31.0833,0.0612782) 5994232 +dictGet test_01037.dict_array (31.0833,2.59748) 101 +dictGet test_01037.dict_array (31.0861,-1.3778299999999999) 101 +dictGet test_01037.dict_array (31.0874,3.07258) 101 +dictGet test_01037.dict_array (31.0882,1.4882) 101 +dictGet test_01037.dict_array (31.0924,3.42242) 101 +dictGet test_01037.dict_array (31.0927,2.67448) 101 +dictGet test_01037.dict_array (31.0936,1.12292) 101 +dictGet test_01037.dict_array (31.0952,-0.336928) 101 +dictGet test_01037.dict_array (31.0978,3.48482) 101 +dictGet test_01037.dict_array (31.1107,3.7513199999999998) 101 +dictGet test_01037.dict_array (31.1156,1.19171) 101 +dictGet test_01037.dict_array (31.1176,0.223509) 5994232 +dictGet test_01037.dict_array (31.1249,0.946838) 101 +dictGet test_01037.dict_array (31.1267,1.48983) 101 +dictGet test_01037.dict_array (31.138,-0.289981) 101 +dictGet test_01037.dict_array (31.1382,3.02904) 101 +dictGet test_01037.dict_array (31.1475,2.6178) 101 +dictGet test_01037.dict_array (31.1491,1.37873) 101 +dictGet test_01037.dict_array (31.1525,3.72105) 101 +dictGet test_01037.dict_array (31.1526,-1.4129800000000001) 101 +dictGet test_01037.dict_array (31.1526,-0.186457) 101 +dictGet test_01037.dict_array (31.1539,2.78789) 101 +dictGet test_01037.dict_array 
(31.1548,-1.08552) 101 +dictGet test_01037.dict_array (31.1567,-0.0768925) 101 +dictGet test_01037.dict_array (31.1613,1.49617) 101 +dictGet test_01037.dict_array (31.1653,1.03777) 101 +dictGet test_01037.dict_array (31.1662,3.4214700000000002) 101 +dictGet test_01037.dict_array (31.1672,-0.0813169) 101 +dictGet test_01037.dict_array (31.177,0.440843) 101 +dictGet test_01037.dict_array (31.1788,-0.737151) 101 +dictGet test_01037.dict_array (31.1856,-0.144396) 101 +dictGet test_01037.dict_array (31.1959,3.66813) 101 +dictGet test_01037.dict_array (31.1996,-0.353983) 101 +dictGet test_01037.dict_array (31.2019,2.86802) 101 +dictGet test_01037.dict_array (31.2087,2.31245) 101 +dictGet test_01037.dict_array (31.2125,3.2713200000000002) 101 +dictGet test_01037.dict_array (31.2137,-0.108129) 101 +dictGet test_01037.dict_array (31.216,3.9156) 101 +dictGet test_01037.dict_array (31.2201,-0.202141) 101 +dictGet test_01037.dict_array (31.2285,2.09058) 101 +dictGet test_01037.dict_array (31.2502,4.01526) 101 +dictGet test_01037.dict_array (31.2585,3.11524) 101 +dictGet test_01037.dict_array (31.2645,-0.620418) 101 +dictGet test_01037.dict_array (31.2684,2.74277) 101 +dictGet test_01037.dict_array (31.2821,-1.12772) 101 +dictGet test_01037.dict_array (31.2821,2.46769) 101 +dictGet test_01037.dict_array (31.2887,3.91396) 101 +dictGet test_01037.dict_array (31.295,1.49942) 101 +dictGet test_01037.dict_array (31.2997,3.46122) 101 +dictGet test_01037.dict_array (31.3017,3.3263) 101 +dictGet test_01037.dict_array (31.3022,3.16754) 101 +dictGet test_01037.dict_array (31.3048,0.364962) 101 +dictGet test_01037.dict_array (31.305,3.1967) 101 +dictGet test_01037.dict_array (31.3061,1.84303) 101 +dictGet test_01037.dict_array (31.3082,-0.173851) 101 +dictGet test_01037.dict_array (31.3315,3.90932) 101 +dictGet test_01037.dict_array (31.3351,2.80164) 101 +dictGet test_01037.dict_array (31.3388,0.168765) 5994233 +dictGet test_01037.dict_array (31.339,0.25535) 101 +dictGet test_01037.dict_array (31.3423,1.7036799999999999) 101 +dictGet test_01037.dict_array (31.349,0.386456) 101 +dictGet test_01037.dict_array (31.3558,-1.04336) 101 +dictGet test_01037.dict_array (31.3564,0.478876) 101 +dictGet test_01037.dict_array (31.3607,-0.0860507) 5994233 +dictGet test_01037.dict_array (31.3831,3.84469) 101 +dictGet test_01037.dict_array (31.3886,-0.731137) 101 +dictGet test_01037.dict_array (31.4043,-0.348907) 101 +dictGet test_01037.dict_array (31.4081,1.47391) 101 +dictGet test_01037.dict_array (31.4176,-0.583645) 101 +dictGet test_01037.dict_array (31.4177,1.36972) 101 +dictGet test_01037.dict_array (31.4182,0.958303) 101 +dictGet test_01037.dict_array (31.4199,3.1738) 101 +dictGet test_01037.dict_array (31.4221,2.74876) 101 +dictGet test_01037.dict_array (31.4301,-0.122643) 5994233 +dictGet test_01037.dict_array (31.4344,1.00661) 101 +dictGet test_01037.dict_array (31.4375,4.20304) 101 +dictGet test_01037.dict_array (31.4377,0.289608) 101 +dictGet test_01037.dict_array (31.4379,0.54744) 101 +dictGet test_01037.dict_array (31.4459,3.94945) 101 +dictGet test_01037.dict_array (31.4559,-0.345063) 101 +dictGet test_01037.dict_array (31.464,0.726129) 101 +dictGet test_01037.dict_array (31.4662,-0.299019) 5994233 +dictGet test_01037.dict_array (31.4671,1.9605299999999999) 101 +dictGet test_01037.dict_array (31.4673,-0.403676) 101 +dictGet test_01037.dict_array (31.4712,-0.237941) 5994233 +dictGet test_01037.dict_array (31.4816,0.120264) 5994233 +dictGet test_01037.dict_array (31.4875,0.323483) 101 +dictGet test_01037.dict_array 
(31.490099999999998,-0.338163) 101 +dictGet test_01037.dict_array (31.4932,0.517674) 101 +dictGet test_01037.dict_array (31.5112,1.9689299999999998) 101 +dictGet test_01037.dict_array (31.5122,2.92785) 101 +dictGet test_01037.dict_array (31.5151,0.166429) 101 +dictGet test_01037.dict_array (31.5174,2.94802) 101 +dictGet test_01037.dict_array (31.5182,4.18776) 101 +dictGet test_01037.dict_array (31.5238,1.18793) 101 +dictGet test_01037.dict_array (31.5271,3.07446) 101 +dictGet test_01037.dict_array (31.5393,1.58061) 101 +dictGet test_01037.dict_array (31.5421,3.13711) 101 +dictGet test_01037.dict_array (31.5479,2.39897) 101 +dictGet test_01037.dict_array (31.5519,0.99285) 101 +dictGet test_01037.dict_array (31.5685,3.47987) 101 +dictGet test_01037.dict_array (31.5959,0.437382) 101 +dictGet test_01037.dict_array (31.6003,0.194376) 101 +dictGet test_01037.dict_array (31.6026,2.15457) 101 +dictGet test_01037.dict_array (31.606,2.45365) 101 +dictGet test_01037.dict_array (31.6062,-0.453441) 101 +dictGet test_01037.dict_array (31.6107,1.35247) 101 +dictGet test_01037.dict_array (31.6155,3.85588) 101 +dictGet test_01037.dict_array (31.6222,2.03326) 101 +dictGet test_01037.dict_array (31.6231,-0.123059) 101 +dictGet test_01037.dict_array (31.6244,1.6885599999999998) 101 +dictGet test_01037.dict_array (31.6459,0.669716) 101 +dictGet test_01037.dict_array (31.6563,-0.0644741) 101 +dictGet test_01037.dict_array (31.6618,-0.551121) 101 +dictGet test_01037.dict_array (31.6725,-0.38922) 101 +dictGet test_01037.dict_array (31.6727,4.10336) 101 +dictGet test_01037.dict_array (31.6739,4.1391) 101 +dictGet test_01037.dict_array (31.6897,2.8694699999999997) 101 +dictGet test_01037.dict_array (31.6902,3.98792) 101 +dictGet test_01037.dict_array (31.6945,2.46687) 101 +dictGet test_01037.dict_array (31.6987,-1.3796) 101 +dictGet test_01037.dict_array (31.7012,2.34845) 101 +dictGet test_01037.dict_array (31.7036,0.0228348) 101 +dictGet test_01037.dict_array (31.7046,3.68111) 101 +dictGet test_01037.dict_array (31.7055,2.92556) 101 +dictGet test_01037.dict_array (31.7102,1.04532) 101 +dictGet test_01037.dict_array (31.7149,-0.443302) 101 +dictGet test_01037.dict_array (31.7195,2.99311) 101 +dictGet test_01037.dict_array (31.7274,0.166719) 101 +dictGet test_01037.dict_array (31.7565,-0.565382) 101 +dictGet test_01037.dict_array (31.7615,0.771626) 101 +dictGet test_01037.dict_array (31.7739,1.8970099999999999) 101 +dictGet test_01037.dict_array (31.7848,1.2623199999999999) 101 +dictGet test_01037.dict_array (31.7912,-0.788599) 101 +dictGet test_01037.dict_array (31.8011,2.65853) 101 +dictGet test_01037.dict_array (31.8032,-0.0590108) 101 +dictGet test_01037.dict_array (31.8038,1.9618799999999998) 101 +dictGet test_01037.dict_array (31.8098,-1.46851) 101 +dictGet test_01037.dict_array (31.8131,3.41982) 101 +dictGet test_01037.dict_array (31.8169,3.31059) 101 +dictGet test_01037.dict_array (31.8202,-0.193692) 101 +dictGet test_01037.dict_array (31.8306,1.57586) 101 +dictGet test_01037.dict_array (31.8382,-0.787948) 101 +dictGet test_01037.dict_array (31.8433,2.49692) 101 +dictGet test_01037.dict_array (31.8436,2.41851) 101 +dictGet test_01037.dict_array (31.8563,-1.10787) 101 +dictGet test_01037.dict_array (31.8683,0.996504) 101 +dictGet test_01037.dict_array (31.8693,-0.828142) 101 +dictGet test_01037.dict_array (31.8723,1.08929) 101 +dictGet test_01037.dict_array (31.8737,0.881127) 101 +dictGet test_01037.dict_array (31.8881,-0.58441) 101 +dictGet test_01037.dict_array (31.9011,0.121349) 101 +dictGet 
test_01037.dict_array (31.9066,2.13045) 101 +dictGet test_01037.dict_array (31.9142,1.03368) 101 +dictGet test_01037.dict_array (31.9155,3.38363) 101 +dictGet test_01037.dict_array (31.9168,1.3166) 101 +dictGet test_01037.dict_array (31.9185,-1.11879) 101 +dictGet test_01037.dict_array (31.9186,-0.647948) 101 +dictGet test_01037.dict_array (31.9311,3.96928) 101 +dictGet test_01037.dict_array (31.9335,1.47048) 101 +dictGet test_01037.dict_array (31.9443,-1.36175) 101 +dictGet test_01037.dict_array (31.9481,2.34231) 101 +dictGet test_01037.dict_array (31.9526,1.36565) 101 +dictGet test_01037.dict_array (31.9629,2.5208399999999997) 101 +dictGet test_01037.dict_array (31.9765,0.975783) 101 +dictGet test_01037.dict_array (31.9923,3.31773) 101 +dictGet test_01037.dict_array (31.9994,0.972816) 101 +dictGet test_01037.dict_array (32.001,3.47425) 101 +dictGet test_01037.dict_array (32.0127,2.13874) 101 +dictGet test_01037.dict_array (32.0244,3.2092) 101 +dictGet test_01037.dict_array (32.029,1.18039) 101 +dictGet test_01037.dict_array (32.0315,0.566073) 101 +dictGet test_01037.dict_array (32.0354,1.0766499999999999) 101 +dictGet test_01037.dict_array (32.0399,-1.11576) 101 +dictGet test_01037.dict_array (32.053,2.16849) 101 +dictGet test_01037.dict_array (32.0542,0.042328) 101 +dictGet test_01037.dict_array (32.0576,2.47001) 101 +dictGet test_01037.dict_array (32.061,3.7498899999999997) 101 +dictGet test_01037.dict_array (32.0623,1.25134) 101 +dictGet test_01037.dict_array (32.0626,1.9611399999999999) 101 +dictGet test_01037.dict_array (32.0666,-0.0904247) 101 +dictGet test_01037.dict_array (32.0681,2.28442) 101 +dictGet test_01037.dict_array (32.0692,1.50869) 101 +dictGet test_01037.dict_array (32.0724,4.03314) 101 +dictGet test_01037.dict_array (32.0729,-0.064324) 101 +dictGet test_01037.dict_array (32.079,0.293758) 101 +dictGet test_01037.dict_array (32.0847,-1.19814) 101 +dictGet test_01037.dict_array (32.0974,-0.91927) 101 +dictGet test_01037.dict_array (32.0979,-0.736979) 101 +dictGet test_01037.dict_array (32.106,-1.33063) 101 +dictGet test_01037.dict_array (32.1189,0.246715) 101 +dictGet test_01037.dict_array (32.1207,4.00883) 101 +dictGet test_01037.dict_array (32.1396,1.12402) 101 +dictGet test_01037.dict_array (32.1413,1.5668) 101 +dictGet test_01037.dict_array (32.143,1.35559) 101 +dictGet test_01037.dict_array (32.1538,1.32881) 101 +dictGet test_01037.dict_array (32.1549,4.06552) 101 +dictGet test_01037.dict_array (32.1555,-0.79275) 101 +dictGet test_01037.dict_array (32.163,1.17733) 101 +dictGet test_01037.dict_array (32.1634,2.94273) 101 +dictGet test_01037.dict_array (32.1644,1.85666) 101 +dictGet test_01037.dict_array (32.1745,0.435458) 101 +dictGet test_01037.dict_array (32.1765,1.65149) 101 +dictGet test_01037.dict_array (32.1893,2.08924) 101 +dictGet test_01037.dict_array (32.2024,0.222191) 101 +dictGet test_01037.dict_array (32.2107,1.34379) 101 +dictGet test_01037.dict_array (32.2109,3.9018699999999997) 101 +dictGet test_01037.dict_array (32.2123,1.85233) 101 +dictGet test_01037.dict_array (32.2144,3.72534) 101 +dictGet test_01037.dict_array (32.2218,2.5386699999999998) 101 +dictGet test_01037.dict_array (32.2279,2.84267) 101 +dictGet test_01037.dict_array (32.2345,3.33295) 101 +dictGet test_01037.dict_array (32.2435,3.85283) 101 +dictGet test_01037.dict_array (32.2527,-0.480608) 101 +dictGet test_01037.dict_array (32.2566,-0.837882) 101 +dictGet test_01037.dict_array (32.2627,2.57708) 101 +dictGet test_01037.dict_array (32.2733,0.244931) 101 +dictGet test_01037.dict_array 
(32.2761,4.05808) 101 +dictGet test_01037.dict_array (32.2764,3.78472) 101 +dictGet test_01037.dict_array (32.2814,-1.26011) 101 +dictGet test_01037.dict_array (32.2861,3.02427) 101 +dictGet test_01037.dict_array (32.2924,0.928609) 101 +dictGet test_01037.dict_array (32.2963,-0.78543) 101 +dictGet test_01037.dict_array (32.3039,3.21175) 101 +dictGet test_01037.dict_array (32.3107,0.698287) 101 +dictGet test_01037.dict_array (32.3138,0.0595677) 101 +dictGet test_01037.dict_array (32.3339,0.707056) 101 +dictGet test_01037.dict_array (32.3351,0.415474) 101 +dictGet test_01037.dict_array (32.342,-0.681023) 101 +dictGet test_01037.dict_array (32.3463,1.83196) 101 +dictGet test_01037.dict_array (32.3494,2.43799) 101 +dictGet test_01037.dict_array (32.3524,3.47049) 101 +dictGet test_01037.dict_array (32.3531,2.33115) 101 +dictGet test_01037.dict_array (32.3602,0.116106) 101 +dictGet test_01037.dict_array (32.3612,1.1598) 101 +dictGet test_01037.dict_array (32.3689,3.34847) 101 +dictGet test_01037.dict_array (32.3695,0.734055) 101 +dictGet test_01037.dict_array (32.3825,3.85017) 101 +dictGet test_01037.dict_array (32.3835,-1.25491) 101 +dictGet test_01037.dict_array (32.4018,-0.728568) 101 +dictGet test_01037.dict_array (32.4044,2.96727) 101 +dictGet test_01037.dict_array (32.4101,2.9988) 101 +dictGet test_01037.dict_array (32.417,-1.12908) 101 +dictGet test_01037.dict_array (32.4172,4.1952) 101 +dictGet test_01037.dict_array (32.4239,2.49512) 101 +dictGet test_01037.dict_array (32.4258,4.05137) 101 +dictGet test_01037.dict_array (32.4264,-0.427357) 101 +dictGet test_01037.dict_array (32.4274,3.59377) 101 +dictGet test_01037.dict_array (32.4286,-1.24757) 101 +dictGet test_01037.dict_array (32.4294,3.0665) 101 +dictGet test_01037.dict_array (32.4333,-0.353347) 101 +dictGet test_01037.dict_array (32.4391,3.64421) 101 +dictGet test_01037.dict_array (32.4401,3.70635) 101 +dictGet test_01037.dict_array (32.45,1.68918) 101 +dictGet test_01037.dict_array (32.4507,-0.133471) 101 +dictGet test_01037.dict_array (32.4592,0.976458) 101 +dictGet test_01037.dict_array (32.4595,1.89135) 101 +dictGet test_01037.dict_array (32.4604,0.280248) 101 +dictGet test_01037.dict_array (32.4835,0.472731) 101 +dictGet test_01037.dict_array (32.4855,2.01938) 101 +dictGet test_01037.dict_array (32.4872,2.01697) 101 +dictGet test_01037.dict_array (32.4911,0.613106) 101 +dictGet test_01037.dict_array (32.4918,2.17834) 101 +dictGet test_01037.dict_array (32.4947,2.34595) 101 +dictGet test_01037.dict_array (32.5035,2.92234) 101 +dictGet test_01037.dict_array (32.5132,-0.331206) 101 +dictGet test_01037.dict_array (32.5156,-0.412604) 7652581 +dictGet test_01037.dict_array (32.5158,2.9067499999999997) 101 +dictGet test_01037.dict_array (32.5249,2.44519) 101 +dictGet test_01037.dict_array (32.5293,-0.790952) 101 +dictGet test_01037.dict_array (32.5319,3.96854) 101 +dictGet test_01037.dict_array (32.5518,3.6093) 101 +dictGet test_01037.dict_array (32.5541,3.5225400000000002) 101 +dictGet test_01037.dict_array (32.5569,0.816123) 101 +dictGet test_01037.dict_array (32.5646,1.9775) 101 +dictGet test_01037.dict_array (32.5733,3.81271) 101 +dictGet test_01037.dict_array (32.5767,0.948327) 101 +dictGet test_01037.dict_array (32.5971,1.76179) 101 +dictGet test_01037.dict_array (32.6035,-0.716157) 101 +dictGet test_01037.dict_array (32.6087,4.21614) 101 +dictGet test_01037.dict_array (32.6171,0.024481) 101 +dictGet test_01037.dict_array (32.6189,-0.775391) 101 +dictGet test_01037.dict_array (32.6198,2.92081) 101 +dictGet test_01037.dict_array 
(32.621,-0.970784) 101 +dictGet test_01037.dict_array (32.6266,0.650009) 101 +dictGet test_01037.dict_array (32.6315,2.15144) 101 +dictGet test_01037.dict_array (32.6385,-0.436803) 101 +dictGet test_01037.dict_array (32.6449,-0.191292) 101 +dictGet test_01037.dict_array (32.6535,2.10385) 101 +dictGet test_01037.dict_array (32.6592,3.49973) 101 +dictGet test_01037.dict_array (32.6598,2.5980600000000003) 101 +dictGet test_01037.dict_array (32.6612,2.95681) 101 +dictGet test_01037.dict_array (32.6636,-0.57235) 101 +dictGet test_01037.dict_array (32.669,-0.382702) 101 +dictGet test_01037.dict_array (32.6752,1.30748) 101 +dictGet test_01037.dict_array (32.6811,2.9559800000000003) 101 +dictGet test_01037.dict_array (32.6821,0.57336) 101 +dictGet test_01037.dict_array (32.6828,3.91304) 101 +dictGet test_01037.dict_array (32.6979,3.96868) 101 +dictGet test_01037.dict_array (32.6983,3.15784) 101 +dictGet test_01037.dict_array (32.7122,0.794293) 101 +dictGet test_01037.dict_array (32.7131,-0.847256) 101 +dictGet test_01037.dict_array (32.7219,0.883461) 101 +dictGet test_01037.dict_array (32.7228,1.78808) 101 +dictGet test_01037.dict_array (32.7273,-0.206908) 101 +dictGet test_01037.dict_array (32.7292,0.259331) 101 +dictGet test_01037.dict_array (32.7304,-1.38317) 101 +dictGet test_01037.dict_array (32.7353,1.01601) 101 +dictGet test_01037.dict_array (32.7354,4.17574) 101 +dictGet test_01037.dict_array (32.7357,-0.190194) 101 +dictGet test_01037.dict_array (32.7465,-1.37598) 101 +dictGet test_01037.dict_array (32.7494,-0.275675) 101 +dictGet test_01037.dict_array (32.7514,0.128951) 101 +dictGet test_01037.dict_array (32.753,3.44207) 101 +dictGet test_01037.dict_array (32.7686,2.11713) 101 +dictGet test_01037.dict_array (32.7694,1.47159) 101 +dictGet test_01037.dict_array (32.7768,0.0401042) 101 +dictGet test_01037.dict_array (32.781,-1.34283) 101 +dictGet test_01037.dict_array (32.7814,1.73876) 101 +dictGet test_01037.dict_array (32.7856,-1.06363) 101 +dictGet test_01037.dict_array (32.792699999999996,-1.1255600000000001) 101 +dictGet test_01037.dict_array (32.7941,-0.645447) 101 +dictGet test_01037.dict_array (32.7946,1.48889) 101 +dictGet test_01037.dict_array (32.797,0.791753) 101 +dictGet test_01037.dict_array (32.7982,-0.537798) 101 +dictGet test_01037.dict_array (32.8091,2.3611) 101 +dictGet test_01037.dict_array (32.81,1.7130800000000002) 101 +dictGet test_01037.dict_array (32.8174,-0.288322) 101 +dictGet test_01037.dict_array (32.823,1.6546699999999999) 101 +dictGet test_01037.dict_array (32.8233,1.62108) 101 +dictGet test_01037.dict_array (32.8428,-0.400045) 101 +dictGet test_01037.dict_array (32.8479,2.13598) 101 +dictGet test_01037.dict_array (32.8524,0.199902) 101 +dictGet test_01037.dict_array (32.8543,3.23553) 101 +dictGet test_01037.dict_array (32.8562,1.31371) 101 +dictGet test_01037.dict_array (32.87,1.44256) 101 +dictGet test_01037.dict_array (32.8789,2.38192) 101 +dictGet test_01037.dict_array (32.8812,2.20734) 5999168 +dictGet test_01037.dict_array (32.8815,-0.54427) 101 +dictGet test_01037.dict_array (32.8853,2.4859) 5999168 +dictGet test_01037.dict_array (32.8909,0.513964) 101 +dictGet test_01037.dict_array (32.9035,2.38999) 101 +dictGet test_01037.dict_array (32.9097,2.48131) 5999168 +dictGet test_01037.dict_array (32.928,-0.943269) 101 +dictGet test_01037.dict_array (32.9322,1.13165) 101 +dictGet test_01037.dict_array (32.9348,1.22606) 101 +dictGet test_01037.dict_array (32.9417,3.77998) 101 +dictGet test_01037.dict_array (32.9428,3.11936) 101 +dictGet test_01037.dict_array 
(32.9482,1.18092) 101 +dictGet test_01037.dict_array (32.9506,0.0609364) 101 +dictGet test_01037.dict_array (32.953,-0.828308) 101 +dictGet test_01037.dict_array (32.9593,3.5209099999999998) 101 +dictGet test_01037.dict_array (32.9617,2.07711) 5999168 +dictGet test_01037.dict_array (32.966,0.693749) 101 +dictGet test_01037.dict_array (32.9668,-0.716432) 101 +dictGet test_01037.dict_array (32.9702,1.98555) 101 +dictGet test_01037.dict_array (32.9782,1.73819) 101 +dictGet test_01037.dict_array (32.9805,3.71151) 101 +dictGet test_01037.dict_array (32.9821,2.97225) 101 +dictGet test_01037.dict_array (32.995,-0.830301) 101 +dictGet test_01037.dict_array (33.0234,0.770848) 101 +dictGet test_01037.dict_array (33.0312,-0.340964) 101 +dictGet test_01037.dict_array (33.0366,-0.756795) 101 +dictGet test_01037.dict_array (33.0438,0.812871) 101 +dictGet test_01037.dict_array (33.0455,1.84843) 101 +dictGet test_01037.dict_array (33.0498,0.0913292) 101 +dictGet test_01037.dict_array (33.0506,1.53739) 101 +dictGet test_01037.dict_array (33.0554,2.4265) 101 +dictGet test_01037.dict_array (33.0741,3.61332) 101 +dictGet test_01037.dict_array (33.0765,-0.179985) 101 +dictGet test_01037.dict_array (33.087,1.46465) 101 +dictGet test_01037.dict_array (33.0906,-0.620383) 101 +dictGet test_01037.dict_array (33.1047,-1.28027) 101 +dictGet test_01037.dict_array (33.1072,1.96303) 101 +dictGet test_01037.dict_array (33.1081,-0.897874) 101 +dictGet test_01037.dict_array (33.1122,1.8950200000000001) 101 +dictGet test_01037.dict_array (33.1237,2.63993) 101 +dictGet test_01037.dict_array (33.1238,0.753963) 101 +dictGet test_01037.dict_array (33.1257,0.495668) 101 +dictGet test_01037.dict_array (33.1258,1.78341) 101 +dictGet test_01037.dict_array (33.127,2.59646) 101 +dictGet test_01037.dict_array (33.1324,-1.23742) 101 +dictGet test_01037.dict_array (33.1359,3.83491) 101 +dictGet test_01037.dict_array (33.1628,-0.379588) 101 +dictGet test_01037.dict_array (33.1679,1.25601) 101 +dictGet test_01037.dict_array (33.1688,-1.35553) 101 +dictGet test_01037.dict_array (33.181,2.10943) 101 +dictGet test_01037.dict_array (33.1871,2.81171) 101 +dictGet test_01037.dict_array (33.1877,0.771297) 101 +dictGet test_01037.dict_array (33.1883,-0.204797) 101 +dictGet test_01037.dict_array (33.1886,3.27998) 101 +dictGet test_01037.dict_array (33.1955,0.708907) 101 +dictGet test_01037.dict_array (33.2044,-0.769275) 101 +dictGet test_01037.dict_array (33.2182,3.36103) 101 +dictGet test_01037.dict_array (33.2192,3.43586) 101 +dictGet test_01037.dict_array (33.2322,-0.916753) 101 +dictGet test_01037.dict_array (33.2359,-0.81321) 101 +dictGet test_01037.dict_array (33.238,0.635072) 101 +dictGet test_01037.dict_array (33.2398,3.02588) 101 +dictGet test_01037.dict_array (33.2469,2.35698) 101 +dictGet test_01037.dict_array (33.247,2.3327) 101 +dictGet test_01037.dict_array (33.2579,2.8027100000000003) 101 +dictGet test_01037.dict_array (33.2607,0.321082) 101 +dictGet test_01037.dict_array (33.2653,0.243336) 101 +dictGet test_01037.dict_array (33.2758,0.831836) 101 +dictGet test_01037.dict_array (33.2771,0.886536) 101 +dictGet test_01037.dict_array (33.2914,1.16026) 101 +dictGet test_01037.dict_array (33.2914,1.38882) 101 +dictGet test_01037.dict_array (33.2982,-1.16604) 101 +dictGet test_01037.dict_array (33.2985,0.842556) 101 +dictGet test_01037.dict_array (33.3005,2.8338900000000002) 101 +dictGet test_01037.dict_array (33.305,0.0969475) 101 +dictGet test_01037.dict_array (33.3072,3.82163) 101 +dictGet test_01037.dict_array (33.312,3.41475) 101 
+dictGet test_01037.dict_array (33.3129,2.46048) 101 +dictGet test_01037.dict_array (33.3134,3.46863) 101 +dictGet test_01037.dict_array (33.3203,2.33139) 101 +dictGet test_01037.dict_array (33.324,0.433701) 101 +dictGet test_01037.dict_array (33.3338,2.44705) 101 +dictGet test_01037.dict_array (33.337,4.06475) 101 +dictGet test_01037.dict_array (33.3469,1.08172) 101 +dictGet test_01037.dict_array (33.3538,0.717896) 101 +dictGet test_01037.dict_array (33.3618,1.37899) 101 +dictGet test_01037.dict_array (33.3698,0.547744) 101 +dictGet test_01037.dict_array (33.3705,0.957619) 101 +dictGet test_01037.dict_array (33.3821,3.07258) 101 +dictGet test_01037.dict_array (33.3881,3.0626) 101 +dictGet test_01037.dict_array (33.393,-0.816186) 101 +dictGet test_01037.dict_array (33.3945,0.869508) 101 +dictGet test_01037.dict_array (33.4001,1.24186) 101 +dictGet test_01037.dict_array (33.4008,2.34911) 101 +dictGet test_01037.dict_array (33.4166,-1.2808899999999999) 101 +dictGet test_01037.dict_array (33.4167,3.0655) 101 +dictGet test_01037.dict_array (33.4204,2.81887) 101 +dictGet test_01037.dict_array (33.4211,1.71128) 101 +dictGet test_01037.dict_array (33.4237,2.91761) 101 +dictGet test_01037.dict_array (33.4266,1.5955599999999999) 101 +dictGet test_01037.dict_array (33.4353,-0.391392) 101 +dictGet test_01037.dict_array (33.4362,-0.134658) 101 +dictGet test_01037.dict_array (33.4386,0.15396) 101 +dictGet test_01037.dict_array (33.4421,-0.50712) 101 +dictGet test_01037.dict_array (33.452,0.915829) 101 +dictGet test_01037.dict_array (33.463,-0.0882717) 101 +dictGet test_01037.dict_array (33.464,-1.00949) 101 +dictGet test_01037.dict_array (33.4692,0.954092) 101 +dictGet test_01037.dict_array (33.4716,1.9538799999999998) 101 +dictGet test_01037.dict_array (33.4756,1.85836) 101 +dictGet test_01037.dict_array (33.4859,4.0751) 101 +dictGet test_01037.dict_array (33.4899,3.54193) 101 +dictGet test_01037.dict_array (33.4935,3.49794) 101 +dictGet test_01037.dict_array (33.494,-0.983356) 101 +dictGet test_01037.dict_array (33.4955,-1.28128) 101 +dictGet test_01037.dict_array (33.4965,-0.278687) 101 +dictGet test_01037.dict_array (33.4991,0.647491) 101 +dictGet test_01037.dict_array (33.5076,2.2272) 101 +dictGet test_01037.dict_array (33.5079,-0.498199) 101 +dictGet test_01037.dict_array (33.5157,0.535034) 101 +dictGet test_01037.dict_array (33.5171,2.49677) 101 +dictGet test_01037.dict_array (33.5255,2.4447200000000002) 101 +dictGet test_01037.dict_array (33.526,4.01194) 101 +dictGet test_01037.dict_array (33.5288,0.789434) 101 +dictGet test_01037.dict_array (33.5356,-1.17671) 101 +dictGet test_01037.dict_array (33.5402,1.49152) 101 +dictGet test_01037.dict_array (33.5418,3.45757) 101 +dictGet test_01037.dict_array (33.5428,1.90712) 101 +dictGet test_01037.dict_array (33.5556,-0.55741) 101 +dictGet test_01037.dict_array (33.5564,0.876858) 101 +dictGet test_01037.dict_array (33.5567,-0.10208) 101 +dictGet test_01037.dict_array (33.5645,-0.124824) 101 +dictGet test_01037.dict_array (33.5663,3.4872) 101 +dictGet test_01037.dict_array (33.5716,-0.0107611) 101 +dictGet test_01037.dict_array (33.578,3.55714) 101 +dictGet test_01037.dict_array (33.5826,-0.49076) 101 +dictGet test_01037.dict_array (33.5909,0.773737) 101 +dictGet test_01037.dict_array (33.5958,2.9619999999999997) 5994231 +dictGet test_01037.dict_array (33.6193,-0.919755) 101 +dictGet test_01037.dict_array (33.6313,0.652132) 101 +dictGet test_01037.dict_array (33.632,0.823351) 101 +dictGet test_01037.dict_array (33.66,2.18998) 101 +dictGet 
test_01037.dict_array (33.6621,0.535395) 101 +dictGet test_01037.dict_array (33.6726,3.19367) 101 +dictGet test_01037.dict_array (33.6912,1.74522) 101 +dictGet test_01037.dict_array (33.705,0.706397) 101 +dictGet test_01037.dict_array (33.7076,0.7622) 101 +dictGet test_01037.dict_array (33.7112,1.70187) 101 +dictGet test_01037.dict_array (33.7246,-1.14837) 101 +dictGet test_01037.dict_array (33.7326,2.62413) 5994231 +dictGet test_01037.dict_array (33.7332,2.82137) 5994231 +dictGet test_01037.dict_array (33.7434,0.394672) 101 +dictGet test_01037.dict_array (33.7443,1.54557) 101 +dictGet test_01037.dict_array (33.7506,1.57317) 101 +dictGet test_01037.dict_array (33.7526,1.8578999999999999) 101 +dictGet test_01037.dict_array (33.766,4.15013) 101 +dictGet test_01037.dict_array (33.7834,2.41789) 101 +dictGet test_01037.dict_array (33.7864,0.230935) 101 +dictGet test_01037.dict_array (33.7965,3.05709) 101 +dictGet test_01037.dict_array (33.7998,3.32881) 101 +dictGet test_01037.dict_array (33.8003,2.97338) 5994231 +dictGet test_01037.dict_array (33.8007,-1.08962) 101 +dictGet test_01037.dict_array (33.8022,-0.139488) 101 +dictGet test_01037.dict_array (33.8065,2.70857) 5994231 +dictGet test_01037.dict_array (33.8169,-0.607788) 101 +dictGet test_01037.dict_array (33.8203,0.108512) 101 +dictGet test_01037.dict_array (33.8231,-1.03449) 101 +dictGet test_01037.dict_array (33.8312,3.49458) 101 +dictGet test_01037.dict_array (33.8342,0.297518) 101 +dictGet test_01037.dict_array (33.8352,0.165872) 101 +dictGet test_01037.dict_array (33.8354,1.87277) 101 +dictGet test_01037.dict_array (33.8371,1.60103) 101 +dictGet test_01037.dict_array (33.8387,1.9968) 101 +dictGet test_01037.dict_array (33.8403,3.5805) 101 +dictGet test_01037.dict_array (33.8414,-0.703067) 101 +dictGet test_01037.dict_array (33.844,-0.179472) 101 +dictGet test_01037.dict_array (33.8468,3.40137) 101 +dictGet test_01037.dict_array (33.8509,4.15334) 101 +dictGet test_01037.dict_array (33.8539,2.38339) 101 +dictGet test_01037.dict_array (33.858,-1.3122500000000001) 101 +dictGet test_01037.dict_array (33.859,3.72626) 101 +dictGet test_01037.dict_array (33.8616,2.24433) 101 +dictGet test_01037.dict_array (33.8621,3.01035) 101 +dictGet test_01037.dict_array (33.8623,1.17559) 101 +dictGet test_01037.dict_array (33.8682,2.706) 5994231 +dictGet test_01037.dict_array (33.8684,0.189231) 101 +dictGet test_01037.dict_array (33.872,1.93574) 101 +dictGet test_01037.dict_array (33.8844,3.80404) 101 +dictGet test_01037.dict_array (33.8888,0.594884) 101 +dictGet test_01037.dict_array (33.8946,2.74161) 101 +dictGet test_01037.dict_array (33.9023,0.6239) 101 +dictGet test_01037.dict_array (33.9057,0.873222) 101 +dictGet test_01037.dict_array (33.9157,-1.26607) 101 +dictGet test_01037.dict_array (33.92,2.06848) 101 +dictGet test_01037.dict_array (33.9298,-0.00526229) 101 +dictGet test_01037.dict_array (33.932,3.07063) 101 +dictGet test_01037.dict_array (33.9322,0.629385) 101 +dictGet test_01037.dict_array (33.9367,-1.41955) 101 +dictGet test_01037.dict_array (33.937,1.42532) 101 +dictGet test_01037.dict_array (33.9375,1.1467100000000001) 101 +dictGet test_01037.dict_array (33.9434,-1.05739) 101 +dictGet test_01037.dict_array (33.9477,3.34809) 101 +dictGet test_01037.dict_array (33.95,2.21715) 101 +dictGet test_01037.dict_array (33.955799999999996,0.305176) 101 +dictGet test_01037.dict_array (33.9686,-0.28273) 101 +dictGet test_01037.dict_array (33.9703,4.1255) 101 +dictGet test_01037.dict_array (33.9707,3.08199) 101 +dictGet test_01037.dict_array 
(33.9754,1.06203) 101 +dictGet test_01037.dict_array (33.9757,3.72468) 101 +dictGet test_01037.dict_array (33.9775,-0.0440599) 101 +dictGet test_01037.dict_array (33.9777,-0.251484) 101 +dictGet test_01037.dict_array (33.9789,-0.339374) 101 +dictGet test_01037.dict_array (33.9849,2.54515) 5994231 +dictGet test_01037.dict_array (33.9885,-0.318557) 101 +dictGet test_01037.dict_array (33.9977,1.07175) 101 +dictGet test_01037.dict_array (33.9984,-0.700517) 101 +dictGet test_01037.dict_array (34.0149,3.53338) 101 +dictGet test_01037.dict_array (34.0173,3.39155) 101 +dictGet test_01037.dict_array (34.0317,3.9579) 101 +dictGet test_01037.dict_array (34.0369,3.83612) 101 +dictGet test_01037.dict_array (34.043,-0.0887221) 101 +dictGet test_01037.dict_array (34.0487,1.14252) 101 +dictGet test_01037.dict_array (34.052,1.74832) 101 +dictGet test_01037.dict_array (34.0711,-0.898071) 101 +dictGet test_01037.dict_array (34.0747,1.55057) 101 +dictGet test_01037.dict_array (34.0803,3.16763) 101 +dictGet test_01037.dict_array (34.0872,3.75555) 101 +dictGet test_01037.dict_array (34.0965,1.62038) 101 +dictGet test_01037.dict_array (34.0977,-0.412691) 101 +dictGet test_01037.dict_array (34.0986,0.0294206) 101 +dictGet test_01037.dict_array (34.1072,3.15823) 101 +dictGet test_01037.dict_array (34.1092,3.09599) 101 +dictGet test_01037.dict_array (34.1206,1.04637) 5940222 +dictGet test_01037.dict_array (34.1209,3.13826) 101 +dictGet test_01037.dict_array (34.1265,3.95881) 101 +dictGet test_01037.dict_array (34.1286,-0.539319) 101 +dictGet test_01037.dict_array (34.1358,3.67451) 101 +dictGet test_01037.dict_array (34.1428,0.136115) 101 +dictGet test_01037.dict_array (34.157,1.73522) 101 +dictGet test_01037.dict_array (34.1581,1.48001) 101 +dictGet test_01037.dict_array (34.1682,3.42373) 101 +dictGet test_01037.dict_array (34.1683,-1.26511) 101 +dictGet test_01037.dict_array (34.1684,4.20007) 101 +dictGet test_01037.dict_array (34.1854,3.32089) 101 +dictGet test_01037.dict_array (34.2022,0.749536) 101 +dictGet test_01037.dict_array (34.2044,3.04865) 101 +dictGet test_01037.dict_array (34.22,-0.500055) 101 +dictGet test_01037.dict_array (34.2249,0.743775) 101 +dictGet test_01037.dict_array (34.2254,1.34702) 101 +dictGet test_01037.dict_array (34.2355,-0.898843) 101 +dictGet test_01037.dict_array (34.2394,2.0203699999999998) 101 +dictGet test_01037.dict_array (34.2466,1.83785) 101 +dictGet test_01037.dict_array (34.247,4.09563) 101 +dictGet test_01037.dict_array (34.2508,2.61312) 101 +dictGet test_01037.dict_array (34.2517,1.69642) 101 +dictGet test_01037.dict_array (34.2564,4.13033) 101 +dictGet test_01037.dict_array (34.2574,4.18928) 101 +dictGet test_01037.dict_array (34.2614,-0.478719) 101 +dictGet test_01037.dict_array (34.2625,2.38088) 101 +dictGet test_01037.dict_array (34.2666,3.1503) 101 +dictGet test_01037.dict_array (34.271,4.02223) 101 +dictGet test_01037.dict_array (34.2727,0.514755) 101 +dictGet test_01037.dict_array (34.278,1.98929) 101 +dictGet test_01037.dict_array (34.2798,-0.199208) 101 +dictGet test_01037.dict_array (34.2804,2.05184) 101 +dictGet test_01037.dict_array (34.2945,-1.11051) 101 +dictGet test_01037.dict_array (34.3168,-0.0829721) 101 +dictGet test_01037.dict_array (34.3345,3.4358) 101 +dictGet test_01037.dict_array (34.3377,1.13527) 5940222 +dictGet test_01037.dict_array (34.3383,1.27891) 5940222 +dictGet test_01037.dict_array (34.3391,1.47945) 5940222 +dictGet test_01037.dict_array (34.3441,0.627014) 101 +dictGet test_01037.dict_array (34.347,2.4853) 101 +dictGet 
test_01037.dict_array (34.3514,2.16247) 101 +dictGet test_01037.dict_array (34.3627,2.64533) 101 +dictGet test_01037.dict_array (34.3682,-0.227501) 101 +dictGet test_01037.dict_array (34.3756,4.21248) 101 +dictGet test_01037.dict_array (34.379,3.96604) 101 +dictGet test_01037.dict_array (34.3827,1.7518) 101 +dictGet test_01037.dict_array (34.3912,2.8834) 101 +dictGet test_01037.dict_array (34.3919,0.668829) 101 +dictGet test_01037.dict_array (34.3949,2.00338) 101 +dictGet test_01037.dict_array (34.3987,0.557268) 101 +dictGet test_01037.dict_array (34.4111,0.768558) 101 +dictGet test_01037.dict_array (34.4119,2.8742) 101 +dictGet test_01037.dict_array (34.416,3.50841) 101 +dictGet test_01037.dict_array (34.4212,1.24916) 5940222 +dictGet test_01037.dict_array (34.4251,0.457029) 101 +dictGet test_01037.dict_array (34.4274,-0.902559) 101 +dictGet test_01037.dict_array (34.4325,4.03159) 101 +dictGet test_01037.dict_array (34.438,1.63994) 101 +dictGet test_01037.dict_array (34.4403,-0.177594) 101 +dictGet test_01037.dict_array (34.4421,0.726712) 101 +dictGet test_01037.dict_array (34.4517,2.98611) 101 +dictGet test_01037.dict_array (34.4658,-1.312) 101 +dictGet test_01037.dict_array (34.4732,-0.0681338) 101 +dictGet test_01037.dict_array (34.4752,2.81646) 101 +dictGet test_01037.dict_array (34.4914,2.3858) 101 +dictGet test_01037.dict_array (34.4923,0.855231) 101 +dictGet test_01037.dict_array (34.5235,1.78468) 101 +dictGet test_01037.dict_array (34.5305,4.10608) 101 +dictGet test_01037.dict_array (34.5389,0.621937) 101 +dictGet test_01037.dict_array (34.5406,3.17145) 101 +dictGet test_01037.dict_array (34.5434,-0.56306) 101 +dictGet test_01037.dict_array (34.5449,3.13311) 101 +dictGet test_01037.dict_array (34.5491,2.31572) 101 +dictGet test_01037.dict_array (34.5539,2.94028) 101 +dictGet test_01037.dict_array (34.5546,-0.208825) 101 +dictGet test_01037.dict_array (34.5549,3.78486) 101 +dictGet test_01037.dict_array (34.5676,0.307148) 101 +dictGet test_01037.dict_array (34.5743,1.5217399999999999) 101 +dictGet test_01037.dict_array (34.5775,3.48046) 101 +dictGet test_01037.dict_array (34.5815,2.5243700000000002) 101 +dictGet test_01037.dict_array (34.5841,4.21191) 101 +dictGet test_01037.dict_array (34.5887,2.65083) 101 +dictGet test_01037.dict_array (34.5937,3.2143) 101 +dictGet test_01037.dict_array (34.6013,-1.0612) 101 +dictGet test_01037.dict_array (34.6089,1.36066) 101 +dictGet test_01037.dict_array (34.6103,3.40227) 101 +dictGet test_01037.dict_array (34.6128,1.92276) 101 +dictGet test_01037.dict_array (34.6175,2.43627) 101 +dictGet test_01037.dict_array (34.6209,3.43776) 101 +dictGet test_01037.dict_array (34.6234,2.60237) 101 +dictGet test_01037.dict_array (34.6275,3.52479) 101 +dictGet test_01037.dict_array (34.635,0.568558) 101 +dictGet test_01037.dict_array (34.6373,2.37692) 101 +dictGet test_01037.dict_array (34.6375,3.52234) 101 +dictGet test_01037.dict_array (34.6426,2.12397) 101 +dictGet test_01037.dict_array (34.6513,2.80915) 101 +dictGet test_01037.dict_array (34.6632,2.30039) 101 +dictGet test_01037.dict_array (34.6691,1.86582) 101 +dictGet test_01037.dict_array (34.6739,0.15342) 101 +dictGet test_01037.dict_array (34.6825,0.0499679) 101 +dictGet test_01037.dict_array (34.6893,0.454326) 101 +dictGet test_01037.dict_array (34.6957,-0.358598) 101 +dictGet test_01037.dict_array (34.6986,0.562679) 101 +dictGet test_01037.dict_array (34.712,1.12114) 101 +dictGet test_01037.dict_array (34.7126,-0.0057301) 101 +dictGet test_01037.dict_array (34.7137,0.0248501) 101 +dictGet 
test_01037.dict_array (34.7162,1.15623) 101 +dictGet test_01037.dict_array (34.7258,3.95142) 101 +dictGet test_01037.dict_array (34.7347,3.5232099999999997) 101 +dictGet test_01037.dict_array (34.7363,2.23374) 101 +dictGet test_01037.dict_array (34.7375,0.397841) 101 +dictGet test_01037.dict_array (34.7423,3.09198) 101 +dictGet test_01037.dict_array (34.7452,3.09029) 101 +dictGet test_01037.dict_array (34.7539,-1.06943) 101 +dictGet test_01037.dict_array (34.7733,-0.00912717) 101 +dictGet test_01037.dict_array (34.774,2.71088) 101 +dictGet test_01037.dict_array (34.7771,1.46009) 101 +dictGet test_01037.dict_array (34.7782,-1.28308) 101 +dictGet test_01037.dict_array (34.7924,3.63564) 101 +dictGet test_01037.dict_array (34.7939,-0.416676) 101 +dictGet test_01037.dict_array (34.7964,-0.401773) 101 +dictGet test_01037.dict_array (34.7974,0.0286873) 101 +dictGet test_01037.dict_array (34.7975,3.05965) 101 +dictGet test_01037.dict_array (34.8037,3.07263) 101 +dictGet test_01037.dict_array (34.8254,-0.390284) 101 +dictGet test_01037.dict_array (34.828,1.91869) 101 +dictGet test_01037.dict_array (34.8289,3.71058) 101 +dictGet test_01037.dict_array (34.8403,2.14606) 101 +dictGet test_01037.dict_array (34.8437,2.20617) 101 +dictGet test_01037.dict_array (34.8469,2.38435) 101 +dictGet test_01037.dict_array (34.86,1.45705) 101 +dictGet test_01037.dict_array (34.8612,0.914248) 101 +dictGet test_01037.dict_array (34.8663,3.4215400000000002) 101 +dictGet test_01037.dict_array (34.8724,-0.375144) 101 +dictGet test_01037.dict_array (34.8795,3.29317) 101 +dictGet test_01037.dict_array (34.8823,1.21988) 101 +dictGet test_01037.dict_array (34.8834,1.07657) 101 +dictGet test_01037.dict_array (34.8837,0.157648) 101 +dictGet test_01037.dict_array (34.8871,-0.9755) 101 +dictGet test_01037.dict_array (34.8871,1.8943699999999999) 101 +dictGet test_01037.dict_array (34.889,3.36756) 101 +dictGet test_01037.dict_array (34.8907,1.24874) 101 +dictGet test_01037.dict_array (34.8965,3.13508) 101 +dictGet test_01037.dict_array (34.9042,2.62092) 101 +dictGet test_01037.dict_array (34.9055,-0.0448967) 101 +dictGet test_01037.dict_array (34.9122,0.110576) 101 +dictGet test_01037.dict_array (34.9228,3.60183) 101 +dictGet test_01037.dict_array (34.9237,1.21715) 101 +dictGet test_01037.dict_array (34.9296,1.70459) 101 +dictGet test_01037.dict_array (34.941,-1.14663) 101 +dictGet test_01037.dict_array (34.9448,1.18923) 101 +dictGet test_01037.dict_array (34.9462,3.81678) 101 +dictGet test_01037.dict_array (34.9466,0.593463) 101 +dictGet test_01037.dict_array (34.9485,0.150307) 101 +dictGet test_01037.dict_array (34.9542,0.487238) 101 +dictGet test_01037.dict_array (34.9559,2.03473) 101 +dictGet test_01037.dict_array (34.9671,-0.960225) 101 +dictGet test_01037.dict_array (34.9711,2.63444) 101 +dictGet test_01037.dict_array (34.9892,0.354775) 101 +dictGet test_01037.dict_array (34.9907,1.40724) 101 +dictGet test_01037.dict_array (34.9916,-0.00173097) 101 +dictGet test_01037.dict_array (34.9919,2.06167) 101
diff --git a/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.reference b/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh b/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh
new file mode 100755
index 00000000000..9fe264ec30a
--- /dev/null
+++ b/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh
@@ -0,0 +1,67 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. $CURDIR/../shell_config.sh + +TMP_DIR="/tmp" + +declare -a SearchTypes=("POLYGON" "POLYGON_SIMPLE" "POLYGON_INDEX_EACH" "POLYGON_INDEX_CELL") + +tar -xf ${CURDIR}/01037_test_data_search.tar.gz -C ${CURDIR} + +$CLICKHOUSE_CLIENT -n --query=" +DROP DATABASE IF EXISTS test_01037; +CREATE DATABASE test_01037 Engine = Ordinary; +DROP TABLE IF EXISTS test_01037.points; +CREATE TABLE test_01037.points (x Float64, y Float64) ENGINE = Memory; +" + +$CLICKHOUSE_CLIENT --query="INSERT INTO test_01037.points FORMAT TSV" --max_insert_block_size=100000 < "${CURDIR}/01037_point_data" + +rm ${CURDIR}/01037_point_data + +$CLICKHOUSE_CLIENT -n --query=" +DROP TABLE IF EXISTS test_01037.polygons_array; + +CREATE TABLE test_01037.polygons_array +( + key Array(Array(Array(Array(Float64)))), + name String, + value UInt64 +) +ENGINE = Memory; +" + +$CLICKHOUSE_CLIENT --query="INSERT INTO test_01037.polygons_array FORMAT JSONEachRow" --max_insert_block_size=100000 < "${CURDIR}/01037_polygon_data" + +rm ${CURDIR}/01037_polygon_data + +for type in ${SearchTypes[@]}; +do + outputFile="${TMP_DIR}/results${type}.out" + + $CLICKHOUSE_CLIENT -n --query=" + DROP DICTIONARY IF EXISTS test_01037.dict_array; + + CREATE DICTIONARY test_01037.dict_array + ( + key Array(Array(Array(Array(Float64)))), + name String DEFAULT 'qqq', + value UInt64 DEFAULT 101 + ) + PRIMARY KEY key + SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'polygons_array' PASSWORD '' DB 'test_01037')) + LIFETIME(0) + LAYOUT($type()); + + select 'dictGet', 'test_01037.dict_array' as dict_name, tuple(x, y) as key, + dictGet(dict_name, 'value', key) from test_01037.points order by x, y; + " > $outputFile + + diff -q "${CURDIR}/01037_polygon_dicts_correctness_all.ans" "$outputFile" +done + +$CLICKHOUSE_CLIENT -n --query=" +DROP TABLE test_01037.points; +DROP DATABASE test_01037; +" diff --git a/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.ans b/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.ans new file mode 100644 index 00000000000..45fa7637421 --- /dev/null +++ b/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.ans @@ -0,0 +1,1000 @@ +dictGet test_01037.dict_array (29.5699,2.50068) 101 +dictGet test_01037.dict_array (29.5796,1.55456) 101 +dictGet test_01037.dict_array (29.5796,2.36864) 101 +dictGet test_01037.dict_array (29.5844,1.59626) 101 +dictGet test_01037.dict_array (29.5886,4.03321) 101 +dictGet test_01037.dict_array (29.5914,3.02628) 101 +dictGet test_01037.dict_array (29.5926,-0.0965169) 101 +dictGet test_01037.dict_array (29.5968,2.37773) 101 +dictGet test_01037.dict_array (29.5984,0.755853) 101 +dictGet test_01037.dict_array (29.6066,3.47173) 101 +dictGet test_01037.dict_array (29.6085,-1.26007) 6489978 +dictGet test_01037.dict_array (29.6131,0.246565) 101 +dictGet test_01037.dict_array (29.6157,-0.266687) 101 +dictGet test_01037.dict_array (29.6164,2.94674) 101 +dictGet test_01037.dict_array (29.6195,-0.591941) 101 +dictGet test_01037.dict_array (29.6231,1.54818) 101 +dictGet test_01037.dict_array (29.6379,0.764114) 101 +dictGet test_01037.dict_array (29.6462,-0.772059) 934530 +dictGet test_01037.dict_array (29.6579,-1.07336) 6489978 +dictGet test_01037.dict_array (29.6618,-0.271842) 101 +dictGet test_01037.dict_array (29.6629,-0.303602) 101 +dictGet test_01037.dict_array (29.6659,-0.782823) 934530 +dictGet test_01037.dict_array (29.6736,-0.113832) 101 +dictGet test_01037.dict_array (29.6759,3.02905) 101 +dictGet 
test_01037.dict_array (29.6778,3.71898) 101 +dictGet test_01037.dict_array (29.6796,1.10433) 101 +dictGet test_01037.dict_array (29.6809,2.13677) 101 +dictGet test_01037.dict_array (29.6935,4.11894) 101 +dictGet test_01037.dict_array (29.6991,-1.4458199999999999) 101 +dictGet test_01037.dict_array (29.6997,3.17297) 101 +dictGet test_01037.dict_array (29.7043,3.6145899999999997) 101 +dictGet test_01037.dict_array (29.7065,3.24885) 101 +dictGet test_01037.dict_array (29.7126,0.28108) 101 +dictGet test_01037.dict_array (29.7192,0.174273) 101 +dictGet test_01037.dict_array (29.7217,-0.523481) 3501900 +dictGet test_01037.dict_array (29.7271,1.67967) 101 +dictGet test_01037.dict_array (29.7311,4.12444) 101 +dictGet test_01037.dict_array (29.7347,1.88378) 101 +dictGet test_01037.dict_array (29.7358,0.67944) 101 +dictGet test_01037.dict_array (29.7366,-0.2973) 101 +dictGet test_01037.dict_array (29.7446,0.646536) 101 +dictGet test_01037.dict_array (29.7453,-0.567963) 3501900 +dictGet test_01037.dict_array (29.764,4.04217) 101 +dictGet test_01037.dict_array (29.7655,1.51372) 101 +dictGet test_01037.dict_array (29.7744,1.12435) 101 +dictGet test_01037.dict_array (29.7774,-0.0681196) 3501895 +dictGet test_01037.dict_array (29.7784,1.54864) 101 +dictGet test_01037.dict_array (29.7785,2.24139) 101 +dictGet test_01037.dict_array (29.7922,0.220808) 101 +dictGet test_01037.dict_array (29.7936,2.37709) 101 +dictGet test_01037.dict_array (29.8008,0.948536) 101 +dictGet test_01037.dict_array (29.8115,0.201227) 101 +dictGet test_01037.dict_array (29.814,0.149601) 3501895 +dictGet test_01037.dict_array (29.8193,-1.35858) 101 +dictGet test_01037.dict_array (29.8201,0.965518) 101 +dictGet test_01037.dict_array (29.8265,-0.727286) 3501900 +dictGet test_01037.dict_array (29.8277,-0.531746) 3501900 +dictGet test_01037.dict_array (29.8289,3.63009) 101 +dictGet test_01037.dict_array (29.8548,0.838047) 101 +dictGet test_01037.dict_array (29.8641,-0.845265) 3501900 +dictGet test_01037.dict_array (29.8649,0.0562212) 3501895 +dictGet test_01037.dict_array (29.8701,-1.02045) 934530 +dictGet test_01037.dict_array (29.8733,2.76654) 101 +dictGet test_01037.dict_array (29.876,0.555475) 101 +dictGet test_01037.dict_array (29.8794,-0.800108) 3501900 +dictGet test_01037.dict_array (29.8813,2.7426399999999997) 101 +dictGet test_01037.dict_array (29.897100000000002,2.66193) 101 +dictGet test_01037.dict_array (29.908,4.01339) 101 +dictGet test_01037.dict_array (29.9165,-1.08246) 3501894 +dictGet test_01037.dict_array (29.9201,-0.420861) 3498054 +dictGet test_01037.dict_array (29.9217,3.03778) 101 +dictGet test_01037.dict_array (29.9355,0.773833) 101 +dictGet test_01037.dict_array (29.947,3.76517) 101 +dictGet test_01037.dict_array (29.9518,-0.60557) 3498056 +dictGet test_01037.dict_array (29.9564,-0.600163) 3498056 +dictGet test_01037.dict_array (29.959600000000002,4.16591) 101 +dictGet test_01037.dict_array (29.9615,-1.33708) 3501894 +dictGet test_01037.dict_array (29.9699,-0.392375) 3498054 +dictGet test_01037.dict_array (29.9776,1.04552) 101 +dictGet test_01037.dict_array (29.9784,4.02756) 101 +dictGet test_01037.dict_array (29.9819,4.00597) 101 +dictGet test_01037.dict_array (29.9826,1.2816100000000001) 101 +dictGet test_01037.dict_array (30.0026,2.76257) 101 +dictGet test_01037.dict_array (30.0126,3.68255) 101 +dictGet test_01037.dict_array (30.0131,0.796576) 3501892 +dictGet test_01037.dict_array (30.018,1.16523) 101 +dictGet test_01037.dict_array (30.0261,-0.210653) 3501896 +dictGet test_01037.dict_array (30.0472,-1.11007) 
3501894 +dictGet test_01037.dict_array (30.0542,-0.479585) 3498054 +dictGet test_01037.dict_array (30.0613,1.6278000000000001) 101 +dictGet test_01037.dict_array (30.0617,-0.0551152) 3501895 +dictGet test_01037.dict_array (30.0637,2.62066) 101 +dictGet test_01037.dict_array (30.0721,1.6424400000000001) 101 +dictGet test_01037.dict_array (30.0769,-0.402636) 3498054 +dictGet test_01037.dict_array (30.0791,-0.277435) 3501896 +dictGet test_01037.dict_array (30.0931,0.0327512) 3501895 +dictGet test_01037.dict_array (30.1059,3.52623) 101 +dictGet test_01037.dict_array (30.1103,0.865466) 3501892 +dictGet test_01037.dict_array (30.1115,2.95243) 101 +dictGet test_01037.dict_array (30.1144,1.71029) 101 +dictGet test_01037.dict_array (30.1311,-0.864751) 3501899 +dictGet test_01037.dict_array (30.1336,-0.851386) 3501899 +dictGet test_01037.dict_array (30.1393,3.89901) 101 +dictGet test_01037.dict_array (30.1456,-0.531898) 3498054 +dictGet test_01037.dict_array (30.1492,2.07833) 101 +dictGet test_01037.dict_array (30.1575,2.43856) 101 +dictGet test_01037.dict_array (30.1682,1.19771) 101 +dictGet test_01037.dict_array (30.1716,3.9853300000000003) 101 +dictGet test_01037.dict_array (30.1849,2.78374) 101 +dictGet test_01037.dict_array (30.1866,0.65658) 3498021 +dictGet test_01037.dict_array (30.1885,1.56943) 101 +dictGet test_01037.dict_array (30.1959,-1.38202) 101 +dictGet test_01037.dict_array (30.1999,1.58413) 101 +dictGet test_01037.dict_array (30.2024,0.713081) 3498021 +dictGet test_01037.dict_array (30.2054,0.620143) 3498021 +dictGet test_01037.dict_array (30.2091,1.51641) 101 +dictGet test_01037.dict_array (30.2124,-0.331782) 3498031 +dictGet test_01037.dict_array (30.226,3.03527) 101 +dictGet test_01037.dict_array (30.2261,3.18486) 101 +dictGet test_01037.dict_array (30.2288,2.48407) 101 +dictGet test_01037.dict_array (30.2345,3.7462400000000002) 101 +dictGet test_01037.dict_array (30.2375,0.62046) 3498021 +dictGet test_01037.dict_array (30.2425,-0.472914) 3498054 +dictGet test_01037.dict_array (30.247,3.95863) 101 +dictGet test_01037.dict_array (30.2494,-0.305093) 3498031 +dictGet test_01037.dict_array (30.2499,2.54337) 101 +dictGet test_01037.dict_array (30.2606,2.16644) 101 +dictGet test_01037.dict_array (30.2672,3.94847) 101 +dictGet test_01037.dict_array (30.2709,-0.136264) 6088794 +dictGet test_01037.dict_array (30.2764,1.18654) 101 +dictGet test_01037.dict_array (30.2765,1.20383) 101 +dictGet test_01037.dict_array (30.2839,1.05762) 3498024 +dictGet test_01037.dict_array (30.286,0.469327) 3498021 +dictGet test_01037.dict_array (30.2927,3.1693) 101 +dictGet test_01037.dict_array (30.2935,3.49854) 101 +dictGet test_01037.dict_array (30.307,0.312338) 3498021 +dictGet test_01037.dict_array (30.3085,1.07791) 3498024 +dictGet test_01037.dict_array (30.3139,2.77248) 101 +dictGet test_01037.dict_array (30.314,0.822823) 3498024 +dictGet test_01037.dict_array (30.3227,-0.587351) 3498055 +dictGet test_01037.dict_array (30.332,1.00174) 3498024 +dictGet test_01037.dict_array (30.3388,0.844148) 3498024 +dictGet test_01037.dict_array (30.3485,0.561902) 3498021 +dictGet test_01037.dict_array (30.3497,0.180362) 6489998 +dictGet test_01037.dict_array (30.361,4.13016) 101 +dictGet test_01037.dict_array (30.3623,-0.0484027) 6489998 +dictGet test_01037.dict_array (30.3638,3.9845800000000002) 101 +dictGet test_01037.dict_array (30.3853,3.16051) 101 +dictGet test_01037.dict_array (30.3974,2.6617800000000003) 101 +dictGet test_01037.dict_array (30.4002,-1.15886) 101 +dictGet test_01037.dict_array 
(30.4008,-0.387015) 3498031 +dictGet test_01037.dict_array (30.4018,1.86493) 101 +dictGet test_01037.dict_array (30.4239,1.16818) 3498024 +dictGet test_01037.dict_array (30.4363,3.63938) 101 +dictGet test_01037.dict_array (30.4377,-0.81315) 3498063 +dictGet test_01037.dict_array (30.4391,3.54703) 101 +dictGet test_01037.dict_array (30.4424,-1.39435) 101 +dictGet test_01037.dict_array (30.4441,2.8463000000000003) 101 +dictGet test_01037.dict_array (30.4517,3.28117) 101 +dictGet test_01037.dict_array (30.4658,2.6928) 101 +dictGet test_01037.dict_array (30.4734,2.66161) 101 +dictGet test_01037.dict_array (30.4799,-1.07578) 101 +dictGet test_01037.dict_array (30.4837,-1.02486) 3501899 +dictGet test_01037.dict_array (30.485,1.06326) 3498024 +dictGet test_01037.dict_array (30.495,1.12306) 101 +dictGet test_01037.dict_array (30.501,2.27264) 101 +dictGet test_01037.dict_array (30.5027,1.99382) 101 +dictGet test_01037.dict_array (30.5194,-1.03943) 3501893 +dictGet test_01037.dict_array (30.5239,1.04328) 101 +dictGet test_01037.dict_array (30.528,3.82041) 101 +dictGet test_01037.dict_array (30.5299,-0.715248) 3498063 +dictGet test_01037.dict_array (30.5331,1.19603) 101 +dictGet test_01037.dict_array (30.535800000000002,2.71485) 101 +dictGet test_01037.dict_array (30.5405,0.804694) 3498023 +dictGet test_01037.dict_array (30.542,1.23739) 101 +dictGet test_01037.dict_array (30.5432,4.04189) 101 +dictGet test_01037.dict_array (30.5457,-0.956121) 3501893 +dictGet test_01037.dict_array (30.5506,3.07443) 101 +dictGet test_01037.dict_array (30.5539,3.87084) 101 +dictGet test_01037.dict_array (30.5578,3.78837) 101 +dictGet test_01037.dict_array (30.5588,0.966135) 3498022 +dictGet test_01037.dict_array (30.5637,2.5605) 101 +dictGet test_01037.dict_array (30.5647,-1.27328) 101 +dictGet test_01037.dict_array (30.5656,-0.0581332) 6088794 +dictGet test_01037.dict_array (30.5715,0.65755) 3498023 +dictGet test_01037.dict_array (30.5727,3.01604) 101 +dictGet test_01037.dict_array (30.5729,-0.976857) 3501893 +dictGet test_01037.dict_array (30.5751,0.60204) 3498023 +dictGet test_01037.dict_array (30.5854,3.02473) 101 +dictGet test_01037.dict_array (30.5866,0.174099) 6489998 +dictGet test_01037.dict_array (30.5947,0.875193) 3498023 +dictGet test_01037.dict_array (30.5992,-0.403901) 3498063 +dictGet test_01037.dict_array (30.6002,4.18891) 101 +dictGet test_01037.dict_array (30.6025,0.217712) 6489998 +dictGet test_01037.dict_array (30.6054,0.927203) 3498022 +dictGet test_01037.dict_array (30.6075,3.79359) 101 +dictGet test_01037.dict_array (30.6159,3.82773) 101 +dictGet test_01037.dict_array (30.627,3.84039) 101 +dictGet test_01037.dict_array (30.6308,0.77517) 3498023 +dictGet test_01037.dict_array (30.6338,0.179565) 6489998 +dictGet test_01037.dict_array (30.6461,1.3293599999999999) 101 +dictGet test_01037.dict_array (30.6674,-0.424547) 3498063 +dictGet test_01037.dict_array (30.669,1.76539) 101 +dictGet test_01037.dict_array (30.6788,4.01239) 101 +dictGet test_01037.dict_array (30.6864,3.59158) 101 +dictGet test_01037.dict_array (30.7049,-0.875413) 3501893 +dictGet test_01037.dict_array (30.705,1.3307) 101 +dictGet test_01037.dict_array (30.7063,-0.473192) 3498063 +dictGet test_01037.dict_array (30.7075,-1.1958199999999999) 101 +dictGet test_01037.dict_array (30.7101,-0.367562) 3498012 +dictGet test_01037.dict_array (30.7203,2.98725) 101 +dictGet test_01037.dict_array (30.7213,2.2745699999999998) 101 +dictGet test_01037.dict_array (30.7446,-0.334144) 3498012 +dictGet test_01037.dict_array (30.7468,3.82967) 101 +dictGet 
test_01037.dict_array (30.747,-0.384779) 3498012 +dictGet test_01037.dict_array (30.7681,0.904198) 3498022 +dictGet test_01037.dict_array (30.7757,1.78743) 101 +dictGet test_01037.dict_array (30.8021,-0.479212) 3498012 +dictGet test_01037.dict_array (30.8079,-1.40869) 101 +dictGet test_01037.dict_array (30.8206,-0.0608489) 3498012 +dictGet test_01037.dict_array (30.8218,0.43909) 3498023 +dictGet test_01037.dict_array (30.8239,0.10014) 3498012 +dictGet test_01037.dict_array (30.8282,4.15409) 101 +dictGet test_01037.dict_array (30.8288,-0.709528) 3501893 +dictGet test_01037.dict_array (30.8326,0.156011) 3498012 +dictGet test_01037.dict_array (30.8328,-1.03704) 101 +dictGet test_01037.dict_array (30.839,2.15528) 101 +dictGet test_01037.dict_array (30.8452,0.219377) 3498013 +dictGet test_01037.dict_array (30.8463,0.0515355) 3498012 +dictGet test_01037.dict_array (30.8526,2.06614) 101 +dictGet test_01037.dict_array (30.8566,0.517876) 3498023 +dictGet test_01037.dict_array (30.8588,-1.31738) 101 +dictGet test_01037.dict_array (30.8681,0.44207) 3498013 +dictGet test_01037.dict_array (30.8914,1.0072) 3498022 +dictGet test_01037.dict_array (30.897,0.483425) 3498013 +dictGet test_01037.dict_array (30.905,2.8731999999999998) 3501793 +dictGet test_01037.dict_array (30.9051,2.21956) 101 +dictGet test_01037.dict_array (30.9115,4.00663) 101 +dictGet test_01037.dict_array (30.9167,-0.834462) 3501893 +dictGet test_01037.dict_array (30.9252,-1.3289900000000001) 101 +dictGet test_01037.dict_array (30.9314,1.85384) 101 +dictGet test_01037.dict_array (30.9392,2.53236) 3501827 +dictGet test_01037.dict_array (30.9569,2.82038) 3501793 +dictGet test_01037.dict_array (30.9598,-0.641011) 3498012 +dictGet test_01037.dict_array (30.9601,-0.254928) 3498012 +dictGet test_01037.dict_array (30.9623,-1.3886) 101 +dictGet test_01037.dict_array (30.9707,0.888854) 3498022 +dictGet test_01037.dict_array (30.9766,2.81957) 3501793 +dictGet test_01037.dict_array (30.9775,2.69273) 3501793 +dictGet test_01037.dict_array (30.9821,0.587715) 3498013 +dictGet test_01037.dict_array (30.9887,4.0233) 101 +dictGet test_01037.dict_array (30.9914,0.259542) 3498013 +dictGet test_01037.dict_array (30.9986,-1.36832) 101 +dictGet test_01037.dict_array (31.008,0.628999) 3498013 +dictGet test_01037.dict_array (31.0168,-1.17462) 101 +dictGet test_01037.dict_array (31.0237,3.52547) 3501821 +dictGet test_01037.dict_array (31.0306,3.78522) 101 +dictGet test_01037.dict_array (31.0308,-0.72453) 3501893 +dictGet test_01037.dict_array (31.0463,2.41997) 3501825 +dictGet test_01037.dict_array (31.047,0.624184) 3498013 +dictGet test_01037.dict_array (31.0569,0.0706393) 3498015 +dictGet test_01037.dict_array (31.0583,1.3244099999999999) 3501926 +dictGet test_01037.dict_array (31.063,3.23861) 3501793 +dictGet test_01037.dict_array (31.068,0.695575) 3498022 +dictGet test_01037.dict_array (31.0687,1.85675) 101 +dictGet test_01037.dict_array (31.0692,0.254793) 3498014 +dictGet test_01037.dict_array (31.0766,0.828128) 3498022 +dictGet test_01037.dict_array (31.0833,0.0612782) 3498015 +dictGet test_01037.dict_array (31.0833,2.59748) 3501793 +dictGet test_01037.dict_array (31.0861,-1.3778299999999999) 101 +dictGet test_01037.dict_array (31.0874,3.07258) 3501793 +dictGet test_01037.dict_array (31.0882,1.4882) 3501926 +dictGet test_01037.dict_array (31.0924,3.42242) 3501821 +dictGet test_01037.dict_array (31.0927,2.67448) 3501793 +dictGet test_01037.dict_array (31.0936,1.12292) 3498022 +dictGet test_01037.dict_array (31.0952,-0.336928) 3498012 +dictGet 
test_01037.dict_array (31.0978,3.48482) 3501826 +dictGet test_01037.dict_array (31.1107,3.7513199999999998) 3501826 +dictGet test_01037.dict_array (31.1156,1.19171) 3501926 +dictGet test_01037.dict_array (31.1176,0.223509) 3498015 +dictGet test_01037.dict_array (31.1249,0.946838) 3498022 +dictGet test_01037.dict_array (31.1267,1.48983) 3501926 +dictGet test_01037.dict_array (31.138,-0.289981) 3501898 +dictGet test_01037.dict_array (31.1382,3.02904) 3501793 +dictGet test_01037.dict_array (31.1475,2.6178) 3501793 +dictGet test_01037.dict_array (31.1491,1.37873) 3501926 +dictGet test_01037.dict_array (31.1525,3.72105) 3501826 +dictGet test_01037.dict_array (31.1526,-1.4129800000000001) 101 +dictGet test_01037.dict_array (31.1526,-0.186457) 3501898 +dictGet test_01037.dict_array (31.1539,2.78789) 3501793 +dictGet test_01037.dict_array (31.1548,-1.08552) 101 +dictGet test_01037.dict_array (31.1567,-0.0768925) 3501898 +dictGet test_01037.dict_array (31.1613,1.49617) 3501926 +dictGet test_01037.dict_array (31.1653,1.03777) 3498022 +dictGet test_01037.dict_array (31.1662,3.4214700000000002) 3501826 +dictGet test_01037.dict_array (31.1672,-0.0813169) 3501898 +dictGet test_01037.dict_array (31.177,0.440843) 3498014 +dictGet test_01037.dict_array (31.1788,-0.737151) 3501893 +dictGet test_01037.dict_array (31.1856,-0.144396) 3501898 +dictGet test_01037.dict_array (31.1959,3.66813) 3501826 +dictGet test_01037.dict_array (31.1996,-0.353983) 3501898 +dictGet test_01037.dict_array (31.2019,2.86802) 3501793 +dictGet test_01037.dict_array (31.2087,2.31245) 3501825 +dictGet test_01037.dict_array (31.2125,3.2713200000000002) 3501793 +dictGet test_01037.dict_array (31.2137,-0.108129) 3501898 +dictGet test_01037.dict_array (31.216,3.9156) 101 +dictGet test_01037.dict_array (31.2201,-0.202141) 3501898 +dictGet test_01037.dict_array (31.2285,2.09058) 101 +dictGet test_01037.dict_array (31.2502,4.01526) 101 +dictGet test_01037.dict_array (31.2585,3.11524) 3501793 +dictGet test_01037.dict_array (31.2645,-0.620418) 3501890 +dictGet test_01037.dict_array (31.2684,2.74277) 3501793 +dictGet test_01037.dict_array (31.2821,-1.12772) 101 +dictGet test_01037.dict_array (31.2821,2.46769) 3501825 +dictGet test_01037.dict_array (31.2887,3.91396) 101 +dictGet test_01037.dict_array (31.295,1.49942) 3501926 +dictGet test_01037.dict_array (31.2997,3.46122) 3501826 +dictGet test_01037.dict_array (31.3017,3.3263) 3501826 +dictGet test_01037.dict_array (31.3022,3.16754) 3501793 +dictGet test_01037.dict_array (31.3048,0.364962) 3498014 +dictGet test_01037.dict_array (31.305,3.1967) 3501793 +dictGet test_01037.dict_array (31.3061,1.84303) 101 +dictGet test_01037.dict_array (31.3082,-0.173851) 3501898 +dictGet test_01037.dict_array (31.3315,3.90932) 101 +dictGet test_01037.dict_array (31.3351,2.80164) 3501793 +dictGet test_01037.dict_array (31.3388,0.168765) 3498015 +dictGet test_01037.dict_array (31.339,0.25535) 3498094 +dictGet test_01037.dict_array (31.3423,1.7036799999999999) 3501926 +dictGet test_01037.dict_array (31.349,0.386456) 3498014 +dictGet test_01037.dict_array (31.3558,-1.04336) 101 +dictGet test_01037.dict_array (31.3564,0.478876) 3498014 +dictGet test_01037.dict_array (31.3607,-0.0860507) 3498015 +dictGet test_01037.dict_array (31.3831,3.84469) 101 +dictGet test_01037.dict_array (31.3886,-0.731137) 3501890 +dictGet test_01037.dict_array (31.4043,-0.348907) 5457271 +dictGet test_01037.dict_array (31.4081,1.47391) 3501926 +dictGet test_01037.dict_array (31.4176,-0.583645) 5457271 +dictGet test_01037.dict_array 
(31.4177,1.36972) 3501926 +dictGet test_01037.dict_array (31.4182,0.958303) 3498022 +dictGet test_01037.dict_array (31.4199,3.1738) 3501793 +dictGet test_01037.dict_array (31.4221,2.74876) 3501825 +dictGet test_01037.dict_array (31.4301,-0.122643) 3498015 +dictGet test_01037.dict_array (31.4344,1.00661) 3498022 +dictGet test_01037.dict_array (31.4375,4.20304) 101 +dictGet test_01037.dict_array (31.4377,0.289608) 3498094 +dictGet test_01037.dict_array (31.4379,0.54744) 3498014 +dictGet test_01037.dict_array (31.4459,3.94945) 101 +dictGet test_01037.dict_array (31.4559,-0.345063) 5457271 +dictGet test_01037.dict_array (31.464,0.726129) 3498014 +dictGet test_01037.dict_array (31.4662,-0.299019) 3498015 +dictGet test_01037.dict_array (31.4671,1.9605299999999999) 3501794 +dictGet test_01037.dict_array (31.4673,-0.403676) 5457271 +dictGet test_01037.dict_array (31.4712,-0.237941) 3498015 +dictGet test_01037.dict_array (31.4816,0.120264) 3498015 +dictGet test_01037.dict_array (31.4875,0.323483) 3498014 +dictGet test_01037.dict_array (31.490099999999998,-0.338163) 5457271 +dictGet test_01037.dict_array (31.4932,0.517674) 3498014 +dictGet test_01037.dict_array (31.5112,1.9689299999999998) 3501794 +dictGet test_01037.dict_array (31.5122,2.92785) 3501791 +dictGet test_01037.dict_array (31.5151,0.166429) 3498094 +dictGet test_01037.dict_array (31.5174,2.94802) 3501791 +dictGet test_01037.dict_array (31.5182,4.18776) 101 +dictGet test_01037.dict_array (31.5238,1.18793) 3498003 +dictGet test_01037.dict_array (31.5271,3.07446) 3501791 +dictGet test_01037.dict_array (31.5393,1.58061) 3501794 +dictGet test_01037.dict_array (31.5421,3.13711) 3501791 +dictGet test_01037.dict_array (31.5479,2.39897) 3497970 +dictGet test_01037.dict_array (31.5519,0.99285) 3498003 +dictGet test_01037.dict_array (31.5685,3.47987) 3501824 +dictGet test_01037.dict_array (31.5959,0.437382) 3498014 +dictGet test_01037.dict_array (31.6003,0.194376) 3498094 +dictGet test_01037.dict_array (31.6026,2.15457) 3501794 +dictGet test_01037.dict_array (31.606,2.45365) 3497970 +dictGet test_01037.dict_array (31.6062,-0.453441) 3501890 +dictGet test_01037.dict_array (31.6107,1.35247) 3497974 +dictGet test_01037.dict_array (31.6155,3.85588) 101 +dictGet test_01037.dict_array (31.6222,2.03326) 3501794 +dictGet test_01037.dict_array (31.6231,-0.123059) 3498083 +dictGet test_01037.dict_array (31.6244,1.6885599999999998) 3497974 +dictGet test_01037.dict_array (31.6459,0.669716) 3498014 +dictGet test_01037.dict_array (31.6563,-0.0644741) 3498083 +dictGet test_01037.dict_array (31.6618,-0.551121) 3501890 +dictGet test_01037.dict_array (31.6725,-0.38922) 3498085 +dictGet test_01037.dict_array (31.6727,4.10336) 101 +dictGet test_01037.dict_array (31.6739,4.1391) 101 +dictGet test_01037.dict_array (31.6897,2.8694699999999997) 3501792 +dictGet test_01037.dict_array (31.6902,3.98792) 101 +dictGet test_01037.dict_array (31.6945,2.46687) 3497970 +dictGet test_01037.dict_array (31.6987,-1.3796) 101 +dictGet test_01037.dict_array (31.7012,2.34845) 3497970 +dictGet test_01037.dict_array (31.7036,0.0228348) 3501888 +dictGet test_01037.dict_array (31.7046,3.68111) 3501824 +dictGet test_01037.dict_array (31.7055,2.92556) 3501792 +dictGet test_01037.dict_array (31.7102,1.04532) 3498003 +dictGet test_01037.dict_array (31.7149,-0.443302) 3498085 +dictGet test_01037.dict_array (31.7195,2.99311) 3501791 +dictGet test_01037.dict_array (31.7274,0.166719) 3498094 +dictGet test_01037.dict_array (31.7565,-0.565382) 3498085 +dictGet test_01037.dict_array (31.7615,0.771626) 
3498014 +dictGet test_01037.dict_array (31.7739,1.8970099999999999) 3497974 +dictGet test_01037.dict_array (31.7848,1.2623199999999999) 3498003 +dictGet test_01037.dict_array (31.7912,-0.788599) 101 +dictGet test_01037.dict_array (31.8011,2.65853) 3497970 +dictGet test_01037.dict_array (31.8032,-0.0590108) 3501888 +dictGet test_01037.dict_array (31.8038,1.9618799999999998) 3497974 +dictGet test_01037.dict_array (31.8098,-1.46851) 101 +dictGet test_01037.dict_array (31.8131,3.41982) 3501791 +dictGet test_01037.dict_array (31.8169,3.31059) 3501791 +dictGet test_01037.dict_array (31.8202,-0.193692) 3501888 +dictGet test_01037.dict_array (31.8306,1.57586) 3497974 +dictGet test_01037.dict_array (31.8382,-0.787948) 101 +dictGet test_01037.dict_array (31.8433,2.49692) 3497970 +dictGet test_01037.dict_array (31.8436,2.41851) 3497970 +dictGet test_01037.dict_array (31.8563,-1.10787) 101 +dictGet test_01037.dict_array (31.8683,0.996504) 3498002 +dictGet test_01037.dict_array (31.8693,-0.828142) 101 +dictGet test_01037.dict_array (31.8723,1.08929) 3498003 +dictGet test_01037.dict_array (31.8737,0.881127) 3498002 +dictGet test_01037.dict_array (31.8881,-0.58441) 101 +dictGet test_01037.dict_array (31.9011,0.121349) 3498094 +dictGet test_01037.dict_array (31.9066,2.13045) 3497965 +dictGet test_01037.dict_array (31.9142,1.03368) 3498002 +dictGet test_01037.dict_array (31.9155,3.38363) 3501791 +dictGet test_01037.dict_array (31.9168,1.3166) 3498004 +dictGet test_01037.dict_array (31.9185,-1.11879) 101 +dictGet test_01037.dict_array (31.9186,-0.647948) 101 +dictGet test_01037.dict_array (31.9311,3.96928) 101 +dictGet test_01037.dict_array (31.9335,1.47048) 3497974 +dictGet test_01037.dict_array (31.9443,-1.36175) 101 +dictGet test_01037.dict_array (31.9481,2.34231) 3497970 +dictGet test_01037.dict_array (31.9526,1.36565) 3498004 +dictGet test_01037.dict_array (31.9629,2.5208399999999997) 3497970 +dictGet test_01037.dict_array (31.9765,0.975783) 3498002 +dictGet test_01037.dict_array (31.9923,3.31773) 3501791 +dictGet test_01037.dict_array (31.9994,0.972816) 3498002 +dictGet test_01037.dict_array (32.001,3.47425) 3501791 +dictGet test_01037.dict_array (32.0127,2.13874) 3497965 +dictGet test_01037.dict_array (32.0244,3.2092) 3501792 +dictGet test_01037.dict_array (32.029,1.18039) 3498004 +dictGet test_01037.dict_array (32.0315,0.566073) 3498095 +dictGet test_01037.dict_array (32.0354,1.0766499999999999) 3498004 +dictGet test_01037.dict_array (32.0399,-1.11576) 101 +dictGet test_01037.dict_array (32.053,2.16849) 3497965 +dictGet test_01037.dict_array (32.0542,0.042328) 3498096 +dictGet test_01037.dict_array (32.0576,2.47001) 3497970 +dictGet test_01037.dict_array (32.061,3.7498899999999997) 101 +dictGet test_01037.dict_array (32.0623,1.25134) 3498004 +dictGet test_01037.dict_array (32.0626,1.9611399999999999) 3497965 +dictGet test_01037.dict_array (32.0666,-0.0904247) 3498096 +dictGet test_01037.dict_array (32.0681,2.28442) 3497970 +dictGet test_01037.dict_array (32.0692,1.50869) 3497981 +dictGet test_01037.dict_array (32.0724,4.03314) 101 +dictGet test_01037.dict_array (32.0729,-0.064324) 101 +dictGet test_01037.dict_array (32.079,0.293758) 3498094 +dictGet test_01037.dict_array (32.0847,-1.19814) 101 +dictGet test_01037.dict_array (32.0974,-0.91927) 101 +dictGet test_01037.dict_array (32.0979,-0.736979) 101 +dictGet test_01037.dict_array (32.106,-1.33063) 101 +dictGet test_01037.dict_array (32.1189,0.246715) 3498094 +dictGet test_01037.dict_array (32.1207,4.00883) 101 +dictGet test_01037.dict_array 
(32.1396,1.12402) 3498004 +dictGet test_01037.dict_array (32.1413,1.5668) 3497981 +dictGet test_01037.dict_array (32.143,1.35559) 3498004 +dictGet test_01037.dict_array (32.1538,1.32881) 3498004 +dictGet test_01037.dict_array (32.1549,4.06552) 101 +dictGet test_01037.dict_array (32.1555,-0.79275) 101 +dictGet test_01037.dict_array (32.163,1.17733) 3498004 +dictGet test_01037.dict_array (32.1634,2.94273) 3501792 +dictGet test_01037.dict_array (32.1644,1.85666) 3497965 +dictGet test_01037.dict_array (32.1745,0.435458) 3498095 +dictGet test_01037.dict_array (32.1765,1.65149) 3497981 +dictGet test_01037.dict_array (32.1893,2.08924) 3497965 +dictGet test_01037.dict_array (32.2024,0.222191) 3498093 +dictGet test_01037.dict_array (32.2107,1.34379) 3497981 +dictGet test_01037.dict_array (32.2109,3.9018699999999997) 101 +dictGet test_01037.dict_array (32.2123,1.85233) 3497965 +dictGet test_01037.dict_array (32.2144,3.72534) 101 +dictGet test_01037.dict_array (32.2218,2.5386699999999998) 3497970 +dictGet test_01037.dict_array (32.2279,2.84267) 3497245 +dictGet test_01037.dict_array (32.2345,3.33295) 3501792 +dictGet test_01037.dict_array (32.2435,3.85283) 101 +dictGet test_01037.dict_array (32.2527,-0.480608) 101 +dictGet test_01037.dict_array (32.2566,-0.837882) 101 +dictGet test_01037.dict_array (32.2627,2.57708) 3497970 +dictGet test_01037.dict_array (32.2733,0.244931) 3498096 +dictGet test_01037.dict_array (32.2761,4.05808) 101 +dictGet test_01037.dict_array (32.2764,3.78472) 101 +dictGet test_01037.dict_array (32.2814,-1.26011) 101 +dictGet test_01037.dict_array (32.2861,3.02427) 3497245 +dictGet test_01037.dict_array (32.2924,0.928609) 3498004 +dictGet test_01037.dict_array (32.2963,-0.78543) 101 +dictGet test_01037.dict_array (32.3039,3.21175) 3501792 +dictGet test_01037.dict_array (32.3107,0.698287) 3498004 +dictGet test_01037.dict_array (32.3138,0.0595677) 3498106 +dictGet test_01037.dict_array (32.3339,0.707056) 3498004 +dictGet test_01037.dict_array (32.3351,0.415474) 3498106 +dictGet test_01037.dict_array (32.342,-0.681023) 101 +dictGet test_01037.dict_array (32.3463,1.83196) 3497126 +dictGet test_01037.dict_array (32.3494,2.43799) 3497114 +dictGet test_01037.dict_array (32.3524,3.47049) 3501822 +dictGet test_01037.dict_array (32.3531,2.33115) 3497114 +dictGet test_01037.dict_array (32.3602,0.116106) 3498106 +dictGet test_01037.dict_array (32.3612,1.1598) 3498004 +dictGet test_01037.dict_array (32.3689,3.34847) 3501822 +dictGet test_01037.dict_array (32.3695,0.734055) 3498004 +dictGet test_01037.dict_array (32.3825,3.85017) 101 +dictGet test_01037.dict_array (32.3835,-1.25491) 101 +dictGet test_01037.dict_array (32.4018,-0.728568) 101 +dictGet test_01037.dict_array (32.4044,2.96727) 3497245 +dictGet test_01037.dict_array (32.4101,2.9988) 3497245 +dictGet test_01037.dict_array (32.417,-1.12908) 101 +dictGet test_01037.dict_array (32.4172,4.1952) 101 +dictGet test_01037.dict_array (32.4239,2.49512) 3497245 +dictGet test_01037.dict_array (32.4258,4.05137) 101 +dictGet test_01037.dict_array (32.4264,-0.427357) 101 +dictGet test_01037.dict_array (32.4274,3.59377) 3501822 +dictGet test_01037.dict_array (32.4286,-1.24757) 101 +dictGet test_01037.dict_array (32.4294,3.0665) 3497245 +dictGet test_01037.dict_array (32.4333,-0.353347) 101 +dictGet test_01037.dict_array (32.4391,3.64421) 3501822 +dictGet test_01037.dict_array (32.4401,3.70635) 3501822 +dictGet test_01037.dict_array (32.45,1.68918) 3497126 +dictGet test_01037.dict_array (32.4507,-0.133471) 101 +dictGet test_01037.dict_array 
(32.4592,0.976458) 3498105 +dictGet test_01037.dict_array (32.4595,1.89135) 3497126 +dictGet test_01037.dict_array (32.4604,0.280248) 3498106 +dictGet test_01037.dict_array (32.4835,0.472731) 3498106 +dictGet test_01037.dict_array (32.4855,2.01938) 3497126 +dictGet test_01037.dict_array (32.4872,2.01697) 3497126 +dictGet test_01037.dict_array (32.4911,0.613106) 3498105 +dictGet test_01037.dict_array (32.4918,2.17834) 3497114 +dictGet test_01037.dict_array (32.4947,2.34595) 3497114 +dictGet test_01037.dict_array (32.5035,2.92234) 3497245 +dictGet test_01037.dict_array (32.5132,-0.331206) 101 +dictGet test_01037.dict_array (32.5156,-0.412604) 3501887 +dictGet test_01037.dict_array (32.5158,2.9067499999999997) 3497245 +dictGet test_01037.dict_array (32.5249,2.44519) 3497114 +dictGet test_01037.dict_array (32.5293,-0.790952) 101 +dictGet test_01037.dict_array (32.5319,3.96854) 101 +dictGet test_01037.dict_array (32.5518,3.6093) 3501822 +dictGet test_01037.dict_array (32.5541,3.5225400000000002) 3501822 +dictGet test_01037.dict_array (32.5569,0.816123) 3498105 +dictGet test_01037.dict_array (32.5646,1.9775) 3497126 +dictGet test_01037.dict_array (32.5733,3.81271) 101 +dictGet test_01037.dict_array (32.5767,0.948327) 3498105 +dictGet test_01037.dict_array (32.5971,1.76179) 3497126 +dictGet test_01037.dict_array (32.6035,-0.716157) 101 +dictGet test_01037.dict_array (32.6087,4.21614) 101 +dictGet test_01037.dict_array (32.6171,0.024481) 101 +dictGet test_01037.dict_array (32.6189,-0.775391) 101 +dictGet test_01037.dict_array (32.6198,2.92081) 3497167 +dictGet test_01037.dict_array (32.621,-0.970784) 101 +dictGet test_01037.dict_array (32.6266,0.650009) 3498105 +dictGet test_01037.dict_array (32.6315,2.15144) 3497126 +dictGet test_01037.dict_array (32.6385,-0.436803) 101 +dictGet test_01037.dict_array (32.6449,-0.191292) 101 +dictGet test_01037.dict_array (32.6535,2.10385) 3497126 +dictGet test_01037.dict_array (32.6592,3.49973) 3501822 +dictGet test_01037.dict_array (32.6598,2.5980600000000003) 3497114 +dictGet test_01037.dict_array (32.6612,2.95681) 3497167 +dictGet test_01037.dict_array (32.6636,-0.57235) 101 +dictGet test_01037.dict_array (32.669,-0.382702) 101 +dictGet test_01037.dict_array (32.6752,1.30748) 3497981 +dictGet test_01037.dict_array (32.6811,2.9559800000000003) 3497167 +dictGet test_01037.dict_array (32.6821,0.57336) 3498105 +dictGet test_01037.dict_array (32.6828,3.91304) 101 +dictGet test_01037.dict_array (32.6979,3.96868) 101 +dictGet test_01037.dict_array (32.6983,3.15784) 3497167 +dictGet test_01037.dict_array (32.7122,0.794293) 3498105 +dictGet test_01037.dict_array (32.7131,-0.847256) 101 +dictGet test_01037.dict_array (32.7219,0.883461) 3498105 +dictGet test_01037.dict_array (32.7228,1.78808) 3497126 +dictGet test_01037.dict_array (32.7273,-0.206908) 101 +dictGet test_01037.dict_array (32.7292,0.259331) 3501889 +dictGet test_01037.dict_array (32.7304,-1.38317) 101 +dictGet test_01037.dict_array (32.7353,1.01601) 3498105 +dictGet test_01037.dict_array (32.7354,4.17574) 101 +dictGet test_01037.dict_array (32.7357,-0.190194) 101 +dictGet test_01037.dict_array (32.7465,-1.37598) 101 +dictGet test_01037.dict_array (32.7494,-0.275675) 101 +dictGet test_01037.dict_array (32.7514,0.128951) 3501889 +dictGet test_01037.dict_array (32.753,3.44207) 3501822 +dictGet test_01037.dict_array (32.7686,2.11713) 3497126 +dictGet test_01037.dict_array (32.7694,1.47159) 3497388 +dictGet test_01037.dict_array (32.7768,0.0401042) 101 +dictGet test_01037.dict_array (32.781,-1.34283) 101 +dictGet 
test_01037.dict_array (32.7814,1.73876) 3497388 +dictGet test_01037.dict_array (32.7856,-1.06363) 101 +dictGet test_01037.dict_array (32.792699999999996,-1.1255600000000001) 101 +dictGet test_01037.dict_array (32.7941,-0.645447) 101 +dictGet test_01037.dict_array (32.7946,1.48889) 3497388 +dictGet test_01037.dict_array (32.797,0.791753) 3501889 +dictGet test_01037.dict_array (32.7982,-0.537798) 101 +dictGet test_01037.dict_array (32.8091,2.3611) 3490438 +dictGet test_01037.dict_array (32.81,1.7130800000000002) 3497388 +dictGet test_01037.dict_array (32.8174,-0.288322) 101 +dictGet test_01037.dict_array (32.823,1.6546699999999999) 3497388 +dictGet test_01037.dict_array (32.8233,1.62108) 3497388 +dictGet test_01037.dict_array (32.8428,-0.400045) 101 +dictGet test_01037.dict_array (32.8479,2.13598) 3490438 +dictGet test_01037.dict_array (32.8524,0.199902) 3501889 +dictGet test_01037.dict_array (32.8543,3.23553) 3501820 +dictGet test_01037.dict_array (32.8562,1.31371) 3498117 +dictGet test_01037.dict_array (32.87,1.44256) 3498117 +dictGet test_01037.dict_array (32.8789,2.38192) 3490438 +dictGet test_01037.dict_array (32.8812,2.20734) 3497128 +dictGet test_01037.dict_array (32.8815,-0.54427) 101 +dictGet test_01037.dict_array (32.8853,2.4859) 3497128 +dictGet test_01037.dict_array (32.8909,0.513964) 3501889 +dictGet test_01037.dict_array (32.9035,2.38999) 3490438 +dictGet test_01037.dict_array (32.9097,2.48131) 3497128 +dictGet test_01037.dict_array (32.928,-0.943269) 101 +dictGet test_01037.dict_array (32.9322,1.13165) 3498104 +dictGet test_01037.dict_array (32.9348,1.22606) 3498117 +dictGet test_01037.dict_array (32.9417,3.77998) 3501822 +dictGet test_01037.dict_array (32.9428,3.11936) 3497167 +dictGet test_01037.dict_array (32.9482,1.18092) 3498118 +dictGet test_01037.dict_array (32.9506,0.0609364) 101 +dictGet test_01037.dict_array (32.953,-0.828308) 101 +dictGet test_01037.dict_array (32.9593,3.5209099999999998) 3501822 +dictGet test_01037.dict_array (32.9617,2.07711) 3497128 +dictGet test_01037.dict_array (32.966,0.693749) 3498104 +dictGet test_01037.dict_array (32.9668,-0.716432) 101 +dictGet test_01037.dict_array (32.9702,1.98555) 3497127 +dictGet test_01037.dict_array (32.9782,1.73819) 3497388 +dictGet test_01037.dict_array (32.9805,3.71151) 3501822 +dictGet test_01037.dict_array (32.9821,2.97225) 3497167 +dictGet test_01037.dict_array (32.995,-0.830301) 101 +dictGet test_01037.dict_array (33.0234,0.770848) 3498104 +dictGet test_01037.dict_array (33.0312,-0.340964) 101 +dictGet test_01037.dict_array (33.0366,-0.756795) 101 +dictGet test_01037.dict_array (33.0438,0.812871) 3498118 +dictGet test_01037.dict_array (33.0455,1.84843) 3497127 +dictGet test_01037.dict_array (33.0498,0.0913292) 101 +dictGet test_01037.dict_array (33.0506,1.53739) 3497364 +dictGet test_01037.dict_array (33.0554,2.4265) 3497363 +dictGet test_01037.dict_array (33.0741,3.61332) 3501822 +dictGet test_01037.dict_array (33.0765,-0.179985) 101 +dictGet test_01037.dict_array (33.087,1.46465) 3497399 +dictGet test_01037.dict_array (33.0906,-0.620383) 101 +dictGet test_01037.dict_array (33.1047,-1.28027) 101 +dictGet test_01037.dict_array (33.1072,1.96303) 3497127 +dictGet test_01037.dict_array (33.1081,-0.897874) 101 +dictGet test_01037.dict_array (33.1122,1.8950200000000001) 3497127 +dictGet test_01037.dict_array (33.1237,2.63993) 3497165 +dictGet test_01037.dict_array (33.1238,0.753963) 3498118 +dictGet test_01037.dict_array (33.1257,0.495668) 3498102 +dictGet test_01037.dict_array (33.1258,1.78341) 3497364 +dictGet 
test_01037.dict_array (33.127,2.59646) 3497166 +dictGet test_01037.dict_array (33.1324,-1.23742) 101 +dictGet test_01037.dict_array (33.1359,3.83491) 101 +dictGet test_01037.dict_array (33.1628,-0.379588) 101 +dictGet test_01037.dict_array (33.1679,1.25601) 3498117 +dictGet test_01037.dict_array (33.1688,-1.35553) 101 +dictGet test_01037.dict_array (33.181,2.10943) 3497363 +dictGet test_01037.dict_array (33.1871,2.81171) 3497165 +dictGet test_01037.dict_array (33.1877,0.771297) 3498118 +dictGet test_01037.dict_array (33.1883,-0.204797) 101 +dictGet test_01037.dict_array (33.1886,3.27998) 3501820 +dictGet test_01037.dict_array (33.1955,0.708907) 3498118 +dictGet test_01037.dict_array (33.2044,-0.769275) 101 +dictGet test_01037.dict_array (33.2182,3.36103) 3501820 +dictGet test_01037.dict_array (33.2192,3.43586) 3501822 +dictGet test_01037.dict_array (33.2322,-0.916753) 101 +dictGet test_01037.dict_array (33.2359,-0.81321) 101 +dictGet test_01037.dict_array (33.238,0.635072) 3498111 +dictGet test_01037.dict_array (33.2398,3.02588) 3497165 +dictGet test_01037.dict_array (33.2469,2.35698) 3497363 +dictGet test_01037.dict_array (33.247,2.3327) 3497363 +dictGet test_01037.dict_array (33.2579,2.8027100000000003) 3497165 +dictGet test_01037.dict_array (33.2607,0.321082) 101 +dictGet test_01037.dict_array (33.2653,0.243336) 101 +dictGet test_01037.dict_array (33.2758,0.831836) 3498118 +dictGet test_01037.dict_array (33.2771,0.886536) 3498118 +dictGet test_01037.dict_array (33.2914,1.16026) 3498117 +dictGet test_01037.dict_array (33.2914,1.38882) 3497399 +dictGet test_01037.dict_array (33.2982,-1.16604) 101 +dictGet test_01037.dict_array (33.2985,0.842556) 3498112 +dictGet test_01037.dict_array (33.3005,2.8338900000000002) 3497165 +dictGet test_01037.dict_array (33.305,0.0969475) 101 +dictGet test_01037.dict_array (33.3072,3.82163) 101 +dictGet test_01037.dict_array (33.312,3.41475) 3501820 +dictGet test_01037.dict_array (33.3129,2.46048) 3497166 +dictGet test_01037.dict_array (33.3134,3.46863) 3501820 +dictGet test_01037.dict_array (33.3203,2.33139) 3497166 +dictGet test_01037.dict_array (33.324,0.433701) 101 +dictGet test_01037.dict_array (33.3338,2.44705) 3497166 +dictGet test_01037.dict_array (33.337,4.06475) 101 +dictGet test_01037.dict_array (33.3469,1.08172) 3498126 +dictGet test_01037.dict_array (33.3538,0.717896) 3498112 +dictGet test_01037.dict_array (33.3618,1.37899) 3497399 +dictGet test_01037.dict_array (33.3698,0.547744) 3501862 +dictGet test_01037.dict_array (33.3705,0.957619) 3498112 +dictGet test_01037.dict_array (33.3821,3.07258) 3497165 +dictGet test_01037.dict_array (33.3881,3.0626) 3497165 +dictGet test_01037.dict_array (33.393,-0.816186) 101 +dictGet test_01037.dict_array (33.3945,0.869508) 3498110 +dictGet test_01037.dict_array (33.4001,1.24186) 3498117 +dictGet test_01037.dict_array (33.4008,2.34911) 3497166 +dictGet test_01037.dict_array (33.4166,-1.2808899999999999) 101 +dictGet test_01037.dict_array (33.4167,3.0655) 3497165 +dictGet test_01037.dict_array (33.4204,2.81887) 3497165 +dictGet test_01037.dict_array (33.4211,1.71128) 3497400 +dictGet test_01037.dict_array (33.4237,2.91761) 3497165 +dictGet test_01037.dict_array (33.4266,1.5955599999999999) 3497399 +dictGet test_01037.dict_array (33.4353,-0.391392) 101 +dictGet test_01037.dict_array (33.4362,-0.134658) 101 +dictGet test_01037.dict_array (33.4386,0.15396) 101 +dictGet test_01037.dict_array (33.4421,-0.50712) 101 +dictGet test_01037.dict_array (33.452,0.915829) 3498126 +dictGet test_01037.dict_array 
(33.463,-0.0882717) 101 +dictGet test_01037.dict_array (33.464,-1.00949) 101 +dictGet test_01037.dict_array (33.4692,0.954092) 3498126 +dictGet test_01037.dict_array (33.4716,1.9538799999999998) 3497400 +dictGet test_01037.dict_array (33.4756,1.85836) 3497400 +dictGet test_01037.dict_array (33.4859,4.0751) 101 +dictGet test_01037.dict_array (33.4899,3.54193) 3501820 +dictGet test_01037.dict_array (33.4935,3.49794) 3501820 +dictGet test_01037.dict_array (33.494,-0.983356) 101 +dictGet test_01037.dict_array (33.4955,-1.28128) 101 +dictGet test_01037.dict_array (33.4965,-0.278687) 101 +dictGet test_01037.dict_array (33.4991,0.647491) 3498110 +dictGet test_01037.dict_array (33.5076,2.2272) 3497424 +dictGet test_01037.dict_array (33.5079,-0.498199) 101 +dictGet test_01037.dict_array (33.5157,0.535034) 3501862 +dictGet test_01037.dict_array (33.5171,2.49677) 3497166 +dictGet test_01037.dict_array (33.5255,2.4447200000000002) 3497166 +dictGet test_01037.dict_array (33.526,4.01194) 101 +dictGet test_01037.dict_array (33.5288,0.789434) 3498110 +dictGet test_01037.dict_array (33.5356,-1.17671) 101 +dictGet test_01037.dict_array (33.5402,1.49152) 3497399 +dictGet test_01037.dict_array (33.5418,3.45757) 3501820 +dictGet test_01037.dict_array (33.5428,1.90712) 3497400 +dictGet test_01037.dict_array (33.5556,-0.55741) 101 +dictGet test_01037.dict_array (33.5564,0.876858) 3498128 +dictGet test_01037.dict_array (33.5567,-0.10208) 101 +dictGet test_01037.dict_array (33.5645,-0.124824) 101 +dictGet test_01037.dict_array (33.5663,3.4872) 3501820 +dictGet test_01037.dict_array (33.5716,-0.0107611) 101 +dictGet test_01037.dict_array (33.578,3.55714) 3501820 +dictGet test_01037.dict_array (33.5826,-0.49076) 101 +dictGet test_01037.dict_array (33.5909,0.773737) 3498110 +dictGet test_01037.dict_array (33.5958,2.9619999999999997) 3497425 +dictGet test_01037.dict_array (33.6193,-0.919755) 101 +dictGet test_01037.dict_array (33.6313,0.652132) 3498110 +dictGet test_01037.dict_array (33.632,0.823351) 3498128 +dictGet test_01037.dict_array (33.66,2.18998) 3497424 +dictGet test_01037.dict_array (33.6621,0.535395) 3498135 +dictGet test_01037.dict_array (33.6726,3.19367) 3497438 +dictGet test_01037.dict_array (33.6912,1.74522) 3497400 +dictGet test_01037.dict_array (33.705,0.706397) 3498135 +dictGet test_01037.dict_array (33.7076,0.7622) 3498128 +dictGet test_01037.dict_array (33.7112,1.70187) 3497400 +dictGet test_01037.dict_array (33.7246,-1.14837) 101 +dictGet test_01037.dict_array (33.7326,2.62413) 3497425 +dictGet test_01037.dict_array (33.7332,2.82137) 3497425 +dictGet test_01037.dict_array (33.7434,0.394672) 3498135 +dictGet test_01037.dict_array (33.7443,1.54557) 3497398 +dictGet test_01037.dict_array (33.7506,1.57317) 3497398 +dictGet test_01037.dict_array (33.7526,1.8578999999999999) 3497424 +dictGet test_01037.dict_array (33.766,4.15013) 101 +dictGet test_01037.dict_array (33.7834,2.41789) 3497439 +dictGet test_01037.dict_array (33.7864,0.230935) 101 +dictGet test_01037.dict_array (33.7965,3.05709) 3497438 +dictGet test_01037.dict_array (33.7998,3.32881) 3497438 +dictGet test_01037.dict_array (33.8003,2.97338) 3497425 +dictGet test_01037.dict_array (33.8007,-1.08962) 101 +dictGet test_01037.dict_array (33.8022,-0.139488) 101 +dictGet test_01037.dict_array (33.8065,2.70857) 3497425 +dictGet test_01037.dict_array (33.8169,-0.607788) 101 +dictGet test_01037.dict_array (33.8203,0.108512) 3501863 +dictGet test_01037.dict_array (33.8231,-1.03449) 101 +dictGet test_01037.dict_array (33.8312,3.49458) 3501829 +dictGet 
test_01037.dict_array (33.8342,0.297518) 3501863 +dictGet test_01037.dict_array (33.8352,0.165872) 101 +dictGet test_01037.dict_array (33.8354,1.87277) 3497424 +dictGet test_01037.dict_array (33.8371,1.60103) 3497398 +dictGet test_01037.dict_array (33.8387,1.9968) 3497424 +dictGet test_01037.dict_array (33.8403,3.5805) 3501829 +dictGet test_01037.dict_array (33.8414,-0.703067) 101 +dictGet test_01037.dict_array (33.844,-0.179472) 101 +dictGet test_01037.dict_array (33.8468,3.40137) 3501829 +dictGet test_01037.dict_array (33.8509,4.15334) 101 +dictGet test_01037.dict_array (33.8539,2.38339) 3497439 +dictGet test_01037.dict_array (33.858,-1.3122500000000001) 101 +dictGet test_01037.dict_array (33.859,3.72626) 3501829 +dictGet test_01037.dict_array (33.8616,2.24433) 3497424 +dictGet test_01037.dict_array (33.8621,3.01035) 3497438 +dictGet test_01037.dict_array (33.8623,1.17559) 3498129 +dictGet test_01037.dict_array (33.8682,2.706) 3497425 +dictGet test_01037.dict_array (33.8684,0.189231) 3501863 +dictGet test_01037.dict_array (33.872,1.93574) 3497424 +dictGet test_01037.dict_array (33.8844,3.80404) 3501829 +dictGet test_01037.dict_array (33.8888,0.594884) 3498135 +dictGet test_01037.dict_array (33.8946,2.74161) 3497438 +dictGet test_01037.dict_array (33.9023,0.6239) 3498135 +dictGet test_01037.dict_array (33.9057,0.873222) 3498136 +dictGet test_01037.dict_array (33.9157,-1.26607) 101 +dictGet test_01037.dict_array (33.92,2.06848) 3497397 +dictGet test_01037.dict_array (33.9298,-0.00526229) 101 +dictGet test_01037.dict_array (33.932,3.07063) 3497438 +dictGet test_01037.dict_array (33.9322,0.629385) 3501864 +dictGet test_01037.dict_array (33.9367,-1.41955) 101 +dictGet test_01037.dict_array (33.937,1.42532) 3498173 +dictGet test_01037.dict_array (33.9375,1.1467100000000001) 3498159 +dictGet test_01037.dict_array (33.9434,-1.05739) 101 +dictGet test_01037.dict_array (33.9477,3.34809) 3501829 +dictGet test_01037.dict_array (33.95,2.21715) 3497397 +dictGet test_01037.dict_array (33.955799999999996,0.305176) 3501859 +dictGet test_01037.dict_array (33.9686,-0.28273) 101 +dictGet test_01037.dict_array (33.9703,4.1255) 3501829 +dictGet test_01037.dict_array (33.9707,3.08199) 3497438 +dictGet test_01037.dict_array (33.9754,1.06203) 3498159 +dictGet test_01037.dict_array (33.9757,3.72468) 3501829 +dictGet test_01037.dict_array (33.9775,-0.0440599) 101 +dictGet test_01037.dict_array (33.9777,-0.251484) 101 +dictGet test_01037.dict_array (33.9789,-0.339374) 101 +dictGet test_01037.dict_array (33.9849,2.54515) 3497425 +dictGet test_01037.dict_array (33.9885,-0.318557) 101 +dictGet test_01037.dict_array (33.9977,1.07175) 3498159 +dictGet test_01037.dict_array (33.9984,-0.700517) 101 +dictGet test_01037.dict_array (34.0149,3.53338) 3501829 +dictGet test_01037.dict_array (34.0173,3.39155) 3501829 +dictGet test_01037.dict_array (34.0317,3.9579) 3501829 +dictGet test_01037.dict_array (34.0369,3.83612) 3501829 +dictGet test_01037.dict_array (34.043,-0.0887221) 101 +dictGet test_01037.dict_array (34.0487,1.14252) 3498159 +dictGet test_01037.dict_array (34.052,1.74832) 3497397 +dictGet test_01037.dict_array (34.0711,-0.898071) 101 +dictGet test_01037.dict_array (34.0747,1.55057) 3498173 +dictGet test_01037.dict_array (34.0803,3.16763) 3497438 +dictGet test_01037.dict_array (34.0872,3.75555) 3501829 +dictGet test_01037.dict_array (34.0965,1.62038) 3498173 +dictGet test_01037.dict_array (34.0977,-0.412691) 101 +dictGet test_01037.dict_array (34.0986,0.0294206) 101 +dictGet test_01037.dict_array (34.1072,3.15823) 
3497438 +dictGet test_01037.dict_array (34.1092,3.09599) 3497438 +dictGet test_01037.dict_array (34.1206,1.04637) 3498160 +dictGet test_01037.dict_array (34.1209,3.13826) 3497438 +dictGet test_01037.dict_array (34.1265,3.95881) 3501829 +dictGet test_01037.dict_array (34.1286,-0.539319) 101 +dictGet test_01037.dict_array (34.1358,3.67451) 3501829 +dictGet test_01037.dict_array (34.1428,0.136115) 101 +dictGet test_01037.dict_array (34.157,1.73522) 3497397 +dictGet test_01037.dict_array (34.1581,1.48001) 3498172 +dictGet test_01037.dict_array (34.1682,3.42373) 3501829 +dictGet test_01037.dict_array (34.1683,-1.26511) 101 +dictGet test_01037.dict_array (34.1684,4.20007) 101 +dictGet test_01037.dict_array (34.1854,3.32089) 3501829 +dictGet test_01037.dict_array (34.2022,0.749536) 3501864 +dictGet test_01037.dict_array (34.2044,3.04865) 3497438 +dictGet test_01037.dict_array (34.22,-0.500055) 101 +dictGet test_01037.dict_array (34.2249,0.743775) 3501864 +dictGet test_01037.dict_array (34.2254,1.34702) 3498172 +dictGet test_01037.dict_array (34.2355,-0.898843) 101 +dictGet test_01037.dict_array (34.2394,2.0203699999999998) 3497439 +dictGet test_01037.dict_array (34.2466,1.83785) 3498251 +dictGet test_01037.dict_array (34.247,4.09563) 101 +dictGet test_01037.dict_array (34.2508,2.61312) 3497439 +dictGet test_01037.dict_array (34.2517,1.69642) 3498251 +dictGet test_01037.dict_array (34.2564,4.13033) 101 +dictGet test_01037.dict_array (34.2574,4.18928) 101 +dictGet test_01037.dict_array (34.2614,-0.478719) 101 +dictGet test_01037.dict_array (34.2625,2.38088) 3497439 +dictGet test_01037.dict_array (34.2666,3.1503) 3501829 +dictGet test_01037.dict_array (34.271,4.02223) 101 +dictGet test_01037.dict_array (34.2727,0.514755) 101 +dictGet test_01037.dict_array (34.278,1.98929) 3497439 +dictGet test_01037.dict_array (34.2798,-0.199208) 101 +dictGet test_01037.dict_array (34.2804,2.05184) 3497439 +dictGet test_01037.dict_array (34.2945,-1.11051) 101 +dictGet test_01037.dict_array (34.3168,-0.0829721) 101 +dictGet test_01037.dict_array (34.3345,3.4358) 3501829 +dictGet test_01037.dict_array (34.3377,1.13527) 3498162 +dictGet test_01037.dict_array (34.3383,1.27891) 3498161 +dictGet test_01037.dict_array (34.3391,1.47945) 3498161 +dictGet test_01037.dict_array (34.3441,0.627014) 101 +dictGet test_01037.dict_array (34.347,2.4853) 3497439 +dictGet test_01037.dict_array (34.3514,2.16247) 3497439 +dictGet test_01037.dict_array (34.3627,2.64533) 3497439 +dictGet test_01037.dict_array (34.3682,-0.227501) 101 +dictGet test_01037.dict_array (34.3756,4.21248) 101 +dictGet test_01037.dict_array (34.379,3.96604) 101 +dictGet test_01037.dict_array (34.3827,1.7518) 3498251 +dictGet test_01037.dict_array (34.3912,2.8834) 3501830 +dictGet test_01037.dict_array (34.3919,0.668829) 101 +dictGet test_01037.dict_array (34.3949,2.00338) 3497439 +dictGet test_01037.dict_array (34.3987,0.557268) 101 +dictGet test_01037.dict_array (34.4111,0.768558) 101 +dictGet test_01037.dict_array (34.4119,2.8742) 3501830 +dictGet test_01037.dict_array (34.416,3.50841) 3501829 +dictGet test_01037.dict_array (34.4212,1.24916) 3498161 +dictGet test_01037.dict_array (34.4251,0.457029) 101 +dictGet test_01037.dict_array (34.4274,-0.902559) 101 +dictGet test_01037.dict_array (34.4325,4.03159) 101 +dictGet test_01037.dict_array (34.438,1.63994) 3498251 +dictGet test_01037.dict_array (34.4403,-0.177594) 101 +dictGet test_01037.dict_array (34.4421,0.726712) 101 +dictGet test_01037.dict_array (34.4517,2.98611) 3501830 +dictGet test_01037.dict_array 
(34.4658,-1.312) 101 +dictGet test_01037.dict_array (34.4732,-0.0681338) 101 +dictGet test_01037.dict_array (34.4752,2.81646) 3501830 +dictGet test_01037.dict_array (34.4914,2.3858) 3497439 +dictGet test_01037.dict_array (34.4923,0.855231) 101 +dictGet test_01037.dict_array (34.5235,1.78468) 3498251 +dictGet test_01037.dict_array (34.5305,4.10608) 101 +dictGet test_01037.dict_array (34.5389,0.621937) 101 +dictGet test_01037.dict_array (34.5406,3.17145) 101 +dictGet test_01037.dict_array (34.5434,-0.56306) 101 +dictGet test_01037.dict_array (34.5449,3.13311) 3501829 +dictGet test_01037.dict_array (34.5491,2.31572) 3497439 +dictGet test_01037.dict_array (34.5539,2.94028) 3501830 +dictGet test_01037.dict_array (34.5546,-0.208825) 101 +dictGet test_01037.dict_array (34.5549,3.78486) 101 +dictGet test_01037.dict_array (34.5676,0.307148) 101 +dictGet test_01037.dict_array (34.5743,1.5217399999999999) 3501838 +dictGet test_01037.dict_array (34.5775,3.48046) 101 +dictGet test_01037.dict_array (34.5815,2.5243700000000002) 3501830 +dictGet test_01037.dict_array (34.5841,4.21191) 101 +dictGet test_01037.dict_array (34.5887,2.65083) 3501830 +dictGet test_01037.dict_array (34.5937,3.2143) 101 +dictGet test_01037.dict_array (34.6013,-1.0612) 101 +dictGet test_01037.dict_array (34.6089,1.36066) 3501838 +dictGet test_01037.dict_array (34.6103,3.40227) 101 +dictGet test_01037.dict_array (34.6128,1.92276) 3498251 +dictGet test_01037.dict_array (34.6175,2.43627) 3498251 +dictGet test_01037.dict_array (34.6209,3.43776) 101 +dictGet test_01037.dict_array (34.6234,2.60237) 3501830 +dictGet test_01037.dict_array (34.6275,3.52479) 101 +dictGet test_01037.dict_array (34.635,0.568558) 101 +dictGet test_01037.dict_array (34.6373,2.37692) 3498251 +dictGet test_01037.dict_array (34.6375,3.52234) 101 +dictGet test_01037.dict_array (34.6426,2.12397) 3498251 +dictGet test_01037.dict_array (34.6513,2.80915) 3501830 +dictGet test_01037.dict_array (34.6632,2.30039) 3498251 +dictGet test_01037.dict_array (34.6691,1.86582) 3498251 +dictGet test_01037.dict_array (34.6739,0.15342) 101 +dictGet test_01037.dict_array (34.6825,0.0499679) 101 +dictGet test_01037.dict_array (34.6893,0.454326) 101 +dictGet test_01037.dict_array (34.6957,-0.358598) 101 +dictGet test_01037.dict_array (34.6986,0.562679) 101 +dictGet test_01037.dict_array (34.712,1.12114) 101 +dictGet test_01037.dict_array (34.7126,-0.0057301) 101 +dictGet test_01037.dict_array (34.7137,0.0248501) 101 +dictGet test_01037.dict_array (34.7162,1.15623) 101 +dictGet test_01037.dict_array (34.7258,3.95142) 101 +dictGet test_01037.dict_array (34.7347,3.5232099999999997) 101 +dictGet test_01037.dict_array (34.7363,2.23374) 3501830 +dictGet test_01037.dict_array (34.7375,0.397841) 101 +dictGet test_01037.dict_array (34.7423,3.09198) 101 +dictGet test_01037.dict_array (34.7452,3.09029) 101 +dictGet test_01037.dict_array (34.7539,-1.06943) 101 +dictGet test_01037.dict_array (34.7733,-0.00912717) 101 +dictGet test_01037.dict_array (34.774,2.71088) 3501830 +dictGet test_01037.dict_array (34.7771,1.46009) 3501835 +dictGet test_01037.dict_array (34.7782,-1.28308) 101 +dictGet test_01037.dict_array (34.7924,3.63564) 101 +dictGet test_01037.dict_array (34.7939,-0.416676) 101 +dictGet test_01037.dict_array (34.7964,-0.401773) 101 +dictGet test_01037.dict_array (34.7974,0.0286873) 101 +dictGet test_01037.dict_array (34.7975,3.05965) 101 +dictGet test_01037.dict_array (34.8037,3.07263) 101 +dictGet test_01037.dict_array (34.8254,-0.390284) 101 +dictGet test_01037.dict_array 
(34.828,1.91869) 3498251
+dictGet test_01037.dict_array (34.8289,3.71058) 101
+dictGet test_01037.dict_array (34.8403,2.14606) 3501835
+dictGet test_01037.dict_array (34.8437,2.20617) 3501830
+dictGet test_01037.dict_array (34.8469,2.38435) 3501830
+dictGet test_01037.dict_array (34.86,1.45705) 101
+dictGet test_01037.dict_array (34.8612,0.914248) 101
+dictGet test_01037.dict_array (34.8663,3.4215400000000002) 101
+dictGet test_01037.dict_array (34.8724,-0.375144) 101
+dictGet test_01037.dict_array (34.8795,3.29317) 101
+dictGet test_01037.dict_array (34.8823,1.21988) 101
+dictGet test_01037.dict_array (34.8834,1.07657) 101
+dictGet test_01037.dict_array (34.8837,0.157648) 101
+dictGet test_01037.dict_array (34.8871,-0.9755) 101
+dictGet test_01037.dict_array (34.8871,1.8943699999999999) 3501835
+dictGet test_01037.dict_array (34.889,3.36756) 101
+dictGet test_01037.dict_array (34.8907,1.24874) 101
+dictGet test_01037.dict_array (34.8965,3.13508) 101
+dictGet test_01037.dict_array (34.9042,2.62092) 101
+dictGet test_01037.dict_array (34.9055,-0.0448967) 101
+dictGet test_01037.dict_array (34.9122,0.110576) 101
+dictGet test_01037.dict_array (34.9228,3.60183) 101
+dictGet test_01037.dict_array (34.9237,1.21715) 101
+dictGet test_01037.dict_array (34.9296,1.70459) 3501835
+dictGet test_01037.dict_array (34.941,-1.14663) 101
+dictGet test_01037.dict_array (34.9448,1.18923) 101
+dictGet test_01037.dict_array (34.9462,3.81678) 101
+dictGet test_01037.dict_array (34.9466,0.593463) 101
+dictGet test_01037.dict_array (34.9485,0.150307) 101
+dictGet test_01037.dict_array (34.9542,0.487238) 101
+dictGet test_01037.dict_array (34.9559,2.03473) 3501835
+dictGet test_01037.dict_array (34.9671,-0.960225) 101
+dictGet test_01037.dict_array (34.9711,2.63444) 101
+dictGet test_01037.dict_array (34.9892,0.354775) 101
+dictGet test_01037.dict_array (34.9907,1.40724) 101
+dictGet test_01037.dict_array (34.9916,-0.00173097) 101
+dictGet test_01037.dict_array (34.9919,2.06167) 101
diff --git a/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.reference b/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh b/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh
new file mode 100755
index 00000000000..ab5e5035632
--- /dev/null
+++ b/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh
@@ -0,0 +1,67 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+. $CURDIR/../shell_config.sh
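+# Loads a large point set, builds the polygon dictionary with each index layout,
+# and diffs the dictGet results against the pre-computed answer file.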
+
+TMP_DIR="/tmp"
+
+declare -a SearchTypes=("POLYGON_INDEX_EACH" "POLYGON_INDEX_CELL")
+
+tar -xf ${CURDIR}/01037_test_data_perf.tar.gz -C ${CURDIR}
+
+$CLICKHOUSE_CLIENT -n --query="
+DROP DATABASE IF EXISTS test_01037;
+CREATE DATABASE test_01037 Engine = Ordinary;
+DROP TABLE IF EXISTS test_01037.points;
+CREATE TABLE test_01037.points (x Float64, y Float64) ENGINE = Memory;
+"
+
+$CLICKHOUSE_CLIENT --query="INSERT INTO test_01037.points FORMAT TSV" --max_insert_block_size=100000 < "${CURDIR}/01037_point_data"
+
+rm ${CURDIR}/01037_point_data
+
+$CLICKHOUSE_CLIENT -n --query="
+DROP TABLE IF EXISTS test_01037.polygons_array;
+
+CREATE TABLE test_01037.polygons_array
+(
+    key Array(Array(Array(Array(Float64)))),
+    name String,
+    value UInt64
+)
+ENGINE = Memory;
+"
+
+$CLICKHOUSE_CLIENT --query="INSERT INTO test_01037.polygons_array FORMAT JSONEachRow" --max_insert_block_size=100000 < "${CURDIR}/01037_polygon_data"
+
+rm ${CURDIR}/01037_polygon_data
+
+for type in ${SearchTypes[@]};
+do
+    outputFile="${TMP_DIR}/results${type}.out"
+
+    $CLICKHOUSE_CLIENT -n --query="
+    DROP DICTIONARY IF EXISTS test_01037.dict_array;
+
+    CREATE DICTIONARY test_01037.dict_array
+    (
+        key Array(Array(Array(Array(Float64)))),
+        name String DEFAULT 'qqq',
+        value UInt64 DEFAULT 101
+    )
+    PRIMARY KEY key
+    SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'polygons_array' PASSWORD '' DB 'test_01037'))
+    LIFETIME(0)
+    LAYOUT($type());
+
+    select 'dictGet', 'test_01037.dict_array' as dict_name, tuple(x, y) as key,
+        dictGet(dict_name, 'value', key) from test_01037.points order by x, y;
+    " > $outputFile
+
+    diff -q "${CURDIR}/01037_polygon_dicts_correctness_fast.ans" "$outputFile"
+done
+
+$CLICKHOUSE_CLIENT -n --query="
+DROP TABLE test_01037.points;
+DROP DATABASE test_01037;
+"
diff --git a/tests/queries/0_stateless/01037_polygon_dict_multi_polygons.reference b/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.ans
similarity index 91%
rename from tests/queries/0_stateless/01037_polygon_dict_multi_polygons.reference
rename to tests/queries/0_stateless/01037_polygon_dicts_simple_functions.ans
index 80af8a52400..dfad14fb113 100644
--- a/tests/queries/0_stateless/01037_polygon_dict_multi_polygons.reference
+++ b/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.ans
@@ -81,19 +81,12 @@ dictHas test_01037.dict_array (-1,0) 1
 dictHas test_01037.dict_array (-0.1,0) 1
 dictHas test_01037.dict_array (0,-2) 1
 dictHas test_01037.dict_array (0,-1.1) 1
-dictHas test_01037.dict_array (0,-1) 1
-dictHas test_01037.dict_array (0,0) 1
-dictHas test_01037.dict_array (0,1) 1
 dictHas test_01037.dict_array (0,1.1) 1
 dictHas test_01037.dict_array (0,2) 1
 dictHas test_01037.dict_array (0.1,0) 1
 dictHas test_01037.dict_array (0.99,2.99) 1
 dictHas test_01037.dict_array (1,0) 1
-dictHas test_01037.dict_array (1,1) 1
-dictHas test_01037.dict_array (1,3) 1
 dictHas test_01037.dict_array (3,3) 1
-dictHas test_01037.dict_array (5,1) 1
-dictHas test_01037.dict_array (5,5) 1
 dictHas test_01037.dict_array (5,6) 1
 dictHas test_01037.dict_array (7.01,7.01) 0
 dictHas test_01037.dict_tuple (-100,-42) 0
@@ -101,18 +94,11 @@ dictHas test_01037.dict_tuple (-1,0) 1
 dictHas test_01037.dict_tuple (-0.1,0) 1
 dictHas test_01037.dict_tuple (0,-2) 1
 dictHas test_01037.dict_tuple (0,-1.1) 1
-dictHas test_01037.dict_tuple (0,-1) 1
-dictHas test_01037.dict_tuple (0,0) 1
-dictHas test_01037.dict_tuple (0,1) 1
 dictHas test_01037.dict_tuple (0,1.1) 1
 dictHas test_01037.dict_tuple (0,2) 1
 dictHas test_01037.dict_tuple (0.1,0) 1
 dictHas test_01037.dict_tuple (0.99,2.99) 1
 dictHas test_01037.dict_tuple (1,0) 1
-dictHas test_01037.dict_tuple (1,1) 1
-dictHas test_01037.dict_tuple (1,3) 1
 dictHas test_01037.dict_tuple (3,3) 1
-dictHas test_01037.dict_tuple (5,1) 1
-dictHas test_01037.dict_tuple (5,5) 1
 dictHas test_01037.dict_tuple (5,6) 1
 dictHas test_01037.dict_tuple (7.01,7.01) 0
diff --git a/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.reference b/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/01037_polygon_dict_multi_polygons.sql b/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh
old mode 100644
new mode 100755
similarity index 52%
rename from tests/queries/0_stateless/01037_polygon_dict_multi_polygons.sql
rename to tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh
index 9750ec04daf..0a407dffb6a
--- a/tests/queries/0_stateless/01037_polygon_dict_multi_polygons.sql
+++ b/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh
@@ -1,10 +1,15 @@
-SET send_logs_level = 'fatal';
+#!/usr/bin/env bash
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+. $CURDIR/../shell_config.sh
+
+TMP_DIR="/tmp"
+
+$CLICKHOUSE_CLIENT -n --query="
 DROP DATABASE IF EXISTS test_01037;
 CREATE DATABASE test_01037 Engine = Ordinary;
-DROP DICTIONARY IF EXISTS test_01037.dict_array;
 DROP TABLE IF EXISTS test_01037.polygons_array;
 CREATE TABLE test_01037.polygons_array (key Array(Array(Array(Array(Float64)))), name String, value UInt64) ENGINE = Memory;
@@ -15,18 +20,6 @@ INSERT INTO test_01037.polygons_array VALUES ([[[[-1, 1], [1, 1], [1, 3], [-1, 3
 INSERT INTO test_01037.polygons_array VALUES ([[[[-3, 1], [-3, -1], [0, -1], [0, 1]]]], 'Click South', 423);
 INSERT INTO test_01037.polygons_array VALUES ([[[[-1, -1], [1, -1], [1, -3], [-1, -3]]]], 'Click West', 424);
-CREATE DICTIONARY test_01037.dict_array
-(
-    key Array(Array(Array(Array(Float64)))),
-    name String DEFAULT 'qqq',
-    value UInt64 DEFAULT 101
-)
-PRIMARY KEY key
-SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'polygons_array' PASSWORD '' DB 'test_01037'))
-LIFETIME(MIN 1 MAX 10)
-LAYOUT(POLYGON());
-
-DROP DICTIONARY IF EXISTS test_01037.dict_tuple;
 DROP TABLE IF EXISTS test_01037.polygons_tuple;
 CREATE TABLE test_01037.polygons_tuple (key Array(Array(Array(Tuple(Float64, Float64)))), name String, value UInt64) ENGINE = Memory;
@@ -37,17 +30,6 @@ INSERT INTO test_01037.polygons_tuple VALUES ([[[(-1, 1), (1, 1), (1, 3), (-1, 3
 INSERT INTO test_01037.polygons_tuple VALUES ([[[(-3, 1), (-3, -1), (0, -1), (0, 1)]]], 'Click South', 423);
 INSERT INTO test_01037.polygons_tuple VALUES ([[[(-1, -1), (1, -1), (1, -3), (-1, -3)]]], 'Click West', 424);
-CREATE DICTIONARY test_01037.dict_tuple
-(
-    key Array(Array(Array(Tuple(Float64, Float64)))),
-    name String DEFAULT 'qqq',
-    value UInt64 DEFAULT 101
-)
-PRIMARY KEY key
-SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'polygons_tuple' PASSWORD '' DB 'test_01037'))
-LIFETIME(MIN 1 MAX 10)
-LAYOUT(POLYGON());
-
 DROP TABLE IF EXISTS test_01037.points;
 CREATE TABLE test_01037.points (x Float64, y Float64, def_i UInt64, def_s String) ENGINE = Memory;
@@ -58,50 +40,79 @@ INSERT INTO test_01037.points VALUES (0.0, -1.1, 115, 'aat');
 INSERT INTO test_01037.points VALUES (3.0, 3.0, 22, 'bb');
 INSERT INTO test_01037.points VALUES (5.0, 6.0, 33, 'cc');
 INSERT INTO test_01037.points VALUES (-100.0, -42.0, 44, 'dd');
-INSERT INTO test_01037.points VALUES (7.01, 7.01, 55, 'ee')
+INSERT INTO test_01037.points VALUES (7.01, 7.01, 55, 'ee');
 INSERT INTO test_01037.points VALUES (0.99, 2.99, 66, 'ee');
 INSERT INTO test_01037.points VALUES (1.0, 0.0, 771, 'ffa');
 INSERT INTO test_01037.points VALUES (-1.0, 0.0, 772, 'ffb');
 INSERT INTO test_01037.points VALUES (0.0, 2.0, 773, 'ffc');
 INSERT INTO test_01037.points VALUES (0.0, -2.0, 774, 'ffd');
+"
-select 'dictGet', 'test_01037.dict_array' as dict_name, tuple(x, y) as key,
-    dictGet(dict_name, 'name', key),
-    dictGet(dict_name, 'value', key) from test_01037.points order by x, y;
-select 'dictGetOrDefault', 'test_01037.dict_array' as dict_name, tuple(x, y) as key,
-    dictGetOrDefault(dict_name, 'name', key, 'www'),
-    dictGetOrDefault(dict_name, 'value', key, toUInt64(1234)) from test_01037.points order by x, y;
-select 'dictGetOrDefault', 'test_01037.dict_array' as dict_name, tuple(x, y) as key,
-    dictGetOrDefault(dict_name, 'name', key, def_s),
-    dictGetOrDefault(dict_name, 'value', key, def_i) from test_01037.points order by x, y;
-select 'dictGet', 'test_01037.dict_tuple' as dict_name, tuple(x, y) as key,
-    dictGet(dict_name, 'name', key),
-    dictGet(dict_name, 'value', key) from test_01037.points order by x, y;
-select 'dictGetOrDefault', 'test_01037.dict_tuple' as dict_name, tuple(x, y) as key,
-    dictGetOrDefault(dict_name, 'name', key, 'www'),
-    dictGetOrDefault(dict_name, 'value', key, toUInt64(1234)) from test_01037.points order by x, y;
-select 'dictGetOrDefault', 'test_01037.dict_tuple' as dict_name, tuple(x, y) as key,
-    dictGetOrDefault(dict_name, 'name', key, def_s),
-    dictGetOrDefault(dict_name, 'value', key, def_i) from test_01037.points order by x, y;
+declare -a SearchTypes=("POLYGON" "POLYGON_SIMPLE" "POLYGON_INDEX_EACH" "POLYGON_INDEX_CELL")
-INSERT INTO test_01037.points VALUES (5.0, 5.0, 0, '');
-INSERT INTO test_01037.points VALUES (5.0, 1.0, 0, '');
-INSERT INTO test_01037.points VALUES (1.0, 3.0, 0, '');
-INSERT INTO test_01037.points VALUES (0.0, 0.0, 0, '');
-INSERT INTO test_01037.points VALUES (0.0, 1.0, 0, '');
-INSERT INTO test_01037.points VALUES (0.0, -1.0, 0, '');
-INSERT INTO test_01037.points VALUES (1.0, 1.0, 0, '');
+for type in ${SearchTypes[@]};
+do
+    outputFile="${TMP_DIR}/results${type}.out"
-select 'dictHas', 'test_01037.dict_array' as dict_name, tuple(x, y) as key,
-    dictHas(dict_name, key) from test_01037.points order by x, y;
+    $CLICKHOUSE_CLIENT -n --query="
+    DROP DICTIONARY IF EXISTS test_01037.dict_array;
+    CREATE DICTIONARY test_01037.dict_array
+    (
+        key Array(Array(Array(Array(Float64)))),
+        name String DEFAULT 'qqq',
+        value UInt64 DEFAULT 101
+    )
+    PRIMARY KEY key
+    SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'polygons_array' PASSWORD '' DB 'test_01037'))
+    LIFETIME(0)
+    LAYOUT($type());
-select 'dictHas', 'test_01037.dict_tuple' as dict_name, tuple(x, y) as key,
-    dictHas(dict_name, key) from test_01037.points order by x, y;
+    DROP DICTIONARY IF EXISTS test_01037.dict_tuple;
+    CREATE DICTIONARY test_01037.dict_tuple
+    (
+        key Array(Array(Array(Tuple(Float64, Float64)))),
+        name String DEFAULT 'qqq',
+        value UInt64 DEFAULT 101
+    )
+    PRIMARY KEY key
+    SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'polygons_tuple' PASSWORD '' DB 'test_01037'))
+    LIFETIME(0)
+    LAYOUT($type());
+
+    select 'dictGet', 'test_01037.dict_array' as dict_name, tuple(x, y) as key,
+        dictGet(dict_name, 'name', key),
+        dictGet(dict_name, 'value', key) from test_01037.points order by x, y;
+    select 'dictGetOrDefault', 'test_01037.dict_array' as dict_name, tuple(x, y) as key,
+        dictGetOrDefault(dict_name, 'name', key, 'www'),
+        dictGetOrDefault(dict_name, 'value', key, toUInt64(1234)) from test_01037.points order by x, y;
+    select 'dictGetOrDefault', 'test_01037.dict_array' as dict_name, tuple(x, y) as key,
+        dictGetOrDefault(dict_name, 'name', key, def_s),
+        dictGetOrDefault(dict_name, 'value', key, def_i) from test_01037.points order by x, y;
+    select 'dictGet', 'test_01037.dict_tuple' as dict_name, tuple(x, y) as key,
+        dictGet(dict_name, 'name', key),
+        dictGet(dict_name, 'value', key) from test_01037.points order by x, y;
+    select 'dictGetOrDefault', 'test_01037.dict_tuple' as dict_name, tuple(x, y) as key,
+        dictGetOrDefault(dict_name, 'name', key, 'www'),
+        dictGetOrDefault(dict_name, 'value', key, toUInt64(1234)) from test_01037.points order by x, y;
+    select 'dictGetOrDefault', 'test_01037.dict_tuple' as dict_name, tuple(x, y) as key,
+        dictGetOrDefault(dict_name, 'name', key, def_s),
+        dictGetOrDefault(dict_name, 'value', key, def_i) from test_01037.points order by x, y;
+    select 'dictHas', 'test_01037.dict_array' as dict_name, tuple(x, y) as key,
+        dictHas(dict_name, key) from test_01037.points order by x, y;
+    select 'dictHas', 'test_01037.dict_tuple' as dict_name, tuple(x, y) as key,
+        dictHas(dict_name, key) from test_01037.points order by x, y;
+    " > $outputFile
+
+    diff -q "${CURDIR}/01037_polygon_dicts_simple_functions.ans" "$outputFile"
+done
+
+$CLICKHOUSE_CLIENT -n --query="
 DROP DICTIONARY test_01037.dict_array;
 DROP DICTIONARY test_01037.dict_tuple;
 DROP TABLE test_01037.polygons_array;
 DROP TABLE test_01037.polygons_tuple;
 DROP TABLE test_01037.points;
 DROP DATABASE test_01037;
+"
diff --git a/tests/queries/0_stateless/01037_test_data_perf.tar.gz b/tests/queries/0_stateless/01037_test_data_perf.tar.gz
new file mode 100644
index 00000000000..4d42db1f769
Binary files /dev/null and b/tests/queries/0_stateless/01037_test_data_perf.tar.gz differ
diff --git a/tests/queries/0_stateless/01037_test_data_search.tar.gz b/tests/queries/0_stateless/01037_test_data_search.tar.gz
new file mode 100644
index 00000000000..7a2506164d3
Binary files /dev/null and b/tests/queries/0_stateless/01037_test_data_search.tar.gz differ
diff --git a/tests/queries/0_stateless/01292_create_user.reference b/tests/queries/0_stateless/01292_create_user.reference
index 922ee54bef4..775bbaa6a26 100644
--- a/tests/queries/0_stateless/01292_create_user.reference
+++ b/tests/queries/0_stateless/01292_create_user.reference
@@ -81,6 +81,15 @@ CREATE USER u6_01292 DEFAULT ROLE NONE
 -- complex
 CREATE USER u1_01292 IDENTIFIED WITH plaintext_password HOST LOCAL SETTINGS readonly = 1
 CREATE USER u1_01292 HOST LIKE \'%.%.myhost.com\' DEFAULT ROLE NONE SETTINGS PROFILE default
+-- if not exists
+CREATE USER u1_01292
+GRANT r1_01292 TO u1_01292
+-- if not exists-part2
+CREATE USER u1_01292
+GRANT r1_01292, r2_01292 TO u1_01292
+-- or replace
+CREATE USER u1_01292
+CREATE USER u2_01292
 -- multiple users in one command
 CREATE USER u1_01292 DEFAULT ROLE NONE
 CREATE USER u2_01292 DEFAULT ROLE NONE
diff --git a/tests/queries/0_stateless/01292_create_user.sql b/tests/queries/0_stateless/01292_create_user.sql
index 5ae7f3921e6..c8d408147e9 100644
--- a/tests/queries/0_stateless/01292_create_user.sql
+++ b/tests/queries/0_stateless/01292_create_user.sql
@@ -177,6 +177,24 @@ ALTER USER u1_01292 NOT IDENTIFIED HOST LIKE '%.%.myhost.com' DEFAULT ROLE NONE
 SHOW CREATE USER u1_01292;
 DROP USER u1_01292;
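+-- The block below exercises CREATE USER IF NOT EXISTS and CREATE USER OR REPLACE together with GRANT.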
+SELECT '-- if not exists';
+CREATE USER u1_01292;
+GRANT r1_01292 TO u1_01292;
+SHOW CREATE USER u1_01292;
+SHOW GRANTS FOR u1_01292;
+SELECT '-- if not exists-part2';
+CREATE USER IF NOT EXISTS u1_01292;
+GRANT r2_01292 TO u1_01292;
+SHOW CREATE USER u1_01292;
+SHOW GRANTS FOR u1_01292;
+SELECT '-- or replace';
+CREATE USER OR REPLACE u1_01292;
+SHOW CREATE USER u1_01292;
+SHOW GRANTS FOR u1_01292;
+CREATE USER IF NOT EXISTS u2_01292;
+SHOW CREATE USER u2_01292;
+DROP USER u1_01292, u2_01292;
+
 SELECT '-- multiple users in one command';
 CREATE USER u1_01292, u2_01292 DEFAULT ROLE NONE;
 CREATE USER u3_01292, u4_01292 HOST LIKE '%.%.myhost.com';
diff --git a/tests/queries/0_stateless/01318_map_add_map_subtract.reference b/tests/queries/0_stateless/01318_map_add_map_subtract.reference
new file mode 100644
index 00000000000..fb38786ab01
--- /dev/null
+++ b/tests/queries/0_stateless/01318_map_add_map_subtract.reference
@@ -0,0 +1,51 @@
+([1],[5])
+([1,2],[3,2])
+([1,3],[3,2])
+([1,4],[3,2])
+([1,5],[3,2])
+([1,6],[3,2])
+([1,7],[3,2])
+([1,8],[3,2])
+([1,9],[3,2])
+([1,10],[3,2])
+([1,2],[5,2])
+([1,2],[3,4])
+([1,2,3],[3,2,2])
+([1,2,4],[3,2,2])
+([1,2,5],[3,2,2])
+([1,2,6],[3,2,2])
+([1,2,7],[3,2,2])
+([1,2,8],[3,2,2])
+([1,2,9],[3,2,2])
+([1,2,10],[3,2,2])
+([1,2],[2,2]) Tuple(Array(UInt8), Array(UInt64))
+([1,2],[2,2]) Tuple(Array(UInt16), Array(UInt64))
+([1,2],[2,2]) Tuple(Array(UInt32), Array(UInt64))
+([1,2],[2,2]) Tuple(Array(UInt64), Array(UInt64))
+([1,2],[2,2]) Tuple(Array(Int16), Array(Int64))
+([1,2],[2,2]) Tuple(Array(Int16), Array(Int64))
+([1,2],[2,2]) Tuple(Array(Int32), Array(Int64))
+([1,2],[2,2]) Tuple(Array(Int64), Array(Int64))
+([1,2],[3.300000023841858,2]) Tuple(Array(UInt8), Array(Float64))
+([1,2],[3.3000000000000003,2]) Tuple(Array(UInt8), Array(Float64))
+(['a','b'],[1,2])
+(['a','b','c'],[1,1,1])
+(['a','b','d'],[1,1,1])
+(['a','b'],[1,2]) Tuple(Array(FixedString(1)), Array(UInt64))
+(['a','b','c'],[1,1,1]) Tuple(Array(FixedString(1)), Array(UInt64))
+(['a','b','d'],[1,1,1]) Tuple(Array(FixedString(1)), Array(UInt64))
+(['a','b'],[1,2])
+(['a','b','c'],[1,1,1])
+(['a','b','d'],[1,1,1])
+(['a'],[2]) Tuple(Array(Enum16(\'a\' = 1, \'b\' = 2)), Array(Int64))
+(['b'],[2]) Tuple(Array(Enum16(\'a\' = 1, \'b\' = 2)), Array(Int64))
+(['a'],[2]) Tuple(Array(Enum8(\'a\' = 1, \'b\' = 2)), Array(Int64))
+(['b'],[2]) Tuple(Array(Enum8(\'a\' = 1, \'b\' = 2)), Array(Int64))
+(['00000000-89ab-cdef-0123-456789abcdef'],[2]) Tuple(Array(UUID), Array(Int64))
+(['11111111-89ab-cdef-0123-456789abcdef'],[4]) Tuple(Array(UUID), Array(Int64))
+([1,2],[0,0]) Tuple(Array(UInt8), Array(UInt64))
+([1,2],[18446744073709551615,18446744073709551615]) Tuple(Array(UInt8), Array(UInt64))
+([1,2],[-1,-1]) Tuple(Array(UInt8), Array(Int64))
+([1,2],[-1.0999999761581423,0]) Tuple(Array(UInt8), Array(Float64))
+([1,2],[-1,-1]) Tuple(Array(UInt8), Array(Int64))
+([1,2,3],[-2,-2,1]) Tuple(Array(UInt8), Array(Int64))
diff --git a/tests/queries/0_stateless/01318_map_add_map_subtract.sql b/tests/queries/0_stateless/01318_map_add_map_subtract.sql
new file mode 100644
index 00000000000..40c08e0a147
--- /dev/null
+++ b/tests/queries/0_stateless/01318_map_add_map_subtract.sql
@@ -0,0 +1,45 @@
+drop table if exists map_test;
+create table map_test engine=TinyLog() as (select ([1, number], [toInt32(2),2]) as map from numbers(1, 10));
+
+-- mapAdd
+select mapAdd([1], [1]); -- { serverError 42 }
+select mapAdd(([1], [1])); -- { serverError 42 }
+select mapAdd(([1], [1]), map) from map_test; -- { serverError 43 }
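+-- the key and value arrays must match the map's own types; mismatched or missing arguments are expected to fail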
+select mapAdd(([toUInt64(1)], [1]), map) from map_test; -- { serverError 43 }
+select mapAdd(([toUInt64(1), 2], [toInt32(1)]), map) from map_test; -- {serverError 42 }
+
+select mapAdd(([toUInt64(1)], [toInt32(1)]), map) from map_test;
+select mapAdd(cast(map, 'Tuple(Array(UInt8), Array(UInt8))'), ([1], [1]), ([2],[2]) ) from map_test;
+
+-- cleanup
+drop table map_test;
+
+-- check types
+select mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) as res, toTypeName(res);
+select mapAdd(([toUInt16(1), 2], [toUInt16(1), 1]), ([toUInt16(1), 2], [toUInt16(1), 1])) as res, toTypeName(res);
+select mapAdd(([toUInt32(1), 2], [toUInt32(1), 1]), ([toUInt32(1), 2], [toUInt32(1), 1])) as res, toTypeName(res);
+select mapAdd(([toUInt64(1), 2], [toUInt64(1), 1]), ([toUInt64(1), 2], [toUInt64(1), 1])) as res, toTypeName(res);
+
+select mapAdd(([toInt8(1), 2], [toInt8(1), 1]), ([toInt8(1), 2], [toInt8(1), 1])) as res, toTypeName(res);
+select mapAdd(([toInt16(1), 2], [toInt16(1), 1]), ([toInt16(1), 2], [toInt16(1), 1])) as res, toTypeName(res);
+select mapAdd(([toInt32(1), 2], [toInt32(1), 1]), ([toInt32(1), 2], [toInt32(1), 1])) as res, toTypeName(res);
+select mapAdd(([toInt64(1), 2], [toInt64(1), 1]), ([toInt64(1), 2], [toInt64(1), 1])) as res, toTypeName(res);
+
+select mapAdd(([1, 2], [toFloat32(1.1), 1]), ([1, 2], [2.2, 1])) as res, toTypeName(res);
+select mapAdd(([1, 2], [toFloat64(1.1), 1]), ([1, 2], [2.2, 1])) as res, toTypeName(res);
+select mapAdd(([toFloat32(1), 2], [toFloat64(1.1), 1]), ([toFloat32(1), 2], [2.2, 1])) as res, toTypeName(res); -- { serverError 44 }
+select mapAdd(([1, 2], [toFloat64(1.1), 1]), ([1, 2], [1, 1])) as res, toTypeName(res); -- { serverError 43 }
+select mapAdd((['a', 'b'], [1, 1]), ([key], [1])) from values('key String', ('b'), ('c'), ('d'));
+select mapAdd((cast(['a', 'b'], 'Array(FixedString(1))'), [1, 1]), ([key], [1])) as res, toTypeName(res) from values('key FixedString(1)', ('b'), ('c'), ('d'));
+select mapAdd((cast(['a', 'b'], 'Array(LowCardinality(String))'), [1, 1]), ([key], [1])) from values('key String', ('b'), ('c'), ('d'));
+select mapAdd((key, val), (key, val)) as res, toTypeName(res) from values ('key Array(Enum16(\'a\'=1, \'b\'=2)), val Array(Int16)', (['a'], [1]), (['b'], [1]));
+select mapAdd((key, val), (key, val)) as res, toTypeName(res) from values ('key Array(Enum8(\'a\'=1, \'b\'=2)), val Array(Int16)', (['a'], [1]), (['b'], [1]));
+select mapAdd((key, val), (key, val)) as res, toTypeName(res) from values ('key Array(UUID), val Array(Int32)', (['00000000-89ab-cdef-0123-456789abcdef'], [1]), (['11111111-89ab-cdef-0123-456789abcdef'], [2]));
+
+-- mapSubtract, same rules as mapAdd
+select mapSubtract(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) as res, toTypeName(res);
+select mapSubtract(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [2, 2])) as res, toTypeName(res); -- overflow
+select mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt16(2), 2])) as res, toTypeName(res);
+select mapSubtract(([1, 2], [toFloat32(1.1), 1]), ([1, 2], [2.2, 1])) as res, toTypeName(res);
+select mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt16(2), 2])) as res, toTypeName(res);
+select mapSubtract(([toUInt8(3)], [toInt32(1)]), ([toUInt8(1), 2], [toInt32(2), 2])) as res, toTypeName(res);
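+-- per the reference output above: unsigned value types stay UInt64 and wrap around on underflow, while signed and float inputs widen and can go negative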
diff --git a/tests/queries/0_stateless/01402_cast_nullable_string_to_enum.reference b/tests/queries/0_stateless/01402_cast_nullable_string_to_enum.reference
new file mode 100644
index 00000000000..96e34d5a44c
--- /dev/null
+++ b/tests/queries/0_stateless/01402_cast_nullable_string_to_enum.reference
@@ -0,0 +1,2 @@
+\N
+\N
diff --git a/tests/queries/0_stateless/01402_cast_nullable_string_to_enum.sql b/tests/queries/0_stateless/01402_cast_nullable_string_to_enum.sql
new file mode 100644
index 00000000000..aa8e8be6673
--- /dev/null
+++ b/tests/queries/0_stateless/01402_cast_nullable_string_to_enum.sql
@@ -0,0 +1,11 @@
+-- https://github.com/ClickHouse/ClickHouse/issues/5818#issuecomment-619628445
+SELECT CAST(CAST(NULL AS Nullable(String)) AS Nullable(Enum8('Hello' = 1)));
+SELECT CAST(CAST(NULL AS Nullable(FixedString(1))) AS Nullable(Enum8('Hello' = 1)));
+
+-- empty string still not acceptable
+SELECT CAST(CAST('' AS Nullable(String)) AS Nullable(Enum8('Hello' = 1))); -- { serverError 36; }
+SELECT CAST(CAST('' AS Nullable(FixedString(1))) AS Nullable(Enum8('Hello' = 1))); -- { serverError 36; }
+
+-- non-Nullable Enum() still not acceptable
+SELECT CAST(CAST(NULL AS Nullable(String)) AS Enum8('Hello' = 1)); -- { serverError 349; }
+SELECT CAST(CAST(NULL AS Nullable(FixedString(1))) AS Enum8('Hello' = 1)); -- { serverError 349; }
diff --git a/tests/queries/0_stateless/01414_mutations_and_errors.reference b/tests/queries/0_stateless/01414_mutations_and_errors.reference
new file mode 100644
index 00000000000..166a9c6b7b8
--- /dev/null
+++ b/tests/queries/0_stateless/01414_mutations_and_errors.reference
@@ -0,0 +1,4 @@
+42
+Hello
+42
+Hello
diff --git a/tests/queries/0_stateless/01414_mutations_and_errors.sql b/tests/queries/0_stateless/01414_mutations_and_errors.sql
new file mode 100644
index 00000000000..af7eeb8b9ee
--- /dev/null
+++ b/tests/queries/0_stateless/01414_mutations_and_errors.sql
@@ -0,0 +1,29 @@
+DROP TABLE IF EXISTS mutation_table;
+
+CREATE TABLE mutation_table
+(
+    date Date,
+    key UInt64,
+    value String
+)
+ENGINE = MergeTree()
+PARTITION BY date
+ORDER BY tuple();
+
+INSERT INTO mutation_table SELECT toDate('2019-10-01'), number, '42' FROM numbers(100);
+
+INSERT INTO mutation_table SELECT toDate('2019-10-02'), number, 'Hello' FROM numbers(100);
+
+SELECT distinct(value) FROM mutation_table ORDER BY value;
+
+ALTER TABLE mutation_table MODIFY COLUMN value UInt64 SETTINGS mutations_sync = 2; --{serverError 341}
+
+SELECT distinct(value) FROM mutation_table ORDER BY value; --{serverError 6}
+
+KILL MUTATION where table = 'mutation_table' and database = currentDatabase();
+
+ALTER TABLE mutation_table MODIFY COLUMN value String SETTINGS mutations_sync = 2;
+
+SELECT distinct(value) FROM mutation_table ORDER BY value;
+
+DROP TABLE IF EXISTS mutation_table;
diff --git a/tests/queries/0_stateless/01414_mutations_and_errors_zookeeper.reference b/tests/queries/0_stateless/01414_mutations_and_errors_zookeeper.reference
new file mode 100644
index 00000000000..a55134cbe31
--- /dev/null
+++ b/tests/queries/0_stateless/01414_mutations_and_errors_zookeeper.reference
@@ -0,0 +1,5 @@
+Mutation 0000000000 was killed
+Cannot parse string 'Hello' as UInt64
+Cannot parse string 'Hello' as UInt64
+42
+Hello
diff --git a/tests/queries/0_stateless/01414_mutations_and_errors_zookeeper.sh b/tests/queries/0_stateless/01414_mutations_and_errors_zookeeper.sh
new file mode 100755
index 00000000000..9881b1f7def
--- /dev/null
+++ b/tests/queries/0_stateless/01414_mutations_and_errors_zookeeper.sh
@@ -0,0 +1,73 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+. $CURDIR/../shell_config.sh
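+# Runs a deliberately slow mutation and a failing type-change mutation on a replicated table,
+# kills both via KILL MUTATION, and checks that the table recovers.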
+
+$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS replicated_mutation_table"
+
+$CLICKHOUSE_CLIENT --query "
+    CREATE TABLE replicated_mutation_table(
+        date Date,
+        key UInt64,
+        value String
+    )
+    ENGINE = ReplicatedMergeTree('/clickhouse/tables/mutation_table', '1')
+    ORDER BY tuple()
+    PARTITION BY date
+"
+
+$CLICKHOUSE_CLIENT --query "INSERT INTO replicated_mutation_table SELECT toDate('2019-10-02'), number, '42' FROM numbers(4)"
+
+$CLICKHOUSE_CLIENT --query "INSERT INTO replicated_mutation_table SELECT toDate('2019-10-02'), number, 'Hello' FROM numbers(4)"
+
+$CLICKHOUSE_CLIENT --query "ALTER TABLE replicated_mutation_table UPDATE key = key + 1 WHERE sleepEachRow(1) == 0 SETTINGS mutations_sync = 2" 2>&1 | grep -o 'Mutation 0000000000 was killed' | head -n 1 &
+
+check_query="SELECT count() FROM system.mutations WHERE table='replicated_mutation_table' and database='$CLICKHOUSE_DATABASE' and mutation_id='0000000000'"
+
+query_result=`$CLICKHOUSE_CLIENT --query="$check_query" 2>&1`
+
+while [ "$query_result" != "1" ]
+do
+    query_result=`$CLICKHOUSE_CLIENT --query="$check_query" 2>&1`
+    sleep 0.5
+done
+
+$CLICKHOUSE_CLIENT --query "KILL MUTATION WHERE table='replicated_mutation_table' and database='$CLICKHOUSE_DATABASE' and mutation_id='0000000000'" &> /dev/null
+
+while [ "$query_result" != "0" ]
+do
+    query_result=`$CLICKHOUSE_CLIENT --query="$check_query" 2>&1`
+    sleep 0.5
+done
+
+wait
+
+$CLICKHOUSE_CLIENT --query "ALTER TABLE replicated_mutation_table MODIFY COLUMN value UInt64 SETTINGS replication_alter_partitions_sync = 2" 2>&1 | grep -o "Cannot parse string 'Hello' as UInt64" | head -n 1 &
+
+check_query="SELECT count() FROM system.mutations WHERE table='replicated_mutation_table' and database='$CLICKHOUSE_DATABASE' and mutation_id='0000000001'"
+
+query_result=`$CLICKHOUSE_CLIENT --query="$check_query" 2>&1`
+
+while [ "$query_result" != "1" ]
+do
+    query_result=`$CLICKHOUSE_CLIENT --query="$check_query" 2>&1`
+    sleep 0.5
+done
+
+wait
+
+$CLICKHOUSE_CLIENT --query "KILL MUTATION WHERE table='replicated_mutation_table' and database='$CLICKHOUSE_DATABASE' AND mutation_id='0000000001'" &> /dev/null
+
+while [ "$query_result" != "0" ]
+do
+    query_result=`$CLICKHOUSE_CLIENT --query="$check_query" 2>&1`
+    sleep 0.5
+done
+
+$CLICKHOUSE_CLIENT --query "SELECT distinct(value) FROM replicated_mutation_table ORDER BY value" 2>&1 | grep -o "Cannot parse string 'Hello' as UInt64" | head -n 1
+
+$CLICKHOUSE_CLIENT --query "ALTER TABLE replicated_mutation_table MODIFY COLUMN value String SETTINGS replication_alter_partitions_sync = 2"
+
+$CLICKHOUSE_CLIENT --query "SELECT distinct(value) FROM replicated_mutation_table ORDER BY value"
+
+$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS replicated_mutation_table"
diff --git a/tests/queries/0_stateless/01415_inconsistent_merge_tree_settings.reference b/tests/queries/0_stateless/01415_inconsistent_merge_tree_settings.reference
new file mode 100644
index 00000000000..dd57652a491
--- /dev/null
+++ b/tests/queries/0_stateless/01415_inconsistent_merge_tree_settings.reference
@@ -0,0 +1,2 @@
+1 hello
+1 world
diff --git a/tests/queries/0_stateless/01415_inconsistent_merge_tree_settings.sql b/tests/queries/0_stateless/01415_inconsistent_merge_tree_settings.sql
new file mode 100644
index 00000000000..f3bf24193a8
--- /dev/null
+++ b/tests/queries/0_stateless/01415_inconsistent_merge_tree_settings.sql
@@ -0,0 +1,12 @@
+DROP TABLE IF EXISTS t;
+
+SET mutations_sync = 1;
+CREATE TABLE t (x UInt8, s String) ENGINE = MergeTree ORDER BY x SETTINGS number_of_free_entries_in_pool_to_execute_mutation = 1000;
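+-- 1000 free pool entries can never be available, yet with mutations_sync = 1 the UPDATE below is still expected to finish (the reference ends with '1 world')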
+
+INSERT INTO t VALUES (1, 'hello');
+SELECT * FROM t;
+
+ALTER TABLE t UPDATE s = 'world' WHERE x = 1;
+SELECT * FROM t;
+
+DROP TABLE t;
diff --git a/tests/queries/0_stateless/01415_sticking_mutations.reference b/tests/queries/0_stateless/01415_sticking_mutations.reference
new file mode 100644
index 00000000000..5d2e2b67958
--- /dev/null
+++ b/tests/queries/0_stateless/01415_sticking_mutations.reference
@@ -0,0 +1,14 @@
+1
+CREATE TABLE default.sticking_mutations\n(\n    `date` Date,\n    `key` UInt64,\n    `value1` UInt64,\n    `value2` UInt8\n)\nENGINE = MergeTree()\nORDER BY key\nSETTINGS index_granularity = 8192
+1
+CREATE TABLE default.sticking_mutations\n(\n    `date` Date,\n    `key` UInt64,\n    `value1` UInt64,\n    `value2` UInt8\n)\nENGINE = MergeTree()\nORDER BY key\nSETTINGS index_granularity = 8192
+1
+CREATE TABLE default.sticking_mutations\n(\n    `date` Date,\n    `key` UInt64,\n    `value1` String,\n    `value2` UInt8\n)\nENGINE = MergeTree()\nORDER BY key\nSETTINGS index_granularity = 8192
+1
+CREATE TABLE default.sticking_mutations\n(\n    `date` Date,\n    `key` UInt64,\n    `value1` String,\n    `value2` UInt8\n)\nENGINE = MergeTree()\nORDER BY key\nSETTINGS index_granularity = 8192
+1
+CREATE TABLE default.sticking_mutations\n(\n    `date` Date,\n    `key` UInt64,\n    `value2` UInt8\n)\nENGINE = MergeTree()\nORDER BY key\nSETTINGS index_granularity = 8192
+1
+CREATE TABLE default.sticking_mutations\n(\n    `date` Date,\n    `key` UInt64,\n    `renamed_value1` String,\n    `value2` UInt8\n)\nENGINE = MergeTree()\nORDER BY key\nSETTINGS index_granularity = 8192
+1
+CREATE TABLE default.sticking_mutations\n(\n    `date` Date,\n    `key` UInt64,\n    `value1` UInt64,\n    `value2` UInt8\n)\nENGINE = MergeTree()\nORDER BY key\nTTL date + toIntervalDay(1)\nSETTINGS index_granularity = 8192
diff --git a/tests/queries/0_stateless/01415_sticking_mutations.sh b/tests/queries/0_stateless/01415_sticking_mutations.sh
new file mode 100755
index 00000000000..04846be06d6
--- /dev/null
+++ b/tests/queries/0_stateless/01415_sticking_mutations.sh
@@ -0,0 +1,70 @@
+#!/usr/bin/env bash
+
+set -e
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+. $CURDIR/../shell_config.sh
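+# Each scenario: stop merges, start a combined ALTER so it sticks, restart merges,
+# then verify the data hash and the final SHOW CREATE TABLE output.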
$CURDIR/../shell_config.sh
+
+$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS sticking_mutations"
+
+function check_sticky_mutations()
+{
+  $CLICKHOUSE_CLIENT -n --query "CREATE TABLE sticking_mutations (
+  date Date,
+  key UInt64,
+  value1 String,
+  value2 UInt8
+  )
+  ENGINE = MergeTree()
+  ORDER BY key;"
+
+  $CLICKHOUSE_CLIENT --query "INSERT INTO sticking_mutations SELECT toDate('2020-07-10'), number, toString(number), number % 128 FROM numbers(1000)"
+
+  $CLICKHOUSE_CLIENT --query "INSERT INTO sticking_mutations SELECT toDate('2100-01-10'), number, toString(number), number % 128 FROM numbers(1000)"
+
+  # if merges are stopped, mutations on an ordinary MergeTree will stick
+  $CLICKHOUSE_CLIENT --query "SYSTEM STOP MERGES sticking_mutations"
+
+  $CLICKHOUSE_CLIENT --query "$1" &
+
+  ##### wait for the mutation to start #####
+  check_query="SELECT count() FROM system.mutations WHERE table='sticking_mutations' and database='$CLICKHOUSE_DATABASE' and is_done = 0"
+
+  query_result=`$CLICKHOUSE_CLIENT --query="$check_query" 2>&1`
+
+  while [ "$query_result" == "0" ]
+  do
+    query_result=`$CLICKHOUSE_CLIENT --query="$check_query" 2>&1`
+    sleep 0.5
+  done
+  ##### wait for the mutation to start #####
+
+  # Start merges so that the stuck mutations can execute
+
+  $CLICKHOUSE_CLIENT --query "SYSTEM START MERGES sticking_mutations"
+
+  # just to be sure that the previous mutations have finished
+  $CLICKHOUSE_CLIENT --query "ALTER TABLE sticking_mutations DELETE WHERE value2 % 31 == 0 SETTINGS mutations_sync = 1"
+
+  $CLICKHOUSE_CLIENT --query "OPTIMIZE TABLE sticking_mutations FINAL"
+
+  $CLICKHOUSE_CLIENT --query "SELECT sum(cityHash64(*)) > 1 FROM sticking_mutations WHERE key > 10"
+
+  $CLICKHOUSE_CLIENT --query "SHOW CREATE TABLE sticking_mutations"
+
+  $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS sticking_mutations"
+}
+
+check_sticky_mutations "ALTER TABLE sticking_mutations DELETE WHERE value2 % 32 == 0, MODIFY COLUMN value1 UInt64"
+
+check_sticky_mutations "ALTER TABLE sticking_mutations MODIFY COLUMN value1 UInt64, DELETE WHERE value2 % 32 == 0"
+
+check_sticky_mutations "ALTER TABLE sticking_mutations UPDATE value1 = 15 WHERE key < 2000, DELETE WHERE value2 % 32 == 0"
+
+check_sticky_mutations "ALTER TABLE sticking_mutations DELETE WHERE value2 % 32 == 0, UPDATE value1 = 15 WHERE key < 2000"
+
+check_sticky_mutations "ALTER TABLE sticking_mutations DELETE WHERE value2 % 32 == 0, DROP COLUMN value1"
+
+check_sticky_mutations "ALTER TABLE sticking_mutations DELETE WHERE value2 % 32 == 0, RENAME COLUMN value1 TO renamed_value1"
+
+check_sticky_mutations "ALTER TABLE sticking_mutations MODIFY COLUMN value1 UInt64, MODIFY TTL date + INTERVAL 1 DAY"
diff --git a/tests/queries/0_stateless/01416_join_totals_header_bug.reference b/tests/queries/0_stateless/01416_join_totals_header_bug.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/01416_join_totals_header_bug.sql b/tests/queries/0_stateless/01416_join_totals_header_bug.sql
new file mode 100644
index 00000000000..089a1d4b72f
--- /dev/null
+++ b/tests/queries/0_stateless/01416_join_totals_header_bug.sql
@@ -0,0 +1,63 @@
+DROP TABLE IF EXISTS tableCommon;
+DROP TABLE IF EXISTS tableTrees;
+DROP TABLE IF EXISTS tableFlowers;
+
+CREATE TABLE tableCommon (`key` FixedString(15), `value` Nullable(Int8)) ENGINE = Log();
+CREATE TABLE tableTrees (`key` FixedString(15), `name` Nullable(Int8), `name2` Nullable(Int8)) ENGINE = Log();
+CREATE TABLE tableFlowers (`key` FixedString(15), `name` Nullable(Int8)) ENGINE = Log();
+
+SELECT * FROM (
+ 
SELECT common.key, common.value, trees.name, trees.name2 + FROM ( + SELECT * + FROM tableCommon + ) as common + INNER JOIN ( + SELECT * + FROM tableTrees + ) trees ON (common.key = trees.key) +) +UNION ALL +( + SELECT common.key, common.value, + null as name, null as name2 + + FROM ( + SELECT * + FROM tableCommon + ) as common + INNER JOIN ( + SELECT * + FROM tableFlowers + ) flowers ON (common.key = flowers.key) +); + +SELECT * FROM ( + SELECT common.key, common.value, trees.name, trees.name2 + FROM ( + SELECT * + FROM tableCommon + ) as common + INNER JOIN ( + SELECT * + FROM tableTrees + ) trees ON (common.key = trees.key) +) +UNION ALL +( + SELECT common.key, common.value, + flowers.name, null as name2 + + FROM ( + SELECT * + FROM tableCommon + ) as common + INNER JOIN ( + SELECT * + FROM tableFlowers + ) flowers ON (common.key = flowers.key) +); + +DROP TABLE IF EXISTS tableCommon; +DROP TABLE IF EXISTS tableTrees; +DROP TABLE IF EXISTS tableFlowers; diff --git a/tests/queries/0_stateless/01417_freeze_partition_verbose.reference b/tests/queries/0_stateless/01417_freeze_partition_verbose.reference new file mode 100644 index 00000000000..a3be410532b --- /dev/null +++ b/tests/queries/0_stateless/01417_freeze_partition_verbose.reference @@ -0,0 +1,18 @@ +command_type partition_id part_name backup_name backup_path +FREEZE ALL 0 0_1_1_0 test_01417 shadow/test_01417/ +FREEZE ALL 1 1_2_2_0 test_01417 shadow/test_01417/ +FREEZE ALL 2 2_3_3_0 test_01417 shadow/test_01417/ +FREEZE ALL 3 3_4_4_0 test_01417 shadow/test_01417/ +FREEZE ALL 4 4_5_5_0 test_01417 shadow/test_01417/ +FREEZE ALL 5 5_6_6_0 test_01417 shadow/test_01417/ +FREEZE ALL 6 6_7_7_0 test_01417 shadow/test_01417/ +FREEZE ALL 7 7_8_8_0 test_01417 shadow/test_01417/ +FREEZE ALL 8 8_9_9_0 test_01417 shadow/test_01417/ +FREEZE ALL 9 9_10_10_0 test_01417 shadow/test_01417/ +command_type partition_id part_name backup_name backup_path +FREEZE PARTITION 3 3_4_4_0 test_01417_single_part shadow/test_01417_single_part/ +command_type partition_id part_name old_part_name +ATTACH PARTITION 3 3_12_12_0 3_4_4_0 +command_type partition_id part_name backup_name backup_path old_part_name +FREEZE PARTITION 7 7_8_8_0 test_01417_single_part_7 shadow/test_01417_single_part_7/ +ATTACH PART 5 5_13_13_0 5_6_6_0 diff --git a/tests/queries/0_stateless/01417_freeze_partition_verbose.sql b/tests/queries/0_stateless/01417_freeze_partition_verbose.sql new file mode 100644 index 00000000000..176093b4323 --- /dev/null +++ b/tests/queries/0_stateless/01417_freeze_partition_verbose.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS table_for_freeze; + +CREATE TABLE table_for_freeze +( + key UInt64, + value String +) +ENGINE = MergeTree() +ORDER BY key +PARTITION BY key % 10; + +INSERT INTO table_for_freeze SELECT number, toString(number) from numbers(10); + +ALTER TABLE table_for_freeze FREEZE WITH NAME 'test_01417' FORMAT TSVWithNames SETTINGS alter_partition_verbose_result = 1; + +ALTER TABLE table_for_freeze FREEZE PARTITION '3' WITH NAME 'test_01417_single_part' FORMAT TSVWithNames SETTINGS alter_partition_verbose_result = 1; + +ALTER TABLE table_for_freeze DETACH PARTITION '3'; + +INSERT INTO table_for_freeze VALUES (3, '3'); + +ALTER TABLE table_for_freeze ATTACH PARTITION '3' FORMAT TSVWithNames SETTINGS alter_partition_verbose_result = 1; + +ALTER TABLE table_for_freeze DETACH PARTITION '5'; + +ALTER TABLE table_for_freeze FREEZE PARTITION '7' WITH NAME 'test_01417_single_part_7', ATTACH PART '5_6_6_0' FORMAT TSVWithNames SETTINGS alter_partition_verbose_result = 1; 
+ +DROP TABLE IF EXISTS table_for_freeze; diff --git a/tests/queries/0_stateless/01417_freeze_partition_verbose_zookeeper.reference b/tests/queries/0_stateless/01417_freeze_partition_verbose_zookeeper.reference new file mode 100644 index 00000000000..7fea72f847d --- /dev/null +++ b/tests/queries/0_stateless/01417_freeze_partition_verbose_zookeeper.reference @@ -0,0 +1,18 @@ +command_type partition_id part_name backup_name backup_path +FREEZE ALL 0 0_0_0_0 test_01417 shadow/test_01417/ +FREEZE ALL 1 1_0_0_0 test_01417 shadow/test_01417/ +FREEZE ALL 2 2_0_0_0 test_01417 shadow/test_01417/ +FREEZE ALL 3 3_0_0_0 test_01417 shadow/test_01417/ +FREEZE ALL 4 4_0_0_0 test_01417 shadow/test_01417/ +FREEZE ALL 5 5_0_0_0 test_01417 shadow/test_01417/ +FREEZE ALL 6 6_0_0_0 test_01417 shadow/test_01417/ +FREEZE ALL 7 7_0_0_0 test_01417 shadow/test_01417/ +FREEZE ALL 8 8_0_0_0 test_01417 shadow/test_01417/ +FREEZE ALL 9 9_0_0_0 test_01417 shadow/test_01417/ +command_type partition_id part_name backup_name backup_path +FREEZE PARTITION 3 3_0_0_0 test_01417_single_part shadow/test_01417_single_part/ +command_type partition_id part_name old_part_name +ATTACH PARTITION 3 3_3_3_0 3_0_0_0 +command_type partition_id part_name backup_name backup_path old_part_name +FREEZE PARTITION 7 7_0_0_0 test_01417_single_part_7 shadow/test_01417_single_part_7/ +ATTACH PART 5 5_2_2_0 5_0_0_0 diff --git a/tests/queries/0_stateless/01417_freeze_partition_verbose_zookeeper.sql b/tests/queries/0_stateless/01417_freeze_partition_verbose_zookeeper.sql new file mode 100644 index 00000000000..4947d0c9fd8 --- /dev/null +++ b/tests/queries/0_stateless/01417_freeze_partition_verbose_zookeeper.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS table_for_freeze_replicated; + +CREATE TABLE table_for_freeze_replicated +( + key UInt64, + value String +) +ENGINE = ReplicatedMergeTree('/test/table_for_freeze_replicated', '1') +ORDER BY key +PARTITION BY key % 10; + +INSERT INTO table_for_freeze_replicated SELECT number, toString(number) from numbers(10); + +ALTER TABLE table_for_freeze_replicated FREEZE WITH NAME 'test_01417' FORMAT TSVWithNames SETTINGS alter_partition_verbose_result = 1; + +ALTER TABLE table_for_freeze_replicated FREEZE PARTITION '3' WITH NAME 'test_01417_single_part' FORMAT TSVWithNames SETTINGS alter_partition_verbose_result = 1; + +ALTER TABLE table_for_freeze_replicated DETACH PARTITION '3'; + +INSERT INTO table_for_freeze_replicated VALUES (3, '3'); + +ALTER TABLE table_for_freeze_replicated ATTACH PARTITION '3' FORMAT TSVWithNames SETTINGS alter_partition_verbose_result = 1; + +ALTER TABLE table_for_freeze_replicated DETACH PARTITION '5'; + +ALTER TABLE table_for_freeze_replicated FREEZE PARTITION '7' WITH NAME 'test_01417_single_part_7', ATTACH PART '5_0_0_0' FORMAT TSVWithNames SETTINGS alter_partition_verbose_result = 1; + +DROP TABLE IF EXISTS table_for_freeze_replicated; diff --git a/tests/queries/0_stateless/01417_update_permutation_crash.reference b/tests/queries/0_stateless/01417_update_permutation_crash.reference new file mode 100644 index 00000000000..2b4f3eda2e0 --- /dev/null +++ b/tests/queries/0_stateless/01417_update_permutation_crash.reference @@ -0,0 +1 @@ +(1,1,0) diff --git a/tests/queries/0_stateless/01417_update_permutation_crash.sql b/tests/queries/0_stateless/01417_update_permutation_crash.sql new file mode 100644 index 00000000000..f5923781734 --- /dev/null +++ b/tests/queries/0_stateless/01417_update_permutation_crash.sql @@ -0,0 +1 @@ +select tuple(1, 1, number) as t from numbers_mt(1000001) order by 
t, number limit 1;
diff --git a/tests/queries/0_stateless/01418_index_analysis_bug.reference b/tests/queries/0_stateless/01418_index_analysis_bug.reference
new file mode 100644
index 00000000000..54a57d63d56
--- /dev/null
+++ b/tests/queries/0_stateless/01418_index_analysis_bug.reference
@@ -0,0 +1,24 @@
+2020-07-03 706
+2020-07-04 695
+2020-07-05 726
+2020-07-06 686
+2020-07-07 715
+2020-07-08 706
+2020-07-09 695
+2020-07-10 726
+2020-07-11 686
+2020-07-12 715
+2020-07-13 706
+2020-07-14 695
+2020-07-15 726
+2020-07-16 686
+2020-07-17 715
+2020-07-18 706
+2020-07-19 695
+2020-07-20 726
+2020-07-21 686
+2020-07-22 715
+2020-07-23 706
+2020-07-24 695
+2020-07-25 726
+2
diff --git a/tests/queries/0_stateless/01418_index_analysis_bug.sql b/tests/queries/0_stateless/01418_index_analysis_bug.sql
new file mode 100644
index 00000000000..c5033ac7d96
--- /dev/null
+++ b/tests/queries/0_stateless/01418_index_analysis_bug.sql
@@ -0,0 +1,43 @@
+DROP TABLE IF EXISTS mytable_local;
+
+CREATE TABLE mytable_local (
+    created DateTime,
+    eventday Date,
+    user_id UInt32
+)
+ENGINE = MergeTree()
+PARTITION BY toYYYYMM(eventday)
+ORDER BY (eventday, user_id);
+
+INSERT INTO mytable_local SELECT
+    toDateTime('2020-06-01 00:00:00') + toIntervalMinute(number) AS created,
+    toDate(created) AS eventday,
+    if((number % 100) > 50, 742522, number % 32141) AS user_id
+FROM numbers(100000);
+
+SELECT
+    eventday,
+    count(*)
+FROM mytable_local
+WHERE (toYYYYMM(eventday) = 202007) AND (user_id = 742522) AND (eventday >= '2020-07-03') AND (eventday <= '2020-07-25')
+GROUP BY eventday
+ORDER BY eventday;
+
+DROP TABLE mytable_local;
+DROP TABLE IF EXISTS table_float;
+
+CREATE TABLE table_float
+(
+    f Float64,
+    u UInt32
+)
+ENGINE = MergeTree
+ORDER BY (f, u);
+
+INSERT INTO table_float VALUES (1.2, 1) (1.3, 2) (1.4, 3) (1.5, 4);
+
+SELECT count()
+FROM table_float
+WHERE (toUInt64(f) = 1) AND (f >= 1.3) AND (f <= 1.4) AND (u > 0);
+
+DROP TABLE table_float;
diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json
index 31cb38cc6bf..955c67b0b96 100644
--- a/tests/queries/skip_list.json
+++ b/tests/queries/skip_list.json
@@ -55,8 +55,7 @@
         "01200_mutations_memory_consumption",
         "01103_check_cpu_instructions_at_startup",
         "01037_polygon_dicts_",
-        "hyperscan",
-        "00992_system_parts_race_condition_zookeeper"
+        "hyperscan"
     ],
     "unbundled-build": [
         "00429",
diff --git a/tests/testflows/README.md b/tests/testflows/README.md
index 645ad541795..41b8b38d142 100644
--- a/tests/testflows/README.md
+++ b/tests/testflows/README.md
@@ -48,8 +48,33 @@ python3 regression.py --local --clickhouse-binary-path "/usr/bin/clickhouse"
 
 You can control verbosity of the output by specifying the output format with `-o` or `--output` option. See `--help` for more details.
 
+## Running Only Selected Tests
+
+You can run only selected tests by passing the `--only` option to `regression.py`.
+
+For example,
+
+```bash
+./regression.py --local --clickhouse-binary-path /usr/bin/clickhouse --only "/clickhouse/rbac/syntax/grant privilege/*"
+```
+
+will execute all `rbac/syntax/grant privilege` tests.
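+
+The same convention works at any level of the test tree: a broader pattern selects an entire suite. A minimal sketch, where the suite path is illustrative and the documented rule of ending the pattern with `/*` still applies:
+
+```bash
+# Run every test under the rbac suite (the exact tree path is an assumption).
+./regression.py --local --clickhouse-binary-path /usr/bin/clickhouse --only "/clickhouse/rbac/*"
+```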
+
+If you want to run only a single test, such as `/clickhouse/rbac/syntax/grant privilege/grant privileges/privilege='KILL QUERY', on=('*.*',), allow_introspection=False`, you can do it as follows:
+
+```bash
+./regression.py --local --clickhouse-binary-path /usr/bin/clickhouse --only "/clickhouse/rbac/syntax/grant privilege/grant privileges/privilege='KILL QUERY', on=('[*].[*]',), allow_introspection=False/*"
+```
+
+> Note that you need to surround special characters such as `*` with square brackets, for example `[*]`.
+
+> Note that you need to end the filtering pattern with `/*` to run all the steps inside the test.
+
+For more information, please see the [Filtering](https://testflows.com/handbook/#Filtering) section in the [TestFlows Handbook].
+
 [Python 3]: https://www.python.org/
 [Ubuntu]: https://ubuntu.com/
 [TestFlows]: https://testflows.com
+[TestFlows Handbook]: https://testflows.com/handbook/
 [Docker]: https://www.docker.com/
 [Docker Compose]: https://docs.docker.com/compose/
diff --git a/tests/testflows/helpers/cluster.py b/tests/testflows/helpers/cluster.py
index 9f86d44124c..6d3ae97e000 100644
--- a/tests/testflows/helpers/cluster.py
+++ b/tests/testflows/helpers/cluster.py
@@ -167,17 +167,20 @@ class Cluster(object):
             self.docker_compose += f" --project-directory \"{docker_compose_project_dir}\" --file \"{docker_compose_file_path}\""
 
         self.lock = threading.Lock()
 
-    def shell(self, node):
+    def shell(self, node, timeout=120):
         """Returns unique shell terminal to be used.
         """
         if node is None:
             return Shell()
 
-        return Shell(command=[
+        shell = Shell(command=[
             "/bin/bash", "--noediting", "-c", f"{self.docker_compose} exec {node} bash --noediting"
         ], name=node)
 
-    def bash(self, node, timeout=60):
+        shell.timeout = timeout
+        return shell
+
+    def bash(self, node, timeout=120):
         """Returns thread-local bash terminal to a specific node.
diff --git a/tests/testflows/ldap/regression.py b/tests/testflows/ldap/regression.py
index 567807fc0a8..4a18052dcb5 100755
--- a/tests/testflows/ldap/regression.py
+++ b/tests/testflows/ldap/regression.py
@@ -23,6 +23,8 @@ xfails = {
     "connection protocols/starttls with custom port":
         [(Fail, "it seems that starttls is not enabled by default on custom plain-text ports in LDAP server")],
     "connection protocols/tls cipher suite":
+        [(Fail, "can't get it to work")],
+    "connection protocols/tls minimum protocol version/:":
         [(Fail, "can't get it to work")]
 }
diff --git a/tests/testflows/rbac/__init__.py b/tests/testflows/rbac/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/testflows/rbac/configs/clickhouse/common.xml b/tests/testflows/rbac/configs/clickhouse/common.xml
new file mode 100644
index 00000000000..df952b28c82
--- /dev/null
+++ b/tests/testflows/rbac/configs/clickhouse/common.xml
@@ -0,0 +1,6 @@
+<yandex>
+    <timezone>Europe/Moscow</timezone>
+    <listen_host>0.0.0.0</listen_host>
+    <path>/var/lib/clickhouse/</path>
+    <tmp_path>/var/lib/clickhouse/tmp/</tmp_path>
+</yandex>
diff --git a/tests/testflows/rbac/configs/clickhouse/config.d/logs.xml b/tests/testflows/rbac/configs/clickhouse/config.d/logs.xml
new file mode 100644
index 00000000000..bdf1bbc11c1
--- /dev/null
+++ b/tests/testflows/rbac/configs/clickhouse/config.d/logs.xml
@@ -0,0 +1,17 @@
+<yandex>
+    <shutdown_wait_unfinished>3</shutdown_wait_unfinished>
+    <logger>
+        <level>trace</level>
+        <log>/var/log/clickhouse-server/log.log</log>
+        <errorlog>/var/log/clickhouse-server/log.err.log</errorlog>
+        <size>1000M</size>
+        <count>10</count>
+        <stderr>/var/log/clickhouse-server/stderr.log</stderr>
+        <stdout>/var/log/clickhouse-server/stdout.log</stdout>
+    </logger>
+    <part_log>
+        <database>system</database>
+        <table>part_log</table>
+        <flush_interval_milliseconds>500</flush_interval_milliseconds>
+    </part_log>
+</yandex>
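With the `part_log` section above, the server records part-level events (new parts, merges, mutations) in the `system.part_log` table, flushing them every 500 ms. A minimal sanity check, assuming a reachable server with default credentials:

```bash
# system.part_log is created lazily, after the first part event is flushed.
clickhouse-client --query "SELECT event_type, table, part_name FROM system.part_log ORDER BY event_time DESC LIMIT 5"
```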
diff --git a/tests/testflows/rbac/configs/clickhouse/config.d/ports.xml b/tests/testflows/rbac/configs/clickhouse/config.d/ports.xml new file mode 100644 index 00000000000..fbc6cea74c0 --- /dev/null +++ b/tests/testflows/rbac/configs/clickhouse/config.d/ports.xml @@ -0,0 +1,5 @@ + + + 8443 + 9440 + \ No newline at end of file diff --git a/tests/testflows/rbac/configs/clickhouse/config.d/remote.xml b/tests/testflows/rbac/configs/clickhouse/config.d/remote.xml new file mode 100644 index 00000000000..302417c5891 --- /dev/null +++ b/tests/testflows/rbac/configs/clickhouse/config.d/remote.xml @@ -0,0 +1,85 @@ + + + + + + true + + clickhouse1 + 9000 + + + clickhouse2 + 9000 + + + clickhouse3 + 9000 + + + + + + true + + clickhouse1 + 9440 + 1 + + + clickhouse2 + 9440 + 1 + + + clickhouse3 + 9440 + 1 + + + + + + + clickhouse1 + 9000 + + + + + clickhouse2 + 9000 + + + + + clickhouse3 + 9000 + + + + + + + clickhouse1 + 9440 + 1 + + + + + clickhouse2 + 9440 + 1 + + + + + clickhouse3 + 9440 + 1 + + + + + diff --git a/tests/testflows/rbac/configs/clickhouse/config.d/ssl.xml b/tests/testflows/rbac/configs/clickhouse/config.d/ssl.xml new file mode 100644 index 00000000000..ca65ffd5e04 --- /dev/null +++ b/tests/testflows/rbac/configs/clickhouse/config.d/ssl.xml @@ -0,0 +1,17 @@ + + + + /etc/clickhouse-server/ssl/server.crt + /etc/clickhouse-server/ssl/server.key + none + true + + + true + none + + AcceptCertificateHandler + + + + diff --git a/tests/testflows/rbac/configs/clickhouse/config.d/storage.xml b/tests/testflows/rbac/configs/clickhouse/config.d/storage.xml new file mode 100644 index 00000000000..618fd6b6d24 --- /dev/null +++ b/tests/testflows/rbac/configs/clickhouse/config.d/storage.xml @@ -0,0 +1,20 @@ + + + + + + 1024 + + + + + + + default + + + + + + + diff --git a/tests/testflows/rbac/configs/clickhouse/config.d/zookeeper.xml b/tests/testflows/rbac/configs/clickhouse/config.d/zookeeper.xml new file mode 100644 index 00000000000..96270e7b645 --- /dev/null +++ b/tests/testflows/rbac/configs/clickhouse/config.d/zookeeper.xml @@ -0,0 +1,10 @@ + + + + + zookeeper + 2181 + + 15000 + + diff --git a/tests/testflows/rbac/configs/clickhouse/config.xml b/tests/testflows/rbac/configs/clickhouse/config.xml new file mode 100644 index 00000000000..65187edf806 --- /dev/null +++ b/tests/testflows/rbac/configs/clickhouse/config.xml @@ -0,0 +1,436 @@ + + + + + + trace + /var/log/clickhouse-server/clickhouse-server.log + /var/log/clickhouse-server/clickhouse-server.err.log + 1000M + 10 + + + + 8123 + 9000 + + + + + + + + + /etc/clickhouse-server/server.crt + /etc/clickhouse-server/server.key + + /etc/clickhouse-server/dhparam.pem + none + true + true + sslv2,sslv3 + true + + + + true + true + sslv2,sslv3 + true + + + + RejectCertificateHandler + + + + + + + + + 9009 + + + + + + + + + + + + + + + + + + + + 4096 + 3 + + + 100 + + + + + + 8589934592 + + + 5368709120 + + + + /var/lib/clickhouse/ + + + /var/lib/clickhouse/tmp/ + + + /var/lib/clickhouse/user_files/ + + + /var/lib/clickhouse/access/ + + + users.xml + + + default + + + + + + default + + + + + + + + + false + + + + + + + + localhost + 9000 + + + + + + + localhost + 9000 + + + + + localhost + 9000 + + + + + + + localhost + 9440 + 1 + + + + + + + localhost + 9000 + + + + + localhost + 1 + + + + + + + + + + + + + + + + + 3600 + + + + 3600 + + + 60 + + + + + + + + + + system + query_log
+ + toYYYYMM(event_date) + + 7500 +
+ + + + system + trace_log
+ + toYYYYMM(event_date) + 7500 +
+ + + + system + query_thread_log
+ toYYYYMM(event_date) + 7500 +
+ + + + + + + + + + + + + + + + *_dictionary.xml + + + + + + + + + + /clickhouse/task_queue/ddl + + + + + + + + + + + + + + + + click_cost + any + + 0 + 3600 + + + 86400 + 60 + + + + max + + 0 + 60 + + + 3600 + 300 + + + 86400 + 3600 + + + + + + /var/lib/clickhouse/format_schemas/ + + + +
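The remote-server sections above define the three-node cluster used by these tests, alongside the localhost example clusters from the default config. A quick way to confirm the layout the server actually assembled is to query `system.clusters`; a minimal sketch, assuming a node is reachable with default credentials:

```bash
# List every cluster the server built from its remote_servers configuration.
clickhouse-client --query "SELECT cluster, shard_num, replica_num, host_name, port FROM system.clusters"
```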
diff --git a/tests/testflows/rbac/configs/clickhouse/ssl/dhparam.pem b/tests/testflows/rbac/configs/clickhouse/ssl/dhparam.pem new file mode 100644 index 00000000000..2e6cee0798d --- /dev/null +++ b/tests/testflows/rbac/configs/clickhouse/ssl/dhparam.pem @@ -0,0 +1,8 @@ +-----BEGIN DH PARAMETERS----- +MIIBCAKCAQEAua92DDli13gJ+//ZXyGaggjIuidqB0crXfhUlsrBk9BV1hH3i7fR +XGP9rUdk2ubnB3k2ejBStL5oBrkHm9SzUFSQHqfDjLZjKoUpOEmuDc4cHvX1XTR5 +Pr1vf5cd0yEncJWG5W4zyUB8k++SUdL2qaeslSs+f491HBLDYn/h8zCgRbBvxhxb +9qeho1xcbnWeqkN6Kc9bgGozA16P9NLuuLttNnOblkH+lMBf42BSne/TWt3AlGZf +slKmmZcySUhF8aKfJnLKbkBCFqOtFRh8zBA9a7g+BT/lSANATCDPaAk1YVih2EKb +dpc3briTDbRsiqg2JKMI7+VdULY9bh3EawIBAg== +-----END DH PARAMETERS----- diff --git a/tests/testflows/rbac/configs/clickhouse/ssl/server.crt b/tests/testflows/rbac/configs/clickhouse/ssl/server.crt new file mode 100644 index 00000000000..7ade2d96273 --- /dev/null +++ b/tests/testflows/rbac/configs/clickhouse/ssl/server.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIC/TCCAeWgAwIBAgIJANjx1QSR77HBMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV +BAMMCWxvY2FsaG9zdDAgFw0xODA3MzAxODE2MDhaGA8yMjkyMDUxNDE4MTYwOFow +FDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAs9uSo6lJG8o8pw0fbVGVu0tPOljSWcVSXH9uiJBwlZLQnhN4SFSFohfI +4K8U1tBDTnxPLUo/V1K9yzoLiRDGMkwVj6+4+hE2udS2ePTQv5oaMeJ9wrs+5c9T +4pOtlq3pLAdm04ZMB1nbrEysceVudHRkQbGHzHp6VG29Fw7Ga6YpqyHQihRmEkTU +7UCYNA+Vk7aDPdMS/khweyTpXYZimaK9f0ECU3/VOeG3fH6Sp2X6FN4tUj/aFXEj +sRmU5G2TlYiSIUMF2JPdhSihfk1hJVALrHPTU38SOL+GyyBRWdNcrIwVwbpvsvPg +pryMSNxnpr0AK0dFhjwnupIv5hJIOQIDAQABo1AwTjAdBgNVHQ4EFgQUjPLb3uYC +kcamyZHK4/EV8jAP0wQwHwYDVR0jBBgwFoAUjPLb3uYCkcamyZHK4/EV8jAP0wQw +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAM/ocuDvfPus/KpMVD51j +4IdlU8R0vmnYLQ+ygzOAo7+hUWP5j0yvq4ILWNmQX6HNvUggCgFv9bjwDFhb/5Vr +85ieWfTd9+LTjrOzTw4avdGwpX9G+6jJJSSq15tw5ElOIFb/qNA9O4dBiu8vn03C +L/zRSXrARhSqTW5w/tZkUcSTT+M5h28+Lgn9ysx4Ff5vi44LJ1NnrbJbEAIYsAAD ++UA+4MBFKx1r6hHINULev8+lCfkpwIaeS8RL+op4fr6kQPxnULw8wT8gkuc8I4+L +P9gg/xDHB44T3ADGZ5Ib6O0DJaNiToO6rnoaaxs0KkotbvDWvRoxEytSbXKoYjYp +0g== +-----END CERTIFICATE----- diff --git a/tests/testflows/rbac/configs/clickhouse/ssl/server.key b/tests/testflows/rbac/configs/clickhouse/ssl/server.key new file mode 100644 index 00000000000..f0fb61ac443 --- /dev/null +++ b/tests/testflows/rbac/configs/clickhouse/ssl/server.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCz25KjqUkbyjyn +DR9tUZW7S086WNJZxVJcf26IkHCVktCeE3hIVIWiF8jgrxTW0ENOfE8tSj9XUr3L +OguJEMYyTBWPr7j6ETa51LZ49NC/mhox4n3Cuz7lz1Pik62WreksB2bThkwHWdus +TKxx5W50dGRBsYfMenpUbb0XDsZrpimrIdCKFGYSRNTtQJg0D5WTtoM90xL+SHB7 +JOldhmKZor1/QQJTf9U54bd8fpKnZfoU3i1SP9oVcSOxGZTkbZOViJIhQwXYk92F +KKF+TWElUAusc9NTfxI4v4bLIFFZ01ysjBXBum+y8+CmvIxI3GemvQArR0WGPCe6 +ki/mEkg5AgMBAAECggEATrbIBIxwDJOD2/BoUqWkDCY3dGevF8697vFuZKIiQ7PP +TX9j4vPq0DfsmDjHvAPFkTHiTQXzlroFik3LAp+uvhCCVzImmHq0IrwvZ9xtB43f +7Pkc5P6h1l3Ybo8HJ6zRIY3TuLtLxuPSuiOMTQSGRL0zq3SQ5DKuGwkz+kVjHXUN +MR2TECFwMHKQ5VLrC+7PMpsJYyOMlDAWhRfUalxC55xOXTpaN8TxNnwQ8K2ISVY5 +212Jz/a4hn4LdwxSz3Tiu95PN072K87HLWx3EdT6vW4Ge5P/A3y+smIuNAlanMnu +plHBRtpATLiTxZt/n6npyrfQVbYjSH7KWhB8hBHtaQKBgQDh9Cq1c/KtqDtE0Ccr +/r9tZNTUwBE6VP+3OJeKdEdtsfuxjOCkS1oAjgBJiSDOiWPh1DdoDeVZjPKq6pIu +Mq12OE3Doa8znfCXGbkSzEKOb2unKZMJxzrz99kXt40W5DtrqKPNb24CNqTiY8Aa +CjtcX+3weat82VRXvph6U8ltMwKBgQDLxjiQQzNoY7qvg7CwJCjf9qq8jmLK766g +1FHXopqS+dTxDLM8eJSRrpmxGWJvNeNc1uPhsKsKgotqAMdBUQTf7rSTbt4MyoH5 +bUcRLtr+0QTK9hDWMOOvleqNXha68vATkohWYfCueNsC60qD44o8RZAS6UNy3ENq 
+cM1cxqe84wKBgQDKkHutWnooJtajlTxY27O/nZKT/HA1bDgniMuKaz4R4Gr1PIez +on3YW3V0d0P7BP6PWRIm7bY79vkiMtLEKdiKUGWeyZdo3eHvhDb/3DCawtau8L2K +GZsHVp2//mS1Lfz7Qh8/L/NedqCQ+L4iWiPnZ3THjjwn3CoZ05ucpvrAMwKBgB54 +nay039MUVq44Owub3KDg+dcIU62U+cAC/9oG7qZbxYPmKkc4oL7IJSNecGHA5SbU +2268RFdl/gLz6tfRjbEOuOHzCjFPdvAdbysanpTMHLNc6FefJ+zxtgk9sJh0C4Jh +vxFrw9nTKKzfEl12gQ1SOaEaUIO0fEBGbe8ZpauRAoGAMAlGV+2/K4ebvAJKOVTa +dKAzQ+TD2SJmeR1HZmKDYddNqwtZlzg3v4ZhCk4eaUmGeC1Bdh8MDuB3QQvXz4Dr +vOIP4UVaOr+uM+7TgAgVnP4/K6IeJGzUDhX93pmpWhODfdu/oojEKVcpCojmEmS1 +KCBtmIrQLqzMpnBpLNuSY+Q= +-----END PRIVATE KEY----- diff --git a/tests/testflows/rbac/configs/clickhouse/users.xml b/tests/testflows/rbac/configs/clickhouse/users.xml new file mode 100644 index 00000000000..86b2cd9e1e3 --- /dev/null +++ b/tests/testflows/rbac/configs/clickhouse/users.xml @@ -0,0 +1,133 @@ + + + + + + + + 10000000000 + + + 0 + + + random + + + + + 1 + + + + + + + + + + + + + ::/0 + + + + default + + + default + + + 1 + + + + + + + + + + + + + + + + + 3600 + + + 0 + 0 + 0 + 0 + 0 + + + + diff --git a/tests/testflows/rbac/configs/clickhouse1/config.d/macros.xml b/tests/testflows/rbac/configs/clickhouse1/config.d/macros.xml new file mode 100644 index 00000000000..6cdcc1b440c --- /dev/null +++ b/tests/testflows/rbac/configs/clickhouse1/config.d/macros.xml @@ -0,0 +1,8 @@ + + + + clickhouse1 + 01 + 01 + + diff --git a/tests/testflows/rbac/configs/clickhouse2/config.d/macros.xml b/tests/testflows/rbac/configs/clickhouse2/config.d/macros.xml new file mode 100644 index 00000000000..a114a9ce4ab --- /dev/null +++ b/tests/testflows/rbac/configs/clickhouse2/config.d/macros.xml @@ -0,0 +1,8 @@ + + + + clickhouse2 + 01 + 02 + + diff --git a/tests/testflows/rbac/configs/clickhouse3/config.d/macros.xml b/tests/testflows/rbac/configs/clickhouse3/config.d/macros.xml new file mode 100644 index 00000000000..904a27b0172 --- /dev/null +++ b/tests/testflows/rbac/configs/clickhouse3/config.d/macros.xml @@ -0,0 +1,8 @@ + + + + clickhouse3 + 01 + 03 + + diff --git a/tests/testflows/rbac/docker-compose/clickhouse-service.yml b/tests/testflows/rbac/docker-compose/clickhouse-service.yml new file mode 100644 index 00000000000..1e9f48b9b45 --- /dev/null +++ b/tests/testflows/rbac/docker-compose/clickhouse-service.yml @@ -0,0 +1,28 @@ +version: '2.3' + +services: + clickhouse: + image: yandex/clickhouse-integration-test + expose: + - "9000" + - "9009" + - "8123" + volumes: + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d:/etc/clickhouse-server/config.d" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d:/etc/clickhouse-server/users.d" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl:/etc/clickhouse-server/ssl" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml" + - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml" + - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse" + - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge" + entrypoint: bash -c "clickhouse server --config-file=/etc/clickhouse-server/config.xml --log-file=/var/log/clickhouse-server/clickhouse-server.log --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log" + healthcheck: + test: clickhouse client --query='select 1' + interval: 3s + timeout: 2s + retries: 40 + start_period: 2s + cap_add: + - SYS_PTRACE + security_opt: + - label:disable diff --git a/tests/testflows/rbac/docker-compose/docker-compose.yml 
b/tests/testflows/rbac/docker-compose/docker-compose.yml
new file mode 100644
index 00000000000..0f90e7fa7bc
--- /dev/null
+++ b/tests/testflows/rbac/docker-compose/docker-compose.yml
@@ -0,0 +1,60 @@
+version: '2.3'
+
+services:
+  zookeeper:
+    extends:
+      file: zookeeper-service.yml
+      service: zookeeper
+
+  clickhouse1:
+    extends:
+      file: clickhouse-service.yml
+      service: clickhouse
+    hostname: clickhouse1
+    volumes:
+      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
+      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
+      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
+    depends_on:
+      zookeeper:
+        condition: service_healthy
+
+  clickhouse2:
+    extends:
+      file: clickhouse-service.yml
+      service: clickhouse
+    hostname: clickhouse2
+    volumes:
+      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/"
+      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/"
+      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
+    depends_on:
+      zookeeper:
+        condition: service_healthy
+
+  clickhouse3:
+    extends:
+      file: clickhouse-service.yml
+      service: clickhouse
+    hostname: clickhouse3
+    volumes:
+      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/"
+      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/"
+      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
+    depends_on:
+      zookeeper:
+        condition: service_healthy
+
+  # dummy service which does nothing, but allows postponing
+  # 'docker-compose up -d' until all dependencies become healthy
+  all_services_ready:
+    image: hello-world
+    depends_on:
+      clickhouse1:
+        condition: service_healthy
+      clickhouse2:
+        condition: service_healthy
+      clickhouse3:
+        condition: service_healthy
+      zookeeper:
+        condition: service_healthy
diff --git a/tests/testflows/rbac/docker-compose/zookeeper-service.yml b/tests/testflows/rbac/docker-compose/zookeeper-service.yml
new file mode 100644
index 00000000000..f3df33358be
--- /dev/null
+++ b/tests/testflows/rbac/docker-compose/zookeeper-service.yml
@@ -0,0 +1,18 @@
+version: '2.3'
+
+services:
+  zookeeper:
+    image: zookeeper:3.4.12
+    expose:
+      - "2181"
+    environment:
+      ZOO_TICK_TIME: 500
+      ZOO_MY_ID: 1
+    healthcheck:
+      test: echo stat | nc localhost 2181
+      interval: 3s
+      timeout: 2s
+      retries: 5
+      start_period: 2s
+    security_opt:
+      - label:disable
diff --git a/tests/testflows/rbac/regression.py b/tests/testflows/rbac/regression.py
new file mode 100755
index 00000000000..6a96183298c
--- /dev/null
+++ b/tests/testflows/rbac/regression.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+import sys
+from testflows.core import *
+
+append_path(sys.path, "..")
+
+from helpers.cluster import Cluster
+from helpers.argparser import argparser
+from rbac.requirements import *
+
+issue_12507 = "https://github.com/ClickHouse/ClickHouse/issues/12507"
+issue_12510 = "https://github.com/ClickHouse/ClickHouse/issues/12510"
+issue_12600 = "https://github.com/ClickHouse/ClickHouse/issues/12600"
+
+xfails = {
+    "syntax/show create quota/I show create quota current":
+        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/12495")],
+    "syntax/create role/I create role that already exists, throws exception":
+        [(Fail, issue_12510)],
+    "syntax/create user/I create user with if not exists, user does exist":
+
[(Fail, issue_12507)], + "syntax/create row policy/I create row policy if not exists, policy does exist": + [(Fail, issue_12507)], + "syntax/create quota/I create quota if not exists, quota does exist": + [(Fail, issue_12507)], + "syntax/create role/I create role if not exists, role does exist": + [(Fail, issue_12507)], + "syntax/create settings profile/I create settings profile if not exists, profile does exist": + [(Fail, issue_12507)], + "syntax/grant privilege/grant privileges/privilege='dictGet', on=('db0.table0', 'db0.*', '*.*', 'tb0', '*'), allow_introspection=False": + [(Fail, issue_12600)], + "syntax/grant privilege/grant privileges/privilege='CREATE', on=('db0.table0', 'db0.*', '*.*', 'tb0', '*'), allow_introspection=False": + [(Fail, issue_12600)], + "syntax/grant privilege/grant privileges/privilege='DROP', on=('db0.table0', 'db0.*', '*.*', 'tb0', '*'), allow_introspection=False": + [(Fail, issue_12600)], + "syntax/grant privilege/grant privileges/privilege='TRUNCATE', on=('db0.table0', 'db0.*', '*.*', 'tb0', '*'), allow_introspection=False": + [(Fail, issue_12600)], + "syntax/grant privilege/grant privileges/privilege='OPTIMIZE', on=('db0.table0', 'db0.*', '*.*', 'tb0', '*'), allow_introspection=False": + [(Fail, issue_12600)], + "syntax/grant privilege/grant privileges/privilege='SYSTEM', on=('db0.table0', 'db0.*', '*.*', 'tb0', '*'), allow_introspection=False": + [(Fail, issue_12600)], +} + +@TestModule +@ArgumentParser(argparser) +@XFails(xfails) +@Name("rbac") +def regression(self, local, clickhouse_binary_path): + """RBAC regression. + """ + nodes = { + "clickhouse": + ("clickhouse1", "clickhouse2", "clickhouse3") + } + with Cluster(local, clickhouse_binary_path, nodes=nodes) as cluster: + self.context.cluster = cluster + + Feature(run=load("rbac.tests.syntax.feature", "feature"), flags=TE) + +if main(): + regression() diff --git a/tests/testflows/rbac/requirements/__init__.py b/tests/testflows/rbac/requirements/__init__.py new file mode 100644 index 00000000000..02f7d430154 --- /dev/null +++ b/tests/testflows/rbac/requirements/__init__.py @@ -0,0 +1 @@ +from .requirements import * diff --git a/tests/testflows/rbac/requirements/requirements.md b/tests/testflows/rbac/requirements/requirements.md new file mode 100644 index 00000000000..e679b6b7fec --- /dev/null +++ b/tests/testflows/rbac/requirements/requirements.md @@ -0,0 +1,3438 @@ +# SRS-006 ClickHouse Role Based Access Control
Software Requirements Specification + +## Table of Contents + +* 1 [Revision History](#revision-history) +* 2 [Introduction](#introduction) +* 3 [Terminology](#terminology) +* 4 [Privilege Definitions](#privilege-definitions) +* 5 [Requirements](#requirements) + * 5.1 [Generic](#generic) + * 5.1.1 [RQ.SRS-006.RBAC](#rqsrs-006rbac) + * 5.1.2 [Login](#login) + * 5.1.2.1 [RQ.SRS-006.RBAC.Login](#rqsrs-006rbaclogin) + * 5.1.2.2 [RQ.SRS-006.RBAC.Login.DefaultUser](#rqsrs-006rbaclogindefaultuser) + * 5.1.3 [User](#user) + * 5.1.3.1 [RQ.SRS-006.RBAC.User](#rqsrs-006rbacuser) + * 5.1.3.2 [RQ.SRS-006.RBAC.User.Roles](#rqsrs-006rbacuserroles) + * 5.1.3.3 [RQ.SRS-006.RBAC.User.Privileges](#rqsrs-006rbacuserprivileges) + * 5.1.3.4 [RQ.SRS-006.RBAC.User.Variables](#rqsrs-006rbacuservariables) + * 5.1.3.5 [RQ.SRS-006.RBAC.User.Variables.Constraints](#rqsrs-006rbacuservariablesconstraints) + * 5.1.3.6 [RQ.SRS-006.RBAC.User.SettingsProfile](#rqsrs-006rbacusersettingsprofile) + * 5.1.3.7 [RQ.SRS-006.RBAC.User.Quotas](#rqsrs-006rbacuserquotas) + * 5.1.3.8 [RQ.SRS-006.RBAC.User.RowPolicies](#rqsrs-006rbacuserrowpolicies) + * 5.1.3.9 [RQ.SRS-006.RBAC.User.AccountLock](#rqsrs-006rbacuseraccountlock) + * 5.1.3.10 [RQ.SRS-006.RBAC.User.AccountLock.DenyAccess](#rqsrs-006rbacuseraccountlockdenyaccess) + * 5.1.3.11 [RQ.SRS-006.RBAC.User.DefaultRole](#rqsrs-006rbacuserdefaultrole) + * 5.1.3.12 [RQ.SRS-006.RBAC.User.RoleSelection](#rqsrs-006rbacuserroleselection) + * 5.1.3.13 [RQ.SRS-006.RBAC.User.ShowCreate](#rqsrs-006rbacusershowcreate) + * 5.1.3.14 [RQ.SRS-006.RBAC.User.ShowPrivileges](#rqsrs-006rbacusershowprivileges) + * 5.1.4 [Role](#role) + * 5.1.4.1 [RQ.SRS-006.RBAC.Role](#rqsrs-006rbacrole) + * 5.1.4.2 [RQ.SRS-006.RBAC.Role.Privileges](#rqsrs-006rbacroleprivileges) + * 5.1.4.3 [RQ.SRS-006.RBAC.Role.Variables](#rqsrs-006rbacrolevariables) + * 5.1.4.4 [RQ.SRS-006.RBAC.Role.SettingsProfile](#rqsrs-006rbacrolesettingsprofile) + * 5.1.4.5 [RQ.SRS-006.RBAC.Role.Quotas](#rqsrs-006rbacrolequotas) + * 5.1.4.6 [RQ.SRS-006.RBAC.Role.RowPolicies](#rqsrs-006rbacrolerowpolicies) + * 5.1.5 [Privileges](#privileges) + * 5.1.5.1 [RQ.SRS-006.RBAC.Privileges.Usage](#rqsrs-006rbacprivilegesusage) + * 5.1.5.2 [RQ.SRS-006.RBAC.Privileges.Select](#rqsrs-006rbacprivilegesselect) + * 5.1.5.3 [RQ.SRS-006.RBAC.Privileges.SelectColumns](#rqsrs-006rbacprivilegesselectcolumns) + * 5.1.5.4 [RQ.SRS-006.RBAC.Privileges.Insert](#rqsrs-006rbacprivilegesinsert) + * 5.1.5.5 [RQ.SRS-006.RBAC.Privileges.Delete](#rqsrs-006rbacprivilegesdelete) + * 5.1.5.6 [RQ.SRS-006.RBAC.Privileges.Alter](#rqsrs-006rbacprivilegesalter) + * 5.1.5.7 [RQ.SRS-006.RBAC.Privileges.Create](#rqsrs-006rbacprivilegescreate) + * 5.1.5.8 [RQ.SRS-006.RBAC.Privileges.Drop](#rqsrs-006rbacprivilegesdrop) + * 5.1.5.9 [RQ.SRS-006.RBAC.Privileges.All](#rqsrs-006rbacprivilegesall) + * 5.1.5.10 [RQ.SRS-006.RBAC.Privileges.All.GrantRevoke](#rqsrs-006rbacprivilegesallgrantrevoke) + * 5.1.5.11 [RQ.SRS-006.RBAC.Privileges.GrantOption](#rqsrs-006rbacprivilegesgrantoption) + * 5.1.5.12 [RQ.SRS-006.RBAC.Privileges.AdminOption](#rqsrs-006rbacprivilegesadminoption) + * 5.1.6 [Required Privileges](#required-privileges) + * 5.1.6.1 [RQ.SRS-006.RBAC.RequiredPrivileges.Insert](#rqsrs-006rbacrequiredprivilegesinsert) + * 5.1.6.2 [RQ.SRS-006.RBAC.RequiredPrivileges.Select](#rqsrs-006rbacrequiredprivilegesselect) + * 5.1.6.3 [RQ.SRS-006.RBAC.RequiredPrivileges.Create](#rqsrs-006rbacrequiredprivilegescreate) + * 5.1.6.4 [RQ.SRS-006.RBAC.RequiredPrivileges.Alter](#rqsrs-006rbacrequiredprivilegesalter) 
+ * 5.1.6.5 [RQ.SRS-006.RBAC.RequiredPrivileges.Drop](#rqsrs-006rbacrequiredprivilegesdrop) + * 5.1.6.6 [RQ.SRS-006.RBAC.RequiredPrivileges.Drop.Table](#rqsrs-006rbacrequiredprivilegesdroptable) + * 5.1.6.7 [RQ.SRS-006.RBAC.RequiredPrivileges.GrantRevoke](#rqsrs-006rbacrequiredprivilegesgrantrevoke) + * 5.1.6.8 [RQ.SRS-006.RBAC.RequiredPrivileges.Use](#rqsrs-006rbacrequiredprivilegesuse) + * 5.1.6.9 [RQ.SRS-006.RBAC.RequiredPrivileges.Admin](#rqsrs-006rbacrequiredprivilegesadmin) + * 5.1.7 [Partial Revokes](#partial-revokes) + * 5.1.7.1 [RQ.SRS-006.RBAC.PartialRevokes](#rqsrs-006rbacpartialrevokes) + * 5.1.8 [Settings Profile](#settings-profile) + * 5.1.8.1 [RQ.SRS-006.RBAC.SettingsProfile](#rqsrs-006rbacsettingsprofile) + * 5.1.8.2 [RQ.SRS-006.RBAC.SettingsProfile.Constraints](#rqsrs-006rbacsettingsprofileconstraints) + * 5.1.8.3 [RQ.SRS-006.RBAC.SettingsProfile.ShowCreate](#rqsrs-006rbacsettingsprofileshowcreate) + * 5.1.9 [Quotas](#quotas) + * 5.1.9.1 [RQ.SRS-006.RBAC.Quotas](#rqsrs-006rbacquotas) + * 5.1.9.2 [RQ.SRS-006.RBAC.Quotas.Keyed](#rqsrs-006rbacquotaskeyed) + * 5.1.9.3 [RQ.SRS-006.RBAC.Quotas.Queries](#rqsrs-006rbacquotasqueries) + * 5.1.9.4 [RQ.SRS-006.RBAC.Quotas.Errors](#rqsrs-006rbacquotaserrors) + * 5.1.9.5 [RQ.SRS-006.RBAC.Quotas.ResultRows](#rqsrs-006rbacquotasresultrows) + * 5.1.9.6 [RQ.SRS-006.RBAC.Quotas.ReadRows](#rqsrs-006rbacquotasreadrows) + * 5.1.9.7 [RQ.SRS-006.RBAC.Quotas.ResultBytes](#rqsrs-006rbacquotasresultbytes) + * 5.1.9.8 [RQ.SRS-006.RBAC.Quotas.ReadBytes](#rqsrs-006rbacquotasreadbytes) + * 5.1.9.9 [RQ.SRS-006.RBAC.Quotas.ExecutionTime](#rqsrs-006rbacquotasexecutiontime) + * 5.1.9.10 [RQ.SRS-006.RBAC.Quotas.ShowCreate](#rqsrs-006rbacquotasshowcreate) + * 5.1.10 [Row Policy](#row-policy) + * 5.1.10.1 [RQ.SRS-006.RBAC.RowPolicy](#rqsrs-006rbacrowpolicy) + * 5.1.10.2 [RQ.SRS-006.RBAC.RowPolicy.Condition](#rqsrs-006rbacrowpolicycondition) + * 5.1.10.3 [RQ.SRS-006.RBAC.RowPolicy.ShowCreate](#rqsrs-006rbacrowpolicyshowcreate) + * 5.2 [Specific](#specific) + * 5.2.10.1 [RQ.SRS-006.RBAC.User.Use.DefaultRole](#rqsrs-006rbacuserusedefaultrole) + * 5.2.10.2 [RQ.SRS-006.RBAC.User.Use.AllRolesWhenNoDefaultRole](#rqsrs-006rbacuseruseallroleswhennodefaultrole) + * 5.2.10.3 [RQ.SRS-006.RBAC.User.Create](#rqsrs-006rbacusercreate) + * 5.2.10.4 [RQ.SRS-006.RBAC.User.Create.IfNotExists](#rqsrs-006rbacusercreateifnotexists) + * 5.2.10.5 [RQ.SRS-006.RBAC.User.Create.Replace](#rqsrs-006rbacusercreatereplace) + * 5.2.10.6 [RQ.SRS-006.RBAC.User.Create.Password.NoPassword](#rqsrs-006rbacusercreatepasswordnopassword) + * 5.2.10.7 [RQ.SRS-006.RBAC.User.Create.Password.NoPassword.Login](#rqsrs-006rbacusercreatepasswordnopasswordlogin) + * 5.2.10.8 [RQ.SRS-006.RBAC.User.Create.Password.PlainText](#rqsrs-006rbacusercreatepasswordplaintext) + * 5.2.10.9 [RQ.SRS-006.RBAC.User.Create.Password.PlainText.Login](#rqsrs-006rbacusercreatepasswordplaintextlogin) + * 5.2.10.10 [RQ.SRS-006.RBAC.User.Create.Password.Sha256Password](#rqsrs-006rbacusercreatepasswordsha256password) + * 5.2.10.11 [RQ.SRS-006.RBAC.User.Create.Password.Sha256Password.Login](#rqsrs-006rbacusercreatepasswordsha256passwordlogin) + * 5.2.10.12 [RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash](#rqsrs-006rbacusercreatepasswordsha256hash) + * 5.2.10.13 [RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash.Login](#rqsrs-006rbacusercreatepasswordsha256hashlogin) + * 5.2.10.14 [RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password](#rqsrs-006rbacusercreatepassworddoublesha1password) + * 5.2.10.15 
[RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password.Login](#rqsrs-006rbacusercreatepassworddoublesha1passwordlogin) + * 5.2.10.16 [RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash](#rqsrs-006rbacusercreatepassworddoublesha1hash) + * 5.2.10.17 [RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash.Login](#rqsrs-006rbacusercreatepassworddoublesha1hashlogin) + * 5.2.10.18 [RQ.SRS-006.RBAC.User.Create.Host.Name](#rqsrs-006rbacusercreatehostname) + * 5.2.10.19 [RQ.SRS-006.RBAC.User.Create.Host.Regexp](#rqsrs-006rbacusercreatehostregexp) + * 5.2.10.20 [RQ.SRS-006.RBAC.User.Create.Host.IP](#rqsrs-006rbacusercreatehostip) + * 5.2.10.21 [RQ.SRS-006.RBAC.User.Create.Host.Any](#rqsrs-006rbacusercreatehostany) + * 5.2.10.22 [RQ.SRS-006.RBAC.User.Create.Host.None](#rqsrs-006rbacusercreatehostnone) + * 5.2.10.23 [RQ.SRS-006.RBAC.User.Create.Host.Local](#rqsrs-006rbacusercreatehostlocal) + * 5.2.10.24 [RQ.SRS-006.RBAC.User.Create.Host.Like](#rqsrs-006rbacusercreatehostlike) + * 5.2.10.25 [RQ.SRS-006.RBAC.User.Create.Host.Default](#rqsrs-006rbacusercreatehostdefault) + * 5.2.10.26 [RQ.SRS-006.RBAC.User.Create.DefaultRole](#rqsrs-006rbacusercreatedefaultrole) + * 5.2.10.27 [RQ.SRS-006.RBAC.User.Create.DefaultRole.None](#rqsrs-006rbacusercreatedefaultrolenone) + * 5.2.10.28 [RQ.SRS-006.RBAC.User.Create.DefaultRole.All](#rqsrs-006rbacusercreatedefaultroleall) + * 5.2.10.29 [RQ.SRS-006.RBAC.User.Create.Settings](#rqsrs-006rbacusercreatesettings) + * 5.2.10.30 [RQ.SRS-006.RBAC.User.Create.OnCluster](#rqsrs-006rbacusercreateoncluster) + * 5.2.10.31 [RQ.SRS-006.RBAC.User.Create.Syntax](#rqsrs-006rbacusercreatesyntax) + * 5.2.10.32 [RQ.SRS-006.RBAC.User.Alter](#rqsrs-006rbacuseralter) + * 5.2.10.33 [RQ.SRS-006.RBAC.User.Alter.OrderOfEvaluation](#rqsrs-006rbacuseralterorderofevaluation) + * 5.2.10.34 [RQ.SRS-006.RBAC.User.Alter.IfExists](#rqsrs-006rbacuseralterifexists) + * 5.2.10.35 [RQ.SRS-006.RBAC.User.Alter.Cluster](#rqsrs-006rbacuseraltercluster) + * 5.2.10.36 [RQ.SRS-006.RBAC.User.Alter.Rename](#rqsrs-006rbacuseralterrename) + * 5.2.10.37 [RQ.SRS-006.RBAC.User.Alter.Password.PlainText](#rqsrs-006rbacuseralterpasswordplaintext) + * 5.2.10.38 [RQ.SRS-006.RBAC.User.Alter.Password.Sha256Password](#rqsrs-006rbacuseralterpasswordsha256password) + * 5.2.10.39 [RQ.SRS-006.RBAC.User.Alter.Password.DoubleSha1Password](#rqsrs-006rbacuseralterpassworddoublesha1password) + * 5.2.10.40 [RQ.SRS-006.RBAC.User.Alter.Host.AddDrop](#rqsrs-006rbacuseralterhostadddrop) + * 5.2.10.41 [RQ.SRS-006.RBAC.User.Alter.Host.Local](#rqsrs-006rbacuseralterhostlocal) + * 5.2.10.42 [RQ.SRS-006.RBAC.User.Alter.Host.Name](#rqsrs-006rbacuseralterhostname) + * 5.2.10.43 [RQ.SRS-006.RBAC.User.Alter.Host.Regexp](#rqsrs-006rbacuseralterhostregexp) + * 5.2.10.44 [RQ.SRS-006.RBAC.User.Alter.Host.IP](#rqsrs-006rbacuseralterhostip) + * 5.2.10.45 [RQ.SRS-006.RBAC.User.Alter.Host.Like](#rqsrs-006rbacuseralterhostlike) + * 5.2.10.46 [RQ.SRS-006.RBAC.User.Alter.Host.Any](#rqsrs-006rbacuseralterhostany) + * 5.2.10.47 [RQ.SRS-006.RBAC.User.Alter.Host.None](#rqsrs-006rbacuseralterhostnone) + * 5.2.10.48 [RQ.SRS-006.RBAC.User.Alter.DefaultRole](#rqsrs-006rbacuseralterdefaultrole) + * 5.2.10.49 [RQ.SRS-006.RBAC.User.Alter.DefaultRole.All](#rqsrs-006rbacuseralterdefaultroleall) + * 5.2.10.50 [RQ.SRS-006.RBAC.User.Alter.DefaultRole.AllExcept](#rqsrs-006rbacuseralterdefaultroleallexcept) + * 5.2.10.51 [RQ.SRS-006.RBAC.User.Alter.Settings](#rqsrs-006rbacuseraltersettings) + * 5.2.10.52 
[RQ.SRS-006.RBAC.User.Alter.Settings.Min](#rqsrs-006rbacuseraltersettingsmin) + * 5.2.10.53 [RQ.SRS-006.RBAC.User.Alter.Settings.Max](#rqsrs-006rbacuseraltersettingsmax) + * 5.2.10.54 [RQ.SRS-006.RBAC.User.Alter.Settings.Profile](#rqsrs-006rbacuseraltersettingsprofile) + * 5.2.10.55 [RQ.SRS-006.RBAC.User.Alter.Syntax](#rqsrs-006rbacuseraltersyntax) + * 5.2.10.56 [RQ.SRS-006.RBAC.SetDefaultRole](#rqsrs-006rbacsetdefaultrole) + * 5.2.10.57 [RQ.SRS-006.RBAC.SetDefaultRole.CurrentUser](#rqsrs-006rbacsetdefaultrolecurrentuser) + * 5.2.10.58 [RQ.SRS-006.RBAC.SetDefaultRole.All](#rqsrs-006rbacsetdefaultroleall) + * 5.2.10.59 [RQ.SRS-006.RBAC.SetDefaultRole.AllExcept](#rqsrs-006rbacsetdefaultroleallexcept) + * 5.2.10.60 [RQ.SRS-006.RBAC.SetDefaultRole.None](#rqsrs-006rbacsetdefaultrolenone) + * 5.2.10.61 [RQ.SRS-006.RBAC.SetDefaultRole.Syntax](#rqsrs-006rbacsetdefaultrolesyntax) + * 5.2.10.62 [RQ.SRS-006.RBAC.SetRole](#rqsrs-006rbacsetrole) + * 5.2.10.63 [RQ.SRS-006.RBAC.SetRole.Default](#rqsrs-006rbacsetroledefault) + * 5.2.10.64 [RQ.SRS-006.RBAC.SetRole.None](#rqsrs-006rbacsetrolenone) + * 5.2.10.65 [RQ.SRS-006.RBAC.SetRole.All](#rqsrs-006rbacsetroleall) + * 5.2.10.66 [RQ.SRS-006.RBAC.SetRole.AllExcept](#rqsrs-006rbacsetroleallexcept) + * 5.2.10.67 [RQ.SRS-006.RBAC.SetRole.Syntax](#rqsrs-006rbacsetrolesyntax) + * 5.2.10.68 [RQ.SRS-006.RBAC.User.ShowCreateUser](#rqsrs-006rbacusershowcreateuser) + * 5.2.10.69 [RQ.SRS-006.RBAC.User.ShowCreateUser.For](#rqsrs-006rbacusershowcreateuserfor) + * 5.2.10.70 [RQ.SRS-006.RBAC.User.ShowCreateUser.Syntax](#rqsrs-006rbacusershowcreateusersyntax) + * 5.2.10.71 [RQ.SRS-006.RBAC.User.Drop](#rqsrs-006rbacuserdrop) + * 5.2.10.72 [RQ.SRS-006.RBAC.User.Drop.IfExists](#rqsrs-006rbacuserdropifexists) + * 5.2.10.73 [RQ.SRS-006.RBAC.User.Drop.OnCluster](#rqsrs-006rbacuserdroponcluster) + * 5.2.10.74 [RQ.SRS-006.RBAC.User.Drop.Syntax](#rqsrs-006rbacuserdropsyntax) + * 5.2.10.75 [RQ.SRS-006.RBAC.Role.Create](#rqsrs-006rbacrolecreate) + * 5.2.10.76 [RQ.SRS-006.RBAC.Role.Create.IfNotExists](#rqsrs-006rbacrolecreateifnotexists) + * 5.2.10.77 [RQ.SRS-006.RBAC.Role.Create.Replace](#rqsrs-006rbacrolecreatereplace) + * 5.2.10.78 [RQ.SRS-006.RBAC.Role.Create.Settings](#rqsrs-006rbacrolecreatesettings) + * 5.2.10.79 [RQ.SRS-006.RBAC.Role.Create.Syntax](#rqsrs-006rbacrolecreatesyntax) + * 5.2.10.80 [RQ.SRS-006.RBAC.Role.Create.Effect](#rqsrs-006rbacrolecreateeffect) + * 5.2.10.81 [RQ.SRS-006.RBAC.Role.Alter](#rqsrs-006rbacrolealter) + * 5.2.10.82 [RQ.SRS-006.RBAC.Role.Alter.IfExists](#rqsrs-006rbacrolealterifexists) + * 5.2.10.83 [RQ.SRS-006.RBAC.Role.Alter.Cluster](#rqsrs-006rbacrolealtercluster) + * 5.2.10.84 [RQ.SRS-006.RBAC.Role.Alter.Rename](#rqsrs-006rbacrolealterrename) + * 5.2.10.85 [RQ.SRS-006.RBAC.Role.Alter.Settings](#rqsrs-006rbacrolealtersettings) + * 5.2.10.86 [RQ.SRS-006.RBAC.Role.Alter.Effect](#rqsrs-006rbacrolealtereffect) + * 5.2.10.87 [RQ.SRS-006.RBAC.Role.Alter.Syntax](#rqsrs-006rbacrolealtersyntax) + * 5.2.10.88 [RQ.SRS-006.RBAC.Role.Drop](#rqsrs-006rbacroledrop) + * 5.2.10.89 [RQ.SRS-006.RBAC.Role.Drop.IfExists](#rqsrs-006rbacroledropifexists) + * 5.2.10.90 [RQ.SRS-006.RBAC.Role.Drop.Cluster](#rqsrs-006rbacroledropcluster) + * 5.2.10.91 [RQ.SRS-006.RBAC.Role.Drop.Effect](#rqsrs-006rbacroledropeffect) + * 5.2.10.92 [RQ.SRS-006.RBAC.Role.Drop.Syntax](#rqsrs-006rbacroledropsyntax) + * 5.2.10.93 [RQ.SRS-006.RBAC.Role.ShowCreate](#rqsrs-006rbacroleshowcreate) + * 5.2.10.94 [RQ.SRS-006.RBAC.Role.ShowCreate.Syntax](#rqsrs-006rbacroleshowcreatesyntax) + * 5.2.10.95 
[RQ.SRS-006.RBAC.Grant.Privilege.To](#rqsrs-006rbacgrantprivilegeto) + * 5.2.10.96 [RQ.SRS-006.RBAC.Grant.Privilege.To.Effect](#rqsrs-006rbacgrantprivilegetoeffect) + * 5.2.10.97 [RQ.SRS-006.RBAC.Grant.Privilege.ToCurrentUser](#rqsrs-006rbacgrantprivilegetocurrentuser) + * 5.2.10.98 [RQ.SRS-006.RBAC.Grant.Privilege.Select](#rqsrs-006rbacgrantprivilegeselect) + * 5.2.10.99 [RQ.SRS-006.RBAC.Grant.Privilege.Select.Effect](#rqsrs-006rbacgrantprivilegeselecteffect) + * 5.2.10.100 [RQ.SRS-006.RBAC.Grant.Privilege.SelectColumns](#rqsrs-006rbacgrantprivilegeselectcolumns) + * 5.2.10.101 [RQ.SRS-006.RBAC.Grant.Privilege.SelectColumns.Effect](#rqsrs-006rbacgrantprivilegeselectcolumnseffect) + * 5.2.10.102 [RQ.SRS-006.RBAC.Grant.Privilege.Insert](#rqsrs-006rbacgrantprivilegeinsert) + * 5.2.10.103 [RQ.SRS-006.RBAC.Grant.Privilege.Insert.Effect](#rqsrs-006rbacgrantprivilegeinserteffect) + * 5.2.10.104 [RQ.SRS-006.RBAC.Grant.Privilege.Alter](#rqsrs-006rbacgrantprivilegealter) + * 5.2.10.105 [RQ.SRS-006.RBAC.Grant.Privilege.Alter.Effect](#rqsrs-006rbacgrantprivilegealtereffect) + * 5.2.10.106 [RQ.SRS-006.RBAC.Grant.Privilege.Create](#rqsrs-006rbacgrantprivilegecreate) + * 5.2.10.107 [RQ.SRS-006.RBAC.Grant.Privilege.Create.Effect](#rqsrs-006rbacgrantprivilegecreateeffect) + * 5.2.10.108 [RQ.SRS-006.RBAC.Grant.Privilege.Drop](#rqsrs-006rbacgrantprivilegedrop) + * 5.2.10.109 [RQ.SRS-006.RBAC.Grant.Privilege.Drop.Effect](#rqsrs-006rbacgrantprivilegedropeffect) + * 5.2.10.110 [RQ.SRS-006.RBAC.Grant.Privilege.Truncate](#rqsrs-006rbacgrantprivilegetruncate) + * 5.2.10.111 [RQ.SRS-006.RBAC.Grant.Privilege.Truncate.Effect](#rqsrs-006rbacgrantprivilegetruncateeffect) + * 5.2.10.112 [RQ.SRS-006.RBAC.Grant.Privilege.Optimize](#rqsrs-006rbacgrantprivilegeoptimize) + * 5.2.10.113 [RQ.SRS-006.RBAC.Grant.Privilege.Optimize.Effect](#rqsrs-006rbacgrantprivilegeoptimizeeffect) + * 5.2.10.114 [RQ.SRS-006.RBAC.Grant.Privilege.Show](#rqsrs-006rbacgrantprivilegeshow) + * 5.2.10.115 [RQ.SRS-006.RBAC.Grant.Privilege.Show.Effect](#rqsrs-006rbacgrantprivilegeshoweffect) + * 5.2.10.116 [RQ.SRS-006.RBAC.Grant.Privilege.KillQuery](#rqsrs-006rbacgrantprivilegekillquery) + * 5.2.10.117 [RQ.SRS-006.RBAC.Grant.Privilege.KillQuery.Effect](#rqsrs-006rbacgrantprivilegekillqueryeffect) + * 5.2.10.118 [RQ.SRS-006.RBAC.Grant.Privilege.AccessManagement](#rqsrs-006rbacgrantprivilegeaccessmanagement) + * 5.2.10.119 [RQ.SRS-006.RBAC.Grant.Privilege.AccessManagement.Effect](#rqsrs-006rbacgrantprivilegeaccessmanagementeffect) + * 5.2.10.120 [RQ.SRS-006.RBAC.Grant.Privilege.System](#rqsrs-006rbacgrantprivilegesystem) + * 5.2.10.121 [RQ.SRS-006.RBAC.Grant.Privilege.System.Effect](#rqsrs-006rbacgrantprivilegesystemeffect) + * 5.2.10.122 [RQ.SRS-006.RBAC.Grant.Privilege.Introspection](#rqsrs-006rbacgrantprivilegeintrospection) + * 5.2.10.123 [RQ.SRS-006.RBAC.Grant.Privilege.Introspection.Effect](#rqsrs-006rbacgrantprivilegeintrospectioneffect) + * 5.2.10.124 [RQ.SRS-006.RBAC.Grant.Privilege.Sources](#rqsrs-006rbacgrantprivilegesources) + * 5.2.10.125 [RQ.SRS-006.RBAC.Grant.Privilege.Sources.Effect](#rqsrs-006rbacgrantprivilegesourceseffect) + * 5.2.10.126 [RQ.SRS-006.RBAC.Grant.Privilege.DictGet](#rqsrs-006rbacgrantprivilegedictget) + * 5.2.10.127 [RQ.SRS-006.RBAC.Grant.Privilege.DictGet.Effect](#rqsrs-006rbacgrantprivilegedictgeteffect) + * 5.2.10.128 [RQ.SRS-006.RBAC.Grant.Privilege.None](#rqsrs-006rbacgrantprivilegenone) + * 5.2.10.129 [RQ.SRS-006.RBAC.Grant.Privilege.None.Effect](#rqsrs-006rbacgrantprivilegenoneeffect) + * 5.2.10.130 
[RQ.SRS-006.RBAC.Grant.Privilege.All](#rqsrs-006rbacgrantprivilegeall) + * 5.2.10.131 [RQ.SRS-006.RBAC.Grant.Privilege.All.Effect](#rqsrs-006rbacgrantprivilegealleffect) + * 5.2.10.132 [RQ.SRS-006.RBAC.Grant.Privilege.GrantOption](#rqsrs-006rbacgrantprivilegegrantoption) + * 5.2.10.133 [RQ.SRS-006.RBAC.Grant.Privilege.GrantOption.Effect](#rqsrs-006rbacgrantprivilegegrantoptioneffect) + * 5.2.10.134 [RQ.SRS-006.RBAC.Grant.Privilege.On](#rqsrs-006rbacgrantprivilegeon) + * 5.2.10.135 [RQ.SRS-006.RBAC.Grant.Privilege.On.Effect](#rqsrs-006rbacgrantprivilegeoneffect) + * 5.2.10.136 [RQ.SRS-006.RBAC.Grant.Privilege.PrivilegeColumns](#rqsrs-006rbacgrantprivilegeprivilegecolumns) + * 5.2.10.137 [RQ.SRS-006.RBAC.Grant.Privilege.PrivilegeColumns.Effect](#rqsrs-006rbacgrantprivilegeprivilegecolumnseffect) + * 5.2.10.138 [RQ.SRS-006.RBAC.Grant.Privilege.OnCluster](#rqsrs-006rbacgrantprivilegeoncluster) + * 5.2.10.139 [RQ.SRS-006.RBAC.Grant.Privilege.Syntax](#rqsrs-006rbacgrantprivilegesyntax) + * 5.2.10.140 [RQ.SRS-006.RBAC.Revoke.Privilege.Cluster](#rqsrs-006rbacrevokeprivilegecluster) + * 5.2.10.141 [RQ.SRS-006.RBAC.Revoke.Privilege.Cluster.Effect](#rqsrs-006rbacrevokeprivilegeclustereffect) + * 5.2.10.142 [RQ.SRS-006.RBAC.Revoke.Privilege.Any](#rqsrs-006rbacrevokeprivilegeany) + * 5.2.10.143 [RQ.SRS-006.RBAC.Revoke.Privilege.Any.Effect](#rqsrs-006rbacrevokeprivilegeanyeffect) + * 5.2.10.144 [RQ.SRS-006.RBAC.Revoke.Privilege.Select](#rqsrs-006rbacrevokeprivilegeselect) + * 5.2.10.145 [RQ.SRS-006.RBAC.Revoke.Privilege.Select.Effect](#rqsrs-006rbacrevokeprivilegeselecteffect) + * 5.2.10.146 [RQ.SRS-006.RBAC.Revoke.Privilege.Insert](#rqsrs-006rbacrevokeprivilegeinsert) + * 5.2.10.147 [RQ.SRS-006.RBAC.Revoke.Privilege.Insert.Effect](#rqsrs-006rbacrevokeprivilegeinserteffect) + * 5.2.10.148 [RQ.SRS-006.RBAC.Revoke.Privilege.Alter](#rqsrs-006rbacrevokeprivilegealter) + * 5.2.10.149 [RQ.SRS-006.RBAC.Revoke.Privilege.Alter.Effect](#rqsrs-006rbacrevokeprivilegealtereffect) + * 5.2.10.150 [RQ.SRS-006.RBAC.Revoke.Privilege.Create](#rqsrs-006rbacrevokeprivilegecreate) + * 5.2.10.151 [RQ.SRS-006.RBAC.Revoke.Privilege.Create.Effect](#rqsrs-006rbacrevokeprivilegecreateeffect) + * 5.2.10.152 [RQ.SRS-006.RBAC.Revoke.Privilege.Drop](#rqsrs-006rbacrevokeprivilegedrop) + * 5.2.10.153 [RQ.SRS-006.RBAC.Revoke.Privilege.Drop.Effect](#rqsrs-006rbacrevokeprivilegedropeffect) + * 5.2.10.154 [RQ.SRS-006.RBAC.Revoke.Privilege.Truncate](#rqsrs-006rbacrevokeprivilegetruncate) + * 5.2.10.155 [RQ.SRS-006.RBAC.Revoke.Privilege.Truncate.Effect](#rqsrs-006rbacrevokeprivilegetruncateeffect) + * 5.2.10.156 [RQ.SRS-006.RBAC.Revoke.Privilege.Optimize](#rqsrs-006rbacrevokeprivilegeoptimize) + * 5.2.10.157 [RQ.SRS-006.RBAC.Revoke.Privilege.Optimize.Effect](#rqsrs-006rbacrevokeprivilegeoptimizeeffect) + * 5.2.10.158 [RQ.SRS-006.RBAC.Revoke.Privilege.Show](#rqsrs-006rbacrevokeprivilegeshow) + * 5.2.10.159 [RQ.SRS-006.RBAC.Revoke.Privilege.Show.Effect](#rqsrs-006rbacrevokeprivilegeshoweffect) + * 5.2.10.160 [RQ.SRS-006.RBAC.Revoke.Privilege.KillQuery](#rqsrs-006rbacrevokeprivilegekillquery) + * 5.2.10.161 [RQ.SRS-006.RBAC.Revoke.Privilege.KillQuery.Effect](#rqsrs-006rbacrevokeprivilegekillqueryeffect) + * 5.2.10.162 [RQ.SRS-006.RBAC.Revoke.Privilege.AccessManagement](#rqsrs-006rbacrevokeprivilegeaccessmanagement) + * 5.2.10.163 [RQ.SRS-006.RBAC.Revoke.Privilege.AccessManagement.Effect](#rqsrs-006rbacrevokeprivilegeaccessmanagementeffect) + * 5.2.10.164 [RQ.SRS-006.RBAC.Revoke.Privilege.System](#rqsrs-006rbacrevokeprivilegesystem) + * 
5.2.10.165 [RQ.SRS-006.RBAC.Revoke.Privilege.System.Effect](#rqsrs-006rbacrevokeprivilegesystemeffect) + * 5.2.10.166 [RQ.SRS-006.RBAC.Revoke.Privilege.Introspection](#rqsrs-006rbacrevokeprivilegeintrospection) + * 5.2.10.167 [RQ.SRS-006.RBAC.Revoke.Privilege.Introspection.Effect](#rqsrs-006rbacrevokeprivilegeintrospectioneffect) + * 5.2.10.168 [RQ.SRS-006.RBAC.Revoke.Privilege.Sources](#rqsrs-006rbacrevokeprivilegesources) + * 5.2.10.169 [RQ.SRS-006.RBAC.Revoke.Privilege.Sources.Effect](#rqsrs-006rbacrevokeprivilegesourceseffect) + * 5.2.10.170 [RQ.SRS-006.RBAC.Revoke.Privilege.DictGet](#rqsrs-006rbacrevokeprivilegedictget) + * 5.2.10.171 [RQ.SRS-006.RBAC.Revoke.Privilege.DictGet.Effect](#rqsrs-006rbacrevokeprivilegedictgeteffect) + * 5.2.10.172 [RQ.SRS-006.RBAC.Revoke.Privilege.PrivelegeColumns](#rqsrs-006rbacrevokeprivilegeprivelegecolumns) + * 5.2.10.173 [RQ.SRS-006.RBAC.Revoke.Privilege.PrivelegeColumns.Effect](#rqsrs-006rbacrevokeprivilegeprivelegecolumnseffect) + * 5.2.10.174 [RQ.SRS-006.RBAC.Revoke.Privilege.Multiple](#rqsrs-006rbacrevokeprivilegemultiple) + * 5.2.10.175 [RQ.SRS-006.RBAC.Revoke.Privilege.Multiple.Effect](#rqsrs-006rbacrevokeprivilegemultipleeffect) + * 5.2.10.176 [RQ.SRS-006.RBAC.Revoke.Privilege.All](#rqsrs-006rbacrevokeprivilegeall) + * 5.2.10.177 [RQ.SRS-006.RBAC.Revoke.Privilege.All.Effect](#rqsrs-006rbacrevokeprivilegealleffect) + * 5.2.10.178 [RQ.SRS-006.RBAC.Revoke.Privilege.None](#rqsrs-006rbacrevokeprivilegenone) + * 5.2.10.179 [RQ.SRS-006.RBAC.Revoke.Privilege.None.Effect](#rqsrs-006rbacrevokeprivilegenoneeffect) + * 5.2.10.180 [RQ.SRS-006.RBAC.Revoke.Privilege.On](#rqsrs-006rbacrevokeprivilegeon) + * 5.2.10.181 [RQ.SRS-006.RBAC.Revoke.Privilege.On.Effect](#rqsrs-006rbacrevokeprivilegeoneffect) + * 5.2.10.182 [RQ.SRS-006.RBAC.Revoke.Privilege.From](#rqsrs-006rbacrevokeprivilegefrom) + * 5.2.10.183 [RQ.SRS-006.RBAC.Revoke.Privilege.From.Effect](#rqsrs-006rbacrevokeprivilegefromeffect) + * 5.2.10.184 [RQ.SRS-006.RBAC.Revoke.Privilege.Syntax](#rqsrs-006rbacrevokeprivilegesyntax) + * 5.2.10.185 [RQ.SRS-006.RBAC.PartialRevoke.Syntax](#rqsrs-006rbacpartialrevokesyntax) + * 5.2.10.186 [RQ.SRS-006.RBAC.PartialRevoke.Effect](#rqsrs-006rbacpartialrevokeeffect) + * 5.2.10.187 [RQ.SRS-006.RBAC.Grant.Role](#rqsrs-006rbacgrantrole) + * 5.2.10.188 [RQ.SRS-006.RBAC.Grant.Role.Effect](#rqsrs-006rbacgrantroleeffect) + * 5.2.10.189 [RQ.SRS-006.RBAC.Grant.Role.CurrentUser](#rqsrs-006rbacgrantrolecurrentuser) + * 5.2.10.190 [RQ.SRS-006.RBAC.Grant.Role.CurrentUser.Effect](#rqsrs-006rbacgrantrolecurrentusereffect) + * 5.2.10.191 [RQ.SRS-006.RBAC.Grant.Role.AdminOption](#rqsrs-006rbacgrantroleadminoption) + * 5.2.10.192 [RQ.SRS-006.RBAC.Grant.Role.AdminOption.Effect](#rqsrs-006rbacgrantroleadminoptioneffect) + * 5.2.10.193 [RQ.SRS-006.RBAC.Grant.Role.OnCluster](#rqsrs-006rbacgrantroleoncluster) + * 5.2.10.194 [RQ.SRS-006.RBAC.Grant.Role.Syntax](#rqsrs-006rbacgrantrolesyntax) + * 5.2.10.195 [RQ.SRS-006.RBAC.Revoke.Role](#rqsrs-006rbacrevokerole) + * 5.2.10.196 [RQ.SRS-006.RBAC.Revoke.Role.Effect](#rqsrs-006rbacrevokeroleeffect) + * 5.2.10.197 [RQ.SRS-006.RBAC.Revoke.Role.Keywords](#rqsrs-006rbacrevokerolekeywords) + * 5.2.10.198 [RQ.SRS-006.RBAC.Revoke.Role.Keywords.Effect](#rqsrs-006rbacrevokerolekeywordseffect) + * 5.2.10.199 [RQ.SRS-006.RBAC.Revoke.Role.Cluster](#rqsrs-006rbacrevokerolecluster) + * 5.2.10.200 [RQ.SRS-006.RBAC.Revoke.Role.Cluster.Effect](#rqsrs-006rbacrevokeroleclustereffect) + * 5.2.10.201 [RQ.SRS-006.RBAC.Revoke.AdminOption](#rqsrs-006rbacrevokeadminoption) + 
* 5.2.10.202 [RQ.SRS-006.RBAC.Revoke.AdminOption.Effect](#rqsrs-006rbacrevokeadminoptioneffect) + * 5.2.10.203 [RQ.SRS-006.RBAC.Revoke.Role.Syntax](#rqsrs-006rbacrevokerolesyntax) + * 5.2.10.204 [RQ.SRS-006.RBAC.Show.Grants](#rqsrs-006rbacshowgrants) + * 5.2.10.205 [RQ.SRS-006.RBAC.Show.Grants.For](#rqsrs-006rbacshowgrantsfor) + * 5.2.10.206 [RQ.SRS-006.RBAC.Show.Grants.Syntax](#rqsrs-006rbacshowgrantssyntax) + * 5.2.10.207 [RQ.SRS-006.RBAC.SettingsProfile.Create](#rqsrs-006rbacsettingsprofilecreate) + * 5.2.10.208 [RQ.SRS-006.RBAC.SettingsProfile.Create.Effect](#rqsrs-006rbacsettingsprofilecreateeffect) + * 5.2.10.209 [RQ.SRS-006.RBAC.SettingsProfile.Create.IfNotExists](#rqsrs-006rbacsettingsprofilecreateifnotexists) + * 5.2.10.210 [RQ.SRS-006.RBAC.SettingsProfile.Create.Replace](#rqsrs-006rbacsettingsprofilecreatereplace) + * 5.2.10.211 [RQ.SRS-006.RBAC.SettingsProfile.Create.Variables](#rqsrs-006rbacsettingsprofilecreatevariables) + * 5.2.10.212 [RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Value](#rqsrs-006rbacsettingsprofilecreatevariablesvalue) + * 5.2.10.213 [RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Value.Effect](#rqsrs-006rbacsettingsprofilecreatevariablesvalueeffect) + * 5.2.10.214 [RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Constraints](#rqsrs-006rbacsettingsprofilecreatevariablesconstraints) + * 5.2.10.215 [RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Constraints.Effect](#rqsrs-006rbacsettingsprofilecreatevariablesconstraintseffect) + * 5.2.10.216 [RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment](#rqsrs-006rbacsettingsprofilecreateassignment) + * 5.2.10.217 [RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.None](#rqsrs-006rbacsettingsprofilecreateassignmentnone) + * 5.2.10.218 [RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.All](#rqsrs-006rbacsettingsprofilecreateassignmentall) + * 5.2.10.219 [RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.AllExcept](#rqsrs-006rbacsettingsprofilecreateassignmentallexcept) + * 5.2.10.220 [RQ.SRS-006.RBAC.SettingsProfile.Create.Inherit](#rqsrs-006rbacsettingsprofilecreateinherit) + * 5.2.10.221 [RQ.SRS-006.RBAC.SettingsProfile.Create.OnCluster](#rqsrs-006rbacsettingsprofilecreateoncluster) + * 5.2.10.222 [RQ.SRS-006.RBAC.SettingsProfile.Create.Syntax](#rqsrs-006rbacsettingsprofilecreatesyntax) + * 5.2.10.223 [RQ.SRS-006.RBAC.SettingsProfile.Alter](#rqsrs-006rbacsettingsprofilealter) + * 5.2.10.224 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Effect](#rqsrs-006rbacsettingsprofilealtereffect) + * 5.2.10.225 [RQ.SRS-006.RBAC.SettingsProfile.Alter.IfExists](#rqsrs-006rbacsettingsprofilealterifexists) + * 5.2.10.226 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Rename](#rqsrs-006rbacsettingsprofilealterrename) + * 5.2.10.227 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables](#rqsrs-006rbacsettingsprofilealtervariables) + * 5.2.10.228 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Value](#rqsrs-006rbacsettingsprofilealtervariablesvalue) + * 5.2.10.229 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Value.Effect](#rqsrs-006rbacsettingsprofilealtervariablesvalueeffect) + * 5.2.10.230 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Constraints](#rqsrs-006rbacsettingsprofilealtervariablesconstraints) + * 5.2.10.231 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Constraints.Effect](#rqsrs-006rbacsettingsprofilealtervariablesconstraintseffect) + * 5.2.10.232 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment](#rqsrs-006rbacsettingsprofilealterassignment) + * 5.2.10.233 
[RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.Effect](#rqsrs-006rbacsettingsprofilealterassignmenteffect) + * 5.2.10.234 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.None](#rqsrs-006rbacsettingsprofilealterassignmentnone) + * 5.2.10.235 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.All](#rqsrs-006rbacsettingsprofilealterassignmentall) + * 5.2.10.236 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.AllExcept](#rqsrs-006rbacsettingsprofilealterassignmentallexcept) + * 5.2.10.237 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.Inherit](#rqsrs-006rbacsettingsprofilealterassignmentinherit) + * 5.2.10.238 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.OnCluster](#rqsrs-006rbacsettingsprofilealterassignmentoncluster) + * 5.2.10.239 [RQ.SRS-006.RBAC.SettingsProfile.Alter.Syntax](#rqsrs-006rbacsettingsprofilealtersyntax) + * 5.2.10.240 [RQ.SRS-006.RBAC.SettingsProfile.Drop](#rqsrs-006rbacsettingsprofiledrop) + * 5.2.10.241 [RQ.SRS-006.RBAC.SettingsProfile.Drop.Effect](#rqsrs-006rbacsettingsprofiledropeffect) + * 5.2.10.242 [RQ.SRS-006.RBAC.SettingsProfile.Drop.IfExists](#rqsrs-006rbacsettingsprofiledropifexists) + * 5.2.10.243 [RQ.SRS-006.RBAC.SettingsProfile.Drop.OnCluster](#rqsrs-006rbacsettingsprofiledroponcluster) + * 5.2.10.244 [RQ.SRS-006.RBAC.SettingsProfile.Drop.Syntax](#rqsrs-006rbacsettingsprofiledropsyntax) + * 5.2.10.245 [RQ.SRS-006.RBAC.SettingsProfile.ShowCreateSettingsProfile](#rqsrs-006rbacsettingsprofileshowcreatesettingsprofile) + * 5.2.10.246 [RQ.SRS-006.RBAC.Quota.Create](#rqsrs-006rbacquotacreate) + * 5.2.10.247 [RQ.SRS-006.RBAC.Quota.Create.Effect](#rqsrs-006rbacquotacreateeffect) + * 5.2.10.248 [RQ.SRS-006.RBAC.Quota.Create.IfNotExists](#rqsrs-006rbacquotacreateifnotexists) + * 5.2.10.249 [RQ.SRS-006.RBAC.Quota.Create.Replace](#rqsrs-006rbacquotacreatereplace) + * 5.2.10.250 [RQ.SRS-006.RBAC.Quota.Create.Cluster](#rqsrs-006rbacquotacreatecluster) + * 5.2.10.251 [RQ.SRS-006.RBAC.Quota.Create.Interval](#rqsrs-006rbacquotacreateinterval) + * 5.2.10.252 [RQ.SRS-006.RBAC.Quota.Create.Interval.Randomized](#rqsrs-006rbacquotacreateintervalrandomized) + * 5.2.10.253 [RQ.SRS-006.RBAC.Quota.Create.Queries](#rqsrs-006rbacquotacreatequeries) + * 5.2.10.254 [RQ.SRS-006.RBAC.Quota.Create.Errors](#rqsrs-006rbacquotacreateerrors) + * 5.2.10.255 [RQ.SRS-006.RBAC.Quota.Create.ResultRows](#rqsrs-006rbacquotacreateresultrows) + * 5.2.10.256 [RQ.SRS-006.RBAC.Quota.Create.ReadRows](#rqsrs-006rbacquotacreatereadrows) + * 5.2.10.257 [RQ.SRS-006.RBAC.Quota.Create.ResultBytes](#rqsrs-006rbacquotacreateresultbytes) + * 5.2.10.258 [RQ.SRS-006.RBAC.Quota.Create.ReadBytes](#rqsrs-006rbacquotacreatereadbytes) + * 5.2.10.259 [RQ.SRS-006.RBAC.Quota.Create.ExecutionTime](#rqsrs-006rbacquotacreateexecutiontime) + * 5.2.10.260 [RQ.SRS-006.RBAC.Quota.Create.NoLimits](#rqsrs-006rbacquotacreatenolimits) + * 5.2.10.261 [RQ.SRS-006.RBAC.Quota.Create.TrackingOnly](#rqsrs-006rbacquotacreatetrackingonly) + * 5.2.10.262 [RQ.SRS-006.RBAC.Quota.Create.KeyedBy](#rqsrs-006rbacquotacreatekeyedby) + * 5.2.10.263 [RQ.SRS-006.RBAC.Quota.Create.KeyedByOptions](#rqsrs-006rbacquotacreatekeyedbyoptions) + * 5.2.10.264 [RQ.SRS-006.RBAC.Quota.Create.Assignment](#rqsrs-006rbacquotacreateassignment) + * 5.2.10.265 [RQ.SRS-006.RBAC.Quota.Create.Assignment.None](#rqsrs-006rbacquotacreateassignmentnone) + * 5.2.10.266 [RQ.SRS-006.RBAC.Quota.Create.Assignment.All](#rqsrs-006rbacquotacreateassignmentall) + * 5.2.10.267 [RQ.SRS-006.RBAC.Quota.Create.Assignment.Except](#rqsrs-006rbacquotacreateassignmentexcept) + * 
5.2.10.268 [RQ.SRS-006.RBAC.Quota.Create.Syntax](#rqsrs-006rbacquotacreatesyntax) + * 5.2.10.269 [RQ.SRS-006.RBAC.Quota.Alter](#rqsrs-006rbacquotaalter) + * 5.2.10.270 [RQ.SRS-006.RBAC.Quota.Alter.Effect](#rqsrs-006rbacquotaaltereffect) + * 5.2.10.271 [RQ.SRS-006.RBAC.Quota.Alter.IfExists](#rqsrs-006rbacquotaalterifexists) + * 5.2.10.272 [RQ.SRS-006.RBAC.Quota.Alter.Rename](#rqsrs-006rbacquotaalterrename) + * 5.2.10.273 [RQ.SRS-006.RBAC.Quota.Alter.Cluster](#rqsrs-006rbacquotaaltercluster) + * 5.2.10.274 [RQ.SRS-006.RBAC.Quota.Alter.Interval](#rqsrs-006rbacquotaalterinterval) + * 5.2.10.275 [RQ.SRS-006.RBAC.Quota.Alter.Interval.Randomized](#rqsrs-006rbacquotaalterintervalrandomized) + * 5.2.10.276 [RQ.SRS-006.RBAC.Quota.Alter.Queries](#rqsrs-006rbacquotaalterqueries) + * 5.2.10.277 [RQ.SRS-006.RBAC.Quota.Alter.Errors](#rqsrs-006rbacquotaaltererrors) + * 5.2.10.278 [RQ.SRS-006.RBAC.Quota.Alter.ResultRows](#rqsrs-006rbacquotaalterresultrows) + * 5.2.10.279 [RQ.SRS-006.RBAC.Quota.Alter.ReadRows](#rqsrs-006rbacquotaalterreadrows) + * 5.2.10.280 [RQ.SRS-006.RBAC.Quota.ALter.ResultBytes](#rqsrs-006rbacquotaalterresultbytes) + * 5.2.10.281 [RQ.SRS-006.RBAC.Quota.Alter.ReadBytes](#rqsrs-006rbacquotaalterreadbytes) + * 5.2.10.282 [RQ.SRS-006.RBAC.Quota.Alter.ExecutionTime](#rqsrs-006rbacquotaalterexecutiontime) + * 5.2.10.283 [RQ.SRS-006.RBAC.Quota.Alter.NoLimits](#rqsrs-006rbacquotaalternolimits) + * 5.2.10.284 [RQ.SRS-006.RBAC.Quota.Alter.TrackingOnly](#rqsrs-006rbacquotaaltertrackingonly) + * 5.2.10.285 [RQ.SRS-006.RBAC.Quota.Alter.KeyedBy](#rqsrs-006rbacquotaalterkeyedby) + * 5.2.10.286 [RQ.SRS-006.RBAC.Quota.Alter.KeyedByOptions](#rqsrs-006rbacquotaalterkeyedbyoptions) + * 5.2.10.287 [RQ.SRS-006.RBAC.Quota.Alter.Assignment](#rqsrs-006rbacquotaalterassignment) + * 5.2.10.288 [RQ.SRS-006.RBAC.Quota.Alter.Assignment.None](#rqsrs-006rbacquotaalterassignmentnone) + * 5.2.10.289 [RQ.SRS-006.RBAC.Quota.Alter.Assignment.All](#rqsrs-006rbacquotaalterassignmentall) + * 5.2.10.290 [RQ.SRS-006.RBAC.Quota.Alter.Assignment.Except](#rqsrs-006rbacquotaalterassignmentexcept) + * 5.2.10.291 [RQ.SRS-006.RBAC.Quota.Alter.Syntax](#rqsrs-006rbacquotaaltersyntax) + * 5.2.10.292 [RQ.SRS-006.RBAC.Quota.Drop](#rqsrs-006rbacquotadrop) + * 5.2.10.293 [RQ.SRS-006.RBAC.Quota.Drop.Effect](#rqsrs-006rbacquotadropeffect) + * 5.2.10.294 [RQ.SRS-006.RBAC.Quota.Drop.IfExists](#rqsrs-006rbacquotadropifexists) + * 5.2.10.295 [RQ.SRS-006.RBAC.Quota.Drop.Cluster](#rqsrs-006rbacquotadropcluster) + * 5.2.10.296 [RQ.SRS-006.RBAC.Quota.Drop.Syntax](#rqsrs-006rbacquotadropsyntax) + * 5.2.10.297 [RQ.SRS-006.RBAC.Quota.ShowQuotas](#rqsrs-006rbacquotashowquotas) + * 5.2.10.298 [RQ.SRS-006.RBAC.Quota.ShowQuotas.IntoOutfile](#rqsrs-006rbacquotashowquotasintooutfile) + * 5.2.10.299 [RQ.SRS-006.RBAC.Quota.ShowQuotas.Format](#rqsrs-006rbacquotashowquotasformat) + * 5.2.10.300 [RQ.SRS-006.RBAC.Quota.ShowQuotas.Settings](#rqsrs-006rbacquotashowquotassettings) + * 5.2.10.301 [RQ.SRS-006.RBAC.Quota.ShowQuotas.Syntax](#rqsrs-006rbacquotashowquotassyntax) + * 5.2.10.302 [RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Name](#rqsrs-006rbacquotashowcreatequotaname) + * 5.2.10.303 [RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Current](#rqsrs-006rbacquotashowcreatequotacurrent) + * 5.2.10.304 [RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Syntax](#rqsrs-006rbacquotashowcreatequotasyntax) + * 5.2.10.305 [RQ.SRS-006.RBAC.RowPolicy.Create](#rqsrs-006rbacrowpolicycreate) + * 5.2.10.306 [RQ.SRS-006.RBAC.RowPolicy.Create.Effect](#rqsrs-006rbacrowpolicycreateeffect) + * 5.2.10.307 
[RQ.SRS-006.RBAC.RowPolicy.Create.IfNotExists](#rqsrs-006rbacrowpolicycreateifnotexists) + * 5.2.10.308 [RQ.SRS-006.RBAC.RowPolicy.Create.Replace](#rqsrs-006rbacrowpolicycreatereplace) + * 5.2.10.309 [RQ.SRS-006.RBAC.RowPolicy.Create.OnCluster](#rqsrs-006rbacrowpolicycreateoncluster) + * 5.2.10.310 [RQ.SRS-006.RBAC.RowPolicy.Create.On](#rqsrs-006rbacrowpolicycreateon) + * 5.2.10.311 [RQ.SRS-006.RBAC.RowPolicy.Create.Access](#rqsrs-006rbacrowpolicycreateaccess) + * 5.2.10.312 [RQ.SRS-006.RBAC.RowPolicy.Create.Access.Permissive](#rqsrs-006rbacrowpolicycreateaccesspermissive) + * 5.2.10.313 [RQ.SRS-006.RBAC.RowPolicy.Create.Access.Restrictive](#rqsrs-006rbacrowpolicycreateaccessrestrictive) + * 5.2.10.314 [RQ.SRS-006.RBAC.RowPolicy.Create.ForSelect](#rqsrs-006rbacrowpolicycreateforselect) + * 5.2.10.315 [RQ.SRS-006.RBAC.RowPolicy.Create.Condition](#rqsrs-006rbacrowpolicycreatecondition) + * 5.2.10.316 [RQ.SRS-006.RBAC.RowPolicy.Create.Condition.Effect](#rqsrs-006rbacrowpolicycreateconditioneffect) + * 5.2.10.317 [RQ.SRS-006.RBAC.RowPolicy.Create.Assignment](#rqsrs-006rbacrowpolicycreateassignment) + * 5.2.10.318 [RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.None](#rqsrs-006rbacrowpolicycreateassignmentnone) + * 5.2.10.319 [RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.All](#rqsrs-006rbacrowpolicycreateassignmentall) + * 5.2.10.320 [RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.AllExcept](#rqsrs-006rbacrowpolicycreateassignmentallexcept) + * 5.2.10.321 [RQ.SRS-006.RBAC.RowPolicy.Create.Syntax](#rqsrs-006rbacrowpolicycreatesyntax) + * 5.2.10.322 [RQ.SRS-006.RBAC.RowPolicy.Alter](#rqsrs-006rbacrowpolicyalter) + * 5.2.10.323 [RQ.SRS-006.RBAC.RowPolicy.Alter.Effect](#rqsrs-006rbacrowpolicyaltereffect) + * 5.2.10.324 [RQ.SRS-006.RBAC.RowPolicy.Alter.IfExists](#rqsrs-006rbacrowpolicyalterifexists) + * 5.2.10.325 [RQ.SRS-006.RBAC.RowPolicy.Alter.ForSelect](#rqsrs-006rbacrowpolicyalterforselect) + * 5.2.10.326 [RQ.SRS-006.RBAC.RowPolicy.Alter.OnCluster](#rqsrs-006rbacrowpolicyalteroncluster) + * 5.2.10.327 [RQ.SRS-006.RBAC.RowPolicy.Alter.On](#rqsrs-006rbacrowpolicyalteron) + * 5.2.10.328 [RQ.SRS-006.RBAC.RowPolicy.Alter.Rename](#rqsrs-006rbacrowpolicyalterrename) + * 5.2.10.329 [RQ.SRS-006.RBAC.RowPolicy.Alter.Access](#rqsrs-006rbacrowpolicyalteraccess) + * 5.2.10.330 [RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Permissive](#rqsrs-006rbacrowpolicyalteraccesspermissive) + * 5.2.10.331 [RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Restrictive](#rqsrs-006rbacrowpolicyalteraccessrestrictive) + * 5.2.10.332 [RQ.SRS-006.RBAC.RowPolicy.Alter.Condition](#rqsrs-006rbacrowpolicyaltercondition) + * 5.2.10.333 [RQ.SRS-006.RBAC.RowPolicy.Alter.Condition.Effect](#rqsrs-006rbacrowpolicyalterconditioneffect) + * 5.2.10.334 [RQ.SRS-006.RBAC.RowPolicy.Alter.Condition.None](#rqsrs-006rbacrowpolicyalterconditionnone) + * 5.2.10.335 [RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment](#rqsrs-006rbacrowpolicyalterassignment) + * 5.2.10.336 [RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.None](#rqsrs-006rbacrowpolicyalterassignmentnone) + * 5.2.10.337 [RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.All](#rqsrs-006rbacrowpolicyalterassignmentall) + * 5.2.10.338 [RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.AllExcept](#rqsrs-006rbacrowpolicyalterassignmentallexcept) + * 5.2.10.339 [RQ.SRS-006.RBAC.RowPolicy.Alter.Syntax](#rqsrs-006rbacrowpolicyaltersyntax) + * 5.2.10.340 [RQ.SRS-006.RBAC.RowPolicy.Drop](#rqsrs-006rbacrowpolicydrop) + * 5.2.10.341 [RQ.SRS-006.RBAC.RowPolicy.Drop.Effect](#rqsrs-006rbacrowpolicydropeffect) + * 5.2.10.342 
[RQ.SRS-006.RBAC.RowPolicy.Drop.IfExists](#rqsrs-006rbacrowpolicydropifexists) + * 5.2.10.343 [RQ.SRS-006.RBAC.RowPolicy.Drop.On](#rqsrs-006rbacrowpolicydropon) + * 5.2.10.344 [RQ.SRS-006.RBAC.RowPolicy.Drop.OnCluster](#rqsrs-006rbacrowpolicydroponcluster) + * 5.2.10.345 [RQ.SRS-006.RBAC.RowPolicy.Drop.Syntax](#rqsrs-006rbacrowpolicydropsyntax) + * 5.2.10.346 [RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy](#rqsrs-006rbacrowpolicyshowcreaterowpolicy) + * 5.2.10.347 [RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.On](#rqsrs-006rbacrowpolicyshowcreaterowpolicyon) + * 5.2.10.348 [RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.Syntax](#rqsrs-006rbacrowpolicyshowcreaterowpolicysyntax) + * 5.2.10.349 [RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies](#rqsrs-006rbacrowpolicyshowrowpolicies) + * 5.2.10.350 [RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.On](#rqsrs-006rbacrowpolicyshowrowpolicieson) + * 5.2.10.351 [RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.Syntax](#rqsrs-006rbacrowpolicyshowrowpoliciessyntax) +* 6 [References](#references) + +## Revision History + +This document is stored in an electronic form using [Git] source control management software +hosted in a Gitlab repository. + +All updates are tracked using [Git]'s revision history. + +* Gitlab repository: https://gitlab.com/altinity-qa/documents/qa-srs006-clickhouse-role-based-access-control/blob/master/QA_SRS006_ClickHouse_Role_Based_Access_Control.md +* Revision history: https://gitlab.com/altinity-qa/documents/qa-srs006-clickhouse-role-based-access-control/commits/master/QA_SRS006_ClickHouse_Role_Based_Access_Control.md + +## Introduction + +[ClickHouse] currently has support for only basic access control. Users can be defined to allow +access to specific databases and dictionaries. A profile can be assigned to a user +to specify a read-only mode as well as a set of quotas that limit the user's resource +consumption. Beyond this basic functionality there is no way to control access rights within +a database. A user can either be denied access, have read-only rights, or have complete access +to the whole database on the server. + +In many cases finer-grained access control is needed, where a user's access can be +controlled in a much more granular way. A typical solution to this problem in the **SQL** world +is provided by implementing **RBAC (role-based access control)**. +For example, a version of **RBAC** is implemented by both [MySQL] and [PostgreSQL]. + +[ClickHouse] shall implement **RBAC** to meet the growing needs of its users. In order to minimize +the learning curve, the concepts and the syntax of its implementation shall be +as close as possible to those of [MySQL] and [PostgreSQL]. The goal is to allow for a fast +transition to [ClickHouse] of users who are already familiar with these features +in those databases.
+ +## Terminology + +* **RBAC** - + role-based access control +* **quota** - + setting that limits specific resource consumption + +## Privilege Definitions + +* **usage** - + privilege to access a database or a table +* **select** - + privilege to read data from a database or a table +* **select columns** - + privilege to read specific columns from a table +* **insert** - + privilege to insert data into a database or a table +* **delete** - + privilege to delete a database or a table +* **alter** - + privilege to alter tables +* **create** - + privilege to create a database or a table +* **drop** - + privilege to drop a database or a table +* **all** - + privilege that includes **usage**, **select**, **select columns**, + **insert**, **delete**, **alter**, **create**, and **drop** +* **grant option** - + privilege to grant the same privilege to other users or roles +* **admin option** - + privilege to perform administrative tasks as defined in the **system queries** + +## Requirements + +### Generic + +#### RQ.SRS-006.RBAC +version: 1.0 + +[ClickHouse] SHALL support role-based access control. + +#### Login + +##### RQ.SRS-006.RBAC.Login +version: 1.0 + +[ClickHouse] SHALL allow access to the server for a given +user only when the correct username and password are used during +the connection to the server. + +##### RQ.SRS-006.RBAC.Login.DefaultUser +version: 1.0 + +[ClickHouse] SHALL use the **default user** when no username and password +are specified during the connection to the server. + +#### User + +##### RQ.SRS-006.RBAC.User +version: 1.0 + +[ClickHouse] SHALL support creation and manipulation of +one or more **user** accounts to which roles, privileges, +settings profiles, quotas and row policies can be assigned. + +##### RQ.SRS-006.RBAC.User.Roles +version: 1.0 + +[ClickHouse] SHALL support assigning one or more **roles** +to a **user**. + +##### RQ.SRS-006.RBAC.User.Privileges +version: 1.0 + +[ClickHouse] SHALL support assigning one or more privileges to a **user**. + +##### RQ.SRS-006.RBAC.User.Variables +version: 1.0 + +[ClickHouse] SHALL support assigning one or more variables to a **user**. + +##### RQ.SRS-006.RBAC.User.Variables.Constraints +version: 1.0 + +[ClickHouse] SHALL support assigning min, max and read-only constraints +for the variables that can be set and read by the **user**. + +##### RQ.SRS-006.RBAC.User.SettingsProfile +version: 1.0 + +[ClickHouse] SHALL support assigning one or more **settings profiles** +to a **user**. + +##### RQ.SRS-006.RBAC.User.Quotas +version: 1.0 + +[ClickHouse] SHALL support assigning one or more **quotas** to a **user**. + +##### RQ.SRS-006.RBAC.User.RowPolicies +version: 1.0 + +[ClickHouse] SHALL support assigning one or more **row policies** to a **user**. + +##### RQ.SRS-006.RBAC.User.AccountLock +version: 1.0 + +[ClickHouse] SHALL support locking and unlocking of **user** accounts. + +##### RQ.SRS-006.RBAC.User.AccountLock.DenyAccess +version: 1.0 + +[ClickHouse] SHALL deny access to the user whose account is locked. + +##### RQ.SRS-006.RBAC.User.DefaultRole +version: 1.0 + +[ClickHouse] SHALL support assigning a default role to a **user**. + +##### RQ.SRS-006.RBAC.User.RoleSelection +version: 1.0 + +[ClickHouse] SHALL support selection of one or more **roles** from the available roles +that are assigned to a **user**. + +##### RQ.SRS-006.RBAC.User.ShowCreate +version: 1.0 + +[ClickHouse] SHALL support showing the command used to create the **user** account.
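As an illustration of the user requirements above, a minimal sketch using hypothetical names (user `john`, role `accountant`); none of these objects are defined by this specification, and the exact statement syntax is specified in later sections.

```sql
-- A minimal sketch of user management, assuming hypothetical
-- user `john` and role `accountant`.
CREATE USER john IDENTIFIED BY 'secret';
CREATE ROLE accountant;
GRANT accountant TO john;
SET DEFAULT ROLE accountant TO john;
SHOW CREATE USER john; -- shows the statement used to create the account
```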
+ +##### RQ.SRS-006.RBAC.User.ShowPrivileges +version: 1.0 + +[ClickHouse] SHALL support listing the privileges of the **user**. + +#### Role + +##### RQ.SRS-006.RBAC.Role +version: 1.0 + +[ClickHouse] SHALL support creation and manipulation of **roles** +to which privileges, settings profiles, quotas and row policies can be +assigned. + +##### RQ.SRS-006.RBAC.Role.Privileges +version: 1.0 + +[ClickHouse] SHALL support assigning one or more privileges to a **role**. + +##### RQ.SRS-006.RBAC.Role.Variables +version: 1.0 + +[ClickHouse] SHALL support assigning one or more variables to a **role**. + +##### RQ.SRS-006.RBAC.Role.SettingsProfile +version: 1.0 + +[ClickHouse] SHALL support assigning one or more **settings profiles** +to a **role**. + +##### RQ.SRS-006.RBAC.Role.Quotas +version: 1.0 + +[ClickHouse] SHALL support assigning one or more **quotas** to a **role**. + +##### RQ.SRS-006.RBAC.Role.RowPolicies +version: 1.0 + +[ClickHouse] SHALL support assigning one or more **row policies** to a **role**. + +#### Privileges + +##### RQ.SRS-006.RBAC.Privileges.Usage +version: 1.0 + +[ClickHouse] SHALL support granting or revoking **usage** privilege +for a database or a specific table to one or more **users** or **roles**. + +##### RQ.SRS-006.RBAC.Privileges.Select +version: 1.0 + +[ClickHouse] SHALL support granting or revoking **select** privilege +for a database or a specific table to one or more **users** or **roles**. + +##### RQ.SRS-006.RBAC.Privileges.SelectColumns +version: 1.0 + +[ClickHouse] SHALL support granting or revoking **select columns** privilege +for a specific table to one or more **users** or **roles**. + +##### RQ.SRS-006.RBAC.Privileges.Insert +version: 1.0 + +[ClickHouse] SHALL support granting or revoking **insert** privilege +for a database or a specific table to one or more **users** or **roles**. + +##### RQ.SRS-006.RBAC.Privileges.Delete +version: 1.0 + +[ClickHouse] SHALL support granting or revoking **delete** privilege +for a database or a specific table to one or more **users** or **roles**. + +##### RQ.SRS-006.RBAC.Privileges.Alter +version: 1.0 + +[ClickHouse] SHALL support granting or revoking **alter** privilege +for a database or a specific table to one or more **users** or **roles**. + +##### RQ.SRS-006.RBAC.Privileges.Create +version: 1.0 + +[ClickHouse] SHALL support granting or revoking **create** privilege +for a database or a specific table to one or more **users** or **roles**. + +##### RQ.SRS-006.RBAC.Privileges.Drop +version: 1.0 + +[ClickHouse] SHALL support granting or revoking **drop** privilege +for a database or a specific table to one or more **users** or **roles**. + +##### RQ.SRS-006.RBAC.Privileges.All +version: 1.0 + +[ClickHouse] SHALL include in the **all** privilege the same rights +as provided by **usage**, **select**, **select columns**, +**insert**, **delete**, **alter**, **create**, and **drop** privileges. + +##### RQ.SRS-006.RBAC.Privileges.All.GrantRevoke +version: 1.0 + +[ClickHouse] SHALL support granting or revoking **all** privileges +for a database or a specific table to one or more **users** or **roles**. + +##### RQ.SRS-006.RBAC.Privileges.GrantOption +version: 1.0 + +[ClickHouse] SHALL support granting or revoking **grant option** privilege +for a database or a specific table to one or more **users** or **roles**. + +##### RQ.SRS-006.RBAC.Privileges.AdminOption +version: 1.0 + +[ClickHouse] SHALL support granting or revoking **admin option** privilege +to one or more **users** or **roles**.
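For example, granting and revoking these privileges might look like the following sketch; database `db1`, table `t1`, user `john`, and role `accountant` are hypothetical names used only for illustration.

```sql
-- Hypothetical grant/revoke examples for the privileges above.
GRANT SELECT ON db1.t1 TO john;                  -- **select** on a table
GRANT INSERT ON db1.* TO john WITH GRANT OPTION; -- adds the **grant option**
REVOKE SELECT ON db1.t1 FROM john;               -- revoke a granted privilege
GRANT accountant TO john WITH ADMIN OPTION;      -- **admin option** for a role
```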
+ +#### Required Privileges + +##### RQ.SRS-006.RBAC.RequiredPrivileges.Insert +version: 1.0 + +[ClickHouse] SHALL not allow any `INSERT INTO` statements +to be executed unless the user has the **insert** privilege for the destination table +either because of the explicit grant or through one of the roles assigned to the user. + +##### RQ.SRS-006.RBAC.RequiredPrivileges.Select +version: 1.0 + +[ClickHouse] SHALL not allow any `SELECT` statements +to be executed unless the user has the **select** or **select columns** privilege +for the destination table either because of the explicit grant +or through one of the roles assigned to the user. +If the user only has the **select columns** +privilege then only the specified columns SHALL be available for reading. + +##### RQ.SRS-006.RBAC.RequiredPrivileges.Create +version: 1.0 + +[ClickHouse] SHALL not allow any `CREATE` statements +to be executed unless the user has the **create** privilege for the destination database +either because of the explicit grant or through one of the roles assigned to the user. + +##### RQ.SRS-006.RBAC.RequiredPrivileges.Alter +version: 1.0 + +[ClickHouse] SHALL not allow any `ALTER` statements +to be executed unless the user has the **alter** privilege for the destination table +either because of the explicit grant or through one of the roles assigned to the user. + +##### RQ.SRS-006.RBAC.RequiredPrivileges.Drop +version: 1.0 + +[ClickHouse] SHALL not allow any `DROP` statements +to be executed unless the user has the **drop** privilege for the destination database +either because of the explicit grant or through one of the roles assigned to the user. + +##### RQ.SRS-006.RBAC.RequiredPrivileges.Drop.Table +version: 1.0 + +[ClickHouse] SHALL not allow any `DROP TABLE` statements +to be executed unless the user has the **drop** privilege for the destination database or the table +either because of the explicit grant or through one of the roles assigned to the user. + +##### RQ.SRS-006.RBAC.RequiredPrivileges.GrantRevoke +version: 1.0 + +[ClickHouse] SHALL not allow any `GRANT` or `REVOKE` statements +to be executed unless the user has the **grant option** privilege +for the privilege on the destination table +either because of the explicit grant or through one of the roles assigned to the user. + +##### RQ.SRS-006.RBAC.RequiredPrivileges.Use +version: 1.0 + +[ClickHouse] SHALL not allow the `USE` statement to be executed +unless the user has at least one of the privileges for the database +or the table inside that database +either because of the explicit grant or through one of the roles assigned to the user. + +##### RQ.SRS-006.RBAC.RequiredPrivileges.Admin +version: 1.0 + +[ClickHouse] SHALL not allow any of the following statements + +* `SYSTEM` +* `SHOW` +* `ATTACH` +* `CHECK TABLE` +* `DESCRIBE TABLE` +* `DETACH` +* `EXISTS` +* `KILL QUERY` +* `KILL MUTATION` +* `OPTIMIZE` +* `RENAME` +* `TRUNCATE` + +to be executed unless the user has the **admin option** privilege +through one of the roles with **admin option** privilege assigned to the user. + +#### Partial Revokes + +##### RQ.SRS-006.RBAC.PartialRevokes +version: 1.0 + +[ClickHouse] SHALL support partial revoking of privileges granted +to a **user** or a **role**. + +#### Settings Profile + +##### RQ.SRS-006.RBAC.SettingsProfile +version: 1.0 + +[ClickHouse] SHALL support creation and manipulation of **settings profiles** +that can include value definitions for one or more variables and +can be assigned to one or more **users** or **roles**.
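A sketch of a settings profile that sets a variable with constraints, assuming the standard `max_memory_usage` setting and a hypothetical profile name `memory_profile` and user `john`; the full `CREATE SETTINGS PROFILE` syntax is specified later in this document.

```sql
-- Hypothetical settings profile with min/max constraints.
CREATE SETTINGS PROFILE memory_profile
    SETTINGS max_memory_usage = 10000000000 MIN 1000000 MAX 20000000000
    TO john;
```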
+ +##### RQ.SRS-006.RBAC.SettingsProfile.Constraints +version: 1.0 + +[ClickHouse] SHALL support assigning min, max and read-only constraints +for the variables specified in the **settings profile**. + +##### RQ.SRS-006.RBAC.SettingsProfile.ShowCreate +version: 1.0 + +[ClickHouse] SHALL support showing the command used to create the **settings profile**. + +#### Quotas + +##### RQ.SRS-006.RBAC.Quotas +version: 1.0 + +[ClickHouse] SHALL support creation and manipulation of **quotas** +that can be used to limit resource usage by a **user** or a **role** +over a period of time. + +##### RQ.SRS-006.RBAC.Quotas.Keyed +version: 1.0 + +[ClickHouse] SHALL support creating **quotas** that are keyed +so that a quota is tracked separately for each key value. + +##### RQ.SRS-006.RBAC.Quotas.Queries +version: 1.0 + +[ClickHouse] SHALL support setting **queries** quota to limit the total number of requests. + +##### RQ.SRS-006.RBAC.Quotas.Errors +version: 1.0 + +[ClickHouse] SHALL support setting **errors** quota to limit the number of queries that threw an exception. + +##### RQ.SRS-006.RBAC.Quotas.ResultRows +version: 1.0 + +[ClickHouse] SHALL support setting **result rows** quota to limit +the total number of rows given as the result. + +##### RQ.SRS-006.RBAC.Quotas.ReadRows +version: 1.0 + +[ClickHouse] SHALL support setting **read rows** quota to limit the total +number of source rows read from tables for running the query on all remote servers. + +##### RQ.SRS-006.RBAC.Quotas.ResultBytes +version: 1.0 + +[ClickHouse] SHALL support setting **result bytes** quota to limit the total number +of bytes that can be returned as the result. + +##### RQ.SRS-006.RBAC.Quotas.ReadBytes +version: 1.0 + +[ClickHouse] SHALL support setting **read bytes** quota to limit the total number +of source bytes read from tables for running the query on all remote servers. + +##### RQ.SRS-006.RBAC.Quotas.ExecutionTime +version: 1.0 + +[ClickHouse] SHALL support setting **execution time** quota to limit the maximum +query execution time. + +##### RQ.SRS-006.RBAC.Quotas.ShowCreate +version: 1.0 + +[ClickHouse] SHALL support showing the command used to create the **quota**. + +#### Row Policy + +##### RQ.SRS-006.RBAC.RowPolicy +version: 1.0 + +[ClickHouse] SHALL support creation and manipulation of table **row policies** +that can be used to limit access to the table contents for a **user** or a **role** +using a specified **condition**. + +##### RQ.SRS-006.RBAC.RowPolicy.Condition +version: 1.0 + +[ClickHouse] SHALL support row policy **conditions** that can be any SQL +expression that returns a boolean. + +##### RQ.SRS-006.RBAC.RowPolicy.ShowCreate +version: 1.0 + +[ClickHouse] SHALL support showing the command used to create the **row policy**. + +### Specific + +##### RQ.SRS-006.RBAC.User.Use.DefaultRole +version: 1.0 + +[ClickHouse] SHALL by default use the default role or roles assigned +to the user if specified. + +##### RQ.SRS-006.RBAC.User.Use.AllRolesWhenNoDefaultRole +version: 1.0 + +[ClickHouse] SHALL by default use all the roles assigned to the user +if no default role or roles are specified for the user. + +##### RQ.SRS-006.RBAC.User.Create +version: 1.0 + +[ClickHouse] SHALL support creating **user** accounts using the `CREATE USER` statement. + +##### RQ.SRS-006.RBAC.User.Create.IfNotExists +version: 1.0 + +[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE USER` statement +to skip raising an exception if a user with the same **name** already exists.
+If the `IF NOT EXISTS` clause is not specified then an exception SHALL be +raised if a user with the same **name** already exists. + +##### RQ.SRS-006.RBAC.User.Create.Replace +version: 1.0 + +[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE USER` statement +to replace an existing user account if one already exists. + +##### RQ.SRS-006.RBAC.User.Create.Password.NoPassword +version: 1.0 + +[ClickHouse] SHALL support specifying no password when creating a +user account using the `IDENTIFIED WITH NO_PASSWORD` clause. + +##### RQ.SRS-006.RBAC.User.Create.Password.NoPassword.Login +version: 1.0 + +[ClickHouse] SHALL use no password for the user when connecting to the server +when an account was created with the `IDENTIFIED WITH NO_PASSWORD` clause. + +##### RQ.SRS-006.RBAC.User.Create.Password.PlainText +version: 1.0 + +[ClickHouse] SHALL support specifying a plaintext password when creating a +user account using the `IDENTIFIED WITH PLAINTEXT_PASSWORD BY` clause. + +##### RQ.SRS-006.RBAC.User.Create.Password.PlainText.Login +version: 1.0 + +[ClickHouse] SHALL use the plaintext password passed by the user when connecting to the server +when an account was created with the `IDENTIFIED WITH PLAINTEXT_PASSWORD` clause +and compare the password with the one used in the `CREATE USER` statement. + +##### RQ.SRS-006.RBAC.User.Create.Password.Sha256Password +version: 1.0 + +[ClickHouse] SHALL support specifying the result of applying SHA256 +to some password when creating a user account using the `IDENTIFIED WITH SHA256_PASSWORD BY` or `IDENTIFIED BY` +clause. + +##### RQ.SRS-006.RBAC.User.Create.Password.Sha256Password.Login +version: 1.0 + +[ClickHouse] SHALL calculate `SHA256` of the password passed by the user when connecting to the server +when an account was created with the `IDENTIFIED WITH SHA256_PASSWORD` or with the `IDENTIFIED BY` clause +and compare the calculated hash to the one used in the `CREATE USER` statement. + +##### RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash +version: 1.0 + +[ClickHouse] SHALL support specifying the result of applying SHA256 +to some already calculated hash when creating a user account using the `IDENTIFIED WITH SHA256_HASH` +clause. + +##### RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash.Login +version: 1.0 + +[ClickHouse] SHALL calculate `SHA256` of the already calculated hash passed by +the user when connecting to the server +when an account was created with the `IDENTIFIED WITH SHA256_HASH` clause +and compare the calculated hash to the one used in the `CREATE USER` statement. + +##### RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password +version: 1.0 + +[ClickHouse] SHALL support specifying the result of applying SHA1 two times +to a password when creating a user account using the `IDENTIFIED WITH DOUBLE_SHA1_PASSWORD` +clause. + +##### RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password.Login +version: 1.0 + +[ClickHouse] SHALL calculate `SHA1` two times over the password passed by +the user when connecting to the server +when an account was created with the `IDENTIFIED WITH DOUBLE_SHA1_PASSWORD` clause +and compare the calculated value to the one used in the `CREATE USER` statement. + +##### RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash +version: 1.0 + +[ClickHouse] SHALL support specifying the result of applying SHA1 two times +to a hash when creating a user account using the `IDENTIFIED WITH DOUBLE_SHA1_HASH` +clause.
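The identification clauses above might be used as in the following sketch; the user names are hypothetical and the hash value is shortened for illustration.

```sql
-- Hypothetical examples of the identification clauses.
CREATE USER u1 IDENTIFIED WITH NO_PASSWORD;
CREATE USER u2 IDENTIFIED WITH PLAINTEXT_PASSWORD BY 'secret';
CREATE USER u3 IDENTIFIED WITH SHA256_PASSWORD BY 'secret'; -- same as IDENTIFIED BY 'secret'
CREATE USER u4 IDENTIFIED WITH SHA256_HASH BY '65e84be3...'; -- hash shortened
CREATE USER u5 IDENTIFIED WITH DOUBLE_SHA1_PASSWORD BY 'secret';
```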
+ +##### RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash.Login +version: 1.0 + +[ClickHouse] SHALL calculate `SHA1` two times over the hash passed by +the user when connecting to the server +when an account was created with the `IDENTIFIED WITH DOUBLE_SHA1_HASH` clause +and compare the calculated value to the one used in the `CREATE USER` statement. + +##### RQ.SRS-006.RBAC.User.Create.Host.Name +version: 1.0 + +[ClickHouse] SHALL support specifying one or more hostnames from +which the user can access the server using the `HOST NAME` clause +in the `CREATE USER` statement. + +##### RQ.SRS-006.RBAC.User.Create.Host.Regexp +version: 1.0 + +[ClickHouse] SHALL support specifying one or more regular expressions +to match hostnames from which the user can access the server +using the `HOST REGEXP` clause in the `CREATE USER` statement. + +##### RQ.SRS-006.RBAC.User.Create.Host.IP +version: 1.0 + +[ClickHouse] SHALL support specifying one or more IP addresses or subnets from +which the user can access the server using the `HOST IP` clause in the +`CREATE USER` statement. + +##### RQ.SRS-006.RBAC.User.Create.Host.Any +version: 1.0 + +[ClickHouse] SHALL support specifying the `HOST ANY` clause in the `CREATE USER` statement +to indicate that the user can access the server from any host. + +##### RQ.SRS-006.RBAC.User.Create.Host.None +version: 1.0 + +[ClickHouse] SHALL support forbidding access from any host using the `HOST NONE` clause in the +`CREATE USER` statement. + +##### RQ.SRS-006.RBAC.User.Create.Host.Local +version: 1.0 + +[ClickHouse] SHALL support limiting user access to local only using the `HOST LOCAL` clause in the +`CREATE USER` statement. + +##### RQ.SRS-006.RBAC.User.Create.Host.Like +version: 1.0 + +[ClickHouse] SHALL support specifying hosts using `LIKE` command syntax with the +`HOST LIKE` clause in the `CREATE USER` statement. + +##### RQ.SRS-006.RBAC.User.Create.Host.Default +version: 1.0 + +[ClickHouse] SHALL support user access to the server from any host +if no `HOST` clause is specified in the `CREATE USER` statement. + +##### RQ.SRS-006.RBAC.User.Create.DefaultRole +version: 1.0 + +[ClickHouse] SHALL support specifying one or more default roles +using the `DEFAULT ROLE` clause in the `CREATE USER` statement. + +##### RQ.SRS-006.RBAC.User.Create.DefaultRole.None +version: 1.0 + +[ClickHouse] SHALL support specifying no default roles +using the `DEFAULT ROLE NONE` clause in the `CREATE USER` statement. + +##### RQ.SRS-006.RBAC.User.Create.DefaultRole.All +version: 1.0 + +[ClickHouse] SHALL support specifying all roles to be used as default +using the `DEFAULT ROLE ALL` clause in the `CREATE USER` statement. + +##### RQ.SRS-006.RBAC.User.Create.Settings +version: 1.0 + +[ClickHouse] SHALL support specifying settings and a profile +using the `SETTINGS` clause in the `CREATE USER` statement. + +##### RQ.SRS-006.RBAC.User.Create.OnCluster +version: 1.0 + +[ClickHouse] SHALL support specifying the cluster on which the user +will be created using the `ON CLUSTER` clause in the `CREATE USER` statement. + +##### RQ.SRS-006.RBAC.User.Create.Syntax +version: 1.0 + +[ClickHouse] SHALL support the following syntax for the `CREATE USER` statement. + +```sql +CREATE USER [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name] + [IDENTIFIED [WITH {NO_PASSWORD|PLAINTEXT_PASSWORD|SHA256_PASSWORD|SHA256_HASH|DOUBLE_SHA1_PASSWORD|DOUBLE_SHA1_HASH}] BY {'password'|'hash'}] + [HOST {LOCAL | NAME 'name' | NAME REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...]
| ANY | NONE] + [DEFAULT ROLE role [,...]] + [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] +``` + +##### RQ.SRS-006.RBAC.User.Alter +version: 1.0 + +[ClickHouse] SHALL support altering **user** accounts using the `ALTER USER` statement. + +##### RQ.SRS-006.RBAC.User.Alter.OrderOfEvaluation +version: 1.0 + +[ClickHouse] SHALL support evaluating the `ALTER USER` statement from left to right +where things defined on the right override anything that was previously defined on +the left. + +##### RQ.SRS-006.RBAC.User.Alter.IfExists +version: 1.0 + +[ClickHouse] SHALL support `IF EXISTS` clause in the `ALTER USER` statement +to skip raising an exception (producing a warning instead) if a user with the specified **name** does not exist. If the `IF EXISTS` clause is not specified then an exception SHALL be raised if a user with the **name** does not exist. + +##### RQ.SRS-006.RBAC.User.Alter.Cluster +version: 1.0 + +[ClickHouse] SHALL support specifying the cluster the user is on +when altering a user account using the `ON CLUSTER` clause in the `ALTER USER` statement. + +##### RQ.SRS-006.RBAC.User.Alter.Rename +version: 1.0 + +[ClickHouse] SHALL support specifying a new name for the user when +altering a user account using the `RENAME` clause in the `ALTER USER` statement. + +##### RQ.SRS-006.RBAC.User.Alter.Password.PlainText +version: 1.0 + +[ClickHouse] SHALL support specifying a plaintext password when altering a +user account using the `IDENTIFIED WITH PLAINTEXT_PASSWORD BY` or +the shorthand `IDENTIFIED BY` clause in the `ALTER USER` statement. + +##### RQ.SRS-006.RBAC.User.Alter.Password.Sha256Password +version: 1.0 + +[ClickHouse] SHALL support specifying the result of applying SHA256 +to some password as identification when altering a user account using the +`IDENTIFIED WITH SHA256_PASSWORD` clause in the `ALTER USER` statement. + +##### RQ.SRS-006.RBAC.User.Alter.Password.DoubleSha1Password +version: 1.0 + +[ClickHouse] SHALL support specifying the result of applying Double SHA1 +to some password as identification when altering a user account using the +`IDENTIFIED WITH DOUBLE_SHA1_PASSWORD` clause in the `ALTER USER` statement. + +##### RQ.SRS-006.RBAC.User.Alter.Host.AddDrop +version: 1.0 + +[ClickHouse] SHALL support altering a user by adding and dropping access to hosts with the `ADD HOST` or the `DROP HOST` clause in the `ALTER USER` statement. + +##### RQ.SRS-006.RBAC.User.Alter.Host.Local +version: 1.0 + +[ClickHouse] SHALL support limiting user access to local only using the `HOST LOCAL` clause in the +`ALTER USER` statement. + +##### RQ.SRS-006.RBAC.User.Alter.Host.Name +version: 1.0 + +[ClickHouse] SHALL support specifying one or more hostnames from +which the user can access the server using the `HOST NAME` clause +in the `ALTER USER` statement. + +##### RQ.SRS-006.RBAC.User.Alter.Host.Regexp +version: 1.0 + +[ClickHouse] SHALL support specifying one or more regular expressions +to match hostnames from which the user can access the server +using the `HOST REGEXP` clause in the `ALTER USER` statement. + +##### RQ.SRS-006.RBAC.User.Alter.Host.IP +version: 1.0 + +[ClickHouse] SHALL support specifying one or more IP addresses or subnets from +which the user can access the server using the `HOST IP` clause in the +`ALTER USER` statement. + +##### RQ.SRS-006.RBAC.User.Alter.Host.Like +version: 1.0 + +[ClickHouse] SHALL support specifying one or more similar hosts using `LIKE` command syntax with the `HOST LIKE` clause in the `ALTER USER` statement.
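For example, the host clauses above might be combined as in this sketch; the user name `john` and the host values are hypothetical.

```sql
-- Hypothetical host restrictions in ALTER USER.
ALTER USER john ADD HOST IP '192.168.0.0/16';      -- allow a subnet
ALTER USER john DROP HOST NAME 'old.example.com';  -- remove a hostname
ALTER USER john HOST LIKE '%.example.com';         -- LIKE-style pattern
```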
+ +##### RQ.SRS-006.RBAC.User.Alter.Host.Any +version: 1.0 + +[ClickHouse] SHALL support specifying the `HOST ANY` clause in the `ALTER USER` statement +to indicate that the user can access the server from any host. + +##### RQ.SRS-006.RBAC.User.Alter.Host.None +version: 1.0 + +[ClickHouse] SHALL support forbidding access from any host using the `HOST NONE` clause in the +`ALTER USER` statement. + +##### RQ.SRS-006.RBAC.User.Alter.DefaultRole +version: 1.0 + +[ClickHouse] SHALL support specifying one or more default roles +using the `DEFAULT ROLE` clause in the `ALTER USER` statement. + +##### RQ.SRS-006.RBAC.User.Alter.DefaultRole.All +version: 1.0 + +[ClickHouse] SHALL support specifying all roles to be used as default +using the `DEFAULT ROLE ALL` clause in the `ALTER USER` statement. + +##### RQ.SRS-006.RBAC.User.Alter.DefaultRole.AllExcept +version: 1.0 + +[ClickHouse] SHALL support specifying one or more roles which will not be used as default +using the `DEFAULT ROLE ALL EXCEPT` clause in the `ALTER USER` statement. + +##### RQ.SRS-006.RBAC.User.Alter.Settings +version: 1.0 + +[ClickHouse] SHALL support specifying one or more variables +using the `SETTINGS` clause in the `ALTER USER` statement. + +##### RQ.SRS-006.RBAC.User.Alter.Settings.Min +version: 1.0 + +[ClickHouse] SHALL support specifying a minimum value for the variable specified using `SETTINGS` with the `MIN` clause in the `ALTER USER` statement. + +##### RQ.SRS-006.RBAC.User.Alter.Settings.Max +version: 1.0 + +[ClickHouse] SHALL support specifying a maximum value for the variable specified using `SETTINGS` with the `MAX` clause in the `ALTER USER` statement. + +##### RQ.SRS-006.RBAC.User.Alter.Settings.Profile +version: 1.0 + +[ClickHouse] SHALL support specifying the name of a profile for the variable specified using `SETTINGS` with the `PROFILE` clause in the `ALTER USER` statement. + +##### RQ.SRS-006.RBAC.User.Alter.Syntax +version: 1.0 + +[ClickHouse] SHALL support the following syntax for the `ALTER USER` statement. + +```sql +ALTER USER [IF EXISTS] name [ON CLUSTER cluster_name] + [RENAME TO new_name] + [IDENTIFIED [WITH {PLAINTEXT_PASSWORD|SHA256_PASSWORD|DOUBLE_SHA1_PASSWORD}] BY {'password'|'hash'}] + [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] + [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ] + [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] +``` + +##### RQ.SRS-006.RBAC.SetDefaultRole +version: 1.0 + +[ClickHouse] SHALL support setting or changing granted roles to default for one or more +users using the `SET DEFAULT ROLE` statement which +SHALL permanently change the default roles for the user or users if successful. + +##### RQ.SRS-006.RBAC.SetDefaultRole.CurrentUser +version: 1.0 + +[ClickHouse] SHALL support setting or changing granted roles to default for +the current user using the `CURRENT_USER` clause in the `SET DEFAULT ROLE` statement. + +##### RQ.SRS-006.RBAC.SetDefaultRole.All +version: 1.0 + +[ClickHouse] SHALL support setting or changing all granted roles to default +for one or more users using the `ALL` clause in the `SET DEFAULT ROLE` statement. + +##### RQ.SRS-006.RBAC.SetDefaultRole.AllExcept +version: 1.0 + +[ClickHouse] SHALL support setting or changing all granted roles except those specified +to default for one or more users using the `ALL EXCEPT` clause in the `SET DEFAULT ROLE` statement.
+ +##### RQ.SRS-006.RBAC.SetDefaultRole.None +version: 1.0 + +[ClickHouse] SHALL support removing all granted roles from default +for one or more users using the `NONE` clause in the `SET DEFAULT ROLE` statement. + +##### RQ.SRS-006.RBAC.SetDefaultRole.Syntax +version: 1.0 + +[ClickHouse] SHALL support the following syntax for the `SET DEFAULT ROLE` statement. + +```sql +SET DEFAULT ROLE + {NONE | role [,...] | ALL | ALL EXCEPT role [,...]} + TO {user|CURRENT_USER} [,...] +``` + +##### RQ.SRS-006.RBAC.SetRole +version: 1.0 + +[ClickHouse] SHALL support activating a role or roles for the current user +using the `SET ROLE` statement. + +##### RQ.SRS-006.RBAC.SetRole.Default +version: 1.0 + +[ClickHouse] SHALL support activating default roles for the current user +using the `DEFAULT` clause in the `SET ROLE` statement. + +##### RQ.SRS-006.RBAC.SetRole.None +version: 1.0 + +[ClickHouse] SHALL support activating no roles for the current user +using the `NONE` clause in the `SET ROLE` statement. + +##### RQ.SRS-006.RBAC.SetRole.All +version: 1.0 + +[ClickHouse] SHALL support activating all roles for the current user +using the `ALL` clause in the `SET ROLE` statement. + +##### RQ.SRS-006.RBAC.SetRole.AllExcept +version: 1.0 + +[ClickHouse] SHALL support activating all roles except those specified +for the current user using the `ALL EXCEPT` clause in the `SET ROLE` statement. + +##### RQ.SRS-006.RBAC.SetRole.Syntax +version: 1.0 + +[ClickHouse] SHALL support the following syntax for the `SET ROLE` statement. + +```sql +SET ROLE {DEFAULT | NONE | role [,...] | ALL | ALL EXCEPT role [,...]} +``` + +##### RQ.SRS-006.RBAC.User.ShowCreateUser +version: 1.0 + +[ClickHouse] SHALL support showing the `CREATE USER` statement used to create the current user object +using the `SHOW CREATE USER` statement with `CURRENT_USER` or no argument. + +##### RQ.SRS-006.RBAC.User.ShowCreateUser.For +version: 1.0 + +[ClickHouse] SHALL support showing the `CREATE USER` statement used to create the specified user object +using the `FOR` clause in the `SHOW CREATE USER` statement. + +##### RQ.SRS-006.RBAC.User.ShowCreateUser.Syntax +version: 1.0 + +[ClickHouse] SHALL support the following syntax for the `SHOW CREATE USER` statement. + +```sql +SHOW CREATE USER [name | CURRENT_USER] +``` + +##### RQ.SRS-006.RBAC.User.Drop +version: 1.0 + +[ClickHouse] SHALL support removing a user account using the `DROP USER` statement. + +##### RQ.SRS-006.RBAC.User.Drop.IfExists +version: 1.0 + +[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP USER` statement +to skip raising an exception if the user account does not exist. +If the `IF EXISTS` clause is not specified then an exception SHALL be +raised if a user does not exist. + +##### RQ.SRS-006.RBAC.User.Drop.OnCluster +version: 1.0 + +[ClickHouse] SHALL support using `ON CLUSTER` clause in the `DROP USER` statement +to specify the name of the cluster the user should be dropped from. + +##### RQ.SRS-006.RBAC.User.Drop.Syntax +version: 1.0 + +[ClickHouse] SHALL support the following syntax for the `DROP USER` statement. + +```sql +DROP USER [IF EXISTS] name [,...] [ON CLUSTER cluster_name] +``` + +##### RQ.SRS-006.RBAC.Role.Create +version: 1.0 + +[ClickHouse] SHALL support creating a **role** using the `CREATE ROLE` statement. + +##### RQ.SRS-006.RBAC.Role.Create.IfNotExists +version: 1.0 + +[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE ROLE` statement +to skip raising an exception if a role with the same **name** already exists. +If the `IF NOT EXISTS` clause is not specified then an exception SHALL be +raised if a role with the same **name** already exists.
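A short sketch combining role creation and activation from the statements above, assuming a hypothetical role `accountant` that is already granted to the current user:

```sql
-- Hypothetical role creation and activation.
CREATE ROLE IF NOT EXISTS accountant;
SET ROLE accountant;  -- activate the role for the current session
SET ROLE DEFAULT;     -- return to the default roles
```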
+ +##### RQ.SRS-006.RBAC.Role.Create.Replace +version: 1.0 + +[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE ROLE` statement +to replace an existing role if it already exists. + +##### RQ.SRS-006.RBAC.Role.Create.Settings +version: 1.0 + +[ClickHouse] SHALL support specifying settings and a profile using the `SETTINGS` +clause in the `CREATE ROLE` statement. + +##### RQ.SRS-006.RBAC.Role.Create.Syntax +version: 1.0 + +[ClickHouse] SHALL support the following syntax for the `CREATE ROLE` statement. + +```sql +CREATE ROLE [IF NOT EXISTS | OR REPLACE] name + [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] +``` + +##### RQ.SRS-006.RBAC.Role.Create.Effect +version: 1.0 + +[ClickHouse] SHALL make the role available to be linked with users, privileges, quotas and +settings profiles after the successful execution of the `CREATE ROLE` statement. + +##### RQ.SRS-006.RBAC.Role.Alter +version: 1.0 + +[ClickHouse] SHALL support altering one **role** using the `ALTER ROLE` statement. + +##### RQ.SRS-006.RBAC.Role.Alter.IfExists +version: 1.0 + +[ClickHouse] SHALL support altering one **role** using the `ALTER ROLE IF EXISTS` statement, where no exception +will be thrown if the role does not exist. + +##### RQ.SRS-006.RBAC.Role.Alter.Cluster +version: 1.0 + +[ClickHouse] SHALL support altering one **role** using the `ALTER ROLE role ON CLUSTER` statement to specify the +cluster location of the specified role. + +##### RQ.SRS-006.RBAC.Role.Alter.Rename +version: 1.0 + +[ClickHouse] SHALL support altering one **role** using the `ALTER ROLE role RENAME TO` statement which renames the +role to a specified new name. If the new name already exists, then an exception SHALL be raised unless the +`IF EXISTS` clause is specified, in which case no exception will be raised and nothing will change. + +##### RQ.SRS-006.RBAC.Role.Alter.Settings +version: 1.0 + +[ClickHouse] SHALL support altering the settings of one **role** using the `ALTER ROLE role SETTINGS ...` statement. +Altering variable values, setting min and max values, specifying readonly or writable, and specifying the +profiles to which this change shall be applied are all supported, using the following syntax. + +```sql +[SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] +``` + +One or more variables and profiles may be specified as shown above. + +##### RQ.SRS-006.RBAC.Role.Alter.Effect +version: 1.0 + +[ClickHouse] SHALL alter the abilities granted by the role +from all the users to which the role was assigned after the successful execution +of the `ALTER ROLE` statement. Operations in progress SHALL be allowed to complete as is, but any new operation that requires privileges that are not otherwise granted to the user SHALL fail. + +##### RQ.SRS-006.RBAC.Role.Alter.Syntax +version: 1.0 + +[ClickHouse] SHALL support the following syntax for the `ALTER ROLE` statement. + +```sql +ALTER ROLE [IF EXISTS] name [ON CLUSTER cluster_name] + [RENAME TO new_name] + [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] +``` + +##### RQ.SRS-006.RBAC.Role.Drop +version: 1.0 + +[ClickHouse] SHALL support removing one or more roles using the `DROP ROLE` statement. + +##### RQ.SRS-006.RBAC.Role.Drop.IfExists +version: 1.0 + +[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP ROLE` statement +to skip raising an exception if the role does not exist.
+If the `IF EXISTS` clause is not specified then an exception SHALL be +raised if a role does not exist. + +##### RQ.SRS-006.RBAC.Role.Drop.Cluster +version: 1.0 + +[ClickHouse] SHALL support using `ON CLUSTER` clause in the `DROP ROLE` statement to specify the cluster from which to drop the specified role. + +##### RQ.SRS-006.RBAC.Role.Drop.Effect +version: 1.0 + +[ClickHouse] SHALL remove the abilities granted by the role +from all the users to which the role was assigned after the successful execution +of the `DROP ROLE` statement. Operations in progress SHALL be allowed to complete +but any new operation that requires privileges that are not otherwise granted to +the user SHALL fail. + +##### RQ.SRS-006.RBAC.Role.Drop.Syntax +version: 1.0 + +[ClickHouse] SHALL support the following syntax for the `DROP ROLE` statement. + +```sql +DROP ROLE [IF EXISTS] name [,...] [ON CLUSTER cluster_name] +``` + +##### RQ.SRS-006.RBAC.Role.ShowCreate +version: 1.0 + +[ClickHouse] SHALL support viewing the settings for a role upon creation with the `SHOW CREATE ROLE` +statement. + +##### RQ.SRS-006.RBAC.Role.ShowCreate.Syntax +version: 1.0 + +[ClickHouse] SHALL support the following syntax for the `SHOW CREATE ROLE` command. + +```sql +SHOW CREATE ROLE name +``` + +##### RQ.SRS-006.RBAC.Grant.Privilege.To +version: 1.0 + +[ClickHouse] SHALL support granting privileges to one or more users or roles using the `TO` clause +in the `GRANT PRIVILEGE` statement. + +##### RQ.SRS-006.RBAC.Grant.Privilege.To.Effect +version: 1.0 + +[ClickHouse] SHALL grant privileges to any set of users and/or roles specified in the `TO` clause of the grant statement. +Any new operation by one of the specified users or roles with the granted privilege SHALL succeed. + +##### RQ.SRS-006.RBAC.Grant.Privilege.ToCurrentUser +version: 1.0 + +[ClickHouse] SHALL support granting privileges to the current user using the `TO CURRENT_USER` clause +in the `GRANT PRIVILEGE` statement. + +##### RQ.SRS-006.RBAC.Grant.Privilege.Select +version: 1.0 + +[ClickHouse] SHALL support granting the **select** privilege to one or more users or roles +for a database or a table using the `GRANT SELECT` statement. + +##### RQ.SRS-006.RBAC.Grant.Privilege.Select.Effect +version: 1.0 + +[ClickHouse] SHALL add the **select** privilege to the specified users or roles +after the successful execution of the `GRANT SELECT` statement. +Any new operation by a user or a user that has the specified role +which requires the **select** privilege SHALL succeed. + +##### RQ.SRS-006.RBAC.Grant.Privilege.SelectColumns +version: 1.0 + +[ClickHouse] SHALL support granting the **select columns** privilege to one or more users or roles +for a database or a table using the `GRANT SELECT(columns)` statement. + +##### RQ.SRS-006.RBAC.Grant.Privilege.SelectColumns.Effect +version: 1.0 + +[ClickHouse] SHALL add the **select columns** privilege to the specified users or roles +after the successful execution of the `GRANT SELECT(columns)` statement. +Any new operation by a user or a user that has the specified role +which requires the **select columns** privilege SHALL succeed. + +##### RQ.SRS-006.RBAC.Grant.Privilege.Insert +version: 1.0 + +[ClickHouse] SHALL support granting the **insert** privilege to one or more users or roles +for a database or a table using the `GRANT INSERT` statement.
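The grant statements above might be used as in the following sketch; database `db1`, table `t1`, columns `x` and `y`, and user `john` are hypothetical names.

```sql
-- Hypothetical table-level and column-level grants.
GRANT SELECT ON db1.t1 TO john;          -- **select** on the whole table
GRANT SELECT(x, y) ON db1.t1 TO john;    -- **select columns** privilege
GRANT INSERT ON db1.t1 TO CURRENT_USER;  -- **insert** for the current user
```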
+ +##### RQ.SRS-006.RBAC.Grant.Privilege.Insert.Effect +version: 1.0 + +[ClickHouse] SHALL add the **insert** privilege to the specified users or roles +after the successful execution of the `GRANT INSERT` statement. +Any new operation by a user or a user that has the specified role +which requires the **insert** privilege SHALL succeed. + +##### RQ.SRS-006.RBAC.Grant.Privilege.Alter +version: 1.0 + +[ClickHouse] SHALL support granting the **alter** privilege to one or more users or roles +for a database or a table using the `GRANT ALTER` statement. + +##### RQ.SRS-006.RBAC.Grant.Privilege.Alter.Effect +version: 1.0 + +[ClickHouse] SHALL add the **alter** privilege to the specified users or roles +after the successful execution of the `GRANT ALTER` statement. +Any new operation by a user or a user that has the specified role +which requires the **alter** privilege SHALL succeed. + +##### RQ.SRS-006.RBAC.Grant.Privilege.Create +version: 1.0 + +[ClickHouse] SHALL support granting the **create** privilege to one or more users or roles +for a database or a table using the `GRANT CREATE` statement. + +##### RQ.SRS-006.RBAC.Grant.Privilege.Create.Effect +version: 1.0 + +[ClickHouse] SHALL add the **create** privilege to the specified users or roles +after the successful execution of the `GRANT CREATE` statement. +Any new operation by a user or a user that has the specified role +which requires the **create** privilege SHALL succeed. + +##### RQ.SRS-006.RBAC.Grant.Privilege.Drop +version: 1.0 + +[ClickHouse] SHALL support granting the **drop** privilege to one or more users or roles +for a database or a table using the `GRANT DROP` statement. + +##### RQ.SRS-006.RBAC.Grant.Privilege.Drop.Effect +version: 1.0 + +[ClickHouse] SHALL add the **drop** privilege to the specified users or roles +after the successful execution of the `GRANT DROP` statement. +Any new operation by a user or a user that has the specified role +which requires the **drop** privilege SHALL succeed. + +##### RQ.SRS-006.RBAC.Grant.Privilege.Truncate +version: 1.0 + +[ClickHouse] SHALL support granting the **truncate** privilege to one or more users or roles +for a database or a table using `GRANT TRUNCATE` statement. + +##### RQ.SRS-006.RBAC.Grant.Privilege.Truncate.Effect +version: 1.0 + +[ClickHouse] SHALL add the **truncate** privilege to the specified users or roles +after the successful execution of the `GRANT TRUNCATE` statement. +Any new operation by a user or a user that has the specified role +which requires the **truncate** privilege SHALL succeed. + +##### RQ.SRS-006.RBAC.Grant.Privilege.Optimize +version: 1.0 + +[ClickHouse] SHALL support granting the **optimize** privilege to one or more users or roles +for a database or a table using `GRANT OPTIMIZE` statement. + +##### RQ.SRS-006.RBAC.Grant.Privilege.Optimize.Effect +version: 1.0 + +[ClickHouse] SHALL add the **optimize** privilege to the specified users or roles +after the successful execution of the `GRANT OPTIMIZE` statement. +Any new operation by a user or a user that has the specified role +which requires the **optimize** privilege SHALL succeed. + +##### RQ.SRS-006.RBAC.Grant.Privilege.Show +version: 1.0 + +[ClickHouse] SHALL support granting the **show** privilege to one or more users or roles +for a database or a table using `GRANT SHOW` statement. + +##### RQ.SRS-006.RBAC.Grant.Privilege.Show.Effect +version: 1.0 + +[ClickHouse] SHALL add the **show** privilege to the specified users or roles +after the successful execution of the `GRANT SHOW` statement. 
+Any new operation by a user or a user that has the specified role
which requires the **show** privilege SHALL succeed.

##### RQ.SRS-006.RBAC.Grant.Privilege.KillQuery
version: 1.0

[ClickHouse] SHALL support granting the **kill query** privilege to one or more users or roles
for a database or a table using the `GRANT KILL QUERY` statement.

##### RQ.SRS-006.RBAC.Grant.Privilege.KillQuery.Effect
version: 1.0

[ClickHouse] SHALL add the **kill query** privilege to the specified users or roles
after the successful execution of the `GRANT KILL QUERY` statement.
Any new operation by a user or a user that has the specified role
which requires the **kill query** privilege SHALL succeed.

##### RQ.SRS-006.RBAC.Grant.Privilege.AccessManagement
version: 1.0

[ClickHouse] SHALL support granting the **access management** privileges to one or more users or roles
for a database or a table using the `GRANT ACCESS MANAGEMENT` statement.

##### RQ.SRS-006.RBAC.Grant.Privilege.AccessManagement.Effect
version: 1.0

[ClickHouse] SHALL add the **access management** privileges to the specified users or roles
after the successful execution of the `GRANT ACCESS MANAGEMENT` statement.
Any new operation by a user or a user that has the specified role
which requires the **access management** privilege SHALL succeed.

##### RQ.SRS-006.RBAC.Grant.Privilege.System
version: 1.0

[ClickHouse] SHALL support granting the **system** privileges to one or more users or roles
for a database or a table using the `GRANT SYSTEM` statement.

##### RQ.SRS-006.RBAC.Grant.Privilege.System.Effect
version: 1.0

[ClickHouse] SHALL add the **system** privileges to the specified users or roles
after the successful execution of the `GRANT SYSTEM` statement.
Any new operation by a user or a user that has the specified role
which requires the **system** privilege SHALL succeed.

##### RQ.SRS-006.RBAC.Grant.Privilege.Introspection
version: 1.0

[ClickHouse] SHALL support granting the **introspection** privileges to one or more users or roles
for a database or a table using the `GRANT INTROSPECTION` statement.

##### RQ.SRS-006.RBAC.Grant.Privilege.Introspection.Effect
version: 1.0

[ClickHouse] SHALL add the **introspection** privileges to the specified users or roles
after the successful execution of the `GRANT INTROSPECTION` statement.
Any new operation by a user or a user that has the specified role
which requires the **introspection** privilege SHALL succeed.

##### RQ.SRS-006.RBAC.Grant.Privilege.Sources
version: 1.0

[ClickHouse] SHALL support granting the **sources** privileges to one or more users or roles
for a database or a table using the `GRANT SOURCES` statement.

##### RQ.SRS-006.RBAC.Grant.Privilege.Sources.Effect
version: 1.0

[ClickHouse] SHALL add the **sources** privileges to the specified users or roles
after the successful execution of the `GRANT SOURCES` statement.
Any new operation by a user or a user that has the specified role
which requires the **sources** privilege SHALL succeed.

##### RQ.SRS-006.RBAC.Grant.Privilege.DictGet
version: 1.0

[ClickHouse] SHALL support granting the **dictGet** privilege to one or more users or roles
for a database or a table using the `GRANT dictGet` statement.

##### RQ.SRS-006.RBAC.Grant.Privilege.DictGet.Effect
version: 1.0

[ClickHouse] SHALL add the **dictGet** privilege to the specified users or roles
after the successful execution of the `GRANT dictGet` statement.
+Any new operation by a user or a user that has the specified role
which requires the **dictGet** privilege SHALL succeed.

##### RQ.SRS-006.RBAC.Grant.Privilege.None
version: 1.0

[ClickHouse] SHALL support granting no privileges to one or more users or roles
for a database or a table using the `GRANT NONE` statement.

##### RQ.SRS-006.RBAC.Grant.Privilege.None.Effect
version: 1.0

[ClickHouse] SHALL add no privileges to the specified users or roles
after the successful execution of the `GRANT NONE` statement.
Any new operation by a user or a user that has the specified role
which requires no privileges SHALL succeed.

##### RQ.SRS-006.RBAC.Grant.Privilege.All
version: 1.0

[ClickHouse] SHALL support granting the **all** privileges to one or more users or roles
for a database or a table using the `GRANT ALL` or `GRANT ALL PRIVILEGES` statements.

##### RQ.SRS-006.RBAC.Grant.Privilege.All.Effect
version: 1.0

[ClickHouse] SHALL add the **all** privileges to the specified users or roles
after the successful execution of the `GRANT ALL` or `GRANT ALL PRIVILEGES` statement.
Any new operation by a user or a user that has the specified role
which requires one or more privileges that are part of the **all**
privileges SHALL succeed.

##### RQ.SRS-006.RBAC.Grant.Privilege.GrantOption
version: 1.0

[ClickHouse] SHALL support granting the **grant option** privilege to one or more users or roles
for a database or a table using the `WITH GRANT OPTION` clause in the `GRANT` statement.

##### RQ.SRS-006.RBAC.Grant.Privilege.GrantOption.Effect
version: 1.0

[ClickHouse] SHALL add the **grant option** privilege to the specified users or roles
after the successful execution of the `GRANT` statement with the `WITH GRANT OPTION` clause
for the privilege that was specified in the statement.
Any new `GRANT` statement executed by a user or a user that has the specified role
which requires the **grant option** for the privilege SHALL succeed.

##### RQ.SRS-006.RBAC.Grant.Privilege.On
version: 1.0

[ClickHouse] SHALL support the `ON` clause in the `GRANT` privilege statement
which SHALL allow specifying one or more tables to which the privilege SHALL
be granted using the following patterns

* `*.*` any table in any database
* `database.*` any table in the specified database
* `database.table` specific table in the specified database
* `*` any table in the current database
* `table` specific table in the current database

##### RQ.SRS-006.RBAC.Grant.Privilege.On.Effect
version: 1.0

[ClickHouse] SHALL grant the privilege on a table specified in the `ON` clause.
Any new operation by a user or role with the privilege on the granted table SHALL succeed.

##### RQ.SRS-006.RBAC.Grant.Privilege.PrivilegeColumns
version: 1.0

[ClickHouse] SHALL support granting the privilege **some_privilege** to one or more users or roles
for a database or a table using the `GRANT some_privilege(column)` statement for one column.
Multiple columns will be supported with the `GRANT some_privilege(column1, column2...)` statement.
The privileges will be granted for only the specified columns.

##### RQ.SRS-006.RBAC.Grant.Privilege.PrivilegeColumns.Effect
version: 1.0

[ClickHouse] SHALL grant the privilege **some_privilege** to the specified users or roles
after the successful execution of the `GRANT some_privilege(column)` statement for the specified column.
+Granting of the privilege **some_privilege** over multiple columns SHALL happen after the successful
execution of the `GRANT some_privilege(column1, column2...)` statement.
Any new operation by a user or a user that has the specified role
which requires the privilege **some_privilege** over the specified columns SHALL succeed.

##### RQ.SRS-006.RBAC.Grant.Privilege.OnCluster
version: 1.0

[ClickHouse] SHALL support specifying the cluster on which to grant privileges using the `ON CLUSTER`
clause in the `GRANT PRIVILEGE` statement.

##### RQ.SRS-006.RBAC.Grant.Privilege.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for the `GRANT` statement that
grants explicit privileges to a user or a role.

```sql
GRANT [ON CLUSTER cluster_name]
    privilege {SELECT | SELECT(columns) | INSERT | ALTER | CREATE | DROP | TRUNCATE | OPTIMIZE | SHOW | KILL QUERY | ACCESS MANAGEMENT | SYSTEM | INTROSPECTION | SOURCES | dictGet | NONE | ALL [PRIVILEGES]} [, ...]
    ON {*.* | database.* | database.table | * | table}
    TO {user | role | CURRENT_USER} [,...]
    [WITH GRANT OPTION]
```

##### RQ.SRS-006.RBAC.Revoke.Privilege.Cluster
version: 1.0

[ClickHouse] SHALL support revoking privileges from one or more users or roles
for a database or a table on some specific cluster using the `REVOKE ON CLUSTER cluster_name` statement.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Cluster.Effect
version: 1.0

[ClickHouse] SHALL remove some privilege from the specified users or roles
on cluster **cluster_name** after the successful execution of the
`REVOKE ON CLUSTER cluster_name some_privilege` statement. Any new operation by a user or a user
that had the specified role which requires that privilege on cluster **cluster_name** SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Any
version: 1.0

[ClickHouse] SHALL support revoking ANY privilege from one or more users or roles
for a database or a table using the `REVOKE some_privilege` statement.
**some_privilege** refers to any ClickHouse-defined privilege, whose hierarchy includes
SELECT, INSERT, ALTER, CREATE, DROP, TRUNCATE, OPTIMIZE, SHOW, KILL QUERY, ACCESS MANAGEMENT,
SYSTEM, INTROSPECTION, SOURCES, dictGet and all of their sub-privileges.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Any.Effect
version: 1.0

[ClickHouse] SHALL remove the **some_privilege** privilege from the specified users or roles
after the successful execution of the `REVOKE some_privilege` statement.
Any new operation by a user or a user that had the specified role
which requires the privilege **some_privilege** SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Select
version: 1.0

[ClickHouse] SHALL support revoking the **select** privilege from one or more users or roles
for a database or a table using the `REVOKE SELECT` statement.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Select.Effect
version: 1.0

[ClickHouse] SHALL remove the **select** privilege from the specified users or roles
after the successful execution of the `REVOKE SELECT` statement.
Any new operation by a user or a user that had the specified role
which requires the **select** privilege SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Insert
version: 1.0

[ClickHouse] SHALL support revoking the **insert** privilege from one or more users or roles
for a database or a table using the `REVOKE INSERT` statement.
+
##### RQ.SRS-006.RBAC.Revoke.Privilege.Insert.Effect
version: 1.0

[ClickHouse] SHALL remove the **insert** privilege from the specified users or roles
after the successful execution of the `REVOKE INSERT` statement.
Any new operation by a user or a user that had the specified role
which requires the **insert** privilege SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Alter
version: 1.0

[ClickHouse] SHALL support revoking the **alter** privilege from one or more users or roles
for a database or a table using the `REVOKE ALTER` statement.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Alter.Effect
version: 1.0

[ClickHouse] SHALL remove the **alter** privilege from the specified users or roles
after the successful execution of the `REVOKE ALTER` statement.
Any new operation by a user or a user that had the specified role
which requires the **alter** privilege SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Create
version: 1.0

[ClickHouse] SHALL support revoking the **create** privilege from one or more users or roles
for a database or a table using the `REVOKE CREATE` statement.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Create.Effect
version: 1.0

[ClickHouse] SHALL remove the **create** privilege from the specified users or roles
after the successful execution of the `REVOKE CREATE` statement.
Any new operation by a user or a user that had the specified role
which requires the **create** privilege SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Drop
version: 1.0

[ClickHouse] SHALL support revoking the **drop** privilege from one or more users or roles
for a database or a table using the `REVOKE DROP` statement.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Drop.Effect
version: 1.0

[ClickHouse] SHALL remove the **drop** privilege from the specified users or roles
after the successful execution of the `REVOKE DROP` statement.
Any new operation by a user or a user that had the specified role
which requires the **drop** privilege SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Truncate
version: 1.0

[ClickHouse] SHALL support revoking the **truncate** privilege from one or more users or roles
for a database or a table using the `REVOKE TRUNCATE` statement.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Truncate.Effect
version: 1.0

[ClickHouse] SHALL remove the **truncate** privilege from the specified users or roles
after the successful execution of the `REVOKE TRUNCATE` statement.
Any new operation by a user or a user that had the specified role
which requires the **truncate** privilege SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Optimize
version: 1.0

[ClickHouse] SHALL support revoking the **optimize** privilege from one or more users or roles
for a database or a table using the `REVOKE OPTIMIZE` statement.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Optimize.Effect
version: 1.0

[ClickHouse] SHALL remove the **optimize** privilege from the specified users or roles
after the successful execution of the `REVOKE OPTIMIZE` statement.
Any new operation by a user or a user that had the specified role
which requires the **optimize** privilege SHALL fail if the user does not have it otherwise.
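For illustration only, a minimal sketch of the grant/revoke cycle described by the requirements above, using the syntax from the `GRANT` and `REVOKE` syntax requirements; the table `db1.t1` and the user `user1` are hypothetical names, not part of this specification:

```sql
-- user1 gains the truncate privilege on db1.t1;
-- any new TRUNCATE on db1.t1 by user1 now succeeds
GRANT TRUNCATE ON db1.t1 TO user1

-- user1 loses the truncate privilege again; any new TRUNCATE on db1.t1
-- by user1 fails unless the privilege is held through some other grant or role
REVOKE TRUNCATE ON db1.t1 FROM user1
```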
+
##### RQ.SRS-006.RBAC.Revoke.Privilege.Show
version: 1.0

[ClickHouse] SHALL support revoking the **show** privilege from one or more users or roles
for a database or a table using the `REVOKE SHOW` statement.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Show.Effect
version: 1.0

[ClickHouse] SHALL remove the **show** privilege from the specified users or roles
after the successful execution of the `REVOKE SHOW` statement.
Any new operation by a user or a user that had the specified role
which requires the **show** privilege SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.Privilege.KillQuery
version: 1.0

[ClickHouse] SHALL support revoking the **kill query** privilege from one or more users or roles
for a database or a table using the `REVOKE KILL QUERY` statement.

##### RQ.SRS-006.RBAC.Revoke.Privilege.KillQuery.Effect
version: 1.0

[ClickHouse] SHALL remove the **kill query** privilege from the specified users or roles
after the successful execution of the `REVOKE KILL QUERY` statement.
Any new operation by a user or a user that had the specified role
which requires the **kill query** privilege SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.Privilege.AccessManagement
version: 1.0

[ClickHouse] SHALL support revoking the **access management** privilege from one or more users or roles
for a database or a table using the `REVOKE ACCESS MANAGEMENT` statement.

##### RQ.SRS-006.RBAC.Revoke.Privilege.AccessManagement.Effect
version: 1.0

[ClickHouse] SHALL remove the **access management** privilege from the specified users or roles
after the successful execution of the `REVOKE ACCESS MANAGEMENT` statement.
Any new operation by a user or a user that had the specified role
which requires the **access management** privilege SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.Privilege.System
version: 1.0

[ClickHouse] SHALL support revoking the **system** privilege from one or more users or roles
for a database or a table using the `REVOKE SYSTEM` statement.

##### RQ.SRS-006.RBAC.Revoke.Privilege.System.Effect
version: 1.0

[ClickHouse] SHALL remove the **system** privilege from the specified users or roles
after the successful execution of the `REVOKE SYSTEM` statement.
Any new operation by a user or a user that had the specified role
which requires the **system** privilege SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Introspection
version: 1.0

[ClickHouse] SHALL support revoking the **introspection** privilege from one or more users or roles
for a database or a table using the `REVOKE INTROSPECTION` statement.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Introspection.Effect
version: 1.0

[ClickHouse] SHALL remove the **introspection** privilege from the specified users or roles
after the successful execution of the `REVOKE INTROSPECTION` statement.
Any new operation by a user or a user that had the specified role
which requires the **introspection** privilege SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Sources
version: 1.0

[ClickHouse] SHALL support revoking the **sources** privilege from one or more users or roles
for a database or a table using the `REVOKE SOURCES` statement.
+
##### RQ.SRS-006.RBAC.Revoke.Privilege.Sources.Effect
version: 1.0

[ClickHouse] SHALL remove the **sources** privilege from the specified users or roles
after the successful execution of the `REVOKE SOURCES` statement.
Any new operation by a user or a user that had the specified role
which requires the **sources** privilege SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.Privilege.DictGet
version: 1.0

[ClickHouse] SHALL support revoking the **dictGet** privilege from one or more users or roles
for a database or a table using the `REVOKE dictGet` statement.

##### RQ.SRS-006.RBAC.Revoke.Privilege.DictGet.Effect
version: 1.0

[ClickHouse] SHALL remove the **dictGet** privilege from the specified users or roles
after the successful execution of the `REVOKE dictGet` statement.
Any new operation by a user or a user that had the specified role
which requires the **dictGet** privilege SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.Privilege.PrivilegeColumns
version: 1.0

[ClickHouse] SHALL support revoking the privilege **some_privilege** from one or more users or roles
for a database or a table using the `REVOKE some_privilege(column)` statement for one column.
Multiple columns will be supported with the `REVOKE some_privilege(column1, column2...)` statement.
The privileges will be revoked for only the specified columns.

##### RQ.SRS-006.RBAC.Revoke.Privilege.PrivilegeColumns.Effect
version: 1.0

[ClickHouse] SHALL remove the privilege **some_privilege** from the specified users or roles
after the successful execution of the `REVOKE some_privilege(column)` statement for the specified column.
Removal of the privilege **some_privilege** over multiple columns SHALL happen after the successful
execution of the `REVOKE some_privilege(column1, column2...)` statement.
Any new operation by a user or a user that had the specified role
which requires the privilege **some_privilege** over the specified columns SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Multiple
version: 1.0

[ClickHouse] SHALL support revoking MULTIPLE **privileges** from one or more users or roles
for a database or a table using the `REVOKE privilege1, privilege2...` statement.
**privileges** refers to any set of ClickHouse-defined privileges, whose hierarchy includes
SELECT, INSERT, ALTER, CREATE, DROP, TRUNCATE, OPTIMIZE, SHOW, KILL QUERY, ACCESS MANAGEMENT,
SYSTEM, INTROSPECTION, SOURCES, dictGet and all of their sub-privileges.

##### RQ.SRS-006.RBAC.Revoke.Privilege.Multiple.Effect
version: 1.0

[ClickHouse] SHALL remove the **privileges** from the specified users or roles
after the successful execution of the `REVOKE privilege1, privilege2...` statement.
Any new operation by a user or a user that had the specified role
which requires any of the **privileges** SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.Privilege.All
version: 1.0

[ClickHouse] SHALL support revoking **all** privileges from one or more users or roles
for a database or a table using the `REVOKE ALL` or `REVOKE ALL PRIVILEGES` statements.

##### RQ.SRS-006.RBAC.Revoke.Privilege.All.Effect
version: 1.0

[ClickHouse] SHALL remove **all** privileges from the specified users or roles
after the successful execution of the `REVOKE ALL` or `REVOKE ALL PRIVILEGES` statement.
+Any new operation by a user or a user that had the specified role
which requires one or more privileges that are part of **all**
privileges SHALL fail.

##### RQ.SRS-006.RBAC.Revoke.Privilege.None
version: 1.0

[ClickHouse] SHALL support revoking **no** privileges from one or more users or roles
for a database or a table using the `REVOKE NONE` statement.

##### RQ.SRS-006.RBAC.Revoke.Privilege.None.Effect
version: 1.0

[ClickHouse] SHALL remove **no** privileges from the specified users or roles
after the successful execution of the `REVOKE NONE` statement.
Any new operation by a user or a user that had the specified role
SHALL have the same effect after this command as it did before this command.

##### RQ.SRS-006.RBAC.Revoke.Privilege.On
version: 1.0

[ClickHouse] SHALL support the `ON` clause in the `REVOKE` privilege statement
which SHALL allow specifying one or more tables on which the privilege SHALL
be revoked using the following patterns

* `db.table` specific table in the specified database
* `db.*` any table in the specified database
* `*.*` any table in any database
* `table` specific table in the current database
* `*` any table in the current database

##### RQ.SRS-006.RBAC.Revoke.Privilege.On.Effect
version: 1.0

[ClickHouse] SHALL remove the specified privileges from the specified one or more tables
indicated with the `ON` clause in the `REVOKE` privilege statement.
The tables will be indicated using the following patterns

* `db.table` specific table in the specified database
* `db.*` any table in the specified database
* `*.*` any table in any database
* `table` specific table in the current database
* `*` any table in the current database

Any new operation by a user or a user that had the specified role
which requires one or more privileges on the revoked tables SHALL fail.

##### RQ.SRS-006.RBAC.Revoke.Privilege.From
version: 1.0

[ClickHouse] SHALL support the `FROM` clause in the `REVOKE` privilege statement
which SHALL allow specifying one or more users from whom the privilege SHALL
be revoked using the following patterns

* `{user | CURRENT_USER} [,...]` some combination of users by name, which may include the current user
* `ALL` all users
* `ALL EXCEPT {user | CURRENT_USER} [,...]` the logical reverse of the first pattern

##### RQ.SRS-006.RBAC.Revoke.Privilege.From.Effect
version: 1.0

[ClickHouse] SHALL remove **privileges** from any set of users specified in the `FROM` clause
in the `REVOKE` privilege statement. The details of the removed **privileges** will be specified
in the other clauses. Any new operation by one of the specified users whose **privileges** have been
revoked SHALL fail. The patterns that expand the `FROM` clause are listed below

* `{user | CURRENT_USER} [,...]` some combination of users by name, which may include the current user
* `ALL` all users
* `ALL EXCEPT {user | CURRENT_USER} [,...]` the logical reverse of the first pattern

##### RQ.SRS-006.RBAC.Revoke.Privilege.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for the `REVOKE` statement that
revokes explicit privileges of a user or a role.

```sql
REVOKE [ON CLUSTER cluster_name] privilege
    [(column_name [,...])] [,...]
    ON {db.table|db.*|*.*|table|*}
    FROM {user | CURRENT_USER} [,...] | ALL | ALL EXCEPT {user | CURRENT_USER} [,...]
+```

##### RQ.SRS-006.RBAC.PartialRevoke.Syntax
version: 1.0

[ClickHouse] SHALL support partial revokes by using the `partial_revokes` variable
that can be set or unset using the following syntax.

To disable partial revokes the `partial_revokes` variable SHALL be set to `0`

```sql
SET partial_revokes = 0
```

To enable partial revokes the `partial_revokes` variable SHALL be set to `1`

```sql
SET partial_revokes = 1
```

##### RQ.SRS-006.RBAC.PartialRevoke.Effect
version: 1.0

FIXME: Needs to be defined.

##### RQ.SRS-006.RBAC.Grant.Role
version: 1.0

[ClickHouse] SHALL support granting one or more roles to
one or more users or roles using the `GRANT` role statement.

##### RQ.SRS-006.RBAC.Grant.Role.Effect
version: 1.0

[ClickHouse] SHALL add all the privileges that are assigned to the role
which is granted to the user or the role to which the `GRANT` role statement is applied.
Any new operation that requires the privileges included in the role
SHALL succeed.

##### RQ.SRS-006.RBAC.Grant.Role.CurrentUser
version: 1.0

[ClickHouse] SHALL support granting one or more roles to the current user using
the `TO CURRENT_USER` clause in the `GRANT` role statement.

##### RQ.SRS-006.RBAC.Grant.Role.CurrentUser.Effect
version: 1.0

[ClickHouse] SHALL add all the privileges that are assigned to the role
which is granted to the current user via the `GRANT` statement. Any new operation that
requires the privileges included in the role SHALL succeed.

##### RQ.SRS-006.RBAC.Grant.Role.AdminOption
version: 1.0

[ClickHouse] SHALL support granting the `admin option` privilege
to one or more users or roles using the `WITH ADMIN OPTION` clause
in the `GRANT` role statement.

##### RQ.SRS-006.RBAC.Grant.Role.AdminOption.Effect
version: 1.0

[ClickHouse] SHALL add the **admin option** privilege to the specified users or roles
after the successful execution of the `GRANT` role statement with the `WITH ADMIN OPTION` clause.
Any new **system query** statements executed by a user or a user that has the specified role
which require the **admin option** privilege SHALL succeed.

##### RQ.SRS-006.RBAC.Grant.Role.OnCluster
version: 1.0

[ClickHouse] SHALL support specifying the cluster on which the user is to be granted one or more roles
using the `ON CLUSTER` clause in the `GRANT` statement.

##### RQ.SRS-006.RBAC.Grant.Role.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for the `GRANT` role statement

``` sql
GRANT [ON CLUSTER cluster_name]
    role [, role ...]
    TO {user | role | CURRENT_USER} [,...]
    [WITH ADMIN OPTION]
```

##### RQ.SRS-006.RBAC.Revoke.Role
version: 1.0

[ClickHouse] SHALL support revoking one or more roles from
one or more users or roles using the `REVOKE` role statement.

##### RQ.SRS-006.RBAC.Revoke.Role.Effect
version: 1.0

[ClickHouse] SHALL remove all the privileges that are assigned to the role
that is being revoked from the user or the role to which the `REVOKE` role statement is applied.
Any new operation, by the user or users that have the role which included the role being revoked,
that requires the privileges included in the role SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.Role.Keywords
version: 1.0

[ClickHouse] SHALL support revoking one or more roles from
special groupings of one or more users or roles with the `ALL`, `ALL EXCEPT`,
and `CURRENT_USER` keywords.
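For illustration, a hedged sketch of role revocation with these keywords, following the `REVOKE` role syntax requirement below; the role `role1` and user `user1` are hypothetical names:

```sql
-- revoke role1 from every user except user1 and the current user
REVOKE role1 FROM ALL EXCEPT user1, CURRENT_USER
```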
+
##### RQ.SRS-006.RBAC.Revoke.Role.Keywords.Effect
version: 1.0

[ClickHouse] SHALL remove all the privileges that are assigned to the role
that is being revoked from the user or the role to which the `REVOKE` role statement with the specified keywords is applied.
Any new operation, by the user or users that have the role which included the role being revoked,
that requires the privileges included in the role SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.Role.Cluster
version: 1.0

[ClickHouse] SHALL support revoking one or more roles from
one or more users or roles from one or more clusters
using the `REVOKE ON CLUSTER` role statement.

##### RQ.SRS-006.RBAC.Revoke.Role.Cluster.Effect
version: 1.0

[ClickHouse] SHALL remove all the privileges that are assigned to the role
that is being revoked from the user or the role from the cluster(s)
to which the `REVOKE ON CLUSTER` role statement is applied.
Any new operation, by the user or users that have the role which included the role being revoked,
that requires the privileges included in the role SHALL fail if the user does not have it otherwise.

##### RQ.SRS-006.RBAC.Revoke.AdminOption
version: 1.0

[ClickHouse] SHALL support revoking the `admin option` privilege
from one or more users or roles using the `ADMIN OPTION FOR` clause
in the `REVOKE` role statement.

##### RQ.SRS-006.RBAC.Revoke.AdminOption.Effect
version: 1.0

[ClickHouse] SHALL remove the **admin option** privilege from the specified users or roles
after the successful execution of the `REVOKE` role statement with the `ADMIN OPTION FOR` clause.
Any new **system query** statements executed by a user or a user that has the specified role
which require the **admin option** privilege SHALL fail.

##### RQ.SRS-006.RBAC.Revoke.Role.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for the `REVOKE` role statement

```sql
REVOKE [ON CLUSTER cluster_name] [ADMIN OPTION FOR]
    role [,...]
    FROM {user | role | CURRENT_USER} [,...] | ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...]
```

##### RQ.SRS-006.RBAC.Show.Grants
version: 1.0

[ClickHouse] SHALL support listing all the privileges granted to the current user and role
using the `SHOW GRANTS` statement.

##### RQ.SRS-006.RBAC.Show.Grants.For
version: 1.0

[ClickHouse] SHALL support listing all the privileges granted to a user or a role
using the `FOR` clause in the `SHOW GRANTS` statement.

##### RQ.SRS-006.RBAC.Show.Grants.Syntax
version: 1.0

[ClickHouse] SHALL use the following syntax for the `SHOW GRANTS` statement

``` sql
SHOW GRANTS [FOR user_or_role]
```

##### RQ.SRS-006.RBAC.SettingsProfile.Create
version: 1.0

[ClickHouse] SHALL support creating a settings profile using the `CREATE SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Create.Effect
version: 1.0

[ClickHouse] SHALL use the new profile after the `CREATE SETTINGS PROFILE` statement
is successfully executed for any new operations performed by all the users and roles to which
the settings profile is assigned.

##### RQ.SRS-006.RBAC.SettingsProfile.Create.IfNotExists
version: 1.0

[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE SETTINGS PROFILE` statement
to skip raising an exception if a settings profile with the same **name** already exists.
If `IF NOT EXISTS` clause is not specified then an exception SHALL be raised if
a settings profile with the same **name** already exists.
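A minimal sketch of the `IF NOT EXISTS` behavior described above; the profile name `profile1` is a hypothetical example:

```sql
-- raises an exception if profile1 already exists
CREATE SETTINGS PROFILE profile1

-- skips the exception if profile1 already exists
CREATE SETTINGS PROFILE IF NOT EXISTS profile1
```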
+
##### RQ.SRS-006.RBAC.SettingsProfile.Create.Replace
version: 1.0

[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE SETTINGS PROFILE` statement
to replace existing settings profile if it already exists.

##### RQ.SRS-006.RBAC.SettingsProfile.Create.Variables
version: 1.0

[ClickHouse] SHALL support assigning values and constraints to one or more
variables in the `CREATE SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Value
version: 1.0

[ClickHouse] SHALL support assigning a variable value in the `CREATE SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Value.Effect
version: 1.0

[ClickHouse] SHALL use the new variable values after the `CREATE SETTINGS PROFILE` statement is
successfully executed for any new operations performed by all the users and roles to which
the settings profile is assigned.

##### RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Constraints
version: 1.0

[ClickHouse] SHALL support setting `MIN`, `MAX`, `READONLY`, and `WRITABLE`
constraints for the variables in the `CREATE SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Constraints.Effect
version: 1.0

[ClickHouse] SHALL use the new variable constraints after the `CREATE SETTINGS PROFILE` statement is
successfully executed for any new operations performed by all the users and roles to which
the settings profile is assigned.

##### RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment
version: 1.0

[ClickHouse] SHALL support assigning settings profile to one or more users
or roles in the `CREATE SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.None
version: 1.0

[ClickHouse] SHALL support assigning settings profile to no users or roles using
the `TO NONE` clause in the `CREATE SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.All
version: 1.0

[ClickHouse] SHALL support assigning settings profile to all current users and roles
using the `TO ALL` clause in the `CREATE SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.AllExcept
version: 1.0

[ClickHouse] SHALL support excluding assignment to one or more users or roles using
the `ALL EXCEPT` clause in the `CREATE SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Create.Inherit
version: 1.0

[ClickHouse] SHALL support inheriting profile settings from an indicated profile using
the `INHERIT` clause in the `CREATE SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Create.OnCluster
version: 1.0

[ClickHouse] SHALL support specifying the cluster on which to create the settings profile
using the `ON CLUSTER` clause in the `CREATE SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Create.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for the `CREATE SETTINGS PROFILE` statement.

``` sql
CREATE SETTINGS PROFILE [IF NOT EXISTS | OR REPLACE] name
    [ON CLUSTER cluster_name]
    [SET varname [= value] [MIN min] [MAX max] [READONLY|WRITABLE] | [INHERIT 'profile_name'] [,...]]
    [TO {user_or_role [,...] | NONE | ALL | ALL EXCEPT user_or_role [,...]}]
```

##### RQ.SRS-006.RBAC.SettingsProfile.Alter
version: 1.0

[ClickHouse] SHALL support altering a settings profile using the `ALTER SETTINGS PROFILE` statement.
+
##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Effect
version: 1.0

[ClickHouse] SHALL use the updated settings profile after `ALTER SETTINGS PROFILE`
is successfully executed for any new operations performed by all the users and roles to which
the settings profile is assigned or SHALL raise an exception if the settings profile does not exist.

##### RQ.SRS-006.RBAC.SettingsProfile.Alter.IfExists
version: 1.0

[ClickHouse] SHALL support `IF EXISTS` clause in the `ALTER SETTINGS PROFILE` statement
to skip raising an exception if a settings profile does not exist.
If the `IF EXISTS` clause is not specified then an exception SHALL be
raised if a settings profile does not exist.

##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Rename
version: 1.0

[ClickHouse] SHALL support renaming settings profile using the `RENAME TO` clause
in the `ALTER SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables
version: 1.0

[ClickHouse] SHALL support altering values and constraints of one or more
variables in the `ALTER SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Value
version: 1.0

[ClickHouse] SHALL support altering the value of the variable in the `ALTER SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Value.Effect
version: 1.0

[ClickHouse] SHALL use the new value of the variable after `ALTER SETTINGS PROFILE`
is successfully executed for any new operations performed by all the users and roles to which
the settings profile is assigned.

##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Constraints
version: 1.0

[ClickHouse] SHALL support altering `MIN`, `MAX`, `READONLY`, and `WRITABLE`
constraints for the variables in the `ALTER SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Constraints.Effect
version: 1.0

[ClickHouse] SHALL use new constraints after `ALTER SETTINGS PROFILE`
is successfully executed for any new operations performed by all the users and roles to which
the settings profile is assigned.

##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment
version: 1.0

[ClickHouse] SHALL support reassigning settings profile to one or more users
or roles using the `TO` clause in the `ALTER SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.Effect
version: 1.0

[ClickHouse] SHALL unset all the variables and constraints that were defined in the settings profile
for all users and roles to which the settings profile was previously assigned.

##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.None
version: 1.0

[ClickHouse] SHALL support reassigning settings profile to no users or roles using the
`TO NONE` clause in the `ALTER SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.All
version: 1.0

[ClickHouse] SHALL support reassigning settings profile to all current users and roles
using the `TO ALL` clause in the `ALTER SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.AllExcept
version: 1.0

[ClickHouse] SHALL support excluding assignment to one or more users or roles using
the `TO ALL EXCEPT` clause in the `ALTER SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.Inherit
version: 1.0

[ClickHouse] SHALL support altering the settings profile by inheriting settings from
a specified profile using the `INHERIT` clause in the `ALTER SETTINGS PROFILE` statement.
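For illustration, a hedged sketch combining several of the `ALTER SETTINGS PROFILE` clauses above; the exact clause forms are given in the syntax requirement below, and the names `profile1`, `base_profile`, and `user1` are hypothetical, as is this particular clause combination:

```sql
-- change a variable value and its constraints, inherit from another profile,
-- and reassign the profile to user1
ALTER SETTINGS PROFILE IF EXISTS profile1
    SETTINGS max_memory_usage = 10000000 MIN 1000000 MAX 20000000,
    INHERIT 'base_profile'
    TO user1
```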
+
##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.OnCluster
version: 1.0

[ClickHouse] SHALL support altering the settings profile on a specified cluster using
the `ON CLUSTER` clause in the `ALTER SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Alter.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for the `ALTER SETTINGS PROFILE` statement.

``` sql
ALTER SETTINGS PROFILE [IF EXISTS] name
    [ON CLUSTER cluster_name]
    [RENAME TO new_name]
    [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | INHERIT 'profile_name'] [,...]
    [TO {user_or_role [,...] | NONE | ALL | ALL EXCEPT user_or_role [,...]}]
```

##### RQ.SRS-006.RBAC.SettingsProfile.Drop
version: 1.0

[ClickHouse] SHALL support removing one or more settings profiles using the `DROP SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Drop.Effect
version: 1.0

[ClickHouse] SHALL unset all the variables and constraints that were defined in the settings profile
for all the users and roles to which the settings profile was assigned.

##### RQ.SRS-006.RBAC.SettingsProfile.Drop.IfExists
version: 1.0

[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP SETTINGS PROFILE` statement
to skip raising an exception if the settings profile does not exist.
If the `IF EXISTS` clause is not specified then an exception SHALL be
raised if a settings profile does not exist.

##### RQ.SRS-006.RBAC.SettingsProfile.Drop.OnCluster
version: 1.0

[ClickHouse] SHALL support dropping one or more settings profiles on a specified cluster using
the `ON CLUSTER` clause in the `DROP SETTINGS PROFILE` statement.

##### RQ.SRS-006.RBAC.SettingsProfile.Drop.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for the `DROP SETTINGS PROFILE` statement

``` sql
DROP SETTINGS PROFILE [IF EXISTS] name [,name,...]
```

##### RQ.SRS-006.RBAC.SettingsProfile.ShowCreateSettingsProfile
version: 1.0

[ClickHouse] SHALL support showing the `CREATE SETTINGS PROFILE` statement used to create the settings profile
using the `SHOW CREATE SETTINGS PROFILE` statement with the following syntax

``` sql
SHOW CREATE SETTINGS PROFILE name
```

##### RQ.SRS-006.RBAC.Quota.Create
version: 1.0

[ClickHouse] SHALL support creating quotas using the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.Effect
version: 1.0

[ClickHouse] SHALL use the new limits specified by the quota after the `CREATE QUOTA` statement
is successfully executed for any new operations performed by all the users and roles to which
the quota is assigned.

##### RQ.SRS-006.RBAC.Quota.Create.IfNotExists
version: 1.0

[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE QUOTA` statement
to skip raising an exception if a quota with the same **name** already exists.
If `IF NOT EXISTS` clause is not specified then an exception SHALL be raised if
a quota with the same **name** already exists.

##### RQ.SRS-006.RBAC.Quota.Create.Replace
version: 1.0

[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE QUOTA` statement
to replace existing quota if it already exists.

##### RQ.SRS-006.RBAC.Quota.Create.Cluster
version: 1.0

[ClickHouse] SHALL support creating quotas on a specific cluster with the
`ON CLUSTER` clause in the `CREATE QUOTA` statement.
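A minimal sketch of the quota creation clauses above, following the `CREATE QUOTA` syntax requirement further below; the quota `quota1` and cluster `my_cluster` are hypothetical names:

```sql
-- replaces quota1 if it already exists, creating it on every host of my_cluster
CREATE QUOTA OR REPLACE quota1 ON CLUSTER my_cluster
```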
+
##### RQ.SRS-006.RBAC.Quota.Create.Interval
version: 1.0

[ClickHouse] SHALL support defining the quota interval that specifies
a period of time over which the quota SHALL apply using the
`FOR INTERVAL` clause in the `CREATE QUOTA` statement.

This statement SHALL also support a number and a time period which will be one
of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. Thus, the complete syntax SHALL be:

`FOR INTERVAL number {SECOND | MINUTE | HOUR | DAY | MONTH}` where number is some real number
to define the interval.

##### RQ.SRS-006.RBAC.Quota.Create.Interval.Randomized
version: 1.0

[ClickHouse] SHALL support defining the quota randomized interval that specifies
a period of time over which the quota SHALL apply using the
`FOR RANDOMIZED INTERVAL` clause in the `CREATE QUOTA` statement.

This statement SHALL also support a number and a time period which will be one
of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. Thus, the complete syntax SHALL be:

`FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | MONTH}` where number is some
real number to define the interval.

##### RQ.SRS-006.RBAC.Quota.Create.Queries
version: 1.0

[ClickHouse] SHALL support limiting the number of requests over a period of time
using the `QUERIES` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.Errors
version: 1.0

[ClickHouse] SHALL support limiting the number of queries that threw an exception
using the `ERRORS` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.ResultRows
version: 1.0

[ClickHouse] SHALL support limiting the total number of rows given as the result
using the `RESULT ROWS` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.ReadRows
version: 1.0

[ClickHouse] SHALL support limiting the total number of source rows read from tables
for running the query on all remote servers
using the `READ ROWS` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.ResultBytes
version: 1.0

[ClickHouse] SHALL support limiting the total number of bytes that can be returned as the result
using the `RESULT BYTES` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.ReadBytes
version: 1.0

[ClickHouse] SHALL support limiting the total number of source bytes read from tables
for running the query on all remote servers
using the `READ BYTES` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.ExecutionTime
version: 1.0

[ClickHouse] SHALL support limiting the maximum query execution time
using the `EXECUTION TIME` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.NoLimits
version: 1.0

[ClickHouse] SHALL support specifying no limits on resource consumption
using the `NO LIMITS` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.TrackingOnly
version: 1.0

[ClickHouse] SHALL support tracking resource consumption without enforcing any limits
using the `TRACKING ONLY` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.KeyedBy
version: 1.0

[ClickHouse] SHALL support tracking the quota for some key
using the `KEYED BY` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.KeyedByOptions
version: 1.0

[ClickHouse] SHALL support tracking the quota separately for some parameter
using the `KEYED BY 'parameter'` clause in the `CREATE QUOTA` statement.
+
'parameter' can be one of:
`{'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}`

##### RQ.SRS-006.RBAC.Quota.Create.Assignment
version: 1.0

[ClickHouse] SHALL support assigning quota to one or more users
or roles using the `TO` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.Assignment.None
version: 1.0

[ClickHouse] SHALL support assigning quota to no users or roles using
the `TO NONE` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.Assignment.All
version: 1.0

[ClickHouse] SHALL support assigning quota to all current users and roles
using the `TO ALL` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.Assignment.Except
version: 1.0

[ClickHouse] SHALL support excluding assignment of quota to one or more users or roles using
the `EXCEPT` clause in the `CREATE QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Create.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for the `CREATE QUOTA` statement

```sql
CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]
    [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}]
    [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | MONTH}
        {MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] |
        NO LIMITS | TRACKING ONLY} [,...]]
    [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
```

##### RQ.SRS-006.RBAC.Quota.Alter
version: 1.0

[ClickHouse] SHALL support altering quotas using the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.Effect
version: 1.0

[ClickHouse] SHALL use the new limits specified by the updated quota after the `ALTER QUOTA` statement
is successfully executed for any new operations performed by all the users and roles to which
the quota is assigned.

##### RQ.SRS-006.RBAC.Quota.Alter.IfExists
version: 1.0

[ClickHouse] SHALL support `IF EXISTS` clause in the `ALTER QUOTA` statement
to skip raising an exception if a quota does not exist.
If the `IF EXISTS` clause is not specified then an exception SHALL be raised if
a quota does not exist.

##### RQ.SRS-006.RBAC.Quota.Alter.Rename
version: 1.0

[ClickHouse] SHALL support `RENAME TO` clause in the `ALTER QUOTA` statement
to rename the quota to the specified name.

##### RQ.SRS-006.RBAC.Quota.Alter.Cluster
version: 1.0

[ClickHouse] SHALL support altering quotas on a specific cluster with the
`ON CLUSTER` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.Interval
version: 1.0

[ClickHouse] SHALL support redefining the quota interval that specifies
a period of time over which the quota SHALL apply using the
`FOR INTERVAL` clause in the `ALTER QUOTA` statement.

This statement SHALL also support a number and a time period which will be one
of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. Thus, the complete syntax SHALL be:

`FOR INTERVAL number {SECOND | MINUTE | HOUR | DAY | MONTH}` where number is some real number
to define the interval.

##### RQ.SRS-006.RBAC.Quota.Alter.Interval.Randomized
version: 1.0

[ClickHouse] SHALL support redefining the quota randomized interval that specifies
a period of time over which the quota SHALL apply using the
`FOR RANDOMIZED INTERVAL` clause in the `ALTER QUOTA` statement.
+
This statement SHALL also support a number and a time period which will be one
of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. Thus, the complete syntax SHALL be:

`FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | MONTH}` where number is some
real number to define the interval.

##### RQ.SRS-006.RBAC.Quota.Alter.Queries
version: 1.0

[ClickHouse] SHALL support altering the limit on the number of requests over a period of time
using the `QUERIES` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.Errors
version: 1.0

[ClickHouse] SHALL support altering the limit on the number of queries that threw an exception
using the `ERRORS` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.ResultRows
version: 1.0

[ClickHouse] SHALL support altering the limit on the total number of rows given as the result
using the `RESULT ROWS` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.ReadRows
version: 1.0

[ClickHouse] SHALL support altering the limit on the total number of source rows read from tables
for running the query on all remote servers
using the `READ ROWS` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.ResultBytes
version: 1.0

[ClickHouse] SHALL support altering the limit on the total number of bytes that can be returned as the result
using the `RESULT BYTES` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.ReadBytes
version: 1.0

[ClickHouse] SHALL support altering the limit on the total number of source bytes read from tables
for running the query on all remote servers
using the `READ BYTES` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.ExecutionTime
version: 1.0

[ClickHouse] SHALL support altering the limit on the maximum query execution time
using the `EXECUTION TIME` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.NoLimits
version: 1.0

[ClickHouse] SHALL support specifying no limits on resource consumption
using the `NO LIMITS` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.TrackingOnly
version: 1.0

[ClickHouse] SHALL support tracking resource consumption without enforcing any limits
using the `TRACKING ONLY` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.KeyedBy
version: 1.0

[ClickHouse] SHALL support altering the quota to track it separately for some key
using the `KEYED BY` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.KeyedByOptions
version: 1.0

[ClickHouse] SHALL support altering the quota to track it separately for some parameter
using the `KEYED BY 'parameter'` clause in the `ALTER QUOTA` statement.

'parameter' can be one of:
`{'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}`

##### RQ.SRS-006.RBAC.Quota.Alter.Assignment
version: 1.0

[ClickHouse] SHALL support reassigning quota to one or more users
or roles using the `TO` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.Assignment.None
version: 1.0

[ClickHouse] SHALL support reassigning quota to no users or roles using
the `TO NONE` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.Assignment.All
version: 1.0

[ClickHouse] SHALL support reassigning quota to all current users and roles
using the `TO ALL` clause in the `ALTER QUOTA` statement.
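A hedged sketch of the quota reassignment clauses above, following the `ALTER QUOTA` syntax requirement below; the quota name `quota1` is hypothetical:

```sql
-- quota1 no longer applies to any user or role
ALTER QUOTA IF EXISTS quota1 TO NONE

-- quota1 now applies to all current users and roles
ALTER QUOTA IF EXISTS quota1 TO ALL
```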
+
##### RQ.SRS-006.RBAC.Quota.Alter.Assignment.Except
version: 1.0

[ClickHouse] SHALL support excluding assignment of quota to one or more users or roles using
the `EXCEPT` clause in the `ALTER QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Alter.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for the `ALTER QUOTA` statement

``` sql
ALTER QUOTA [IF EXISTS] name
    {{{QUERIES | ERRORS | RESULT ROWS | READ ROWS | RESULT BYTES | READ BYTES | EXECUTION TIME} number} [, ...] FOR INTERVAL number time_unit} [, ...]
    [KEYED BY USERNAME | KEYED BY IP | NOT KEYED] [ALLOW CUSTOM KEY | DISALLOW CUSTOM KEY]
    [TO {user_or_role [,...] | NONE | ALL} [EXCEPT user_or_role [,...]]]
```

##### RQ.SRS-006.RBAC.Quota.Drop
version: 1.0

[ClickHouse] SHALL support removing one or more quotas using the `DROP QUOTA` statement.

##### RQ.SRS-006.RBAC.Quota.Drop.Effect
version: 1.0

[ClickHouse] SHALL unset all the limits that were defined in the quota
for all the users and roles to which the quota was assigned.

##### RQ.SRS-006.RBAC.Quota.Drop.IfExists
version: 1.0

[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP QUOTA` statement
to skip raising an exception when the quota does not exist.
If the `IF EXISTS` clause is not specified then an exception SHALL be
raised if the quota does not exist.

##### RQ.SRS-006.RBAC.Quota.Drop.Cluster
version: 1.0

[ClickHouse] SHALL support using `ON CLUSTER` clause in the `DROP QUOTA` statement
to indicate the cluster the quota to be dropped is located on.

##### RQ.SRS-006.RBAC.Quota.Drop.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for the `DROP QUOTA` statement

``` sql
DROP QUOTA [IF EXISTS] name [,name...]
```

##### RQ.SRS-006.RBAC.Quota.ShowQuotas
version: 1.0

[ClickHouse] SHALL support showing all of the current quotas
using the `SHOW QUOTAS` statement.

##### RQ.SRS-006.RBAC.Quota.ShowQuotas.IntoOutfile
version: 1.0

[ClickHouse] SHALL support the `INTO OUTFILE` clause in the `SHOW QUOTAS` statement to define an outfile by some given string literal.

##### RQ.SRS-006.RBAC.Quota.ShowQuotas.Format
version: 1.0

[ClickHouse] SHALL support the `FORMAT` clause in the `SHOW QUOTAS` statement to define a format for the output quota list.

The valid output formats are listed in the formats table:
https://clickhouse.tech/docs/en/interfaces/formats/

##### RQ.SRS-006.RBAC.Quota.ShowQuotas.Settings
version: 1.0

[ClickHouse] SHALL support the `SETTINGS` clause in the `SHOW QUOTAS` statement to specify the settings used when showing all quotas.

##### RQ.SRS-006.RBAC.Quota.ShowQuotas.Syntax
version: 1.0

[ClickHouse] SHALL support using the `SHOW QUOTAS` statement
with the following syntax

``` sql
SHOW QUOTAS
```

##### RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Name
version: 1.0

[ClickHouse] SHALL support showing the `CREATE QUOTA` statement used to create the quota with some given name
using the `SHOW CREATE QUOTA` statement with the following syntax

``` sql
SHOW CREATE QUOTA name
```

##### RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Current
version: 1.0

[ClickHouse] SHALL support showing the `CREATE QUOTA` statement used to create the CURRENT quota
using the `SHOW CREATE QUOTA CURRENT` statement or the shorthand form
`SHOW CREATE QUOTA`.

##### RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax when
using the `SHOW CREATE QUOTA` statement.
+
```sql
SHOW CREATE QUOTA [name | CURRENT]
```

##### RQ.SRS-006.RBAC.RowPolicy.Create
version: 1.0

[ClickHouse] SHALL support creating a row policy using the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.Effect
version: 1.0

[ClickHouse] SHALL use the new row policy to control access to the specified table
after the `CREATE ROW POLICY` statement is successfully executed
for any new operations on the table performed by all the users and roles to which
the row policy is assigned.

##### RQ.SRS-006.RBAC.RowPolicy.Create.IfNotExists
version: 1.0

[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE ROW POLICY` statement
to skip raising an exception if a row policy with the same **name** already exists.
If the `IF NOT EXISTS` clause is not specified then an exception SHALL be raised if
a row policy with the same **name** already exists.

##### RQ.SRS-006.RBAC.RowPolicy.Create.Replace
version: 1.0

[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE ROW POLICY` statement
to replace existing row policy if it already exists.

##### RQ.SRS-006.RBAC.RowPolicy.Create.OnCluster
version: 1.0

[ClickHouse] SHALL support specifying the cluster on which to create the row policy
using the `ON CLUSTER` clause in the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.On
version: 1.0

[ClickHouse] SHALL support specifying the table on which to create the row policy
using the `ON` clause in the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.Access
version: 1.0

[ClickHouse] SHALL support allowing or restricting access to rows using the
`AS` clause in the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.Access.Permissive
version: 1.0

[ClickHouse] SHALL support allowing access to rows using the
`AS PERMISSIVE` clause in the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.Access.Restrictive
version: 1.0

[ClickHouse] SHALL support restricting access to rows using the
`AS RESTRICTIVE` clause in the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.ForSelect
version: 1.0

[ClickHouse] SHALL support specifying which rows are affected
using the `FOR SELECT` clause in the `CREATE ROW POLICY` statement.
REQUIRES CONFIRMATION

##### RQ.SRS-006.RBAC.RowPolicy.Create.Condition
version: 1.0

[ClickHouse] SHALL support specifying a condition
that can be any SQL expression which returns a boolean using the `USING`
clause in the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.Condition.Effect
version: 1.0

[ClickHouse] SHALL check the condition specified in the row policy using the
`USING` clause in the `CREATE ROW POLICY` statement. The users or roles
to which the row policy is assigned SHALL only see data for which
the condition evaluates to the boolean value of `true`.

##### RQ.SRS-006.RBAC.RowPolicy.Create.Assignment
version: 1.0

[ClickHouse] SHALL support assigning row policy to one or more users
or roles using the `TO` clause in the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.None
version: 1.0

[ClickHouse] SHALL support assigning row policy to no users or roles using
the `TO NONE` clause in the `CREATE ROW POLICY` statement.
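For illustration, a hedged sketch of a permissive row policy combining the clauses above, following the `CREATE ROW POLICY` syntax requirement below; the policy `policy1`, table `db1.t1`, column `user_id`, and user `user1` are hypothetical names:

```sql
-- user1 only sees rows of db1.t1 for which the condition evaluates to true
CREATE ROW POLICY policy1 ON db1.t1
    AS PERMISSIVE
    FOR SELECT
    USING user_id = 42
    TO user1
```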

##### RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.All
version: 1.0

[ClickHouse] SHALL support assigning row policy to all current users and roles
using `TO ALL` clause in the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.AllExcept
version: 1.0

[ClickHouse] SHALL support excluding assignment of row policy to one or more users or roles using
the `ALL EXCEPT` clause in the `CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Create.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for the `CREATE ROW POLICY` statement

``` sql
CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name [ON CLUSTER cluster_name] ON [db.]table
    [AS {PERMISSIVE | RESTRICTIVE}]
    [FOR SELECT]
    [USING condition]
    [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
```

##### RQ.SRS-006.RBAC.RowPolicy.Alter
version: 1.0

[ClickHouse] SHALL support altering row policy using the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Effect
version: 1.0

[ClickHouse] SHALL use the updated row policy to control access to the specified table
after the `ALTER ROW POLICY` statement is successfully executed
for any new operations on the table performed by all the users and roles to which
the row policy is assigned.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.IfExists
version: 1.0

[ClickHouse] SHALL support the `IF EXISTS` clause in the `ALTER ROW POLICY` statement
to skip raising an exception if a row policy does not exist.
If the `IF EXISTS` clause is not specified then an exception SHALL be raised if
a row policy does not exist.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.ForSelect
version: 1.0

[ClickHouse] SHALL support modifying rows on which to apply the row policy
using the `FOR SELECT` clause in the `ALTER ROW POLICY` statement.
REQUIRES FUNCTION CONFIRMATION.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.OnCluster
version: 1.0

[ClickHouse] SHALL support specifying cluster on which to alter the row policy
using the `ON CLUSTER` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.On
version: 1.0

[ClickHouse] SHALL support specifying table on which to alter the row policy
using the `ON` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Rename
version: 1.0

[ClickHouse] SHALL support renaming the row policy using the `RENAME` clause
in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Access
version: 1.0

[ClickHouse] SHALL support altering access to rows using the
`AS` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Permissive
version: 1.0

[ClickHouse] SHALL support permitting access to rows using the
`AS PERMISSIVE` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Restrictive
version: 1.0

[ClickHouse] SHALL support restricting access to rows using the
`AS RESTRICTIVE` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Condition
version: 1.0

[ClickHouse] SHALL support re-specifying the row policy condition
using the `USING` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Condition.Effect
version: 1.0

[ClickHouse] SHALL check the new condition specified for the row policy using the
`USING` clause in the `ALTER ROW POLICY` statement. The users or roles
to which the row policy is assigned SHALL only see data for which
the new condition evaluates to the boolean value of `true`.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Condition.None
version: 1.0

[ClickHouse] SHALL support removing the row policy condition
using the `USING NONE` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment
version: 1.0

[ClickHouse] SHALL support reassigning row policy to one or more users
or roles using the `TO` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.None
version: 1.0

[ClickHouse] SHALL support reassigning row policy to no users or roles using
the `TO NONE` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.All
version: 1.0

[ClickHouse] SHALL support reassigning row policy to all current users and roles
using the `TO ALL` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.AllExcept
version: 1.0

[ClickHouse] SHALL support excluding assignment of row policy to one or more users or roles using
the `ALL EXCEPT` clause in the `ALTER ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Alter.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for the `ALTER ROW POLICY` statement

``` sql
ALTER [ROW] POLICY [IF EXISTS] name [ON CLUSTER cluster_name] ON [database.]table
    [RENAME TO new_name]
    [AS {PERMISSIVE | RESTRICTIVE}]
    [FOR SELECT]
    [USING {condition | NONE}][,...]
    [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
```

##### RQ.SRS-006.RBAC.RowPolicy.Drop
version: 1.0

[ClickHouse] SHALL support removing one or more row policies using the `DROP ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Drop.Effect
version: 1.0

[ClickHouse] SHALL stop checking the condition defined in the row policy
for all the users and roles to which the row policy was assigned.

##### RQ.SRS-006.RBAC.RowPolicy.Drop.IfExists
version: 1.0

[ClickHouse] SHALL support using the `IF EXISTS` clause in the `DROP ROW POLICY` statement
to skip raising an exception when the row policy does not exist.
If the `IF EXISTS` clause is not specified then an exception SHALL be
raised if the row policy does not exist.

##### RQ.SRS-006.RBAC.RowPolicy.Drop.On
version: 1.0

[ClickHouse] SHALL support removing row policy from one or more specified tables
using the `ON` clause in the `DROP ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Drop.OnCluster
version: 1.0

[ClickHouse] SHALL support removing row policy from specified cluster
using the `ON CLUSTER` clause in the `DROP ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.Drop.Syntax
version: 1.0

[ClickHouse] SHALL support the following syntax for the `DROP ROW POLICY` statement.

``` sql
DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER cluster_name]
```

##### RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy
version: 1.0

[ClickHouse] SHALL support showing the `CREATE ROW POLICY` statement used to create the row policy
using the `SHOW CREATE ROW POLICY` statement.

##### RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.On
version: 1.0

[ClickHouse] SHALL support showing the statement used to create a row policy on a specific table
using the `ON` clause in the `SHOW CREATE ROW POLICY` statement.
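
For illustration only, the `ALTER ROW POLICY` and `DROP ROW POLICY` syntax defined above could be exercised as follows (all object names are hypothetical):

``` sql
ALTER ROW POLICY IF EXISTS policy1 ON mydb.hits
    RENAME TO policy2
    USING UserID IN (100, 200)
    TO ALL EXCEPT john

DROP ROW POLICY IF EXISTS policy2 ON mydb.hits
```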
+ +##### RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.Syntax +version: 1.0 + +[ClickHouse] SHALL support the following syntax for `SHOW CREATE ROW POLICY`. + +``` sql +SHOW CREATE [ROW] POLICY name ON [database.]table +``` + +##### RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies +version: 1.0 + +[ClickHouse] SHALL support showing row policies using the `SHOW ROW POLICIES` statement. + +##### RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.On +version: 1.0 + +[ClickHouse] SHALL support showing row policies on a specific table +using the `ON` clause in the `SHOW ROW POLICIES` statement. + +##### RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.Syntax +version: 1.0 + +[ClickHouse] SHALL support the following syntax for `SHOW ROW POLICIES`. + +```sql +SHOW [ROW] POLICIES [ON [database.]table] +``` + +## References + +* **ClickHouse:** https://clickhouse.tech +* **Gitlab repository:** https://gitlab.com/altinity-qa/documents/qa-srs006-clickhouse-role-based-access-control/blob/master/QA_SRS006_ClickHouse_Role_Based_Access_Control.md +* **Revision history:** https://gitlab.com/altinity-qa/documents/qa-srs006-clickhouse-role-based-access-control/commits/master/QA_SRS006_ClickHouse_Role_Based_Access_Control.md +* **Git:** https://git-scm.com/ +* **MySQL:** https://dev.mysql.com/doc/refman/8.0/en/account-management-statements.html +* **PostgreSQL:** https://www.postgresql.org/docs/12/user-manag.html + +[ClickHouse]: https://clickhouse.tech +[Gitlab repository]: https://gitlab.com/altinity-qa/documents/qa-srs006-clickhouse-role-based-access-control/blob/master/QA_SRS006_ClickHouse_Role_Based_Access_Control.md +[Revision history]: https://gitlab.com/altinity-qa/documents/qa-srs006-clickhouse-role-based-access-control/commits/master/QA_SRS006_ClickHouse_Role_Based_Access_Control.md +[Git]: https://git-scm.com/ +[MySQL]: https://dev.mysql.com/doc/refman/8.0/en/account-management-statements.html +[PostgreSQL]: https://www.postgresql.org/docs/12/user-manag.html diff --git a/tests/testflows/rbac/requirements/requirements.py b/tests/testflows/rbac/requirements/requirements.py new file mode 100644 index 00000000000..02cb53bf64c --- /dev/null +++ b/tests/testflows/rbac/requirements/requirements.py @@ -0,0 +1,6194 @@ +# These requirements were auto generated +# from software requirements specification (SRS) +# document by TestFlows v1.6.200723.1011705. +# Do not edit by hand but re-generate instead +# using 'tfs requirements generate' command. 
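+#
+# A sketch of how the objects below are typically consumed, assuming the
+# `TestScenario` and `Requirements` decorators exported by testflows.core
+# (the test name is illustrative and not part of this file):
+#
+#     from testflows.core import TestScenario, Requirements
+#
+#     @TestScenario
+#     @Requirements(RQ_SRS_006_RBAC_User_Create)
+#     def user_create(self):
+#         ...
+#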
+from testflows.core import Requirement
+
+RQ_SRS_006_RBAC = Requirement(
+    name='RQ.SRS-006.RBAC',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support role based access control.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Login = Requirement(
+    name='RQ.SRS-006.RBAC.Login',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL allow access to the server for a given\n'
+        'user only when correct username and password are used during\n'
+        'the connection to the server.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Login_DefaultUser = Requirement(
+    name='RQ.SRS-006.RBAC.Login.DefaultUser',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL use the **default user** when no username and password\n'
+        'are specified during the connection to the server.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User = Requirement(
+    name='RQ.SRS-006.RBAC.User',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support creation and manipulation of\n'
+        'one or more **user** accounts to which roles, privileges,\n'
+        'settings profile, quotas and row policies can be assigned.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Roles = Requirement(
+    name='RQ.SRS-006.RBAC.User.Roles',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support assigning one or more **roles**\n'
+        'to a **user**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Privileges = Requirement(
+    name='RQ.SRS-006.RBAC.User.Privileges',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support assigning one or more privileges to a **user**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Variables = Requirement(
+    name='RQ.SRS-006.RBAC.User.Variables',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support assigning one or more variables to a **user**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Variables_Constraints = Requirement(
+    name='RQ.SRS-006.RBAC.User.Variables.Constraints',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support assigning min, max and read-only constraints\n'
+        'for the variables that can be set and read by the **user**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_SettingsProfile = Requirement(
+    name='RQ.SRS-006.RBAC.User.SettingsProfile',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support assigning one or more **settings profiles**\n'
+        'to a **user**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Quotas = Requirement(
+    name='RQ.SRS-006.RBAC.User.Quotas',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support assigning one or more **quotas** to a **user**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_RowPolicies = Requirement(
+    name='RQ.SRS-006.RBAC.User.RowPolicies',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support assigning one or more **row policies** to a **user**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_AccountLock = Requirement(
+    name='RQ.SRS-006.RBAC.User.AccountLock',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support locking and unlocking of **user** accounts.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_AccountLock_DenyAccess = Requirement(
+    name='RQ.SRS-006.RBAC.User.AccountLock.DenyAccess',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL deny access to the user whose account is locked.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_DefaultRole = Requirement(
+    name='RQ.SRS-006.RBAC.User.DefaultRole',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support assigning a default role to a **user**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_RoleSelection = Requirement(
+    name='RQ.SRS-006.RBAC.User.RoleSelection',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support selection of one or more **roles** from the available roles\n'
+        'that are assigned to a **user**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_ShowCreate = Requirement(
+    name='RQ.SRS-006.RBAC.User.ShowCreate',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support showing the command of how **user** account was created.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_ShowPrivileges = Requirement(
+    name='RQ.SRS-006.RBAC.User.ShowPrivileges',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support listing the privileges of the **user**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Role = Requirement(
+    name='RQ.SRS-006.RBAC.Role',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support creation and manipulation of **roles**\n'
+        'to which privileges, settings profile, quotas and row policies can be\n'
+        'assigned.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Role_Privileges = Requirement(
+    name='RQ.SRS-006.RBAC.Role.Privileges',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support assigning one or more privileges to a **role**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Role_Variables = Requirement(
+    name='RQ.SRS-006.RBAC.Role.Variables',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support assigning one or more variables to a **role**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Role_SettingsProfile = Requirement(
+    name='RQ.SRS-006.RBAC.Role.SettingsProfile',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support assigning one or more **settings profiles**\n'
+        'to a **role**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Role_Quotas = Requirement(
+    name='RQ.SRS-006.RBAC.Role.Quotas',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support assigning one or more **quotas** to a **role**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Role_RowPolicies = Requirement(
+    name='RQ.SRS-006.RBAC.Role.RowPolicies',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support assigning one or more **row policies** to a **role**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Privileges_Usage = Requirement(
+    name='RQ.SRS-006.RBAC.Privileges.Usage',
+    version='1.0',
+    priority=None,
group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting or revoking **usage** privilege\n' + 'for a database or a specific table to one or more **users** or **roles**.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Privileges_Select = Requirement( + name='RQ.SRS-006.RBAC.Privileges.Select', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting or revoking **select** privilege\n' + 'for a database or a specific table to one or more **users** or **roles**.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Privileges_SelectColumns = Requirement( + name='RQ.SRS-006.RBAC.Privileges.SelectColumns', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting or revoking **select columns** privilege\n' + 'for a specific table to one or more **users** or **roles**.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Privileges_Insert = Requirement( + name='RQ.SRS-006.RBAC.Privileges.Insert', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting or revoking **insert** privilege\n' + 'for a database or a specific table to one or more **users** or **roles**.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Privileges_Delete = Requirement( + name='RQ.SRS-006.RBAC.Privileges.Delete', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting or revoking **delete** privilege\n' + 'for a database or a specific table to one or more **users** or **roles**.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Privileges_Alter = Requirement( + name='RQ.SRS-006.RBAC.Privileges.Alter', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting or revoking **alter** privilege\n' + 'for a database or a specific table to one or more **users** or **roles**.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Privileges_Create = Requirement( + name='RQ.SRS-006.RBAC.Privileges.Create', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting or revoking **create** privilege\n' + 'for a database or a specific table to one or more **users** or **roles**.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Privileges_Drop = Requirement( + name='RQ.SRS-006.RBAC.Privileges.Drop', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting or revoking **drop** privilege\n' + 'for a database or a specific table to one or more **users** or **roles**.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Privileges_All = Requirement( + name='RQ.SRS-006.RBAC.Privileges.All', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL include in the **all** privilege the same rights\n' + 'as provided by **usage**, **select**, **select columns**,\n' + '**insert**, **delete**, **alter**, **create**, and **drop** privileges.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Privileges_All_GrantRevoke = Requirement( + name='RQ.SRS-006.RBAC.Privileges.All.GrantRevoke', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting or revoking **all** privileges\n' + 'for a database or a specific table to one or more **users** or **roles**.\n' + ), + link=None + ) + 
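+# Note: each variable name in this file is derived from the requirement name
+# in the SRS document by replacing '.' and '-' with '_', so the requirement
+# 'RQ.SRS-006.RBAC.Privileges.GrantOption' below is bound to
+# RQ_SRS_006_RBAC_Privileges_GrantOption.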
+RQ_SRS_006_RBAC_Privileges_GrantOption = Requirement(
+    name='RQ.SRS-006.RBAC.Privileges.GrantOption',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support granting or revoking **grant option** privilege\n'
+        'for a database or a specific table to one or more **users** or **roles**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Privileges_AdminOption = Requirement(
+    name='RQ.SRS-006.RBAC.Privileges.AdminOption',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support granting or revoking **admin option** privilege\n'
+        'to one or more **users** or **roles**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_RequiredPrivileges_Insert = Requirement(
+    name='RQ.SRS-006.RBAC.RequiredPrivileges.Insert',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL not allow any `INSERT INTO` statements\n'
+        'to be executed unless the user has the **insert** privilege for the destination table\n'
+        'either because of the explicit grant or through one of the roles assigned to the user.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_RequiredPrivileges_Select = Requirement(
+    name='RQ.SRS-006.RBAC.RequiredPrivileges.Select',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL not allow any `SELECT` statements\n'
+        'to be executed unless the user has the **select** or **select columns** privilege\n'
+        'for the destination table either because of the explicit grant\n'
+        'or through one of the roles assigned to the user.\n'
+        'If the user only has the **select columns**\n'
+        'privilege then only the specified columns SHALL be available for reading.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_RequiredPrivileges_Create = Requirement(
+    name='RQ.SRS-006.RBAC.RequiredPrivileges.Create',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL not allow any `CREATE` statements\n'
+        'to be executed unless the user has the **create** privilege for the destination database\n'
+        'either because of the explicit grant or through one of the roles assigned to the user.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_RequiredPrivileges_Alter = Requirement(
+    name='RQ.SRS-006.RBAC.RequiredPrivileges.Alter',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL not allow any `ALTER` statements\n'
+        'to be executed unless the user has the **alter** privilege for the destination table\n'
+        'either because of the explicit grant or through one of the roles assigned to the user.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_RequiredPrivileges_Drop = Requirement(
+    name='RQ.SRS-006.RBAC.RequiredPrivileges.Drop',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL not allow any `DROP` statements\n'
+        'to be executed unless the user has the **drop** privilege for the destination database\n'
+        'either because of the explicit grant or through one of the roles assigned to the user.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_RequiredPrivileges_Drop_Table = Requirement(
+    name='RQ.SRS-006.RBAC.RequiredPrivileges.Drop.Table',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL not allow any `DROP TABLE` statements\n'
+        'to be executed unless the user has the **drop** privilege for the destination database or the table\n'
+        'either because of the explicit grant or through one of the roles assigned to the user.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_RequiredPrivileges_GrantRevoke = Requirement(
+    name='RQ.SRS-006.RBAC.RequiredPrivileges.GrantRevoke',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL not allow any `GRANT` or `REVOKE` statements\n'
+        'to be executed unless the user has the **grant option** privilege\n'
+        'for the privilege of the destination table\n'
+        'either because of the explicit grant or through one of the roles assigned to the user.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_RequiredPrivileges_Use = Requirement(
+    name='RQ.SRS-006.RBAC.RequiredPrivileges.Use',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL not allow the `USE` statement to be executed\n'
+        'unless the user has at least one of the privileges for the database\n'
+        'or the table inside that database\n'
+        'either because of the explicit grant or through one of the roles assigned to the user.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_RequiredPrivileges_Admin = Requirement(
+    name='RQ.SRS-006.RBAC.RequiredPrivileges.Admin',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL not allow any of the following statements\n'
+        '\n'
+        '* `SYSTEM`\n'
+        '* `SHOW`\n'
+        '* `ATTACH`\n'
+        '* `CHECK TABLE`\n'
+        '* `DESCRIBE TABLE`\n'
+        '* `DETACH`\n'
+        '* `EXISTS`\n'
+        '* `KILL QUERY`\n'
+        '* `KILL MUTATION`\n'
+        '* `OPTIMIZE`\n'
+        '* `RENAME`\n'
+        '* `TRUNCATE`\n'
+        '\n'
+        'to be executed unless the user has the **admin option** privilege\n'
+        'through one of the roles with **admin option** privilege assigned to the user.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_PartialRevokes = Requirement(
+    name='RQ.SRS-006.RBAC.PartialRevokes',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support partial revoking of privileges granted\n'
+        'to a **user** or a **role**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_SettingsProfile = Requirement(
+    name='RQ.SRS-006.RBAC.SettingsProfile',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support creation and manipulation of **settings profiles**\n'
+        'that can include value definition for one or more variables and can\n'
+        'be assigned to one or more **users** or **roles**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_SettingsProfile_Constraints = Requirement(
+    name='RQ.SRS-006.RBAC.SettingsProfile.Constraints',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support assigning min, max and read-only constraints\n'
+        'for the variables specified in the **settings profile**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_SettingsProfile_ShowCreate = Requirement(
+    name='RQ.SRS-006.RBAC.SettingsProfile.ShowCreate',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support showing the command of how **setting profile** was created.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Quotas = Requirement(
+    name='RQ.SRS-006.RBAC.Quotas',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support creation and manipulation of **quotas**\n'
+        'that can be used to limit resource usage by a **user** or a **role**\n'
+        'over a period of time.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Quotas_Keyed = Requirement(
+    name='RQ.SRS-006.RBAC.Quotas.Keyed',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support creating **quotas** that are keyed\n'
+        'so that a quota is tracked separately for each key value.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Quotas_Queries = Requirement(
+    name='RQ.SRS-006.RBAC.Quotas.Queries',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support setting **queries** quota to limit the total number of requests.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Quotas_Errors = Requirement(
+    name='RQ.SRS-006.RBAC.Quotas.Errors',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support setting **errors** quota to limit the number of queries that threw an exception.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Quotas_ResultRows = Requirement(
+    name='RQ.SRS-006.RBAC.Quotas.ResultRows',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support setting **result rows** quota to limit the\n'
+        'total number of rows given as the result.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Quotas_ReadRows = Requirement(
+    name='RQ.SRS-006.RBAC.Quotas.ReadRows',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support setting **read rows** quota to limit the total\n'
+        'number of source rows read from tables for running the query on all remote servers.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Quotas_ResultBytes = Requirement(
+    name='RQ.SRS-006.RBAC.Quotas.ResultBytes',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support setting **result bytes** quota to limit the total number\n'
+        'of bytes that can be returned as the result.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Quotas_ReadBytes = Requirement(
+    name='RQ.SRS-006.RBAC.Quotas.ReadBytes',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support setting **read bytes** quota to limit the total number\n'
+        'of source bytes read from tables for running the query on all remote servers.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Quotas_ExecutionTime = Requirement(
+    name='RQ.SRS-006.RBAC.Quotas.ExecutionTime',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support setting **execution time** quota to limit the maximum\n'
+        'query execution time.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_Quotas_ShowCreate = Requirement(
+    name='RQ.SRS-006.RBAC.Quotas.ShowCreate',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support showing the command of how **quota** was created.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_RowPolicy = Requirement(
+    name='RQ.SRS-006.RBAC.RowPolicy',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support creation and manipulation of table **row policies**\n'
+        'that can be used to limit access to the table contents for a **user** or a **role**\n'
+        'using a specified **condition**.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_RowPolicy_Condition = Requirement(
+    name='RQ.SRS-006.RBAC.RowPolicy.Condition',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support row policy **conditions** that can be any SQL\n'
+        'expression that returns a boolean.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_RowPolicy_ShowCreate = Requirement(
+    name='RQ.SRS-006.RBAC.RowPolicy.ShowCreate',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support showing the command of how **row policy** was created.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Use_DefaultRole = Requirement(
+    name='RQ.SRS-006.RBAC.User.Use.DefaultRole',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL by default use default role or roles assigned\n'
+        'to the user if specified.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Use_AllRolesWhenNoDefaultRole = Requirement(
+    name='RQ.SRS-006.RBAC.User.Use.AllRolesWhenNoDefaultRole',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL by default use all the roles assigned to the user\n'
+        'if no default role or roles are specified for the user.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Create = Requirement(
+    name='RQ.SRS-006.RBAC.User.Create',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support creating **user** accounts using `CREATE USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Create_IfNotExists = Requirement(
+    name='RQ.SRS-006.RBAC.User.Create.IfNotExists',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE USER` statement\n'
+        'to skip raising an exception if a user with the same **name** already exists.\n'
+        'If the `IF NOT EXISTS` clause is not specified then an exception SHALL be\n'
+        'raised if a user with the same **name** already exists.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Create_Replace = Requirement(
+    name='RQ.SRS-006.RBAC.User.Create.Replace',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE USER` statement\n'
+        'to replace existing user account if it already exists.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Create_Password_NoPassword = Requirement(
+    name='RQ.SRS-006.RBAC.User.Create.Password.NoPassword',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying no password when creating\n'
+        'user account using `IDENTIFIED WITH NO_PASSWORD` clause.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Create_Password_NoPassword_Login = Requirement(
+    name='RQ.SRS-006.RBAC.User.Create.Password.NoPassword.Login',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL use no password for the user when connecting to the server\n'
+        'when an account was created with `IDENTIFIED WITH NO_PASSWORD` clause.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Create_Password_PlainText = Requirement(
+    name='RQ.SRS-006.RBAC.User.Create.Password.PlainText',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying plaintext password when creating\n'
+        'user account using `IDENTIFIED WITH PLAINTEXT_PASSWORD
BY` clause.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Create_Password_PlainText_Login = Requirement( + name='RQ.SRS-006.RBAC.User.Create.Password.PlainText.Login', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL use the plaintext password passed by the user when connecting to the server\n' + 'when an account was created with `IDENTIFIED WITH PLAINTEXT_PASSWORD` clause\n' + 'and compare the password with the one used in the `CREATE USER` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Create_Password_Sha256Password = Requirement( + name='RQ.SRS-006.RBAC.User.Create.Password.Sha256Password', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying the result of applying SHA256\n' + 'to some password when creating user account using `IDENTIFIED WITH SHA256_PASSWORD BY` or `IDENTIFIED BY`\n' + 'clause.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Create_Password_Sha256Password_Login = Requirement( + name='RQ.SRS-006.RBAC.User.Create.Password.Sha256Password.Login', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL calculate `SHA256` of the password passed by the user when connecting to the server\n' + "when an account was created with `IDENTIFIED WITH SHA256_PASSWORD` or with 'IDENTIFIED BY' clause\n" + 'and compare the calculated hash to the one used in the `CREATE USER` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Create_Password_Sha256Hash = Requirement( + name='RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying the result of applying SHA256\n' + 'to some already calculated hash when creating user account using `IDENTIFIED WITH SHA256_HASH`\n' + 'clause.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Create_Password_Sha256Hash_Login = Requirement( + name='RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash.Login', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL calculate `SHA256` of the already calculated hash passed by\n' + 'the user when connecting to the server\n' + 'when an account was created with `IDENTIFIED WITH SHA256_HASH` clause\n' + 'and compare the calculated hash to the one used in the `CREATE USER` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Password = Requirement( + name='RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying the result of applying SHA1 two times\n' + 'to a password when creating user account using `IDENTIFIED WITH DOUBLE_SHA1_PASSWORD`\n' + 'clause.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Password_Login = Requirement( + name='RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password.Login', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL calculate `SHA1` two times over the password passed by\n' + 'the user when connecting to the server\n' + 'when an account was created with `IDENTIFIED WITH DOUBLE_SHA1_PASSWORD` clause\n' + 'and compare the calculated value to the one used in the `CREATE USER` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Hash = 
Requirement(
+    name='RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying the result of applying SHA1 two times\n'
+        'to a hash when creating user account using `IDENTIFIED WITH DOUBLE_SHA1_HASH`\n'
+        'clause.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Hash_Login = Requirement(
+    name='RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash.Login',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL calculate `SHA1` two times over the hash passed by\n'
+        'the user when connecting to the server\n'
+        'when an account was created with `IDENTIFIED WITH DOUBLE_SHA1_HASH` clause\n'
+        'and compare the calculated value to the one used in the `CREATE USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Create_Host_Name = Requirement(
+    name='RQ.SRS-006.RBAC.User.Create.Host.Name',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying one or more hostnames from\n'
+        'which user can access the server using the `HOST NAME` clause\n'
+        'in the `CREATE USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Create_Host_Regexp = Requirement(
+    name='RQ.SRS-006.RBAC.User.Create.Host.Regexp',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying one or more regular expressions\n'
+        'to match hostnames from which user can access the server\n'
+        'using the `HOST REGEXP` clause in the `CREATE USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Create_Host_IP = Requirement(
+    name='RQ.SRS-006.RBAC.User.Create.Host.IP',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying one or more IP address or subnet from\n'
+        'which user can access the server using the `HOST IP` clause in the\n'
+        '`CREATE USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Create_Host_Any = Requirement(
+    name='RQ.SRS-006.RBAC.User.Create.Host.Any',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying `HOST ANY` clause in the `CREATE USER` statement\n'
+        'to indicate that user can access the server from any host.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Create_Host_None = Requirement(
+    name='RQ.SRS-006.RBAC.User.Create.Host.None',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support forbidding access from any host using `HOST NONE` clause in the\n'
+        '`CREATE USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Create_Host_Local = Requirement(
+    name='RQ.SRS-006.RBAC.User.Create.Host.Local',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support limiting user access to local only using `HOST LOCAL` clause in the\n'
+        '`CREATE USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Create_Host_Like = Requirement(
+    name='RQ.SRS-006.RBAC.User.Create.Host.Like',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying host using `LIKE` command syntax using the\n'
+        '`HOST LIKE` clause in the `CREATE USER` statement.\n'
+        ),
+    link=None
+    )
+
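+# Each generated object simply records the requirement metadata passed to the
+# `Requirement` constructor; assuming testflows exposes these as attributes,
+# e.g. RQ_SRS_006_RBAC_User_Create_Host_Like.name and
+# RQ_SRS_006_RBAC_User_Create_Host_Like.version ('1.0') can be read in reports.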
+RQ_SRS_006_RBAC_User_Create_Host_Default = Requirement( + name='RQ.SRS-006.RBAC.User.Create.Host.Default', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support user access to server from any host\n' + 'if no `HOST` clause is specified in the `CREATE USER` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Create_DefaultRole = Requirement( + name='RQ.SRS-006.RBAC.User.Create.DefaultRole', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying one or more default roles\n' + 'using `DEFAULT ROLE` clause in the `CREATE USER` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Create_DefaultRole_None = Requirement( + name='RQ.SRS-006.RBAC.User.Create.DefaultRole.None', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying no default roles\n' + 'using `DEFAULT ROLE NONE` clause in the `CREATE USER` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Create_DefaultRole_All = Requirement( + name='RQ.SRS-006.RBAC.User.Create.DefaultRole.All', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying all roles to be used as default\n' + 'using `DEFAULT ROLE ALL` clause in the `CREATE USER` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Create_Settings = Requirement( + name='RQ.SRS-006.RBAC.User.Create.Settings', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying settings and profile\n' + 'using `SETTINGS` clause in the `CREATE USER` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Create_OnCluster = Requirement( + name='RQ.SRS-006.RBAC.User.Create.OnCluster', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying cluster on which the user\n' + 'will be created using `ON CLUSTER` clause in the `CREATE USER` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Create_Syntax = Requirement( + name='RQ.SRS-006.RBAC.User.Create.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for `CREATE USER` statement.\n' + '\n' + '```sql\n' + 'CREATE USER [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]\n' + " [IDENTIFIED [WITH {NO_PASSWORD|PLAINTEXT_PASSWORD|SHA256_PASSWORD|SHA256_HASH|DOUBLE_SHA1_PASSWORD|DOUBLE_SHA1_HASH}] BY {'password'|'hash'}]\n" + " [HOST {LOCAL | NAME 'name' | NAME REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] 
| ANY | NONE]\n" + ' [DEFAULT ROLE role [,...]]\n' + " [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]\n" + '```\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Alter = Requirement( + name='RQ.SRS-006.RBAC.User.Alter', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering **user** accounts using `ALTER USER` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Alter_OrderOfEvaluation = Requirement( + name='RQ.SRS-006.RBAC.User.Alter.OrderOfEvaluation', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support evaluating `ALTER USER` statement from left to right\n' + 'where things defined on the right override anything that was previously defined on\n' + 'the left.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Alter_IfExists = Requirement( + name='RQ.SRS-006.RBAC.User.Alter.IfExists', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support `IF EXISTS` clause in the `ALTER USER` statement\n' + 'to skip raising an exception (producing a warning instead) if a user with the specified **name** does not exist. If the `IF EXISTS` clause is not specified then an exception SHALL be raised if a user with the **name** does not exist.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Alter_Cluster = Requirement( + name='RQ.SRS-006.RBAC.User.Alter.Cluster', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying the cluster the user is on\n' + 'when altering user account using `ON CLUSTER` clause in the `ALTER USER` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Alter_Rename = Requirement( + name='RQ.SRS-006.RBAC.User.Alter.Rename', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying a new name for the user when\n' + 'altering user account using `RENAME` clause in the `ALTER USER` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Alter_Password_PlainText = Requirement( + name='RQ.SRS-006.RBAC.User.Alter.Password.PlainText', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying plaintext password when altering\n' + 'user account using `IDENTIFIED WITH PLAINTEXT_PASSWORD BY` or\n' + 'using shorthand `IDENTIFIED BY` clause in the `ALTER USER` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Alter_Password_Sha256Password = Requirement( + name='RQ.SRS-006.RBAC.User.Alter.Password.Sha256Password', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying the result of applying SHA256\n' + 'to some password as identification when altering user account using\n' + '`IDENTIFIED WITH SHA256_PASSWORD` clause in the `ALTER USER` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Alter_Password_DoubleSha1Password = Requirement( + name='RQ.SRS-006.RBAC.User.Alter.Password.DoubleSha1Password', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying the result of applying Double SHA1\n' + 'to some password as identification when altering user account using\n' + '`IDENTIFIED WITH DOUBLE_SHA1_PASSWORD` clause in the `ALTER USER` statement.\n' + 
),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Alter_Host_AddDrop = Requirement(
+    name='RQ.SRS-006.RBAC.User.Alter.Host.AddDrop',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support altering user by adding and dropping access to hosts with the `ADD HOST` or the `DROP HOST` in the `ALTER USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Alter_Host_Local = Requirement(
+    name='RQ.SRS-006.RBAC.User.Alter.Host.Local',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support limiting user access to local only using `HOST LOCAL` clause in the\n'
+        '`ALTER USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Alter_Host_Name = Requirement(
+    name='RQ.SRS-006.RBAC.User.Alter.Host.Name',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying one or more hostnames from\n'
+        'which user can access the server using the `HOST NAME` clause\n'
+        'in the `ALTER USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Alter_Host_Regexp = Requirement(
+    name='RQ.SRS-006.RBAC.User.Alter.Host.Regexp',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying one or more regular expressions\n'
+        'to match hostnames from which user can access the server\n'
+        'using the `HOST REGEXP` clause in the `ALTER USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Alter_Host_IP = Requirement(
+    name='RQ.SRS-006.RBAC.User.Alter.Host.IP',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying one or more IP address or subnet from\n'
+        'which user can access the server using the `HOST IP` clause in the\n'
+        '`ALTER USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Alter_Host_Like = Requirement(
+    name='RQ.SRS-006.RBAC.User.Alter.Host.Like',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying one or more similar hosts using `LIKE` command syntax using the `HOST LIKE` clause in the `ALTER USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Alter_Host_Any = Requirement(
+    name='RQ.SRS-006.RBAC.User.Alter.Host.Any',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying `HOST ANY` clause in the `ALTER USER` statement\n'
+        'to indicate that user can access the server from any host.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Alter_Host_None = Requirement(
+    name='RQ.SRS-006.RBAC.User.Alter.Host.None',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support forbidding access from any host using `HOST NONE` clause in the\n'
+        '`ALTER USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Alter_DefaultRole = Requirement(
+    name='RQ.SRS-006.RBAC.User.Alter.DefaultRole',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying one or more default roles\n'
+        'using `DEFAULT ROLE` clause in the `ALTER USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Alter_DefaultRole_All = Requirement(
+    name='RQ.SRS-006.RBAC.User.Alter.DefaultRole.All',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying all roles to be used as default\n'
+        'using `DEFAULT ROLE ALL` clause in the `ALTER USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Alter_DefaultRole_AllExcept = Requirement(
+    name='RQ.SRS-006.RBAC.User.Alter.DefaultRole.AllExcept',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying one or more roles which will not be used as default\n'
+        'using `DEFAULT ROLE ALL EXCEPT` clause in the `ALTER USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Alter_Settings = Requirement(
+    name='RQ.SRS-006.RBAC.User.Alter.Settings',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying one or more variables\n'
+        'using `SETTINGS` clause in the `ALTER USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Alter_Settings_Min = Requirement(
+    name='RQ.SRS-006.RBAC.User.Alter.Settings.Min',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying a minimum value for the variable specified using `SETTINGS` with `MIN` clause in the `ALTER USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Alter_Settings_Max = Requirement(
+    name='RQ.SRS-006.RBAC.User.Alter.Settings.Max',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying a maximum value for the variable specified using `SETTINGS` with `MAX` clause in the `ALTER USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Alter_Settings_Profile = Requirement(
+    name='RQ.SRS-006.RBAC.User.Alter.Settings.Profile',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support specifying the name of a profile for the variable specified using `SETTINGS` with `PROFILE` clause in the `ALTER USER` statement.\n'
+        ),
+    link=None
+    )
+
+RQ_SRS_006_RBAC_User_Alter_Syntax = Requirement(
+    name='RQ.SRS-006.RBAC.User.Alter.Syntax',
+    version='1.0',
+    priority=None,
+    group=None,
+    type=None,
+    uid=None,
+    description=(
+        '[ClickHouse] SHALL support the following syntax for the `ALTER USER` statement.\n'
+        '\n'
+        '```sql\n'
+        'ALTER USER [IF EXISTS] name [ON CLUSTER cluster_name]\n'
+        '    [RENAME TO new_name]\n'
+        "    [IDENTIFIED [WITH {PLAINTEXT_PASSWORD|SHA256_PASSWORD|DOUBLE_SHA1_PASSWORD}] BY {'password'|'hash'}]\n"
+        "    [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]\n"
+        '    [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...]
]\n' + " [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]\n" + '```\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SetDefaultRole = Requirement( + name='RQ.SRS-006.RBAC.SetDefaultRole', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support setting or changing granted roles to default for one or more\n' + 'users using `SET DEFAULT ROLE` statement which\n' + 'SHALL permanently change the default roles for the user or users if successful.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SetDefaultRole_CurrentUser = Requirement( + name='RQ.SRS-006.RBAC.SetDefaultRole.CurrentUser', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support setting or changing granted roles to default for\n' + 'the current user using `CURRENT_USER` clause in the `SET DEFAULT ROLE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SetDefaultRole_All = Requirement( + name='RQ.SRS-006.RBAC.SetDefaultRole.All', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support setting or changing all granted roles to default\n' + 'for one or more users using `ALL` clause in the `SET DEFAULT ROLE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SetDefaultRole_AllExcept = Requirement( + name='RQ.SRS-006.RBAC.SetDefaultRole.AllExcept', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support setting or changing all granted roles except those specified\n' + 'to default for one or more users using `ALL EXCEPT` clause in the `SET DEFAULT ROLE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SetDefaultRole_None = Requirement( + name='RQ.SRS-006.RBAC.SetDefaultRole.None', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support removing all granted roles from default\n' + 'for one or more users using `NONE` clause in the `SET DEFAULT ROLE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SetDefaultRole_Syntax = Requirement( + name='RQ.SRS-006.RBAC.SetDefaultRole.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for the `SET DEFAULT ROLE` statement.\n' + '\n' + '```sql\n' + 'SET DEFAULT ROLE\n' + ' {NONE | role [,...] 
| ALL | ALL EXCEPT role [,...]}\n' + ' TO {user|CURRENT_USER} [,...]\n' + '\n' + '```\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SetRole = Requirement( + name='RQ.SRS-006.RBAC.SetRole', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support activating role or roles for the current user\n' + 'using `SET ROLE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SetRole_Default = Requirement( + name='RQ.SRS-006.RBAC.SetRole.Default', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support activating default roles for the current user\n' + 'using `DEFAULT` clause in the `SET ROLE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SetRole_None = Requirement( + name='RQ.SRS-006.RBAC.SetRole.None', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support activating no roles for the current user\n' + 'using `NONE` clause in the `SET ROLE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SetRole_All = Requirement( + name='RQ.SRS-006.RBAC.SetRole.All', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support activating all roles for the current user\n' + 'using `ALL` clause in the `SET ROLE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SetRole_AllExcept = Requirement( + name='RQ.SRS-006.RBAC.SetRole.AllExcept', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support activating all roles except those specified\n' + 'for the current user using `ALL EXCEPT` clause in the `SET ROLE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SetRole_Syntax = Requirement( + name='RQ.SRS-006.RBAC.SetRole.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '```sql\n' + 'SET ROLE {DEFAULT | NONE | role [,...] 
| ALL | ALL EXCEPT role [,...]}\n' + '```\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_ShowCreateUser = Requirement( + name='RQ.SRS-006.RBAC.User.ShowCreateUser', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support showing the `CREATE USER` statement used to create the current user object\n' + 'using the `SHOW CREATE USER` statement with `CURRENT_USER` or no argument.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_ShowCreateUser_For = Requirement( + name='RQ.SRS-006.RBAC.User.ShowCreateUser.For', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support showing the `CREATE USER` statement used to create the specified user object\n' + 'using the `FOR` clause in the `SHOW CREATE USER` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_ShowCreateUser_Syntax = Requirement( + name='RQ.SRS-006.RBAC.User.ShowCreateUser.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support showing the following syntax for `SHOW CREATE USER` statement.\n' + '\n' + '```sql\n' + 'SHOW CREATE USER [name | CURRENT_USER]\n' + '```\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Drop = Requirement( + name='RQ.SRS-006.RBAC.User.Drop', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support removing a user account using `DROP USER` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Drop_IfExists = Requirement( + name='RQ.SRS-006.RBAC.User.Drop.IfExists', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP USER` statement\n' + 'to skip raising an exception if the user account does not exist.\n' + 'If the `IF EXISTS` clause is not specified then an exception SHALL be\n' + 'raised if a user does not exist.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Drop_OnCluster = Requirement( + name='RQ.SRS-006.RBAC.User.Drop.OnCluster', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support using `ON CLUSTER` clause in the `DROP USER` statement\n' + 'to specify the name of the cluster the user should be dropped from.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_User_Drop_Syntax = Requirement( + name='RQ.SRS-006.RBAC.User.Drop.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for `DROP USER` statement\n' + '\n' + '```sql\n' + 'DROP USER [IF EXISTS] name [,...] 
[ON CLUSTER cluster_name]\n' + '```\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_Create = Requirement( + name='RQ.SRS-006.RBAC.Role.Create', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support creating a **role** using `CREATE ROLE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_Create_IfNotExists = Requirement( + name='RQ.SRS-006.RBAC.Role.Create.IfNotExists', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE ROLE` statement\n' + 'to skip raising an exception if a role with the same **name** already exists.\n' + 'If the `IF NOT EXISTS` clause is not specified then an exception SHALL be\n' + 'raised if a role with the same **name** already exists.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_Create_Replace = Requirement( + name='RQ.SRS-006.RBAC.Role.Create.Replace', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE ROLE` statement\n' + 'to replace an existing role if it already exists.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_Create_Settings = Requirement( + name='RQ.SRS-006.RBAC.Role.Create.Settings', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying settings and profile using `SETTINGS`\n' + 'clause in the `CREATE ROLE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_Create_Syntax = Requirement( + name='RQ.SRS-006.RBAC.Role.Create.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for the `CREATE ROLE` statement\n' + '\n' + '``` sql\n' + 'CREATE ROLE [IF NOT EXISTS | OR REPLACE] name\n' + " [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]\n" + '```\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_Create_Effect = Requirement( + name='RQ.SRS-006.RBAC.Role.Create.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL make the role available to be linked with users, privileges, quotas and\n' + 'settings profiles after the successful execution of the `CREATE ROLE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_Alter = Requirement( + name='RQ.SRS-006.RBAC.Role.Alter', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering one **role** using `ALTER ROLE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_Alter_IfExists = Requirement( + name='RQ.SRS-006.RBAC.Role.Alter.IfExists', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering one **role** using `ALTER ROLE IF EXISTS` statement, where no exception\n' + 'will be thrown if the role does not exist.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_Alter_Cluster = Requirement( + name='RQ.SRS-006.RBAC.Role.Alter.Cluster', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering one **role** using `ALTER ROLE role ON CLUSTER` statement to specify the\n' + 'cluster location of the specified role.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_Alter_Rename = Requirement( + 
name='RQ.SRS-006.RBAC.Role.Alter.Rename', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering one **role** using `ALTER ROLE role RENAME TO` statement which renames the\n' + 'role to a specified new name. If the new name already exists, an exception SHALL be raised unless the\n' + '`IF EXISTS` clause is specified, in which case no exception will be raised and nothing will change.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_Alter_Settings = Requirement( + name='RQ.SRS-006.RBAC.Role.Alter.Settings', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering the settings of one **role** using `ALTER ROLE role SETTINGS ...` statement.\n' + 'Altering variable values, creating max and min values, specifying readonly or writable, and specifying the\n' + 'profiles to which this change SHALL be applied, are all supported, using the following syntax.\n' + '\n' + '```sql\n' + "[SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]\n" + '```\n' + '\n' + 'One or more variables and profiles may be specified as shown above.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_Alter_Effect = Requirement( + name='RQ.SRS-006.RBAC.Role.Alter.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL alter the abilities granted by the role\n' + 'from all the users to which the role was assigned after the successful execution\n' + 'of the `ALTER ROLE` statement. Operations in progress SHALL be allowed to complete as is, but any new operation that requires the privileges that are not otherwise granted to the user SHALL fail.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_Alter_Syntax = Requirement( + name='RQ.SRS-006.RBAC.Role.Alter.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '```sql\n' + 'ALTER ROLE [IF EXISTS] name [ON CLUSTER cluster_name]\n' + ' [RENAME TO new_name]\n' + " [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]\n" + '```\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_Drop = Requirement( + name='RQ.SRS-006.RBAC.Role.Drop', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support removing one or more roles using `DROP ROLE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_Drop_IfExists = Requirement( + name='RQ.SRS-006.RBAC.Role.Drop.IfExists', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP ROLE` statement\n' + 'to skip raising an exception if the role does not exist.\n' + 'If the `IF EXISTS` clause is not specified then an exception SHALL be\n' + 'raised if a role does not exist.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_Drop_Cluster = Requirement( + name='RQ.SRS-006.RBAC.Role.Drop.Cluster', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support using `ON CLUSTER` clause in the `DROP ROLE` statement to specify the cluster from which to drop the specified role.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_Drop_Effect = Requirement( + name='RQ.SRS-006.RBAC.Role.Drop.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + 
description=( + '[ClickHouse] SHALL remove the abilities granted by the role\n' + 'from all the users to which the role was assigned after the successful execution\n' + 'of the `DROP ROLE` statement. Operations in progress SHALL be allowed to complete\n' + 'but any new operation that requires the privileges that are not otherwise granted to\n' + 'the user SHALL fail.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_Drop_Syntax = Requirement( + name='RQ.SRS-006.RBAC.Role.Drop.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for the `DROP ROLE` statement\n' + '\n' + '``` sql\n' + 'DROP ROLE [IF EXISTS] name [,...] [ON CLUSTER cluster_name]\n' + '```\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_ShowCreate = Requirement( + name='RQ.SRS-006.RBAC.Role.ShowCreate', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support viewing the settings for a role upon creation with the `SHOW CREATE ROLE`\n' + 'statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Role_ShowCreate_Syntax = Requirement( + name='RQ.SRS-006.RBAC.Role.ShowCreate.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for the `SHOW CREATE ROLE` command.\n' + '\n' + '```sql\n' + 'SHOW CREATE ROLE name\n' + '```\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_To = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.To', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting privileges to one or more users or roles using `TO` clause\n' + 'in the `GRANT PRIVILEGE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_To_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.To.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL grant privileges to any set of users and/or roles specified in the `TO` clause of the grant statement.\n' + 'Any new operation by one of the specified users or roles with the granted privilege SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_ToCurrentUser = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.ToCurrentUser', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting privileges to the current user using `TO CURRENT_USER` clause\n' + 'in the `GRANT PRIVILEGE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Select = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Select', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting the **select** privilege to one or more users or roles\n' + 'for a database or a table using the `GRANT SELECT` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Select_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Select.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add the **select** privilege to the specified users or roles\n' + 'after the successful execution of the `GRANT SELECT` statement.\n' + 'Any new operation by a user or a user that has the specified role\n' + 'which requires the **select** privilege SHALL succeed.\n' + ), + link=None + ) + 
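+# A hedged illustration, not generated from the SRS markdown: assuming a user
+# `alice`, a role `reader`, and a table `default.hits` (all hypothetical
+# names), the GRANT requirements above could be exercised as:
+#
+#     GRANT SELECT ON default.hits TO alice;
+#     GRANT SELECT ON default.hits TO reader;
+#
+# After the first statement, a new `SELECT count() FROM default.hits` issued
+# by `alice` is expected to succeed, per
+# RQ.SRS-006.RBAC.Grant.Privilege.Select.Effect.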
+RQ_SRS_006_RBAC_Grant_Privilege_SelectColumns = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.SelectColumns', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting the **select columns** privilege to one or more users or roles\n' + 'for a database or a table using the `GRANT SELECT(columns)` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_SelectColumns_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.SelectColumns.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add the **select columns** privilege to the specified users or roles\n' + 'after the successful execution of the `GRANT SELECT(columns)` statement.\n' + 'Any new operation by a user or a user that has the specified role\n' + 'which requires the **select columns** privilege SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Insert = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Insert', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting the **insert** privilege to one or more users or roles\n' + 'for a database or a table using the `GRANT INSERT` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Insert_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Insert.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add the **insert** privilege to the specified users or roles\n' + 'after the successful execution of the `GRANT INSERT` statement.\n' + 'Any new operation by a user or a user that has the specified role\n' + 'which requires the **insert** privilege SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Alter = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Alter', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting the **alter** privilege to one or more users or roles\n' + 'for a database or a table using the `GRANT ALTER` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Alter_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Alter.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add the **alter** privilege to the specified users or roles\n' + 'after the successful execution of the `GRANT ALTER` statement.\n' + 'Any new operation by a user or a user that has the specified role\n' + 'which requires the **alter** privilege SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Create = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Create', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting the **create** privilege to one or more users or roles\n' + 'for a database or a table using the `GRANT CREATE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Create_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Create.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add the **create** privilege to the specified users or roles\n' + 'after the successful execution of the `GRANT CREATE` statement.\n' + 'Any new operation by a user or a user that has 
the specified role\n' + 'which requires the **create** privilege SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Drop = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Drop', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting the **drop** privilege to one or more users or roles\n' + 'for a database or a table using the `GRANT DROP` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Drop_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Drop.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add the **drop** privilege to the specified users or roles\n' + 'after the successful execution of the `GRANT DROP` statement.\n' + 'Any new operation by a user or a user that has the specified role\n' + 'which requires the **drop** privilege SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Truncate = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Truncate', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting the **truncate** privilege to one or more users or roles\n' + 'for a database or a table using `GRANT TRUNCATE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Truncate_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Truncate.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add the **truncate** privilege to the specified users or roles\n' + 'after the successful execution of the `GRANT TRUNCATE` statement.\n' + 'Any new operation by a user or a user that has the specified role\n' + 'which requires the **truncate** privilege SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Optimize = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Optimize', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting the **optimize** privilege to one or more users or roles\n' + 'for a database or a table using `GRANT OPTIMIZE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Optimize_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Optimize.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add the **optimize** privilege to the specified users or roles\n' + 'after the successful execution of the `GRANT OPTIMIZE` statement.\n' + 'Any new operation by a user or a user that has the specified role\n' + 'which requires the **optimize** privilege SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Show = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Show', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting the **show** privilege to one or more users or roles\n' + 'for a database or a table using `GRANT SHOW` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Show_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Show.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add the **show** privilege to the specified users or roles\n' + 'after the successful execution of the `GRANT SHOW` statement.\n' + 'Any new 
operation by a user or a user that has the specified role\n' + 'which requires the **show** privilege SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_KillQuery = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.KillQuery', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting the **kill query** privilege to one or more users or roles\n' + 'for a database or a table using `GRANT KILL QUERY` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_KillQuery_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.KillQuery.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add the **kill query** privilege to the specified users or roles\n' + 'after the successful execution of the `GRANT KILL QUERY` statement.\n' + 'Any new operation by a user or a user that has the specified role\n' + 'which requires the **kill query** privilege SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_AccessManagement = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.AccessManagement', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting the **access management** privileges to one or more users or roles\n' + 'for a database or a table using `GRANT ACCESS MANAGEMENT` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_AccessManagement_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.AccessManagement.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add the **access management** privileges to the specified users or roles\n' + 'after the successful execution of the `GRANT ACCESS MANAGEMENT` statement.\n' + 'Any new operation by a user or a user that has the specified role\n' + 'which requires the **access management** privilege SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_System = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.System', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting the **system** privileges to one or more users or roles\n' + 'for a database or a table using `GRANT SYSTEM` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_System_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.System.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add the **system** privileges to the specified users or roles\n' + 'after the successful execution of the `GRANT SYSTEM` statement.\n' + 'Any new operation by a user or a user that has the specified role\n' + 'which requires the **system** privilege SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Introspection = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Introspection', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting the **introspection** privileges to one or more users or roles\n' + 'for a database or a table using `GRANT INTROSPECTION` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Introspection_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Introspection.Effect', + version='1.0', + priority=None, + group=None, + 
type=None, + uid=None, + description=( + '[ClickHouse] SHALL add the **introspection** privileges to the specified users or roles\n' + 'after the successful execution of the `GRANT INTROSPECTION` statement.\n' + 'Any new operation by a user or a user that has the specified role\n' + 'which requires the **introspection** privilege SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Sources = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Sources', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting the **sources** privileges to one or more users or roles\n' + 'for a database or a table using `GRANT SOURCES` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Sources_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Sources.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add the **sources** privileges to the specified users or roles\n' + 'after the successful execution of the `GRANT SOURCES` statement.\n' + 'Any new operation by a user or a user that has the specified role\n' + 'which requires the **sources** privilege SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_DictGet = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.DictGet', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting the **dictGet** privilege to one or more users or roles\n' + 'for a database or a table using `GRANT dictGet` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_DictGet_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.DictGet.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add the **dictGet** privileges to the specified users or roles\n' + 'after the successful execution of the `GRANT dictGet` statement.\n' + 'Any new operation by a user or a user that has the specified role\n' + 'which requires the **dictGet** privilege SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_None = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.None', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting no privileges to one or more users or roles\n' + 'for a database or a table using `GRANT NONE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_None_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.None.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add no privileges to the specified users or roles\n' + 'after the successful execution of the `GRANT NONE` statement.\n' + 'Any new operation by a user or a user that has the specified role\n' + 'which requires no privileges SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_All = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.All', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting the **all** privileges to one or more users or roles\n' + 'for a database or a table using the `GRANT ALL` or `GRANT ALL PRIVILEGES` statements.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_All_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.All.Effect', + 
version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add the **all** privileges to the specified users or roles\n' + 'after the successful execution of the `GRANT ALL` or `GRANT ALL PRIVILEGES` statement.\n' + 'Any new operation by a user or a user that has the specified role\n' + 'which requires one or more privileges that are part of the **all**\n' + 'privileges SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_GrantOption = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.GrantOption', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting the **grant option** privilege to one or more users or roles\n' + 'for a database or a table using the `WITH GRANT OPTION` clause in the `GRANT` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_GrantOption_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.GrantOption.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add the **grant option** privilege to the specified users or roles\n' + 'after the successful execution of the `GRANT` statement with the `WITH GRANT OPTION` clause\n' + 'for the privilege that was specified in the statement.\n' + 'Any new `GRANT` statements executed by a user or a user that has the specified role\n' + 'which require the **grant option** for the privilege SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_On = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.On', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the `ON` clause in the `GRANT` privilege statement\n' + 'which SHALL allow specifying one or more tables to which the privilege SHALL\n' + 'be granted using the following patterns\n' + '\n' + '* `*.*` any table in any database\n' + '* `database.*` any table in the specified database\n' + '* `database.table` specific table in the specified database\n' + '* `*` any table in the current database\n' + '* `table` specific table in the current database\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_On_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.On.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL grant privilege on a table specified in the `ON` clause.\n' + 'Any new operation by user or role with privilege on the granted table SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_PrivilegeColumns = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.PrivilegeColumns', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting the privilege **some_privilege** to one or more users or roles\n' + 'for a database or a table using the `GRANT some_privilege(column)` statement for one column.\n' + 'Multiple columns will be supported with `GRANT some_privilege(column1, column2...)` statement.\n' + 'The privileges will be granted for only the specified columns.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_PrivilegeColumns_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.PrivilegeColumns.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL grant the privilege **some_privilege** to the specified users or
roles\n' + 'after the successful execution of the `GRANT some_privilege(column)` statement for the specified column.\n' + 'Granting of the privilege **some_privilege** over multiple columns SHALL happen after the successful\n' + 'execution of the `GRANT some_privilege(column1, column2...)` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires the privilege **some_privilege** over specified columns SHALL succeed.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_OnCluster = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.OnCluster', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying cluster on which to grant privileges using the `ON CLUSTER`\n' + 'clause in the `GRANT PRIVILEGE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Privilege_Syntax = Requirement( + name='RQ.SRS-006.RBAC.Grant.Privilege.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for the `GRANT` statement that\n' + 'grants explicit privileges to a user or a role.\n' + '\n' + '```sql\n' + 'GRANT [ON CLUSTER cluster_name]\n' + ' privilege {SELECT | SELECT(columns) | INSERT | ALTER | CREATE | DROP | TRUNCATE | OPTIMIZE | SHOW | KILL QUERY | ACCESS MANAGEMENT | SYSTEM | INTROSPECTION | SOURCES | dictGet | NONE | ALL [PRIVILEGES]} [, ...]\n' + ' ON {*.* | database.* | database.table | * | table}\n' + ' TO {user | role | CURRENT_USER} [,...]\n' + ' [WITH GRANT OPTION]\n' + '```\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Cluster = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Cluster', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking privileges to one or more users or roles\n' + 'for a database or a table on some specific cluster using the `REVOKE ON CLUSTER cluster_name` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Cluster_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Cluster.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove some privilege from the specified users or roles\n' + 'on cluster **cluster_name** after the successful execution of the\n' + '`REVOKE ON CLUSTER cluster_name some_privilege` statement. 
Any new operation by a user or a user\n' + 'that had the specified role which requires that privilege on cluster **cluster_name** SHALL fail if user does not have it otherwise.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Any = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Any', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking ANY privilege to one or more users or roles\n' + 'for a database or a table using the `REVOKE some_privilege` statement.\n' + '**some_privilege** refers to any ClickHouse defined privilege, whose hierarchy includes\n' + 'SELECT, INSERT, ALTER, CREATE, DROP, TRUNCATE, OPTIMIZE, SHOW, KILL QUERY, ACCESS MANAGEMENT,\n' + 'SYSTEM, INTROSPECTION, SOURCES, dictGet and all of their sub-privileges.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Any_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Any.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove the **some_privilege** privilege from the specified users or roles\n' + 'after the successful execution of the `REVOKE some_privilege` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires the privilege **some_privilege** SHALL fail if user does not have it otherwise.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Select = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Select', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking the **select** privilege to one or more users or roles\n' + 'for a database or a table using the `REVOKE SELECT` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Select_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Select.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove the **select** privilege from the specified users or roles\n' + 'after the successful execution of the `REVOKE SELECT` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires the **select** privilege SHALL fail if user does not have it otherwise.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Insert = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Insert', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking the **insert** privilege to one or more users or roles\n' + 'for a database or a table using the `REVOKE INSERT` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Insert_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Insert.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove the **insert** privilege from the specified users or roles\n' + 'after the successful execution of the `REVOKE INSERT` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires the **insert** privilege SHALL fail if user does not have it otherwise.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Alter = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Alter', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking the
**alter** privilege to one or more users or roles\n' + 'for a database or a table using the `REVOKE ALTER` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Alter_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Alter.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove the **alter** privilege from the specified users or roles\n' + 'after the successful execution of the `REVOKE ALTER` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires the **alter** privilege SHALL fail if user does not have it otherwise.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Create = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Create', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking the **create** privilege to one or more users or roles\n' + 'for a database or a table using the `REVOKE CREATE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Create_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Create.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove the **create** privilege from the specified users or roles\n' + 'after the successful execution of the `REVOKE CREATE` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires the **create** privilege SHALL fail if user does not have it otherwise.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Drop = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Drop', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking the **drop** privilege to one or more users or roles\n' + 'for a database or a table using the `REVOKE DROP` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Drop_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Drop.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove the **drop** privilege from the specified users or roles\n' + 'after the successful execution of the `REVOKE DROP` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires the **drop** privilege SHALL fail if user does not have it otherwise.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Truncate = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Truncate', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking the **truncate** privilege to one or more users or roles\n' + 'for a database or a table using the `REVOKE TRUNCATE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Truncate_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Truncate.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove the **truncate** privilege from the specified users or roles\n' + 'after the successful execution of the `REVOKE TRUNCATE` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires the **truncate** privilege SHALL fail if user does not have it otherwise.\n' + ), + link=None + ) + 
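+# A hedged illustration with the same hypothetical names (`alice`,
+# `default.hits`): the REVOKE requirements above are the mirror image of the
+# grants, e.g.:
+#
+#     REVOKE SELECT ON default.hits FROM alice;
+#     REVOKE TRUNCATE ON default.hits FROM alice;
+#
+# Any new SELECT on `default.hits` by `alice` is then expected to fail unless
+# the privilege is still held through a role or another grant, per
+# RQ.SRS-006.RBAC.Revoke.Privilege.Select.Effect.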
+RQ_SRS_006_RBAC_Revoke_Privilege_Optimize = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Optimize', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking the **optimize** privilege to one or more users or roles\n' + 'for a database or a table using the `REVOKE OPTIMIZE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Optimize_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Optimize.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove the **optimize** privilege from the specified users or roles\n' + 'after the successful execution of the `REVOKE OPTIMIZE` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires the **optimize** privilege SHALL fail if user does not have it otherwise.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Show = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Show', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking the **show** privilege to one or more users or roles\n' + 'for a database or a table using the `REVOKE SHOW` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Show_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Show.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove the **show** privilege from the specified users or roles\n' + 'after the successful execution of the `REVOKE SHOW` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires the **show** privilege SHALL fail if user does not have it otherwise.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_KillQuery = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.KillQuery', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking the **kill query** privilege to one or more users or roles\n' + 'for a database or a table using the `REVOKE KILL QUERY` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_KillQuery_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.KillQuery.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove the **kill query** privilege from the specified users or roles\n' + 'after the successful execution of the `REVOKE KILL QUERY` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires the **kill query** privilege SHALL fail if user does not have it otherwise.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_AccessManagement = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.AccessManagement', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking the **access management** privilege to one or more users or roles\n' + 'for a database or a table using the `REVOKE ACCESS MANAGEMENT` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_AccessManagement_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.AccessManagement.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove the
**access management** privilege from the specified users or roles\n' + 'after the successful execution of the `REVOKE ACCESS MANAGEMENT` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires the **access management** privilege SHALL fail if user does not have it otherwise.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_System = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.System', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking the **system** privilege to one or more users or roles\n' + 'for a database or a table using the `REVOKE SYSTEM` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_System_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.System.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove the **system** privilege from the specified users or roles\n' + 'after the successful execution of the `REVOKE SYSTEM` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires the **system** privilege SHALL fail if user does not have it otherwise.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Introspection = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Introspection', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking the **introspection** privilege to one or more users or roles\n' + 'for a database or a table using the `REVOKE INTROSPECTION` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Introspection_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Introspection.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove the **introspection** privilege from the specified users or roles\n' + 'after the successful execution of the `REVOKE INTROSPECTION` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires the **introspection** privilege SHALL fail if user does not have it otherwise.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Sources = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Sources', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking the **sources** privilege to one or more users or roles\n' + 'for a database or a table using the `REVOKE SOURCES` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Sources_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Sources.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove the **sources** privilege from the specified users or roles\n' + 'after the successful execution of the `REVOKE SOURCES` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires the **sources** privilege SHALL fail if user does not have it otherwise.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_DictGet = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.DictGet', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking the **dictGet** privilege to one or more users or roles\n' + 'for a 
database or a table using the `REVOKE dictGet` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_DictGet_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.DictGet.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove the **dictGet** privilege from the specified users or roles\n' + 'after the successful execution of the `REVOKE dictGet` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires the **dictGet** privilege SHALL fail if user does not have it otherwise.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_PrivelegeColumns = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.PrivelegeColumns', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking the privilege **some_privilege** to one or more users or roles\n' + 'for a database or a table using the `REVOKE some_privilege(column)` statement for one column.\n' + 'Multiple columns will be supported with `REVOKE some_privilege(column1, column2...)` statement.\n' + 'The privileges will be revoked for only the specified columns.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_PrivelegeColumns_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.PrivelegeColumns.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove the privilege **some_privilege** from the specified users or roles\n' + 'after the successful execution of the `REVOKE some_privilege(column)` statement for the specified column.\n' + 'Removal of the privilege **some_privilege** over multiple columns SHALL happen after the successful\n' + 'execution of the `REVOKE some_privilege(column1, column2...)` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires the privilege **some_privilege** over the specified columns SHALL fail if user does not have it otherwise.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Multiple = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Multiple', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking MULTIPLE **privileges** to one or more users or roles\n' + 'for a database or a table using the `REVOKE privilege1, privilege2...` statement.\n' + '**privileges** refers to any set of ClickHouse defined privileges, whose hierarchy includes\n' + 'SELECT, INSERT, ALTER, CREATE, DROP, TRUNCATE, OPTIMIZE, SHOW, KILL QUERY, ACCESS MANAGEMENT,\n' + 'SYSTEM, INTROSPECTION, SOURCES, dictGet and all of their sub-privileges.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Multiple_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Multiple.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove the **privileges** from the specified users or roles\n' + 'after the successful execution of the `REVOKE privilege1, privilege2...` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires any of the **privileges** SHALL fail if user does not have it otherwise.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_All = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.All', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + 
'[ClickHouse] SHALL support revoking **all** privileges to one or more users or roles\n' + 'for a database or a table using the `REVOKE ALL` or `REVOKE ALL PRIVILEGES` statements.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_All_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.All.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove **all** privileges from the specified users or roles\n' + 'after the successful execution of the `REVOKE ALL` or `REVOKE ALL PRIVILEGES` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires one or more privileges that are part of **all**\n' + 'privileges SHALL fail.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_None = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.None', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking **no** privileges to one or more users or roles\n' + 'for a database or a table using the `REVOKE NONE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_None_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.None.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove **no** privileges from the specified users or roles\n' + 'after the successful execution of the `REVOKE NONE` statement.\n' + 'Any new operation by a user or a user that had the specified role\n' + 'SHALL have the same effect after this command as it did before this command.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_On = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.On', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the `ON` clause in the `REVOKE` privilege statement\n' + 'which SHALL allow specifying one or more tables for which the privilege SHALL\n' + 'be revoked using the following patterns\n' + '\n' + '* `db.table` specific table in the specified database\n' + '* `db.*` any table in the specified database\n' + '* `*.*` any table in any database\n' + '* `table` specific table in the current database\n' + '* `*` any table in the current database\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_On_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.On.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove the specified privileges from the one or more tables\n' + 'indicated with the `ON` clause in the `REVOKE` privilege statement.\n' + 'The tables will be indicated using the following patterns\n' + '\n' + '* `db.table` specific table in the specified database\n' + '* `db.*` any table in the specified database\n' + '* `*.*` any table in any database\n' + '* `table` specific table in the current database\n' + '* `*` any table in the current database\n' + '\n' + 'Any new operation by a user or a user that had the specified role\n' + 'which requires one or more privileges on the revoked tables SHALL fail.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_From = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.From', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the `FROM` clause in the `REVOKE` privilege statement\n' + 'which SHALL allow
specifying one or more users from whom the privilege SHALL\n' + 'be revoked using the following patterns\n' + '\n' + '* `{user | CURRENT_USER} [,...]` some combination of users by name, which may include the current user\n' + '* `ALL` all users\n' + '* `ALL EXCEPT {user | CURRENT_USER} [,...]` the logical reverse of the first pattern\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_From_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.From.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove **privileges** from any set of users specified in the `FROM` clause\n' + 'in the `REVOKE` privilege statement. The details of the removed **privileges** will be specified\n' + 'in the other clauses. Any new operation by one of the specified users whose **privileges** have been\n' + 'revoked SHALL fail. The patterns that expand the `FROM` clause are listed below\n' + '\n' + '* `{user | CURRENT_USER} [,...]` some combination of users by name, which may include the current user\n' + '* `ALL` all users\n' + '* `ALL EXCEPT {user | CURRENT_USER} [,...]` the logical reverse of the first pattern\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Revoke_Privilege_Syntax = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Privilege.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for the `REVOKE` statement that\n' + 'revokes explicit privileges of a user or a role.\n' + '\n' + '```sql\n' + 'REVOKE [ON CLUSTER cluster_name] privilege\n' + ' [(column_name [,...])] [,...]\n' + ' ON {db.table|db.*|*.*|table|*}\n' + ' FROM {user | CURRENT_USER} [,...] | ALL | ALL EXCEPT {user | CURRENT_USER} [,...]\n' + '```\n' + '\n' + '\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_PartialRevoke_Syntax = Requirement( + name='RQ.SRS-006.RBAC.PartialRevoke.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support partial revokes by using `partial_revokes` variable\n' + 'that can be set or unset using the following syntax.\n' + '\n' + 'To disable partial revokes the `partial_revokes` variable SHALL be set to `0`\n' + '\n' + '```sql\n' + 'SET partial_revokes = 0\n' + '```\n' + '\n' + 'To enable partial revokes the `partial_revokes` variable SHALL be set to `1`\n' + '\n' + '```sql\n' + 'SET partial_revokes = 1\n' + '```\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_PartialRevoke_Effect = Requirement( + name='RQ.SRS-006.RBAC.PartialRevoke.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + 'FIXME: Need to be defined.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Role = Requirement( + name='RQ.SRS-006.RBAC.Grant.Role', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting one or more roles to\n' + 'one or more users or roles using the `GRANT` role statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_Grant_Role_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Role.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add all the privileges that are assigned to the role\n' + 'which is granted to the user or the role to which `GRANT` role statement is applied.\n' + 'Any new operation that requires the privileges included in the role\n' + 'SHALL succeed.\n' + ), + link=None + ) + 
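+# A hedged illustration (hypothetical `reader` role and `alice` user): the
+# role-granting requirements above compose with the privilege grants shown
+# earlier, e.g.:
+#
+#     CREATE ROLE reader;
+#     GRANT SELECT ON default.hits TO reader;
+#     GRANT reader TO alice WITH ADMIN OPTION;
+#
+# `alice` then receives every privilege assigned to `reader`, and the admin
+# option additionally lets her grant `reader` to other users or roles.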
+RQ_SRS_006_RBAC_Grant_Role_CurrentUser = Requirement( + name='RQ.SRS-006.RBAC.Grant.Role.CurrentUser', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting one or more roles to the current user using\n' + '`TO CURRENT_USER` clause in the `GRANT` role statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Grant_Role_CurrentUser_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Role.CurrentUser.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add all the privileges that are assigned to the role\n' + 'which is granted to the current user via the `GRANT` statement. Any new operation that\n' + 'requires the privileges included in the role SHALL succeed.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Grant_Role_AdminOption = Requirement( + name='RQ.SRS-006.RBAC.Grant.Role.AdminOption', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support granting `admin option` privilege\n' + 'to one or more users or roles using the `WITH ADMIN OPTION` clause\n' + 'in the `GRANT` role statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Grant_Role_AdminOption_Effect = Requirement( + name='RQ.SRS-006.RBAC.Grant.Role.AdminOption.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL add the **admin option** privilege to the specified users or roles\n' + 'after the successful execution of the `GRANT` role statement with the `WITH ADMIN OPTION` clause.\n' + 'Any new **system query** statements executed by a user or a user that has the specified role\n' + 'which require the **admin option** privilege SHALL succeed.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Grant_Role_OnCluster = Requirement( + name='RQ.SRS-006.RBAC.Grant.Role.OnCluster', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying cluster on which the user is to be granted one or more roles\n' + 'using `ON CLUSTER` clause in the `GRANT` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Grant_Role_Syntax = Requirement( + name='RQ.SRS-006.RBAC.Grant.Role.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for `GRANT` role statement\n' + '\n' + '``` sql\n' + 'GRANT\n' + ' [ON CLUSTER cluster_name]\n' + ' role [, role ...]\n' + ' TO {user | role | CURRENT_USER} [,...]\n' + ' [WITH ADMIN OPTION]\n' + '```\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Revoke_Role = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Role', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking one or more roles from\n' + 'one or more users or roles using the `REVOKE` role statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Revoke_Role_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Role.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove all the privileges that are assigned to the role\n' + 'that is being revoked from the user or the role to which the `REVOKE` role statement is applied.\n' + 'Any new operation, by the user or users that have the role which included the role being revoked,\n' + 'that requires the privileges included in the role SHALL fail if the user does not have it otherwise.\n' + ), + link=None + ) +
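+# Editorial sketch (hypothetical names): revoking a role, per the
+# RQ.SRS-006.RBAC.Revoke.Role requirements above.
+#
+#   REVOKE accountant FROM mira;
+#   -- any new operation by mira that relied only on privileges obtained
+#   -- through the accountant role SHALL now fail
+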
+RQ_SRS_006_RBAC_Revoke_Role_Keywords = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Role.Keywords', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking one or more roles from\n' + 'special groupings of one or more users or roles with the `ALL`, `ALL EXCEPT`,\n' + 'and `CURRENT_USER` keywords.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Revoke_Role_Keywords_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Role.Keywords.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove all the privileges that are assigned to the role\n' + 'that is being revoked from the user or the role to which the `REVOKE` role statement with the specified keywords is applied.\n' + 'Any new operation, by the user or users that have the role which included the role being revoked,\n' + 'that requires the privileges included in the role SHALL fail if the user does not have it otherwise.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Revoke_Role_Cluster = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Role.Cluster', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking one or more roles from\n' + 'one or more users or roles from one or more clusters\n' + 'using the `REVOKE ON CLUSTER` role statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Revoke_Role_Cluster_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Role.Cluster.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove all the privileges that are assigned to the role\n' + 'that is being revoked from the user or the role from the cluster(s)\n' + 'to which the `REVOKE ON CLUSTER` role statement is applied.\n' + 'Any new operation, by the user or users that have the role which included the role being revoked,\n' + 'that requires the privileges included in the role SHALL fail if the user does not have it otherwise.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Revoke_AdminOption = Requirement( + name='RQ.SRS-006.RBAC.Revoke.AdminOption', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support revoking `admin option` privilege\n' + 'from one or more users or roles using the `ADMIN OPTION FOR` clause\n' + 'in the `REVOKE` role statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Revoke_AdminOption_Effect = Requirement( + name='RQ.SRS-006.RBAC.Revoke.AdminOption.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL remove the **admin option** privilege from the specified users or roles\n' + 'after the successful execution of the `REVOKE` role statement with the `ADMIN OPTION FOR` clause.\n' + 'Any new **system query** statements executed by a user or a user that has the specified role\n' + 'which require the **admin option** privilege SHALL fail.\n' + ), + link=None + ) +
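+# Editorial sketch (hypothetical names): revoking only the admin option while
+# leaving the role itself granted, per RQ.SRS-006.RBAC.Revoke.AdminOption.
+#
+#   REVOKE ADMIN OPTION FOR accountant FROM mira;
+#   -- mira keeps the accountant role but SHALL no longer be able to
+#   -- grant or revoke it to or from other users
+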
+RQ_SRS_006_RBAC_Revoke_Role_Syntax = Requirement( + name='RQ.SRS-006.RBAC.Revoke.Role.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for the `REVOKE` role statement\n' + '\n' + '```sql\n' + 'REVOKE [ON CLUSTER cluster_name] [ADMIN OPTION FOR]\n' + ' role [,...]\n' + ' FROM {user | role | CURRENT_USER} [,...] | ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...]\n' + '```\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Show_Grants = Requirement( + name='RQ.SRS-006.RBAC.Show.Grants', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support listing all the privileges granted to the current user and role\n' + 'using the `SHOW GRANTS` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Show_Grants_For = Requirement( + name='RQ.SRS-006.RBAC.Show.Grants.For', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support listing all the privileges granted to a user or a role\n' + 'using the `FOR` clause in the `SHOW GRANTS` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Show_Grants_Syntax = Requirement( + name='RQ.SRS-006.RBAC.Show.Grants.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL use the following syntax for the `SHOW GRANTS` statement\n' + '\n' + '``` sql\n' + 'SHOW GRANTS [FOR user_or_role]\n' + '```\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Create = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Create', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support creating settings profile using the `CREATE SETTINGS PROFILE` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Create_Effect = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Create.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL use new profile after the `CREATE SETTINGS PROFILE` statement\n' + 'is successfully executed for any new operations performed by all the users and roles to which\n' + 'the settings profile is assigned.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Create_IfNotExists = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Create.IfNotExists', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE SETTINGS PROFILE` statement\n' + 'to skip raising an exception if a settings profile with the same **name** already exists.\n' + 'If `IF NOT EXISTS` clause is not specified then an exception SHALL be raised if\n' + 'a settings profile with the same **name** already exists.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Create_Replace = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Create.Replace', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE SETTINGS PROFILE` statement\n' + 'to replace existing settings profile if it already exists.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Create_Variables = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Create.Variables', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support assigning values and constraints to one or more\n' + 'variables in the `CREATE SETTINGS PROFILE` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Value = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Value', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support assigning a variable value in
the `CREATE SETTINGS PROFILE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Value_Effect = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Value.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL use new variable values after `CREATE SETTINGS PROFILE` statement is\n' + 'successfully executed for any new operations performed by all the users and roles to which\n' + 'the settings profile is assigned.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Constraints', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support setting `MIN`, `MAX`, `READONLY`, and `WRITABLE`\n' + 'constraints for the variables in the `CREATE SETTINGS PROFILE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints_Effect = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Constraints.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL use new variable constraints after `CREATE SETTINGS PROFILE` statement is\n' + 'successfully executed for any new operations performed by all the users and roles to which\n' + 'the settings profile is assigned.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support assigning settings profile to one or more users\n' + 'or roles in the `CREATE SETTINGS PROFILE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_None = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.None', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support assigning settings profile to no users or roles using\n' + '`TO NONE` clause in the `CREATE SETTINGS PROFILE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_All = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.All', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support assigning settings profile to all current users and roles\n' + 'using `TO ALL` clause in the `CREATE SETTINGS PROFILE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_AllExcept = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.AllExcept', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support excluding assignment to one or more users or roles using\n' + 'the `ALL EXCEPT` clause in the `CREATE SETTINGS PROFILE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SettingsProfile_Create_Inherit = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Create.Inherit', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support inheriting profile settings from indicated profile using\n' + 'the `INHERIT` clause in the `CREATE SETTINGS PROFILE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SettingsProfile_Create_OnCluster = 
Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Create.OnCluster', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying what cluster to create settings profile on\n' + 'using `ON CLUSTER` clause in the `CREATE SETTINGS PROFILE` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Create_Syntax = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Create.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for the `CREATE SETTINGS PROFILE` statement.\n' + '\n' + '``` sql\n' + 'CREATE SETTINGS PROFILE [IF NOT EXISTS | OR REPLACE] name\n' + ' [ON CLUSTER cluster_name]\n' + " [SET varname [= value] [MIN min] [MAX max] [READONLY|WRITABLE] | [INHERIT 'profile_name'] [,...]]\n" + ' [TO {user_or_role [,...] | NONE | ALL | ALL EXCEPT user_or_role [,...]}]\n' + '```\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Alter = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Alter', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering settings profile using the `ALTER SETTINGS PROFILE` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Alter_Effect = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL use the updated settings profile after `ALTER SETTINGS PROFILE`\n' + 'is successfully executed for any new operations performed by all the users and roles to which\n' + 'the settings profile is assigned or SHALL raise an exception if the settings profile does not exist.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Alter_IfExists = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Alter.IfExists', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support `IF EXISTS` clause in the `ALTER SETTINGS PROFILE` statement\n' + 'to skip raising an exception if a settings profile does not exist.\n' + 'If the `IF EXISTS` clause is not specified then an exception SHALL be\n' + 'raised if a settings profile does not exist.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Alter_Rename = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Rename', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support renaming settings profile using the `RENAME TO` clause\n' + 'in the `ALTER SETTINGS PROFILE` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering values and constraints of one or more\n' + 'variables in the `ALTER SETTINGS PROFILE` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Value = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Value', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering the value of the variable in the `ALTER SETTINGS PROFILE` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Value_Effect = Requirement( +
name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Value.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL use the new value of the variable after `ALTER SETTINGS PROFILE`\n' + 'is successfully executed for any new operations performed by all the users and roles to which\n' + 'the settings profile is assigned.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Constraints', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering `MIN`, `MAX`, `READONLY`, and `WRITABLE`\n' + 'constraints for the variables in the `ALTER SETTINGS PROFILE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints_Effect = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Constraints.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL use new constraints after `ALTER SETTINGS PROFILE`\n' + 'is successfully executed for any new operations performed by all the users and roles to which\n' + 'the settings profile is assigned.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support reassigning settings profile to one or more users\n' + 'or roles using the `TO` clause in the `ALTER SETTINGS PROFILE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_Effect = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL unset all the variables and constraints that were defined in the settings profile\n' + 'in all users and roles to which the settings profile was previously assigned.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_None = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.None', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support reassigning settings profile to no users or roles using the\n' + '`TO NONE` clause in the `ALTER SETTINGS PROFILE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_All = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.All', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support reassigning settings profile to all current users and roles\n' + 'using the `TO ALL` clause in the `ALTER SETTINGS PROFILE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_AllExcept = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.AllExcept', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support excluding assignment to one or more users or roles using\n' + 'the `TO ALL EXCEPT` clause in the `ALTER SETTINGS PROFILE` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_Inherit = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.Inherit', + version='1.0', 
+ priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering the settings profile by inheriting settings from\n' + 'the specified profile using `INHERIT` clause in the `ALTER SETTINGS PROFILE` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_OnCluster = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.OnCluster', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering the settings profile on a specified cluster using\n' + '`ON CLUSTER` clause in the `ALTER SETTINGS PROFILE` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Alter_Syntax = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for the `ALTER SETTINGS PROFILE` statement.\n' + '\n' + '``` sql\n' + 'ALTER SETTINGS PROFILE [IF EXISTS] name\n' + ' [ON CLUSTER cluster_name]\n' + ' [RENAME TO new_name]\n' + " [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | INHERIT 'profile_name'] [,...]\n" + ' [TO {user_or_role [,...] | NONE | ALL | ALL EXCEPT user_or_role [,...]}]\n' + '```\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Drop = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Drop', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support removing one or more settings profiles using the `DROP SETTINGS PROFILE` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Drop_Effect = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Drop.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL unset all the variables and constraints that were defined in the settings profile\n' + 'in all the users and roles to which the settings profile was assigned.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Drop_IfExists = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Drop.IfExists', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP SETTINGS PROFILE` statement\n' + 'to skip raising an exception if the settings profile does not exist.\n' + 'If the `IF EXISTS` clause is not specified then an exception SHALL be\n' + 'raised if a settings profile does not exist.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Drop_OnCluster = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Drop.OnCluster', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support dropping one or more settings profiles on specified cluster using\n' + '`ON CLUSTER` clause in the `DROP SETTINGS PROFILE` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_SettingsProfile_Drop_Syntax = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.Drop.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for the `DROP SETTINGS PROFILE` statement\n' + '\n' + '``` sql\n' + 'DROP SETTINGS PROFILE [IF EXISTS] name [,name,...]\n' + '```\n' + ), + link=None + ) +
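+# Editorial sketch (hypothetical profile, setting, and user names) tying the
+# CREATE/ALTER/DROP SETTINGS PROFILE requirements above together, following
+# the syntax blocks given in this section.
+#
+#   CREATE SETTINGS PROFILE IF NOT EXISTS restricted
+#       SET max_memory_usage = 10000000 MIN 5000000 MAX 20000000
+#       TO mira;
+#   ALTER SETTINGS PROFILE restricted RENAME TO limited;
+#   DROP SETTINGS PROFILE IF EXISTS limited;
+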
+RQ_SRS_006_RBAC_SettingsProfile_ShowCreateSettingsProfile = Requirement( + name='RQ.SRS-006.RBAC.SettingsProfile.ShowCreateSettingsProfile', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support showing the `CREATE SETTINGS PROFILE` statement used to create the settings profile\n' + 'using the `SHOW CREATE SETTINGS PROFILE` statement with the following syntax\n' + '\n' + '``` sql\n' + 'SHOW CREATE SETTINGS PROFILE name\n' + '```\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support creating quotas using the `CREATE QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create_Effect = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL use new limits specified by the quota after the `CREATE QUOTA` statement\n' + 'is successfully executed for any new operations performed by all the users and roles to which\n' + 'the quota is assigned.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create_IfNotExists = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.IfNotExists', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE QUOTA` statement\n' + 'to skip raising an exception if a quota with the same **name** already exists.\n' + 'If `IF NOT EXISTS` clause is not specified then an exception SHALL be raised if\n' + 'a quota with the same **name** already exists.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create_Replace = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.Replace', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE QUOTA` statement\n' + 'to replace existing quota if it already exists.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create_Cluster = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.Cluster', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support creating quotas on a specific cluster with the\n' + '`ON CLUSTER` clause in the `CREATE QUOTA` statement.\n' + ), + link=None + ) +
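+# Editorial sketch (hypothetical quota name): creating a quota idempotently
+# or replacing an existing one, per the CREATE QUOTA requirements above.
+#
+#   CREATE QUOTA IF NOT EXISTS qA;
+#   CREATE QUOTA OR REPLACE qA;
+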
+RQ_SRS_006_RBAC_Quota_Create_Interval = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.Interval', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support defining the quota interval that specifies\n' + 'a period of time over which the quota SHALL apply using the\n' + '`FOR INTERVAL` clause in the `CREATE QUOTA` statement.\n' + '\n' + 'This statement SHALL also support a number and a time period which will be one\n' + 'of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. Thus, the complete syntax SHALL be:\n' + '\n' + '`FOR INTERVAL number {SECOND | MINUTE | HOUR | DAY}` where number is some real number\n' + 'to define the interval.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create_Interval_Randomized = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.Interval.Randomized', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support defining the quota randomized interval that specifies\n' + 'a period of time over which the quota SHALL apply using the\n' + '`FOR RANDOMIZED INTERVAL` clause in the `CREATE QUOTA` statement.\n' + '\n' + 'This statement SHALL also support a number and a time period which will be one\n' + 'of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. Thus, the complete syntax SHALL be:\n' + '\n' + '`FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY}` where number is some\n' + 'real number to define the interval.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create_Queries = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.Queries', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support limiting number of requests over a period of time\n' + 'using the `QUERIES` clause in the `CREATE QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create_Errors = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.Errors', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support limiting number of queries that threw an exception\n' + 'using the `ERRORS` clause in the `CREATE QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create_ResultRows = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.ResultRows', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support limiting the total number of rows given as the result\n' + 'using the `RESULT ROWS` clause in the `CREATE QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create_ReadRows = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.ReadRows', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support limiting the total number of source rows read from tables\n' + 'for running the query on all remote servers\n' + 'using the `READ ROWS` clause in the `CREATE QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create_ResultBytes = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.ResultBytes', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support limiting the total number of bytes that can be returned as the result\n' + 'using the `RESULT BYTES` clause in the `CREATE QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create_ReadBytes = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.ReadBytes', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support limiting the total number of source bytes read from tables\n' + 'for running the query on all remote servers\n' + 'using the `READ BYTES` clause in the `CREATE QUOTA` statement.\n' + ), + link=None + ) +
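+# Editorial sketch (hypothetical names): a quota with per-interval limits,
+# following the interval and limit clauses described above.
+#
+#   CREATE QUOTA qB FOR INTERVAL 1 HOUR
+#       MAX QUERIES = 100, MAX ERRORS = 10
+#       TO mira;
+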
+RQ_SRS_006_RBAC_Quota_Create_ExecutionTime = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.ExecutionTime', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support limiting the maximum query execution time\n' + 'using the `EXECUTION TIME` clause in the `CREATE QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create_NoLimits = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.NoLimits', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying that the quota SHALL impose no limits\n' + 'using the `NO LIMITS` clause in the `CREATE QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create_TrackingOnly = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.TrackingOnly', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support tracking resource consumption without imposing any limits\n' + 'using the `TRACKING ONLY` clause in the `CREATE QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create_KeyedBy = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.KeyedBy', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support tracking quota for some key\n' + 'specified by the `KEYED BY` clause in the `CREATE QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create_KeyedByOptions = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.KeyedByOptions', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support tracking quota separately for some parameter\n' + "using the `KEYED BY 'parameter'` clause in the `CREATE QUOTA` statement.\n" + '\n' + "'parameter' can be one of:\n" + "`{'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}`\n" + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create_Assignment = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.Assignment', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support assigning quota to one or more users\n' + 'or roles using the `TO` clause in the `CREATE QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create_Assignment_None = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.Assignment.None', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support assigning quota to no users or roles using\n' + '`TO NONE` clause in the `CREATE QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create_Assignment_All = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.Assignment.All', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support assigning quota to all current users and roles\n' + 'using `TO ALL` clause in the `CREATE QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Create_Assignment_Except = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.Assignment.Except', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support excluding assignment of quota to one or more users or roles using\n' + 'the `EXCEPT` clause in the `CREATE QUOTA` statement.\n' + ), + link=None + ) +
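+# Editorial sketch (hypothetical name): keying a quota and tracking usage
+# without imposing limits, per the KEYED BY and TRACKING ONLY requirements.
+#
+#   CREATE QUOTA qC KEYED BY 'user name'
+#       FOR INTERVAL 30 MINUTE TRACKING ONLY
+#       TO ALL;
+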
+RQ_SRS_006_RBAC_Quota_Create_Syntax = Requirement( + name='RQ.SRS-006.RBAC.Quota.Create.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for the `CREATE QUOTA` statement\n' + '\n' + '```sql\n' + 'CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]\n' + " [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}]\n" + ' [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY}\n' + ' {MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] |\n' + ' NO LIMITS | TRACKING ONLY} [,...]]\n' + ' [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]\n' + '```\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering quotas using the `ALTER QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_Effect = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL use new limits specified by the updated quota after the `ALTER QUOTA` statement\n' + 'is successfully executed for any new operations performed by all the users and roles to which\n' + 'the quota is assigned.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_IfExists = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.IfExists', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support `IF EXISTS` clause in the `ALTER QUOTA` statement\n' + 'to skip raising an exception if a quota does not exist.\n' + 'If the `IF EXISTS` clause is not specified then an exception SHALL be raised if\n' + 'a quota does not exist.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_Rename = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.Rename', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support `RENAME TO` clause in the `ALTER QUOTA` statement\n' + 'to rename the quota to the specified name.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_Cluster = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.Cluster', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering quotas on a specific cluster with the\n' + '`ON CLUSTER` clause in the `ALTER QUOTA` statement.\n' + ), + link=None + ) +
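+# Editorial sketch (hypothetical names): renaming an existing quota, per the
+# ALTER QUOTA requirements above.
+#
+#   ALTER QUOTA IF EXISTS qA RENAME TO qA_new;
+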
+RQ_SRS_006_RBAC_Quota_Alter_Interval = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.Interval', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support redefining the quota interval that specifies\n' + 'a period of time over which the quota SHALL apply using the\n' + '`FOR INTERVAL` clause in the `ALTER QUOTA` statement.\n' + '\n' + 'This statement SHALL also support a number and a time period which will be one\n' + 'of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. Thus, the complete syntax SHALL be:\n' + '\n' + '`FOR INTERVAL number {SECOND | MINUTE | HOUR | DAY}` where number is some real number\n' + 'to define the interval.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_Interval_Randomized = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.Interval.Randomized', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support redefining the quota randomized interval that specifies\n' + 'a period of time over which the quota SHALL apply using the\n' + '`FOR RANDOMIZED INTERVAL` clause in the `ALTER QUOTA` statement.\n' + '\n' + 'This statement SHALL also support a number and a time period which will be one\n' + 'of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. Thus, the complete syntax SHALL be:\n' + '\n' + '`FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY}` where number is some\n' + 'real number to define the interval.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_Queries = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.Queries', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering the limit of number of requests over a period of time\n' + 'using the `QUERIES` clause in the `ALTER QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_Errors = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.Errors', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering the limit of number of queries that threw an exception\n' + 'using the `ERRORS` clause in the `ALTER QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_ResultRows = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.ResultRows', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering the limit of the total number of rows given as the result\n' + 'using the `RESULT ROWS` clause in the `ALTER QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_ReadRows = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.ReadRows', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering the limit of the total number of source rows read from tables\n' + 'for running the query on all remote servers\n' + 'using the `READ ROWS` clause in the `ALTER QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_ALter_ResultBytes = Requirement( + name='RQ.SRS-006.RBAC.Quota.ALter.ResultBytes', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering the limit of the total number of bytes that can be returned as the result\n' + 'using the `RESULT BYTES` clause in the `ALTER QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_ReadBytes = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.ReadBytes', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering the limit of the total number of source bytes read from tables\n' + 'for running the query on all remote servers\n' + 'using the `READ BYTES` clause in the `ALTER QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_ExecutionTime = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.ExecutionTime', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering the limit of the maximum query execution time\n' + 'using the `EXECUTION TIME` clause in the `ALTER QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_NoLimits = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.NoLimits', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying that the quota SHALL impose no limits\n' + 'using the `NO LIMITS` clause in the `ALTER QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_TrackingOnly = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.TrackingOnly', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support tracking resource consumption without imposing any limits\n' + 'using the `TRACKING ONLY` clause in the `ALTER QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_KeyedBy = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.KeyedBy', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering quota to track quota separately for some key\n' + 'specified by the `KEYED BY` clause in the `ALTER QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_KeyedByOptions = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.KeyedByOptions', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering quota to track quota separately for some parameter\n' + "using the `KEYED BY 'parameter'` clause in the `ALTER QUOTA` statement.\n" + '\n' + "'parameter' can be one of:\n" + "`{'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}`\n" + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_Assignment = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.Assignment', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support reassigning quota to one or more users\n' + 'or roles using the `TO` clause in the `ALTER QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_Assignment_None = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.Assignment.None', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support reassigning quota to no users or roles using\n' + '`TO NONE` clause in the `ALTER QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_Assignment_All = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.Assignment.All', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support reassigning quota to all current users and roles\n' + 'using `TO ALL` clause in the `ALTER QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_Assignment_Except = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.Assignment.Except', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support excluding assignment of quota to one or more users or roles using\n' + 'the `EXCEPT` clause in the `ALTER QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Alter_Syntax = Requirement( + name='RQ.SRS-006.RBAC.Quota.Alter.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for the `ALTER QUOTA` statement\n' + '\n' + '``` sql\n' + 'ALTER QUOTA [IF EXISTS] name\n' + ' {{{QUERIES | ERRORS | RESULT ROWS | READ ROWS | RESULT BYTES | READ BYTES | EXECUTION TIME} number} [, ...] FOR INTERVAL number time_unit} [, ...]\n' + ' [KEYED BY USERNAME | KEYED BY IP | NOT KEYED] [ALLOW CUSTOM KEY | DISALLOW CUSTOM KEY]\n' + ' [TO {user_or_role [,...] | NONE | ALL} [EXCEPT user_or_role [,...]]]\n' + '```\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Drop = Requirement( + name='RQ.SRS-006.RBAC.Quota.Drop', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support removing one or more quotas using the `DROP QUOTA` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Drop_Effect = Requirement( + name='RQ.SRS-006.RBAC.Quota.Drop.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL unset all the limits that were defined in the quota\n' + 'in all the users and roles to which the quota was assigned.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Drop_IfExists = Requirement( + name='RQ.SRS-006.RBAC.Quota.Drop.IfExists', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP QUOTA` statement\n' + 'to skip raising an exception when the quota does not exist.\n' + 'If the `IF EXISTS` clause is not specified then an exception SHALL be\n' + 'raised if the quota does not exist.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Drop_Cluster = Requirement( + name='RQ.SRS-006.RBAC.Quota.Drop.Cluster', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support using `ON CLUSTER` clause in the `DROP QUOTA` statement\n' + 'to indicate the cluster the quota to be dropped is located on.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_Drop_Syntax = Requirement( + name='RQ.SRS-006.RBAC.Quota.Drop.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for the `DROP QUOTA` statement\n' + '\n' + '``` sql\n' + 'DROP QUOTA [IF EXISTS] name [,name...]\n' + '```\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_ShowQuotas = Requirement( + name='RQ.SRS-006.RBAC.Quota.ShowQuotas', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support showing all of the current quotas\n' + 'using the `SHOW QUOTAS` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_ShowQuotas_IntoOutfile = Requirement( + name='RQ.SRS-006.RBAC.Quota.ShowQuotas.IntoOutfile', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the `INTO OUTFILE` clause in the `SHOW QUOTAS` statement to write the output to a file specified by a given string literal.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_ShowQuotas_Format = Requirement( + name='RQ.SRS-006.RBAC.Quota.ShowQuotas.Format', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the `FORMAT` clause in the `SHOW QUOTAS` statement to define a format for the output quota list.\n' + '\n' + 'The list of valid output formats is given in the output column at:\n' + 'https://clickhouse.tech/docs/en/interfaces/formats/\n' + ), + link=None + ) +
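+# Editorial sketch: listing quotas and redirecting the output, per the
+# SHOW QUOTAS requirements above; the file name is hypothetical.
+#
+#   SHOW QUOTAS INTO OUTFILE 'quotas.tsv' FORMAT TabSeparated;
+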
+RQ_SRS_006_RBAC_Quota_ShowQuotas_Settings = Requirement( + name='RQ.SRS-006.RBAC.Quota.ShowQuotas.Settings', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the `SETTINGS` clause in the `SHOW QUOTAS` statement to specify the settings used when showing all quotas.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_ShowQuotas_Syntax = Requirement( + name='RQ.SRS-006.RBAC.Quota.ShowQuotas.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support using the `SHOW QUOTAS` statement\n' + 'with the following syntax\n' + '``` sql\n' + 'SHOW QUOTAS\n' + '```\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Name = Requirement( + name='RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Name', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support showing the `CREATE QUOTA` statement used to create the quota with some given name\n' + 'using the `SHOW CREATE QUOTA` statement with the following syntax\n' + '\n' + '``` sql\n' + 'SHOW CREATE QUOTA name\n' + '```\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Current = Requirement( + name='RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Current', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support showing the `CREATE QUOTA` statement used to create the CURRENT quota\n' + 'using the `SHOW CREATE QUOTA CURRENT` statement or the shorthand form\n' + '`SHOW CREATE QUOTA`\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Syntax = Requirement( + name='RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax when\n' + 'using the `SHOW CREATE QUOTA` statement.\n' + '\n' + '```sql\n' + 'SHOW CREATE QUOTA [name | CURRENT]\n' + '```\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_RowPolicy_Create = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Create', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support creating row policy using the `CREATE ROW POLICY` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_RowPolicy_Create_Effect = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Create.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL use the new row policy to control access to the specified table\n' + 'after the `CREATE ROW POLICY` statement is successfully executed\n' + 'for any new operations on the table performed by all the users and roles to which\n' + 'the row policy is assigned.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_RowPolicy_Create_IfNotExists = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Create.IfNotExists', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE ROW POLICY` statement\n' + 'to skip raising an exception if a row policy with the same **name** already exists.\n' + 'If the `IF NOT EXISTS` clause is not specified then an exception SHALL be raised if\n' + 'a row policy with the same **name** already exists.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_RowPolicy_Create_Replace = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Create.Replace', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE ROW POLICY` statement\n' + 'to replace existing row policy if it already exists.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_RowPolicy_Create_OnCluster = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Create.OnCluster', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying cluster on which to create the row policy\n' + 'using the `ON CLUSTER` clause in the `CREATE ROW POLICY` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_RowPolicy_Create_On = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Create.On', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying table on which to create the row policy\n' + 'using the `ON` clause in the `CREATE ROW POLICY` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_RowPolicy_Create_Access = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Create.Access', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support allowing or restricting access to rows using the\n' + '`AS` clause in the `CREATE ROW POLICY` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_RowPolicy_Create_Access_Permissive = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Create.Access.Permissive', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support allowing access to rows using the\n' + '`AS PERMISSIVE` clause in the `CREATE ROW POLICY` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_RowPolicy_Create_Access_Restrictive = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Create.Access.Restrictive', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support restricting access to rows using the\n' + '`AS RESTRICTIVE` clause in the `CREATE ROW POLICY` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_RowPolicy_Create_ForSelect = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Create.ForSelect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying which rows are affected\n' + 'using the `FOR SELECT` clause in the `CREATE ROW POLICY` statement.\n' + 'REQUIRES CONFIRMATION\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_RowPolicy_Create_Condition = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Create.Condition', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying a condition\n' + 'that can be any SQL expression which returns a boolean using the `USING`\n' + 'clause in the `CREATE ROW POLICY` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_RowPolicy_Create_Condition_Effect = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Create.Condition.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL check the condition specified in the row policy using the\n' + '`USING` clause in the `CREATE ROW POLICY` statement. The users or roles\n' + 'to which the row policy is assigned SHALL only see data for which\n' + 'the condition evaluates to the boolean value of `true`.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_RowPolicy_Create_Assignment = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Create.Assignment', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support assigning row policy to one or more users\n' + 'or roles using the `TO` clause in the `CREATE ROW POLICY` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_None = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.None', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support assigning row policy to no users or roles using\n' + 'the `TO NONE` clause in the `CREATE ROW POLICY` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_All = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.All', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support assigning row policy to all current users and roles\n' + 'using `TO ALL` clause in the `CREATE ROW POLICY` statement.\n' + ), + link=None + ) +
+RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_AllExcept = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.AllExcept', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support excluding assignment of row policy to one or more users or roles using\n' + 'the `ALL EXCEPT` clause in the `CREATE ROW POLICY` statement.\n' + ), + link=None + ) +
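+# Editorial sketch (hypothetical table, column, and user names): a permissive
+# row policy with a boolean condition, per the CREATE ROW POLICY requirements
+# above.
+#
+#   CREATE ROW POLICY visible ON mydb.mytable
+#       AS PERMISSIVE FOR SELECT
+#       USING owner = currentUser()
+#       TO mira;
+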
+RQ_SRS_006_RBAC_RowPolicy_Alter = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Alter', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering row policy using the `ALTER ROW POLICY` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Alter_Effect = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Alter.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL use the updated row policy to control access to the specified table\n' + 'after the `ALTER ROW POLICY` statement is successfully executed\n' + 'for any new operations on the table performed by all the users and roles to which\n' + 'the row policy is assigned.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Alter_IfExists = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Alter.IfExists', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the `IF EXISTS` clause in the `ALTER ROW POLICY` statement\n' + 'to skip raising an exception if a row policy does not exist.\n' + 'If the `IF EXISTS` clause is not specified then an exception SHALL be raised if\n' + 'a row policy does not exist.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Alter_ForSelect = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Alter.ForSelect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support modifying rows on which to apply the row policy\n' + 'using the `FOR SELECT` clause in the `ALTER ROW POLICY` statement.\n' + 'REQUIRES FUNCTION CONFIRMATION.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Alter_OnCluster = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Alter.OnCluster', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying cluster on which to alter the row policy\n' + 'using the `ON CLUSTER` clause in the `ALTER ROW POLICY` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Alter_On = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Alter.On', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support specifying table on which to alter the row policy\n' + 'using the `ON` clause in the `ALTER ROW POLICY` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Alter_Rename = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Alter.Rename', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support renaming the row policy using the `RENAME` clause\n' + 'in the `ALTER ROW POLICY` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Alter_Access = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Alter.Access', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support altering access to rows using the\n' + '`AS` clause in the `ALTER ROW POLICY` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Alter_Access_Permissive = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Permissive', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support permitting access to rows using the\n' + '`AS PERMISSIVE` clause in the `ALTER ROW POLICY` statement.\n' + ), + 
link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Alter_Access_Restrictive = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Restrictive', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support restricting access to rows using the\n' + '`AS RESTRICTIVE` clause in the `ALTER ROW POLICY` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Alter_Condition = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Alter.Condition', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support re-specifying the row policy condition\n' + 'using the `USING` clause in the `ALTER ROW POLICY` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Alter_Condition_Effect = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Alter.Condition.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL check the new condition specified for the row policy using the\n' + '`USING` clause in the `ALTER ROW POLICY` statement. The users or roles\n' + 'to which the row policy is assigned SHALL only see data for which\n' + 'the new condition evaluates to the boolean value of `true`.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Alter_Condition_None = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Alter.Condition.None', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support removing the row policy condition\n' + 'using the `USING NONE` clause in the `ALTER ROW POLICY` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support reassigning row policy to one or more users\n' + 'or roles using the `TO` clause in the `ALTER ROW POLICY` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_None = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.None', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support reassigning row policy to no users or roles using\n' + 'the `TO NONE` clause in the `ALTER ROW POLICY` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_All = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.All', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support reassigning row policy to all current users and roles\n' + 'using the `TO ALL` clause in the `ALTER ROW POLICY` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_AllExcept = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.AllExcept', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support excluding assignment of row policy to one or more users or roles using\n' + 'the `ALL EXCEPT` clause in the `ALTER ROW POLICY` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Alter_Syntax = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Alter.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for the `ALTER ROW POLICY` statement\n' + '\n' + '``` sql\n' + 'ALTER [ROW] POLICY [IF EXISTS] name [ON CLUSTER cluster_name] ON [database.]table\n' + '    [RENAME TO new_name]\n' + '    [AS {PERMISSIVE | RESTRICTIVE}]\n' + '    [FOR SELECT]\n' + '    [USING {condition | NONE}][,...]\n' + '    [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]\n' + '```\n' + ), + link=None + ) +
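+# Illustrative example of the syntax above (all names are hypothetical): +# ALTER ROW POLICY IF EXISTS policy0 ON default.foo RENAME TO policy1 AS RESTRICTIVE USING NONE TO ALL EXCEPT role0 +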
+RQ_SRS_006_RBAC_RowPolicy_Drop = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Drop', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support removing one or more row policies using the `DROP ROW POLICY` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Drop_Effect = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Drop.Effect', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL stop checking the condition defined in the row policy\n' + 'for all the users and roles to which the row policy was assigned.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Drop_IfExists = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Drop.IfExists', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support using the `IF EXISTS` clause in the `DROP ROW POLICY` statement\n' + 'to skip raising an exception when the row policy does not exist.\n' + 'If the `IF EXISTS` clause is not specified then an exception SHALL be\n' + 'raised if the row policy does not exist.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Drop_On = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Drop.On', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support removing row policy from one or more specified tables\n' + 'using the `ON` clause in the `DROP ROW POLICY` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Drop_OnCluster = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Drop.OnCluster', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support removing row policy from specified cluster\n' + 'using the `ON CLUSTER` clause in the `DROP ROW POLICY` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_Drop_Syntax = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.Drop.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for the `DROP ROW POLICY` statement.\n' + '\n' + '``` sql\n' + 'DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER cluster_name]\n' + '```\n' + ), + link=None + ) +
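+# Illustrative example of the syntax above (all names are hypothetical): +# DROP ROW POLICY IF EXISTS policy0, policy1 ON default.foo +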
+RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support showing the `CREATE ROW POLICY` statement used to create the row policy\n' + 'using the `SHOW CREATE ROW POLICY` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy_On = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.On', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support showing statement used to create row policy on specific table\n' + 'using the `ON` clause in the `SHOW CREATE ROW POLICY` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy_Syntax = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for `SHOW CREATE ROW POLICY`.\n' + '\n' + '``` sql\n' + 'SHOW CREATE [ROW] POLICY name ON [database.]table\n' + '```\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support showing row policies using the `SHOW ROW POLICIES` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies_On = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.On', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support showing row policies on a specific table\n' + 'using the `ON` clause in the `SHOW ROW POLICIES` statement.\n' + ), + link=None + ) + +RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies_Syntax = Requirement( + name='RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.Syntax', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support the following syntax for `SHOW ROW POLICIES`.\n' + '\n' + '``` sql\n' + 'SHOW [ROW] POLICIES [ON [database.]table]\n' + '```\n' + ), + link=None + ) diff --git a/tests/testflows/rbac/tests/errors.py b/tests/testflows/rbac/tests/errors.py new file mode 100755 index 00000000000..749d92e047b --- /dev/null +++ b/tests/testflows/rbac/tests/errors.py @@ -0,0 +1,86 @@ +## Syntax + +# Errors: not found + +not_found = "Exception: There is no {type} `{name}` in [disk, users.xml]" + +def user_not_found_in_disk(name): + return (192, not_found.format(type="user", name=name)) + +def role_not_found_in_disk(name): + return (255, not_found.format(type="role", name=name)) + +def settings_profile_not_found_in_disk(name): + return (180, not_found.format(type="settings profile", name=name)) + +def quota_not_found_in_disk(name): + return (199, not_found.format(type="quota", name=name)) + +def row_policy_not_found_in_disk(name): + return (11, not_found.format(type="row policy", name=name)) +
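+# Usage (illustrative): each helper in this module returns an (exitcode, message) +# pair that can be passed to node.query() to assert the expected error, e.g. +# exitcode, message = user_not_found_in_disk(name="user0") +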
+# Errors: cannot_rename + +cannot_rename = "Exception: {type} `{name}`: cannot rename to `{name_new}` because {type} `{name_new}` already exists in [disk]" +cannot_rename_exitcode = 237 + +def cannot_rename_user(name, name_new): + return (cannot_rename_exitcode, cannot_rename.format(type="user", name=name, name_new=name_new)) + +def cannot_rename_role(name, name_new): + return (cannot_rename_exitcode, cannot_rename.format(type="role", name=name, name_new=name_new)) + +def cannot_rename_settings_profile(name, name_new): + return (cannot_rename_exitcode, cannot_rename.format(type="settings profile", name=name, name_new=name_new)) + +def cannot_rename_quota(name, name_new): + return (cannot_rename_exitcode, cannot_rename.format(type="quota", name=name, name_new=name_new)) + +def cannot_rename_row_policy(name, name_new): + return (cannot_rename_exitcode, cannot_rename.format(type="row policy", name=name, name_new=name_new)) + +# Errors: cannot insert + +cannot_insert = "Exception: {type} `{name}`: cannot insert because {type} `{name}` already exists in [disk]" +cannot_insert_exitcode = 237 + +def cannot_insert_user(name): + return (cannot_insert_exitcode, cannot_insert.format(type="user", name=name)) + +def cannot_insert_role(name): + return (cannot_insert_exitcode, cannot_insert.format(type="role", name=name)) + +def cannot_insert_settings_profile(name): + return (cannot_insert_exitcode, cannot_insert.format(type="settings profile", name=name)) + +def cannot_insert_quota(name): + return (cannot_insert_exitcode, cannot_insert.format(type="quota", name=name)) + +def cannot_insert_row_policy(name): + return (cannot_insert_exitcode, cannot_insert.format(type="row policy", name=name)) + +# Error: default is readonly + +default_readonly_exitcode = 239 +cannot_remove_default = "Exception: Cannot remove {type} `default` from [users.xml] because this storage is readonly" + +def cannot_update_default(): + return (default_readonly_exitcode, "Exception: Cannot update user `default` in [users.xml] because this storage is readonly") + +def cannot_remove_user_default(): + return (default_readonly_exitcode, cannot_remove_default.format(type="user")) + +def cannot_remove_settings_profile_default(): + return (default_readonly_exitcode, cannot_remove_default.format(type="settings profile")) + +def cannot_remove_quota_default(): + return (default_readonly_exitcode, cannot_remove_default.format(type="quota")) + +# Other syntax errors + +def unknown_setting(setting): + return (115, f"Exception: Unknown setting {setting}") + +def cluster_not_found(cluster): + return (170, f"Exception: Requested cluster '{cluster}' not found") + diff --git a/tests/testflows/rbac/tests/syntax/__init__.py b/tests/testflows/rbac/tests/syntax/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/testflows/rbac/tests/syntax/alter_quota.py b/tests/testflows/rbac/tests/syntax/alter_quota.py new file mode 100644 index 00000000000..51f4ca264c0 --- /dev/null +++ b/tests/testflows/rbac/tests/syntax/alter_quota.py @@ -0,0 +1,206 @@ +from contextlib import contextmanager + +from testflows.core import * + +from rbac.requirements import * +import rbac.tests.errors as errors + +@TestFeature +@Name("alter quota") +@Args(format_description=False) +def feature(self, node="clickhouse1"): + """Check alter quota query syntax. + + ```sql + ALTER QUOTA [IF EXISTS] name [ON CLUSTER cluster_name] + [RENAME TO new_name] + [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}] + [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | MONTH} + {MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] | + NO LIMITS | TRACKING ONLY} [,...]] + [TO {role [,...] 
| ALL | ALL EXCEPT role [,...]}] + ``` + """ + node = self.context.cluster.node(node) + + def cleanup_quota(quota): + with Given(f"I ensure that quota {quota} does not exist"): + node.query(f"DROP QUOTA IF EXISTS {quota}") + + try: + with Given("I have a quota, a user, and a role"): + node.query(f"CREATE QUOTA quota0") + node.query(f"CREATE USER user0") + node.query(f"CREATE ROLE role0") + + with Scenario("I alter quota with no options", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter("1.0")]): + with When("I alter quota"): + node.query("ALTER QUOTA quota0") + + with Scenario("I alter quota that does not exist, throws an exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter("1.0")]): + quota = "quota1" + cleanup_quota(quota) + with When(f"I alter quota {quota}, which does not exist"): + exitcode, message = errors.quota_not_found_in_disk(name=quota) + node.query(f"ALTER QUOTA {quota}", exitcode=exitcode, message=message) + del quota + + with Scenario("I alter quota with if exists, quota does exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter_IfExists("1.0")]): + node.query("ALTER QUOTA IF EXISTS quota0") + + with Scenario("I alter quota with if exists, quota does not exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter_IfExists("1.0")]): + quota = "quota1" + cleanup_quota(quota) + with When(f"I alter quota {quota}, which does not exist, with IF EXISTS"): + node.query(f"ALTER QUOTA IF EXISTS {quota}") + del quota + + with Scenario("I alter quota using rename, target available", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter_Rename("1.0")]): + node.query("ALTER QUOTA quota0 RENAME TO quota0") + + with Scenario("I alter quota using rename, target unavailable", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter_Rename("1.0")]): + new_quota = "quota1" + + try: + with Given(f"Ensure target name {new_quota} is NOT available"): + node.query(f"CREATE QUOTA IF NOT EXISTS {new_quota}") + + with When(f"I try to rename to {new_quota}"): + exitcode, message = errors.cannot_rename_quota(name="quota0", name_new=new_quota) + node.query(f"ALTER QUOTA quota0 RENAME TO {new_quota}", exitcode=exitcode, message=message) + finally: + with Finally(f"I cleanup target name {new_quota}"): + node.query(f"DROP QUOTA IF EXISTS {new_quota}") + + del new_quota + + keys = ['none', 'user name', 'ip address', 'client key', 'client key or user name', 'client key or ip address'] + for key in keys: + with Scenario(f"I alter quota keyed by {key}", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter_KeyedBy("1.0"), + RQ_SRS_006_RBAC_Quota_Alter_KeyedByOptions("1.0")]): + with When("I alter quota with a key"): + node.query(f"ALTER QUOTA quota0 KEYED BY '{key}'") + + with Scenario("I alter quota for randomized interval", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter_Interval_Randomized("1.0")]): + with When("I alter quota on a randomized interval"): + node.query("ALTER QUOTA quota0 FOR RANDOMIZED INTERVAL 1 DAY NO LIMITS") + + intervals = ['SECOND', 'MINUTE', 'HOUR', 'DAY', 'MONTH'] + for i, interval in enumerate(intervals): + with Scenario(f"I alter quota for interval {interval}", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter_Interval("1.0")]): + with When(f"I alter quota for {interval}"): + node.query(f"ALTER QUOTA quota0 FOR INTERVAL 1 {interval} NO LIMITS") + + constraints = ['MAX QUERIES', 'MAX ERRORS', 'MAX RESULT ROWS', + 'MAX RESULT BYTES', 'MAX READ ROWS', 'MAX READ BYTES', 'MAX EXECUTION TIME', + 'NO LIMITS', 'TRACKING ONLY'] + for i, constraint in 
enumerate(constraints): + with Scenario(f"I alter quota for {constraint.lower()}", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter_Queries("1.0"), + RQ_SRS_006_RBAC_Quota_Alter_Errors("1.0"), + RQ_SRS_006_RBAC_Quota_Alter_ResultRows("1.0"), + RQ_SRS_006_RBAC_Quota_Alter_ReadRows("1.0"), + RQ_SRS_006_RBAC_Quota_ALter_ResultBytes("1.0"), + RQ_SRS_006_RBAC_Quota_Alter_ReadBytes("1.0"), + RQ_SRS_006_RBAC_Quota_Alter_ExecutionTime("1.0"), + RQ_SRS_006_RBAC_Quota_Alter_NoLimits("1.0"), + RQ_SRS_006_RBAC_Quota_Alter_TrackingOnly("1.0")]): + with When("I alter quota for a constraint"): + node.query(f"ALTER QUOTA quota0 FOR INTERVAL 1 DAY {constraint}{' 1024' if constraint.startswith('MAX') else ''}") + + with Scenario("I alter quota for multiple constraints", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter_Interval("1.0"), + RQ_SRS_006_RBAC_Quota_Alter_Queries("1.0")]): + node.query("ALTER QUOTA quota0 \ + FOR INTERVAL 1 DAY NO LIMITS, \ + FOR INTERVAL 2 DAY MAX QUERIES 124, \ + FOR INTERVAL 1 MONTH TRACKING ONLY") + + with Scenario("I alter quota to assign to one role", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter_Assignment("1.0")]): + with When("I alter quota to a role"): + node.query("ALTER QUOTA quota0 TO role0") + + with Scenario("I alter quota to assign to role that does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter_Assignment("1.0")]): + role = "role1" + with Given(f"I drop {role} if it exists"): + node.query(f"DROP ROLE IF EXISTS {role}") + with Then(f"I alter a quota, assign to role {role}, which does not exist"): + exitcode, message = errors.role_not_found_in_disk(name=role) + node.query(f"ALTER QUOTA quota0 TO {role}", exitcode=exitcode, message=message) + del role + + with Scenario("I alter quota to assign to all except role that does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter_Assignment("1.0")]): + role = "role1" + with Given(f"I drop {role} if it exists"): + node.query(f"DROP ROLE IF EXISTS {role}") + with Then(f"I alter a quota, assign to all except role {role}, which does not exist"): + exitcode, message = errors.role_not_found_in_disk(name=role) + node.query(f"ALTER QUOTA quota0 TO ALL EXCEPT {role}", exitcode=exitcode, message=message) + del role + + with Scenario("I alter quota to assign to one role and one user", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter_Assignment("1.0")]): + with When("I alter quota to a role and a user"): + node.query("ALTER QUOTA quota0 TO role0, user0") + + with Scenario("I alter quota assigned to none", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter_Assignment_None("1.0")]): + with When("I alter quota to none"): + node.query("ALTER QUOTA quota0 TO NONE") + + with Scenario("I alter quota to assign to all", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter_Assignment_All("1.0")]): + with When("I alter quota to all"): + node.query("ALTER QUOTA quota0 TO ALL") + + with Scenario("I alter quota to assign to all except one role", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter_Assignment_Except("1.0")]): + with When("I alter quota to all except one role"): + node.query("ALTER QUOTA quota0 TO ALL EXCEPT role0") + + with Scenario("I alter quota to assign to all except multiple roles", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter_Assignment_Except("1.0")]): + with When("I alter quota to all except multiple roles"): + node.query("ALTER QUOTA quota0 TO ALL EXCEPT role0, user0") + + with Scenario("I alter quota on 
cluster", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter_Cluster("1.0")]): + try: + with Given("I have a quota on a cluster"): + node.query("CREATE QUOTA quota1 ON CLUSTER sharded_cluster") + + with When("I run alter quota command on a cluster"): + node.query("ALTER QUOTA quota1 ON CLUSTER sharded_cluster") + with And("I run alter quota command on a cluster with a key"): + node.query("ALTER QUOTA quota1 ON CLUSTER sharded_cluster KEYED BY 'none'") + with And("I run alter quota command on a cluster with an interval"): + node.query("ALTER QUOTA quota1 ON CLUSTER sharded_cluster FOR INTERVAL 1 DAY TRACKING ONLY") + with And("I run alter quota command on a cluster for all"): + node.query("ALTER QUOTA quota1 ON CLUSTER sharded_cluster TO ALL") + finally: + with Finally("I drop the quota"): + node.query("DROP QUOTA IF EXISTS quota1 ON CLUSTER sharded_cluster") + + with Scenario("I alter quota on nonexistent cluster, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Alter_Cluster("1.0")]): + with When("I run alter quota on a cluster"): + exitcode, message = errors.cluster_not_found("fake_cluster") + node.query("ALTER QUOTA quota0 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) + + finally: + with Finally("I drop the quota and all the users and roles"): + node.query(f"DROP QUOTA IF EXISTS quota0") + node.query(f"DROP USER IF EXISTS user0") + node.query(f"DROP ROLE IF EXISTS role0") diff --git a/tests/testflows/rbac/tests/syntax/alter_role.py b/tests/testflows/rbac/tests/syntax/alter_role.py new file mode 100644 index 00000000000..839fe57c8df --- /dev/null +++ b/tests/testflows/rbac/tests/syntax/alter_role.py @@ -0,0 +1,196 @@ +from contextlib import contextmanager + +from testflows.core import * + +from rbac.requirements import * +import rbac.tests.errors as errors + +@TestFeature +@Name("alter role") +def feature(self, node="clickhouse1"): + """Check alter role query syntax. + + ```sql + ALTER ROLE [IF EXISTS] name [ON CLUSTER cluster_name] + [RENAME TO new_name] + [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] 
+ ``` + """ + node = self.context.cluster.node(node) + + @contextmanager + def setup(role, profile=None): + try: + with Given("I have a role"): + node.query(f"CREATE ROLE OR REPLACE {role}") + if profile is not None: # create the profile only when a name is given + with Given("And I have a profile"): + node.query(f"CREATE SETTINGS PROFILE OR REPLACE {profile}") + yield + finally: + with Finally("I drop the role"): + node.query(f"DROP ROLE IF EXISTS {role}") + if profile is not None: + with Finally("I drop the profile"): + node.query(f"DROP SETTINGS PROFILE IF EXISTS {profile}") + + def cleanup_role(role): + with Given(f"I ensure that role {role} does not exist"): + node.query(f"DROP ROLE IF EXISTS {role}") + + with Scenario("I alter role with no options", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Alter("1.0")]): + with setup("role0"): + with When("I alter role"): + node.query("ALTER ROLE role0") + + with Scenario("I alter role that does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Alter("1.0")]): + role = "role0" + cleanup_role(role) + with When(f"I alter role {role} that does not exist"): + exitcode, message = errors.role_not_found_in_disk(name=role) + node.query(f"ALTER ROLE {role}", exitcode=exitcode, message=message) + del role + + with Scenario("I alter role if exists, role does exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Alter_IfExists("1.0")]): + with setup("role1"): + with When("I alter role with if exists"): + node.query("ALTER ROLE IF EXISTS role1") + + with Scenario("I alter role if exists, role does not exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Alter_IfExists("1.0")]): + role = "role0" + cleanup_role(role) + with When(f"I alter role {role} that does not exist"): + node.query(f"ALTER ROLE IF EXISTS {role}") + del role + + with Scenario("I alter role on cluster", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Alter_Cluster("1.0")]): + try: + with Given("I have a role on a cluster"): + node.query("CREATE ROLE role1 ON CLUSTER sharded_cluster") + with When("I run alter role on a cluster"): + node.query("ALTER ROLE role1 ON CLUSTER sharded_cluster") + with And("I rename role on a cluster"): + node.query("ALTER ROLE role1 ON CLUSTER sharded_cluster RENAME TO role2") + with And("I alter role with settings on a cluster"): + node.query("ALTER ROLE role2 ON CLUSTER sharded_cluster SETTINGS max_memory_usage=10000000 READONLY") + finally: + with Finally("I drop the role"): + node.query("DROP ROLE IF EXISTS role1,role2 ON CLUSTER sharded_cluster") + + with Scenario("I alter role on nonexistent cluster, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Alter_Cluster("1.0")]): + with When("I run alter role on a cluster"): + exitcode, message = errors.cluster_not_found("fake_cluster") + node.query("ALTER ROLE role1 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) + + with Scenario("I alter role to rename, new name is available", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Alter_Rename("1.0")]): + with setup("role2"): + new_role = "role3" + try: + with Given(f"Ensure target name {new_role} is available"): + node.query(f"DROP ROLE IF EXISTS {new_role}") + with When(f"I try to rename to {new_role}"): + node.query(f"ALTER ROLE role2 RENAME TO {new_role}") + finally: + with Finally(f"I cleanup new name {new_role}"): + node.query(f"DROP ROLE IF EXISTS {new_role}") + del new_role + + with Scenario("I alter role to rename, new name is not available, throws exception", flags=TE, requirements=[ 
RQ_SRS_006_RBAC_Role_Alter_Rename("1.0")]): + with setup("role2a"): + new_role = "role3a" + try: + with Given(f"Ensure target name {new_role} is NOT available"): + node.query(f"CREATE ROLE IF NOT EXISTS {new_role}") + with When(f"I try to rename to {new_role}"): + exitcode, message = errors.cannot_rename_role(name="role2a", name_new=new_role) + node.query(f"ALTER ROLE role2a RENAME TO {new_role}", exitcode=exitcode, message=message) + finally: + with Finally(f"I cleanup target name {new_role}"): + node.query(f"DROP ROLE IF EXISTS {new_role}") + del new_role + + with Scenario("I alter role settings profile", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with setup("role4"): + with When("I alter role with settings profile"): + node.query("ALTER ROLE role4 SETTINGS PROFILE default, max_memory_usage=10000000 READONLY") + + with Scenario("I alter role settings profile, profile does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with setup("role4a"): + with Given("I ensure profile profile0 does not exist"): + node.query("DROP SETTINGS PROFILE IF EXISTS profile0") + with When("I alter role with settings profile that does not exist"): + exitcode, message = errors.settings_profile_not_found_in_disk("profile0") + node.query("ALTER ROLE role4a SETTINGS PROFILE profile0", exitcode=exitcode, message=message) + + with Scenario("I alter role settings profile multiple", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with setup("role4b", profile="profile0"): + with When("I alter role with multiple profiles"): + node.query("ALTER ROLE role4b SETTINGS PROFILE default, PROFILE profile0, \ + max_memory_usage=10000000 READONLY") + + with Scenario("I alter role settings without profile", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with setup("role5"): + with When("I alter role with settings and no profile"): + node.query("ALTER ROLE role5 SETTINGS max_memory_usage=10000000 READONLY") + + with Scenario("I alter role settings, variable does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with setup("role5a"): + with When("I alter role using settings and nonexistent value"): + exitcode, message = errors.unknown_setting("fake_setting") + node.query("ALTER ROLE role5a SETTINGS fake_setting = 100000001", exitcode=exitcode, message=message) + + + with Scenario("I alter role settings without profile multiple", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with setup("role6"): + with When("I alter role with multiple settings and no profile"): + node.query("ALTER ROLE role6 SETTINGS max_memory_usage=10000000 READONLY, \ + max_rows_to_read MIN 20 MAX 25") + + with Scenario("I alter role settings with multiple profiles multiple variables", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with setup("role7", profile="profile1"): + with When("I alter role with multiple settings and profiles"): + node.query("ALTER ROLE role7 SETTINGS PROFILE default, PROFILE profile1, \ + max_memory_usage=10000000 READONLY, max_rows_to_read MIN 20 MAX 25") + + with Scenario("I alter role settings readonly", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with setup("role8"): + with When("I alter role with readonly"): + node.query("ALTER ROLE role8 SETTINGS max_memory_usage READONLY") + + with Scenario("I alter role settings writable", flags=TE, requirements=[ + 
RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with setup("role9"): + with When("I alter role with writable"): + node.query("ALTER ROLE role9 SETTINGS max_memory_usage WRITABLE") + + with Scenario("I alter role settings min, with and without = sign", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with setup("role10"): + with When("I set min, no equals"): + node.query("ALTER ROLE role10 SETTINGS max_memory_usage MIN 200") + with When("I set min, yes equals"): + node.query("ALTER ROLE role10 SETTINGS max_memory_usage MIN = 200") + + with Scenario("I alter role settings max, with and without = sign", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with setup("role11"): + with When("I set max, no equals"): + node.query("ALTER ROLE role11 SETTINGS max_memory_usage MAX 2000") + with When("I set max, yes equals"): + node.query("ALTER ROLE role11 SETTINGS max_memory_usage MAX = 200") \ No newline at end of file diff --git a/tests/testflows/rbac/tests/syntax/alter_row_policy.py b/tests/testflows/rbac/tests/syntax/alter_row_policy.py new file mode 100644 index 00000000000..79af04db771 --- /dev/null +++ b/tests/testflows/rbac/tests/syntax/alter_row_policy.py @@ -0,0 +1,244 @@ +from contextlib import contextmanager + +from testflows.core import * + +from rbac.requirements import * +import rbac.tests.errors as errors + +@TestFeature +@Name("alter row policy") +@Args(format_description=False) +def feature(self, node="clickhouse1"): + """Check alter row policy query syntax. + + ```sql + ALTER [ROW] POLICY [IF EXISTS] name [ON CLUSTER cluster_name] ON [database.]table + [RENAME TO new_name] + [AS {PERMISSIVE | RESTRICTIVE}] + [FOR SELECT] + [USING {condition | NONE}][,...] + [TO {role [,...] | ALL | ALL EXCEPT role [,...]}] + ``` + """ + node = self.context.cluster.node(node) + + @contextmanager + def cleanup(policy): + try: + with Given("I have a row policy"): + node.query(f"CREATE ROW POLICY {policy} ON default.foo") + yield + finally: + with Finally("I drop the row policy"): + node.query(f"DROP ROW POLICY IF EXISTS {policy} ON default.foo") + + def cleanup_policy(policy): + with Given(f"I ensure that policy {policy} does not exist"): + node.query(f"DROP ROW POLICY IF EXISTS {policy} ON default.foo") + + try: + with Given("I have a table and some roles"): + node.query(f"CREATE TABLE default.foo (x UInt64, y String) Engine=Memory") + node.query(f"CREATE ROLE role0") + node.query(f"CREATE ROLE role1") + + with Scenario("I alter row policy with no options", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + with cleanup("policy0"): + with When("I alter row policy"): + node.query("ALTER ROW POLICY policy0 ON default.foo") + + with Scenario("I alter row policy using short syntax with no options", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + with cleanup("policy1"): + with When("I alter row policy short form"): + node.query("ALTER POLICY policy1 ON default.foo") + + with Scenario("I alter row policy, does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + policy = "policy2" + cleanup_policy(policy) + with When(f"I alter row policy {policy} that doesn't exist"): + exitcode, message = errors.row_policy_not_found_in_disk(name=f"{policy} ON default.foo") + node.query(f"ALTER ROW POLICY {policy} ON default.foo", exitcode=exitcode, 
message=message) + del policy + + with Scenario("I alter row policy if exists", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_IfExists("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + with cleanup("policy2"): + with When("I alter row policy using if exists"): + node.query("ALTER ROW POLICY IF EXISTS policy2 ON default.foo") + + with Scenario("I alter row policy if exists, policy does not exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_IfExists("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + policy = "policy2" + cleanup_policy(policy) + with When(f"I alter row policy {policy} that doesn't exist"): + node.query(f"ALTER ROW POLICY IF EXISTS {policy} ON default.foo") + del policy + + with Scenario("I alter row policy to rename, target available", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_Rename("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + with cleanup("policy3"): + with When("I alter row policy with rename"): + node.query("ALTER ROW POLICY policy3 ON default.foo RENAME TO policy3") + + with Scenario("I alter row policy to rename, target unavailable", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_Rename("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + with cleanup("policy3"): + new_policy = "policy4" + try: + with Given(f"Ensure target name {new_policy} is NOT available"): + node.query(f"CREATE ROW POLICY IF NOT EXISTS {new_policy} ON default.foo") + with When(f"I try to rename to {new_policy}"): + exitcode, message = errors.cannot_rename_row_policy(name="policy3 ON default.foo", + name_new=f"{new_policy} ON default.foo") + node.query(f"ALTER ROW POLICY policy3 ON default.foo RENAME TO {new_policy}", exitcode=exitcode, message=message) + finally: + with Finally(f"I cleanup target name {new_policy}"): + node.query(f"DROP ROW POLICY IF EXISTS {new_policy} ON default.foo") + del new_policy + + with Scenario("I alter row policy to permissive", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_Access_Permissive("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + with cleanup("policy4"): + with When("I alter row policy as permissive"): + node.query("ALTER ROW POLICY policy4 ON default.foo AS PERMISSIVE") + + with Scenario("I alter row policy to restrictive", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_Access_Restrictive("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + with cleanup("policy5"): + with When("I alter row policy as restrictive"): + node.query("ALTER ROW POLICY policy5 ON default.foo AS RESTRICTIVE") + + with Scenario("I alter row policy for select", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_ForSelect("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + with cleanup("policy6"): + with When("I alter row policy using for select"): + node.query("ALTER ROW POLICY policy6 ON default.foo FOR SELECT USING x > 10") + + with Scenario("I alter row policy using condition", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_Condition("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + with cleanup("policy6"): + with When("I alter row policy with condition"): + node.query("ALTER ROW POLICY policy6 ON default.foo USING x > 10") + + with Scenario("I alter row policy using condition none", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_Condition_None("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + with cleanup("policy7"): + with When("I alter row policy using no condition"): + node.query("ALTER ROW POLICY policy7 ON default.foo USING NONE") + + with Scenario("I alter row policy to one role", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + with cleanup("policy8"): + with When("I alter row policy to a role"): + node.query("ALTER ROW POLICY policy8 ON default.foo TO role0") + + with Scenario("I alter row policy to assign to role that does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment("1.0")]): + role = "role2" + with cleanup("policy8a"): + with Given(f"I drop {role} if it exists"): + node.query(f"DROP ROLE IF EXISTS {role}") + with Then(f"I alter a row policy, assign to role {role}, which does not exist"): + exitcode, message = errors.role_not_found_in_disk(name=role) + node.query(f"ALTER ROW POLICY policy8a ON default.foo TO {role}", exitcode=exitcode, message=message) + del role + + with Scenario("I alter row policy to assign to all except role that does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment("1.0")]): + role = "role2" + with cleanup("policy8a"): + with Given(f"I drop {role} if it exists"): + node.query(f"DROP ROLE IF EXISTS {role}") + with Then(f"I alter a row policy, assign to all except role {role}, which does not exist"): + exitcode, message = errors.role_not_found_in_disk(name=role) + node.query(f"ALTER ROW POLICY policy8a ON default.foo TO ALL EXCEPT {role}", exitcode=exitcode, message=message) + del role + + with Scenario("I alter row policy assigned to multiple roles", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + with cleanup("policy9"): + with When("I alter row policy to multiple roles"): + node.query("ALTER ROW POLICY policy9 ON default.foo TO role0, role1") + + with Scenario("I alter row policy assigned to all", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_All("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + with cleanup("policy10"): + with When("I alter row policy to all"): + node.query("ALTER ROW POLICY policy10 ON default.foo TO ALL") + + with Scenario("I alter row policy assigned to all except one role", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_AllExcept("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + with cleanup("policy11"): + with When("I alter row policy to all except"): + node.query("ALTER ROW POLICY policy11 ON default.foo TO ALL EXCEPT role0") + + with Scenario("I alter row policy assigned to all except multiple roles", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_AllExcept("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + with cleanup("policy12"): + with When("I alter row policy to all except multiple roles"): + node.query("ALTER ROW POLICY policy12 ON default.foo TO ALL EXCEPT role0, role1") + + with Scenario("I alter row policy assigned to none", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_None("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + with cleanup("policy12"): + with When("I alter row policy to no assignment"): + node.query("ALTER ROW POLICY policy12 ON default.foo TO NONE") + + # Official syntax: ON CLUSTER cluster_name ON database.table + # Working syntax: both orderings of ON CLUSTER and TABLE clauses work + + with Scenario("I alter row policy on cluster", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_OnCluster("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + try: + with Given("I have a row policy"): + node.query("CREATE ROW POLICY policy13 ON CLUSTER sharded_cluster ON default.foo") + with When("I run alter row policy command"): + node.query("ALTER ROW POLICY policy13 ON CLUSTER sharded_cluster ON default.foo") + finally: + with Finally("I drop the row policy"): + node.query("DROP ROW POLICY IF EXISTS policy13 ON CLUSTER sharded_cluster ON default.foo") + + with Scenario("I alter row policy on fake cluster, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_OnCluster("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + with When("I run alter row policy command"): + exitcode, message = errors.cluster_not_found("fake_cluster") + node.query("ALTER ROW POLICY policy13 ON CLUSTER fake_cluster ON default.foo", exitcode=exitcode, message=message) + + with Scenario("I alter row policy on cluster after table", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Alter_OnCluster("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + try: + with Given("I have a row policy"): + node.query("CREATE ROW POLICY policy14 ON default.foo ON CLUSTER sharded_cluster") + with When("I run alter row policy command"): + node.query("ALTER ROW POLICY policy14 ON default.foo ON CLUSTER sharded_cluster") + finally: + with Finally("I drop the row policy"): + node.query("DROP ROW POLICY IF EXISTS policy14 ON default.foo ON CLUSTER sharded_cluster") + finally: + with Finally("I drop the table and the roles"): + node.query(f"DROP TABLE IF EXISTS default.foo") + node.query(f"DROP ROLE IF EXISTS role0, role1") \ No newline at end of file diff --git a/tests/testflows/rbac/tests/syntax/alter_settings_profile.py b/tests/testflows/rbac/tests/syntax/alter_settings_profile.py new file mode 100644 index 00000000000..d4ce65da785 --- /dev/null +++ b/tests/testflows/rbac/tests/syntax/alter_settings_profile.py @@ -0,0 +1,232 @@ +from contextlib import contextmanager + +from testflows.core import * + +from rbac.requirements import * +import rbac.tests.errors as errors + +@TestFeature +@Name("alter settings profile") +@Args(format_description=False) +def feature(self, node="clickhouse1"): + """Check alter settings profile query syntax. + + ```sql + ALTER SETTINGS PROFILE [IF EXISTS] name + [ON CLUSTER cluster_name] + [RENAME TO new_name] + [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | INHERIT 'profile_name'] [,...] + [TO {user_or_role [,...] | NONE | ALL | ALL EXCEPT user_or_role [,...]}] + ``` + """
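+ # Illustrative example of the syntax above (profile and role names are hypothetical): + # ALTER SETTINGS PROFILE profile0 RENAME TO profile1 SETTINGS max_memory_usage = 100000001 TO role0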
+ node = self.context.cluster.node(node) + + def cleanup_profile(profile): + with Given(f"I ensure that profile {profile} does not exist"): + node.query(f"DROP SETTINGS PROFILE IF EXISTS {profile}") + + try: + with Given("I have a profile and some users and roles"): + node.query(f"CREATE SETTINGS PROFILE profile0") + node.query(f"CREATE USER user0") + node.query(f"CREATE ROLE role0") + + with Scenario("I alter settings profile with no options", flags=TE, requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter("1.0")]): + with When("I alter settings profile"): + node.query("ALTER SETTINGS PROFILE profile0") + + with Scenario("I alter settings profile short form", flags=TE, requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter("1.0")]): + with When("I short form alter settings profile"): + node.query("ALTER PROFILE profile0") + + with Scenario("I alter settings profile that does not exist, throws exception", flags=TE, requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter("1.0")]): + profile = "profile1" + + cleanup_profile(profile) + with When(f"I alter settings profile {profile} that doesn't exist"): + exitcode, message = errors.settings_profile_not_found_in_disk(name=profile) + node.query(f"ALTER SETTINGS PROFILE {profile}", exitcode=exitcode, message=message) + del profile + + with Scenario("I alter settings profile if exists", flags=TE, requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter_IfExists("1.0")]): + with When("I alter settings profile using if exists"): + node.query("ALTER SETTINGS PROFILE IF EXISTS profile0") + + with Scenario("I alter settings profile if exists, profile does not exist", flags=TE, requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter_IfExists("1.0")]): + profile = "profile1" + + cleanup_profile(profile) + with When(f"I alter settings profile {profile} using if exists"): + node.query(f"ALTER SETTINGS PROFILE IF EXISTS {profile}") + + del profile + + with Scenario("I alter settings profile to rename, target available", flags=TE, requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter_Rename("1.0")]): + with When("I alter settings profile by renaming it"): + node.query("ALTER SETTINGS PROFILE profile0 RENAME TO profile0") + + with Scenario("I alter settings profile to rename, target unavailable", flags=TE, requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter_Rename("1.0")]): + new_profile = "profile1" + + try: + with Given(f"Ensure target name {new_profile} is NOT available"): + node.query(f"CREATE SETTINGS PROFILE IF NOT EXISTS {new_profile}") + + with When(f"I try to rename to {new_profile}"): + exitcode, message = errors.cannot_rename_settings_profile(name="profile0", name_new=new_profile) + node.query(f"ALTER SETTINGS PROFILE profile0 RENAME TO {new_profile}", exitcode=exitcode, message=message) + finally: + with Finally(f"I cleanup target name {new_profile}"): + node.query(f"DROP SETTINGS PROFILE IF EXISTS {new_profile}") + + del new_profile + + with Scenario("I alter settings profile with a setting value", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables("1.0"), + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Value("1.0")]): + with When("I alter settings profile using settings"): + node.query("ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage = 100000001") + + with Scenario("I alter settings profile with a setting value, does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables("1.0"), 
RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Value("1.0")]): + with When("I alter settings profile using settings and nonexistent value"): + exitcode, message = errors.unknown_setting("fake_setting") + node.query("ALTER SETTINGS PROFILE profile0 SETTINGS fake_setting = 100000001", exitcode=exitcode, message=message) + + with Scenario("I alter settings profile with a min setting value", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints("1.0")]): + with When("I alter settings profile using 2 minimum formats"): + node.query("ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage MIN 100000001") + node.query("ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage MIN = 100000001") + + with Scenario("I alter settings profile with a max setting value", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints("1.0")]): + with When("I alter settings profile using 2 maximum formats"): + node.query("ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage MAX 100000001") + node.query("ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage MAX = 100000001") + + with Scenario("I alter settings profile with min and max setting values", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints("1.0")]): + with When("I alter settings profile with both min and max"): + node.query("ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage MIN 100000001 MAX 200000001") + + with Scenario("I alter settings profile with a readonly setting", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints("1.0")]): + with When("I alter settings profile with readonly"): + node.query("ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage READONLY") + + with Scenario("I alter settings profile with a writable setting", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints("1.0")]): + with When("I alter settings profile with writable"): + node.query("ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage WRITABLE") + + with Scenario("I alter settings profile with inherited settings", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_Inherit("1.0")]): + with When("I alter settings profile with inherit"): + node.query("ALTER SETTINGS PROFILE profile0 SETTINGS INHERIT 'default'") + + with Scenario("I alter settings profile with inherit, parent profile does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_Inherit("1.0")]): + profile = "profile3" + with Given(f"I ensure that profile {profile} does not exist"): + node.query(f"DROP SETTINGS PROFILE IF EXISTS {profile}") + with When("I alter settings profile inherit from nonexistent parent"): + exitcode, message = errors.settings_profile_not_found_in_disk(profile) + node.query(f"ALTER PROFILE profile0 SETTINGS INHERIT {profile}", exitcode=exitcode, message=message) + del profile + + with Scenario("I alter settings profile with multiple settings", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables("1.0"), + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Value("1.0")]): + with When("I alter settings profile with multiple settings"): + node.query("ALTER SETTINGS PROFILE profile0" + " SETTINGS max_memory_usage = 100000001" + " SETTINGS max_memory_usage_for_user = 100000001") + + with Scenario("I alter settings profile with multiple settings short form", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables("1.0"), + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Value("1.0")]): + with When("I alter settings profile with short form multiple settings"): + node.query("ALTER SETTINGS PROFILE profile0" + " SETTINGS max_memory_usage = 100000001," + " max_memory_usage_for_user = 100000001") + + with Scenario("I alter settings profile assigned to one role", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment("1.0")]): + with When("I alter settings profile with assignment to role"): + node.query("ALTER SETTINGS PROFILE profile0 TO role0") + + with Scenario("I alter settings profile to assign to role that does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment("1.0")]): + role = "role1" + with Given(f"I drop {role} if it exists"): + node.query(f"DROP ROLE IF EXISTS {role}") + with Then(f"I alter a settings profile, assign to role {role}, which does not exist"): + exitcode, message = errors.role_not_found_in_disk(name=role) + node.query(f"ALTER SETTINGS PROFILE profile0 TO {role}", exitcode=exitcode, message=message) + del role + + with Scenario("I alter settings profile to assign to all except role that does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment("1.0")]): + role = "role1" + with Given(f"I drop {role} if it exists"): + node.query(f"DROP ROLE IF EXISTS {role}") + with Then(f"I alter a settings profile, assign to all except role {role}, which does not exist"): + exitcode, message = errors.role_not_found_in_disk(name=role) + node.query(f"ALTER SETTINGS PROFILE profile0 TO ALL EXCEPT {role}", exitcode=exitcode, message=message) + del role + + with Scenario("I alter settings profile assigned to multiple roles", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment("1.0")]): + with When("I alter settings profile with assignment to multiple roles"): + node.query("ALTER SETTINGS PROFILE profile0 TO role0, user0") + + with Scenario("I alter settings profile assigned to all", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_All("1.0")]): + with When("I alter settings profile with assignment to all"): + node.query("ALTER SETTINGS PROFILE profile0 TO ALL") + + with Scenario("I alter settings profile assigned to all except one role", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_AllExcept("1.0")]): + with When("I alter settings profile with assignment to all except a role"): + node.query("ALTER SETTINGS PROFILE profile0 TO ALL EXCEPT role0") + + with Scenario("I alter settings profile assigned to all except multiple roles", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_AllExcept("1.0")]): + with When("I alter settings profile with assignment to all except multiple roles"): + node.query("ALTER SETTINGS PROFILE profile0 TO ALL EXCEPT role0, user0") + + with Scenario("I alter settings profile assigned to none", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_None("1.0")]): + with When("I alter settings profile with assignment to none"): + node.query("ALTER SETTINGS PROFILE profile0 TO NONE") + + with Scenario("I alter settings profile on cluster", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_OnCluster("1.0")]): + try: + with Given("I have a settings profile on cluster"): + node.query("CREATE SETTINGS PROFILE profile1 ON CLUSTER sharded_cluster") + with When("I 
run alter settings profile command"): + node.query("ALTER SETTINGS PROFILE profile1 ON CLUSTER sharded_cluster") + with And("I alter settings profile with settings"): + node.query("ALTER SETTINGS PROFILE profile1 ON CLUSTER sharded_cluster SETTINGS max_memory_usage = 100000001") + with And("I alter settings profile with inherit"): + node.query("ALTER SETTINGS PROFILE profile1 ON CLUSTER sharded_cluster SETTINGS INHERIT 'default'") + with And("I alter settings profile to all"): + node.query("ALTER SETTINGS PROFILE profile1 ON CLUSTER sharded_cluster TO ALL") + finally: + with Finally("I drop the settings profile"): + node.query("DROP SETTINGS PROFILE IF EXISTS profile1 ON CLUSTER sharded_cluster") + + with Scenario("I alter settings profile on fake cluster, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_OnCluster("1.0")]): + with When("I run alter settings profile command"): + exitcode, message = errors.cluster_not_found("fake_cluster") + node.query("ALTER SETTINGS PROFILE profile1 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) + + finally: + with Finally("I drop the profile and all the users and roles"): + node.query(f"DROP SETTINGS PROFILE IF EXISTS profile0") + node.query(f"DROP USER IF EXISTS user0") + node.query(f"DROP ROLE IF EXISTS role0") diff --git a/tests/testflows/rbac/tests/syntax/alter_user.py b/tests/testflows/rbac/tests/syntax/alter_user.py new file mode 100644 index 00000000000..aab610c8f79 --- /dev/null +++ b/tests/testflows/rbac/tests/syntax/alter_user.py @@ -0,0 +1,324 @@ +import hashlib +from contextlib import contextmanager + +from testflows.core import * + +from rbac.requirements import * +import rbac.tests.errors as errors + +@TestFeature +@Name("alter user") +@Args(format_description=False) +def feature(self, node="clickhouse1"): + """Check alter user query syntax. + + ```sql + ALTER USER [IF EXISTS] name [ON CLUSTER cluster_name] + [RENAME TO new_name] + [IDENTIFIED [WITH {PLAINTEXT_PASSWORD|SHA256_PASSWORD|DOUBLE_SHA1_PASSWORD}] BY {'password'|'hash'}] + [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] + [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ] + [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] 
+ ``` + """ + node = self.context.cluster.node(node) + + @contextmanager + def setup(user): + try: + with Given("I have a user"): + node.query(f"CREATE USER OR REPLACE {user}") + yield + finally: + with Finally("I drop the user", flags=TE): + node.query(f"DROP USER IF EXISTS {user}") + + with Scenario("I alter user, base command", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter("1.0")]): + with setup("user0"): + with When("I alter user"): + node.query("ALTER USER user0") + + with Scenario("I alter user that does not exist without if exists, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter("1.0")]): + with When("I run alter user command, expecting error 192"): + exitcode, message = errors.user_not_found_in_disk(name="user0") + node.query(f"ALTER USER user0",exitcode=exitcode, message=message) + + with Scenario("I alter user with if exists", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_IfExists("1.0")]): + with setup("user0"): + with When(f"I alter user with if exists"): + node.query(f"ALTER USER IF EXISTS user0") + + with Scenario("I alter user that does not exist with if exists", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_IfExists("1.0")]): + user = "user0" + with Given("I don't have a user"): + node.query(f"DROP USER IF EXISTS {user}") + with When(f"I alter user {user} with if exists"): + node.query(f"ALTER USER IF EXISTS {user}") + del user + + with Scenario("I alter user on a cluster", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Cluster("1.0")]): + with Given("I have a user on a cluster"): + node.query("CREATE USER OR REPLACE user0 ON CLUSTER sharded_cluster") + with When("I alter user on a cluster"): + node.query("ALTER USER user0 ON CLUSTER sharded_cluster") + with Finally("I drop user from cluster"): + node.query("DROP USER IF EXISTS user0 ON CLUSTER sharded_cluster") + + with Scenario("I alter user on a fake cluster, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Cluster("1.0")]): + with When("I alter user on a fake cluster"): + exitcode, message = errors.cluster_not_found("fake_cluster") + node.query("ALTER USER user0 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) + + with Scenario("I alter user to rename, target available", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Rename("1.0")]): + with setup("user15"): + with When("I alter user name"): + node.query("ALTER USER user15 RENAME TO user15") + + with Scenario("I alter user to rename, target unavailable", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Rename("1.0")]): + with setup("user15"): + new_user = "user16" + try: + with Given(f"Ensure target name {new_user} is NOT available"): + node.query(f"CREATE USER IF NOT EXISTS {new_user}") + with When(f"I try to rename to {new_user}"): + exitcode, message = errors.cannot_rename_user(name="user15", name_new=new_user) + node.query(f"ALTER USER user15 RENAME TO {new_user}", exitcode=exitcode, message=message) + finally: + with Finally(f"I cleanup target name {new_user}"): + node.query(f"DROP USER IF EXISTS {new_user}") + del new_user + + with Scenario("I alter user password plaintext password", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Password_PlainText("1.0")]): + with setup("user1"): + with When("I alter user with plaintext password"): + node.query("ALTER USER user1 IDENTIFIED WITH PLAINTEXT_PASSWORD BY 'mypassword'", step=When) + + with Scenario("I alter user password to sha256", flags=TE, requirements=[ + 
RQ_SRS_006_RBAC_User_Alter_Password_Sha256Password("1.0")]): + with setup("user2"): + with When("I alter user with sha256_password"): + password = hashlib.sha256("mypassword".encode("utf-8")).hexdigest() + node.query(f"ALTER USER user2 IDENTIFIED WITH SHA256_PASSWORD BY '{password}'",step=When) + + with Scenario("I alter user password to double_sha1_password", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Password_DoubleSha1Password("1.0")]): + with setup("user3"): + with When("I alter user with double_sha1_password"): + def hash(password): + return hashlib.sha1(password.encode("utf-8")).hexdigest() + password = hash(hash("mypassword")) + node.query(f"ALTER USER user3 IDENTIFIED WITH DOUBLE_SHA1_PASSWORD BY '{password}'", step=When) + + with Scenario("I alter user host local", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Host_Local("1.0")]): + with setup("user4"): + with When("I alter user with host local"): + node.query("ALTER USER user4 HOST LOCAL") + + with Scenario("I alter user host name", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Host_Name("1.0")]): + with setup("user5"): + with When("I alter user with host name"): + node.query("ALTER USER user5 HOST NAME 'localhost', NAME 'clickhouse.com'") + + with Scenario("I alter user host regexp", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Host_Regexp("1.0")]): + with setup("user6"): + with When("I alter user with host regexp"): + node.query("ALTER USER user6 HOST REGEXP 'lo..*host', 'lo*host'") + + with Scenario("I alter user host ip", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Host_IP("1.0")]): + with setup("user7"): + with When("I alter user with host ip"): + node.query("ALTER USER user7 HOST IP '127.0.0.1', IP '127.0.0.2'") + + with Scenario("I alter user host like", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Host_Like("1.0")]): + with setup("user8"): + with When("I alter user with host like"): + node.query("ALTER USER user8 HOST LIKE '%.clickhouse.com'") + + with Scenario("I alter user host any", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Host_Any("1.0")]): + with setup("user9"): + with When("I alter user with host any"): + node.query("ALTER USER user9 HOST ANY") + + with Scenario("I alter user host many hosts", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Host_Like("1.0")]): + with setup("user11"): + with When("I alter user with multiple hosts"): + node.query("ALTER USER user11 HOST LIKE '%.clickhouse.com', \ + IP '127.0.0.2', NAME 'localhost', REGEXP 'lo*host'") + + with Scenario("I alter user default role set to none", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Host_None("1.0")]): + with setup("user12"): + with When("I alter user with default role none"): + node.query("ALTER USER user12 DEFAULT ROLE NONE") + + with Scenario("I alter user default role set to all", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_DefaultRole_All("1.0")]): + with setup("user13"): + with When("I alter user with all roles set to default"): + node.query("ALTER USER user13 DEFAULT ROLE ALL") + + @contextmanager + def setup_role(role): + try: + with Given(f"I have a role {role}"): + node.query(f"CREATE ROLE OR REPLACE {role}") + yield + finally: + with Finally(f"I drop the role {role}", flags=TE): + node.query(f"DROP ROLE IF EXISTS {role}") + + with Scenario("I alter user default role", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")]): + with setup("user14"), setup_role("role2"): + with Given("I have a user with a role"): + 
node.query("GRANT role2 TO user14") + with When("I alter user default role"): + node.query("ALTER USER user14 DEFAULT ROLE role2") + + with Scenario("I alter user default role, setting default role", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")]): + with setup("user14a"), setup_role("default"): + with Given("I grant default role to the user"): + node.query("GRANT default TO user14a") + with When("I alter user default role"): + node.query("ALTER USER user14a DEFAULT ROLE default") + + with Scenario("I alter user default role, role doesn't exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")]): + with setup("user12"): + role = "role0" + with Given(f"I ensure that role {role} does not exist"): + node.query(f"DROP ROLE IF EXISTS {role}") + with When(f"I alter user with default role {role}"): + exitcode, message = errors.role_not_found_in_disk(role) + node.query(f"ALTER USER user12 DEFAULT ROLE {role}",exitcode=exitcode, message=message) + del role + + with Scenario("I alter user default role, all except role doesn't exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")]): + with setup("user12"): + role = "role0" + with Given(f"I ensure that role {role} does not exist"): + node.query(f"DROP ROLE IF EXISTS {role}") + with When(f"I alter user with default role {role}"): + exitcode, message = errors.role_not_found_in_disk(role) + node.query(f"ALTER USER user12 DEFAULT ROLE ALL EXCEPT {role}",exitcode=exitcode, message=message) + del role + + with Scenario("I alter user default role multiple", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")]): + with setup("user15"), setup_role("second"), setup_role("third"): + with Given("I have a user with multiple roles"): + node.query("GRANT second,third TO user15") + with When("I alter user default role to second, third"): + node.query("ALTER USER user15 DEFAULT ROLE second, third") + + with Scenario("I alter user default role set to all except", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_DefaultRole_AllExcept("1.0")]): + with setup("user16"), setup_role("second"): + with Given("I have a user with a role"): + node.query("GRANT second TO user16") + with When("I alter user default role"): + node.query("ALTER USER user16 DEFAULT ROLE ALL EXCEPT second") + + with Scenario("I alter user default role multiple all except", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_DefaultRole_AllExcept("1.0")]): + with setup("user17"), setup_role("second"), setup_role("third"): + with Given("I have a user with multiple roles"): + node.query("GRANT second,third TO user17") + with When("I alter user default role to all except second"): + node.query("ALTER USER user17 DEFAULT ROLE ALL EXCEPT second") + + with Scenario("I alter user settings profile", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Settings("1.0"), \ + RQ_SRS_006_RBAC_User_Alter_Settings_Profile("1.0")]): + with setup("user18"): + try: + with Given("I have a profile"): + node.query(f"CREATE SETTINGS PROFILE profile10") + with When("I alter user with settings and set profile to profile1"): + node.query("ALTER USER user18 SETTINGS PROFILE profile10, max_memory_usage = 100 MIN 0 MAX 1000 READONLY") + finally: + with Finally("I drop the profile"): + node.query(f"DROP SETTINGS PROFILE profile10") + + with Scenario("I alter user settings profile, fake profile, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Settings("1.0"), + 
RQ_SRS_006_RBAC_User_Alter_Settings_Profile("1.0")]): + with setup("user18a"): + profile = "profile0" + with Given(f"I ensure that profile {profile} does not exist"): + node.query(f"DROP SETTINGS PROFILE IF EXISTS {profile}") + with When(f"I alter user with settings and set profile to fake profile {profile}"): + exitcode, message = errors.settings_profile_not_found_in_disk(profile) + node.query("ALTER USER user18a SETTINGS PROFILE profile0", exitcode=exitcode, message=message) + del profile + + with Scenario("I alter user settings with a fake setting, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Settings("1.0")]): + with setup("user18b"): + with When("I alter user using settings and nonexistent value"): + exitcode, message = errors.unknown_setting("fake_setting") + node.query("ALTER USER user18b SETTINGS fake_setting = 100000001", exitcode=exitcode, message=message) + + with Scenario("I alter user settings without profile (no equals)", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Settings("1.0"), + RQ_SRS_006_RBAC_User_Alter_Settings_Min("1.0"), + RQ_SRS_006_RBAC_User_Alter_Settings_Max("1.0")]): + with setup("user19"): + with When("I alter user with settings without profile using no equals"): + node.query("ALTER USER user19 SETTINGS max_memory_usage=10000000 MIN 100000 MAX 1000000000 READONLY") + + # verify equals sign (=) syntax + with Scenario("I alter user settings without profile (yes equals)", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Settings("1.0"), + RQ_SRS_006_RBAC_User_Alter_Settings_Min("1.0"), + RQ_SRS_006_RBAC_User_Alter_Settings_Max("1.0")]): + with setup("user20"): + with When("I alter user with settings without profile using equals"): + node.query("ALTER USER user20 SETTINGS max_memory_usage=10000000 MIN=100000 MAX=1000000000 READONLY") + + # host ADD/DROP scenarios + with Scenario("I alter user to add host", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Host_AddDrop("1.0")]): + with setup("user21"): + with When("I alter user by adding local host"): + node.query("ALTER USER user21 ADD HOST LOCAL") + with And("I alter user by adding no host"): + node.query("ALTER USER user21 ADD HOST NONE") + with And("I alter user by adding host like"): + node.query("ALTER USER user21 ADD HOST LIKE 'local%'") + with And("I alter user by adding host ip"): + node.query("ALTER USER user21 ADD HOST IP '127.0.0.1'") + with And("I alter user by adding host name"): + node.query("ALTER USER user21 ADD HOST NAME 'localhost'") + + with Scenario("I alter user to remove host", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Alter_Host_AddDrop("1.0")]): + with setup("user22"): + with When("I alter user by removing local host"): + node.query("ALTER USER user22 DROP HOST LOCAL") + with And("I alter user by removing no host"): + node.query("ALTER USER user22 DROP HOST NONE") + with And("I alter user by removing like host"): + node.query("ALTER USER user22 DROP HOST LIKE 'local%'") + with And("I alter user by removing host ip"): + node.query("ALTER USER user22 DROP HOST IP '127.0.0.1'") + with And("I alter user by removing host name"): + node.query("ALTER USER user22 DROP HOST NAME 'localhost'") diff --git a/tests/testflows/rbac/tests/syntax/create_quota.py b/tests/testflows/rbac/tests/syntax/create_quota.py new file mode 100644 index 00000000000..9697da2ac29 --- /dev/null +++ b/tests/testflows/rbac/tests/syntax/create_quota.py @@ -0,0 +1,227 @@ +from contextlib import contextmanager + +from testflows.core import * + +from 
rbac.requirements import * +import rbac.tests.errors as errors + +@TestFeature +@Name("create quota") +@Args(format_description=False) +def feature(self, node="clickhouse1"): + """Check create quota query syntax. + + ```sql + CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name] + [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}] + [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY} + {MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] | + NO LIMITS | TRACKING ONLY} [,...]] + [TO {role [,...] | ALL | ALL EXCEPT role [,...]}] + ``` + """ + node = self.context.cluster.node(node) + + @contextmanager + def cleanup(quota): + try: + with Given("I ensure the quota does not already exist"): + node.query(f"DROP QUOTA IF EXISTS {quota}") + yield + finally: + with Finally("I drop the quota"): + node.query(f"DROP QUOTA IF EXISTS {quota}") + + def create_quota(quota): + with And(f"I ensure I do have quota {quota}"): + node.query(f"CREATE QUOTA OR REPLACE {quota}") + + try: + with Given("I have a user and a role"): + node.query(f"CREATE USER user0") + node.query(f"CREATE ROLE role0") + + with Scenario("I create quota with no options", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create("1.0")]): + with cleanup("quota0"): + with When("I create a quota with no options"): + node.query("CREATE QUOTA quota0") + + with Scenario("I create quota that already exists, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create("1.0")]): + quota = "quota0" + with cleanup(quota): + create_quota(quota) + with When(f"I create a quota {quota} that already exists without IF EXISTS, throws exception"): + exitcode, message = errors.cannot_insert_quota(name=quota) + node.query(f"CREATE QUOTA {quota}", exitcode=exitcode, message=message) + del quota + + with Scenario("I create quota if not exists, quota does not exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create_IfNotExists("1.0")]): + quota = "quota1" + with cleanup(quota): + with When(f"I create a quota {quota} with if not exists"): + node.query(f"CREATE QUOTA IF NOT EXISTS {quota}") + del quota + + with Scenario("I create quota if not exists, quota does exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create_IfNotExists("1.0")]): + quota = "quota1" + with cleanup(quota): + create_quota(quota) + with When(f"I create a quota {quota} with if not exists"): + node.query(f"CREATE QUOTA IF NOT EXISTS {quota}") + del quota + + with Scenario("I create quota or replace, quota does not exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create_Replace("1.0")]): + quota = "quota2" + with cleanup(quota): + with When(f"I create a quota {quota} with or replace"): + node.query(f"CREATE QUOTA OR REPLACE {quota}") + del quota + + with Scenario("I create quota or replace, quota does exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create_Replace("1.0")]): + quota = "quota2" + with cleanup(quota): + create_quota(quota) + with When(f"I create a quota {quota} with or replace"): + node.query(f"CREATE QUOTA OR REPLACE {quota}") + del quota + + keys = ['none', 'user name', 'ip address', 'client key', 'client key or user name', 'client key or ip address'] + for i, key in enumerate(keys): + with Scenario(f"I create quota keyed by {key}", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create_KeyedBy("1.0"), + RQ_SRS_006_RBAC_Quota_Create_KeyedByOptions("1.0")]): + name = f'quota{3 
+ i}' + with cleanup(name): + with When(f"I create a quota with {key}"): + node.query(f"CREATE QUOTA {name} KEYED BY '{key}'") + + with Scenario("I create quota for randomized interval", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create_Interval_Randomized("1.0")]): + with cleanup("quota9"): + with When("I create a quota for randomized interval"): + node.query("CREATE QUOTA quota9 FOR RANDOMIZED INTERVAL 1 DAY NO LIMITS") + + intervals = ['SECOND', 'MINUTE', 'HOUR', 'DAY', 'MONTH'] + for i, interval in enumerate(intervals): + with Scenario(f"I create quota for interval {interval}", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create_Interval("1.0")]): + name = f'quota{10 + i}' + with cleanup(name): + with When(f"I create a quota for {interval} interval"): + node.query(f"CREATE QUOTA {name} FOR INTERVAL 1 {interval} NO LIMITS") + + constraints = ['MAX QUERIES', 'MAX ERRORS', 'MAX RESULT ROWS', + 'MAX RESULT BYTES', 'MAX READ ROWS', 'MAX READ BYTES', 'MAX EXECUTION TIME', + 'NO LIMITS', 'TRACKING ONLY'] + for i, constraint in enumerate(constraints): + with Scenario(f"I create quota for {constraint.lower()}", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create_Queries("1.0"), + RQ_SRS_006_RBAC_Quota_Create_Errors("1.0"), + RQ_SRS_006_RBAC_Quota_Create_ResultRows("1.0"), + RQ_SRS_006_RBAC_Quota_Create_ResultBytes("1.0"), + RQ_SRS_006_RBAC_Quota_Create_ReadRows("1.0"), + RQ_SRS_006_RBAC_Quota_Create_ReadBytes("1.0"), + RQ_SRS_006_RBAC_Quota_Create_ExecutionTime("1.0"), + RQ_SRS_006_RBAC_Quota_Create_NoLimits("1.0"), + RQ_SRS_006_RBAC_Quota_Create_TrackingOnly("1.0")]): + name = f'quota{15 + i}' + with cleanup(name): + with When(f"I create quota for {constraint.lower()}"): + node.query(f"CREATE QUOTA {name} FOR INTERVAL 1 DAY {constraint}{' 1024' if constraint.startswith('MAX') else ''}") + + with Scenario("I create quota for multiple constraints", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create_Interval("1.0"), + RQ_SRS_006_RBAC_Quota_Create_Queries("1.0")]): + with cleanup("quota23"): + with When(f"I create quota for multiple constraints"): + node.query('CREATE QUOTA quota23 \ + FOR INTERVAL 1 DAY NO LIMITS, \ + FOR INTERVAL 2 DAY MAX QUERIES 124, \ + FOR INTERVAL 1 HOUR TRACKING ONLY') + + with Scenario("I create quota assigned to one role", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create_Assignment("1.0")]): + with cleanup("quota24"): + with When("I create quota for role"): + node.query("CREATE QUOTA quota24 TO role0") + + with Scenario("I create quota to assign to role that does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create_Assignment("1.0")]): + role = "role1" + with Given(f"I drop {role} if it exists"): + node.query(f"DROP ROLE IF EXISTS {role}") + with Then(f"I create a quota, assign to role {role}, which does not exist"): + exitcode, message = errors.role_not_found_in_disk(name=role) + node.query(f"CREATE QUOTA quota0 TO {role}", exitcode=exitcode, message=message) + del role + + with Scenario("I create quota to assign to all except role that does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create_Assignment("1.0")]): + role = "role1" + with Given(f"I drop {role} if it exists"): + node.query(f"DROP ROLE IF EXISTS {role}") + with Then(f"I create a quota, assign to all except role {role}, which does not exist"): + exitcode, message = errors.role_not_found_in_disk(name=role) + node.query(f"CREATE QUOTA quota0 TO ALL EXCEPT {role}", exitcode=exitcode, message=message) + del role + + with 
Scenario("I create quota assigned to no role", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create_Assignment_None("1.0")]): + with When("I create quota for no role"): + node.query("CREATE QUOTA quota24 TO NONE") + + with Scenario("I create quota assigned to multiple roles", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create_Assignment("1.0")]): + with cleanup("quota25"): + with When("I create quota for multiple roles"): + node.query("CREATE QUOTA quota25 TO role0, user0") + + with Scenario("I create quota assigned to all", flags=TE,requirements=[ + RQ_SRS_006_RBAC_Quota_Create_Assignment_All("1.0")]): + with cleanup("quota26"): + with When("I create quota for all"): + node.query("CREATE QUOTA quota26 TO ALL") + + with Scenario("I create quota assigned to all except one role", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create_Assignment_Except("1.0")]): + with cleanup("quota27"): + with When("I create quota for all except one role"): + node.query("CREATE QUOTA quota27 TO ALL EXCEPT role0") + + with Scenario("I create quota assigned to all except multiple roles", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create_Assignment_Except("1.0")]): + with cleanup("quota28"): + with When("I create quota for all except multiple roles"): + node.query("CREATE QUOTA quota28 TO ALL EXCEPT role0, user0") + + with Scenario("I create quota on cluster", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create_Cluster("1.0")]): + try: + with When("I run create quota command on cluster"): + node.query("CREATE QUOTA quota29 ON CLUSTER sharded_cluster") + with When("I run create quota command on cluster, keyed"): + node.query("CREATE QUOTA OR REPLACE quota29 ON CLUSTER sharded_cluster KEYED BY 'none'") + with When("I run create quota command on cluster, interval"): + node.query("CREATE QUOTA OR REPLACE quota29 ON CLUSTER sharded_cluster FOR INTERVAL 1 DAY TRACKING ONLY") + with When("I run create quota command on cluster, assign"): + node.query("CREATE QUOTA OR REPLACE quota29 ON CLUSTER sharded_cluster TO ALL") + finally: + with Finally("I drop the quota from cluster"): + node.query("DROP QUOTA IF EXISTS quota29 ON CLUSTER sharded_cluster") + + with Scenario("I create quota on nonexistent cluster, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Create_Cluster("1.0")]): + with When("I run create quota on a cluster"): + exitcode, message = errors.cluster_not_found("fake_cluster") + node.query("CREATE QUOTA quota0 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) + + finally: + with Finally("I drop all the users and roles"): + node.query(f"DROP USER IF EXISTS user0") + node.query(f"DROP ROLE IF EXISTS role0") \ No newline at end of file diff --git a/tests/testflows/rbac/tests/syntax/create_role.py b/tests/testflows/rbac/tests/syntax/create_role.py new file mode 100644 index 00000000000..f87710f992b --- /dev/null +++ b/tests/testflows/rbac/tests/syntax/create_role.py @@ -0,0 +1,122 @@ +from contextlib import contextmanager + +from testflows.core import * + +from rbac.requirements import * +import rbac.tests.errors as errors + +@TestFeature +@Name("create role") +@Args(format_description=False) +def feature(self, node="clickhouse1"): + """Check create role query syntax. + + ```sql + CREATE ROLE [IF NOT EXISTS | OR REPLACE] name + [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] 
+ ``` + """ + node = self.context.cluster.node(node) + + @contextmanager + def cleanup(role): + try: + with Given("I ensure the role doesn't already exist"): + node.query(f"DROP ROLE IF EXISTS {role}") + yield + finally: + with Finally("I drop the role"): + node.query(f"DROP ROLE IF EXISTS {role}") + + def create_role(role): + with Given(f"I ensure I do have role {role}"): + node.query(f"CREATE ROLE OR REPLACE {role}") + + with Scenario("I create role with no options", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Create("1.0")]): + with cleanup("role0"): + with When("I create role"): + node.query("CREATE ROLE role0") + + with Scenario("I create role that already exists, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Create("1.0")]): + role = "role0" + with cleanup(role): + create_role(role) + with When(f"I create role {role}"): + exitcode, message = errors.cannot_insert_role(name=role) + node.query(f"CREATE ROLE {role}", exitcode=exitcode, message=message) + del role + + with Scenario("I create role if not exists, role does not exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Create_IfNotExists("1.0")]): + role = "role1" + with cleanup(role): + with When(f"I create role {role} with if not exists"): + node.query(f"CREATE ROLE IF NOT EXISTS {role}") + del role + + with Scenario("I create role if not exists, role does exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Create_IfNotExists("1.0")]): + role = "role1" + with cleanup(role): + create_role(role) + with When(f"I create role {role} with if not exists"): + node.query(f"CREATE ROLE IF NOT EXISTS {role}") + del role + + with Scenario("I create role or replace, role does not exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Create_Replace("1.0")]): + role = "role2" + with cleanup(role): + with When(f"I create role {role} with or replace"): + node.query(f"CREATE ROLE OR REPLACE {role}") + del role + + with Scenario("I create role or replace, role does exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Create_Replace("1.0")]): + role = "role2" + with cleanup(role): + create_role(role) + with When(f"I create role {role} with or replace"): + node.query(f"CREATE ROLE OR REPLACE {role}") + del role + + with Scenario("I create role on cluster", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Create("1.0")]): + try: + with When("I have a role on a cluster"): + node.query("CREATE ROLE role1 ON CLUSTER sharded_cluster") + with And("I run create role or replace on a cluster"): + node.query("CREATE ROLE OR REPLACE role1 ON CLUSTER sharded_cluster") + with And("I create role with settings on a cluster"): + node.query("CREATE ROLE role2 ON CLUSTER sharded_cluster SETTINGS max_memory_usage=10000000 READONLY") + finally: + with Finally("I drop the role"): + node.query("DROP ROLE IF EXISTS role1,role2 ON CLUSTER sharded_cluster") + + with Scenario("I create role on nonexistent cluster, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Create("1.0")]): + with When("I run create role on a cluster"): + exitcode, message = errors.cluster_not_found("fake_cluster") + node.query("CREATE ROLE role1 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) + + with Scenario("I create role with settings profile", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Create_Settings("1.0")]): + with cleanup("role3"): + with When("I create role with settings profile"): + node.query("CREATE ROLE role3 SETTINGS PROFILE default, max_memory_usage=10000000 WRITABLE") + + with Scenario("I create role settings profile, fake profile, 
throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Create_Settings("1.0")]): + with cleanup("role4a"): + with Given("I ensure profile profile0 does not exist"): + node.query("DROP SETTINGS PROFILE IF EXISTS profile0") + with When("I create role with settings profile that does not exist"): + exitcode, message = errors.settings_profile_not_found_in_disk("profile0") + node.query("CREATE ROLE role4a SETTINGS PROFILE profile0", exitcode=exitcode, message=message) + + with Scenario("I create role with settings without profile", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Role_Create_Settings("1.0")]): + with cleanup("role4"): + with When("I create role with settings without profile"): + node.query("CREATE ROLE role4 SETTINGS max_memory_usage=10000000 READONLY") diff --git a/tests/testflows/rbac/tests/syntax/create_row_policy.py b/tests/testflows/rbac/tests/syntax/create_row_policy.py new file mode 100644 index 00000000000..458b205e6c1 --- /dev/null +++ b/tests/testflows/rbac/tests/syntax/create_row_policy.py @@ -0,0 +1,225 @@ +from contextlib import contextmanager + +from testflows.core import * + +from rbac.requirements import * +import rbac.tests.errors as errors + +@TestFeature +@Name("create row policy") +@Args(format_description=False) +def feature(self, node="clickhouse1"): + """Check create row policy query syntax. + + ```sql + CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name [ON CLUSTER cluster_name] ON [db.]table + [AS {PERMISSIVE | RESTRICTIVE}] + [FOR SELECT] + [USING condition] + [TO {role [,...] | ALL | ALL EXCEPT role [,...]}] + ``` + """ + node = self.context.cluster.node(node) + + @contextmanager + def cleanup(policy, on="default.foo"): + try: + with Given(f"I ensure the row policy does not already exist on {on}"): + node.query(f"DROP ROW POLICY IF EXISTS {policy} ON {on}") + yield + finally: + with Finally(f"I drop the row policy on {on}"): + node.query(f"DROP ROW POLICY IF EXISTS {policy} ON {on}") + + def create_policy(policy, on="default.foo"): + with Given(f"I ensure I do have policy {policy} on {on}"): + node.query(f"CREATE ROW POLICY OR REPLACE {policy} ON {on}") + + try: + with Given("I have a table and some roles"): + node.query(f"CREATE TABLE default.foo (x UInt64, y String) Engine=Memory") + node.query(f"CREATE ROLE role0") + node.query(f"CREATE ROLE role1") + + with Scenario("I create row policy with no options", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + with cleanup("policy0"): + with When("I create row policy"): + node.query("CREATE ROW POLICY policy0 ON default.foo") + + with Scenario("I create row policy using short syntax with no options", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + with cleanup("policy1"): + with When("I create row policy short form"): + node.query("CREATE POLICY policy1 ON default.foo") + + with Scenario("I create row policy that already exists, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + policy = "policy0" + with cleanup(policy): + create_policy(policy) + with When(f"I create row policy {policy}"): + exitcode, message = errors.cannot_insert_row_policy(name=f"{policy} ON default.foo") + node.query(f"CREATE ROW POLICY {policy} ON default.foo", exitcode=exitcode, message=message) + del policy + + with Scenario("I create row policy if not exists, policy does not exist", flags=TE, 
requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create_IfNotExists("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + with cleanup("policy2"): + with When("I create row policy with if not exists"): + node.query("CREATE ROW POLICY IF NOT EXISTS policy2 ON default.foo") + + with Scenario("I create row policy if not exists, policy does exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create_IfNotExists("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + policy = "policy2" + with cleanup(policy): + create_policy(policy) + with When(f"I create row policy {policy} with if not exists"): + node.query(f"CREATE ROW POLICY IF NOT EXISTS {policy} ON default.foo") + del policy + + with Scenario("I create row policy or replace, policy does not exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create_Replace("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + with cleanup("policy3"): + with When("I create row policy with or replace"): + node.query("CREATE ROW POLICY OR REPLACE policy3 ON default.foo") + + with Scenario("I create row policy or replace, policy does exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create_Replace("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + policy = "policy3" + with cleanup(policy): + create_policy(policy) + with When(f"I create row policy {policy} with or replace"): + node.query(f"CREATE ROW POLICY OR REPLACE {policy} ON default.foo") + del policy + + with Scenario("I create row policy as permissive", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create_Access_Permissive("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + with cleanup("policy4"): + with When("I create row policy as permissive"): + node.query("CREATE ROW POLICY policy4 ON default.foo AS PERMISSIVE") + + with Scenario("I create row policy as restrictive", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create_Access_Restrictive("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + with cleanup("policy5"): + with When("I create row policy as restrictive"): + node.query("CREATE ROW POLICY policy5 ON default.foo AS RESTRICTIVE") + + with Scenario("I create row policy for select", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create_ForSelect("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_Condition("1.0")]): + with cleanup("policy6"): + with When("I create row policy with for select"): + node.query("CREATE ROW POLICY policy6 ON default.foo FOR SELECT USING x > 10") + + with Scenario("I create row policy using condition", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create_Condition("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + with cleanup("policy6"): + with When("I create row policy with condition"): + node.query("CREATE ROW POLICY policy6 ON default.foo USING x > 10") + + with Scenario("I create row policy assigned to one role", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create_Assignment("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + with cleanup("policy7"): + with When("I create row policy for one role"): + node.query("CREATE ROW POLICY policy7 ON default.foo TO role0") + + with Scenario("I create row policy to assign to role that does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create_Assignment("1.0")]): + role = "role2" + with cleanup("policy8a"): + with Given(f"I drop {role} if it exists"): + node.query(f"DROP ROLE IF EXISTS {role}") + with Then(f"I create a row policy, assign to role {role}, 
which does not exist"): + exitcode, message = errors.role_not_found_in_disk(name=role) + node.query(f"CREATE ROW POLICY policy8a ON default.foo TO {role}", exitcode=exitcode, message=message) + del role + + with Scenario("I create row policy to assign to all except role that does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create_Assignment("1.0")]): + role = "role2" + with cleanup("policy8a"): + with Given(f"I drop {role} if it exists"): + node.query(f"DROP ROLE IF EXISTS {role}") + with Then(f"I create a row policy, assign to all except role {role}, which does not exist"): + exitcode, message = errors.role_not_found_in_disk(name=role) + node.query(f"CREATE ROW POLICY policy8a ON default.foo TO ALL EXCEPT {role}", exitcode=exitcode, message=message) + del role + + with Scenario("I create row policy assigned to multiple roles", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create_Assignment("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + with cleanup("policy8b"): + with When("I create row policy for multiple roles"): + node.query("CREATE ROW POLICY policy8b ON default.foo TO role0, role1") + + with Scenario("I create row policy assigned to all", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_All("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + with cleanup("policy9"): + with When("I create row policy for all"): + node.query("CREATE ROW POLICY policy9 ON default.foo TO ALL") + + with Scenario("I create row policy assigned to all except one role", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_AllExcept("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + with cleanup("policy10"): + with When("I create row policy for all except one"): + node.query("CREATE ROW POLICY policy10 ON default.foo TO ALL EXCEPT role0") + + with Scenario("I create row policy assigned to all except multiple roles", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_AllExcept("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + with cleanup("policy11"): + with When("I create row policy for all except multiple roles"): + node.query("CREATE ROW POLICY policy11 ON default.foo TO ALL EXCEPT role0, role1") + + with Scenario("I create row policy assigned to none", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_None("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + with cleanup("policy11"): + with When("I create row policy for none"): + node.query("CREATE ROW POLICY policy11 ON default.foo TO NONE") + + with Scenario("I create row policy on cluster", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create_OnCluster("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + try: + with When("I run create row policy command on cluster"): + node.query("CREATE ROW POLICY policy12 ON CLUSTER sharded_cluster ON default.foo") + finally: + with Finally("I drop the row policy from cluster"): + node.query("DROP ROW POLICY IF EXISTS policy12 ON default.foo ON CLUSTER sharded_cluster") + + with Scenario("I create row policy on fake cluster, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create_OnCluster("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + with When("I run create row policy command"): + exitcode, message = errors.cluster_not_found("fake_cluster") + node.query("CREATE ROW POLICY policy13 ON CLUSTER fake_cluster ON default.foo", exitcode=exitcode, message=message) + + with Scenario("I create row policy on cluster after 
table", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Create_OnCluster("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + try: + with When("I run create row policy command on cluster"): + node.query("CREATE ROW POLICY policy12 ON default.foo ON CLUSTER sharded_cluster") + finally: + with Finally("I drop the row policy from cluster"): + node.query("DROP ROW POLICY IF EXISTS policy12 ON default.foo ON CLUSTER sharded_cluster") + finally: + with Finally("I drop the table and the roles"): + node.query(f"DROP TABLE IF EXISTS default.foo") + node.query(f"DROP ROLE IF EXISTS role0, role1") \ No newline at end of file diff --git a/tests/testflows/rbac/tests/syntax/create_settings_profile.py b/tests/testflows/rbac/tests/syntax/create_settings_profile.py new file mode 100644 index 00000000000..1cd5289db26 --- /dev/null +++ b/tests/testflows/rbac/tests/syntax/create_settings_profile.py @@ -0,0 +1,254 @@ +from contextlib import contextmanager + +from testflows.core import * + +from rbac.requirements import * +import rbac.tests.errors as errors + +@TestFeature +@Name("create settings profile") +@Args(format_description=False) +def feature(self, node="clickhouse1"): + """Check create settings profile query syntax. + + ```sql + CREATE [SETTINGS] PROFILE [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name] + [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] + [READONLY] | [INHERIT|PROFILE 'profile_name']] [,...] + [TO {role [,...] | ALL | ALL EXCEPT role [,...]}] + ``` + """ + node = self.context.cluster.node(node) + + @contextmanager + def cleanup(profile): + try: + with Given(f"I ensure the profile {profile} does not exist"): + node.query(f"DROP SETTINGS PROFILE IF EXISTS {profile}") + yield + finally: + with Finally("I drop the profile"): + node.query(f"DROP SETTINGS PROFILE IF EXISTS {profile}") + + def create_profile(profile): + with Given(f"I ensure I do have profile {profile}"): + node.query(f"CREATE SETTINGS PROFILE OR REPLACE {profile}") + + try: + with Given("I have a user and a role"): + node.query(f"CREATE USER user0") + node.query(f"CREATE ROLE role0") + + with Scenario("I create settings profile with no options", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create("1.0")]): + with cleanup("profile0"): + with When("I create settings profile"): + node.query("CREATE SETTINGS PROFILE profile0") + + with Scenario("I create settings profile that already exists, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create("1.0")]): + profile = "profile0" + with cleanup(profile): + create_profile(profile) + with When(f"I create settings profile {profile} that already exists"): + exitcode, message = errors.cannot_insert_settings_profile(name=profile) + node.query(f"CREATE SETTINGS PROFILE {profile}", exitcode=exitcode, message=message) + del profile + + with Scenario("I create settings profile if not exists, profile does not exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_IfNotExists("1.0")]): + with cleanup("profile1"): + with When("I create settings profile with if not exists"): + node.query("CREATE SETTINGS PROFILE IF NOT EXISTS profile1") + + with Scenario("I create settings profile if not exists, profile does exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_IfNotExists("1.0")]): + profile = "profile1" + with cleanup(profile): + create_profile(profile) + with When(f"I create settings profile {profile} with if not exists"): + node.query(f"CREATE SETTINGS PROFILE IF NOT 
EXISTS {profile}") + del profile + + with Scenario("I create settings profile or replace, profile does not exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Replace("1.0")]): + with cleanup("profile2"): + with When("I create settings policy with or replace"): + node.query("CREATE SETTINGS PROFILE OR REPLACE profile2") + + with Scenario("I create settings profile or replace, profile does exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Replace("1.0")]): + with cleanup("profile2"): + create_profile("profile2") + with When("I create settings policy with or replace"): + node.query("CREATE SETTINGS PROFILE OR REPLACE profile2") + + with Scenario("I create settings profile short form", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create("1.0")]): + with cleanup("profile3"): + with When("I create settings profile short form"): + node.query("CREATE PROFILE profile3") + + with Scenario("I create settings profile with a setting value", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables("1.0"), + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Value("1.0")]): + with cleanup("profile4"): + with When("I create settings profile with settings"): + node.query("CREATE SETTINGS PROFILE profile4 SETTINGS max_memory_usage = 100000001") + + with Scenario("I create settings profile with a setting value, does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables("1.0"), + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Value("1.0")]): + with When("I create settings profile using settings and nonexistent value"): + exitcode, message = errors.unknown_setting("fake_setting") + node.query("CREATE SETTINGS PROFILE profile0 SETTINGS fake_setting = 100000001", exitcode=exitcode, message=message) + + with Scenario("I create settings profile with a min setting value", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0")]): + with cleanup("profile5"), cleanup("profile6"): + with When("I create settings profile with min setting with and without equals"): + node.query("CREATE SETTINGS PROFILE profile5 SETTINGS max_memory_usage MIN 100000001") + node.query("CREATE SETTINGS PROFILE profile6 SETTINGS max_memory_usage MIN = 100000001") + + with Scenario("I create settings profile with a max setting value", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0")]): + with cleanup("profile7"), cleanup("profile8"): + with When("I create settings profile with max setting with and without equals"): + node.query("CREATE SETTINGS PROFILE profile7 SETTINGS max_memory_usage MAX 100000001") + node.query("CREATE SETTINGS PROFILE profile8 SETTINGS max_memory_usage MAX = 100000001") + + with Scenario("I create settings profile with min and max setting values", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0")]): + with cleanup("profile9"): + with When("I create settings profile with min and max setting"): + node.query("CREATE SETTINGS PROFILE profile9 SETTINGS max_memory_usage MIN 100000001 MAX 200000001") + + with Scenario("I create settings profile with a readonly setting", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0")]): + with cleanup("profile10"): + with When("I create settings profile with readonly"): + node.query("CREATE SETTINGS PROFILE profile10 SETTINGS max_memory_usage READONLY") + + with Scenario("I create settings 
profile with a writable setting", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0")]): + with cleanup("profile21"): + with When("I create settings profile with writable"): + node.query("CREATE SETTINGS PROFILE profile21 SETTINGS max_memory_usage WRITABLE") + + with Scenario("I create settings profile with inherited settings", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Inherit("1.0")]): + with cleanup("profile11"): + with When("I create settings profile with inherit"): + node.query("CREATE SETTINGS PROFILE profile11 SETTINGS INHERIT 'default'") + + with Scenario("I create settings profile with inherit/from profile, fake profile, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Inherit("1.0")]): + profile = "profile3" + with Given(f"I ensure that profile {profile} does not exist"): + node.query(f"DROP SETTINGS PROFILE IF EXISTS {profile}") + sources = {"INHERIT","PROFILE"} + for source in sources: + with When(f"I create settings profile {source} from nonexistent parent"): + exitcode, message = errors.settings_profile_not_found_in_disk(profile) + node.query(f"CREATE PROFILE profile0 SETTINGS {source} {profile}", exitcode=exitcode, message=message) + del profile + + with Scenario("I create settings profile with inherited settings other form", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Inherit("1.0")]): + with cleanup("profile12"): + with When("I create settings profile with inherit short form"): + node.query("CREATE PROFILE profile12 SETTINGS PROFILE 'default'") + + with Scenario("I create settings profile with multiple settings", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0")]): + with cleanup("profile13"): + with When("I create settings profile with multiple settings"): + node.query("CREATE SETTINGS PROFILE profile13" + " SETTINGS max_memory_usage = 100000001" + " SETTINGS max_memory_usage_for_user = 100000001") + + with Scenario("I create settings profile with multiple settings short form", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0")]): + with cleanup("profile14"): + with When("I create settings profile with multiple settings short form"): + node.query("CREATE SETTINGS PROFILE profile14" + " SETTINGS max_memory_usage = 100000001," + " max_memory_usage_for_user = 100000001") + + with Scenario("I create settings profile assigned to one role", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment("1.0")]): + with cleanup("profile15"): + with When("I create settings profile for a role"): + node.query("CREATE SETTINGS PROFILE profile15 TO role0") + + with Scenario("I create settings profile to assign to role that does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment("1.0")]): + role = "role1" + with Given(f"I drop {role} if it exists"): + node.query(f"DROP ROLE IF EXISTS {role}") + with Then(f"I create a settings profile, assign to role {role}, which does not exist"): + exitcode, message = errors.role_not_found_in_disk(name=role) + node.query(f"CREATE SETTINGS PROFILE profile0 TO {role}", exitcode=exitcode, message=message) + del role + + with Scenario("I create settings profile to assign to all except role that does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment("1.0")]): + role = "role1" + with Given(f"I drop {role} if it exists"): + 
node.query(f"DROP ROLE IF EXISTS {role}") + with Then(f"I create a settings profile, assign to all except role {role}, which does not exist"): + exitcode, message = errors.role_not_found_in_disk(name=role) + node.query(f"CREATE SETTINGS PROFILE profile0 TO ALL EXCEPT {role}", exitcode=exitcode, message=message) + del role + + with Scenario("I create settings profile assigned to multiple roles", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment("1.0")]): + with cleanup("profile16"): + with When("I create settings profile for multiple roles"): + node.query("CREATE SETTINGS PROFILE profile16 TO role0, user0") + + with Scenario("I create settings profile assigned to all", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_All("1.0")]): + with cleanup("profile17"): + with When("I create settings profile for all"): + node.query("CREATE SETTINGS PROFILE profile17 TO ALL") + + with Scenario("I create settings profile assigned to all except one role", flags=TE,requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_AllExcept("1.0")]): + with cleanup("profile18"): + with When("I create settings profile for all except one role"): + node.query("CREATE SETTINGS PROFILE profile18 TO ALL EXCEPT role0") + + with Scenario("I create settings profile assigned to all except multiple roles", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_AllExcept("1.0")]): + with cleanup("profile19"): + with When("I create settings profile for all except multiple roles"): + node.query("CREATE SETTINGS PROFILE profile19 TO ALL EXCEPT role0, user0") + + with Scenario("I create settings profile assigned to none", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_None("1.0")]): + with cleanup("profile22"): + with When("I create settings profile for none"): + node.query("CREATE SETTINGS PROFILE profile22 TO NONE") + + with Scenario("I create settings profile on cluster", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_OnCluster("1.0")]): + try: + with When("I run create settings profile command"): + node.query("CREATE SETTINGS PROFILE profile20 ON CLUSTER sharded_cluster") + node.query("CREATE SETTINGS PROFILE OR REPLACE profile20 ON CLUSTER sharded_cluster SETTINGS max_memory_usage = 100000001") + node.query("CREATE SETTINGS PROFILE OR REPLACE profile20 ON CLUSTER sharded_cluster SETTINGS INHERIT 'default'") + node.query("CREATE SETTINGS PROFILE OR REPLACE profile20 ON CLUSTER sharded_cluster TO ALL") + finally: + with Finally("I drop the settings profile"): + node.query("DROP SETTINGS PROFILE IF EXISTS profile20 ON CLUSTER sharded_cluster") + + with Scenario("I create settings profile on fake cluster, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_OnCluster("1.0")]): + with When("I run create settings profile command"): + exitcode, message = errors.cluster_not_found("fake_cluster") + node.query("CREATE SETTINGS PROFILE profile1 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) + finally: + with Finally("I drop all the users and roles"): + node.query(f"DROP USER IF EXISTS user0") + node.query(f"DROP ROLE IF EXISTS role0") diff --git a/tests/testflows/rbac/tests/syntax/create_user.py b/tests/testflows/rbac/tests/syntax/create_user.py new file mode 100644 index 00000000000..874421f2c81 --- /dev/null +++ b/tests/testflows/rbac/tests/syntax/create_user.py @@ -0,0 +1,271 @@ +import hashlib +from contextlib import contextmanager + +from 
testflows.core import * + +from rbac.requirements import * +import rbac.tests.errors as errors + +@TestFeature +@Name("create user") +@Args(format_description=False) +def feature(self, node="clickhouse1"): + """Check create user query syntax. + + ```sql + CREATE USER [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name] + [IDENTIFIED [WITH {NO_PASSWORD|PLAINTEXT_PASSWORD|SHA256_PASSWORD|SHA256_HASH|DOUBLE_SHA1_PASSWORD|DOUBLE_SHA1_HASH}] BY {'password'|'hash'}] + [HOST {LOCAL | NAME 'name' | NAME REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] + [DEFAULT ROLE role [,...]] + [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...] + ``` + """ + node = self.context.cluster.node(node) + + @contextmanager + def cleanup(user): + try: + with Given("I ensure the user does not already exist", flags=TE): + node.query(f"DROP USER IF EXISTS {user}") + yield + finally: + with Finally("I drop the user", flags=TE): + node.query(f"DROP USER IF EXISTS {user}") + + def create_user(user): + with Given(f"I ensure I do have user {user}"): + node.query(f"CREATE USER OR REPLACE {user}") + + with Scenario("I create user with no options", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create("1.0"), + RQ_SRS_006_RBAC_User_Create_Host_Default("1.0")]): + with cleanup("user0"): + with When("I create a user with no options"): + node.query("CREATE USER user0") + + with Scenario("I create user that already exists, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create("1.0"), + RQ_SRS_006_RBAC_User_Create_Host_Default("1.0")]): + user = "user0" + with cleanup(user): + create_user(user) + with When(f"I create a user {user} that already exists without IF EXISTS, throws exception"): + exitcode, message = errors.cannot_insert_user(name=user) + node.query(f"CREATE USER {user}", exitcode=exitcode, message=message) + del user + + with Scenario("I create user with if not exists, user does not exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_IfNotExists("1.0")]): + user = "user0" + with cleanup(user): + with When(f"I create a user {user} with if not exists"): + node.query(f"CREATE USER IF NOT EXISTS {user}") + del user + + #Bug exists, mark as xfail + with Scenario("I create user with if not exists, user does exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_IfNotExists("1.0")]): + user = "user0" + with cleanup(user): + create_user(user) + with When(f"I create a user {user} with if not exists"): + node.query(f"CREATE USER IF NOT EXISTS {user}") + del user + + with Scenario("I create user or replace, user does not exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_Replace("1.0")]): + user = "user0" + with cleanup(user): + with When(f"I create a user {user} with or replace"): + node.query(f"CREATE USER OR REPLACE {user}") + del user + + with Scenario("I create user or replace, user does exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_Replace("1.0")]): + user = "user0" + with cleanup(user): + create_user(user) + with When(f"I create a user {user} with or replace"): + node.query(f"CREATE USER OR REPLACE {user}") + del user + + with Scenario("I create user with no password", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_Password_NoPassword("1.0")]): + with cleanup("user1"): + with When("I create a user with no password"): + node.query("CREATE USER user1 IDENTIFIED WITH NO_PASSWORD") + + with Scenario("I create user with plaintext password", flags=TE, 
requirements=[ + RQ_SRS_006_RBAC_User_Create_Password_PlainText("1.0")]): + with cleanup("user1"): + with When("I create a user with plaintext password"): + node.query("CREATE USER user1 IDENTIFIED WITH PLAINTEXT_PASSWORD BY 'mypassword'") + + with Scenario("I create user with sha256 password", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_Password_Sha256Password("1.0")]): + with cleanup("user2"): + with When("I create a user with sha256 password"): + password = hashlib.sha256("mypassword".encode("utf-8")).hexdigest() + node.query(f"CREATE USER user2 IDENTIFIED WITH SHA256_PASSWORD BY '{password}'") + + with Scenario("I create user with sha256 password using IDENTIFIED BY", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_Password_Sha256Password("1.0")]): + with cleanup("user2"): + with When("I create a user with sha256 password using short form"): + password = hashlib.sha256("mypassword".encode("utf-8")).hexdigest() + node.query(f"CREATE USER user2 IDENTIFIED BY '{password}'") + + with Scenario("I create user with sha256_hash password", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_Password_Sha256Hash("1.0")]): + with cleanup("user3"): + with When("I create a user with sha256_hash"): + def hash(password): + return hashlib.sha256(password.encode("utf-8")).hexdigest() + password = hash(hash("mypassword")) + node.query(f"CREATE USER user3 IDENTIFIED WITH SHA256_HASH BY '{password}'") + + with Scenario("I create user with double sha1 password", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Password("1.0")]): + with cleanup("user3"): + with When("I create a user with double_sha1_password"): + node.query(f"CREATE USER user3 IDENTIFIED WITH DOUBLE_SHA1_PASSWORD BY 'mypassword'") + + with Scenario("I create user with double sha1 hash", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Hash("1.0")]): + with cleanup("user3"): + with When("I create a user with double_sha1_hash"): + def hash(password): + return hashlib.sha1(password.encode("utf-8")).hexdigest() + password = hash(hash("mypassword")) + node.query(f"CREATE USER user3 IDENTIFIED WITH DOUBLE_SHA1_HASH BY '{password}'") + + with Scenario("I create user with host name", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_Host_Name("1.0")]): + with cleanup("user4"): + with When("I create a user with host name"): + node.query("CREATE USER user4 HOST NAME 'localhost', NAME 'clickhouse.com'") + + with Scenario("I create user with host regexp", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_Host_Regexp("1.0")]): + with cleanup("user5"): + with When("I create a user with host regexp"): + node.query("CREATE USER user5 HOST REGEXP 'lo.?*host', REGEXP 'lo*host'") + + with Scenario("I create user with host ip", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_Host_IP("1.0")]): + with cleanup("user6"): + with When("I create a user with host ip"): + node.query("CREATE USER user6 HOST IP '127.0.0.1', IP '127.0.0.2'") + + with Scenario("I create user with host like", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_Host_Like("1.0")]): + with cleanup("user7"): + with When("I create a user with host like"): + node.query("CREATE USER user7 HOST LIKE 'local%'") + + with Scenario("I create user with host none", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_Host_None("1.0")]): + with cleanup("user7"): + with When("I create a user with host none"): + node.query("CREATE USER user7 HOST NONE") + + with Scenario("I create user with host local", flags=TE, 
requirements=[ + RQ_SRS_006_RBAC_User_Create_Host_Local("1.0")]): + with cleanup("user7"): + with When("I create a user with host local"): + node.query("CREATE USER user7 HOST LOCAL") + + with Scenario("I create user with host any", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_Host_Any("1.0")]): + with cleanup("user7"): + with When("I create a user with host any"): + node.query("CREATE USER user7 HOST ANY") + + with Scenario("I create user with default role set to none", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_DefaultRole_None("1.0")]): + with cleanup("user8"): + with When("I create a user with no default role"): + node.query("CREATE USER user8 DEFAULT ROLE NONE") + + with Scenario("I create user with default role", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_DefaultRole("1.0")]): + with Given("I have a role"): + node.query("CREATE ROLE default") + with cleanup("user9"): + with When("I create a user with a default role"): + node.query("CREATE USER user9 DEFAULT ROLE default") + with Finally("I drop the role"): + node.query("DROP ROLE default") + + with Scenario("I create user default role, role doesn't exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_DefaultRole("1.0")]): + with cleanup("user12"): + role = "role0" + with Given(f"I ensure that role {role} does not exist"): + node.query(f"DROP ROLE IF EXISTS {role}") + with When(f"I create user with default role {role}"): + exitcode, message = errors.role_not_found_in_disk(role) + node.query(f"CREATE USER user12 DEFAULT ROLE {role}",exitcode=exitcode, message=message) + del role + + with Scenario("I create user default role, all except role doesn't exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_DefaultRole("1.0")]): + with cleanup("user12"): + role = "role0" + with Given(f"I ensure that role {role} does not exist"): + node.query(f"DROP ROLE IF EXISTS {role}") + with When(f"I create user with default role {role}"): + exitcode, message = errors.role_not_found_in_disk(role) + node.query(f"CREATE USER user12 DEFAULT ROLE ALL EXCEPT {role}",exitcode=exitcode, message=message) + del role + + with Scenario("I create user with all roles set to default", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_DefaultRole_All("1.0")]): + with cleanup("user10"): + with When("I create a user with all roles as default"): + node.query("CREATE USER user10 DEFAULT ROLE ALL") + + with Scenario("I create user with settings profile", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_Settings("1.0")]): + with cleanup("user11"): + with When("I create a user with a settings profile"): + node.query("CREATE USER user11 SETTINGS PROFILE default, max_memory_usage=10000000 READONLY") + + with Scenario("I create user settings profile, fake profile, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_Settings("1.0")]): + with cleanup("user18a"): + profile = "profile0" + with Given(f"I ensure that profile {profile} does not exist"): + node.query(f"DROP SETTINGS PROFILE IF EXISTS {profile}") + with When(f"I create user with Settings and set profile to fake profile {profile}"): + exitcode, message = errors.settings_profile_not_found_in_disk(profile) + node.query("CREATE USER user18a SETTINGS PROFILE profile0", exitcode=exitcode, message=message) + del profile + + with Scenario("I create user settings with a fake setting, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_Settings("1.0")]): + with cleanup("user18b"): + with 
When("I create settings profile using settings and nonexistent value"): + exitcode, message = errors.unknown_setting("fake_setting") + node.query("CREATE USER user18b SETTINGS fake_setting = 100000001", exitcode=exitcode, message=message) + + with Scenario("I create user with settings without profile", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_Settings("1.0")]): + with cleanup("user12"): + with When("I create a user with settings and no profile"): + node.query("CREATE USER user12 SETTINGS max_memory_usage=10000000 READONLY") + + with Scenario("I create user on cluster", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_OnCluster("1.0")]): + try: + with When("I create user on cluster"): + node.query("CREATE USER user13 ON CLUSTER sharded_cluster") + finally: + with Finally("I drop the user"): + node.query("DROP USER user13 ON CLUSTER sharded_cluster") + + with Scenario("I create user on fake cluster, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_User_Create_OnCluster("1.0")]): + with When("I create user on fake cluster"): + exitcode, message = errors.cluster_not_found("fake_cluster") + node.query("CREATE USER user14 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) diff --git a/tests/testflows/rbac/tests/syntax/drop_quota.py b/tests/testflows/rbac/tests/syntax/drop_quota.py new file mode 100644 index 00000000000..a90294800c2 --- /dev/null +++ b/tests/testflows/rbac/tests/syntax/drop_quota.py @@ -0,0 +1,87 @@ +from contextlib import contextmanager + +from testflows.core import * + +from rbac.requirements import * +import rbac.tests.errors as errors + +@TestFeature +@Name("drop quota") +def feature(self, node="clickhouse1"): + """Check drop quota query syntax. + + ```sql + DROP QUOTA [IF EXISTS] name [,...] 
[ON CLUSTER cluster_name] + ``` + """ + node = self.context.cluster.node(node) + + @contextmanager + def cleanup(quota): + try: + with Given("I have a quota"): + node.query(f"CREATE QUOTA {quota}") + yield + finally: + with Finally("I drop the quota"): + node.query(f"DROP QUOTA IF EXISTS {quota}") + + def cleanup_quota(quota): + with Given(f"I ensure that quota {quota} does not exist"): + node.query(f"DROP QUOTA IF EXISTS {quota}") + + with Scenario("I drop quota with no options", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Drop("1.0")]): + with cleanup("quota0"): + with When("I run drop quota command"): + node.query("DROP QUOTA quota0") + + with Scenario("I drop quota, does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Drop("1.0")]): + quota = "quota0" + cleanup_quota(quota) + with When("I run drop quota command, throws exception"): + exitcode, message = errors.quota_not_found_in_disk(name=quota) + node.query(f"DROP QUOTA {quota}", exitcode=exitcode, message=message) + del quota + + with Scenario("I drop quota if exists, quota exists", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Drop_IfExists("1.0")]): + with cleanup("quota1"): + with When("I run drop quota command"): + node.query("DROP QUOTA IF EXISTS quota1") + + with Scenario("I drop quota if exists, quota does not exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Drop_IfExists("1.0")]): + cleanup_quota("quota2") + with When("I run drop quota command, quota does not exist"): + node.query("DROP QUOTA IF EXISTS quota2") + + with Scenario("I drop default quota, throws error", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Drop("1.0")]): + with When("I drop default quota"): + exitcode, message = errors.cannot_remove_quota_default() + node.query("DROP QUOTA default", exitcode=exitcode, message=message) + + with Scenario("I drop multiple quotas", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Drop("1.0")]): + with cleanup("quota2"), cleanup("quota3"): + with When("I run drop quota command"): + node.query("DROP QUOTA quota2, quota3") + + with Scenario("I drop quota on cluster", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Drop_Cluster("1.0")]): + try: + with Given("I have a quota"): + node.query("CREATE QUOTA quota4 ON CLUSTER sharded_cluster") + with When("I run drop quota command"): + node.query("DROP QUOTA quota4 ON CLUSTER sharded_cluster") + finally: + with Finally("I drop the quota in case it still exists"): + node.query("DROP QUOTA IF EXISTS quota4 ON CLUSTER sharded_cluster") + + with Scenario("I drop quota on fake cluster", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Quota_Drop_Cluster("1.0")]): + with When("I run drop quota command"): + exitcode, message = errors.cluster_not_found("fake_cluster") + node.query("DROP QUOTA quota5 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) diff --git a/tests/testflows/rbac/tests/syntax/drop_role.py b/tests/testflows/rbac/tests/syntax/drop_role.py new file mode 100644 index 00000000000..e3f89298a50 --- /dev/null +++ b/tests/testflows/rbac/tests/syntax/drop_role.py @@ -0,0 +1,84 @@ +from contextlib import contextmanager + +from testflows.core import * + +from rbac.requirements import * +import rbac.tests.errors as errors + +@TestFeature +@Name("drop role") +def feature(self, node="clickhouse1"): + """Check drop role query syntax. + + ```sql + DROP ROLE [IF EXISTS] name [,...] 
[ON CLUSTER cluster_name]
+    ```
+    """
+    node = self.context.cluster.node(node)
+
+    @contextmanager
+    def setup(role):
+        try:
+            with Given("I have a role"):
+                node.query(f"CREATE ROLE OR REPLACE {role}")
+            yield
+        finally:
+            with Finally("I confirm the role is dropped"):
+                node.query(f"DROP ROLE IF EXISTS {role}")
+
+    def cleanup_role(role):
+        with Given(f"I ensure that role {role} does not exist"):
+            node.query(f"DROP ROLE IF EXISTS {role}")
+
+
+    with Scenario("I drop role with no options", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Role_Drop("1.0")]):
+        with setup("role0"):
+            with When("I drop role"):
+                node.query("DROP ROLE role0")
+
+    with Scenario("I drop role that doesn't exist, throws exception", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Role_Drop("1.0")]):
+        role = "role0"
+        cleanup_role(role)
+        with When(f"I drop role {role}"):
+            exitcode, message = errors.role_not_found_in_disk(name=role)
+            node.query(f"DROP ROLE {role}", exitcode=exitcode, message=message)
+        del role
+
+    with Scenario("I drop multiple roles", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Role_Drop("1.0")]):
+        with setup("role1"), setup("role2"):
+            with When("I drop multiple roles"):
+                node.query("DROP ROLE role1, role2")
+
+    with Scenario("I drop role that does not exist, using if exists", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Role_Drop_IfExists("1.0")]):
+        with When("I drop role if exists"):
+            node.query("DROP ROLE IF EXISTS role3")
+
+    with Scenario("I drop multiple roles where one does not exist", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Role_Drop_IfExists("1.0")]):
+        with setup("role5"):
+            with When("I drop multiple roles where one doesn't exist"):
+                node.query("DROP ROLE IF EXISTS role3, role5")
+
+    with Scenario("I drop multiple roles where both do not exist", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Role_Drop_IfExists("1.0")]):
+        with Given("I ensure role does not exist"):
+            node.query("DROP ROLE IF EXISTS role6")
+        with When("I drop the nonexistent roles"):
+            node.query("DROP ROLE IF EXISTS role5, role6")
+
+    with Scenario("I drop role on cluster", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Role_Drop_Cluster("1.0")]):
+        with Given("I have a role on cluster"):
+            node.query("CREATE ROLE role0 ON CLUSTER sharded_cluster")
+        with When("I drop the role from the cluster"):
+            node.query("DROP ROLE role0 ON CLUSTER sharded_cluster")
+
+    with Scenario("I drop role on fake cluster", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Role_Drop_Cluster("1.0")]):
+        with When("I run drop role command"):
+            exitcode, message = errors.cluster_not_found("fake_cluster")
+            node.query("DROP ROLE role2 ON CLUSTER fake_cluster", exitcode=exitcode, message=message)
diff --git a/tests/testflows/rbac/tests/syntax/drop_row_policy.py b/tests/testflows/rbac/tests/syntax/drop_row_policy.py
new file mode 100644
index 00000000000..37831ea9d27
--- /dev/null
+++ b/tests/testflows/rbac/tests/syntax/drop_row_policy.py
@@ -0,0 +1,135 @@
+from contextlib import contextmanager
+
+from testflows.core import *
+
+from rbac.requirements import *
+import rbac.tests.errors as errors
+
+@TestFeature
+@Name("drop row policy")
+def feature(self, node="clickhouse1"):
+    """Check drop row policy query syntax.
+
+    ```sql
+    DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...]
[ON CLUSTER cluster_name] + ``` + """ + node = self.context.cluster.node(node) + + @contextmanager + def cleanup(policy, on=["default.foo"]): + try: + with Given("I have a row policy"): + for i in policy: + for j in on: + node.query(f"CREATE ROW POLICY OR REPLACE {i} ON {j}") + yield + finally: + with Finally("I drop the row policy"): + for i in policy: + for j in on: + node.query(f"DROP ROW POLICY IF EXISTS {i} ON {j}") + + def cleanup_policy(policy, on="default.foo"): + with Given(f"I ensure that policy {policy} does not exist"): + node.query(f"DROP ROW POLICY IF EXISTS {policy} ON {on}") + + try: + with Given("I have some tables"): + node.query(f"CREATE TABLE default.foo (x UInt64, y String) Engine=Memory") + node.query(f"CREATE TABLE default.foo2 (x UInt64, y String) Engine=Memory") + + with Scenario("I drop row policy with no options", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Drop("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]): + with cleanup(["policy1"]): + with When("I drop row policy"): + node.query("DROP ROW POLICY policy1 ON default.foo") + + with Scenario("I drop row policy using short syntax with no options", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Drop("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]): + with cleanup(["policy2"]): + with When("I drop row policy short form"): + node.query("DROP POLICY policy2 ON default.foo") + + with Scenario("I drop row policy, does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Drop("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]): + policy = "policy1" + cleanup_policy(policy) + with When("I drop row policy"): + exitcode, message = errors.row_policy_not_found_in_disk(name=f"{policy} ON default.foo") + node.query(f"DROP ROW POLICY {policy} ON default.foo", exitcode=exitcode, message=message) + del policy + + with Scenario("I drop row policy if exists, policy does exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Drop_IfExists("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]): + with cleanup(["policy3"]): + with When("I drop row policy if exists"): + node.query("DROP ROW POLICY IF EXISTS policy3 ON default.foo") + + with Scenario("I drop row policy if exists, policy doesn't exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Drop_IfExists("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]): + cleanup_policy("policy3") + with When("I drop row policy if exists"): + node.query("DROP ROW POLICY IF EXISTS policy3 ON default.foo") + + with Scenario("I drop multiple row policies", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Drop("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]): + with cleanup(["policy3", "policy4"]): + with When("I drop multiple row policies"): + node.query("DROP ROW POLICY policy3, policy4 ON default.foo") + + with Scenario("I drop row policy on multiple tables", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Drop("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]): + with cleanup(["policy3"], ["default.foo","default.foo2"]): + with When("I drop row policy on multiple tables"): + node.query("DROP ROW POLICY policy3 ON default.foo, default.foo2") + + with Scenario("I drop multiple row policies on multiple tables", flags=TE, requirements=[ + RQ_SRS_006_RBAC_RowPolicy_Drop("1.0"), + RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]): + with cleanup(["policy3", "policy4"], ["default.foo","default.foo2"]): + with When("I drop the row policies from the tables"): + node.query("DROP ROW POLICY policy3 ON default.foo, policy4 ON 
default.foo2")
+
+        with Scenario("I drop row policy on cluster", flags=TE, requirements=[
+                RQ_SRS_006_RBAC_RowPolicy_Drop_OnCluster("1.0"),
+                RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]):
+            try:
+                with Given("I have a row policy"):
+                    node.query("CREATE ROW POLICY policy13 ON default.foo ON CLUSTER sharded_cluster")
+                with When("I run drop row policy command"):
+                    node.query("DROP ROW POLICY IF EXISTS policy13 ON CLUSTER sharded_cluster ON default.foo")
+            finally:
+                with Finally("I drop the row policy in case it still exists"):
+                    node.query("DROP ROW POLICY IF EXISTS policy13 ON default.foo ON CLUSTER sharded_cluster")
+
+        with Scenario("I drop row policy on cluster after table", flags=TE, requirements=[
+                RQ_SRS_006_RBAC_RowPolicy_Drop_OnCluster("1.0"),
+                RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]):
+            try:
+                with Given("I have a row policy"):
+                    node.query("CREATE ROW POLICY policy12 ON default.foo ON CLUSTER sharded_cluster")
+                with When("I run drop row policy command"):
+                    node.query("DROP ROW POLICY IF EXISTS policy12 ON default.foo ON CLUSTER sharded_cluster")
+            finally:
+                with Finally("I drop the row policy in case it still exists"):
+                    node.query("DROP ROW POLICY IF EXISTS policy12 ON default.foo ON CLUSTER sharded_cluster")
+
+        with Scenario("I drop row policy on fake cluster throws exception", flags=TE, requirements=[
+                RQ_SRS_006_RBAC_RowPolicy_Drop_OnCluster("1.0"),
+                RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]):
+            with When("I run drop row policy command"):
+                exitcode, message = errors.cluster_not_found("fake_cluster")
+                node.query("DROP ROW POLICY IF EXISTS policy14 ON default.foo ON CLUSTER fake_cluster",
+                    exitcode=exitcode, message=message)
+    finally:
+        with Finally("I drop the tables"):
+            node.query(f"DROP TABLE IF EXISTS default.foo")
+            node.query(f"DROP TABLE IF EXISTS default.foo2")
diff --git a/tests/testflows/rbac/tests/syntax/drop_settings_profile.py b/tests/testflows/rbac/tests/syntax/drop_settings_profile.py
new file mode 100644
index 00000000000..aea5194c9c9
--- /dev/null
+++ b/tests/testflows/rbac/tests/syntax/drop_settings_profile.py
@@ -0,0 +1,93 @@
+from contextlib import contextmanager
+
+from testflows.core import *
+
+from rbac.requirements import *
+import rbac.tests.errors as errors
+
+@TestFeature
+@Name("drop settings profile")
+def feature(self, node="clickhouse1"):
+    """Check drop settings profile query syntax.
+
+    ```sql
+    DROP [SETTINGS] PROFILE [IF EXISTS] name [,...]
[ON CLUSTER cluster_name] + ``` + """ + node = self.context.cluster.node(node) + + @contextmanager + def cleanup(profile): + try: + with Given("I have a settings profile"): + node.query(f"CREATE SETTINGS PROFILE {profile}") + yield + finally: + with Finally("I drop the settings profile"): + node.query(f"DROP SETTINGS PROFILE IF EXISTS {profile}") + + def cleanup_profile(profile): + with Given(f"I ensure that profile {profile} does not exist"): + node.query(f"DROP SETTINGS PROFILE IF EXISTS {profile}") + + with Scenario("I drop settings profile with no options", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Drop("1.0")]): + with cleanup("profile0"): + with When("I drop settings profile"): + node.query("DROP SETTINGS PROFILE profile0") + + with Scenario("I drop settings profile, does not exist, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Drop("1.0")]): + profile = "profile0" + cleanup_profile(profile) + with When("I drop settings profile"): + exitcode, message = errors.settings_profile_not_found_in_disk(name=profile) + node.query("DROP SETTINGS PROFILE profile0", exitcode=exitcode, message=message) + del profile + + with Scenario("I drop settings profile short form", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Drop("1.0")]): + with cleanup("profile1"): + with When("I drop settings profile short form"): + node.query("DROP PROFILE profile1") + + with Scenario("I drop settings profile if exists, profile does exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Drop_IfExists("1.0")]): + with cleanup("profile2"): + with When("I drop settings profile if exists"): + node.query("DROP SETTINGS PROFILE IF EXISTS profile2") + + with Scenario("I drop settings profile if exists, profile does not exist", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Drop_IfExists("1.0")]): + cleanup_profile("profile2") + with When("I drop settings profile if exists"): + node.query("DROP SETTINGS PROFILE IF EXISTS profile2") + + with Scenario("I drop default settings profile, throws error", requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Drop("1.0")]): + with When("I drop default profile"): + exitcode, message = errors.cannot_remove_settings_profile_default() + node.query("DROP SETTINGS PROFILE default", exitcode=exitcode, message=message) + + with Scenario("I drop multiple settings profiles", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Drop("1.0")]): + with cleanup("profile3"), cleanup("profile4"): + with When("I drop multiple settings profiles"): + node.query("DROP SETTINGS PROFILE profile3, profile4") + + with Scenario("I drop settings profile on cluster", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Drop_OnCluster("1.0")]): + try: + with Given("I have a settings profile"): + node.query("CREATE SETTINGS PROFILE profile5 ON CLUSTER sharded_cluster") + with When("I run drop settings profile command"): + node.query("DROP SETTINGS PROFILE profile5 ON CLUSTER sharded_cluster") + finally: + with Finally("I drop the profile in case it still exists"): + node.query("DROP SETTINGS PROFILE IF EXISTS profile5 ON CLUSTER sharded_cluster") + + with Scenario("I drop settings profile on fake cluster, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Drop_OnCluster("1.0")]): + with When("I run drop settings profile command"): + exitcode, message = errors.cluster_not_found("fake_cluster") + node.query("DROP SETTINGS PROFILE profile6 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) 
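Each of these syntax files repeats the same create/exercise/drop shape. Below is a minimal, self-contained sketch of that pattern, with an assumed `Node` stub standing in for the real cluster node object (the real tests obtain it from `self.context.cluster.node(...)` and wrap steps in the testflows `Given`/`When`/`Finally` contexts shown above):

```python
from contextlib import contextmanager

class Node:
    """Stand-in for the cluster node object used by the real tests."""
    def query(self, sql, exitcode=None, message=None):
        # The real node.query() runs SQL on a ClickHouse node and, when
        # exitcode/message are given, asserts the expected failure instead.
        print(sql)

@contextmanager
def entity(node, name, kind="USER"):
    # Guarantee a clean slate before the scenario and cleanup after it,
    # mirroring the cleanup()/setup() context managers defined per file.
    try:
        node.query(f"DROP {kind} IF EXISTS {name}")
        node.query(f"CREATE {kind} {name}")
        yield
    finally:
        node.query(f"DROP {kind} IF EXISTS {name}")

node = Node()
with entity(node, "profile0", kind="SETTINGS PROFILE"):
    node.query("DROP SETTINGS PROFILE profile0")  # the statement under test
```

The `IF EXISTS` guard on both sides of the scenario is what lets these tests run in any order and survive a failed previous run.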
diff --git a/tests/testflows/rbac/tests/syntax/drop_user.py b/tests/testflows/rbac/tests/syntax/drop_user.py
new file mode 100644
index 00000000000..c5a2d16d7e1
--- /dev/null
+++ b/tests/testflows/rbac/tests/syntax/drop_user.py
@@ -0,0 +1,98 @@
+from contextlib import contextmanager
+
+from testflows.core import *
+
+from rbac.requirements import *
+import rbac.tests.errors as errors
+
+@TestFeature
+@Name("drop user")
+def feature(self, node="clickhouse1"):
+    """Check drop user query syntax.
+
+    ```sql
+    DROP USER [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
+    ```
+    """
+    node = self.context.cluster.node(node)
+
+    @contextmanager
+    def setup(user):
+        try:
+            with Given("I have a user"):
+                node.query(f"CREATE USER {user}")
+            yield
+        finally:
+            with Finally("I drop the user"):
+                node.query(f"DROP USER IF EXISTS {user}")
+
+    def cleanup_user(user):
+        with Given(f"I ensure that user {user} does not exist"):
+            node.query(f"DROP USER IF EXISTS {user}")
+
+    with Scenario("I drop user with no options", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_User_Drop("1.0")]):
+        with setup("user0"):
+            with When("I drop user"):
+                node.query("DROP USER user0")
+
+    with Scenario("I drop user, does not exist, throws exception", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_User_Drop("1.0")]):
+        user = "user0"
+        cleanup_user(user)
+        with When(f"I drop user {user}"):
+            exitcode, message = errors.user_not_found_in_disk(name=user)
+            node.query(f"DROP USER {user}", exitcode=exitcode, message=message)
+        del user
+
+    with Scenario("I drop multiple users", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_User_Drop("1.0")]):
+        with setup("user1"), setup("user2"):
+            with When("I drop multiple users"):
+                node.query("DROP USER user1, user2")
+
+    with Scenario("I drop user if exists, user does exist", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_User_Drop_IfExists("1.0")]):
+        with setup("user3"):
+            with When("I drop user that exists"):
+                node.query("DROP USER IF EXISTS user3")
+
+    with Scenario("I drop user if exists, user does not exist", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_User_Drop_IfExists("1.0")]):
+        cleanup_user("user3")
+        with When("I drop nonexistent user"):
+            node.query("DROP USER IF EXISTS user3")
+
+    with Scenario("I drop default user, throws error", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_User_Drop("1.0")]):
+        with When("I drop user"):
+            exitcode, message = errors.cannot_remove_user_default()
+            node.query("DROP USER default", exitcode=exitcode, message=message)
+
+    with Scenario("I drop multiple users where one does not exist", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_User_Drop_IfExists("1.0")]):
+        with setup("user3"):
+            with When("I drop multiple users where one does not exist"):
+                node.query("DROP USER IF EXISTS user3, user4")
+
+    with Scenario("I drop multiple users where both do not exist", requirements=[
+            RQ_SRS_006_RBAC_User_Drop_IfExists("1.0")]):
+        with When("I drop the nonexistent users"):
+            node.query("DROP USER IF EXISTS user5, user6")
+
+    with Scenario("I drop user from specific cluster", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_User_Drop_OnCluster("1.0")]):
+        try:
+            with Given("I have a user on cluster"):
+                node.query("CREATE USER user4 ON CLUSTER sharded_cluster")
+            with When("I drop a user from the cluster"):
+                node.query("DROP USER user4 ON CLUSTER sharded_cluster")
+        finally:
+            with Finally("I make sure the user is dropped"):
+                node.query("DROP USER IF EXISTS user4 ON CLUSTER sharded_cluster")
+
+    with Scenario("I drop user from fake cluster", flags=TE, requirements=[
RQ_SRS_006_RBAC_User_Drop_OnCluster("1.0")]): + with When("I drop a user from the fake cluster"): + exitcode, message = errors.cluster_not_found("fake_cluster") + node.query("DROP USER user5 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) diff --git a/tests/testflows/rbac/tests/syntax/feature.py b/tests/testflows/rbac/tests/syntax/feature.py new file mode 100644 index 00000000000..aac786ff85c --- /dev/null +++ b/tests/testflows/rbac/tests/syntax/feature.py @@ -0,0 +1,34 @@ +from testflows.core import * + +@TestFeature +@Name("syntax") +def feature(self): + Feature(run=load("rbac.tests.syntax.create_user", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.alter_user", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.drop_user", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.show_create_user", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.create_role", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.alter_role", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.drop_role", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.show_create_role", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.grant_role", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.grant_privilege","feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.show_grants", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.revoke_role", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.revoke_privilege","feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.create_row_policy", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.alter_row_policy", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.drop_row_policy", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.show_create_row_policy", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.show_row_policies", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.create_quota", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.alter_quota", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.drop_quota", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.show_create_quota", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.show_quotas", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.create_settings_profile", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.alter_settings_profile", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.drop_settings_profile", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.show_create_settings_profile", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.set_default_role", "feature"), flags=TE) + Feature(run=load("rbac.tests.syntax.set_role","feature"), flags=TE) \ No newline at end of file diff --git a/tests/testflows/rbac/tests/syntax/grant_privilege.py b/tests/testflows/rbac/tests/syntax/grant_privilege.py new file mode 100644 index 00000000000..fdb3224de8e --- /dev/null +++ b/tests/testflows/rbac/tests/syntax/grant_privilege.py @@ -0,0 +1,134 @@ +from contextlib import contextmanager + +from testflows.core import * + +from rbac.requirements import * +import rbac.tests.errors as errors + +@contextmanager +def setup(node): + try: + with Given("I have some users and roles"): + node.query("CREATE USER OR REPLACE user0 ON CLUSTER sharded_cluster") + node.query("CREATE USER OR REPLACE user1") + node.query("CREATE ROLE OR REPLACE role1") + yield + finally: + with Finally("I drop the 
users and roles"):
+            node.query("DROP USER IF EXISTS user0 ON CLUSTER sharded_cluster")
+            node.query("DROP USER IF EXISTS user1")
+            node.query("DROP ROLE IF EXISTS role1")
+
+
+@TestOutline(Scenario)
+@Examples("privilege on allow_introspection", [
+    ("dictGet", ("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_DictGet("1.0"))),
+    ("INTROSPECTION", ("*.*",), True, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Introspection("1.0"))),
+    ("SELECT", ("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Select("1.0"))),
+    ("INSERT",("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Insert("1.0"))),
+    ("ALTER",("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Alter("1.0"))),
+    ("CREATE",("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Create("1.0"))),
+    ("DROP",("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Drop("1.0"))),
+    ("TRUNCATE",("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Truncate("1.0"))),
+    ("OPTIMIZE",("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Optimize("1.0"))),
+    ("SHOW",("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Show("1.0"))),
+    ("KILL QUERY",("*.*",), False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_KillQuery("1.0"))),
+    ("ACCESS MANAGEMENT",("*.*",), False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_AccessManagement("1.0"))),
+    ("SYSTEM",("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_System("1.0"))),
+    ("SOURCES",("*.*",), False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Sources("1.0"))),
+    ("ALL",("*.*",), True, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_All("1.0"))),
+    ("ALL PRIVILEGES",("*.*",), True, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_All("1.0"))), # alias for ALL
+    ],)
+def grant_privileges(self, privilege, on, allow_introspection, node="clickhouse1"):
+    grant_privilege(privilege=privilege, on=on, allow_introspection=allow_introspection, node=node)
+
+@TestOutline(Scenario)
+@Requirements(RQ_SRS_006_RBAC_Grant_Privilege_GrantOption("1.0"))
+def grant_privilege(self, privilege, on, allow_introspection, node="clickhouse1"):
+    node = self.context.cluster.node(node)
+
+    for on_ in on:
+        with When(f"I grant {privilege} privilege to user on {on_}"):
+            with setup(node):
+                settings = []
+                if allow_introspection:
+                    settings.append(("allow_introspection_functions", 1))
+                    node.query("SET allow_introspection_functions = 1")
+                with When("I grant privilege without grant option"):
+                    node.query(f"GRANT {privilege} ON {on_} TO user0", settings=settings)
+                with When("I grant privilege with grant option"):
+                    node.query(f"GRANT {privilege} ON {on_} TO user1 WITH GRANT OPTION", settings=settings)
+
+                # grant a column-specific privilege on some column 'x'
+                with When("I grant privilege with columns"):
+                    node.query(f"GRANT {privilege}(x) ON {on_} TO user0", settings=settings)
+
+@TestFeature
+@Name("grant privilege")
+@Args(format_description=False)
+def feature(self, node="clickhouse1"):
+    """Check grant privilege syntax.
+
+    ```sql
+    GRANT [ON CLUSTER cluster_name]
+    privilege {SELECT | SELECT(columns) | INSERT | ALTER | CREATE | DROP | TRUNCATE | OPTIMIZE | SHOW | KILL QUERY | ACCESS MANAGEMENT | SYSTEM | INTROSPECTION | SOURCES | dictGet | NONE | ALL [PRIVILEGES]} [, ...]
+    ON {*.* | database.* | database.table | * | table}
+    TO {user | role | CURRENT_USER} [,...]
+    [WITH GRANT OPTION]
+    ```
+    """
+    node = self.context.cluster.node(node)
+
+    Scenario(run=grant_privileges)
+
+    # with nonexistent object name, GRANT assumes type role
+    with Scenario("I grant privilege to role that does not exist", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Grant_Privilege_None("1.0")]):
+        with Given("I ensure that role does not exist"):
+            node.query("DROP ROLE IF EXISTS role0")
+        with When("I grant privilege to nonexistent role"):
+            exitcode, message = errors.role_not_found_in_disk(name="role0")
+            node.query("GRANT NONE TO role0", exitcode=exitcode, message=message)
+
+    with Scenario("I grant privilege ON CLUSTER", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Grant_Privilege_OnCluster("1.0"),
+            RQ_SRS_006_RBAC_Grant_Privilege_None("1.0")]):
+        with setup(node):
+            with When("I grant privilege ON CLUSTER"):
+                node.query("GRANT ON CLUSTER sharded_cluster NONE TO user0")
+
+    with Scenario("I grant privilege on fake cluster, throws exception", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Grant_Privilege_OnCluster("1.0")]):
+        with setup(node):
+            with When("I grant privilege ON CLUSTER"):
+                exitcode, message = errors.cluster_not_found("fake_cluster")
+                node.query("GRANT ON CLUSTER fake_cluster NONE TO user0", exitcode=exitcode, message=message)
+
+    with Scenario("I grant privilege to multiple users and roles", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Grant_Privilege_To("1.0"),
+            RQ_SRS_006_RBAC_Grant_Privilege_None("1.0")]):
+        with setup(node):
+            with When("I grant privilege to several users"):
+                node.query("GRANT NONE TO user0, user1, role1")
+
+    with Scenario("I grant privilege to current user", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Grant_Privilege_ToCurrentUser("1.0"),
+            RQ_SRS_006_RBAC_Grant_Privilege_None("1.0")]):
+        with setup(node):
+            with When("I grant privilege to current user"):
+                node.query("GRANT NONE TO CURRENT_USER", settings = [("user","user0")])
+
+    with Scenario("I grant privilege NONE to default user, throws exception", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Grant_Privilege_ToCurrentUser("1.0"),
+            RQ_SRS_006_RBAC_Grant_Privilege_None("1.0")]):
+        with setup(node):
+            with When("I grant privilege to current user"):
+                exitcode, message = errors.cannot_update_default()
+                node.query("GRANT NONE TO CURRENT_USER", exitcode=exitcode, message=message)
+
+    with Scenario("I grant privilege with grant option", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Grant_Privilege_GrantOption("1.0"),
+            RQ_SRS_006_RBAC_Grant_Privilege_None("1.0")]):
+        with setup(node):
+            with When("I grant privilege with grant option"):
+                node.query("GRANT NONE ON *.* TO user0 WITH GRANT OPTION")
diff --git a/tests/testflows/rbac/tests/syntax/grant_role.py b/tests/testflows/rbac/tests/syntax/grant_role.py
new file mode 100644
index 00000000000..48e5101e6cf
--- /dev/null
+++ b/tests/testflows/rbac/tests/syntax/grant_role.py
@@ -0,0 +1,115 @@
+from contextlib import contextmanager
+
+from testflows.core import *
+
+from rbac.requirements import *
+import rbac.tests.errors as errors
+
+@TestFeature
+@Name("grant role")
+@Args(format_description=False)
+def feature(self, node="clickhouse1"):
+    """Check grant query syntax.
+
+    ```sql
+    GRANT [ON CLUSTER cluster_name] role [,...] TO {user | another_role | CURRENT_USER} [,...] [WITH ADMIN OPTION]
+    ```
+    """
+    node = self.context.cluster.node(node)
+
+    @contextmanager
+    def setup(users=0,roles=0):
+        try:
+            with Given("I have some users and roles"):
+                for i in range(users):
+                    node.query(f"CREATE USER OR REPLACE user{i}")
+                for j in range(roles):
+                    node.query(f"CREATE ROLE OR REPLACE role{j}")
+            yield
+        finally:
+            with Finally("I drop the users and roles"):
+                for i in range(users):
+                    node.query(f"DROP USER IF EXISTS user{i}")
+                for j in range(roles):
+                    node.query(f"DROP ROLE IF EXISTS role{j}")
+
+    with Scenario("I grant a role to a user", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Grant_Role("1.0")]):
+        with setup(1,1):
+            with When("I grant a role"):
+                node.query("GRANT role0 TO user0")
+
+    with Scenario("I grant a nonexistent role to user", requirements=[
+            RQ_SRS_006_RBAC_Grant_Role("1.0")]):
+        with setup(1,0):
+            with When("I grant nonexistent role to a user"):
+                exitcode, message = errors.role_not_found_in_disk(name="role0")
+                node.query("GRANT role0 TO user0", exitcode=exitcode, message=message)
+
+    # with nonexistent object name, GRANT assumes type role (treats user0 as role)
+    with Scenario("I grant a role to a nonexistent user", requirements=[
+            RQ_SRS_006_RBAC_Grant_Role("1.0")]):
+        with setup(0,1):
+            with When("I grant role to a nonexistent user"):
+                exitcode, message = errors.role_not_found_in_disk(name="user0")
+                node.query("GRANT role0 TO user0", exitcode=exitcode, message=message)
+
+    with Scenario("I grant a nonexistent role to a nonexistent user", requirements=[
+            RQ_SRS_006_RBAC_Grant_Role("1.0")]):
+        with setup(0,0):
+            with When("I grant nonexistent role to a nonexistent user"):
+                exitcode, message = errors.role_not_found_in_disk(name="role0")
+                node.query("GRANT role0 TO user0", exitcode=exitcode, message=message)
+
+    with Scenario("I grant a role to multiple users", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Grant_Role("1.0")]):
+        with setup(2,1):
+            with When("I grant role to multiple users"):
+                node.query("GRANT role0 TO user0, user1")
+
+    with Scenario("I grant multiple roles to multiple users", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Grant_Role("1.0")]):
+        with setup(2,2):
+            with When("I grant multiple roles to multiple users"):
+                node.query("GRANT role0, role1 TO user0, user1")
+
+    with Scenario("I grant role to current user", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Grant_Role_CurrentUser("1.0")]):
+        with setup(1,1):
+            with Given("I have a user with access management privilege"):
+                node.query("GRANT ACCESS MANAGEMENT ON *.* TO user0")
+            with When("I grant role to current user"):
+                node.query("GRANT role0 TO CURRENT_USER", settings = [("user","user0")])
+
+    with Scenario("I grant role to default user, throws exception", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Grant_Role_CurrentUser("1.0")]):
+        with setup(1,1):
+            with When("I grant role to default user"):
+                exitcode, message = errors.cannot_update_default()
+                node.query("GRANT role0 TO CURRENT_USER", exitcode=exitcode, message=message)
+
+    with Scenario("I grant role to user with admin option", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Grant_Role_AdminOption("1.0")]):
+        with setup(1,1):
+            with When("I grant role to a user with admin option"):
+                node.query("GRANT role0 TO user0 WITH ADMIN OPTION")
+
+    with Scenario("I grant role to user on cluster", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Grant_Role_OnCluster("1.0")]):
+        try:
+            with Given("I have a user and a role on a
cluster"): + node.query("CREATE USER user0 ON CLUSTER sharded_cluster") + node.query("CREATE ROLE role0 ON CLUSTER sharded_cluster") + with When("I grant the role to the user"): + node.query("GRANT ON CLUSTER sharded_cluster role0 TO user0") + finally: + with Finally("I drop the user and role"): + node.query("DROP USER user0 ON CLUSTER sharded_cluster") + node.query("DROP ROLE role0 ON CLUSTER sharded_cluster") + + with Scenario("I grant role to user on fake cluster, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Grant_Role_OnCluster("1.0")]): + with setup(1,1): + with When("I grant the role to the user"): + exitcode, message = errors.cluster_not_found("fake_cluster") + node.query("GRANT ON CLUSTER fake_cluster role0 TO user0", exitcode=exitcode, message=message) \ No newline at end of file diff --git a/tests/testflows/rbac/tests/syntax/revoke_privilege.py b/tests/testflows/rbac/tests/syntax/revoke_privilege.py new file mode 100644 index 00000000000..62130c4bf9a --- /dev/null +++ b/tests/testflows/rbac/tests/syntax/revoke_privilege.py @@ -0,0 +1,159 @@ +from contextlib import contextmanager + +from testflows.core import * + +from rbac.requirements import * +import rbac.tests.errors as errors + +@contextmanager +def setup(node): + try: + with Given("I have some users and roles"): + node.query("CREATE USER OR REPLACE user0 ON CLUSTER sharded_cluster") + node.query("CREATE USER OR REPLACE user1") + node.query("CREATE ROLE OR REPLACE role1") + yield + finally: + with Finally("I drop the users and roles"): + node.query("DROP USER IF EXISTS user0 ON CLUSTER sharded_cluster") + node.query("DROP USER IF EXISTS user1") + node.query("DROP ROLE IF EXISTS role1") + + +@TestOutline(Scenario) +@Examples("privilege on allow_introspection", [ + ("dictGet", ("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_DictGet("1.0"))), + ("INTROSPECTION", ("*.*",), True, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Introspection("1.0"))), + ("SELECT", ("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Select("1.0"))), + ("INSERT",("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Insert("1.0"))), + ("ALTER",("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Alter("1.0"))), + ("CREATE",("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Create("1.0"))), + ("DROP",("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Drop("1.0"))), + ("TRUNCATE",("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Truncate("1.0"))), + ("OPTIMIZE",("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Optimize("1.0"))), + ("SHOW",("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Show("1.0"))), + ("KILL QUERY",("*.*",), False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_KillQuery("1.0"))), + ("ACCESS MANAGEMENT",("*.*",), False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_AccessManagement("1.0"))), + ("SYSTEM",("db0.table0","db0.*","*.*","tb0","*"), False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_System("1.0"))), + ("SOURCES",("*.*",), False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Sources("1.0"))), + ("ALL",("*.*",), True, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_All("1.0"))), + ("ALL PRIVILEGES",("*.*",), True, 
Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_All("1.0"))), # alias for ALL
+    ],)
+def revoke_privileges(self, privilege, on, allow_introspection, node="clickhouse1"):
+    revoke_privilege(privilege=privilege, on=on, allow_introspection=allow_introspection, node=node)
+
+@TestOutline(Scenario)
+@Requirements([RQ_SRS_006_RBAC_Revoke_Privilege_Any("1.0"), RQ_SRS_006_RBAC_Revoke_Privilege_PrivelegeColumns("1.0")])
+def revoke_privilege(self, privilege, on, allow_introspection, node="clickhouse1"):
+    node = self.context.cluster.node(node)
+    for on_ in on:
+        with When(f"I revoke {privilege} privilege from user on {on_}"):
+            with setup(node):
+                settings = []
+                if allow_introspection:
+                    settings.append(("allow_introspection_functions", 1))
+                    node.query("SET allow_introspection_functions = 1")
+                with When("I revoke privilege without columns"):
+                    node.query(f"REVOKE {privilege} ON {on_} FROM user0", settings=settings)
+
+                # revoke a column-specific privilege on some column 'x'
+                with When("I revoke privilege with columns"):
+                    node.query(f"REVOKE {privilege}(x) ON {on_} FROM user0", settings=settings)
+
+@TestFeature
+@Name("revoke privilege")
+@Args(format_description=False)
+def feature(self, node="clickhouse1"):
+    """Check revoke privilege syntax.
+
+    ```sql
+    REVOKE [ON CLUSTER cluster_name] privilege
+    [(column_name [,...])] [,...]
+    ON {db.table|db.*|*.*|table|*}
+    FROM {user | CURRENT_USER} [,...] | ALL | ALL EXCEPT {user | CURRENT_USER} [,...]
+    ```
+    """
+    node = self.context.cluster.node(node)
+
+    Scenario(run=revoke_privileges)
+
+    with Scenario("I revoke privilege ON CLUSTER", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Revoke_Privilege_Cluster("1.0"),
+            RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]):
+        with setup(node):
+            with When("I revoke privilege ON CLUSTER"):
+                node.query("REVOKE ON CLUSTER sharded_cluster NONE FROM user0")
+
+    with Scenario("I revoke privilege ON fake CLUSTER, throws exception", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Revoke_Privilege_Cluster("1.0"),
+            RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]):
+        with setup(node):
+            with When("I revoke privilege ON CLUSTER"):
+                exitcode, message = errors.cluster_not_found("fake_cluster")
+                node.query("REVOKE ON CLUSTER fake_cluster NONE FROM user0",
+                    exitcode=exitcode, message=message)
+
+    with Scenario("I revoke privilege from multiple users and roles", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Revoke_Privilege_From("1.0"),
+            RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]):
+        with setup(node):
+            with When("I revoke privilege from multiple users"):
+                node.query("REVOKE NONE FROM user0, user1, role1")
+
+    with Scenario("I revoke privilege from current user", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Revoke_Privilege_From("1.0"),
+            RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]):
+        with setup(node):
+            with When("I revoke privilege from current user"):
+                node.query("REVOKE NONE FROM CURRENT_USER", settings = [("user","user0")])
+
+    with Scenario("I revoke privilege from all users", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Revoke_Privilege_From("1.0"),
+            RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]):
+        with setup(node):
+            with When("I revoke privilege from all users"):
+                exitcode, message = errors.cannot_update_default()
+                node.query("REVOKE NONE FROM ALL", exitcode=exitcode, message=message)
+
+    with Scenario("I revoke privilege from default user", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Revoke_Privilege_From("1.0"),
+            RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]):
+        with setup(node):
+            with When("I revoke privilege from default
user"): + exitcode, message = errors.cannot_update_default() + node.query("REVOKE NONE FROM default", exitcode=exitcode,message=message) + + #By default, ClickHouse treats unnamed object as role + with Scenario("I revoke privilege from nonexistent role, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Revoke_Privilege_From("1.0"), + RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]): + role = "role5" + with Given(f"I ensure that role {role} does not exist"): + node.query(f"DROP ROLE IF EXISTS {role}") + with When(f"I revoke privilege from nonexistent role {role}"): + exitcode, message = errors.role_not_found_in_disk(role) + node.query(f"REVOKE NONE FROM {role}", exitcode=exitcode,message=message) + + with Scenario("I revoke privilege from ALL EXCEPT nonexistent role, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Revoke_Privilege_From("1.0"), + RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]): + role = "role5" + with Given(f"I ensure that role {role} does not exist"): + node.query(f"DROP ROLE IF EXISTS {role}") + with When(f"I revoke privilege from nonexistent role {role}"): + exitcode, message = errors.role_not_found_in_disk(role) + node.query(f"REVOKE NONE FROM ALL EXCEPT {role}", exitcode=exitcode,message=message) + + with Scenario("I revoke privilege from all except some users and roles", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Revoke_Privilege_From("1.0"), + RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]): + with setup(node): + with When("I revoke privilege all except some users"): + node.query("REVOKE NONE FROM ALL EXCEPT default, user0, role1") + + with Scenario("I revoke privilege from all except current user", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Revoke_Privilege_From("1.0"), + RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]): + with setup(node): + with When("I revoke privilege from all except current user"): + node.query("REVOKE NONE FROM ALL EXCEPT CURRENT_USER") \ No newline at end of file diff --git a/tests/testflows/rbac/tests/syntax/revoke_role.py b/tests/testflows/rbac/tests/syntax/revoke_role.py new file mode 100644 index 00000000000..8b150c74705 --- /dev/null +++ b/tests/testflows/rbac/tests/syntax/revoke_role.py @@ -0,0 +1,198 @@ +from contextlib import contextmanager + +from testflows.core import * + +from rbac.requirements import * +import rbac.tests.errors as errors + +@TestFeature +@Name("revoke role") +@Args(format_description=False) +def feature(self, node="clickhouse1"): + """Check revoke query syntax. + + ```sql + REVOKE [ON CLUSTER cluster_name] [ADMIN OPTION FOR] + role [,...] FROM {user | role | CURRENT_USER} [,...] + | ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...] 
+ ``` + """ + node = self.context.cluster.node(node) + + @contextmanager + def setup(users=2,roles=2): + try: + with Given("I have some users"): + for i in range(users): + node.query(f"CREATE USER OR REPLACE user{i}") + with And("I have some roles"): + for i in range(roles): + node.query(f"CREATE ROLE OR REPLACE role{i}") + yield + finally: + with Finally("I drop the users"): + for i in range(users): + node.query(f"DROP USER IF EXISTS user{i}") + with And("I drop the roles"): + for i in range(roles): + node.query(f"DROP ROLE IF EXISTS role{i}") + + with Scenario("I revoke a role from a user",flags=TE, requirements=[ + RQ_SRS_006_RBAC_Revoke_Role("1.0")]): + with setup(): + with When("I revoke a role"): + node.query("REVOKE role0 FROM user0") + + with Scenario("I revoke a nonexistent role from user", requirements=[ + RQ_SRS_006_RBAC_Revoke_Role("1.0")]): + with setup(1,0): + with When("I revoke nonexistent role from a user"): + exitcode, message = errors.role_not_found_in_disk(name="role0") + node.query("REVOKE role0 FROM user0", exitcode=exitcode, message=message) + + # with nonexistent object name, REVOKE assumes type role (treats user0 as role) + with Scenario("I revoke a role from a nonexistent user", requirements=[ + RQ_SRS_006_RBAC_Revoke_Role("1.0")]): + with setup(0,1): + with When("I revoke role from a nonexistent user"): + exitcode, message = errors.role_not_found_in_disk(name="user0") + node.query("REVOKE role0 FROM user0", exitcode=exitcode, message=message) + + # with nonexistent object name, REVOKE assumes type role (treats user0 as role) + with Scenario("I revoke a role from ALL EXCEPT nonexistent user", requirements=[ + RQ_SRS_006_RBAC_Revoke_Role("1.0")]): + with setup(0,1): + with When("I revoke role from a nonexistent user"): + exitcode, message = errors.role_not_found_in_disk(name="user0") + node.query("REVOKE role0 FROM ALL EXCEPT user0", exitcode=exitcode, message=message) + + with Scenario("I revoke a nonexistent role from a nonexistent user", requirements=[ + RQ_SRS_006_RBAC_Revoke_Role("1.0")]): + with setup(0,0): + with When("I revoke nonexistent role from a nonexistent user"): + exitcode, message = errors.role_not_found_in_disk(name="role0") + node.query("REVOKE role0 FROM user0", exitcode=exitcode, message=message) + + with Scenario("I revoke a role from multiple users", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Revoke_Role("1.0")]): + with setup(): + with When("I revoke a role from multiple users"): + node.query("REVOKE role0 FROM user0, user1") + + with Scenario("I revoke multiple roles from multiple users", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Revoke_Role("1.0")]): + with setup(): + node.query("REVOKE role0, role1 FROM user0, user1") + + #user is default, expect exception + with Scenario("I revoke a role from default user", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Revoke_Role("1.0"), + RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]): + with setup(): + with When("I revoke a role from default user"): + exitcode, message = errors.cannot_update_default() + node.query("REVOKE role0 FROM CURRENT_USER", exitcode=exitcode, message=message) + + #user is user0 + with Scenario("I revoke a role from current user", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Revoke_Role("1.0"), + RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]): + with setup(): + with When("I revoke a role from current user"): + node.query("REVOKE role0 FROM CURRENT_USER", settings = [("user","user0")]) + + #user is default, expect exception + with Scenario("I revoke a role from all", flags=TE, 
requirements=[ + RQ_SRS_006_RBAC_Revoke_Role("1.0"), + RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]): + with setup(): + with When("I revoke a role from all"): + exitcode, message = errors.cannot_update_default() + node.query("REVOKE role0 FROM ALL", exitcode=exitcode, message=message) + + #user is default, expect exception + with Scenario("I revoke multiple roles from all", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Revoke_Role("1.0"), + RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]): + with setup(): + with When("I revoke multiple roles from all"): + exitcode, message = errors.cannot_update_default() + node.query("REVOKE role0, role1 FROM ALL", exitcode=exitcode, message=message) + + with Scenario("I revoke a role from all but current user", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Revoke_Role("1.0"), + RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]): + with setup(): + with When("I revoke a role from all except current"): + node.query("REVOKE role0 FROM ALL EXCEPT CURRENT_USER") + + with Scenario("I revoke a role from all but default user", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Revoke_Role("1.0"), + RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]): + with setup(): + with When("I revoke a role from all except default"): + node.query("REVOKE role0 FROM ALL EXCEPT default", + settings = [("user","user0")]) + + with Scenario("I revoke multiple roles from all but default user", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Revoke_Role("1.0"), + RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]): + with setup(): + with When("I revoke multiple roles from all except default"): + node.query("REVOKE role0, role1 FROM ALL EXCEPT default", settings = [("user","user0")]) + + with Scenario("I revoke a role from a role", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Revoke_Role("1.0")]): + with setup(): + with When("I revoke a role from a role"): + node.query("REVOKE role0 FROM role1") + + with Scenario("I revoke a role from a role and a user", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Revoke_Role("1.0")]): + with setup(): + with When("I revoke a role from multiple roles"): + node.query("REVOKE role0 FROM role1, user0") + + with Scenario("I revoke a role from a user on cluster", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Revoke_Role_Cluster("1.0")]): + with Given("I have a role and a user on a cluster"): + node.query("CREATE USER OR REPLACE user0 ON CLUSTER sharded_cluster") + node.query("CREATE ROLE OR REPLACE role0 ON CLUSTER sharded_cluster") + with When("I revoke a role from user on a cluster"): + node.query("REVOKE ON CLUSTER sharded_cluster role0 FROM user0") + with Finally("I drop the user and role"): + node.query("DROP USER IF EXISTS user0 ON CLUSTER sharded_cluster") + node.query("DROP ROLE IF EXISTS role0 ON CLUSTER sharded_cluster") + + with Scenario("I revoke a role on fake cluster, throws exception", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Revoke_Role_Cluster("1.0")]): + with When("I revoke a role from user on a cluster"): + exitcode, message = errors.cluster_not_found("fake_cluster") + node.query("REVOKE ON CLUSTER fake_cluster role0 FROM user0", exitcode=exitcode, message=message) + + with Scenario("I revoke multiple roles from multiple users on cluster", flags=TE, requirements=[ + RQ_SRS_006_RBAC_Revoke_Role("1.0"), + RQ_SRS_006_RBAC_Revoke_Role_Cluster("1.0")]): + with Given("I have multiple roles and multiple users on a cluster"): + for i in range(2): + node.query(f"CREATE USER OR REPLACE user{i} ON CLUSTER sharded_cluster") + node.query(f"CREATE ROLE OR REPLACE role{i} ON CLUSTER 
sharded_cluster")
+        with When("I revoke multiple roles from multiple users on cluster"):
+            node.query("REVOKE ON CLUSTER sharded_cluster role0, role1 FROM user0, user1")
+        with Finally("I drop the roles and users"):
+            for i in range(2):
+                node.query(f"DROP USER IF EXISTS user{i} ON CLUSTER sharded_cluster")
+                node.query(f"DROP ROLE IF EXISTS role{i} ON CLUSTER sharded_cluster")
+
+    with Scenario("I revoke admin option for role from a user", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Revoke_AdminOption("1.0")]):
+        with setup():
+            with When("I revoke admin option for role from a user"):
+                node.query("REVOKE ADMIN OPTION FOR role0 FROM user0")
+
+    with Scenario("I revoke admin option for multiple roles from multiple users", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Revoke_Role("1.0"),
+            RQ_SRS_006_RBAC_Revoke_AdminOption("1.0")]):
+        with setup():
+            with When("I revoke admin option for multiple roles from multiple users"):
+                node.query("REVOKE ADMIN OPTION FOR role0, role1 FROM user0, user1")
\ No newline at end of file
diff --git a/tests/testflows/rbac/tests/syntax/set_default_role.py b/tests/testflows/rbac/tests/syntax/set_default_role.py
new file mode 100644
index 00000000000..ec0d41554da
--- /dev/null
+++ b/tests/testflows/rbac/tests/syntax/set_default_role.py
@@ -0,0 +1,120 @@
+from contextlib import contextmanager
+
+from testflows.core import *
+
+from rbac.requirements import *
+import rbac.tests.errors as errors
+
+@TestFeature
+@Name("set default role")
+@Args(format_description=False)
+def feature(self, node="clickhouse1"):
+    """Check set default role query syntax.
+
+    ```sql
+    SET DEFAULT ROLE {NONE | role [,...] | ALL | ALL EXCEPT role [,...]} TO {user|CURRENT_USER} [,...]
+    ```
+    """
+    node = self.context.cluster.node(node)
+
+    @contextmanager
+    def setup(users=2,roles=2):
+        try:
+            with Given("I have some users"):
+                for i in range(users):
+                    node.query(f"CREATE USER OR REPLACE user{i}")
+            with And("I have some roles"):
+                for i in range(roles):
+                    node.query(f"CREATE ROLE OR REPLACE role{i}")
+            yield
+        finally:
+            with Finally("I drop the users"):
+                for i in range(users):
+                    node.query(f"DROP USER IF EXISTS user{i}")
+            with And("I drop the roles"):
+                for i in range(roles):
+                    node.query(f"DROP ROLE IF EXISTS role{i}")
+
+    with Scenario("I set default a nonexistent role to user", requirements=[
+            RQ_SRS_006_RBAC_SetDefaultRole("1.0")]):
+        with setup(1,0):
+            with When("I set default nonexistent role to a user"):
+                exitcode, message = errors.role_not_found_in_disk(name="role0")
+                node.query("SET DEFAULT ROLE role0 TO user0", exitcode=exitcode, message=message)
+
+    with Scenario("I set default ALL EXCEPT a nonexistent role to user", requirements=[
+            RQ_SRS_006_RBAC_SetDefaultRole("1.0")]):
+        with setup(1,0):
+            with When("I set default nonexistent role to a user"):
+                exitcode, message = errors.role_not_found_in_disk(name="role0")
+                node.query("SET DEFAULT ROLE ALL EXCEPT role0 TO user0", exitcode=exitcode, message=message)
+
+    with Scenario("I set default a role to a nonexistent user", requirements=[
+            RQ_SRS_006_RBAC_SetDefaultRole("1.0")]):
+        with setup(0,1):
+            with When("I set default role to a nonexistent user"):
+                exitcode, message = errors.user_not_found_in_disk(name="user0")
+                node.query("SET DEFAULT ROLE role0 TO user0", exitcode=exitcode, message=message)
+
+    # In SET DEFAULT ROLE, the nonexistent user is detected first, so the exception refers to the user
+    with Scenario("I set default a nonexistent role to a nonexistent user", requirements=[
+            RQ_SRS_006_RBAC_SetDefaultRole("1.0")]):
+        with setup(0,0):
+            with When("I set a nonexistent role as default for a nonexistent user"):
+                exitcode, message = errors.user_not_found_in_disk(name="user0")
+                node.query("SET DEFAULT ROLE role0 TO user0", exitcode=exitcode, message=message)
+
+    try:
+        with Given("I have some roles and some users"):
+            for i in range(2):
+                node.query(f"CREATE ROLE role{i}")
+                node.query(f"CREATE USER user{i}")
+            node.query(f"GRANT role0, role1 TO user0, user1")
+
+        with Scenario("I set default role for a user to none", flags = TE, requirements=[
+                RQ_SRS_006_RBAC_SetDefaultRole_None("1.0")]):
+            with When("I set no roles default for user"):
+                node.query("SET DEFAULT ROLE NONE TO user0")
+
+        with Scenario("I set one default role for a user", flags = TE, requirements=[
+                RQ_SRS_006_RBAC_SetDefaultRole("1.0")]):
+            with When("I set a default role for user"):
+                node.query("SET DEFAULT ROLE role0 TO user0")
+
+        with Scenario("I set one default role for user default, throws exception", flags = TE, requirements=[
+                RQ_SRS_006_RBAC_SetDefaultRole("1.0")]):
+            with When("I set a default role for default"):
+                exitcode, message = errors.cannot_update_default()
+                node.query("SET DEFAULT ROLE role0 TO default", exitcode=exitcode, message=message)
+
+        with Scenario("I set multiple default roles for a user", flags = TE, requirements=[
+                RQ_SRS_006_RBAC_SetDefaultRole("1.0")]):
+            with When("I set multiple default roles to user"):
+                node.query("SET DEFAULT ROLE role0, role1 TO user0")
+
+        with Scenario("I set multiple default roles for multiple users", flags = TE, requirements=[
+                RQ_SRS_006_RBAC_SetDefaultRole("1.0")]):
+            with When("I set multiple default roles to multiple users"):
+                node.query("SET DEFAULT ROLE role0, role1 TO user0, user1")
+
+        with Scenario("I set all roles as default for a user", flags = TE, requirements=[
+                RQ_SRS_006_RBAC_SetDefaultRole_All("1.0")]):
+            with When("I set all roles default to user"):
+                node.query("SET DEFAULT ROLE ALL TO user0")
+
+        with Scenario("I set all roles except one for a user", flags = TE, requirements=[
+                RQ_SRS_006_RBAC_SetDefaultRole_AllExcept("1.0")]):
+            with When("I set all except one role default to user"):
+                node.query("SET DEFAULT ROLE ALL EXCEPT role0 TO user0")
+
+        with Scenario("I set default role for current user", flags = TE, requirements=[
+                RQ_SRS_006_RBAC_SetDefaultRole_CurrentUser("1.0")]):
+            with When("I set default role to current user"):
+                node.query("GRANT ACCESS MANAGEMENT ON *.* TO user0")
+                node.query("SET DEFAULT ROLE role0 TO CURRENT_USER", settings = [("user","user0")])
+
+    finally:
+        with Finally("I drop the roles and users"):
+            for i in range(2):
+                node.query(f"DROP ROLE IF EXISTS role{i}")
+                node.query(f"DROP USER IF EXISTS user{i}")
diff --git a/tests/testflows/rbac/tests/syntax/set_role.py b/tests/testflows/rbac/tests/syntax/set_role.py
new file mode 100644
index 00000000000..fdd1c95112d
--- /dev/null
+++ b/tests/testflows/rbac/tests/syntax/set_role.py
@@ -0,0 +1,91 @@
+from contextlib import contextmanager
+
+from testflows.core import *
+
+from rbac.requirements import *
+import rbac.tests.errors as errors
+
+@TestFeature
+@Name("set role")
+@Args(format_description=False)
+def feature(self, node="clickhouse1"):
+    """Check set role query syntax.
+
+    ```sql
+    SET ROLE {DEFAULT | NONE | role [,...] | ALL | ALL EXCEPT role [,...]}
+    ```
+    """
+    node = self.context.cluster.node(node)
+
+    @contextmanager
+    def setup(roles=0):
+        try:
+            with Given("I have some roles"):
+                for i in range(roles):
+                    node.query(f"CREATE ROLE role{i}")
+            yield
+        finally:
+            with Finally("I drop the roles"):
+                for i in range(roles):
+                    node.query(f"DROP ROLE IF EXISTS role{i}")
+
+    with Scenario("I set default role for current user", flags = TE, requirements=[
+            RQ_SRS_006_RBAC_SetRole_Default("1.0")]):
+        with When("I set default role for current user"):
+            node.query("SET ROLE DEFAULT")
+
+    with Scenario("I set no role for current user", flags = TE, requirements=[
+            RQ_SRS_006_RBAC_SetRole_None("1.0")]):
+        with When("I set no role for current user"):
+            node.query("SET ROLE NONE")
+
+    with Scenario("I set nonexistent role, throws exception", flags = TE, requirements=[
+            RQ_SRS_006_RBAC_SetRole_None("1.0")]):
+        with Given("I ensure that role role5 does not exist"):
+            node.query("DROP ROLE IF EXISTS role5")
+        with When("I set nonexistent role for current user"):
+            exitcode, message = errors.role_not_found_in_disk("role5")
+            node.query("SET ROLE role5", exitcode=exitcode, message=message)
+
+    with Scenario("I set ALL EXCEPT nonexistent role, throws exception", flags = TE, requirements=[
+            RQ_SRS_006_RBAC_SetRole_None("1.0")]):
+        with Given("I ensure that role role5 does not exist"):
+            node.query("DROP ROLE IF EXISTS role5")
+        with When("I set ALL EXCEPT nonexistent role for current user"):
+            exitcode, message = errors.role_not_found_in_disk("role5")
+            node.query("SET ROLE ALL EXCEPT role5", exitcode=exitcode, message=message)
+
+    with Scenario("I set one role for current user", flags = TE, requirements=[
+            RQ_SRS_006_RBAC_SetRole("1.0")]):
+        with setup(1):
+            with Given("I have a user"):
+                node.query("CREATE USER OR REPLACE user0")
+            with And("I grant user a role"):
+                node.query("GRANT role0 TO user0")
+            with When("I set role for the user"):
+                node.query("SET ROLE role0", settings = [("user","user0")])
+            with Finally("I drop the user"):
+                node.query("DROP USER user0")
+
+    with Scenario("I set multiple roles for current user", flags = TE, requirements=[
+            RQ_SRS_006_RBAC_SetRole("1.0")]):
+        with setup(2):
+            with Given("I have a user"):
+                node.query("CREATE USER OR REPLACE user0")
+            with And("I grant user a role"):
+                node.query("GRANT role0, role1 TO user0")
+            with When("I set roles for the user"):
+                node.query("SET ROLE role0, role1", settings = [("user","user0")])
+            with Finally("I drop the user"):
+                node.query("DROP USER user0")
+
+    with Scenario("I set all roles for current user", flags = TE, requirements=[
+            RQ_SRS_006_RBAC_SetRole_All("1.0")]):
+        with When("I set all roles for current user"):
+            node.query("SET ROLE ALL")
+
+    with Scenario("I set all roles except one for current user", flags = TE, requirements=[
+            RQ_SRS_006_RBAC_SetRole_AllExcept("1.0")]):
+        with setup(1):
+            with When("I run set role command"):
+                node.query("SET ROLE ALL EXCEPT role0")
\ No newline at end of file
diff --git a/tests/testflows/rbac/tests/syntax/show_create_quota.py b/tests/testflows/rbac/tests/syntax/show_create_quota.py
new file mode 100644
index 00000000000..0954a24d2db
--- /dev/null
+++ b/tests/testflows/rbac/tests/syntax/show_create_quota.py
@@ -0,0 +1,46 @@
+from contextlib import contextmanager
+
+from testflows.core import *
+
+from rbac.requirements import *
+
+@TestFeature
+@Name("show create quota")
+def feature(self, node="clickhouse1"):
+    """Check show create quota query syntax.
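+
+    Omitting the name is covered as the short form of SHOW CREATE QUOTA CURRENT.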
+
+    ```sql
+    SHOW CREATE QUOTA [name | CURRENT]
+    ```
+    """
+    node = self.context.cluster.node(node)
+
+    @contextmanager
+    def cleanup(quota):
+        try:
+            with Given("I have a quota"):
+                node.query(f"CREATE QUOTA {quota}")
+            yield
+        finally:
+            with Finally("I drop the quota"):
+                node.query(f"DROP QUOTA IF EXISTS {quota}")
+
+    with Scenario("I show create quota", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Name("1.0")]):
+        with cleanup("quota0"):
+            with When("I run show create quota command"):
+                node.query("SHOW CREATE QUOTA quota0")
+
+    with Scenario("I show create quota current", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Current("1.0")]):
+        with cleanup("quota1"):
+            with When("I run show create quota command"):
+                node.query("SHOW CREATE QUOTA CURRENT")
+
+    with Scenario("I show create quota current short form", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Current("1.0")]):
+        with cleanup("quota2"):
+            with When("I run show create quota command"):
+                node.query("SHOW CREATE QUOTA")
diff --git a/tests/testflows/rbac/tests/syntax/show_create_role.py b/tests/testflows/rbac/tests/syntax/show_create_role.py
new file mode 100644
index 00000000000..d7b77bce944
--- /dev/null
+++ b/tests/testflows/rbac/tests/syntax/show_create_role.py
@@ -0,0 +1,39 @@
+from contextlib import contextmanager
+
+from testflows.core import *
+
+from rbac.requirements import *
+import rbac.tests.errors as errors
+
+@TestFeature
+@Name("show create role")
+def feature(self, node="clickhouse1"):
+    """Check show create role query syntax.
+
+    ```sql
+    SHOW CREATE ROLE name
+    ```
+    """
+    node = self.context.cluster.node(node)
+
+    @contextmanager
+    def setup(role):
+        try:
+            with Given("I have a role"):
+                node.query(f"CREATE ROLE OR REPLACE {role}")
+            yield
+        finally:
+            with Finally("I drop the role"):
+                node.query(f"DROP ROLE IF EXISTS {role}")
+
+    with Scenario("I show create role", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Role_ShowCreate("1.0")]):
+        with setup("role0"):
+            with When("I run show create role command"):
+                node.query("SHOW CREATE ROLE role0")
+
+    with Scenario("I show create role, role doesn't exist, exception", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Role_ShowCreate("1.0")]):
+        with When("I run show create role to catch an exception"):
+            exitcode, message = errors.role_not_found_in_disk(name="role0")
+            node.query("SHOW CREATE ROLE role0", exitcode=exitcode, message=message)
\ No newline at end of file
diff --git a/tests/testflows/rbac/tests/syntax/show_create_row_policy.py b/tests/testflows/rbac/tests/syntax/show_create_row_policy.py
new file mode 100644
index 00000000000..5d8b104540c
--- /dev/null
+++ b/tests/testflows/rbac/tests/syntax/show_create_row_policy.py
@@ -0,0 +1,53 @@
+from contextlib import contextmanager
+
+from testflows.core import *
+
+from rbac.requirements import *
+
+@TestFeature
+@Name("show create row policy")
+def feature(self, node="clickhouse1"):
+    """Check show create row policy query syntax.
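+
+    Both the full ROW POLICY and the short POLICY spellings are covered.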
+
+    ```sql
+    SHOW CREATE [ROW] POLICY name ON [database.]table
+    ```
+    """
+    node = self.context.cluster.node(node)
+
+    @contextmanager
+    def cleanup(policy, on="default.foo"):
+        try:
+            with Given("I have a row policy"):
+                node.query(f"CREATE ROW POLICY {policy} ON {on}")
+            yield
+        finally:
+            with Finally("I drop the row policy"):
+                node.query(f"DROP ROW POLICY IF EXISTS {policy} ON {on}")
+
+    try:
+        with Given("I have a table"):
+            node.query(f"CREATE TABLE default.foo (x UInt64, y String) Engine=Memory")
+
+        with Scenario("I show create row policy", flags=TE, requirements=[
+                RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy("1.0")]):
+            with cleanup("policy0"):
+                with When("I run show create row policy command"):
+                    node.query("SHOW CREATE ROW POLICY policy0 ON default.foo")
+
+        with Scenario("I show create row policy on a table", flags=TE, requirements=[
+                RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy_On("1.0")]):
+            with cleanup("policy0"):
+                with When("I run show create row policy command"):
+                    node.query("SHOW CREATE ROW POLICY policy0 ON default.foo")
+
+        with Scenario("I show create row policy using short syntax on a table", flags=TE, requirements=[
+                RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy_On("1.0")]):
+            with cleanup("policy1",on="foo"):
+                with When("I run show create row policy command"):
+                    node.query("SHOW CREATE POLICY policy1 ON foo")
+    finally:
+        with Finally("I drop the table"):
+            node.query(f"DROP TABLE IF EXISTS default.foo")
diff --git a/tests/testflows/rbac/tests/syntax/show_create_settings_profile.py b/tests/testflows/rbac/tests/syntax/show_create_settings_profile.py
new file mode 100644
index 00000000000..6f715463539
--- /dev/null
+++ b/tests/testflows/rbac/tests/syntax/show_create_settings_profile.py
@@ -0,0 +1,38 @@
+from contextlib import contextmanager
+
+from testflows.core import *
+
+from rbac.requirements import *
+
+@TestFeature
+@Name("show create settings profile")
+def feature(self, node="clickhouse1"):
+    """Check show create settings profile query syntax.
+
+    ```sql
+    SHOW CREATE [SETTINGS] PROFILE name
+    ```
+    """
+    node = self.context.cluster.node(node)
+
+    @contextmanager
+    def cleanup(profile):
+        try:
+            with Given("I have a settings profile"):
+                node.query(f"CREATE SETTINGS PROFILE {profile}")
+            yield
+        finally:
+            with Finally("I drop the settings profile"):
+                node.query(f"DROP SETTINGS PROFILE IF EXISTS {profile}")
+
+    with Scenario("I show create settings profile", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_SettingsProfile_ShowCreateSettingsProfile("1.0")]):
+        with cleanup("profile0"):
+            with When("I run show create settings profile command"):
+                node.query("SHOW CREATE SETTINGS PROFILE profile0")
+
+    with Scenario("I show create settings profile short form", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_SettingsProfile_ShowCreateSettingsProfile("1.0")]):
+        with cleanup("profile1"):
+            with When("I run show create settings profile command"):
+                node.query("SHOW CREATE PROFILE profile1")
diff --git a/tests/testflows/rbac/tests/syntax/show_create_user.py b/tests/testflows/rbac/tests/syntax/show_create_user.py
new file mode 100644
index 00000000000..804b7e06959
--- /dev/null
+++ b/tests/testflows/rbac/tests/syntax/show_create_user.py
@@ -0,0 +1,39 @@
+from contextlib import contextmanager
+
+from testflows.core import *
+
+from rbac.requirements import *
+
+@TestFeature
+@Name("show create user")
+def feature(self, node="clickhouse1"):
+    """Check show create user query syntax.
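+
+    Covers both a named user and the CURRENT_USER form.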
+
+    ```sql
+    SHOW CREATE USER [name | CURRENT_USER]
+    ```
+    """
+    node = self.context.cluster.node(node)
+
+    @contextmanager
+    def setup(user):
+        try:
+            with Given("I have a user"):
+                node.query(f"CREATE USER {user}")
+            yield
+        finally:
+            with Finally("I drop the user"):
+                node.query(f"DROP USER IF EXISTS {user}")
+
+    with Scenario("I run show create on user with no options", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_User_ShowCreateUser_For("1.0")]):
+        with setup("user0"):
+            with When("I run show create user command"):
+                node.query("SHOW CREATE USER user0")
+
+    with Scenario("I run show create on current user", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_User_ShowCreateUser("1.0")]):
+        with When("I show create the current user"):
+            node.query("SHOW CREATE USER CURRENT_USER")
\ No newline at end of file
diff --git a/tests/testflows/rbac/tests/syntax/show_grants.py b/tests/testflows/rbac/tests/syntax/show_grants.py
new file mode 100644
index 00000000000..f6c797a6d76
--- /dev/null
+++ b/tests/testflows/rbac/tests/syntax/show_grants.py
@@ -0,0 +1,37 @@
+from contextlib import contextmanager
+
+from testflows.core import *
+
+from rbac.requirements import *
+
+@TestFeature
+@Name("show grants")
+def feature(self, node="clickhouse1"):
+    """Check show grants query syntax.
+
+    ```sql
+    SHOW GRANTS [FOR user_or_role]
+    ```
+    """
+    node = self.context.cluster.node(node)
+
+    @contextmanager
+    def setup(user):
+        try:
+            with Given("I have a user"):
+                node.query(f"CREATE USER {user}")
+            yield
+        finally:
+            with Finally("I drop the user"):
+                node.query(f"DROP USER IF EXISTS {user}")
+
+    with Scenario("I show grants for user", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Show_Grants_For("1.0")]):
+        with setup("user0"):
+            with When("I run show grants command"):
+                node.query("SHOW GRANTS FOR user0")
+
+    with Scenario("I show grants for current user", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Show_Grants("1.0")]):
+        with When("I show grants"):
+            node.query("SHOW GRANTS")
\ No newline at end of file
diff --git a/tests/testflows/rbac/tests/syntax/show_quotas.py b/tests/testflows/rbac/tests/syntax/show_quotas.py
new file mode 100644
index 00000000000..4003207354d
--- /dev/null
+++ b/tests/testflows/rbac/tests/syntax/show_quotas.py
@@ -0,0 +1,52 @@
+from contextlib import contextmanager
+
+from testflows.core import *
+
+from rbac.requirements import *
+
+@TestFeature
+@Name("show quotas")
+def feature(self, node="clickhouse1"):
+    """Check show quotas query syntax.
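+
+    The INTO OUTFILE, FORMAT, and SETTINGS clauses are covered as well.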
+
+    ```sql
+    SHOW QUOTAS
+    ```
+    """
+    node = self.context.cluster.node(node)
+
+    @contextmanager
+    def cleanup(quota):
+        try:
+            with Given("I have a quota"):
+                node.query(f"CREATE QUOTA OR REPLACE {quota}")
+            yield
+        finally:
+            with Finally("I drop the quota"):
+                node.query(f"DROP QUOTA IF EXISTS {quota}")
+
+    with Scenario("I show quotas", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Quota_ShowQuotas("1.0")]):
+        with cleanup("quota0"), cleanup("quota1"):
+            with When("I run show quotas command"):
+                node.query("SHOW QUOTAS")
+
+    with Scenario("I show quotas into outfile", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Quota_ShowQuotas_IntoOutfile("1.0")]):
+        with cleanup("quota0"), cleanup("quota1"):
+            with When("I run show quotas command"):
+                node.query("SHOW QUOTAS INTO OUTFILE 'quotas.txt'")
+
+    with Scenario("I show quotas with format", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Quota_ShowQuotas_Format("1.0")]):
+        with cleanup("quota0"), cleanup("quota1"):
+            with When("I run show quotas command"):
+                node.query("SHOW QUOTAS FORMAT TabSeparated")
+
+    with Scenario("I show quotas with settings", flags=TE, requirements=[
+            RQ_SRS_006_RBAC_Quota_ShowQuotas("1.0")]):
+        with cleanup("quota0"), cleanup("quota1"):
+            with When("I run show quotas command"):
+                node.query("SHOW QUOTAS SETTINGS max_memory_usage=5")
diff --git a/tests/testflows/rbac/tests/syntax/show_row_policies.py b/tests/testflows/rbac/tests/syntax/show_row_policies.py
new file mode 100644
index 00000000000..2bc1471fbe1
--- /dev/null
+++ b/tests/testflows/rbac/tests/syntax/show_row_policies.py
@@ -0,0 +1,60 @@
+from contextlib import contextmanager
+
+from testflows.core import *
+
+from rbac.requirements import *
+
+@TestFeature
+@Name("show row policies")
+def feature(self, node="clickhouse1"):
+    """Check show row policies query syntax.
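+
+    Both the ROW POLICIES and POLICIES spellings and the optional ON clause are covered.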
+
+    ```sql
+    SHOW [ROW] POLICIES [ON [database.]table]
+    ```
+    """
+    node = self.context.cluster.node(node)
+
+    @contextmanager
+    def cleanup(policy, on="default.foo"):
+        try:
+            with Given("I have a row policy"):
+                node.query(f"CREATE ROW POLICY {policy} ON {on}")
+            yield
+        finally:
+            with Finally("I drop the row policy"):
+                node.query(f"DROP ROW POLICY IF EXISTS {policy} ON {on}")
+
+    try:
+        with Given("I have a table"):
+            node.query(f"CREATE TABLE default.foo (x UInt64, y String) Engine=Memory")
+
+        with Scenario("I show row policies", flags=TE, requirements=[
+                RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies("1.0")]):
+            with cleanup("policy0"):
+                with When("I run show row policies command"):
+                    node.query("SHOW ROW POLICIES")
+
+        with Scenario("I show row policies using short syntax", flags=TE, requirements=[
+                RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies("1.0")]):
+            with cleanup("policy1"):
+                with When("I run show row policies command"):
+                    node.query("SHOW POLICIES")
+
+        with Scenario("I show row policies on a database table", flags=TE, requirements=[
+                RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies_On("1.0")]):
+            with cleanup("policy0"):
+                with When("I run show row policies command"):
+                    node.query("SHOW ROW POLICIES ON default.foo")
+
+        with Scenario("I show row policies on a table", flags=TE, requirements=[
+                RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies_On("1.0")]):
+            with cleanup("policy0"):
+                with When("I run show row policies command"):
+                    node.query("SHOW ROW POLICIES ON foo")
+
+    finally:
+        with Finally("I drop the table"):
+            node.query(f"DROP TABLE IF EXISTS default.foo")
diff --git a/tests/testflows/regression.py b/tests/testflows/regression.py
index 850c0be2433..a80ae7fc07b 100755
--- a/tests/testflows/regression.py
+++ b/tests/testflows/regression.py
@@ -2,7 +2,7 @@ import sys
 
 from testflows.core import *
 
-append_path(sys.path, "."),
+append_path(sys.path, ".")
 
 from helpers.argparser import argparser
 
@@ -16,6 +16,7 @@ def regression(self, local, clickhouse_binary_path):
 
     Feature(test=load("example.regression", "regression"))(**args)
     Feature(test=load("ldap.regression", "regression"))(**args)
+    Feature(test=load("rbac.regression", "regression"))(**args)
 
 if main():
     regression()
diff --git a/website/benchmark/dbms/results/012_omnisci.json b/website/benchmark/dbms/results/012_omnisci.json
new file mode 100644
index 00000000000..3dad155f996
--- /dev/null
+++ b/website/benchmark/dbms/results/012_omnisci.json
@@ -0,0 +1,56 @@
+[
+    {
+        "system": "OmniSci",
+        "version": "2020-08-27",
+        "data_size": 100000000,
+        "time": "",
+        "comments": "",
+        "result":
+        [
+            [23.471, 0.043, 0.035],
+            [17.329, 0.059, 0.059],
+            [17.31, 0.115, 0.13],
+            [26.091, 0.089, 0.072],
+            [21.72, 0.364, 0.345],
+            [19.315, 0.386, 0.382],
+            [19.432, 0.131, 0.148],
+            [20.661, 0.064, 0.089],
+            [21.472, 1.504, 1.505],
+            [22.285, 1.655, 1.658],
+            [22.344, 0.123, 0.118],
+            [21.695, 0.31, 0.292],
+            [23.36, 0.631, 0.624],
+            [68.618, 47.296, 46.866],
+            [25.984, 2.728, 2.789],
+            [26.953, 6.158, 6.286],
+            [33.581, 10.509, 10.047],
+            [39.575, 8.785, 8.665],
+            [57.764, 28.199, 27.336],
+            [18.976, 0.136, 0.136],
+            [32.445, 0.126, 0.136],
+            [null, null, null],
+            [null, null, null],
+            [96.166, 0.314, 0.305],
+            [27.494, 0.216, 0.222],
+            [38.308, 17.256, 17.31],
+            [115.714, 96.041, 94.383],
+            [31.779, 2.647, 2.937],
+            [null, null, null],
+            [28.854, 5.655, 5.581],
+            [31.925, 4.142, 4.162],
+            [44.297, 8.825, 8.711],
+            [null, null, null],
+            [29.715, 1.669, 1.586],
+            [35.201, 7.414, 7.567],
+            [26.724, 6.026, 5.92],
+            [31.908, 1.154, 1.168],
+            [27.997, 0.724, 0.744],
+            [34.661, 1.2, 1.159],
+            [30.136, 0.467, 0.451],
+            [19.991, 0.327, 0.326],
+            [18.66, 0.266, 0.255],
+            [25.227, 0.212, 0.2]
+        ]
+    }
+]