diff --git a/contrib/snappy-cmake/CMakeLists.txt b/contrib/snappy-cmake/CMakeLists.txt
index 0997ea207e0..50cdc8732a1 100644
--- a/contrib/snappy-cmake/CMakeLists.txt
+++ b/contrib/snappy-cmake/CMakeLists.txt
@@ -1,6 +1,10 @@
set (SOURCE_DIR "${CMAKE_SOURCE_DIR}/contrib/snappy")
-set (SNAPPY_IS_BIG_ENDIAN 0)
+if (ARCH_S390X)
+ set (SNAPPY_IS_BIG_ENDIAN 1)
+else ()
+ set (SNAPPY_IS_BIG_ENDIAN 0)
+endif()
set (HAVE_BYTESWAP_H 1)
set (HAVE_SYS_MMAN_H 1)
diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh
index c606a8049bd..aa242bfa98d 100644
--- a/docker/test/stress/run.sh
+++ b/docker/test/stress/run.sh
@@ -11,6 +11,18 @@ set -x
# core.COMM.PID-TID
sysctl kernel.core_pattern='core.%e.%p-%P'
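+# Lines appended to test_results.tsv are TSV: test name, OK/FAIL status, test time (\N if unknown), escaped details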
+OK="\tOK\t\\N\t"
+FAIL="\tFAIL\t\\N\t"
+function escaped()
+{
+ # That's the simplest way I found to escape a string in bash. Yep, bash is the most convenient programming language.
+ clickhouse local -S 's String' --input-format=LineAsString -q "select * from table format CustomSeparated settings format_custom_row_after_delimiter='\\\\\\\\n'"
+}
+
+function head_escaped()
+{
+ head -50 "$1" | escaped
+}
function install_packages()
{
@@ -33,7 +45,9 @@ function configure()
ln -s /usr/share/clickhouse-test/ci/get_previous_release_tag.py /usr/bin/get_previous_release_tag
# avoid too slow startup
- sudo cat /etc/clickhouse-server/config.d/keeper_port.xml | sed "s|100000|10000|" > /etc/clickhouse-server/config.d/keeper_port.xml.tmp
+ sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
+ | sed "s|100000|10000|" \
+ > /etc/clickhouse-server/config.d/keeper_port.xml.tmp
sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
sudo chown clickhouse /etc/clickhouse-server/config.d/keeper_port.xml
sudo chgrp clickhouse /etc/clickhouse-server/config.d/keeper_port.xml
@@ -136,6 +150,7 @@ function stop()
clickhouse stop --max-tries "$max_tries" --do-not-kill && return
# We failed to stop the server with SIGTERM. Maybe it hang, let's collect stacktraces.
+ echo -e "Possible deadlock on shutdown (see gdb.log)$FAIL" >> /test_output/test_results.tsv
kill -TERM "$(pidof gdb)" ||:
sleep 5
echo "thread apply all backtrace (on stop)" >> /test_output/gdb.log
@@ -151,10 +166,11 @@ function start()
if [ "$counter" -gt ${1:-120} ]
then
echo "Cannot start clickhouse-server"
- echo -e "Cannot start clickhouse-server\tFAIL" >> /test_output/test_results.tsv
+ rg --text ".*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt ||:
+ echo -e "Cannot start clickhouse-server$FAIL$(head_escaped /test_output/application_errors.txt)" >> /test_output/test_results.tsv
cat /var/log/clickhouse-server/stdout.log
- tail -n1000 /var/log/clickhouse-server/stderr.log
- tail -n100000 /var/log/clickhouse-server/clickhouse-server.log | rg -F -v -e ' RaftInstance:' -e ' RaftInstance' | tail -n1000
+ tail -n100 /var/log/clickhouse-server/stderr.log
+ tail -n100000 /var/log/clickhouse-server/clickhouse-server.log | rg -F -v -e ' RaftInstance:' -e ' RaftInstance' | tail -n100
break
fi
# use root to match with current uid
@@ -252,9 +268,92 @@ start
clickhouse-client --query "SHOW TABLES FROM datasets"
clickhouse-client --query "SHOW TABLES FROM test"
-clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
-clickhouse-client --query "CREATE TABLE test.hits (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
-clickhouse-client --query "CREATE TABLE test.visits (CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32), WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8, PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16), CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, ParsedParams 
Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32, DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16)) ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
+clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16,
+ EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32,
+ UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String,
+ Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32),
+ RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8,
+ FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2),
+ CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String,
+ IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8,
+ WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8,
+ SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32,
+ IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8,
+ IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8,
+ Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32,
+ RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2),
+ BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32,
+ DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32,
+ RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32,
+ LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32,
+ RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String,
+ ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String,
+ OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String,
+ UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64,
+ URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String,
+ ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64),
+ IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate)
+ ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
+clickhouse-client --query "CREATE TABLE test.hits (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16,
+ EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32,
+ UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String,
+ RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16),
+ URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8,
+ FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16,
+ UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8,
+ MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16,
+ SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16,
+ ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32,
+ SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8,
+ FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8,
+ HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8,
+ GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32,
+ HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String,
+ HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32,
+ FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32,
+ LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32,
+ RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String,
+ ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String,
+ OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String,
+ UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64,
+ URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String,
+ ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64),
+ IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate)
+ ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
+clickhouse-client --query "CREATE TABLE test.visits (CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8,
+ VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32,
+ Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String,
+ EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String,
+ AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32),
+ RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32,
+ SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32,
+ ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32,
+ SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16,
+ UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16,
+ FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8,
+ FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8,
+ Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8,
+ BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16),
+ Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32),
+ WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64,
+ ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32,
+ ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32,
+ ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32,
+ ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16,
+ ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32,
+ OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String,
+ UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime,
+ PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8,
+ PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16),
+ CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64,
+ StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64,
+ OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64,
+ UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32,
+ ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64),
+ Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32,
+ DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16))
+ ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
+ SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
@@ -275,7 +374,9 @@ export ZOOKEEPER_FAULT_INJECTION=1
configure
# But we still need default disk because some tables loaded only into it
-sudo cat /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml | sed "s|s3|s3default|" > /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp
+sudo cat /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml \
+ | sed "s|s3|s3default|" \
+ > /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp
mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
@@ -283,8 +384,12 @@ sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_defau
start
./stress --hung-check --drop-databases --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION" --global-time-limit 1200 \
- && echo -e 'Test script exit code\tOK' >> /test_output/test_results.tsv \
- || echo -e 'Test script failed\tFAIL' >> /test_output/test_results.tsv
+ && echo -e "Test script exit code$OK" >> /test_output/test_results.tsv \
+ || echo -e "Test script failed$FAIL script exit code: $?" >> /test_output/test_results.tsv
+
+# NOTE Hung check is implemented in docker/test/stress/stress
+rg -Fa "No queries hung" /test_output/test_results.tsv | grep -Fa "OK" \
+ || echo -e "Hung check failed, possible deadlock found (see hung_check.log)$FAIL$(head_escaped /test_output/hung_check.log)"
stop
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.stress.log
@@ -295,9 +400,10 @@ unset "${!THREAD_@}"
start
-clickhouse-client --query "SELECT 'Server successfully started', 'OK'" >> /test_output/test_results.tsv \
- || (echo -e 'Server failed to start (see application_errors.txt and clickhouse-server.clean.log)\tFAIL' >> /test_output/test_results.tsv \
- && rg --text ".*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt)
+clickhouse-client --query "SELECT 'Server successfully started', 'OK', NULL, ''" >> /test_output/test_results.tsv \
+ || (rg --text ".*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt \
+ && echo -e "Server failed to start (see application_errors.txt and clickhouse-server.clean.log)$FAIL$(head_escaped /test_output/application_errors.txt)" \
+ >> /test_output/test_results.tsv)
stop
@@ -310,49 +416,49 @@ stop
rg -Fa "==================" /var/log/clickhouse-server/stderr.log | rg -v "in query:" >> /test_output/tmp
rg -Fa "WARNING" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
rg -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \
- && echo -e 'Sanitizer assert (in stderr.log)\tFAIL' >> /test_output/test_results.tsv \
- || echo -e 'No sanitizer asserts\tOK' >> /test_output/test_results.tsv
+ && echo -e "Sanitizer assert (in stderr.log)$FAIL$(head_escaped /test_output/tmp)" >> /test_output/test_results.tsv \
+ || echo -e "No sanitizer asserts$OK" >> /test_output/test_results.tsv
rm -f /test_output/tmp
# OOM
rg -Fa " Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server*.log > /dev/null \
- && echo -e 'OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \
- || echo -e 'No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
+ && echo -e "Signal 9 in clickhouse-server.log$FAIL" >> /test_output/test_results.tsv \
+ || echo -e "No OOM messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv
# Logical errors
-rg -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server*.log > /test_output/logical_errors.txt \
- && echo -e 'Logical error thrown (see clickhouse-server.log or logical_errors.txt)\tFAIL' >> /test_output/test_results.tsv \
- || echo -e 'No logical errors\tOK' >> /test_output/test_results.tsv
+rg -Fa "Code: 49. DB::Exception: " /var/log/clickhouse-server/clickhouse-server*.log > /test_output/logical_errors.txt \
+ && echo -e "Logical error thrown (see clickhouse-server.log or logical_errors.txt)$FAIL$(head_escaped /test_output/logical_errors.txt)" >> /test_output/test_results.tsv \
+ || echo -e "No logical errors$OK" >> /test_output/test_results.tsv
# Remove file logical_errors.txt if it's empty
[ -s /test_output/logical_errors.txt ] || rm /test_output/logical_errors.txt
# No such key errors
rg --text "Code: 499.*The specified key does not exist" /var/log/clickhouse-server/clickhouse-server*.log > /test_output/no_such_key_errors.txt \
- && echo -e 'S3_ERROR No such key thrown (see clickhouse-server.log or no_such_key_errors.txt)\tFAIL' >> /test_output/test_results.tsv \
- || echo -e 'No lost s3 keys\tOK' >> /test_output/test_results.tsv
+ && echo -e "S3_ERROR No such key thrown (see clickhouse-server.log or no_such_key_errors.txt)$FAIL$(head_escaped /test_output/no_such_key_errors.txt)" >> /test_output/test_results.tsv \
+ || echo -e "No lost s3 keys$OK" >> /test_output/test_results.tsv
# Remove file no_such_key_errors.txt if it's empty
[ -s /test_output/no_such_key_errors.txt ] || rm /test_output/no_such_key_errors.txt
# Crash
rg -Fa "########################################" /var/log/clickhouse-server/clickhouse-server*.log > /dev/null \
- && echo -e 'Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \
- || echo -e 'Not crashed\tOK' >> /test_output/test_results.tsv
+ && echo -e "Killed by signal (in clickhouse-server.log)$FAIL" >> /test_output/test_results.tsv \
+ || echo -e "Not crashed$OK" >> /test_output/test_results.tsv
# It also checks for crash without stacktrace (printed by watchdog)
rg -Fa " " /var/log/clickhouse-server/clickhouse-server*.log > /test_output/fatal_messages.txt \
- && echo -e 'Fatal message in clickhouse-server.log (see fatal_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
- || echo -e 'No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
+ && echo -e "Fatal message in clickhouse-server.log (see fatal_messages.txt)$FAIL$(head_escaped /test_output/fatal_messages.txt)" >> /test_output/test_results.tsv \
+ || echo -e "No fatal messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv
# Remove file fatal_messages.txt if it's empty
[ -s /test_output/fatal_messages.txt ] || rm /test_output/fatal_messages.txt
rg -Fa "########################################" /test_output/* > /dev/null \
- && echo -e 'Killed by signal (output files)\tFAIL' >> /test_output/test_results.tsv
+ && echo -e "Killed by signal (output files)$FAIL" >> /test_output/test_results.tsv
rg -Fa " received signal " /test_output/gdb.log > /dev/null \
- && echo -e 'Found signal in gdb.log\tFAIL' >> /test_output/test_results.tsv
+ && echo -e "Found signal in gdb.log$FAIL$(rg -A50 -Fa " received signal " /test_output/gdb.log | escaped)" >> /test_output/test_results.tsv
if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
echo -e "Backward compatibility check\n"
@@ -367,8 +473,8 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
echo "Download clickhouse-server from the previous release"
mkdir previous_release_package_folder
- echo $previous_release_tag | download_release_packages && echo -e 'Download script exit code\tOK' >> /test_output/test_results.tsv \
- || echo -e 'Download script failed\tFAIL' >> /test_output/test_results.tsv
+ echo $previous_release_tag | download_release_packages && echo -e "Download script exit code$OK" >> /test_output/test_results.tsv \
+ || echo -e "Download script failed$FAIL" >> /test_output/test_results.tsv
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.clean.log
for table in query_log trace_log
@@ -381,13 +487,13 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
# Check if we cloned previous release repository successfully
if ! [ "$(ls -A previous_release_repository/tests/queries)" ]
then
- echo -e "Backward compatibility check: Failed to clone previous release tests\tFAIL" >> /test_output/test_results.tsv
+ echo -e "Backward compatibility check: Failed to clone previous release tests$FAIL" >> /test_output/test_results.tsv
elif ! [ "$(ls -A previous_release_package_folder/clickhouse-common-static_*.deb && ls -A previous_release_package_folder/clickhouse-server_*.deb)" ]
then
- echo -e "Backward compatibility check: Failed to download previous release packages\tFAIL" >> /test_output/test_results.tsv
+ echo -e "Backward compatibility check: Failed to download previous release packages$FAIL" >> /test_output/test_results.tsv
else
- echo -e "Successfully cloned previous release tests\tOK" >> /test_output/test_results.tsv
- echo -e "Successfully downloaded previous release packages\tOK" >> /test_output/test_results.tsv
+ echo -e "Successfully cloned previous release tests$OK" >> /test_output/test_results.tsv
+ echo -e "Successfully downloaded previous release packages$OK" >> /test_output/test_results.tsv
# Uninstall current packages
dpkg --remove clickhouse-client
@@ -446,9 +552,10 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
mkdir tmp_stress_output
- ./stress --test-cmd="/usr/bin/clickhouse-test --queries=\"previous_release_repository/tests/queries\"" --backward-compatibility-check --output-folder tmp_stress_output --global-time-limit=1200 \
- && echo -e 'Backward compatibility check: Test script exit code\tOK' >> /test_output/test_results.tsv \
- || echo -e 'Backward compatibility check: Test script failed\tFAIL' >> /test_output/test_results.tsv
+ ./stress --test-cmd="/usr/bin/clickhouse-test --queries=\"previous_release_repository/tests/queries\"" \
+ --backward-compatibility-check --output-folder tmp_stress_output --global-time-limit=1200 \
+ && echo -e "Backward compatibility check: Test script exit code$OK" >> /test_output/test_results.tsv \
+ || echo -e "Backward compatibility check: Test script failed$FAIL" >> /test_output/test_results.tsv
rm -rf tmp_stress_output
# We experienced deadlocks in this command in very rare cases. Let's debug it:
@@ -470,9 +577,9 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
export ZOOKEEPER_FAULT_INJECTION=0
configure
start 500
- clickhouse-client --query "SELECT 'Backward compatibility check: Server successfully started', 'OK'" >> /test_output/test_results.tsv \
- || (echo -e 'Backward compatibility check: Server failed to start\tFAIL' >> /test_output/test_results.tsv \
- && rg --text ".*Application" /var/log/clickhouse-server/clickhouse-server.log >> /test_output/bc_check_application_errors.txt)
+ clickhouse-client --query "SELECT 'Backward compatibility check: Server successfully started', 'OK', NULL, ''" >> /test_output/test_results.tsv \
+ || (rg --text ".*Application" /var/log/clickhouse-server/clickhouse-server.log >> /test_output/bc_check_application_errors.txt \
+ && echo -e "Backward compatibility check: Server failed to start$FAIL$(head_escaped /test_output/bc_check_application_errors.txt)" >> /test_output/test_results.tsv)
clickhouse-client --query="SELECT 'Server version: ', version()"
@@ -488,8 +595,6 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
# FIXME Not sure if it's expected, but some tests from BC check may not be finished yet when we restarting server.
# Let's just ignore all errors from queries ("} <Error> TCPHandler: Code:", "} <Error> executeQuery: Code:")
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39197 ("Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'")
- # NOTE Incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/39263, it's expected
- # ("This engine is deprecated and is not supported in transactions", "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part")
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39174 - bad mutation does not indicate backward incompatibility
echo "Check for Error messages in server log:"
rg -Fav -e "Code: 236. DB::Exception: Cancelled merging parts" \
@@ -519,7 +624,6 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
-e "} TCPHandler: Code:" \
-e "} executeQuery: Code:" \
-e "Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'" \
- -e "This engine is deprecated and is not supported in transactions" \
-e "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part" \
-e "The set of parts restored in place of" \
-e "(ReplicatedMergeTreeAttachThread): Initialization failed. Error" \
@@ -530,8 +634,9 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
-e "Session expired" \
-e "TOO_MANY_PARTS" \
/var/log/clickhouse-server/clickhouse-server.backward.dirty.log | rg -Fa "" > /test_output/bc_check_error_messages.txt \
- && echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
- || echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
+ && echo -e "Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)$FAIL$(head_escaped /test_output/bc_check_error_messages.txt)" \
+ >> /test_output/test_results.tsv \
+ || echo -e "Backward compatibility check: No Error messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv
# Remove file bc_check_error_messages.txt if it's empty
[ -s /test_output/bc_check_error_messages.txt ] || rm /test_output/bc_check_error_messages.txt
@@ -540,34 +645,36 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
rg -Fa "==================" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
rg -Fa "WARNING" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
rg -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \
- && echo -e 'Backward compatibility check: Sanitizer assert (in stderr.log)\tFAIL' >> /test_output/test_results.tsv \
- || echo -e 'Backward compatibility check: No sanitizer asserts\tOK' >> /test_output/test_results.tsv
+ && echo -e "Backward compatibility check: Sanitizer assert (in stderr.log)$FAIL$(head_escaped /test_output/tmp)" >> /test_output/test_results.tsv \
+ || echo -e "Backward compatibility check: No sanitizer asserts$OK" >> /test_output/test_results.tsv
rm -f /test_output/tmp
# OOM
rg -Fa " Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \
- && echo -e 'Backward compatibility check: OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \
- || echo -e 'Backward compatibility check: No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
+ && echo -e "Backward compatibility check: Signal 9 in clickhouse-server.log$FAIL" >> /test_output/test_results.tsv \
+ || echo -e "Backward compatibility check: No OOM messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv
# Logical errors
echo "Check for Logical errors in server log:"
- rg -Fa -A20 "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_logical_errors.txt \
- && echo -e 'Backward compatibility check: Logical error thrown (see clickhouse-server.log or bc_check_logical_errors.txt)\tFAIL' >> /test_output/test_results.tsv \
- || echo -e 'Backward compatibility check: No logical errors\tOK' >> /test_output/test_results.tsv
+ rg -Fa -A20 "Code: 49. DB::Exception:" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_logical_errors.txt \
+ && echo -e "Backward compatibility check: Logical error thrown (see clickhouse-server.log or bc_check_logical_errors.txt)$FAIL$(head_escaped /test_output/bc_check_logical_errors.txt)" \
+ >> /test_output/test_results.tsv \
+ || echo -e "Backward compatibility check: No logical errors$OK" >> /test_output/test_results.tsv
# Remove file bc_check_logical_errors.txt if it's empty
[ -s /test_output/bc_check_logical_errors.txt ] || rm /test_output/bc_check_logical_errors.txt
# Crash
rg -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /dev/null \
- && echo -e 'Backward compatibility check: Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \
- || echo -e 'Backward compatibility check: Not crashed\tOK' >> /test_output/test_results.tsv
+ && echo -e "Backward compatibility check: Killed by signal (in clickhouse-server.log)$FAIL" >> /test_output/test_results.tsv \
+ || echo -e "Backward compatibility check: Not crashed$OK" >> /test_output/test_results.tsv
# It also checks for crash without stacktrace (printed by watchdog)
echo "Check for Fatal message in server log:"
rg -Fa " " /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_fatal_messages.txt \
- && echo -e 'Backward compatibility check: Fatal message in clickhouse-server.log (see bc_check_fatal_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
- || echo -e 'Backward compatibility check: No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
+ && echo -e "Backward compatibility check: Fatal message in clickhouse-server.log (see bc_check_fatal_messages.txt)$FAIL$(head_escaped /test_output/bc_check_fatal_messages.txt)" \
+ >> /test_output/test_results.tsv \
+ || echo -e "Backward compatibility check: No fatal messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv
# Remove file bc_check_fatal_messages.txt if it's empty
[ -s /test_output/bc_check_fatal_messages.txt ] || rm /test_output/bc_check_fatal_messages.txt
@@ -575,7 +682,8 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
tar -chf /test_output/coordination.backward.tar /var/lib/clickhouse/coordination ||:
for table in query_log trace_log
do
- clickhouse-local --path /var/lib/clickhouse/ --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.backward.tsv.zst ||:
+ clickhouse-local --path /var/lib/clickhouse/ --only-system-tables -q "select * from system.$table format TSVWithNamesAndTypes" \
+ | zstd --threads=0 > /test_output/$table.backward.tsv.zst ||:
done
fi
fi
@@ -584,13 +692,28 @@ dmesg -T > /test_output/dmesg.log
# OOM in dmesg -- those are real
grep -q -F -e 'Out of memory: Killed process' -e 'oom_reaper: reaped process' -e 'oom-kill:constraint=CONSTRAINT_NONE' /test_output/dmesg.log \
- && echo -e 'OOM in dmesg\tFAIL' >> /test_output/test_results.tsv \
- || echo -e 'No OOM in dmesg\tOK' >> /test_output/test_results.tsv
+ && echo -e "OOM in dmesg$FAIL$(head_escaped /test_output/dmesg.log)" >> /test_output/test_results.tsv \
+ || echo -e "No OOM in dmesg$OK" >> /test_output/test_results.tsv
mv /var/log/clickhouse-server/stderr.log /test_output/
# Write check result into check_status.tsv
-clickhouse-local --structure "test String, res String" -q "SELECT 'failure', test FROM table WHERE res != 'OK' order by (lower(test) like '%hung%'), rowNumberInAllBlocks() LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv
+# Try to choose most specific error for the whole check status
+clickhouse-local --structure "test String, res String" -q "SELECT 'failure', test FROM table WHERE res != 'OK' order by
+(test like 'Backward compatibility check%'), -- BC check goes last
+(test like '%Sanitizer%') DESC,
+(test like '%Killed by signal%') DESC,
+(test like '%gdb.log%') DESC,
+(test ilike '%possible deadlock%') DESC,
+(test like '%start%') DESC,
+(test like '%dmesg%') DESC,
+(test like '%OOM%') DESC,
+(test like '%Signal 9%') DESC,
+(test like '%Fatal message%') DESC,
+(test like '%Error message%') DESC,
+(test like '%previous release%') DESC,
+rowNumberInAllBlocks()
+LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv
[ -s /test_output/check_status.tsv ] || echo -e "success\tNo errors found" > /test_output/check_status.tsv
# Core dumps
diff --git a/docker/test/stress/stress b/docker/test/stress/stress
index 3fce357cc19..86605b5ce0c 100755
--- a/docker/test/stress/stress
+++ b/docker/test/stress/stress
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from multiprocessing import cpu_count
-from subprocess import Popen, call, check_output, STDOUT
+from subprocess import Popen, call, check_output, STDOUT, PIPE
import os
import argparse
import logging
@@ -299,14 +299,19 @@ if __name__ == "__main__":
"00001_select_1",
]
)
- res = call(cmd, shell=True, stderr=STDOUT)
- hung_check_status = "No queries hung\tOK\n"
+ hung_check_log = os.path.join(args.output_folder, "hung_check.log")
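+ # tee duplicates the hung check output: it stays on stdout and is also written to hung_check.log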
+ tee = Popen(['/usr/bin/tee', hung_check_log], stdin=PIPE)
+ res = call(cmd, shell=True, stdout=tee.stdin, stderr=STDOUT)
+ tee.stdin.close()
if res != 0 and have_long_running_queries:
logging.info("Hung check failed with exit code {}".format(res))
- hung_check_status = "Hung check failed\tFAIL\n"
- with open(
- os.path.join(args.output_folder, "test_results.tsv"), "w+"
- ) as results:
- results.write(hung_check_status)
+ else:
+ hung_check_status = "No queries hung\tOK\t\\N\t\n"
+ with open(
+ os.path.join(args.output_folder, "test_results.tsv"), "w+"
+ ) as results:
+ results.write(hung_check_status)
+ os.remove(hung_check_log)
+
logging.info("Stress test finished")
diff --git a/docs/en/operations/query-cache.md b/docs/en/operations/query-cache.md
new file mode 100644
index 00000000000..1a486de7904
--- /dev/null
+++ b/docs/en/operations/query-cache.md
@@ -0,0 +1,112 @@
+---
+slug: /en/operations/query-cache
+sidebar_position: 65
+sidebar_label: Query Cache [experimental]
+---
+
+# Query Cache [experimental]
+
+The query cache allows `SELECT` queries to be computed just once, with further executions of the same query served directly from the cache.
+Depending on the type of queries, this can dramatically reduce the latency and resource consumption of the ClickHouse server.
+
+## Background, Design and Limitations
+
+Query caches can generally be viewed as transactionally consistent or inconsistent.
+
+- In transactionally consistent caches, the database invalidates (discards) cached query results if the result of the `SELECT` query changes
+ or potentially changes. In ClickHouse, operations which change the data include inserts, updates, and deletes on tables, as well as
+ collapsing merges. Transactionally consistent caching is especially suitable for OLTP databases, for example
+ [MySQL](https://dev.mysql.com/doc/refman/5.6/en/query-cache.html) (which removed its query cache in v8.0) and
+ [Oracle](https://docs.oracle.com/database/121/TGDBA/tune_result_cache.htm).
+- In transactionally inconsistent caches, slight inaccuracies in query results are accepted under the assumption that all cache entries are
+ assigned a validity period after which they expire (e.g. 1 minute) and that the underlying data changes only slightly during this period.
+ This approach is overall more suitable for OLAP databases. As an example where transactionally inconsistent caching is sufficient,
+ consider an hourly sales report in a reporting tool which is simultaneously accessed by multiple users. Sales data typically changes
+ slowly enough that the database only needs to compute the report once (represented by the first `SELECT` query). Further queries can be
+ served directly from the query cache. In this example, a reasonable validity period could be 30 min.
+
+Transactionally inconsistent caching is traditionally provided by client tools or proxy packages interacting with the database. As a result,
+the same caching logic and configuration are often duplicated. With ClickHouse's query cache, the caching logic moves to the server side.
+This reduces maintenance effort and avoids redundancy.
+
+:::warning
+The query cache is an experimental feature that should not be used in production. There are known cases (e.g. in distributed query
+processing) where wrong results are returned.
+:::
+
+## Configuration Settings and Usage
+
+As long as the query cache is experimental, it must be activated using the following configuration setting:
+
+```sql
+SET allow_experimental_query_cache = true;
+```
+
+Afterwards, setting [use_query_cache](settings/settings.md#use-query-cache) can be used to control whether a specific query or all queries
+of the current session should utilize the query cache. For example, the first execution of query
+
+```sql
+SELECT some_expensive_calculation(column_1, column_2)
+FROM table
+SETTINGS use_query_cache = true;
+```
+
+will store the query result in the query cache. Subsequent executions of the same query (also with parameter `use_query_cache = true`) will
+read the computed result from the cache and return it immediately.
+
+The way the cache is utilized can be configured in more detail using settings [enable_writes_to_query_cache](settings/settings.md#enable-writes-to-query-cache)
+and [enable_reads_from_query_cache](settings/settings.md#enable-reads-from-query-cache) (both `true` by default). The former setting
+controls whether query results are stored in the cache, whereas the latter setting determines if the database should try to retrieve query
+results from the cache. For example, the following query will use the cache only passively, i.e. attempt to read from it but not store its
+result in it:
+
+```sql
+SELECT some_expensive_calculation(column_1, column_2)
+FROM table
+SETTINGS use_query_cache = true, enable_writes_to_query_cache = false;
+```
+
+For maximum control, it is generally recommended to provide settings "use_query_cache", "enable_writes_to_query_cache" and
+"enable_reads_from_query_cache" only with specific queries. It is also possible to enable caching at user or profile level (e.g. via `SET
+use_query_cache = true`), but one should keep in mind that all `SELECT` queries, including monitoring or debugging queries to system tables,
+may then return cached results.
+
+The query cache can be cleared using statement `SYSTEM DROP QUERY CACHE`. The content of the query cache is displayed in system table
+`system.query_cache`. The number of query cache hits and misses are shown as events "QueryCacheHits" and "QueryCacheMisses" in system table
+`system.events`. Both counters are only updated for `SELECT` queries which run with setting "use_query_cache = true". Other queries do not
+affect the cache miss counter.
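+
+For example, the cache can be inspected and cleared like this:
+
+```sql
+-- show the current cache contents
+SELECT * FROM system.query_cache;
+
+-- show hit/miss counters
+SELECT event, value FROM system.events WHERE event IN ('QueryCacheHits', 'QueryCacheMisses');
+
+-- clear the cache
+SYSTEM DROP QUERY CACHE;
+```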
+
+The query cache exists once per ClickHouse server process. However, cache results are by default not shared between users. This can be
+changed (see below) but doing so is not recommended for security reasons.
+
+Query results are referenced in the query cache by the [Abstract Syntax Tree (AST)](https://en.wikipedia.org/wiki/Abstract_syntax_tree) of
+their query. This means that caching is agnostic to upper/lowercase; for example, `SELECT 1` and `select 1` are treated as the same query. To
+make the matching more natural, all query-level settings related to the query cache are removed from the AST.
+
+If the query was aborted due to an exception or user cancellation, no entry is written into the query cache.
+
+The size of the query cache, the maximum number of cache entries and the maximum size of cache entries (in bytes and in records) can
+be configured using different [server configuration options](server-configuration-parameters/settings.md#server_configuration_parameters_query-cache).
+
+To define the minimum duration a query must run for its result to be cached, use setting
+[query_cache_min_query_duration](settings/settings.md#query-cache-min-query-duration). For example, the result of query
+
+``` sql
+SELECT some_expensive_calculation(column_1, column_2)
+FROM table
+SETTINGS use_query_cache = true, query_cache_min_query_duration = 5000;
+```
+
+is only cached if the query runs longer than 5 seconds. It is also possible to specify how often a query needs to run before its result is
+cached; for that, use setting [query_cache_min_query_runs](settings/settings.md#query-cache-min-query-runs).
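+
+For example,
+
+```sql
+SELECT some_expensive_calculation(column_1, column_2)
+FROM table
+SETTINGS use_query_cache = true, query_cache_min_query_runs = 5;
+```
+
+requires the query to run five times before its result is stored in the cache.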
+
+Entries in the query cache become stale after a certain time period (time-to-live). By default, this period is 60 seconds but a different
+value can be specified at session, profile or query level using setting [query_cache_ttl](settings/settings.md#query-cache-ttl).
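+
+For example, the following result stays valid for five minutes:
+
+```sql
+SELECT some_expensive_calculation(column_1, column_2)
+FROM table
+SETTINGS use_query_cache = true, query_cache_ttl = 300;
+```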
+
+Also, results of queries with non-deterministic functions such as `rand()` and `now()` are not cached. This can be overridden using
+setting [query_cache_store_results_of_queries_with_nondeterministic_functions](settings/settings.md#query-cache-store-results-of-queries-with-nondeterministic-functions).
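+
+For example,
+
+```sql
+SELECT now(), rand()
+SETTINGS use_query_cache = true, query_cache_store_results_of_queries_with_nondeterministic_functions = true;
+```
+
+explicitly opts such a query into the cache.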
+
+Finally, entries in the query cache are not shared between users for security reasons. For example, user A must not be able to bypass a
+row policy on a table by running the same query as another user B for whom no such policy exists. However, if necessary, cache entries can
+be marked accessible by other users (i.e. shared) by supplying setting
+[query_cache_share_between_users](settings/settings.md#query-cache-share-between-users).
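+
+For example, a cache entry that other users may read could be created like this:
+
+```sql
+SELECT some_expensive_calculation(column_1, column_2)
+FROM table
+SETTINGS use_query_cache = true, query_cache_share_between_users = true;
+```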
diff --git a/docs/en/operations/query-result-cache.md b/docs/en/operations/query-result-cache.md
index 496092ab3e4..046b75ac5c5 100644
--- a/docs/en/operations/query-result-cache.md
+++ b/docs/en/operations/query-result-cache.md
@@ -72,8 +72,8 @@ For maximum control, it is generally recommended to provide settings "use_query_
system tables may return cached results then.
The query result cache can be cleared using statement `SYSTEM DROP QUERY RESULT CACHE`. The content of the query result cache is displayed
-in system table `SYSTEM.QUERY_RESULT_CACHE`. The number of query result cache hits and misses are shown as events "QueryResultCacheHits" and
-"QueryResultCacheMisses" in system table `SYSTEM.EVENTS`. Both counters are only updated for `SELECT` queries which run with setting
+in system table `SYSTEM.QUERY_RESULT_CACHE`. The number of query result cache hits and misses are shown as events "QueryCacheHits" and
+"QueryCacheMisses" in system table `SYSTEM.EVENTS`. Both counters are only updated for `SELECT` queries which run with setting
"use_query_result_cache = true". Other queries do not affect the cache miss counter.
The query result cache exists once per ClickHouse server process. However, cache results are by default not shared between users. This can
diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md
index 9a67edd75ca..761d27a889f 100644
--- a/docs/en/operations/server-configuration-parameters/settings.md
+++ b/docs/en/operations/server-configuration-parameters/settings.md
@@ -1270,30 +1270,30 @@ If the table does not exist, ClickHouse will create it. If the structure of the
```
-## query_result_cache {#server_configuration_parameters_query-result-cache}
+## query_cache {#server_configuration_parameters_query-cache}
-[Query result cache](../query-result-cache.md) configuration.
+[Query cache](../query-cache.md) configuration.
The following settings are available:
-- `size`: The maximum cache size in bytes. 0 means the query result cache is disabled. Default value: `1073741824` (1 GiB).
-- `max_entries`: The maximum number of SELECT query results stored in the cache. Default value: `1024`.
-- `max_entry_size`: The maximum size in bytes SELECT query results may have to be saved in the cache. Default value: `1048576` (1 MiB).
-- `max_entry_records`: The maximum number of records SELECT query results may have to be saved in the cache. Default value: `30000000` (30 mil).
+- `size`: The maximum cache size in bytes. 0 means the query cache is disabled. Default value: `1073741824` (1 GiB).
+- `max_entries`: The maximum number of `SELECT` query results stored in the cache. Default value: `1024`.
+- `max_entry_size`: The maximum size in bytes a `SELECT` query result may have in order to be saved in the cache. Default value: `1048576` (1 MiB).
+- `max_entry_records`: The maximum number of records a `SELECT` query result may have in order to be saved in the cache. Default value: `30000000` (30 million).
:::warning
-Data for the query result cache is allocated in DRAM. If memory is scarce, make sure to set a small value for `size` or disable the query result cache altogether.
+Data for the query cache is allocated in DRAM. If memory is scarce, make sure to set a small value for `size` or disable the query cache altogether.
:::
**Example**
```xml
-<query_result_cache>
+<query_cache>
    <size>1073741824</size>
    <max_entries>1024</max_entries>
    <max_entry_size>1048576</max_entry_size>
    <max_entry_records>30000000</max_entry_records>
-</query_result_cache>
+</query_cache>
```
## query_thread_log {#server_configuration_parameters-query_thread_log}
diff --git a/docs/en/operations/settings/merge-tree-settings.md b/docs/en/operations/settings/merge-tree-settings.md
index b383e0598a1..5bc174727ad 100644
--- a/docs/en/operations/settings/merge-tree-settings.md
+++ b/docs/en/operations/settings/merge-tree-settings.md
@@ -233,7 +233,7 @@ Possible values:
Default value: 100.
-Normally, the `use_async_block_ids_cache` updates as soon as there are updates in the watching keeper path. However, the cache updates might be too frequent and become a heavy burden. This minimum interval prevents the cache from updating too fast. Note that if we set this value too long, the block with duplicated inserts will have a longer retry time.
+Normally, the `use_async_block_ids_cache` updates as soon as there are updates in the watching keeper path. However, the cache updates might be too frequent and become a heavy burden. This minimum interval prevents the cache from updating too fast. Note that if we set this value too long, the block with duplicated inserts will have a longer retry time.
## max_replicated_logs_to_keep
diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md
index 1407971c4f2..81c0531427f 100644
--- a/docs/en/operations/settings/settings.md
+++ b/docs/en/operations/settings/settings.md
@@ -1301,10 +1301,10 @@ Possible values:
Default value: `3`.
-## use_query_result_cache {#use-query-result-cache}
+## use_query_cache {#use-query-cache}
-If turned on, SELECT queries may utilize the [query result cache](../query-result-cache.md). Parameters [enable_reads_from_query_result_cache](#enable-reads-from-query-result-cache)
-and [enable_writes_to_query_result_cache](#enable-writes-to-query-result-cache) control in more detail how the cache is used.
+If turned on, `SELECT` queries may utilize the [query cache](../query-cache.md). Parameters [enable_reads_from_query_cache](#enable-reads-from-query-cache)
+and [enable_writes_to_query_cache](#enable-writes-to-query-cache) control in more detail how the cache is used.
Possible values:
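A minimal usage sketch for the renamed setting (the table name is hypothetical):

```sql
-- The first run computes and stores the result; repeated runs may be
-- answered from the cache, subject to the query_cache_* settings below.
SELECT count() FROM hits
SETTINGS allow_experimental_query_cache = 1, use_query_cache = 1;
```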
@@ -1313,9 +1313,9 @@ Possible values:
Default value: `0`.
-## enable_reads_from_query_result_cache {#enable-reads-from-query-result-cache}
+## enable_reads_from_query_cache {#enable-reads-from-query-cache}
-If turned on, results of SELECT queries are retrieved from the [query result cache](../query-result-cache.md).
+If turned on, results of `SELECT` queries are retrieved from the [query cache](../query-cache.md).
Possible values:
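A sketch separating cache writes from cache reads via the two renamed flags (table name hypothetical):

```sql
-- Warm the cache without reading from it:
SELECT count() FROM hits
SETTINGS allow_experimental_query_cache = 1, use_query_cache = 1, enable_reads_from_query_cache = 0;

-- Read from the cache without refreshing the stored entry:
SELECT count() FROM hits
SETTINGS allow_experimental_query_cache = 1, use_query_cache = 1, enable_writes_to_query_cache = 0;
```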
@@ -1324,9 +1324,9 @@ Possible values:
Default value: `1`.
-## enable_writes_to_query_result_cache {#enable-writes-to-query-result-cache}
+## enable_writes_to_query_cache {#enable-writes-to-query-cache}
-If turned on, results of SELECT queries are stored in the [query result cache](../query-result-cache.md).
+If turned on, results of `SELECT` queries are stored in the [query cache](../query-cache.md).
Possible values:
@@ -1335,9 +1335,9 @@ Possible values:
Default value: `1`.
-## query_result_cache_store_results_of_queries_with_nondeterministic_functions {#query-result-cache-store-results-of-queries-with-nondeterministic-functions}
+## query_cache_store_results_of_queries_with_nondeterministic_functions {#query-cache-store-results-of-queries-with-nondeterministic-functions}
-If turned on, then results of SELECT queries with non-deterministic functions (e.g. `rand()`, `now()`) can be cached in the [query result cache](../query-result-cache.md).
+If turned on, then results of `SELECT` queries with non-deterministic functions (e.g. `rand()`, `now()`) can be cached in the [query cache](../query-cache.md).
Possible values:
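An opt-in sketch for caching a non-deterministic query:

```sql
-- now() makes the result non-deterministic; without the extra setting the
-- result would not be stored in the cache.
SELECT now()
SETTINGS allow_experimental_query_cache = 1, use_query_cache = 1,
    query_cache_store_results_of_queries_with_nondeterministic_functions = 1;
```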
@@ -1346,9 +1346,9 @@ Possible values:
Default value: `0`.
-## query_result_cache_min_query_runs {#query-result-cache-min-query-runs}
+## query_cache_min_query_runs {#query-cache-min-query-runs}
-Minimum number of times a SELECT query must run before its result is stored in the [query result cache](../query-result-cache.md).
+Minimum number of times a `SELECT` query must run before its result is stored in the [query cache](../query-cache.md).
Possible values:
@@ -1356,9 +1356,9 @@ Possible values:
Default value: `0`
-## query_result_cache_min_query_duration {#query-result-cache-min-query-duration}
+## query_cache_min_query_duration {#query-cache-min-query-duration}
-Minimum duration in milliseconds a query needs to run for its result to be stored in the [query result cache](../query-result-cache.md).
+Minimum duration in milliseconds a query needs to run for its result to be stored in the [query cache](../query-cache.md).
Possible values:
@@ -1366,9 +1366,9 @@ Possible values:
Default value: `0`
-## query_result_cache_ttl {#query-result-cache-ttl}
+## query_cache_ttl {#query-cache-ttl}
-After this time in seconds entries in the [query result cache](../query-result-cache.md) become stale.
+After this time in seconds entries in the [query cache](../query-cache.md) become stale.
Possible values:
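Taken together, the thresholds above can restrict caching to hot, expensive queries (a sketch; the table name is hypothetical):

```sql
-- Cache only after the query ran more than 5 times and took at least
-- 2000 ms; cached entries become stale after 300 seconds.
SELECT count() FROM hits
SETTINGS allow_experimental_query_cache = 1, use_query_cache = 1,
    query_cache_min_query_runs = 5,
    query_cache_min_query_duration = 2000,
    query_cache_ttl = 300;
```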
@@ -1376,9 +1376,9 @@ Possible values:
Default value: `60`
-## query_result_cache_share_between_users {#query-result-cache-share-between-users}
+## query_cache_share_between_users {#query-cache-share-between-users}
-If turned on, the result of SELECT queries cached in the [query result cache](../query-result-cache.md) can be read by other users.
+If turned on, the result of `SELECT` queries cached in the [query cache](../query-cache.md) can be read by other users.
It is not recommended to enable this setting due to security reasons.
Possible values:
@@ -3689,6 +3689,30 @@ Default value: `0`.
- [optimize_move_to_prewhere](#optimize_move_to_prewhere) setting
+## optimize_using_constraints
+
+Use [constraints](../../sql-reference/statements/create/table.md#constraints) for query optimization. The default is `false`.
+
+Possible values:
+
+- true, false
+
+## optimize_append_index
+
+Use [constraints](../../sql-reference/statements/create/table.md#constraints) in order to append an index condition. The default is `false`.
+
+Possible values:
+
+- true, false
+
+## optimize_substitute_columns
+
+Use [constraints](../../sql-reference/statements/create/table.md#constraints) for column substitution. The default is `false`.
+
+Possible values:
+
+- true, false
+
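A combined sketch of the three settings above; the table and its `ASSUME` constraint are hypothetical:

```sql
CREATE TABLE constrained_t
(
    a Int64,
    b Int64,
    CONSTRAINT c_b ASSUME b = a + 1
)
ENGINE = MergeTree
ORDER BY a;

-- The optimizer may exploit the constraint to simplify, substitute or
-- index the condition:
SELECT count() FROM constrained_t
WHERE a + 1 = b
SETTINGS optimize_using_constraints = 1, optimize_substitute_columns = 1, optimize_append_index = 1;
```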
## describe_include_subcolumns {#describe_include_subcolumns}
Enables describing subcolumns for a [DESCRIBE](../../sql-reference/statements/describe-table.md) query. For example, members of a [Tuple](../../sql-reference/data-types/tuple.md) or subcolumns of a [Map](../../sql-reference/data-types/map.md/#map-subcolumns), [Nullable](../../sql-reference/data-types/nullable.md/#finding-null) or an [Array](../../sql-reference/data-types/array.md/#array-size) data type.
diff --git a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md
index adea1ac0282..4dc6fd33849 100644
--- a/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md
+++ b/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md
@@ -5,7 +5,7 @@ sidebar_label: Storing Dictionaries in Memory
---
import CloudDetails from '@site/docs/en/sql-reference/dictionaries/external-dictionaries/_snippet_dictionary_in_cloud.md';
-# Storing Dictionaries in Memory
+# Storing Dictionaries in Memory
There are a variety of ways to store dictionaries in memory.
@@ -25,7 +25,7 @@ ClickHouse generates an exception for errors with dictionaries. Examples of erro
You can view the list of dictionaries and their statuses in the [system.dictionaries](../../../operations/system-tables/dictionaries.md) table.
-
+<CloudDetails />
The configuration looks like this:
@@ -299,11 +299,11 @@ Example: The table contains discounts for each advertiser in the format:
To use a sample for date ranges, define the `range_min` and `range_max` elements in the [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). These elements must contain elements `name` and `type` (if `type` is not specified, the default type will be used - Date). `type` can be any numeric type (Date / DateTime / UInt64 / Int32 / others).
-:::warning
+:::warning
Values of `range_min` and `range_max` should fit in `Int64` type.
:::
-Example:
+Example:
``` xml
@@ -459,7 +459,7 @@ select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-14')) res;
│ 0.1 │ -- the only one range is matching: 2015-01-01 - Null
└─────┘
-select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-16')) res;
+select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-16')) res;
┌─res─┐
│ 0.2 │ -- two ranges are matching, range_min 2015-01-15 (0.2) is bigger than 2015-01-01 (0.1)
└─────┘
@@ -496,7 +496,7 @@ select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-14')) res;
│ 0.1 │ -- the only one range is matching: 2015-01-01 - Null
└─────┘
-select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-16')) res;
+select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-16')) res;
┌─res─┐
│ 0.1 │ -- two ranges are matching, range_min 2015-01-01 (0.1) is less than 2015-01-15 (0.2)
└─────┘
@@ -588,7 +588,7 @@ Set a large enough cache size. You need to experiment to select the number of ce
3. Assess memory consumption using the `system.dictionaries` table.
4. Increase or decrease the number of cells until the required memory consumption is reached.
-:::warning
+:::warning
Do not use ClickHouse as a source, because it is slow to process queries with random reads.
:::
@@ -660,25 +660,30 @@ This type of storage is for use with composite [keys](../../../sql-reference/dic
This type of storage is for mapping network prefixes (IP addresses) to metadata such as ASN.
-Example: The table contains network prefixes and their corresponding AS number and country code:
+**Example**
-``` text
- +-----------|-----|------+
- | prefix | asn | cca2 |
- +=================+=======+========+
- | 202.79.32.0/20 | 17501 | NP |
- +-----------|-----|------+
- | 2620:0:870::/48 | 3856 | US |
- +-----------|-----|------+
- | 2a02:6b8:1::/48 | 13238 | RU |
- +-----------|-----|------+
- | 2001:db8::/32 | 65536 | ZZ |
- +-----------|-----|------+
+Suppose we have a table in ClickHouse that contains our IP prefixes and mappings:
+
+```sql
+CREATE TABLE my_ip_addresses (
+ prefix String,
+ asn UInt32,
+ cca2 String
+)
+ENGINE = MergeTree
+PRIMARY KEY prefix;
```
-When using this type of layout, the structure must have a composite key.
+```sql
+INSERT INTO my_ip_addresses VALUES
+ ('202.79.32.0/20', 17501, 'NP'),
+ ('2620:0:870::/48', 3856, 'US'),
+ ('2a02:6b8:1::/48', 13238, 'RU'),
+ ('2001:db8::/32', 65536, 'ZZ')
+;
+```
-Example:
+Let's define an `ip_trie` dictionary for this table. The `ip_trie` layout requires a composite key:
``` xml
@@ -712,26 +717,29 @@ Example:
or
``` sql
-CREATE DICTIONARY somedict (
+CREATE DICTIONARY my_ip_trie_dictionary (
prefix String,
asn UInt32,
cca2 String DEFAULT '??'
)
PRIMARY KEY prefix
+SOURCE(CLICKHOUSE(TABLE 'my_ip_addresses'))
+LAYOUT(IP_TRIE)
+LIFETIME(3600);
```
-The key must have only one String type attribute that contains an allowed IP prefix. Other types are not supported yet.
+The key must have only one `String` type attribute that contains an allowed IP prefix. Other types are not supported yet.
-For queries, you must use the same functions (`dictGetT` with a tuple) as for dictionaries with composite keys:
+For queries, you must use the same functions (`dictGetT` with a tuple) as for dictionaries with composite keys. The syntax is:
``` sql
dictGetT('dict_name', 'attr_name', tuple(ip))
```
-The function takes either `UInt32` for IPv4, or `FixedString(16)` for IPv6:
+The function takes either `UInt32` for IPv4, or `FixedString(16)` for IPv6. For example:
``` sql
-dictGetString('prefix', 'asn', tuple(IPv6StringToNum('2001:db8::1')))
+select dictGet('my_ip_trie_dictionary', 'asn', tuple(IPv6StringToNum('2001:db8::1')))
```
Other types are not supported yet. The function returns the attribute for the prefix that corresponds to this IP address. If there are overlapping prefixes, the most specific one is returned.
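To round off the example, lookups against the dictionary defined above could look as follows (a sketch; IPv4 keys are passed as `UInt32` and IPv6 keys as `FixedString(16)`):

```sql
-- IPv4 lookup, matching the 202.79.32.0/20 row:
SELECT dictGet('my_ip_trie_dictionary', 'cca2', tuple(IPv4StringToNum('202.79.32.10'))) AS cca2;

-- IPv6 lookup, matching the 2001:db8::/32 row:
SELECT dictGet('my_ip_trie_dictionary', 'asn', tuple(IPv6StringToNum('2001:db8::1'))) AS asn;
```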
diff --git a/docs/en/sql-reference/functions/geo/s2.md b/docs/en/sql-reference/functions/geo/s2.md
index ed3c66a0f6f..3cd66cfaaeb 100644
--- a/docs/en/sql-reference/functions/geo/s2.md
+++ b/docs/en/sql-reference/functions/geo/s2.md
@@ -304,7 +304,7 @@ Result:
└──────────────┘
```
-## s2RectUinion
+## s2RectUnion
Returns the smallest rectangle containing the union of this rectangle and the given rectangle. In the S2 system, a rectangle is represented by a type of S2Region called a `S2LatLngRect` that represents a rectangle in latitude-longitude space.
diff --git a/docs/en/sql-reference/statements/delete.md b/docs/en/sql-reference/statements/delete.md
index 0dc6cc0d09a..0acb6637ea6 100644
--- a/docs/en/sql-reference/statements/delete.md
+++ b/docs/en/sql-reference/statements/delete.md
@@ -7,7 +7,7 @@ sidebar_label: DELETE
# DELETE Statement
``` sql
-DELETE FROM [db.]table [WHERE expr]
+DELETE FROM [db.]table [ON CLUSTER cluster] [WHERE expr]
```
`DELETE FROM` removes rows from table `[db.]table` that match expression `expr`. The deleted rows are marked as deleted immediately and will be automatically filtered out of all subsequent queries. Cleanup of data happens asynchronously in background. This feature is only available for MergeTree table engine family.
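With the `ON CLUSTER` clause added above, a lightweight delete can now be issued cluster-wide (a sketch; cluster and table names are hypothetical):

```sql
DELETE FROM db.hits ON CLUSTER my_cluster WHERE CounterID = 42;
```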
diff --git a/docs/en/sql-reference/statements/system.md b/docs/en/sql-reference/statements/system.md
index 7ac0ce84e5b..300205a7ef4 100644
--- a/docs/en/sql-reference/statements/system.md
+++ b/docs/en/sql-reference/statements/system.md
@@ -103,9 +103,9 @@ Its size can be configured using the server-level setting [uncompressed_cache_si
Reset the compiled expression cache.
The compiled expression cache is enabled/disabled with the query/user/profile-level setting [compile_expressions](../../operations/settings/settings.md#compile-expressions).
-## DROP QUERY RESULT CACHE
+## DROP QUERY CACHE
-Resets the [query result cache](../../operations/query-result-cache.md).
+Resets the [query cache](../../operations/query-cache.md).
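Usage after the rename is simply:

```sql
SYSTEM DROP QUERY CACHE;
```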
## FLUSH LOGS
diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp
index 9ac054392dc..d170b43aab3 100644
--- a/programs/copier/ClusterCopier.cpp
+++ b/programs/copier/ClusterCopier.cpp
@@ -6,7 +6,6 @@
#include
#include
#include
-#include
#include
#include
#include
diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp
index 8b6c43b450e..7b1ab1b8180 100644
--- a/programs/server/Server.cpp
+++ b/programs/server/Server.cpp
@@ -1517,13 +1517,13 @@ try
global_context->setMMappedFileCache(mmap_cache_size);
/// A cache for query results.
- size_t query_result_cache_size = config().getUInt64("query_result_cache.size", 1_GiB);
- if (query_result_cache_size)
- global_context->setQueryResultCache(
- query_result_cache_size,
- config().getUInt64("query_result_cache.max_entries", 1024),
- config().getUInt64("query_result_cache.max_entry_size", 1_MiB),
- config().getUInt64("query_result_cache.max_entry_records", 30'000'000));
+ size_t query_cache_size = config().getUInt64("query_cache.size", 1_GiB);
+ if (query_cache_size)
+ global_context->setQueryCache(
+ query_cache_size,
+ config().getUInt64("query_cache.max_entries", 1024),
+ config().getUInt64("query_cache.max_entry_size", 1_MiB),
+ config().getUInt64("query_cache.max_entry_records", 30'000'000));
#if USE_EMBEDDED_COMPILER
/// 128 MB
diff --git a/programs/server/config.xml b/programs/server/config.xml
index 0ed8ec48e83..bd46263f851 100644
--- a/programs/server/config.xml
+++ b/programs/server/config.xml
@@ -1466,13 +1466,13 @@
-->
-    <!-- Configuration for the query result cache -->
-    <!-- <query_result_cache> -->
+    <!-- Configuration for the query cache -->
+    <!-- <query_cache> -->
-    <!-- </query_result_cache> -->
+    <!-- </query_cache> -->
diff --git a/src/Interpreters/Cache/QueryResultCache.cpp b/src/Interpreters/Cache/QueryCache.cpp
rename from src/Interpreters/Cache/QueryResultCache.cpp
rename to src/Interpreters/Cache/QueryCache.cpp
--- a/src/Interpreters/Cache/QueryResultCache.cpp
+++ b/src/Interpreters/Cache/QueryCache.cpp
        skip_insert = true; /// Key already contained in cache and did not expire yet --> don't replace it
}
-void QueryResultCache::Writer::buffer(Chunk && partial_query_result)
+void QueryCache::Writer::buffer(Chunk && partial_query_result)
{
if (skip_insert)
return;
@@ -208,7 +208,7 @@ void QueryResultCache::Writer::buffer(Chunk && partial_query_result)
}
}
-void QueryResultCache::Writer::finalizeWrite()
+void QueryCache::Writer::finalizeWrite()
{
if (skip_insert)
return;
@@ -239,7 +239,7 @@ void QueryResultCache::Writer::finalizeWrite()
}
else
++it;
- LOG_TRACE(&Poco::Logger::get("QueryResultCache"), "Removed {} stale entries", removed_items);
+ LOG_TRACE(&Poco::Logger::get("QueryCache"), "Removed {} stale entries", removed_items);
}
/// Insert or replace if enough space
@@ -250,23 +250,23 @@ void QueryResultCache::Writer::finalizeWrite()
cache_size_in_bytes -= it->second.sizeInBytes(); // key replacement
cache[key] = std::move(query_result);
- LOG_TRACE(&Poco::Logger::get("QueryResultCache"), "Stored result of query {}", key.queryStringFromAst());
+ LOG_TRACE(&Poco::Logger::get("QueryCache"), "Stored result of query {}", key.queryStringFromAst());
}
}
-QueryResultCache::Reader::Reader(const Cache & cache_, const Key & key, size_t & cache_size_in_bytes_, const std::lock_guard &)
+QueryCache::Reader::Reader(const Cache & cache_, const Key & key, size_t & cache_size_in_bytes_, const std::lock_guard &)
{
auto it = cache_.find(key);
if (it == cache_.end())
{
- LOG_TRACE(&Poco::Logger::get("QueryResultCache"), "No entry found for query {}", key.queryStringFromAst());
+ LOG_TRACE(&Poco::Logger::get("QueryCache"), "No entry found for query {}", key.queryStringFromAst());
return;
}
if (it->first.username.has_value() && it->first.username != key.username)
{
- LOG_TRACE(&Poco::Logger::get("QueryResultCache"), "Inaccessible entry found for query {}", key.queryStringFromAst());
+ LOG_TRACE(&Poco::Logger::get("QueryCache"), "Inaccessible entry found for query {}", key.queryStringFromAst());
return;
}
@@ -274,33 +274,33 @@ QueryResultCache::Reader::Reader(const Cache & cache_, const Key & key, size_t &
{
cache_size_in_bytes_ -= it->second.sizeInBytes();
const_cast<Cache &>(cache_).erase(it);
- LOG_TRACE(&Poco::Logger::get("QueryResultCache"), "Stale entry found and removed for query {}", key.queryStringFromAst());
+ LOG_TRACE(&Poco::Logger::get("QueryCache"), "Stale entry found and removed for query {}", key.queryStringFromAst());
return;
}
pipe = Pipe(std::make_shared<SourceFromChunks>(it->first.header, it->second.chunks));
- LOG_TRACE(&Poco::Logger::get("QueryResultCache"), "Entry found for query {}", key.queryStringFromAst());
+ LOG_TRACE(&Poco::Logger::get("QueryCache"), "Entry found for query {}", key.queryStringFromAst());
}
-bool QueryResultCache::Reader::hasCacheEntryForKey() const
+bool QueryCache::Reader::hasCacheEntryForKey() const
{
bool res = !pipe.empty();
if (res)
- ProfileEvents::increment(ProfileEvents::QueryResultCacheHits);
+ ProfileEvents::increment(ProfileEvents::QueryCacheHits);
else
- ProfileEvents::increment(ProfileEvents::QueryResultCacheMisses);
+ ProfileEvents::increment(ProfileEvents::QueryCacheMisses);
return res;
}
-Pipe && QueryResultCache::Reader::getPipe()
+Pipe && QueryCache::Reader::getPipe()
{
chassert(!pipe.empty()); // cf. hasCacheEntryForKey()
return std::move(pipe);
}
-QueryResultCache::QueryResultCache(size_t max_cache_size_in_bytes_, size_t max_cache_entries_, size_t max_cache_entry_size_in_bytes_, size_t max_cache_entry_size_in_rows_)
+QueryCache::QueryCache(size_t max_cache_size_in_bytes_, size_t max_cache_entries_, size_t max_cache_entry_size_in_bytes_, size_t max_cache_entry_size_in_rows_)
: max_cache_size_in_bytes(max_cache_size_in_bytes_)
, max_cache_entries(max_cache_entries_)
, max_cache_entry_size_in_bytes(max_cache_entry_size_in_bytes_)
@@ -308,19 +308,19 @@ QueryResultCache::QueryResultCache(size_t max_cache_size_in_bytes_, size_t max_c
{
}
-QueryResultCache::Reader QueryResultCache::createReader(const Key & key)
+QueryCache::Reader QueryCache::createReader(const Key & key)
{
std::lock_guard lock(mutex);
return Reader(cache, key, cache_size_in_bytes, lock);
}
-QueryResultCache::Writer QueryResultCache::createWriter(const Key & key, std::chrono::milliseconds min_query_runtime)
+QueryCache::Writer QueryCache::createWriter(const Key & key, std::chrono::milliseconds min_query_runtime)
{
std::lock_guard lock(mutex);
return Writer(mutex, cache, key, cache_size_in_bytes, max_cache_size_in_bytes, max_cache_entries, max_cache_entry_size_in_bytes, max_cache_entry_size_in_rows, min_query_runtime);
}
-void QueryResultCache::reset()
+void QueryCache::reset()
{
std::lock_guard lock(mutex);
cache.clear();
@@ -328,7 +328,7 @@ void QueryResultCache::reset()
cache_size_in_bytes = 0;
}
-size_t QueryResultCache::recordQueryRun(const Key & key)
+size_t QueryCache::recordQueryRun(const Key & key)
{
static constexpr size_t TIMES_EXECUTED_MAX_SIZE = 10'000;
diff --git a/src/Interpreters/Cache/QueryResultCache.h b/src/Interpreters/Cache/QueryCache.h
similarity index 89%
rename from src/Interpreters/Cache/QueryResultCache.h
rename to src/Interpreters/Cache/QueryCache.h
index 65cab854a45..45f48c7a558 100644
--- a/src/Interpreters/Cache/QueryResultCache.h
+++ b/src/Interpreters/Cache/QueryCache.h
@@ -18,7 +18,7 @@ bool astContainsNonDeterministicFunctions(ASTPtr ast, ContextPtr context);
/// returned. In order to still obtain sufficiently up-to-date query results, a expiry time (TTL) must be specified for each cache entry
/// after which it becomes stale and is ignored. Stale entries are removed opportunistically from the cache, they are only evicted when a
/// new entry is inserted and the cache has insufficient capacity.
-class QueryResultCache
+class QueryCache
{
public:
/// Represents a query result in the cache.
@@ -82,9 +82,9 @@ public:
/// Buffers multiple partial query result chunks (buffer()) and eventually stores them as cache entry (finalizeWrite()).
///
/// Implementation note: Queries may throw exceptions during runtime, e.g. out-of-memory errors. In this case, no query result must be
- /// written into the query result cache. Unfortunately, neither the Writer nor the special transform added on top of the query pipeline
- /// which holds the Writer know whether they are destroyed because the query ended successfully or because of an exception (otherwise,
- /// we could simply implement a check in their destructors). To handle exceptions correctly nevertheless, we do the actual insert in
+ /// written into the query cache. Unfortunately, neither the Writer nor the special transform added on top of the query pipeline which
+ /// holds the Writer know whether they are destroyed because the query ended successfully or because of an exception (otherwise, we
+ /// could simply implement a check in their destructors). To handle exceptions correctly nevertheless, we do the actual insert in
/// finalizeWrite() as opposed to the Writer destructor. This function is then called only for successful queries in finish_callback()
/// which runs before the transform and the Writer are destroyed, whereas for unsuccessful queries we do nothing (the Writer is
/// destroyed w/o inserting anything).
@@ -117,7 +117,7 @@ public:
size_t max_entry_size_in_bytes_, size_t max_entry_size_in_rows_,
std::chrono::milliseconds min_query_runtime_);
- friend class QueryResultCache; /// for createWriter()
+ friend class QueryCache; /// for createWriter()
};
/// Looks up a query result for a key in the cache and (if found) constructs a pipe with the query result chunks as source.
@@ -129,10 +129,10 @@ public:
private:
Reader(const Cache & cache_, const Key & key, size_t & cache_size_in_bytes_, const std::lock_guard &);
Pipe pipe;
- friend class QueryResultCache; /// for createReader()
+ friend class QueryCache; /// for createReader()
};
- QueryResultCache(size_t max_cache_size_in_bytes_, size_t max_cache_entries_, size_t max_cache_entry_size_in_bytes_, size_t max_cache_entry_size_in_rows_);
+ QueryCache(size_t max_cache_size_in_bytes_, size_t max_cache_entries_, size_t max_cache_entry_size_in_bytes_, size_t max_cache_entry_size_in_rows_);
Reader createReader(const Key & key);
Writer createWriter(const Key & key, std::chrono::milliseconds min_query_runtime);
@@ -160,9 +160,9 @@ private:
const size_t max_cache_entry_size_in_bytes;
const size_t max_cache_entry_size_in_rows;
- friend class StorageSystemQueryResultCache;
+ friend class StorageSystemQueryCache;
};
-using QueryResultCachePtr = std::shared_ptr<QueryResultCache>;
+using QueryCachePtr = std::shared_ptr<QueryCache>;
}
diff --git a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp
index e228dcc1f4a..5c781c531ed 100644
--- a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp
+++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp
@@ -7,7 +7,6 @@
#include
#include
#include
-#include
#include
#include
diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp
index 2a8734596c5..d9a7aa2e677 100644
--- a/src/Interpreters/Context.cpp
+++ b/src/Interpreters/Context.cpp
@@ -40,7 +40,7 @@
#include
#include
#include
-#include <Interpreters/Cache/QueryResultCache.h>
+#include <Interpreters/Cache/QueryCache.h>
#include
#include
#include
@@ -236,7 +236,7 @@ struct ContextSharedPart : boost::noncopyable
mutable std::unique_ptr<ThreadPool> load_marks_threadpool; /// Threadpool for loading marks cache.
mutable UncompressedCachePtr index_uncompressed_cache; /// The cache of decompressed blocks for MergeTree indices.
mutable MarkCachePtr index_mark_cache; /// Cache of marks in compressed files of MergeTree indices.
- mutable QueryResultCachePtr query_result_cache; /// Cache of query results.
+ mutable QueryCachePtr query_cache; /// Cache of query results.
mutable MMappedFileCachePtr mmap_cache; /// Cache of mmapped files to avoid frequent open/map/unmap/close and to reuse from several threads.
ProcessList process_list; /// Executing queries at the moment.
GlobalOvercommitTracker global_overcommit_tracker;
@@ -2041,27 +2041,27 @@ void Context::dropIndexMarkCache() const
shared->index_mark_cache->reset();
}
-void Context::setQueryResultCache(size_t max_size_in_bytes, size_t max_entries, size_t max_entry_size_in_bytes, size_t max_entry_size_in_records)
+void Context::setQueryCache(size_t max_size_in_bytes, size_t max_entries, size_t max_entry_size_in_bytes, size_t max_entry_size_in_records)
{
auto lock = getLock();
- if (shared->query_result_cache)
- throw Exception(ErrorCodes::LOGICAL_ERROR, "Query result cache has been already created.");
+ if (shared->query_cache)
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Query cache has been already created.");
- shared->query_result_cache = std::make_shared<QueryResultCache>(max_size_in_bytes, max_entries, max_entry_size_in_bytes, max_entry_size_in_records);
+ shared->query_cache = std::make_shared<QueryCache>(max_size_in_bytes, max_entries, max_entry_size_in_bytes, max_entry_size_in_records);
}
-QueryResultCachePtr Context::getQueryResultCache() const
+QueryCachePtr Context::getQueryCache() const
{
auto lock = getLock();
- return shared->query_result_cache;
+ return shared->query_cache;
}
-void Context::dropQueryResultCache() const
+void Context::dropQueryCache() const
{
auto lock = getLock();
- if (shared->query_result_cache)
- shared->query_result_cache->reset();
+ if (shared->query_cache)
+ shared->query_cache->reset();
}
void Context::setMMappedFileCache(size_t cache_size_in_num_entries)
@@ -2104,8 +2104,8 @@ void Context::dropCaches() const
if (shared->index_mark_cache)
shared->index_mark_cache->reset();
- if (shared->query_result_cache)
- shared->query_result_cache->reset();
+ if (shared->query_cache)
+ shared->query_cache->reset();
if (shared->mmap_cache)
shared->mmap_cache->reset();
diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h
index 10983b15d7f..00dc4204496 100644
--- a/src/Interpreters/Context.h
+++ b/src/Interpreters/Context.h
@@ -81,8 +81,8 @@ class Macros;
struct Progress;
struct FileProgress;
class Clusters;
+class QueryCache;
class QueryLog;
-class QueryResultCache;
class QueryThreadLog;
class QueryViewsLog;
class PartLog;
@@ -861,9 +861,9 @@ public:
void dropMMappedFileCache() const;
/// Create a cache of query results for statements which run repeatedly.
- void setQueryResultCache(size_t max_size_in_bytes, size_t max_entries, size_t max_entry_size_in_bytes, size_t max_entry_size_in_records);
- std::shared_ptr<QueryResultCache> getQueryResultCache() const;
- void dropQueryResultCache() const;
+ void setQueryCache(size_t max_size_in_bytes, size_t max_entries, size_t max_entry_size_in_bytes, size_t max_entry_size_in_records);
+ std::shared_ptr<QueryCache> getQueryCache() const;
+ void dropQueryCache() const;
/** Clear the caches of the uncompressed blocks and marks.
* This is usually done when renaming tables, changing the type of columns, deleting a table.
diff --git a/src/Interpreters/InterpreterAlterQuery.cpp b/src/Interpreters/InterpreterAlterQuery.cpp
index ee5aad3d18e..a87308f0cef 100644
--- a/src/Interpreters/InterpreterAlterQuery.cpp
+++ b/src/Interpreters/InterpreterAlterQuery.cpp
@@ -147,7 +147,7 @@ BlockIO InterpreterAlterQuery::executeToTable(const ASTAlterQuery & alter)
{
table->checkMutationIsPossible(mutation_commands, getContext()->getSettingsRef());
MutationsInterpreter(table, metadata_snapshot, mutation_commands, getContext(), false).validate();
- table->mutate(mutation_commands, getContext(), false);
+ table->mutate(mutation_commands, getContext());
}
if (!partition_commands.empty())
diff --git a/src/Interpreters/InterpreterDeleteQuery.cpp b/src/Interpreters/InterpreterDeleteQuery.cpp
index abccc313e14..f8974a19f45 100644
--- a/src/Interpreters/InterpreterDeleteQuery.cpp
+++ b/src/Interpreters/InterpreterDeleteQuery.cpp
@@ -5,15 +5,16 @@
#include
#include
#include
+#include
#include
+#include
+#include
+#include
#include
-#include
-#include
#include
#include
#include
#include
-#include
namespace DB
@@ -72,7 +73,7 @@ BlockIO InterpreterDeleteQuery::execute()
table->checkMutationIsPossible(mutation_commands, getContext()->getSettingsRef());
MutationsInterpreter(table, metadata_snapshot, mutation_commands, getContext(), false).validate();
- table->mutate(mutation_commands, getContext(), false);
+ table->mutate(mutation_commands, getContext());
return {};
}
else if (table->supportsLightweightDelete())
@@ -82,35 +83,25 @@ BlockIO InterpreterDeleteQuery::execute()
"Lightweight delete mutate is experimental. "
"Set `allow_experimental_lightweight_delete` setting to enable it");
- /// Convert to MutationCommand
- MutationCommands mutation_commands;
- MutationCommand mut_command;
+ /// Build "ALTER ... UPDATE _row_exists = 0 WHERE predicate" query
+ String alter_query =
+ "ALTER TABLE " + table->getStorageID().getFullTableName()
+ + (delete_query.cluster.empty() ? "" : " ON CLUSTER " + backQuoteIfNeed(delete_query.cluster))
+ + " UPDATE `_row_exists` = 0 WHERE " + serializeAST(*delete_query.predicate);
- /// Build "UPDATE _row_exists = 0 WHERE predicate" query
- mut_command.type = MutationCommand::Type::UPDATE;
- mut_command.predicate = delete_query.predicate;
+ ParserAlterQuery parser;
+ ASTPtr alter_ast = parseQuery(
+ parser,
+ alter_query.data(),
+ alter_query.data() + alter_query.size(),
+ "ALTER query",
+ 0,
+ DBMS_DEFAULT_MAX_PARSER_DEPTH);
- auto command = std::make_shared<ASTAlterCommand>();
- command->type = ASTAlterCommand::UPDATE;
- command->predicate = delete_query.predicate;
- command->update_assignments = std::make_shared<ASTExpressionList>();
- auto set_row_does_not_exist = std::make_shared<ASTAssignment>();
- set_row_does_not_exist->column_name = LightweightDeleteDescription::FILTER_COLUMN.name;
- auto zero_value = std::make_shared<ASTLiteral>(DB::Field(UInt8(0)));
- set_row_does_not_exist->children.push_back(zero_value);
- command->update_assignments->children.push_back(set_row_does_not_exist);
- command->children.push_back(command->predicate);
- command->children.push_back(command->update_assignments);
- mut_command.column_to_update_expression[set_row_does_not_exist->column_name] = zero_value;
- mut_command.ast = command->ptr();
-
- mutation_commands.emplace_back(mut_command);
-
- table->checkMutationIsPossible(mutation_commands, getContext()->getSettingsRef());
- MutationsInterpreter(table, metadata_snapshot, mutation_commands, getContext(), false).validate();
- table->mutate(mutation_commands, getContext(), true);
-
- return {};
+ auto context = Context::createCopy(getContext());
+ context->setSetting("mutations_sync", 2); /// Lightweight delete is always synchronous
+ InterpreterAlterQuery alter_interpreter(alter_ast, context);
+ return alter_interpreter.execute();
}
else
{
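The rewrite implemented above can be illustrated in SQL (table, cluster and predicate are hypothetical):

```sql
-- A lightweight delete ...
DELETE FROM db.t ON CLUSTER my_cluster WHERE x = 1;

-- ... is now executed as the equivalent mutation, with mutations_sync = 2
-- set on a copy of the query context so it runs synchronously:
ALTER TABLE db.t ON CLUSTER my_cluster UPDATE `_row_exists` = 0 WHERE x = 1;
```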
diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp
index 3b90ac8d284..74fe351daaf 100644
--- a/src/Interpreters/InterpreterInsertQuery.cpp
+++ b/src/Interpreters/InterpreterInsertQuery.cpp
@@ -6,7 +6,6 @@
#include
#include
#include
-#include
#include
#include
#include
diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp
index 19b31a858f4..abd0ecd6ea1 100644
--- a/src/Interpreters/InterpreterSystemQuery.cpp
+++ b/src/Interpreters/InterpreterSystemQuery.cpp
@@ -327,9 +327,9 @@ BlockIO InterpreterSystemQuery::execute()
getContext()->checkAccess(AccessType::SYSTEM_DROP_MMAP_CACHE);
system_context->dropMMappedFileCache();
break;
- case Type::DROP_QUERY_RESULT_CACHE:
- getContext()->checkAccess(AccessType::SYSTEM_DROP_QUERY_RESULT_CACHE);
- getContext()->dropQueryResultCache();
+ case Type::DROP_QUERY_CACHE:
+ getContext()->checkAccess(AccessType::SYSTEM_DROP_QUERY_CACHE);
+ getContext()->dropQueryCache();
break;
#if USE_EMBEDDED_COMPILER
case Type::DROP_COMPILED_EXPRESSION_CACHE:
@@ -969,7 +969,7 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster()
case Type::DROP_DNS_CACHE:
case Type::DROP_MARK_CACHE:
case Type::DROP_MMAP_CACHE:
- case Type::DROP_QUERY_RESULT_CACHE:
+ case Type::DROP_QUERY_CACHE:
#if USE_EMBEDDED_COMPILER
case Type::DROP_COMPILED_EXPRESSION_CACHE:
#endif
diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp
index 646f1e89fc3..f46adf91ee0 100644
--- a/src/Interpreters/executeQuery.cpp
+++ b/src/Interpreters/executeQuery.cpp
@@ -14,7 +14,7 @@
#include
#include
#include
-#include <Interpreters/Cache/QueryResultCache.h>
+#include <Interpreters/Cache/QueryCache.h>
#include
#include
@@ -716,48 +716,48 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
/// If
/// - it is a SELECT query,
- /// - passive (read) use of the query result cache is enabled, and
- /// - the query result cache knows the query result
- /// then replace the pipeline by a new pipeline with a single source that is populated from the query result cache
- auto query_result_cache = context->getQueryResultCache();
- bool read_result_from_query_result_cache = false; /// a query must not read from *and* write to the query result cache at the same time
- if (query_result_cache != nullptr
- && (settings.allow_experimental_query_result_cache && settings.use_query_result_cache && settings.enable_reads_from_query_result_cache)
+ /// - passive (read) use of the query cache is enabled, and
+ /// - the query cache knows the query result
+ /// then replace the pipeline by a new pipeline with a single source that is populated from the query cache
+ auto query_cache = context->getQueryCache();
+ bool read_result_from_query_cache = false; /// a query must not read from *and* write to the query cache at the same time
+ if (query_cache != nullptr
+ && (settings.allow_experimental_query_cache && settings.use_query_cache && settings.enable_reads_from_query_cache)
&& res.pipeline.pulling())
{
- QueryResultCache::Key key(
+ QueryCache::Key key(
ast, res.pipeline.getHeader(),
std::make_optional(context->getUserName()),
- std::chrono::system_clock::now() + std::chrono::seconds(settings.query_result_cache_ttl));
- QueryResultCache::Reader reader = query_result_cache->createReader(key);
+ std::chrono::system_clock::now() + std::chrono::seconds(settings.query_cache_ttl));
+ QueryCache::Reader reader = query_cache->createReader(key);
if (reader.hasCacheEntryForKey())
{
res.pipeline = QueryPipeline(reader.getPipe());
- read_result_from_query_result_cache = true;
+ read_result_from_query_cache = true;
}
}
/// If
/// - it is a SELECT query, and
- /// - active (write) use of the query result cache is enabled
- /// then add a processor on top of the pipeline which stores the result in the query result cache.
- if (!read_result_from_query_result_cache
- && query_result_cache != nullptr
- && settings.allow_experimental_query_result_cache && settings.use_query_result_cache && settings.enable_writes_to_query_result_cache
+ /// - active (write) use of the query cache is enabled
+ /// then add a processor on top of the pipeline which stores the result in the query cache.
+ if (!read_result_from_query_cache
+ && query_cache != nullptr
+ && settings.allow_experimental_query_cache && settings.use_query_cache && settings.enable_writes_to_query_cache
&& res.pipeline.pulling()
- && (!astContainsNonDeterministicFunctions(ast, context) || settings.query_result_cache_store_results_of_queries_with_nondeterministic_functions))
+ && (!astContainsNonDeterministicFunctions(ast, context) || settings.query_cache_store_results_of_queries_with_nondeterministic_functions))
{
- QueryResultCache::Key key(
+ QueryCache::Key key(
ast, res.pipeline.getHeader(),
- settings.query_result_cache_share_between_users ? std::nullopt : std::make_optional(context->getUserName()),
- std::chrono::system_clock::now() + std::chrono::seconds(settings.query_result_cache_ttl));
+ settings.query_cache_share_between_users ? std::nullopt : std::make_optional(context->getUserName()),
+ std::chrono::system_clock::now() + std::chrono::seconds(settings.query_cache_ttl));
- const size_t num_query_runs = query_result_cache->recordQueryRun(key);
- if (num_query_runs > settings.query_result_cache_min_query_runs)
+ const size_t num_query_runs = query_cache->recordQueryRun(key);
+ if (num_query_runs > settings.query_cache_min_query_runs)
{
- auto stream_in_query_result_cache_transform = std::make_shared<StreamInQueryResultCacheTransform>(res.pipeline.getHeader(), query_result_cache, key,
- std::chrono::milliseconds(context->getSettings().query_result_cache_min_query_duration.totalMilliseconds()));
- res.pipeline.streamIntoQueryResultCache(stream_in_query_result_cache_transform);
+ auto stream_in_query_cache_transform = std::make_shared<StreamInQueryCacheTransform>(res.pipeline.getHeader(), query_cache, key,
+ std::chrono::milliseconds(context->getSettings().query_cache_min_query_duration.totalMilliseconds()));
+ res.pipeline.streamIntoQueryCache(stream_in_query_cache_transform);
}
}
@@ -908,10 +908,10 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
auto finish_callback = [elem,
context,
ast,
- allow_experimental_query_result_cache = settings.allow_experimental_query_result_cache,
- use_query_result_cache = settings.use_query_result_cache,
- enable_writes_to_query_result_cache = settings.enable_writes_to_query_result_cache,
- query_result_cache_store_results_of_queries_with_nondeterministic_functions = settings.query_result_cache_store_results_of_queries_with_nondeterministic_functions,
+ allow_experimental_query_cache = settings.allow_experimental_query_cache,
+ use_query_cache = settings.use_query_cache,
+ enable_writes_to_query_cache = settings.enable_writes_to_query_cache,
+ query_cache_store_results_of_queries_with_nondeterministic_functions = settings.query_cache_store_results_of_queries_with_nondeterministic_functions,
log_queries,
log_queries_min_type = settings.log_queries_min_type,
log_queries_min_query_duration_ms = settings.log_queries_min_query_duration_ms.totalMilliseconds(),
@@ -921,15 +921,15 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
pulling_pipeline = pipeline.pulling(),
query_span](QueryPipeline & query_pipeline) mutable
{
- /// If active (write) use of the query result cache is enabled and the query is eligible for result caching, then store the
- /// query result buffered in the special-purpose cache processor (added on top of the pipeline) into the cache.
- auto query_result_cache = context->getQueryResultCache();
- if (query_result_cache != nullptr
+ /// If active (write) use of the query cache is enabled and the query is eligible for result caching, then store the query
+ /// result buffered in the special-purpose cache processor (added on top of the pipeline) into the cache.
+ auto query_cache = context->getQueryCache();
+ if (query_cache != nullptr
&& pulling_pipeline
- && allow_experimental_query_result_cache && use_query_result_cache && enable_writes_to_query_result_cache
- && (!astContainsNonDeterministicFunctions(ast, context) || query_result_cache_store_results_of_queries_with_nondeterministic_functions))
+ && allow_experimental_query_cache && use_query_cache && enable_writes_to_query_cache
+ && (!astContainsNonDeterministicFunctions(ast, context) || query_cache_store_results_of_queries_with_nondeterministic_functions))
{
- query_pipeline.finalizeWriteInQueryResultCache();
+ query_pipeline.finalizeWriteInQueryCache();
}
QueryStatusPtr process_list_elem = context->getProcessListElement();
diff --git a/src/Parsers/ASTDeleteQuery.cpp b/src/Parsers/ASTDeleteQuery.cpp
index 08b40f65121..09dc4b936ae 100644
--- a/src/Parsers/ASTDeleteQuery.cpp
+++ b/src/Parsers/ASTDeleteQuery.cpp
@@ -41,6 +41,8 @@ void ASTDeleteQuery::formatQueryImpl(const FormatSettings & settings, FormatStat
}
settings.ostr << backQuoteIfNeed(getTable());
+ formatOnCluster(settings);
+
settings.ostr << (settings.hilite ? hilite_keyword : "") << " WHERE " << (settings.hilite ? hilite_none : "");
predicate->formatImpl(settings, state, frame);
}
diff --git a/src/Parsers/ASTDeleteQuery.h b/src/Parsers/ASTDeleteQuery.h
index bcb97639b64..1dab684ffc9 100644
--- a/src/Parsers/ASTDeleteQuery.h
+++ b/src/Parsers/ASTDeleteQuery.h
@@ -2,15 +2,20 @@
#include
#include
+#include <Parsers/ASTQueryWithOnCluster.h>
namespace DB
{
/// DELETE FROM [db.]name WHERE ...
-class ASTDeleteQuery : public ASTQueryWithTableAndOutput
+class ASTDeleteQuery : public ASTQueryWithTableAndOutput, public ASTQueryWithOnCluster
{
public:
String getID(char delim) const final;
ASTPtr clone() const final;
+ ASTPtr getRewrittenASTWithoutOnCluster(const WithoutOnClusterASTRewriteParams & params) const override
+ {
+ return removeOnCluster(clone(), params.default_database);
+ }
ASTPtr predicate;
diff --git a/src/Parsers/ASTFunction.cpp b/src/Parsers/ASTFunction.cpp
index fccef01a2bc..7a19cba0f75 100644
--- a/src/Parsers/ASTFunction.cpp
+++ b/src/Parsers/ASTFunction.cpp
@@ -37,81 +37,118 @@ namespace
{
/// Finds arguments of a specified function which should not be displayed for most users for security reasons.
/// That involves passwords and secret keys.
- /// The member function getRange() returns a pair of numbers [first, last) specifying arguments
- /// which must be hidden. If the function returns {-1, -1} that means no arguments must be hidden.
class FunctionSecretArgumentsFinder
{
public:
explicit FunctionSecretArgumentsFinder(const ASTFunction & function_) : function(function_)
{
- if (function.arguments)
- {
- if (const auto * expr_list = function.arguments->as<ASTExpressionList>())
- arguments = &expr_list->children;
- }
- }
+ if (!function.arguments)
+ return;
- std::pair<size_t, size_t> getRange() const
- {
- if (!arguments)
- return npos;
+ const auto * expr_list = function.arguments->as<ASTExpressionList>();
+ if (!expr_list)
+ return;
+ arguments = &expr_list->children;
switch (function.kind)
{
- case ASTFunction::Kind::ORDINARY_FUNCTION: return findOrdinaryFunctionSecretArguments();
- case ASTFunction::Kind::WINDOW_FUNCTION: return npos;
- case ASTFunction::Kind::LAMBDA_FUNCTION: return npos;
- case ASTFunction::Kind::TABLE_ENGINE: return findTableEngineSecretArguments();
- case ASTFunction::Kind::DATABASE_ENGINE: return findDatabaseEngineSecretArguments();
- case ASTFunction::Kind::BACKUP_NAME: return findBackupNameSecretArguments();
+ case ASTFunction::Kind::ORDINARY_FUNCTION: findOrdinaryFunctionSecretArguments(); break;
+ case ASTFunction::Kind::WINDOW_FUNCTION: break;
+ case ASTFunction::Kind::LAMBDA_FUNCTION: break;
+ case ASTFunction::Kind::TABLE_ENGINE: findTableEngineSecretArguments(); break;
+ case ASTFunction::Kind::DATABASE_ENGINE: findDatabaseEngineSecretArguments(); break;
+ case ASTFunction::Kind::BACKUP_NAME: findBackupNameSecretArguments(); break;
}
}
- static const constexpr std::pair<size_t, size_t> npos{static_cast<size_t>(-1), static_cast<size_t>(-1)};
+ struct Result
+ {
+ /// Result constructed by default means no arguments will be hidden.
+ size_t start = static_cast<size_t>(-1);
+ size_t count = 0; /// Mostly it's either 0 or 1. There are only a few cases where `count` can be greater than 1 (e.g. see `encrypt`).
+ /// In all known cases secret arguments are consecutive
+ bool are_named = false; /// Arguments like `password = 'password'` are considered as named arguments.
+ };
+
+ Result getResult() const { return result; }
private:
- std::pair<size_t, size_t> findOrdinaryFunctionSecretArguments() const
+ const ASTFunction & function;
+ const ASTs * arguments = nullptr;
+ Result result;
+
+ void markSecretArgument(size_t index, bool argument_is_named = false)
+ {
+ if (!result.count)
+ {
+ result.start = index;
+ result.are_named = argument_is_named;
+ }
+ chassert(index >= result.start); /// We always check arguments consecutively
+ result.count = index + 1 - result.start;
+ if (!argument_is_named)
+ result.are_named = false;
+ }
+
+ void findOrdinaryFunctionSecretArguments()
{
if ((function.name == "mysql") || (function.name == "postgresql") || (function.name == "mongodb"))
{
/// mysql('host:port', 'database', 'table', 'user', 'password', ...)
/// postgresql('host:port', 'database', 'table', 'user', 'password', ...)
/// mongodb('host:port', 'database', 'collection', 'user', 'password', ...)
- return {4, 5};
+ findMySQLFunctionSecretArguments();
}
else if ((function.name == "s3") || (function.name == "cosn") || (function.name == "oss"))
{
/// s3('url', 'aws_access_key_id', 'aws_secret_access_key', ...)
- return findS3FunctionSecretArguments(/* is_cluster_function= */ false);
+ findS3FunctionSecretArguments(/* is_cluster_function= */ false);
}
else if (function.name == "s3Cluster")
{
/// s3Cluster('cluster_name', 'url', 'aws_access_key_id', 'aws_secret_access_key', ...)
- return findS3FunctionSecretArguments(/* is_cluster_function= */ true);
+ findS3FunctionSecretArguments(/* is_cluster_function= */ true);
}
else if ((function.name == "remote") || (function.name == "remoteSecure"))
{
/// remote('addresses_expr', 'db', 'table', 'user', 'password', ...)
- return findRemoteFunctionSecretArguments();
+ findRemoteFunctionSecretArguments();
}
else if ((function.name == "encrypt") || (function.name == "decrypt") ||
(function.name == "aes_encrypt_mysql") || (function.name == "aes_decrypt_mysql") ||
(function.name == "tryDecrypt"))
{
/// encrypt('mode', 'plaintext', 'key' [, iv, aad])
- return findEncryptionFunctionSecretArguments();
- }
- else
- {
- return npos;
+ findEncryptionFunctionSecretArguments();
}
}
- std::pair<size_t, size_t> findS3FunctionSecretArguments(bool is_cluster_function) const
+ void findMySQLFunctionSecretArguments()
+ {
+ if (isNamedCollectionName(0))
+ {
+ /// mysql(named_collection, ..., password = 'password', ...)
+ findSecretNamedArgument("password", 1);
+ }
+ else
+ {
+ /// mysql('host:port', 'database', 'table', 'user', 'password', ...)
+ markSecretArgument(4);
+ }
+ }
+
+ void findS3FunctionSecretArguments(bool is_cluster_function)
{
/// s3Cluster('cluster_name', 'url', ...) has 'url' as its second argument.
size_t url_arg_idx = is_cluster_function ? 1 : 0;
+ if (!is_cluster_function && isNamedCollectionName(0))
+ {
+ /// s3(named_collection, ..., secret_access_key = 'secret_access_key', ...)
+ findSecretNamedArgument("secret_access_key", 1);
+ return;
+ }
+
/// We're going to replace 'aws_secret_access_key' with '[HIDDEN'] for the following signatures:
/// s3('url', 'aws_access_key_id', 'aws_secret_access_key', ...)
/// s3Cluster('cluster_name', 'url', 'aws_access_key_id', 'aws_secret_access_key', 'format', 'compression')
@@ -119,12 +156,12 @@ namespace
/// But we should check the number of arguments first because we don't need to do any replacements in case of
/// s3('url' [, 'format']) or s3Cluster('cluster_name', 'url' [, 'format'])
if (arguments->size() < url_arg_idx + 3)
- return npos;
+ return;
if (arguments->size() >= url_arg_idx + 5)
{
/// s3('url', 'aws_access_key_id', 'aws_secret_access_key', 'format', 'structure', ...)
- return {url_arg_idx + 2, url_arg_idx + 3};
+ markSecretArgument(url_arg_idx + 2);
}
else
{
@@ -136,15 +173,16 @@ namespace
{
/// We couldn't evaluate the argument after 'url' so we don't know whether it is a format or `aws_access_key_id`.
/// So it's safer to wipe the next argument just in case.
- return {url_arg_idx + 2, url_arg_idx + 3}; /// Wipe either `aws_secret_access_key` or `structure`.
+ markSecretArgument(url_arg_idx + 2); /// Wipe either `aws_secret_access_key` or `structure`.
+ return;
}
if (KnownFormatNames::instance().exists(format))
- return npos; /// The argument after 'url' is a format: s3('url', 'format', ...)
+ return; /// The argument after 'url' is a format: s3('url', 'format', ...)
/// The argument after 'url' is not a format so we do our replacement:
/// s3('url', 'aws_access_key_id', 'aws_secret_access_key', ...) -> s3('url', 'aws_access_key_id', '[HIDDEN]', ...)
- return {url_arg_idx + 2, url_arg_idx + 3};
+ markSecretArgument(url_arg_idx + 2);
}
}
@@ -153,8 +191,12 @@ namespace
if (arg_idx >= arguments->size())
return false;
- ASTPtr argument = (*arguments)[arg_idx];
- if (const auto * literal = argument->as<ASTLiteral>())
+ return tryGetStringFromArgument(*(*arguments)[arg_idx], res, allow_identifier);
+ }
+
+ static bool tryGetStringFromArgument(const IAST & argument, String * res, bool allow_identifier = true)
+ {
+ if (const auto * literal = argument.as<ASTLiteral>())
{
if (literal->value.getType() != Field::Types::String)
return false;
@@ -165,7 +207,7 @@ namespace
if (allow_identifier)
{
- if (const auto * id = argument->as<ASTIdentifier>())
+ if (const auto * id = argument.as<ASTIdentifier>())
{
if (res)
*res = id->name();
@@ -176,8 +218,15 @@ namespace
return false;
}
- std::pair<size_t, size_t> findRemoteFunctionSecretArguments() const
+ void findRemoteFunctionSecretArguments()
{
+ if (isNamedCollectionName(0))
+ {
+ /// remote(named_collection, ..., password = 'password', ...)
+ findSecretNamedArgument("password", 1);
+ return;
+ }
+
/// We're going to replace 'password' with '[HIDDEN'] for the following signatures:
/// remote('addresses_expr', db.table, 'user' [, 'password'] [, sharding_key])
/// remote('addresses_expr', 'db', 'table', 'user' [, 'password'] [, sharding_key])
@@ -186,7 +235,7 @@ namespace
/// But we should check the number of arguments first because we don't need to do any replacements in case of
/// remote('addresses_expr', db.table)
if (arguments->size() < 3)
- return npos;
+ return;
size_t arg_num = 1;
@@ -207,20 +256,17 @@ namespace
/// before the argument 'password'. So it's safer to wipe two arguments just in case.
/// The last argument can be also a `sharding_key`, so we need to check that argument is a literal string
/// before wiping it (because the `password` argument is always a literal string).
- auto res = npos;
if (tryGetStringFromArgument(arg_num + 2, nullptr, /* allow_identifier= */ false))
{
/// Wipe either `password` or `user`.
- res = {arg_num + 2, arg_num + 3};
+ markSecretArgument(arg_num + 2);
}
if (tryGetStringFromArgument(arg_num + 3, nullptr, /* allow_identifier= */ false))
{
/// Wipe either `password` or `sharding_key`.
- if (res == npos)
- res.first = arg_num + 3;
- res.second = arg_num + 4;
+ markSecretArgument(arg_num + 3);
}
- return res;
+ return;
}
/// Skip the current argument (which is either a database name or a qualified table name).
@@ -241,9 +287,7 @@ namespace
/// before wiping it (because the `password` argument is always a literal string).
bool can_be_password = tryGetStringFromArgument(arg_num, nullptr, /* allow_identifier= */ false);
if (can_be_password)
- return {arg_num, arg_num + 1};
-
- return npos;
+ markSecretArgument(arg_num);
}
/// Tries to get either a database name or a qualified table name from an argument.
@@ -278,20 +322,24 @@ namespace
return true;
}
- std::pair<size_t, size_t> findEncryptionFunctionSecretArguments() const
+ void findEncryptionFunctionSecretArguments()
{
+ if (arguments->empty())
+ return;
+
/// We replace all arguments after 'mode' with '[HIDDEN]':
/// encrypt('mode', 'plaintext', 'key' [, iv, aad]) -> encrypt('mode', '[HIDDEN]')
- return {1, arguments->size()};
+ result.start = 1;
+ result.count = arguments->size() - 1;
}
- std::pair<size_t, size_t> findTableEngineSecretArguments() const
+ void findTableEngineSecretArguments()
{
const String & engine_name = function.name;
if (engine_name == "ExternalDistributed")
{
/// ExternalDistributed('engine', 'host:port', 'database', 'table', 'user', 'password')
- return {5, 6};
+ findExternalDistributedTableEngineSecretArguments();
}
else if ((engine_name == "MySQL") || (engine_name == "PostgreSQL") ||
(engine_name == "MaterializedPostgreSQL") || (engine_name == "MongoDB"))
@@ -300,21 +348,38 @@ namespace
/// PostgreSQL('host:port', 'database', 'table', 'user', 'password', ...)
/// MaterializedPostgreSQL('host:port', 'database', 'table', 'user', 'password', ...)
/// MongoDB('host:port', 'database', 'collection', 'user', 'password', ...)
- return {4, 5};
+ findMySQLFunctionSecretArguments();
}
else if ((engine_name == "S3") || (engine_name == "COSN") || (engine_name == "OSS"))
{
/// S3('url', ['aws_access_key_id', 'aws_secret_access_key',] ...)
- return findS3TableEngineSecretArguments();
- }
- else
- {
- return npos;
+ findS3TableEngineSecretArguments();
}
}
- std::pair<size_t, size_t> findS3TableEngineSecretArguments() const
+ void findExternalDistributedTableEngineSecretArguments()
{
+ if (isNamedCollectionName(1))
+ {
+ /// ExternalDistributed('engine', named_collection, ..., password = 'password', ...)
+ findSecretNamedArgument("password", 2);
+ }
+ else
+ {
+ /// ExternalDistributed('engine', 'host:port', 'database', 'table', 'user', 'password')
+ markSecretArgument(5);
+ }
+ }
+
+ void findS3TableEngineSecretArguments()
+ {
+ if (isNamedCollectionName(0))
+ {
+ /// S3(named_collection, ..., secret_access_key = 'secret_access_key')
+ findSecretNamedArgument("secret_access_key", 1);
+ return;
+ }
+
/// We replace 'aws_secret_access_key' with '[HIDDEN'] for the following signatures:
/// S3('url', 'aws_access_key_id', 'aws_secret_access_key', 'format')
/// S3('url', 'aws_access_key_id', 'aws_secret_access_key', 'format', 'compression')
@@ -322,12 +387,12 @@ namespace
/// But we should check the number of arguments first because we don't need to do that replacements in case of
/// S3('url' [, 'format' [, 'compression']])
if (arguments->size() < 4)
- return npos;
+ return;
- return {2, 3};
+ markSecretArgument(2);
}
- std::pair<size_t, size_t> findDatabaseEngineSecretArguments() const
+ void findDatabaseEngineSecretArguments()
{
const String & engine_name = function.name;
if ((engine_name == "MySQL") || (engine_name == "MaterializeMySQL") ||
@@ -335,31 +400,71 @@ namespace
(engine_name == "MaterializedPostgreSQL"))
{
/// MySQL('host:port', 'database', 'user', 'password')
- /// PostgreSQL('host:port', 'database', 'user', 'password', ...)
- return {3, 4};
- }
- else
- {
- return npos;
+ /// PostgreSQL('host:port', 'database', 'user', 'password')
+ findMySQLDatabaseSecretArguments();
}
}
- std::pair<size_t, size_t> findBackupNameSecretArguments() const
+ void findMySQLDatabaseSecretArguments()
+ {
+ if (isNamedCollectionName(0))
+ {
+ /// MySQL(named_collection, ..., password = 'password', ...)
+ findSecretNamedArgument("password", 1);
+ }
+ else
+ {
+ /// MySQL('host:port', 'database', 'user', 'password')
+ markSecretArgument(3);
+ }
+ }
+
+ void findBackupNameSecretArguments()
{
const String & engine_name = function.name;
if (engine_name == "S3")
{
/// BACKUP ... TO S3(url, [aws_access_key_id, aws_secret_access_key])
- return {2, 3};
- }
- else
- {
- return npos;
+ markSecretArgument(2);
}
}
- const ASTFunction & function;
- const ASTs * arguments = nullptr;
+ /// Checks whether a specified argument can be the name of a named collection.
+ bool isNamedCollectionName(size_t arg_idx) const
+ {
+ if (arguments->size() <= arg_idx)
+ return false;
+
+ const auto * identifier = (*arguments)[arg_idx]->as<ASTIdentifier>();
+ return identifier != nullptr;
+ }
+
+ /// Looks for a secret argument with a specified name. This function looks for arguments in format `key=value` where the key is specified.
+ void findSecretNamedArgument(const std::string_view & key, size_t start = 0)
+ {
+ for (size_t i = start; i < arguments->size(); ++i)
+ {
+ const auto & argument = (*arguments)[i];
+ const auto * equals_func = argument->as<ASTFunction>();
+ if (!equals_func || (equals_func->name != "equals"))
+ continue;
+
+ const auto * expr_list = equals_func->arguments->as<ASTExpressionList>();
+ if (!expr_list)
+ continue;
+
+ const auto & equal_args = expr_list->children;
+ if (equal_args.size() != 2)
+ continue;
+
+ String found_key;
+ if (!tryGetStringFromArgument(*equal_args[0], &found_key))
+ continue;
+
+ if (found_key == key)
+ markSecretArgument(i, /* argument_is_named= */ true);
+ }
+ }
};
}
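The effect of the finder, including the new named-collection handling, is easiest to see in how DDL is rendered back to the user (a sketch; names are hypothetical):

```sql
CREATE TABLE t1 (x Int64)
ENGINE = MySQL('host:9004', 'db', 'table', 'user', 'secret_password');
-- SHOW CREATE TABLE t1 masks the positional password:
--   ENGINE = MySQL('host:9004', 'db', 'table', 'user', '[HIDDEN]')

CREATE TABLE t2 (x Int64)
ENGINE = MySQL(my_named_collection, password = 'secret_password');
-- findSecretNamedArgument("password", 1) marks the named argument, and the
-- formatter prints the key but hides the value:
--   ENGINE = MySQL(my_named_collection, password = '[HIDDEN]')
```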
@@ -966,32 +1071,39 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format
&& (name == "match" || name == "extract" || name == "extractAll" || name == "replaceRegexpOne"
|| name == "replaceRegexpAll");
- auto secret_arguments = std::make_pair(static_cast<size_t>(-1), static_cast<size_t>(-1));
+ FunctionSecretArgumentsFinder::Result secret_arguments;
if (!settings.show_secrets)
- secret_arguments = FunctionSecretArgumentsFinder(*this).getRange();
+ secret_arguments = FunctionSecretArgumentsFinder{*this}.getResult();
for (size_t i = 0, size = arguments->children.size(); i < size; ++i)
{
if (i != 0)
settings.ostr << ", ";
- if (arguments->children[i]->as<ASTSetQuery>())
+
+ const auto & argument = arguments->children[i];
+ if (argument->as<ASTSetQuery>())
settings.ostr << "SETTINGS ";
- if (!settings.show_secrets && (secret_arguments.first <= i) && (i < secret_arguments.second))
+ if (!settings.show_secrets && (secret_arguments.start <= i) && (i < secret_arguments.start + secret_arguments.count))
{
+ if (secret_arguments.are_named)
+ {
+ assert_cast<const ASTFunction *>(argument.get())->arguments->children[0]->formatImpl(settings, state, nested_dont_need_parens);
+ settings.ostr << (settings.hilite ? hilite_operator : "") << " = " << (settings.hilite ? hilite_none : "");
+ }
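+ /// A named secret argument thus prints as e.g. secret_access_key = '[HIDDEN]',
+ /// while a positional one prints as just '[HIDDEN]'.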
settings.ostr << "'[HIDDEN]'";
- if (size - 1 < secret_arguments.second)
+ if (size <= secret_arguments.start + secret_arguments.count && !secret_arguments.are_named)
break; /// All other arguments should also be hidden.
continue;
}
if ((i == 1) && special_hilite_regexp
- && highlightStringLiteralWithMetacharacters(arguments->children[i], settings, "|()^$.[]?*+{:-"))
+ && highlightStringLiteralWithMetacharacters(argument, settings, "|()^$.[]?*+{:-"))
{
continue;
}
- arguments->children[i]->formatImpl(settings, state, nested_dont_need_parens);
+ argument->formatImpl(settings, state, nested_dont_need_parens);
}
}
@@ -1005,14 +1117,7 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format
bool ASTFunction::hasSecretParts() const
{
- if (arguments)
- {
- size_t num_arguments = arguments->children.size();
- auto secret_arguments = FunctionSecretArgumentsFinder(*this).getRange();
- if ((secret_arguments.first < num_arguments) && (secret_arguments.first < secret_arguments.second))
- return true;
- }
- return childrenHaveSecretParts();
+ return (FunctionSecretArgumentsFinder{*this}.getResult().count > 0) || childrenHaveSecretParts();
}
String getFunctionName(const IAST * ast)
diff --git a/src/Parsers/ASTQualifiedAsterisk.h b/src/Parsers/ASTQualifiedAsterisk.h
index e67b4cd82dd..079b83ae171 100644
--- a/src/Parsers/ASTQualifiedAsterisk.h
+++ b/src/Parsers/ASTQualifiedAsterisk.h
@@ -17,8 +17,13 @@ public:
ASTPtr clone() const override
{
auto clone = std::make_shared(*this);
+ clone->children.clear();
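+ /// (the copy constructor above also copied `children`; without clearing them the
+ /// clone would keep the original's child pointers alongside the re-added clones)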
- if (transformers) { clone->transformers = transformers->clone(); clone->children.push_back(clone->transformers); }
+ if (transformers)
+ {
+ clone->transformers = transformers->clone();
+ clone->children.push_back(clone->transformers);
+ }
clone->qualifier = qualifier->clone();
clone->children.push_back(clone->qualifier);
diff --git a/src/Parsers/ASTSystemQuery.h b/src/Parsers/ASTSystemQuery.h
index 4290f4e2a94..02ddbc7dcd2 100644
--- a/src/Parsers/ASTSystemQuery.h
+++ b/src/Parsers/ASTSystemQuery.h
@@ -25,7 +25,7 @@ public:
DROP_INDEX_MARK_CACHE,
DROP_INDEX_UNCOMPRESSED_CACHE,
DROP_MMAP_CACHE,
- DROP_QUERY_RESULT_CACHE,
+ DROP_QUERY_CACHE,
#if USE_EMBEDDED_COMPILER
DROP_COMPILED_EXPRESSION_CACHE,
#endif
diff --git a/src/Parsers/ParserDeleteQuery.cpp b/src/Parsers/ParserDeleteQuery.cpp
index 7b8057d227e..7b27651d82d 100644
--- a/src/Parsers/ParserDeleteQuery.cpp
+++ b/src/Parsers/ParserDeleteQuery.cpp
@@ -18,6 +18,7 @@ bool ParserDeleteQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
ParserKeyword s_where("WHERE");
ParserExpression parser_exp_elem;
ParserKeyword s_settings("SETTINGS");
+ ParserKeyword s_on{"ON"};
if (s_delete.ignore(pos, expected))
{
@@ -27,6 +28,14 @@ bool ParserDeleteQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
if (!parseDatabaseAndTableAsAST(pos, expected, query->database, query->table))
return false;
+ if (s_on.ignore(pos, expected))
+ {
+ String cluster_str;
+ if (!ASTQueryWithOnCluster::parse(pos, cluster_str, expected))
+ return false;
+ query->cluster = cluster_str;
+ }
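+ /// This makes e.g. the following parseable (cluster name is illustrative):
+ /// DELETE FROM db.table ON CLUSTER my_cluster WHERE x = 1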
+
if (!s_where.ignore(pos, expected))
return false;
diff --git a/src/Parsers/obfuscateQueries.cpp b/src/Parsers/obfuscateQueries.cpp
index 09267148c79..909c86b0bf0 100644
--- a/src/Parsers/obfuscateQueries.cpp
+++ b/src/Parsers/obfuscateQueries.cpp
@@ -26,44 +26,448 @@ namespace
const std::unordered_set<std::string_view> keywords
{
- "CREATE", "DATABASE", "IF", "NOT", "EXISTS", "TEMPORARY", "TABLE", "ON", "CLUSTER", "DEFAULT",
- "MATERIALIZED", "EPHEMERAL", "ALIAS", "ENGINE", "AS", "VIEW", "POPULATE", "SETTINGS", "ATTACH", "DETACH",
- "DROP", "RENAME", "TO", "ALTER", "ADD", "MODIFY", "CLEAR", "COLUMN", "AFTER", "COPY",
- "PROJECT", "PRIMARY", "KEY", "CHECK", "PARTITION", "PART", "FREEZE", "FETCH", "FROM", "SHOW",
- "INTO", "OUTFILE", "FORMAT", "TABLES", "DATABASES", "LIKE", "PROCESSLIST", "CASE", "WHEN", "THEN",
- "ELSE", "END", "DESCRIBE", "DESC", "USE", "SET", "OPTIMIZE", "FINAL", "DEDUPLICATE", "INSERT",
- "VALUES", "SELECT", "DISTINCT", "SAMPLE", "ARRAY", "JOIN", "GLOBAL", "LOCAL", "ANY", "ALL",
- "INNER", "LEFT", "RIGHT", "FULL", "OUTER", "CROSS", "USING", "PREWHERE", "WHERE", "GROUP",
- "BY", "WITH", "TOTALS", "HAVING", "ORDER", "COLLATE", "LIMIT", "UNION", "AND", "OR",
- "ASC", "IN", "KILL", "QUERY", "SYNC", "ASYNC", "TEST", "BETWEEN", "TRUNCATE", "USER",
- "ROLE", "PROFILE", "QUOTA", "POLICY", "ROW", "GRANT", "REVOKE", "OPTION", "ADMIN", "EXCEPT",
- "REPLACE", "IDENTIFIED", "HOST", "NAME", "READONLY", "WRITABLE", "PERMISSIVE", "FOR", "RESTRICTIVE", "RANDOMIZED",
- "INTERVAL", "LIMITS", "ONLY", "TRACKING", "IP", "REGEXP", "ILIKE", "DICTIONARY", "OFFSET", "TRIM",
- "LTRIM", "RTRIM", "BOTH", "LEADING", "TRAILING"
+ "!=",
+ "",
+ "%",
+ "*",
+ "+",
+ "-",
+ "->",
+ ".",
+ "/",
+ ":",
+ "::",
+ "<",
+ "<=",
+ "<>",
+ "=",
+ "==",
+ ">",
+ ">=",
+ "?",
+ "[",
+ "]+",
+ "]+|[",
+ "^[",
+ "||",
+ "]+$",
+ "ACCESS",
+ "ACTION",
+ "ADD",
+ "ADMIN",
+ "AFTER",
+ "ALGORITHM",
+ "ALIAS",
+ "ALL",
+ "ALLOWED_LATENESS",
+ "ALTER",
+ "AND",
+ "ANTI",
+ "ANY",
+ "APPLY",
+ "ARRAY",
+ "AS",
+ "ASC",
+ "ASCENDING",
+ "ASOF",
+ "ASSUME",
+ "AST",
+ "ASYNC",
+ "ATTACH",
+ "AUTO_INCREMENT",
+ "BACKUP",
+ "BASE_BACKUP",
+ "BEGIN",
+ "BETWEEN",
+ "BIDIRECTIONAL",
+ "BOTH",
+ "BY",
+ "CACHE",
+ "CACHES",
+ "CASCADE",
+ "CASE",
+ "CASEWITHEXPRESSION",
+ "CAST",
+ "CHANGE",
+ "CHANGEABLE_IN_READONLY",
+ "CHANGED",
+ "CHAR",
+ "CHARACTER",
+ "CHECK",
+ "CLEAR",
+ "CLUSTER",
+ "CLUSTER_HOST_IDS",
+ "CLUSTERS",
+ "CN",
+ "CODEC",
+ "COLLATE",
+ "COLLECTION",
+ "COLUMN",
+ "COLUMNS",
+ "COMMENT",
+ "COMMIT",
+ "COMPRESSION",
+ "CONCAT",
+ "CONSTRAINT",
+ "CREATE",
+ "CROSS",
+ "CUBE",
+ "CURRENT",
+ "CURRENT_USER",
+ "DATABASE",
+ "DATABASES",
+ "DATE",
+ "DATE_ADD",
+ "DATEADD",
+ "DATE_DIFF",
+ "DATEDIFF",
+ "DATE_SUB",
+ "DATESUB",
+ "DAY",
+ "DD",
+ "DDL",
+ "DEDUPLICATE",
+ "DEFAULT",
+ "DELAY",
+ "DELETE",
+ "DESC",
+ "DESCENDING",
+ "DESCRIBE",
+ "DETACH",
+ "DETACHED",
+ "DICTIONARIES",
+ "DICTIONARY",
+ "DISK",
+ "DISTINCT",
+ "DIV",
+ "DOUBLE_SHA1_HASH",
+ "DROP",
+ "ELSE",
+ "EMPTY",
+ "ENABLED",
+ "END",
+ "ENFORCED",
+ "ENGINE",
+ "EPHEMERAL",
+ "EQUALS",
+ "ESTIMATE",
+ "EVENT",
+ "EVENTS",
+ "EXCEPT",
+ "EXCHANGE",
+ "EXISTS",
+ "EXPLAIN",
+ "EXPRESSION",
+ "EXTERNAL",
+ "EXTRACT",
+ "FALSE",
+ "FETCH",
+ "FILE",
+ "FILESYSTEM",
+ "FILL",
+ "FILTER",
+ "FINAL",
+ "FIRST",
+ "FOLLOWING",
+ "FOR",
+ "FOREIGN",
+ "FORMAT",
+ "FREEZE",
+ "FROM",
+ "FULL",
+ "FULLTEXT",
+ "FUNCTION",
+ "GLOBAL",
+ "GRANT",
+ "GRANTEES",
+ "GRANTS",
+ "GRANULARITY",
+ "GREATER",
+ "GREATEROREQUALS",
+ "GROUP",
+ "GROUPING",
+ "GROUPS",
+ "HASH",
+ "HAVING",
+ "HDFS",
+ "HH",
+ "HIERARCHICAL",
+ "HOST",
+ "HOUR",
+ "ID",
+ "IDENTIFIED",
+ "IF",
+ "ILIKE",
+ "IN",
+ "INDEX",
+ "INFILE",
+ "INHERIT",
+ "INJECTIVE",
+ "INNER",
+ "INSERT",
+ "INTERPOLATE",
+ "INTERSECT",
+ "INTERVAL",
+ "INTO",
+ "INVISIBLE",
+ "IP",
+ "IS",
+ "IS_OBJECT_ID",
+ "JOIN",
+ "KEY",
+ "KEYED",
+ "KILL",
+ "LAMBDA",
+ "LARGE",
+ "LAST",
+ "LAYOUT",
+ "LEADING",
+ "LEFT",
+ "LESS",
+ "LESSOREQUALS",
+ "LEVEL",
+ "LIFETIME",
+ "LIKE",
+ "LIMIT",
+ "LIMITS",
+ "LINEAR",
+ "LIST",
+ "LITERAL",
+ "LIVE",
+ "LOCAL",
+ "LTRIM",
+ "MATCH",
+ "MATERIALIZE",
+ "MATERIALIZED",
+ "MAX",
+ "MCS",
+ "MEMORY",
+ "MI",
+ "MICROSECOND",
+ "MILLISECOND",
+ "MIN",
+ "MINUS",
+ "MINUTE",
+ "MM",
+ "MOD",
+ "MODIFY",
+ "MONTH",
+ "MOVE",
+ "MS",
+ "MULTIIF",
+ "MUTATION",
+ "NAME",
+ "NAMED",
+ "NANOSECOND",
+ "NEXT",
+ "NO",
+ "NONE",
+ "NOT",
+ "NOTEQUALS",
+ "NOTIN",
+ "NS",
+ "NULL",
+ "NULLS",
+ "OBJECT",
+ "OFFSET",
+ "ON",
+ "ONLY",
+ "OPTIMIZE",
+ "OPTION",
+ "OR",
+ "ORDER",
+ "OUTER",
+ "OUTFILE",
+ "OVER",
+ "OVERRIDE",
+ "PART",
+ "PARTIAL",
+ "PARTITION",
+ "PARTITIONS",
+ "PART_MOVE_TO_SHARD",
+ "PERMANENTLY",
+ "PERMISSIVE",
+ "PIPELINE",
+ "PLAN",
+ "PLUS",
+ "POLICY",
+ "POPULATE",
+ "POSITION",
+ "PRECEDING",
+ "PRECISION",
+ "PREWHERE",
+ "PRIMARY",
+ "PRIVILEGES",
+ "PROCESSLIST",
+ "PROFILE",
+ "PROJECTION",
+ "QQ",
+ "QUARTER",
+ "QUERY",
+ "QUOTA",
+ "RANDOMIZED",
+ "RANGE",
+ "READONLY",
+ "REALM",
+ "RECOMPRESS",
+ "REFERENCES",
+ "REFRESH",
+ "REGEXP",
+ "REGEXPQUOTEMETA",
+ "REMOVE",
+ "RENAME",
+ "REPLACE",
+ "REPLACEREGEXPALL",
+ "REPLACEREGEXPONE",
+ "RESET",
+ "RESTORE",
+ "RESTRICT",
+ "RESTRICTIVE",
+ "RESUME",
+ "REVOKE",
+ "RIGHT",
+ "ROLE",
+ "ROLES",
+ "ROLLBACK",
+ "ROLLUP",
+ "ROW",
+ "ROWS",
+ "RTRIM",
+ "S3",
+ "SALT",
+ "SAMPLE",
+ "SECOND",
+ "SELECT",
+ "SEMI",
+ "SERVER",
+ "SET",
+ "SETS",
+ "SETTING",
+ "SETTINGS",
+ "SHA256_HASH",
+ "SHARD",
+ "SHOW",
+ "SIGNED",
+ "SIMPLE",
+ "SINGLEVALUEORNULL",
+ "SNAPSHOT",
+ "SOURCE",
+ "SPATIAL",
+ "SS",
+ "STDOUT",
+ "STEP",
+ "STORAGE",
+ "STRICT",
+ "STRICTLY_ASCENDING",
+ "SUBPARTITION",
+ "SUBPARTITIONS",
+ "SUBSTRING",
+ "SUSPEND",
+ "SYNC",
+ "SYNTAX",
+ "SYSTEM",
+ "TABLE",
+ "TABLES",
+ "TEMPORARY",
+ "TEST",
+ "THAN",
+ "THEN",
+ "TIES",
+ "TIMESTAMP",
+ "TIMESTAMP_ADD",
+ "TIMESTAMPADD",
+ "TIMESTAMP_DIFF",
+ "TIMESTAMPDIFF",
+ "TIMESTAMP_SUB",
+ "TIMESTAMPSUB",
+ "TO",
+ "TODATE",
+ "TODATETIME",
+ "TOP",
+ "TOTALS",
+ "TRACKING",
+ "TRAILING",
+ "TRANSACTION",
+ "TREE",
+ "TRIGGER",
+ "TRIM",
+ "TRIMBOTH",
+ "TRIMLEFT",
+ "TRIMRIGHT",
+ "TRUE",
+ "TRUNCATE",
+ "TTL",
+ "TUPLE",
+ "TYPE",
+ "UNBOUNDED",
+ "UNFREEZE",
+ "UNION",
+ "UNIQUE",
+ "UNSIGNED",
+ "UNTUPLE",
+ "UPDATE",
+ "URL",
+ "USE",
+ "USER",
+ "USING",
+ "UUID",
+ "VALUES",
+ "VARYING",
+ "VIEW",
+ "VIEWIFPERMITTED",
+ "VISIBLE",
+ "VOLUME",
+ "WATCH",
+ "WATERMARK",
+ "WEEK",
+ "WHEN",
+ "WHERE",
+ "WINDOW",
+ "WITH",
+ "WK",
+ "WRITABLE",
+ "YEAR",
+ "YYYY",
+ "ZKPATH"
};
+/// We want to keep some words inside quotes. For example, we want to keep HOUR inside:
+/// SELECT now() + INTERVAL '1 HOUR'
const std::unordered_set<std::string_view> keep_words
{
- "id", "name", "value", "num",
- "Id", "Name", "Value", "Num",
- "ID", "NAME", "VALUE", "NUM",
+ "DAY",
+ "HOUR",
+ "ID",
+ "NAME",
+ "NANOSECOND",
+ "MICROSECOND",
+ "MILLISECOND",
+ "SECOND",
+ "MINUTE",
+ "NUM",
+ "VALUE",
+ "WEEK",
+ "MONTH",
+ "QUARTER",
+ "YEAR"
};
/// The list of nouns collected from here: http://www.desiquintans.com/nounlist, Public domain.
+/// Removed nouns with spaces, words with non-ASCII chars, and keywords.
std::initializer_list<const char *> nouns
{
"aardvark", "abacus", "abbey", "abbreviation", "abdomen", "ability", "abnormality", "abolishment", "abortion",
-"abrogation", "absence", "abundance", "abuse", "academics", "academy", "accelerant", "accelerator", "accent", "acceptance", "access",
+"abrogation", "absence", "abundance", "abuse", "academics", "academy", "accelerant", "accelerator", "accent", "acceptance",
"accessory", "accident", "accommodation", "accompanist", "accomplishment", "accord", "accordance", "accordion", "account", "accountability",
"accountant", "accounting", "accuracy", "accusation", "acetate", "achievement", "achiever", "acid", "acknowledgment", "acorn", "acoustics",
-"acquaintance", "acquisition", "acre", "acrylic", "act", "action", "activation", "activist", "activity", "actor", "actress", "acupuncture",
-"ad", "adaptation", "adapter", "addiction", "addition", "address", "adjective", "adjustment", "admin", "administration", "administrator",
+"acquaintance", "acquisition", "acre", "acrylic", "act", "activation", "activist", "activity", "actor", "actress", "acupuncture",
+"ad", "adaptation", "adapter", "addiction", "addition", "address", "adjective", "adjustment", "administration", "administrator",
"admire", "admission", "adobe", "adoption", "adrenalin", "adrenaline", "adult", "adulthood", "advance", "advancement", "advantage", "advent",
"adverb", "advertisement", "advertising", "advice", "adviser", "advocacy", "advocate", "affair", "affect", "affidavit", "affiliate",
"affinity", "afoul", "afterlife", "aftermath", "afternoon", "aftershave", "aftershock", "afterthought", "age", "agency", "agenda", "agent",
"aggradation", "aggression", "aglet", "agony", "agreement", "agriculture", "aid", "aide", "aim", "air", "airbag", "airbus", "aircraft",
"airfare", "airfield", "airforce", "airline", "airmail", "airman", "airplane", "airport", "airship", "airspace", "alarm", "alb", "albatross",
-"album", "alcohol", "alcove", "alder", "ale", "alert", "alfalfa", "algebra", "algorithm", "alibi", "alien", "allegation", "allergist",
+"album", "alcohol", "alcove", "alder", "ale", "alert", "alfalfa", "algebra", "alibi", "alien", "allegation", "allergist",
"alley", "alliance", "alligator", "allocation", "allowance", "alloy", "alluvium", "almanac", "almighty", "almond", "alpaca", "alpenglow",
"alpenhorn", "alpha", "alphabet", "altar", "alteration", "alternative", "altitude", "alto", "aluminium", "aluminum", "amazement", "amazon",
"ambassador", "amber", "ambience", "ambiguity", "ambition", "ambulance", "amendment", "amenity", "ammunition", "amnesty", "amount", "amusement",
@@ -76,7 +480,7 @@ std::initializer_list<const char *> nouns
"apple", "applewood", "appliance", "application", "appointment", "appreciation", "apprehension", "approach", "appropriation", "approval",
"apricot", "apron", "apse", "aquarium", "aquifer", "arcade", "arch", "archaeologist", "archaeology", "archeology", "archer",
"architect", "architecture", "archives", "area", "arena", "argument", "arithmetic", "ark", "arm", "armadillo", "armament",
-"armchair", "armoire", "armor", "armour", "armpit", "armrest", "army", "arrangement", "array", "arrest", "arrival", "arrogance", "arrow",
+"armchair", "armoire", "armor", "armour", "armpit", "armrest", "army", "arrangement", "arrest", "arrival", "arrogance", "arrow",
"art", "artery", "arthur", "artichoke", "article", "artifact", "artificer", "artist", "ascend", "ascent", "ascot", "ash", "ashram", "ashtray",
"aside", "asparagus", "aspect", "asphalt", "aspic", "assassination", "assault", "assembly", "assertion", "assessment", "asset",
"assignment", "assist", "assistance", "assistant", "associate", "association", "assumption", "assurance", "asterisk", "astrakhan", "astrolabe",
@@ -85,7 +489,7 @@ std::initializer_list<const char *> nouns
"attraction", "attribute", "auction", "audience", "audit", "auditorium", "aunt", "authentication", "authenticity", "author", "authorisation",
"authority", "authorization", "auto", "autoimmunity", "automation", "automaton", "autumn", "availability", "avalanche", "avenue", "average",
"avocado", "award", "awareness", "awe", "axis", "azimuth", "babe", "baboon", "babushka", "baby", "bachelor", "back", "backbone",
-"backburn", "backdrop", "background", "backpack", "backup", "backyard", "bacon", "bacterium", "badge", "badger", "bafflement", "bag",
+"backburn", "backdrop", "background", "backpack", "backyard", "bacon", "bacterium", "badge", "badger", "bafflement", "bag",
"bagel", "baggage", "baggie", "baggy", "bagpipe", "bail", "bait", "bake", "baker", "bakery", "bakeware", "balaclava", "balalaika", "balance",
"balcony", "ball", "ballet", "balloon", "balloonist", "ballot", "ballpark", "bamboo", "ban", "banana", "band", "bandana", "bandanna",
"bandolier", "bandwidth", "bangle", "banjo", "bank", "bankbook", "banker", "banking", "bankruptcy", "banner", "banquette", "banyan",
@@ -125,16 +529,16 @@ std::initializer_list<const char *> nouns
"captain", "caption", "captor", "car", "carabao", "caramel", "caravan", "carbohydrate", "carbon", "carboxyl", "card", "cardboard", "cardigan",
"care", "career", "cargo", "caribou", "carload", "carnation", "carnival", "carol", "carotene", "carp", "carpenter", "carpet", "carpeting",
"carport", "carriage", "carrier", "carrot", "carry", "cart", "cartel", "carter", "cartilage", "cartload", "cartoon", "cartridge", "carving",
-"cascade", "casement", "cash", "cashew", "cashier", "casino", "casket", "cassava", "casserole", "cassock", "cast", "castanet",
+"casement", "cash", "cashew", "cashier", "casino", "casket", "cassava", "casserole", "cassock", "castanet",
"castle", "casualty", "cat", "catacomb", "catalogue", "catalysis", "catalyst", "catamaran", "catastrophe", "catch", "catcher", "category",
"caterpillar", "cathedral", "cation", "catsup", "cattle", "cauliflower", "causal", "cause", "causeway", "caution", "cave", "caviar",
"cayenne", "ceiling", "celebration", "celebrity", "celeriac", "celery", "cell", "cellar", "cello", "celsius", "cement", "cemetery", "cenotaph",
"census", "cent", "center", "centimeter", "centre", "centurion", "century", "cephalopod", "ceramic", "ceramics", "cereal", "ceremony",
"certainty", "certificate", "certification", "cesspool", "chafe", "chain", "chainstay", "chair", "chairlift", "chairman", "chairperson",
-"chaise", "chalet", "chalice", "chalk", "challenge", "chamber", "champagne", "champion", "championship", "chance", "chandelier", "change",
-"channel", "chaos", "chap", "chapel", "chaplain", "chapter", "character", "characteristic", "characterization", "chard", "charge", "charger",
+"chaise", "chalet", "chalice", "chalk", "challenge", "chamber", "champagne", "champion", "championship", "chance", "chandelier",
+"channel", "chaos", "chap", "chapel", "chaplain", "chapter", "characteristic", "characterization", "chard", "charge", "charger",
"charity", "charlatan", "charm", "charset", "chart", "charter", "chasm", "chassis", "chastity", "chasuble", "chateau", "chatter", "chauffeur",
-"chauvinist", "check", "checkbook", "checking", "checkout", "checkroom", "cheddar", "cheek", "cheer", "cheese", "cheesecake", "cheetah",
+"chauvinist", "checkbook", "checking", "checkout", "checkroom", "cheddar", "cheek", "cheer", "cheese", "cheesecake", "cheetah",
"chef", "chem", "chemical", "chemistry", "chemotaxis", "cheque", "cherry", "chess", "chest", "chestnut", "chick", "chicken", "chicory",
"chief", "chiffonier", "child", "childbirth", "childhood", "chili", "chill", "chime", "chimpanzee", "chin", "chinchilla", "chino", "chip",
"chipmunk", "chivalry", "chive", "chives", "chocolate", "choice", "choir", "choker", "cholesterol", "choosing", "chop",
@@ -146,13 +550,13 @@ std::initializer_list<const char *> nouns
"claw", "clay", "cleaner", "clearance", "clearing", "cleat", "cleavage", "clef", "cleft", "clergyman", "cleric", "clerk", "click", "client",
"cliff", "climate", "climb", "clinic", "clip", "clipboard", "clipper", "cloak", "cloakroom", "clock", "clockwork", "clogs", "cloister",
"clone", "close", "closet", "closing", "closure", "cloth", "clothes", "clothing", "cloud", "cloudburst", "clove", "clover", "cloves",
-"club", "clue", "cluster", "clutch", "coach", "coal", "coalition", "coast", "coaster", "coat", "cob", "cobbler", "cobweb",
+"club", "clue", "clutch", "coach", "coal", "coalition", "coast", "coaster", "coat", "cob", "cobbler", "cobweb",
"cock", "cockpit", "cockroach", "cocktail", "cocoa", "coconut", "cod", "code", "codepage", "codling", "codon", "codpiece", "coevolution",
"cofactor", "coffee", "coffin", "cohesion", "cohort", "coil", "coin", "coincidence", "coinsurance", "coke", "cold", "coleslaw", "coliseum",
-"collaboration", "collagen", "collapse", "collar", "collard", "collateral", "colleague", "collection", "collectivisation", "collectivization",
+"collaboration", "collagen", "collapse", "collar", "collard", "collateral", "colleague", "collectivisation", "collectivization",
"collector", "college", "collision", "colloquy", "colon", "colonial", "colonialism", "colonisation", "colonization", "colony", "color",
-"colorlessness", "colt", "column", "columnist", "comb", "combat", "combination", "combine", "comeback", "comedy", "comestible", "comfort",
-"comfortable", "comic", "comics", "comma", "command", "commander", "commandment", "comment", "commerce", "commercial", "commission",
+"colorlessness", "colt", "columnist", "comb", "combat", "combination", "combine", "comeback", "comedy", "comestible", "comfort",
+"comfortable", "comic", "comics", "comma", "command", "commander", "commandment", "commerce", "commercial", "commission",
"commitment", "committee", "commodity", "common", "commonsense", "commotion", "communicant", "communication", "communion", "communist",
"community", "commuter", "company", "comparison", "compass", "compassion", "compassionate", "compensation", "competence", "competition",
"competitor", "complaint", "complement", "completion", "complex", "complexity", "compliance", "complication", "complicity", "compliment",
@@ -162,8 +566,8 @@ std::initializer_list<const char *> nouns
"confidentiality", "configuration", "confirmation", "conflict", "conformation", "confusion", "conga", "congo", "congregation", "congress",
"congressman", "congressperson", "conifer", "connection", "connotation", "conscience", "consciousness", "consensus", "consent", "consequence",
"conservation", "conservative", "consideration", "consignment", "consist", "consistency", "console", "consonant", "conspiracy", "conspirator",
-"constant", "constellation", "constitution", "constraint", "construction", "consul", "consulate", "consulting", "consumer", "consumption",
-"contact", "contact lens", "contagion", "container", "content", "contention", "contest", "context", "continent", "contingency", "continuity",
+"constant", "constellation", "constitution", "construction", "consul", "consulate", "consulting", "consumer", "consumption",
+"contact", "contagion", "container", "content", "contention", "contest", "context", "continent", "contingency", "continuity",
"contour", "contract", "contractor", "contrail", "contrary", "contrast", "contribution", "contributor", "control", "controller", "controversy",
"convection", "convenience", "convention", "conversation", "conversion", "convert", "convertible", "conviction", "cook", "cookbook",
"cookie", "cooking", "coonskin", "cooperation", "coordination", "coordinator", "cop", "cope", "copper", "copy", "copying",
@@ -175,33 +579,33 @@ std::initializer_list<const char *> nouns
"cousin", "covariate", "cover", "coverage", "coverall", "cow", "cowbell", "cowboy", "coyote", "crab", "crack", "cracker", "crackers",
"cradle", "craft", "craftsman", "cranberry", "crane", "cranky", "crash", "crate", "cravat", "craw", "crawdad", "crayfish", "crayon",
"crazy", "cream", "creation", "creationism", "creationist", "creative", "creativity", "creator", "creature", "creche", "credential",
-"credenza", "credibility", "credit", "creditor", "creek", "creme brulee", "crepe", "crest", "crew", "crewman", "crewmate", "crewmember",
+"credenza", "credibility", "credit", "creditor", "creek", "crepe", "crest", "crew", "crewman", "crewmate", "crewmember",
"crewmen", "cria", "crib", "cribbage", "cricket", "cricketer", "crime", "criminal", "crinoline", "crisis", "crisp", "criteria", "criterion",
-"critic", "criticism", "crocodile", "crocus", "croissant", "crook", "crop", "cross", "crotch",
+"critic", "criticism", "crocodile", "crocus", "croissant", "crook", "crop", "crotch",
"croup", "crow", "crowd", "crown", "crucifixion", "crude", "cruelty", "cruise", "crumb", "crunch", "crusader", "crush", "crust", "cry",
-"crystal", "crystallography", "cub", "cube", "cuckoo", "cucumber", "cue", "cuisine", "cultivar", "cultivator", "culture",
+"crystal", "crystallography", "cub", "cuckoo", "cucumber", "cue", "cuisine", "cultivar", "cultivator", "culture",
"culvert", "cummerbund", "cup", "cupboard", "cupcake", "cupola", "curd", "cure", "curio", "curiosity", "curl", "curler", "currant", "currency",
-"current", "curriculum", "curry", "curse", "cursor", "curtailment", "curtain", "curve", "cushion", "custard", "custody", "custom", "customer",
+"curriculum", "curry", "curse", "cursor", "curtailment", "curtain", "curve", "cushion", "custard", "custody", "custom", "customer",
"cut", "cuticle", "cutlet", "cutover", "cutting", "cyclamen", "cycle", "cyclone", "cyclooxygenase", "cygnet", "cylinder", "cymbal", "cynic",
"cyst", "cytokine", "cytoplasm", "dad", "daddy", "daffodil", "dagger", "dahlia", "daikon", "daily", "dairy", "daisy", "dam", "damage",
"dame", "dance", "dancer", "dancing", "dandelion", "danger", "dare", "dark", "darkness", "darn", "dart", "dash", "dashboard",
-"data", "date", "daughter", "dawn", "day", "daybed", "daylight", "dead", "deadline", "deal", "dealer", "dealing", "dearest",
+"data", "daughter", "dawn", "daybed", "daylight", "dead", "deadline", "deal", "dealer", "dealing", "dearest",
"death", "deathwatch", "debate", "debris", "debt", "debtor", "decade", "decadence", "decency", "decimal", "decision",
"deck", "declaration", "declination", "decline", "decoder", "decongestant", "decoration", "decrease", "decryption", "dedication", "deduce",
"deduction", "deed", "deep", "deer", "defeat", "defendant", "defender", "defense", "deficit", "definition", "deformation",
-"degradation", "degree", "delay", "deliberation", "delight", "delivery", "demand", "democracy", "democrat", "demon", "demur", "den",
+"degradation", "degree", "deliberation", "delight", "delivery", "demand", "democracy", "democrat", "demon", "demur", "den",
"denim", "denominator", "density", "dentist", "deodorant", "department", "departure", "dependency", "dependent", "deployment", "deposit",
"deposition", "depot", "depression", "depressive", "depth", "deputy", "derby", "derivation", "derivative", "derrick", "descendant", "descent",
"description", "desert", "design", "designation", "designer", "desire", "desk", "desktop", "dessert", "destination", "destiny", "destroyer",
"destruction", "detail", "detainee", "detainment", "detection", "detective", "detector", "detention", "determination", "detour", "devastation",
"developer", "developing", "development", "developmental", "deviance", "deviation", "device", "devil", "dew", "dhow", "diabetes", "diadem",
-"diagnosis", "diagram", "dial", "dialect", "dialogue", "diam", "diamond", "diaper", "diaphragm", "diarist", "diary", "dibble", "dickey", "dictaphone", "dictator", "diction", "dictionary", "die", "diesel", "diet", "difference", "differential", "difficulty", "diffuse",
+"diagnosis", "diagram", "dial", "dialect", "dialogue", "diam", "diamond", "diaper", "diaphragm", "diarist", "diary", "dibble", "dickey", "dictaphone", "dictator", "diction", "die", "diesel", "diet", "difference", "differential", "difficulty", "diffuse",
"dig", "digestion", "digestive", "digger", "digging", "digit", "dignity", "dilapidation", "dill", "dilution", "dime", "dimension", "dimple",
"diner", "dinghy", "dining", "dinner", "dinosaur", "dioxide", "dip", "diploma", "diplomacy", "dipstick", "direction", "directive", "director",
"directory", "dirndl", "dirt", "disability", "disadvantage", "disagreement", "disappointment", "disarmament", "disaster", "discharge",
"discipline", "disclaimer", "disclosure", "disco", "disconnection", "discount", "discourse", "discovery", "discrepancy", "discretion",
"discrimination", "discussion", "disdain", "disease", "disembodiment", "disengagement", "disguise", "disgust", "dish", "dishwasher",
-"disk", "disparity", "dispatch", "displacement", "display", "disposal", "disposer", "disposition", "dispute", "disregard", "disruption",
+"disparity", "dispatch", "displacement", "display", "disposal", "disposer", "disposition", "dispute", "disregard", "disruption",
"dissemination", "dissonance", "distance", "distinction", "distortion", "distribution", "distributor", "district", "divalent", "divan",
"diver", "diversity", "divide", "dividend", "divider", "divine", "diving", "division", "divorce", "doc", "dock", "doctor", "doctorate",
"doctrine", "document", "documentary", "documentation", "doe", "dog", "doggie", "dogsled", "dogwood", "doing", "doll", "dollar", "dollop",
@@ -209,10 +613,10 @@ std::initializer_list<const char *> nouns
"doorpost", "doorway", "dory", "dose", "dot", "double", "doubling", "doubt", "doubter", "dough", "doughnut", "down", "downfall", "downforce",
"downgrade", "download", "downstairs", "downtown", "downturn", "dozen", "draft", "drag", "dragon", "dragonfly", "dragonfruit", "dragster",
"drain", "drainage", "drake", "drama", "dramaturge", "drapes", "draw", "drawbridge", "drawer", "drawing", "dream", "dreamer", "dredger",
-"dress", "dresser", "dressing", "drill", "drink", "drinking", "drive", "driver", "driveway", "driving", "drizzle", "dromedary", "drop",
+"dress", "dresser", "dressing", "drill", "drink", "drinking", "drive", "driver", "driveway", "driving", "drizzle", "dromedary",
"drudgery", "drug", "drum", "drummer", "drunk", "dryer", "duck", "duckling", "dud", "dude", "due", "duel", "dueling", "duffel", "dugout",
-"dulcimer", "dumbwaiter", "dump", "dump truck", "dune", "dune buggy", "dungarees", "dungeon", "duplexer", "duration", "durian", "dusk",
-"dust", "dust storm", "duster", "duty", "dwarf", "dwell", "dwelling", "dynamics", "dynamite", "dynamo", "dynasty", "dysfunction",
+"dulcimer", "dumbwaiter", "dump", "dune", "dungarees", "dungeon", "duplexer", "duration", "durian", "dusk",
+"dust", "duster", "duty", "dwarf", "dwell", "dwelling", "dynamics", "dynamite", "dynamo", "dynasty", "dysfunction",
"eagle", "eaglet", "ear", "eardrum", "earmuffs", "earnings", "earplug", "earring", "earrings", "earth", "earthquake",
"earthworm", "ease", "easel", "east", "eating", "eaves", "eavesdropper", "ecclesia", "echidna", "eclipse", "ecliptic", "ecology", "economics",
"economy", "ecosystem", "ectoderm", "ectodermal", "ecumenist", "eddy", "edge", "edger", "edible", "editing", "edition", "editor", "editorial",
@@ -222,19 +626,19 @@ std::initializer_list<const char *> nouns
"ellipse", "elm", "elongation", "elver", "email", "emanate", "embarrassment", "embassy", "embellishment", "embossing", "embryo", "emerald",
"emergence", "emergency", "emergent", "emery", "emission", "emitter", "emotion", "emphasis", "empire", "employ", "employee", "employer",
"employment", "empowerment", "emu", "enactment", "encirclement", "enclave", "enclosure", "encounter", "encouragement", "encyclopedia",
-"end", "endive", "endoderm", "endorsement", "endothelium", "endpoint", "enemy", "energy", "enforcement", "engagement", "engine", "engineer",
+"endive", "endoderm", "endorsement", "endothelium", "endpoint", "enemy", "energy", "enforcement", "engagement", "engineer",
"engineering", "enigma", "enjoyment", "enquiry", "enrollment", "enterprise", "entertainment", "enthusiasm", "entirety", "entity", "entrance",
"entree", "entrepreneur", "entry", "envelope", "environment", "envy", "enzyme", "epauliere", "epee", "ephemera", "ephemeris", "ephyra",
"epic", "episode", "epithelium", "epoch", "eponym", "epoxy", "equal", "equality", "equation", "equinox", "equipment", "equity", "equivalent",
"era", "eraser", "erection", "erosion", "error", "escalator", "escape", "escort", "espadrille", "espalier", "essay", "essence", "essential",
-"establishment", "estate", "estimate", "estrogen", "estuary", "eternity", "ethernet", "ethics", "ethnicity", "ethyl", "euphonium", "eurocentrism",
-"evaluation", "evaluator", "evaporation", "eve", "evening", "event", "everybody", "everyone", "everything", "eviction",
+"establishment", "estate", "estrogen", "estuary", "eternity", "ethernet", "ethics", "ethnicity", "ethyl", "euphonium", "eurocentrism",
+"evaluation", "evaluator", "evaporation", "eve", "evening", "everybody", "everyone", "everything", "eviction",
"evidence", "evil", "evocation", "evolution", "exaggeration", "exam", "examination", "examiner", "example",
-"exasperation", "excellence", "exception", "excerpt", "excess", "exchange", "excitement", "exclamation", "excursion", "excuse", "execution",
+"exasperation", "excellence", "exception", "excerpt", "excess", "excitement", "exclamation", "excursion", "excuse", "execution",
"executive", "executor", "exercise", "exhaust", "exhaustion", "exhibit", "exhibition", "exile", "existence", "exit", "exocrine", "expansion",
"expansionism", "expectancy", "expectation", "expedition", "expense", "experience", "experiment", "experimentation", "expert", "expertise",
-"explanation", "exploration", "explorer", "explosion", "export", "expose", "exposition", "exposure", "expression", "extension", "extent",
-"exterior", "external", "extinction", "extreme", "extremist", "eye", "eyeball", "eyebrow", "eyebrows", "eyeglasses", "eyelash", "eyelashes",
+"explanation", "exploration", "explorer", "explosion", "export", "expose", "exposition", "exposure", "extension", "extent",
+"exterior", "extinction", "extreme", "extremist", "eye", "eyeball", "eyebrow", "eyebrows", "eyeglasses", "eyelash", "eyelashes",
"eyelid", "eyelids", "eyeliner", "eyestrain", "eyrie", "fabric", "face", "facelift", "facet", "facility", "facsimile", "fact", "factor",
"factory", "faculty", "fahrenheit", "fail", "failure", "fairness", "fairy", "faith", "faithful", "fall", "fallacy", "fame",
"familiar", "familiarity", "family", "fan", "fang", "fanlight", "fanny", "fantasy", "farm", "farmer", "farming", "farmland",
@@ -242,13 +646,13 @@ std::initializer_list<const char *> nouns
"favorite", "fawn", "fax", "fear", "feast", "feather", "feature", "fedelini", "federation", "fedora", "fee", "feed", "feedback", "feeding",
"feel", "feeling", "fellow", "felony", "female", "fen", "fence", "fencing", "fender", "feng", "fennel", "ferret", "ferry", "ferryboat",
"fertilizer", "festival", "fetus", "few", "fiber", "fiberglass", "fibre", "fibroblast", "fibrosis", "ficlet", "fiction", "fiddle", "field",
-"fiery", "fiesta", "fifth", "fig", "fight", "fighter", "figure", "figurine", "file", "filing", "fill", "fillet", "filly", "film", "filter",
-"filth", "final", "finance", "financing", "finding", "fine", "finer", "finger", "fingerling", "fingernail", "finish", "finisher", "fir",
-"fire", "fireman", "fireplace", "firewall", "firm", "first", "fish", "fishbone", "fisherman", "fishery", "fishing", "fishmonger", "fishnet",
+"fiery", "fiesta", "fifth", "fig", "fight", "fighter", "figure", "figurine", "filing", "fillet", "filly", "film",
+"filth", "finance", "financing", "finding", "fine", "finer", "finger", "fingerling", "fingernail", "finish", "finisher", "fir",
+"fire", "fireman", "fireplace", "firewall", "firm", "fish", "fishbone", "fisherman", "fishery", "fishing", "fishmonger", "fishnet",
"fisting", "fit", "fitness", "fix", "fixture", "flag", "flair", "flame", "flan", "flanker", "flare", "flash", "flat", "flatboat", "flavor",
"flax", "fleck", "fledgling", "fleece", "flesh", "flexibility", "flick", "flicker", "flight", "flint", "flintlock", "flock",
"flood", "floodplain", "floor", "floozie", "flour", "flow", "flower", "flu", "flugelhorn", "fluke", "flume", "flung", "flute", "fly",
-"flytrap", "foal", "foam", "fob", "focus", "fog", "fold", "folder", "folk", "folklore", "follower", "following", "fondue", "font", "food",
+"flytrap", "foal", "foam", "fob", "focus", "fog", "fold", "folder", "folk", "folklore", "follower", "fondue", "font", "food",
"foodstuffs", "fool", "foot", "footage", "football", "footnote", "footprint", "footrest", "footstep", "footstool", "footwear", "forage",
"forager", "foray", "force", "ford", "forearm", "forebear", "forecast", "forehead", "foreigner", "forelimb", "forest", "forestry", "forever",
"forgery", "fork", "form", "formal", "formamide", "formation", "former", "formicarium", "formula", "fort", "forte", "fortnight",
@@ -256,7 +660,7 @@ std::initializer_list<const char *> nouns
"frame", "framework", "fratricide", "fraud", "fraudster", "freak", "freckle", "freedom", "freelance", "freezer", "freezing", "freight",
"freighter", "frenzy", "freon", "frequency", "fresco", "friction", "fridge", "friend", "friendship", "fries", "frigate", "fright", "fringe",
"fritter", "frock", "frog", "front", "frontier", "frost", "frosting", "frown", "fruit", "frustration", "fry", "fuel", "fugato",
-"fulfillment", "full", "fun", "function", "functionality", "fund", "funding", "fundraising", "funeral", "fur", "furnace", "furniture",
+"fulfillment", "fun", "functionality", "fund", "funding", "fundraising", "funeral", "fur", "furnace", "furniture",
"furry", "fusarium", "futon", "future", "gadget", "gaffe", "gaffer", "gain", "gaiters", "gale", "gallery", "galley",
"gallon", "galoshes", "gambling", "game", "gamebird", "gaming", "gander", "gang", "gap", "garage", "garb", "garbage", "garden",
"garlic", "garment", "garter", "gas", "gasket", "gasoline", "gasp", "gastronomy", "gastropod", "gate", "gateway", "gather", "gathering",
@@ -269,7 +673,7 @@ std::initializer_list<const char *> nouns
"goggles", "going", "gold", "goldfish", "golf", "gondola", "gong", "good", "goodbye", "goodie", "goodness", "goodnight",
"goodwill", "goose", "gopher", "gorilla", "gosling", "gossip", "governance", "government", "governor", "gown", "grace", "grade",
"gradient", "graduate", "graduation", "graffiti", "graft", "grain", "gram", "grammar", "gran", "grand", "grandchild", "granddaughter",
-"grandfather", "grandma", "grandmom", "grandmother", "grandpa", "grandparent", "grandson", "granny", "granola", "grant", "grape", "grapefruit",
+"grandfather", "grandma", "grandmom", "grandmother", "grandpa", "grandparent", "grandson", "granny", "granola", "grape", "grapefruit",
"graph", "graphic", "grasp", "grass", "grasshopper", "grassland", "gratitude", "gravel", "gravitas", "gravity", "gravy", "gray", "grease",
"greatness", "greed", "green", "greenhouse", "greens", "grenade", "grey", "grid", "grief",
"grill", "grin", "grip", "gripper", "grit", "grocery", "ground", "grouper", "grouse", "grove", "growth", "grub", "guacamole",
@@ -279,7 +683,7 @@ std::initializer_list<const char *> nouns
"halibut", "hall", "halloween", "hallway", "halt", "ham", "hamburger", "hammer", "hammock", "hamster", "hand", "handball",
"handful", "handgun", "handicap", "handle", "handlebar", "handmaiden", "handover", "handrail", "handsaw", "hanger", "happening", "happiness",
"harald", "harbor", "harbour", "hardboard", "hardcover", "hardening", "hardhat", "hardship", "hardware", "hare", "harm",
-"harmonica", "harmonise", "harmonize", "harmony", "harp", "harpooner", "harpsichord", "harvest", "harvester", "hash", "hashtag", "hassock",
+"harmonica", "harmonise", "harmonize", "harmony", "harp", "harpooner", "harpsichord", "harvest", "harvester", "hashtag", "hassock",
"haste", "hat", "hatbox", "hatchet", "hatchling", "hate", "hatred", "haunt", "haven", "haversack", "havoc", "hawk", "hay", "haze", "hazel",
"hazelnut", "head", "headache", "headlight", "headline", "headphones", "headquarters", "headrest", "health", "hearing",
"hearsay", "heart", "heartache", "heartbeat", "hearth", "hearthside", "heartwood", "heat", "heater", "heating", "heaven",
@@ -290,53 +694,53 @@ std::initializer_list<const char *> nouns
"hobbit", "hobby", "hockey", "hoe", "hog", "hold", "holder", "hole", "holiday", "home", "homeland", "homeownership", "hometown", "homework",
"homicide", "homogenate", "homonym", "honesty", "honey", "honeybee", "honeydew", "honor", "honoree", "hood",
"hoof", "hook", "hop", "hope", "hops", "horde", "horizon", "hormone", "horn", "hornet", "horror", "horse", "horseradish", "horst", "hose",
-"hosiery", "hospice", "hospital", "hospitalisation", "hospitality", "hospitalization", "host", "hostel", "hostess", "hotdog", "hotel",
-"hound", "hour", "hourglass", "house", "houseboat", "household", "housewife", "housework", "housing", "hovel", "hovercraft", "howard",
+"hosiery", "hospice", "hospital", "hospitalisation", "hospitality", "hospitalization", "hostel", "hostess", "hotdog", "hotel",
+"hound", "hourglass", "house", "houseboat", "household", "housewife", "housework", "housing", "hovel", "hovercraft", "howard",
"howitzer", "hub", "hubcap", "hubris", "hug", "hugger", "hull", "human", "humanity", "humidity", "hummus", "humor", "humour", "hunchback",
"hundred", "hunger", "hunt", "hunter", "hunting", "hurdle", "hurdler", "hurricane", "hurry", "hurt", "husband", "hut", "hutch", "hyacinth",
"hybridisation", "hybridization", "hydrant", "hydraulics", "hydrocarb", "hydrocarbon", "hydrofoil", "hydrogen", "hydrolyse", "hydrolysis",
"hydrolyze", "hydroxyl", "hyena", "hygienic", "hype", "hyphenation", "hypochondria", "hypothermia", "hypothesis", "ice",
-"iceberg", "icebreaker", "icecream", "icicle", "icing", "icon", "icy", "id", "idea", "ideal", "identification", "identity", "ideology",
+"iceberg", "icebreaker", "icecream", "icicle", "icing", "icon", "icy", "idea", "ideal", "identification", "identity", "ideology",
"idiom", "idiot", "igloo", "ignorance", "ignorant", "ikebana", "illegal", "illiteracy", "illness", "illusion", "illustration", "image",
"imagination", "imbalance", "imitation", "immigrant", "immigration", "immortal", "impact", "impairment", "impala", "impediment", "implement",
"implementation", "implication", "import", "importance", "impostor", "impress", "impression", "imprisonment", "impropriety", "improvement",
"impudence", "impulse", "inability", "inauguration", "inbox", "incandescence", "incarnation", "incense", "incentive",
"inch", "incidence", "incident", "incision", "inclusion", "income", "incompetence", "inconvenience", "increase", "incubation", "independence",
-"independent", "index", "indication", "indicator", "indigence", "individual", "industrialisation", "industrialization", "industry", "inequality",
+"independent", "indication", "indicator", "indigence", "individual", "industrialisation", "industrialization", "industry", "inequality",
"inevitable", "infancy", "infant", "infarction", "infection", "infiltration", "infinite", "infix", "inflammation", "inflation", "influence",
"influx", "info", "information", "infrastructure", "infusion", "inglenook", "ingrate", "ingredient", "inhabitant", "inheritance", "inhibition",
"inhibitor", "initial", "initialise", "initialize", "initiative", "injunction", "injury", "injustice", "ink", "inlay", "inn", "innervation",
-"innocence", "innocent", "innovation", "input", "inquiry", "inscription", "insect", "insectarium", "insert", "inside", "insight", "insolence",
+"innocence", "innocent", "innovation", "input", "inquiry", "inscription", "insect", "insectarium", "inside", "insight", "insolence",
"insomnia", "inspection", "inspector", "inspiration", "installation", "instance", "instant", "instinct", "institute", "institution",
"instruction", "instructor", "instrument", "instrumentalist", "instrumentation", "insulation", "insurance", "insurgence", "insurrection",
"integer", "integral", "integration", "integrity", "intellect", "intelligence", "intensity", "intent", "intention", "intentionality",
"interaction", "interchange", "interconnection", "intercourse", "interest", "interface", "interferometer", "interior", "interject", "interloper",
-"internet", "interpretation", "interpreter", "interval", "intervenor", "intervention", "interview", "interviewer", "intestine", "introduction",
+"internet", "interpretation", "interpreter", "intervenor", "intervention", "interview", "interviewer", "intestine", "introduction",
"intuition", "invader", "invasion", "invention", "inventor", "inventory", "inverse", "inversion", "investigation", "investigator", "investment",
"investor", "invitation", "invite", "invoice", "involvement", "iridescence", "iris", "iron", "ironclad", "irony", "irrigation", "ischemia",
"island", "isogloss", "isolation", "issue", "item", "itinerary", "ivory", "jack", "jackal", "jacket", "jackfruit", "jade", "jaguar",
-"jail", "jailhouse", "jalapeño", "jam", "jar", "jasmine", "jaw", "jazz", "jealousy", "jeans", "jeep", "jelly", "jellybeans", "jellyfish",
+"jail", "jailhouse", "jam", "jar", "jasmine", "jaw", "jazz", "jealousy", "jeans", "jeep", "jelly", "jellybeans", "jellyfish",
"jerk", "jet", "jewel", "jeweller", "jewellery", "jewelry", "jicama", "jiffy", "job", "jockey", "jodhpurs", "joey", "jogging", "joint",
"joke", "jot", "journal", "journalism", "journalist", "journey", "joy", "judge", "judgment", "judo", "jug", "juggernaut", "juice", "julienne",
"jumbo", "jump", "jumper", "jumpsuit", "jungle", "junior", "junk", "junker", "junket", "jury", "justice", "justification", "jute", "kale",
"kamikaze", "kangaroo", "karate", "kayak", "kazoo", "kebab", "keep", "keeper", "kendo", "kennel", "ketch", "ketchup", "kettle", "kettledrum",
-"key", "keyboard", "keyboarding", "keystone", "kick", "kid", "kidney", "kielbasa", "kill", "killer", "killing", "kilogram",
+"keyboard", "keyboarding", "keystone", "kick", "kid", "kidney", "kielbasa", "killer", "killing", "kilogram",
"kilometer", "kilt", "kimono", "kinase", "kind", "kindness", "king", "kingdom", "kingfish", "kiosk", "kiss", "kit", "kitchen", "kite",
"kitsch", "kitten", "kitty", "kiwi", "knee", "kneejerk", "knickers", "knife", "knight", "knitting", "knock", "knot",
"knowledge", "knuckle", "koala", "kohlrabi", "kumquat", "lab", "label", "labor", "laboratory", "laborer", "labour", "labourer", "lace",
"lack", "lacquerware", "lad", "ladder", "ladle", "lady", "ladybug", "lag", "lake", "lamb", "lambkin", "lament", "lamp", "lanai", "land",
"landform", "landing", "landmine", "landscape", "lane", "language", "lantern", "lap", "laparoscope", "lapdog", "laptop", "larch", "lard",
-"larder", "lark", "larva", "laryngitis", "lasagna", "lashes", "last", "latency", "latex", "lathe", "latitude", "latte", "latter", "laugh",
-"laughter", "laundry", "lava", "law", "lawmaker", "lawn", "lawsuit", "lawyer", "lay", "layer", "layout", "lead", "leader", "leadership",
-"leading", "leaf", "league", "leaker", "leap", "learning", "leash", "leather", "leave", "leaver", "lecture", "leek", "leeway", "left",
+"larder", "lark", "larva", "laryngitis", "lasagna", "lashes", "latency", "latex", "lathe", "latitude", "latte", "latter", "laugh",
+"laughter", "laundry", "lava", "law", "lawmaker", "lawn", "lawsuit", "lawyer", "lay", "layer", "lead", "leader", "leadership",
+"leaf", "league", "leaker", "leap", "learning", "leash", "leather", "leave", "leaver", "lecture", "leek", "leeway",
"leg", "legacy", "legal", "legend", "legging", "legislation", "legislator", "legislature", "legitimacy", "legume", "leisure", "lemon",
"lemonade", "lemur", "lender", "lending", "length", "lens", "lentil", "leopard", "leprosy", "leptocephalus", "lesson", "letter",
-"lettuce", "level", "lever", "leverage", "leveret", "liability", "liar", "liberty", "libido", "library", "licence", "license", "licensing",
-"licorice", "lid", "lie", "lieu", "lieutenant", "life", "lifestyle", "lifetime", "lift", "ligand", "light", "lighting", "lightning",
+"lettuce", "lever", "leverage", "leveret", "liability", "liar", "liberty", "libido", "library", "licence", "license", "licensing",
+"licorice", "lid", "lie", "lieu", "lieutenant", "life", "lifestyle", "lift", "ligand", "light", "lighting", "lightning",
"lightscreen", "ligula", "likelihood", "likeness", "lilac", "lily", "limb", "lime", "limestone", "limitation", "limo", "line",
"linen", "liner", "linguist", "linguistics", "lining", "link", "linkage", "linseed", "lion", "lip", "lipid", "lipoprotein", "lipstick",
-"liquid", "liquidity", "liquor", "list", "listening", "listing", "literate", "literature", "litigation", "litmus", "litter", "littleneck",
-"liver", "livestock", "living", "lizard", "llama", "load", "loading", "loaf", "loafer", "loan", "lobby", "lobotomy", "lobster", "local",
+"liquid", "liquidity", "liquor", "listening", "listing", "literate", "literature", "litigation", "litmus", "litter", "littleneck",
+"liver", "livestock", "living", "lizard", "llama", "load", "loading", "loaf", "loafer", "loan", "lobby", "lobotomy", "lobster",
"locality", "location", "lock", "locker", "locket", "locomotive", "locust", "lode", "loft", "log", "loggia", "logic", "login", "logistics",
"logo", "loincloth", "lollipop", "loneliness", "longboat", "longitude", "look", "lookout", "loop", "loophole", "loquat", "lord", "loss",
"lot", "lotion", "lottery", "lounge", "louse", "lout", "love", "lover", "lox", "loyalty", "luck", "luggage", "lumber", "lumberman", "lunch",
@@ -350,28 +754,28 @@ std::initializer_list<const char *> nouns
"manufacturer", "manufacturing", "many", "map", "maple", "mapping", "maracas", "marathon", "marble", "march", "mare", "margarine", "margin",
"mariachi", "marimba", "marines", "marionberry", "mark", "marker", "market", "marketer", "marketing", "marketplace", "marksman", "markup",
"marmalade", "marriage", "marsh", "marshland", "marshmallow", "marten", "marxism", "mascara", "mask", "masonry", "mass", "massage", "mast",
-"master", "masterpiece", "mastication", "mastoid", "mat", "match", "matchmaker", "mate", "material", "maternity", "math", "mathematics",
-"matrix", "matter", "mattock", "mattress", "max", "maximum", "maybe", "mayonnaise", "mayor", "meadow", "meal", "mean", "meander", "meaning",
+"master", "masterpiece", "mastication", "mastoid", "mat", "matchmaker", "mate", "material", "maternity", "math", "mathematics",
+"matrix", "matter", "mattock", "mattress", "maximum", "maybe", "mayonnaise", "mayor", "meadow", "meal", "mean", "meander", "meaning",
"means", "meantime", "measles", "measure", "measurement", "meat", "meatball", "meatloaf", "mecca", "mechanic", "mechanism", "med", "medal",
"media", "median", "medication", "medicine", "medium", "meet", "meeting", "melatonin", "melody", "melon", "member", "membership", "membrane",
-"meme", "memo", "memorial", "memory", "men", "menopause", "menorah", "mention", "mentor", "menu", "merchandise", "merchant", "mercury",
+"meme", "memo", "memorial", "men", "menopause", "menorah", "mention", "mentor", "menu", "merchandise", "merchant", "mercury",
"meridian", "meringue", "merit", "mesenchyme", "mess", "message", "messenger", "messy", "metabolite", "metal", "metallurgist", "metaphor",
"meteor", "meteorology", "meter", "methane", "method", "methodology", "metric", "metro", "metronome", "mezzanine", "microlending", "micronutrient",
"microphone", "microwave", "midden", "middle", "middleman", "midline", "midnight", "midwife", "might", "migrant", "migration",
"mile", "mileage", "milepost", "milestone", "military", "milk", "milkshake", "mill", "millennium", "millet", "millimeter", "million",
-"millisecond", "millstone", "mime", "mimosa", "min", "mincemeat", "mind", "mine", "mineral", "mineshaft", "mini", "minibus",
-"minimalism", "minimum", "mining", "minion", "minister", "mink", "minnow", "minor", "minority", "mint", "minute", "miracle",
+"millstone", "mime", "mimosa", "mincemeat", "mind", "mine", "mineral", "mineshaft", "mini", "minibus",
+"minimalism", "minimum", "mining", "minion", "minister", "mink", "minnow", "minor", "minority", "mint", "miracle",
"mirror", "miscarriage", "miscommunication", "misfit", "misnomer", "misogyny", "misplacement", "misreading", "misrepresentation", "miss",
"missile", "mission", "missionary", "mist", "mistake", "mister", "misunderstand", "miter", "mitten", "mix", "mixer", "mixture", "moai",
"moat", "mob", "mobile", "mobility", "mobster", "moccasins", "mocha", "mochi", "mode", "model", "modeling", "modem", "modernist", "modernity",
"modification", "molar", "molasses", "molding", "mole", "molecule", "mom", "moment", "monastery", "monasticism", "money", "monger", "monitor",
-"monitoring", "monk", "monkey", "monocle", "monopoly", "monotheism", "monsoon", "monster", "month", "monument", "mood", "moody", "moon",
+"monitoring", "monk", "monkey", "monocle", "monopoly", "monotheism", "monsoon", "monster", "monument", "mood", "moody", "moon",
"moonlight", "moonscape", "moonshine", "moose", "mop", "morale", "morbid", "morbidity", "morning", "moron", "morphology", "morsel", "mortal",
"mortality", "mortgage", "mortise", "mosque", "mosquito", "most", "motel", "moth", "mother", "motion", "motivation",
"motive", "motor", "motorboat", "motorcar", "motorcycle", "mound", "mountain", "mouse", "mouser", "mousse", "moustache", "mouth", "mouton",
"movement", "mover", "movie", "mower", "mozzarella", "mud", "muffin", "mug", "mukluk", "mule", "multimedia", "murder", "muscat", "muscatel",
"muscle", "musculature", "museum", "mushroom", "music", "musician", "muskrat", "mussel", "mustache", "mustard",
-"mutation", "mutt", "mutton", "mycoplasma", "mystery", "myth", "mythology", "nail", "name", "naming", "nanoparticle", "napkin", "narrative",
+"mutt", "mutton", "mycoplasma", "mystery", "myth", "mythology", "nail", "naming", "nanoparticle", "napkin", "narrative",
"nasal", "nation", "nationality", "native", "naturalisation", "nature", "navigation", "necessity", "neck", "necklace", "necktie", "nectar",
"nectarine", "need", "needle", "neglect", "negligee", "negotiation", "neighbor", "neighborhood", "neighbour", "neighbourhood", "neologism",
"neon", "neonate", "nephew", "nerve", "nest", "nestling", "nestmate", "net", "netball", "netbook", "netsuke", "network", "networking",
@@ -381,13 +785,13 @@ std::initializer_list<const char *> nouns
"noodle", "noodles", "noon", "norm", "normal", "normalisation", "normalization", "north", "nose", "notation", "note", "notebook", "notepad",
"nothing", "notice", "notion", "notoriety", "nougat", "noun", "nourishment", "novel", "nucleotidase", "nucleotide", "nudge", "nuke",
"number", "numeracy", "numeric", "numismatist", "nun", "nurse", "nursery", "nursing", "nurture", "nut", "nutmeg", "nutrient", "nutrition",
-"nylon", "nymph", "oak", "oar", "oasis", "oat", "oatmeal", "oats", "obedience", "obesity", "obi", "object", "objection", "objective",
+"nylon", "nymph", "oak", "oar", "oasis", "oat", "oatmeal", "oats", "obedience", "obesity", "obi", "objection", "objective",
"obligation", "oboe", "observation", "observatory", "obsession", "obsidian", "obstacle", "occasion", "occupation", "occurrence", "ocean",
"ocelot", "octagon", "octave", "octavo", "octet", "octopus", "odometer", "odyssey", "oeuvre", "offence", "offense", "offer",
-"offering", "office", "officer", "official", "offset", "oil", "okra", "oldie", "oleo", "olive", "omega", "omelet", "omission", "omnivore",
+"offering", "office", "officer", "official", "oil", "okra", "oldie", "oleo", "olive", "omega", "omelet", "omission", "omnivore",
"oncology", "onion", "online", "onset", "opening", "opera", "operating", "operation", "operator", "ophthalmologist", "opinion", "opium",
"opossum", "opponent", "opportunist", "opportunity", "opposite", "opposition", "optimal", "optimisation", "optimist", "optimization",
-"option", "orange", "orangutan", "orator", "orchard", "orchestra", "orchid", "ordinary", "ordination", "ore", "oregano", "organ",
+"orange", "orangutan", "orator", "orchard", "orchestra", "orchid", "ordinary", "ordination", "ore", "oregano", "organ",
"organisation", "organising", "organization", "organizing", "orient", "orientation", "origin", "original", "originality", "ornament",
"osmosis", "osprey", "ostrich", "other", "otter", "ottoman", "ounce", "outback", "outcome", "outfielder", "outfit", "outhouse", "outlaw",
"outlay", "outlet", "outline", "outlook", "output", "outrage", "outrigger", "outrun", "outset", "outside", "oval", "ovary", "oven", "overcharge",
@@ -398,7 +802,7 @@ std::initializer_list<const char *> nouns
"pansy", "panther", "panties", "pantologist", "pantology", "pantry", "pants", "pantsuit", "panty", "pantyhose", "papa", "papaya", "paper",
"paperback", "paperwork", "parable", "parachute", "parade", "paradise", "paragraph", "parallelogram", "paramecium", "paramedic", "parameter",
"paranoia", "parcel", "parchment", "pard", "pardon", "parent", "parenthesis", "parenting", "park", "parka", "parking", "parliament",
-"parole", "parrot", "parser", "parsley", "parsnip", "part", "participant", "participation", "particle", "particular", "partner", "partnership",
+"parole", "parrot", "parser", "parsley", "parsnip", "participant", "participation", "particle", "particular", "partner", "partnership",
"partridge", "party", "pass", "passage", "passbook", "passenger", "passing", "passion", "passive", "passport", "password", "past", "pasta",
"paste", "pastor", "pastoralist", "pastry", "pasture", "pat", "patch", "pate", "patent", "patentee", "path", "pathogenesis", "pathology",
"pathway", "patience", "patient", "patina", "patio", "patriarch", "patrimony", "patriot", "patrol", "patroller", "patrolling", "patron",
@@ -413,26 +817,26 @@ std::initializer_list<const char *> nouns
"physical", "physics", "physiology", "pianist", "piano", "piccolo", "pick", "pickax", "pickaxe", "picket", "pickle", "pickup", "picnic",
"picture", "picturesque", "pie", "piece", "pier", "piety", "pig", "pigeon", "piglet", "pigpen", "pigsty", "pike", "pilaf", "pile", "pilgrim",
"pilgrimage", "pill", "pillar", "pillbox", "pillow", "pilot", "pimp", "pimple", "pin", "pinafore", "pine", "pineapple",
-"pinecone", "ping", "pink", "pinkie", "pinot", "pinstripe", "pint", "pinto", "pinworm", "pioneer", "pipe", "pipeline", "piracy", "pirate",
+"pinecone", "ping", "pink", "pinkie", "pinot", "pinstripe", "pint", "pinto", "pinworm", "pioneer", "pipe", "piracy", "pirate",
"pistol", "pit", "pita", "pitch", "pitcher", "pitching", "pith", "pizza", "place", "placebo", "placement", "placode", "plagiarism",
-"plain", "plaintiff", "plan", "plane", "planet", "planning", "plant", "plantation", "planter", "planula", "plaster", "plasterboard",
+"plain", "plaintiff", "plane", "planet", "planning", "plant", "plantation", "planter", "planula", "plaster", "plasterboard",
"plastic", "plate", "platelet", "platform", "platinum", "platter", "platypus", "play", "player", "playground", "playroom", "playwright",
"plea", "pleasure", "pleat", "pledge", "plenty", "plier", "pliers", "plight", "plot", "plough", "plover", "plow", "plowman", "plug",
"plugin", "plum", "plumber", "plume", "plunger", "plywood", "pneumonia", "pocket", "pocketbook", "pod", "podcast", "poem",
"poet", "poetry", "poignance", "point", "poison", "poisoning", "poker", "polarisation", "polarization", "pole", "polenta", "police",
-"policeman", "policy", "polish", "politician", "politics", "poll", "polliwog", "pollutant", "pollution", "polo", "polyester", "polyp",
+"policeman", "polish", "politician", "politics", "poll", "polliwog", "pollutant", "pollution", "polo", "polyester", "polyp",
"pomegranate", "pomelo", "pompom", "poncho", "pond", "pony", "pool", "poor", "pop", "popcorn", "poppy", "popsicle", "popularity", "population",
"populist", "porcelain", "porch", "porcupine", "pork", "porpoise", "port", "porter", "portfolio", "porthole", "portion", "portrait",
-"position", "possession", "possibility", "possible", "post", "postage", "postbox", "poster", "posterior", "postfix", "pot", "potato",
+"possession", "possibility", "possible", "post", "postage", "postbox", "poster", "posterior", "postfix", "pot", "potato",
"potential", "pottery", "potty", "pouch", "poultry", "pound", "pounding", "poverty", "powder", "power", "practice", "practitioner", "prairie",
-"praise", "pray", "prayer", "precedence", "precedent", "precipitation", "precision", "predecessor", "preface", "preference", "prefix",
+"praise", "pray", "prayer", "precedence", "precedent", "precipitation", "predecessor", "preface", "preference", "prefix",
"pregnancy", "prejudice", "prelude", "premeditation", "premier", "premise", "premium", "preoccupation", "preparation", "prescription",
"presence", "present", "presentation", "preservation", "preserves", "presidency", "president", "press", "pressroom", "pressure", "pressurisation",
"pressurization", "prestige", "presume", "pretzel", "prevalence", "prevention", "prey", "price", "pricing", "pride", "priest", "priesthood",
-"primary", "primate", "prince", "princess", "principal", "principle", "print", "printer", "printing", "prior", "priority", "prison",
+"primate", "prince", "princess", "principal", "principle", "print", "printer", "printing", "prior", "priority", "prison",
"prisoner", "privacy", "private", "privilege", "prize", "prizefight", "probability", "probation", "probe", "problem", "procedure", "proceedings",
"process", "processing", "processor", "proctor", "procurement", "produce", "producer", "product", "production", "productivity", "profession",
-"professional", "professor", "profile", "profit", "progenitor", "program", "programme", "programming", "progress", "progression", "prohibition",
+"professional", "professor", "profit", "progenitor", "program", "programme", "programming", "progress", "progression", "prohibition",
"project", "proliferation", "promenade", "promise", "promotion", "prompt", "pronoun", "pronunciation", "proof", "propaganda",
"propane", "property", "prophet", "proponent", "proportion", "proposal", "proposition", "proprietor", "prose", "prosecution", "prosecutor",
"prospect", "prosperity", "prostacyclin", "prostanoid", "prostrate", "protection", "protein", "protest", "protocol", "providence", "provider",
@@ -440,14 +844,14 @@ std::initializer_list<std::string_view> nouns
"psychologist", "psychology", "ptarmigan", "pub", "public", "publication", "publicity", "publisher", "publishing", "pudding", "puddle",
"puffin", "pug", "puggle", "pulley", "pulse", "puma", "pump", "pumpernickel", "pumpkin", "pumpkinseed", "pun", "punch", "punctuation",
"punishment", "pup", "pupa", "pupil", "puppet", "puppy", "purchase", "puritan", "purity", "purple", "purpose", "purr", "purse", "pursuit",
-"push", "pusher", "put", "puzzle", "pyramid", "pyridine", "quadrant", "quail", "qualification", "quality", "quantity", "quart", "quarter",
-"quartet", "quartz", "queen", "query", "quest", "question", "questioner", "questionnaire", "quiche", "quicksand", "quiet", "quill", "quilt",
-"quince", "quinoa", "quit", "quiver", "quota", "quotation", "quote", "rabbi", "rabbit", "raccoon", "race", "racer", "racing", "racism",
+"push", "pusher", "put", "puzzle", "pyramid", "pyridine", "quadrant", "quail", "qualification", "quality", "quantity", "quart",
+"quartet", "quartz", "queen", "quest", "question", "questioner", "questionnaire", "quiche", "quicksand", "quiet", "quill", "quilt",
+"quince", "quinoa", "quit", "quiver", "quotation", "quote", "rabbi", "rabbit", "raccoon", "race", "racer", "racing", "racism",
"racist", "rack", "radar", "radiator", "radio", "radiosonde", "radish", "raffle", "raft", "rag", "rage", "raid", "rail", "railing", "railroad",
"railway", "raiment", "rain", "rainbow", "raincoat", "rainmaker", "rainstorm", "rainy", "raise", "raisin", "rake", "rally", "ram", "rambler",
-"ramen", "ramie", "ranch", "rancher", "randomisation", "randomization", "range", "ranger", "rank", "rap", "rape", "raspberry", "rat",
+"ramen", "ramie", "ranch", "rancher", "randomisation", "randomization", "ranger", "rank", "rap", "rape", "raspberry", "rat",
"rate", "ratepayer", "rating", "ratio", "rationale", "rations", "raven", "ravioli", "rawhide", "ray", "rayon", "razor", "reach", "reactant",
-"reaction", "read", "reader", "readiness", "reading", "real", "reality", "realization", "realm", "reamer", "rear", "reason", "reasoning",
+"reaction", "read", "reader", "readiness", "reading", "real", "reality", "realization", "reamer", "rear", "reason", "reasoning",
"rebel", "rebellion", "reboot", "recall", "recapitulation", "receipt", "receiver", "reception", "receptor", "recess", "recession", "recipe",
"recipient", "reciprocity", "reclamation", "recliner", "recognition", "recollection", "recommendation", "reconsideration", "record",
"recorder", "recording", "recovery", "recreation", "recruit", "rectangle", "red", "redesign", "redhead", "redirect", "rediscovery", "reduction",
@@ -457,21 +861,21 @@ std::initializer_list<std::string_view> nouns
"reliability", "relief", "religion", "relish", "reluctance", "remains", "remark", "reminder", "remnant", "remote", "removal", "renaissance",
"rent", "reorganisation", "reorganization", "repair", "reparation", "repayment", "repeat", "replacement", "replica", "replication", "reply",
"report", "reporter", "reporting", "repository", "representation", "representative", "reprocessing", "republic", "republican", "reputation",
-"request", "requirement", "resale", "rescue", "research", "researcher", "resemblance", "reservation", "reserve", "reservoir", "reset",
+"request", "requirement", "resale", "rescue", "research", "researcher", "resemblance", "reservation", "reserve", "reservoir",
"residence", "resident", "residue", "resist", "resistance", "resolution", "resolve", "resort", "resource", "respect", "respite", "response",
-"responsibility", "rest", "restaurant", "restoration", "restriction", "restroom", "restructuring", "result", "resume", "retailer", "retention",
+"responsibility", "rest", "restaurant", "restoration", "restriction", "restroom", "restructuring", "result", "retailer", "retention",
"rethinking", "retina", "retirement", "retouching", "retreat", "retrospect", "retrospective", "retrospectivity", "return", "reunion",
"revascularisation", "revascularization", "reveal", "revelation", "revenant", "revenge", "revenue", "reversal", "reverse", "review",
"revitalisation", "revitalization", "revival", "revolution", "revolver", "reward", "rhetoric", "rheumatism", "rhinoceros", "rhubarb",
-"rhyme", "rhythm", "rib", "ribbon", "rice", "riddle", "ride", "rider", "ridge", "riding", "rifle", "right", "rim", "ring", "ringworm",
+"rhyme", "rhythm", "rib", "ribbon", "rice", "riddle", "ride", "rider", "ridge", "riding", "rifle", "rim", "ring", "ringworm",
"riot", "rip", "ripple", "rise", "riser", "risk", "rite", "ritual", "river", "riverbed", "rivulet", "road", "roadway", "roar", "roast",
-"robe", "robin", "robot", "robotics", "rock", "rocker", "rocket", "rod", "role", "roll", "roller", "romaine", "romance",
+"robe", "robin", "robot", "robotics", "rock", "rocker", "rocket", "rod", "roll", "roller", "romaine", "romance",
"roof", "room", "roommate", "rooster", "root", "rope", "rose", "rosemary", "roster", "rostrum", "rotation", "round", "roundabout", "route",
-"router", "routine", "row", "rowboat", "rowing", "rubber", "rubric", "ruby", "ruckus", "rudiment", "ruffle", "rug", "rugby",
+"router", "routine", "rowboat", "rowing", "rubber", "rubric", "ruby", "ruckus", "rudiment", "ruffle", "rug", "rugby",
"ruin", "rule", "ruler", "ruling", "rum", "rumor", "run", "runaway", "runner", "running", "runway", "rush", "rust", "rutabaga", "rye",
"sabre", "sac", "sack", "saddle", "sadness", "safari", "safe", "safeguard", "safety", "saffron", "sage", "sail", "sailboat", "sailing",
-"sailor", "saint", "sake", "salad", "salami", "salary", "sale", "salesman", "salmon", "salon", "saloon", "salsa", "salt", "salute", "samovar",
-"sampan", "sample", "samurai", "sanction", "sanctity", "sanctuary", "sand", "sandal", "sandbar", "sandpaper", "sandwich", "sanity", "sardine",
+"sailor", "saint", "sake", "salad", "salami", "salary", "sale", "salesman", "salmon", "salon", "saloon", "salsa", "salute", "samovar",
+"sampan", "samurai", "sanction", "sanctity", "sanctuary", "sand", "sandal", "sandbar", "sandpaper", "sandwich", "sanity", "sardine",
"sari", "sarong", "sash", "satellite", "satin", "satire", "satisfaction", "sauce", "saucer", "sauerkraut", "sausage", "savage", "savannah",
"saving", "savings", "savior", "saviour", "savory", "saw", "saxophone", "scaffold", "scale", "scallion", "scallops", "scalp", "scam",
"scanner", "scarecrow", "scarf", "scarification", "scenario", "scene", "scenery", "scent", "schedule", "scheduling", "schema", "scheme",
@@ -479,20 +883,20 @@ std::initializer_list<std::string_view> nouns
"scooter", "scope", "score", "scorn", "scorpion", "scotch", "scout", "scow", "scrambled", "scrap", "scraper", "scratch", "screamer",
"screen", "screening", "screenwriting", "screw", "screwdriver", "scrim", "scrip", "script", "scripture", "scrutiny", "sculpting",
"sculptural", "sculpture", "sea", "seabass", "seafood", "seagull", "seal", "seaplane", "search", "seashore", "seaside", "season", "seat",
-"seaweed", "second", "secrecy", "secret", "secretariat", "secretary", "secretion", "section", "sectional", "sector", "security", "sediment",
+"seaweed", "secrecy", "secret", "secretariat", "secretary", "secretion", "section", "sectional", "sector", "security", "sediment",
"seed", "seeder", "seeker", "seep", "segment", "seizure", "selection", "self", "seller",
"selling", "semantics", "semester", "semicircle", "semicolon", "semiconductor", "seminar", "senate", "senator", "sender", "senior", "sense",
"sensibility", "sensitive", "sensitivity", "sensor", "sentence", "sentencing", "sentiment", "sepal", "separation", "septicaemia", "sequel",
-"sequence", "serial", "series", "sermon", "serum", "serval", "servant", "server", "service", "servitude", "sesame", "session", "set",
-"setback", "setting", "settlement", "settler", "severity", "sewer", "sex", "sexuality", "shack", "shackle", "shade", "shadow", "shadowbox",
+"sequence", "serial", "series", "sermon", "serum", "serval", "servant", "service", "servitude", "sesame", "session",
+"setback", "settlement", "settler", "severity", "sewer", "sex", "sexuality", "shack", "shackle", "shade", "shadow", "shadowbox",
"shakedown", "shaker", "shallot", "shallows", "shame", "shampoo", "shanty", "shape", "share", "shareholder", "shark", "shaw", "shawl",
"shear", "shearling", "sheath", "shed", "sheep", "sheet", "shelf", "shell", "shelter", "sherbet", "sherry", "shield", "shift", "shin",
"shine", "shingle", "ship", "shipper", "shipping", "shipyard", "shirt", "shirtdress", "shoat", "shock", "shoe",
"shoehorn", "shoelace", "shoemaker", "shoes", "shoestring", "shofar", "shoot", "shootdown", "shop", "shopper", "shopping", "shore", "shoreline",
-"short", "shortage", "shorts", "shortwave", "shot", "shoulder", "shout", "shovel", "show", "shower", "shred", "shrimp",
+"short", "shortage", "shorts", "shortwave", "shot", "shoulder", "shout", "shovel", "shower", "shred", "shrimp",
"shrine", "shutdown", "sibling", "sick", "sickness", "side", "sideboard", "sideburns", "sidecar", "sidestream", "sidewalk", "siding",
"siege", "sigh", "sight", "sightseeing", "sign", "signal", "signature", "signet", "significance", "signify", "signup", "silence", "silica",
-"silicon", "silk", "silkworm", "sill", "silly", "silo", "silver", "similarity", "simple", "simplicity", "simplification", "simvastatin",
+"silicon", "silk", "silkworm", "sill", "silly", "silo", "silver", "similarity", "simplicity", "simplification", "simvastatin",
"sin", "singer", "singing", "singular", "sink", "sinuosity", "sip", "sir", "sister", "sitar", "site", "situation", "size",
"skate", "skating", "skean", "skeleton", "ski", "skiing", "skill", "skin", "skirt", "skull", "skullcap", "skullduggery", "skunk", "sky",
"skylight", "skyline", "skyscraper", "skywalk", "slang", "slapstick", "slash", "slate", "slavery", "slaw", "sled", "sledge",
@@ -503,7 +907,7 @@ std::initializer_list<std::string_view> nouns
"society", "sociology", "sock", "socks", "soda", "sofa", "softball", "softdrink", "softening", "software", "soil", "soldier", "sole",
"solicitation", "solicitor", "solidarity", "solidity", "soliloquy", "solitaire", "solution", "solvency", "sombrero", "somebody", "someone",
"someplace", "somersault", "something", "somewhere", "son", "sonar", "sonata", "song", "songbird", "sonnet", "soot", "sophomore", "soprano",
-"sorbet", "sorghum", "sorrel", "sorrow", "sort", "soul", "soulmate", "sound", "soundness", "soup", "source", "sourwood", "sousaphone",
+"sorbet", "sorghum", "sorrel", "sorrow", "sort", "soul", "soulmate", "sound", "soundness", "soup", "sourwood", "sousaphone",
"south", "southeast", "souvenir", "sovereignty", "sow", "soy", "soybean", "space", "spacing", "spade", "spaghetti", "span", "spandex",
"spank", "sparerib", "spark", "sparrow", "spasm", "spat", "spatula", "spawn", "speaker", "speakerphone", "speaking", "spear", "spec",
"special", "specialist", "specialty", "species", "specification", "spectacle", "spectacles", "spectrograph", "spectrum", "speculation",
@@ -515,11 +919,11 @@ std::initializer_list<std::string_view> nouns
"staff", "stag", "stage", "stain", "stair", "staircase", "stake", "stalk", "stall", "stallion", "stamen", "stamina", "stamp", "stance",
"stand", "standard", "standardisation", "standardization", "standing", "standoff", "standpoint", "star", "starboard", "start", "starter",
"state", "statement", "statin", "station", "statistic", "statistics", "statue", "status", "statute", "stay", "steak",
-"stealth", "steam", "steamroller", "steel", "steeple", "stem", "stench", "stencil", "step",
+"stealth", "steam", "steamroller", "steel", "steeple", "stem", "stench", "stencil",
"stepdaughter", "stepmother",
"stepson", "stereo", "stew", "steward", "stick", "sticker", "stiletto", "still", "stimulation", "stimulus", "sting",
"stinger", "stitch", "stitcher", "stock", "stockings", "stole", "stomach", "stone", "stonework", "stool",
-"stop", "stopsign", "stopwatch", "storage", "store", "storey", "storm", "story", "storyboard", "stot", "stove", "strait",
+"stop", "stopsign", "stopwatch", "store", "storey", "storm", "story", "storyboard", "stot", "stove", "strait",
"strand", "stranger", "strap", "strategy", "straw", "strawberry", "strawman", "stream", "street", "streetcar", "strength", "stress",
"stretch", "strife", "strike", "string", "strip", "stripe", "strobe", "stroke", "structure", "strudel", "struggle", "stucco", "stud",
"student", "studio", "study", "stuff", "stumbling", "stump", "stupidity", "sturgeon", "sty", "style", "styling", "stylus", "sub", "subcomponent",
@@ -533,16 +937,16 @@ std::initializer_list<std::string_view> nouns
"suspenders", "suspension", "sustainment", "sustenance", "swallow", "swamp", "swan", "swanling", "swath", "sweat", "sweater", "sweatshirt",
"sweatshop", "sweatsuit", "sweets", "swell", "swim", "swimming", "swimsuit", "swine", "swing", "switch", "switchboard", "switching",
"swivel", "sword", "swordfight", "swordfish", "sycamore", "symbol", "symmetry", "sympathy", "symptom", "syndicate", "syndrome", "synergy",
-"synod", "synonym", "synthesis", "syrup", "system", "tab", "tabby", "tabernacle", "tablecloth", "tablet", "tabletop",
+"synod", "synonym", "synthesis", "syrup", "tab", "tabby", "tabernacle", "tablecloth", "tablet", "tabletop",
"tachometer", "tackle", "taco", "tactics", "tactile", "tadpole", "tag", "tail", "tailbud", "tailor", "tailspin", "takeover",
"tale", "talent", "talk", "talking", "tamale", "tambour", "tambourine", "tan", "tandem", "tangerine", "tank",
"tanker", "tankful", "tap", "tape", "tapioca", "target", "taro", "tarragon", "tart", "task", "tassel", "taste", "tatami", "tattler",
"tattoo", "tavern", "tax", "taxi", "taxicab", "taxpayer", "tea", "teacher", "teaching", "team", "teammate", "teapot", "tear", "tech",
"technician", "technique", "technologist", "technology", "tectonics", "teen", "teenager", "teepee", "telephone", "telescreen", "teletype",
-"television", "tell", "teller", "temp", "temper", "temperature", "temple", "tempo", "temporariness", "temporary", "temptation", "temptress",
+"television", "tell", "teller", "temp", "temper", "temperature", "temple", "tempo", "temporariness", "temptation", "temptress",
"tenant", "tendency", "tender", "tenement", "tenet", "tennis", "tenor", "tension", "tensor", "tent", "tentacle", "tenth", "tepee", "teriyaki",
"term", "terminal", "termination", "terminology", "termite", "terrace", "terracotta", "terrapin", "terrarium", "territory", "terror",
-"terrorism", "terrorist", "test", "testament", "testimonial", "testimony", "testing", "text", "textbook", "textual", "texture", "thanks",
+"terrorism", "terrorist", "testament", "testimonial", "testimony", "testing", "text", "textbook", "textual", "texture", "thanks",
"thaw", "theater", "theft", "theism", "theme", "theology", "theory", "therapist", "therapy", "thermals", "thermometer", "thermostat",
"thesis", "thickness", "thief", "thigh", "thing", "thinking", "thirst", "thistle", "thong", "thongs", "thorn", "thought", "thousand",
"thread", "threat", "threshold", "thrift", "thrill", "throat", "throne", "thrush", "thrust", "thug", "thumb", "thump", "thunder", "thunderbolt",
@@ -550,49 +954,49 @@ std::initializer_list<std::string_view> nouns
"timber", "time", "timeline", "timeout", "timer", "timetable", "timing", "timpani", "tin", "tinderbox", "tinkle", "tintype", "tip", "tire",
"tissue", "titanium", "title", "toad", "toast", "toaster", "tobacco", "today", "toe", "toenail", "toffee", "tofu", "tog", "toga", "toilet",
"tolerance", "tolerant", "toll", "tomatillo", "tomato", "tomb", "tomography", "tomorrow", "ton", "tonality", "tone", "tongue",
-"tonic", "tonight", "tool", "toot", "tooth", "toothbrush", "toothpaste", "toothpick", "top", "topic", "topsail", "toque",
+"tonic", "tonight", "tool", "toot", "tooth", "toothbrush", "toothpaste", "toothpick", "topic", "topsail", "toque",
"toreador", "tornado", "torso", "torte", "tortellini", "tortilla", "tortoise", "tosser", "total", "tote", "touch", "tour",
"tourism", "tourist", "tournament", "towel", "tower", "town", "townhouse", "township", "toy", "trace", "trachoma", "track",
-"tracking", "tracksuit", "tract", "tractor", "trade", "trader", "trading", "tradition", "traditionalism", "traffic", "trafficker", "tragedy",
-"trail", "trailer", "trailpatrol", "train", "trainer", "training", "trait", "tram", "tramp", "trance", "transaction", "transcript", "transfer",
+"tracksuit", "tract", "tractor", "trade", "trader", "trading", "tradition", "traditionalism", "traffic", "trafficker", "tragedy",
+"trail", "trailer", "trailpatrol", "train", "trainer", "training", "trait", "tram", "tramp", "trance", "transcript", "transfer",
"transformation", "transit", "transition", "translation", "transmission", "transom", "transparency", "transplantation", "transport",
"transportation", "trap", "trapdoor", "trapezium", "trapezoid", "trash", "travel", "traveler", "tray", "treasure", "treasury", "treat",
-"treatment", "treaty", "tree", "trek", "trellis", "tremor", "trench", "trend", "triad", "trial", "triangle", "tribe", "tributary", "trick",
-"trigger", "trigonometry", "trillion", "trim", "trinket", "trip", "tripod", "tritone", "triumph", "trolley", "trombone", "troop", "trooper",
+"treatment", "treaty", "trek", "trellis", "tremor", "trench", "trend", "triad", "trial", "triangle", "tribe", "tributary", "trick",
+"trigonometry", "trillion", "trinket", "trip", "tripod", "tritone", "triumph", "trolley", "trombone", "troop", "trooper",
"trophy", "trouble", "trousers", "trout", "trove", "trowel", "truck", "trumpet", "trunk", "trust", "trustee", "truth", "try", "tsunami",
"tub", "tuba", "tube", "tuber", "tug", "tugboat", "tuition", "tulip", "tumbler", "tummy", "tuna", "tune", "tunic", "tunnel",
"turban", "turf", "turkey", "turmeric", "turn", "turning", "turnip", "turnover", "turnstile", "turret", "turtle", "tusk", "tussle", "tutu",
-"tuxedo", "tweet", "tweezers", "twig", "twilight", "twine", "twins", "twist", "twister", "twitter", "type", "typeface", "typewriter",
+"tuxedo", "tweet", "tweezers", "twig", "twilight", "twine", "twins", "twist", "twister", "twitter", "typeface", "typewriter",
"typhoon", "ukulele", "ultimatum", "umbrella", "unblinking", "uncertainty", "uncle", "underclothes", "underestimate", "underground",
"underneath", "underpants", "underpass", "undershirt", "understanding", "understatement", "undertaker", "underwear", "underweight", "underwire",
-"underwriting", "unemployment", "unibody", "uniform", "uniformity", "unique", "unit", "unity", "universe", "university", "update",
-"upgrade", "uplift", "upper", "upstairs", "upward", "urge", "urgency", "urn", "usage", "use", "user", "usher", "usual", "utensil", "utilisation",
+"underwriting", "unemployment", "unibody", "uniform", "uniformity", "unit", "unity", "universe", "university",
+"upgrade", "uplift", "upper", "upstairs", "upward", "urge", "urgency", "urn", "usage", "usher", "usual", "utensil", "utilisation",
"utility", "utilization", "vacation", "vaccine", "vacuum", "vagrant", "valance", "valentine", "validate", "validity", "valley", "valuable",
-"value", "vampire", "van", "vanadyl", "vane", "vanilla", "vanity", "variability", "variable", "variant", "variation", "variety", "vascular",
+"vampire", "van", "vanadyl", "vane", "vanilla", "vanity", "variability", "variable", "variant", "variation", "variety", "vascular",
"vase", "vault", "vaulting", "veal", "vector", "vegetable", "vegetarian", "vegetarianism", "vegetation", "vehicle", "veil", "vein", "veldt",
"vellum", "velocity", "velodrome", "velvet", "vendor", "veneer", "vengeance", "venison", "venom", "venti", "venture", "venue", "veranda",
"verb", "verdict", "verification", "vermicelli", "vernacular", "verse", "version", "vertigo", "verve", "vessel", "vest", "vestment",
"vet", "veteran", "veterinarian", "veto", "viability", "vibe", "vibraphone", "vibration", "vibrissae", "vice", "vicinity", "victim",
-"victory", "video", "view", "viewer", "vignette", "villa", "village", "vine", "vinegar", "vineyard", "vintage", "vintner", "vinyl", "viola",
+"victory", "video", "viewer", "vignette", "villa", "village", "vine", "vinegar", "vineyard", "vintage", "vintner", "vinyl", "viola",
"violation", "violence", "violet", "violin", "virginal", "virtue", "virus", "visa", "viscose", "vise", "vision", "visit", "visitor",
"visor", "vista", "visual", "vitality", "vitamin", "vitro", "vivo", "vixen", "vodka", "vogue", "voice", "void", "vol", "volatility",
-"volcano", "volleyball", "volume", "volunteer", "volunteering", "vomit", "vote", "voter", "voting", "voyage", "vulture", "wad", "wafer",
+"volcano", "volleyball", "volunteer", "volunteering", "vomit", "vote", "voter", "voting", "voyage", "vulture", "wad", "wafer",
"waffle", "wage", "wagon", "waist", "waistband", "wait", "waiter", "waiting", "waitress", "waiver", "wake", "walk", "walker", "walking",
"walkway", "wall", "wallaby", "wallet", "walnut", "walrus", "wampum", "wannabe", "want", "war", "warden", "wardrobe", "warfare", "warlock",
"warlord", "warming", "warmth", "warning", "warrant", "warren", "warrior", "wasabi", "wash", "washbasin", "washcloth", "washer",
-"washtub", "wasp", "waste", "wastebasket", "wasting", "watch", "watcher", "watchmaker", "water", "waterbed", "watercress", "waterfall",
+"washtub", "wasp", "waste", "wastebasket", "wasting", "watcher", "watchmaker", "water", "waterbed", "watercress", "waterfall",
"waterfront", "watermelon", "waterskiing", "waterspout", "waterwheel", "wave", "waveform", "wax", "way", "weakness", "wealth", "weapon",
-"wear", "weasel", "weather", "web", "webinar", "webmail", "webpage", "website", "wedding", "wedge", "weed", "weeder", "weedkiller", "week",
+"wear", "weasel", "weather", "web", "webinar", "webmail", "webpage", "website", "wedding", "wedge", "weed", "weeder", "weedkiller",
"weekend", "weekender", "weight", "weird", "welcome", "welfare", "well", "west", "western", "wetland", "wetsuit",
"whack", "whale", "wharf", "wheat", "wheel", "whelp", "whey", "whip", "whirlpool", "whirlwind", "whisker", "whiskey", "whisper", "whistle",
"white", "whole", "wholesale", "wholesaler", "whorl", "wick", "widget", "widow", "width", "wife", "wifi", "wild", "wildebeest", "wilderness",
-"wildlife", "will", "willingness", "willow", "win", "wind", "windage", "window", "windscreen", "windshield", "wine", "winery",
+"wildlife", "will", "willingness", "willow", "win", "wind", "windage", "windscreen", "windshield", "wine", "winery",
"wing", "wingman", "wingtip", "wink", "winner", "winter", "wire", "wiretap", "wiring", "wisdom", "wiseguy", "wish", "wisteria", "wit",
"witch", "withdrawal", "witness", "wok", "wolf", "woman", "wombat", "wonder", "wont", "wood", "woodchuck", "woodland",
"woodshed", "woodwind", "wool", "woolens", "word", "wording", "work", "workbench", "worker", "workforce", "workhorse", "working", "workout",
"workplace", "workshop", "world", "worm", "worry", "worship", "worshiper", "worth", "wound", "wrap", "wraparound", "wrapper", "wrapping",
"wreck", "wrecker", "wren", "wrench", "wrestler", "wriggler", "wrinkle", "wrist", "writer", "writing", "wrong", "xylophone", "yacht",
-"yahoo", "yak", "yam", "yang", "yard", "yarmulke", "yarn", "yawl", "year", "yeast", "yellow", "yellowjacket", "yesterday", "yew", "yin",
+"yahoo", "yak", "yam", "yang", "yard", "yarmulke", "yarn", "yawl", "yeast", "yellow", "yellowjacket", "yesterday", "yew", "yin",
"yoga", "yogurt", "yoke", "yolk", "young", "youngster", "yourself", "youth", "yoyo", "yurt", "zampone", "zebra", "zebrafish", "zen",
"zephyr", "zero", "ziggurat", "zinc", "zipper", "zither", "zombie", "zone", "zoo", "zoologist", "zoology", "zucchini"
};
@@ -637,7 +1041,10 @@ void obfuscateIdentifier(std::string_view src, WriteBuffer & result, WordMap & o
{
std::string_view word(word_begin, src_pos - word_begin);
- if (keep_words.contains(word))
+ String wordcopy(word_begin, src_pos - word_begin);
+ Poco::toUpperInPlace(wordcopy);
+
+ if (keep_words.contains(wordcopy))
{
result.write(word.data(), word.size());
}
@@ -805,18 +1212,28 @@ void obfuscateLiteral(std::string_view src, WriteBuffer & result, SipHash hash_f
while (alpha_end < src_end && isAlphaASCII(*alpha_end))
++alpha_end;
- hash_func.update(src_pos, alpha_end - src_pos);
- pcg64 rng(hash_func.get64());
-
- while (src_pos < alpha_end)
+ String wordcopy(src_pos, alpha_end);
+ Poco::toUpperInPlace(wordcopy);
+ if (keep_words.contains(wordcopy))
{
- auto random = rng();
- if (isLowerAlphaASCII(*src_pos))
- result.write('a' + random % 26);
- else
- result.write('A' + random % 26);
+ result.write(src_pos, alpha_end - src_pos);
+ src_pos = alpha_end;
+ }
+ else
+ {
+ hash_func.update(src_pos, alpha_end - src_pos);
+ pcg64 rng(hash_func.get64());
- ++src_pos;
+ while (src_pos < alpha_end)
+ {
+ auto random = rng();
+ if (isLowerAlphaASCII(*src_pos))
+ result.write('a' + random % 26);
+ else
+ result.write('A' + random % 26);
+
+ ++src_pos;
+ }
}
}
else if (isASCII(src_pos[0]))
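The two hunks above make the keep-word check case-insensitive: both obfuscateIdentifier and obfuscateLiteral now uppercase the candidate before probing keep_words, so `Name`, `NAME` and `name` are all preserved verbatim. A minimal standalone sketch of that matching rule (the patch uses Poco::toUpperInPlace; std::transform is used here only to keep the sketch dependency-free):

    #include <algorithm>
    #include <cctype>
    #include <iostream>
    #include <string>
    #include <unordered_set>

    int main()
    {
        // keep_words stores uppercase entries, so the lookup becomes
        // case-insensitive once the candidate is uppercased as well.
        const std::unordered_set<std::string> keep_words{"ID", "NAME", "VALUE"};

        for (std::string word : {"Name", "VALUE", "visits"})
        {
            std::string upper = word;
            std::transform(upper.begin(), upper.end(), upper.begin(),
                           [](unsigned char c) { return std::toupper(c); });
            std::cout << word << (keep_words.contains(upper) ? ": kept\n" : ": obfuscated\n");
        }
    }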
diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp
index 2a9d06bc17b..0d302fda904 100644
--- a/src/Planner/Planner.cpp
+++ b/src/Planner/Planner.cpp
@@ -1377,8 +1377,7 @@ void Planner::buildPlanForQueryNode()
*/
if (query_node.hasLimit() && apply_limit && !limit_applied && apply_offset)
addLimitStep(query_plan, query_analysis_result, planner_context, query_node);
-
- if (apply_offset && query_node.hasOffset())
+ else if (!limit_applied && apply_offset && query_node.hasOffset())
addOffsetStep(query_plan, query_analysis_result);
const auto & projection_analysis_result = expression_analysis_result.getProjection();
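The Planner fix above prevents OFFSET from being applied twice: the step built by addLimitStep already incorporates the query's offset, so a standalone offset step must only be added when no limit step ran here and no limit was applied earlier in planning. A condensed restatement of the corrected control flow (helper names as in the hunk, the boolean names are illustrative):

    if (has_limit && apply_limit && !limit_applied && apply_offset)
        addLimitStep(...);   // one step handles LIMIT ... OFFSET ... together
    else if (!limit_applied && apply_offset && has_offset)
        addOffsetStep(...);  // standalone OFFSET, only when no LIMIT step covered it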
diff --git a/src/Processors/Transforms/StreamInQueryCacheTransform.cpp b/src/Processors/Transforms/StreamInQueryCacheTransform.cpp
new file mode 100644
index 00000000000..1ba57ea8ed2
--- /dev/null
+++ b/src/Processors/Transforms/StreamInQueryCacheTransform.cpp
@@ -0,0 +1,24 @@
+#include <Processors/Transforms/StreamInQueryCacheTransform.h>
+
+namespace DB
+{
+
+StreamInQueryCacheTransform::StreamInQueryCacheTransform(
+ const Block & header_, QueryCachePtr cache, const QueryCache::Key & cache_key, std::chrono::milliseconds min_query_duration)
+ : ISimpleTransform(header_, header_, false)
+ , cache_writer(cache->createWriter(cache_key, min_query_duration))
+{
+}
+
+void StreamInQueryCacheTransform::transform(Chunk & chunk)
+{
+ cache_writer.buffer(chunk.clone());
+}
+
+void StreamInQueryCacheTransform::finalizeWriteInQueryCache()
+{
+ if (!isCancelled())
+ cache_writer.finalizeWrite();
+}
+
+};
diff --git a/src/Processors/Transforms/StreamInQueryCacheTransform.h b/src/Processors/Transforms/StreamInQueryCacheTransform.h
new file mode 100644
index 00000000000..15d977cd445
--- /dev/null
+++ b/src/Processors/Transforms/StreamInQueryCacheTransform.h
@@ -0,0 +1,26 @@
+#pragma once
+
+#include <Interpreters/Cache/QueryCache.h>
+#include <Processors/ISimpleTransform.h>
+
+namespace DB
+{
+
+class StreamInQueryCacheTransform : public ISimpleTransform
+{
+public:
+ StreamInQueryCacheTransform(
+ const Block & header_, QueryCachePtr cache, const QueryCache::Key & cache_key, std::chrono::milliseconds min_query_duration);
+
+protected:
+ void transform(Chunk & chunk) override;
+
+public:
+ void finalizeWriteInQueryCache();
+ String getName() const override { return "StreamInQueryCacheTransform"; }
+
+private:
+ QueryCache::Writer cache_writer;
+};
+
+}
diff --git a/src/Processors/Transforms/StreamInQueryResultCacheTransform.cpp b/src/Processors/Transforms/StreamInQueryResultCacheTransform.cpp
deleted file mode 100644
index 841fcfdf8b5..00000000000
--- a/src/Processors/Transforms/StreamInQueryResultCacheTransform.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-#include <Processors/Transforms/StreamInQueryResultCacheTransform.h>
-
-namespace DB
-{
-
-StreamInQueryResultCacheTransform::StreamInQueryResultCacheTransform(
- const Block & header_, QueryResultCachePtr cache, const QueryResultCache::Key & cache_key, std::chrono::milliseconds min_query_duration)
- : ISimpleTransform(header_, header_, false)
- , cache_writer(cache->createWriter(cache_key, min_query_duration))
-{
-}
-
-void StreamInQueryResultCacheTransform::transform(Chunk & chunk)
-{
- cache_writer.buffer(chunk.clone());
-}
-
-void StreamInQueryResultCacheTransform::finalizeWriteInQueryResultCache()
-{
- if (!isCancelled())
- cache_writer.finalizeWrite();
-}
-
-};
diff --git a/src/Processors/Transforms/StreamInQueryResultCacheTransform.h b/src/Processors/Transforms/StreamInQueryResultCacheTransform.h
deleted file mode 100644
index a90d33a0681..00000000000
--- a/src/Processors/Transforms/StreamInQueryResultCacheTransform.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#pragma once
-
-#include <Interpreters/Cache/QueryResultCache.h>
-#include <Processors/ISimpleTransform.h>
-
-namespace DB
-{
-
-class StreamInQueryResultCacheTransform : public ISimpleTransform
-{
-public:
- StreamInQueryResultCacheTransform(
- const Block & header_, QueryResultCachePtr cache, const QueryResultCache::Key & cache_key, std::chrono::milliseconds min_query_duration);
-
-protected:
- void transform(Chunk & chunk) override;
-
-public:
- void finalizeWriteInQueryResultCache();
- String getName() const override { return "StreamInQueryResultCacheTransform"; }
-
-private:
- QueryResultCache::Writer cache_writer;
-};
-
-}
diff --git a/src/QueryPipeline/QueryPipeline.cpp b/src/QueryPipeline/QueryPipeline.cpp
index aa01801b1ec..b7b18014f1f 100644
--- a/src/QueryPipeline/QueryPipeline.cpp
+++ b/src/QueryPipeline/QueryPipeline.cpp
@@ -18,7 +18,7 @@
#include
#include
#include
-#include <Processors/Transforms/StreamInQueryResultCacheTransform.h>
+#include <Processors/Transforms/StreamInQueryCacheTransform.h>
#include
#include
@@ -525,7 +525,7 @@ bool QueryPipeline::tryGetResultRowsAndBytes(UInt64 & result_rows, UInt64 & resu
return true;
}
-void QueryPipeline::streamIntoQueryResultCache(std::shared_ptr<StreamInQueryResultCacheTransform> transform)
+void QueryPipeline::streamIntoQueryCache(std::shared_ptr<StreamInQueryCacheTransform> transform)
{
assert(pulling());
@@ -534,16 +534,16 @@ void QueryPipeline::streamIntoQueryResultCache(std::shared_ptr<StreamInQueryResultCacheTransform> transform)
processors->emplace_back(transform);
}
-void QueryPipeline::finalizeWriteInQueryResultCache()
+void QueryPipeline::finalizeWriteInQueryCache()
{
auto it = std::find_if(
processors->begin(), processors->end(),
- [](ProcessorPtr processor){ return dynamic_cast<StreamInQueryResultCacheTransform *>(&*processor); });
+ [](ProcessorPtr processor){ return dynamic_cast<StreamInQueryCacheTransform *>(&*processor); });
- /// the pipeline should theoretically contain just one StreamInQueryResultCacheTransform
+ /// the pipeline should theoretically contain just one StreamInQueryCacheTransform
if (it != processors->end())
- dynamic_cast<StreamInQueryResultCacheTransform &>(**it).finalizeWriteInQueryResultCache();
+ dynamic_cast<StreamInQueryCacheTransform &>(**it).finalizeWriteInQueryCache();
}
void QueryPipeline::addStorageHolder(StoragePtr storage)
diff --git a/src/QueryPipeline/QueryPipeline.h b/src/QueryPipeline/QueryPipeline.h
index da43aa035f3..55c78ca78ed 100644
--- a/src/QueryPipeline/QueryPipeline.h
+++ b/src/QueryPipeline/QueryPipeline.h
@@ -31,7 +31,7 @@ class SinkToStorage;
class ISource;
class ISink;
class ReadProgressCallback;
-class StreamInQueryResultCacheTransform;
+class StreamInQueryCacheTransform;
struct ColumnWithTypeAndName;
using ColumnsWithTypeAndName = std::vector<ColumnWithTypeAndName>;
@@ -105,8 +105,8 @@ public:
void setLimitsAndQuota(const StreamLocalLimits & limits, std::shared_ptr<const EnabledQuota> quota_);
bool tryGetResultRowsAndBytes(UInt64 & result_rows, UInt64 & result_bytes) const;
- void streamIntoQueryResultCache(std::shared_ptr<StreamInQueryResultCacheTransform> transform);
- void finalizeWriteInQueryResultCache();
+ void streamIntoQueryCache(std::shared_ptr<StreamInQueryCacheTransform> transform);
+ void finalizeWriteInQueryCache();
void setQuota(std::shared_ptr<const EnabledQuota> quota_);
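Together with the transform rename above, the two renamed pipeline hooks are used roughly as follows when a query's result should be stored in the query cache; the local names and the surrounding setup are illustrative, not part of the patch:

    auto transform = std::make_shared<StreamInQueryCacheTransform>(
        pipeline.getHeader(), query_cache, cache_key, min_query_duration);
    pipeline.streamIntoQueryCache(transform);   // buffers every chunk pulled from the pipeline
    // ... pull the pipeline to completion ...
    pipeline.finalizeWriteInQueryCache();       // internally a no-op if the query was cancelled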
diff --git a/src/Storages/HDFS/StorageHDFSCluster.cpp b/src/Storages/HDFS/StorageHDFSCluster.cpp
index 91204d852ae..f6e6f773d6c 100644
--- a/src/Storages/HDFS/StorageHDFSCluster.cpp
+++ b/src/Storages/HDFS/StorageHDFSCluster.cpp
@@ -8,6 +8,7 @@
#include
#include
#include
+#include
#include
#include
#include
diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h
index a4825358d6e..699780db0b9 100644
--- a/src/Storages/IStorage.h
+++ b/src/Storages/IStorage.h
@@ -487,7 +487,7 @@ public:
}
/// Mutate the table contents
- virtual void mutate(const MutationCommands &, ContextPtr, bool /*force_wait*/)
+ virtual void mutate(const MutationCommands &, ContextPtr)
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Mutations are not supported by storage {}", getName());
}
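With force_wait removed from the interface, every engine override shrinks to two parameters, and whether mutate() blocks is derived from the query context alone. An illustrative override (StorageFoo and its helpers are hypothetical; the waiting rule mirrors the StorageMergeTree hunk below):

    void StorageFoo::mutate(const MutationCommands & commands, ContextPtr query_context)
    {
        Int64 version = startMutation(commands, query_context);  // hypothetical helper
        // blocking is now controlled solely by the mutations_sync setting
        if (query_context->getSettingsRef().mutations_sync > 0)
            waitForMutation(version);                             // hypothetical helper
    }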
diff --git a/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp b/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp
index 3aa68266a3e..d55248df0af 100644
--- a/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp
+++ b/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp
@@ -165,7 +165,7 @@ MergeTreeDataPartBuilder & MergeTreeDataPartBuilder::withPartFormatFromVolume()
if (!storage || !mark_type)
{
/// Didn't find any data or mark file, suppose that part is empty.
- return withBytesAndRows(0, 0);
+ return withBytesAndRowsOnDisk(0, 0);
}
part_storage = std::move(storage);
@@ -181,7 +181,7 @@ MergeTreeDataPartBuilder & MergeTreeDataPartBuilder::withPartFormatFromStorage()
if (!mark_type)
{
/// Didn't find any mark file, suppose that part is empty.
- return withBytesAndRows(0, 0);
+ return withBytesAndRowsOnDisk(0, 0);
}
part_type = mark_type->part_type;
diff --git a/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp b/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp
index 86c0dffa60d..90034c81f10 100644
--- a/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp
+++ b/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp
@@ -217,7 +217,7 @@ void StorageEmbeddedRocksDB::checkMutationIsPossible(const MutationCommands & co
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Only DELETE and UPDATE mutation supported for EmbeddedRocksDB");
}
-void StorageEmbeddedRocksDB::mutate(const MutationCommands & commands, ContextPtr context_, bool /*force_wait*/)
+void StorageEmbeddedRocksDB::mutate(const MutationCommands & commands, ContextPtr context_)
{
if (commands.empty())
return;
diff --git a/src/Storages/RocksDB/StorageEmbeddedRocksDB.h b/src/Storages/RocksDB/StorageEmbeddedRocksDB.h
index 7f6fc49fb18..32d7740009e 100644
--- a/src/Storages/RocksDB/StorageEmbeddedRocksDB.h
+++ b/src/Storages/RocksDB/StorageEmbeddedRocksDB.h
@@ -52,7 +52,7 @@ public:
void truncate(const ASTPtr &, const StorageMetadataPtr & metadata_snapshot, ContextPtr, TableExclusiveLockHolder &) override;
void checkMutationIsPossible(const MutationCommands & commands, const Settings & settings) const override;
- void mutate(const MutationCommands &, ContextPtr, bool) override;
+ void mutate(const MutationCommands &, ContextPtr) override;
bool supportsParallelInsert() const override { return true; }
bool supportsIndexForIn() const override { return true; }
diff --git a/src/Storages/StorageJoin.cpp b/src/Storages/StorageJoin.cpp
index f4cf0875059..b57e717c272 100644
--- a/src/Storages/StorageJoin.cpp
+++ b/src/Storages/StorageJoin.cpp
@@ -108,7 +108,7 @@ void StorageJoin::checkMutationIsPossible(const MutationCommands & commands, con
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Table engine Join supports only DELETE mutations");
}
-void StorageJoin::mutate(const MutationCommands & commands, ContextPtr context, bool /*force_wait*/)
+void StorageJoin::mutate(const MutationCommands & commands, ContextPtr context)
{
/// Firstly acquire lock for mutation, that locks changes of data.
/// We cannot acquire rwlock here, because read lock is needed
diff --git a/src/Storages/StorageJoin.h b/src/Storages/StorageJoin.h
index 96afd442c72..61ea743c841 100644
--- a/src/Storages/StorageJoin.h
+++ b/src/Storages/StorageJoin.h
@@ -45,7 +45,7 @@ public:
/// Only delete is supported.
void checkMutationIsPossible(const MutationCommands & commands, const Settings & settings) const override;
- void mutate(const MutationCommands & commands, ContextPtr context, bool force_wait) override;
+ void mutate(const MutationCommands & commands, ContextPtr context) override;
/// Return instance of HashJoin holding lock that protects from insertions to StorageJoin.
/// HashJoin relies on structure of hash table that's why we need to return it with locked mutex.
diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp
index d424344e7bf..971ecf8dbf2 100644
--- a/src/Storages/StorageMaterializedView.cpp
+++ b/src/Storages/StorageMaterializedView.cpp
@@ -319,10 +319,10 @@ void StorageMaterializedView::checkAlterPartitionIsPossible(
getTargetTable()->checkAlterPartitionIsPossible(commands, metadata_snapshot, settings);
}
-void StorageMaterializedView::mutate(const MutationCommands & commands, ContextPtr local_context, bool force_wait)
+void StorageMaterializedView::mutate(const MutationCommands & commands, ContextPtr local_context)
{
checkStatementCanBeForwarded();
- getTargetTable()->mutate(commands, local_context, force_wait);
+ getTargetTable()->mutate(commands, local_context);
}
void StorageMaterializedView::renameInMemory(const StorageID & new_table_id)
diff --git a/src/Storages/StorageMaterializedView.h b/src/Storages/StorageMaterializedView.h
index c0fee7e870b..af2dedf8164 100644
--- a/src/Storages/StorageMaterializedView.h
+++ b/src/Storages/StorageMaterializedView.h
@@ -65,7 +65,7 @@ public:
void checkAlterPartitionIsPossible(const PartitionCommands & commands, const StorageMetadataPtr & metadata_snapshot, const Settings & settings) const override;
- void mutate(const MutationCommands & commands, ContextPtr context, bool force_wait) override;
+ void mutate(const MutationCommands & commands, ContextPtr context) override;
void renameInMemory(const StorageID & new_table_id) override;
diff --git a/src/Storages/StorageMemory.cpp b/src/Storages/StorageMemory.cpp
index f1b33977e27..881cbc18b10 100644
--- a/src/Storages/StorageMemory.cpp
+++ b/src/Storages/StorageMemory.cpp
@@ -305,7 +305,7 @@ void StorageMemory::checkMutationIsPossible(const MutationCommands & /*commands*
/// Some validation will be added
}
-void StorageMemory::mutate(const MutationCommands & commands, ContextPtr context, bool /*force_wait*/)
+void StorageMemory::mutate(const MutationCommands & commands, ContextPtr context)
{
std::lock_guard lock(mutex);
auto metadata_snapshot = getInMemoryMetadataPtr();
diff --git a/src/Storages/StorageMemory.h b/src/Storages/StorageMemory.h
index 2274a27a267..c739088dbe4 100644
--- a/src/Storages/StorageMemory.h
+++ b/src/Storages/StorageMemory.h
@@ -67,7 +67,7 @@ public:
void drop() override;
void checkMutationIsPossible(const MutationCommands & commands, const Settings & settings) const override;
- void mutate(const MutationCommands & commands, ContextPtr context, bool force_wait) override;
+ void mutate(const MutationCommands & commands, ContextPtr context) override;
void truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr, TableExclusiveLockHolder &) override;
diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp
index 60c5157d463..4ef34ae91d5 100644
--- a/src/Storages/StorageMergeTree.cpp
+++ b/src/Storages/StorageMergeTree.cpp
@@ -532,14 +532,14 @@ void StorageMergeTree::setMutationCSN(const String & mutation_id, CSN csn)
it->second.writeCSN(csn);
}
-void StorageMergeTree::mutate(const MutationCommands & commands, ContextPtr query_context, bool force_wait)
+void StorageMergeTree::mutate(const MutationCommands & commands, ContextPtr query_context)
{
/// Validate partition IDs (if any) before starting mutation
getPartitionIdsAffectedByCommands(commands, query_context);
Int64 version = startMutation(commands, query_context);
- if (force_wait || query_context->getSettingsRef().mutations_sync > 0 || query_context->getCurrentTransaction())
+ if (query_context->getSettingsRef().mutations_sync > 0 || query_context->getCurrentTransaction())
waitForMutation(version);
}
diff --git a/src/Storages/StorageMergeTree.h b/src/Storages/StorageMergeTree.h
index 706ceda17b3..1dff6323e4c 100644
--- a/src/Storages/StorageMergeTree.h
+++ b/src/Storages/StorageMergeTree.h
@@ -85,7 +85,7 @@ public:
const Names & deduplicate_by_columns,
ContextPtr context) override;
- void mutate(const MutationCommands & commands, ContextPtr context, bool force_wait) override;
+ void mutate(const MutationCommands & commands, ContextPtr context) override;
bool hasLightweightDeletedMask() const override;
diff --git a/src/Storages/StorageProxy.h b/src/Storages/StorageProxy.h
index 2ce5f85e11f..2afd9e8a63b 100644
--- a/src/Storages/StorageProxy.h
+++ b/src/Storages/StorageProxy.h
@@ -132,7 +132,7 @@ public:
return getNested()->optimize(query, metadata_snapshot, partition, final, deduplicate, deduplicate_by_columns, context);
}
- void mutate(const MutationCommands & commands, ContextPtr context, bool force_wait) override { getNested()->mutate(commands, context, force_wait); }
+ void mutate(const MutationCommands & commands, ContextPtr context) override { getNested()->mutate(commands, context); }
CancellationCode killMutation(const String & mutation_id) override { return getNested()->killMutation(mutation_id); }
diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp
index 33bf0122361..62c7992bafb 100644
--- a/src/Storages/StorageReplicatedMergeTree.cpp
+++ b/src/Storages/StorageReplicatedMergeTree.cpp
@@ -6276,7 +6276,7 @@ void StorageReplicatedMergeTree::fetchPartition(
}
-void StorageReplicatedMergeTree::mutate(const MutationCommands & commands, ContextPtr query_context, bool force_wait)
+void StorageReplicatedMergeTree::mutate(const MutationCommands & commands, ContextPtr query_context)
{
/// Overview of the mutation algorithm.
///
@@ -6390,8 +6390,7 @@ void StorageReplicatedMergeTree::mutate(const MutationCommands & commands, Conte
throw Coordination::Exception("Unable to create a mutation znode", rc);
}
- const size_t mutations_sync = force_wait ? 2 : query_context->getSettingsRef().mutations_sync;
- waitMutation(mutation_entry.znode_name, mutations_sync);
+ waitMutation(mutation_entry.znode_name, query_context->getSettingsRef().mutations_sync);
}
void StorageReplicatedMergeTree::waitMutation(const String & znode_name, size_t mutations_sync) const
diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h
index 568f9709aaa..042e6acf4e2 100644
--- a/src/Storages/StorageReplicatedMergeTree.h
+++ b/src/Storages/StorageReplicatedMergeTree.h
@@ -153,7 +153,7 @@ public:
void alter(const AlterCommands & commands, ContextPtr query_context, AlterLockHolder & table_lock_holder) override;
- void mutate(const MutationCommands & commands, ContextPtr context, bool force_wait) override;
+ void mutate(const MutationCommands & commands, ContextPtr context) override;
void waitMutation(const String & znode_name, size_t mutations_sync) const;
std::vector getMutationsStatus() const override;
CancellationCode killMutation(const String & mutation_id) override;
diff --git a/src/Storages/StorageS3Cluster.cpp b/src/Storages/StorageS3Cluster.cpp
index 3ee10113b32..0ef02cac790 100644
--- a/src/Storages/StorageS3Cluster.cpp
+++ b/src/Storages/StorageS3Cluster.cpp
@@ -8,6 +8,7 @@
#include "Client/Connection.h"
#include "Core/QueryProcessingStage.h"
#include
+#include
#include
#include
#include
diff --git a/src/Storages/System/StorageSystemQueryResultCache.cpp b/src/Storages/System/StorageSystemQueryCache.cpp
similarity index 68%
rename from src/Storages/System/StorageSystemQueryResultCache.cpp
rename to src/Storages/System/StorageSystemQueryCache.cpp
index cb6349b6d47..2de8e4594b9 100644
--- a/src/Storages/System/StorageSystemQueryResultCache.cpp
+++ b/src/Storages/System/StorageSystemQueryCache.cpp
@@ -1,15 +1,15 @@
-#include "StorageSystemQueryResultCache.h"
+#include "StorageSystemQueryCache.h"
#include
#include
#include
-#include <Interpreters/Cache/QueryResultCache.h>
+#include <Interpreters/Cache/QueryCache.h>
#include
namespace DB
{
-NamesAndTypesList StorageSystemQueryResultCache::getNamesAndTypes()
+NamesAndTypesList StorageSystemQueryCache::getNamesAndTypes()
{
return {
{"query", std::make_shared()},
@@ -21,23 +21,23 @@ NamesAndTypesList StorageSystemQueryResultCache::getNamesAndTypes()
};
}
-StorageSystemQueryResultCache::StorageSystemQueryResultCache(const StorageID & table_id_)
+StorageSystemQueryCache::StorageSystemQueryCache(const StorageID & table_id_)
: IStorageSystemOneBlock(table_id_)
{
}
-void StorageSystemQueryResultCache::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const
+void StorageSystemQueryCache::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const
{
- auto query_result_cache = context->getQueryResultCache();
+ auto query_cache = context->getQueryCache();
- if (!query_result_cache)
+ if (!query_cache)
return;
const String & username = context->getUserName();
- std::lock_guard lock(query_result_cache->mutex);
+ std::lock_guard lock(query_cache->mutex);
- for (const auto & [key, result] : query_result_cache->cache)
+ for (const auto & [key, result] : query_cache->cache)
{
/// Showing other user's queries is considered a security risk
if (key.username.has_value() && key.username != username)
diff --git a/src/Storages/System/StorageSystemQueryResultCache.h b/src/Storages/System/StorageSystemQueryCache.h
similarity index 52%
rename from src/Storages/System/StorageSystemQueryResultCache.h
rename to src/Storages/System/StorageSystemQueryCache.h
index 4862878a31a..5ff5f0a0454 100644
--- a/src/Storages/System/StorageSystemQueryResultCache.h
+++ b/src/Storages/System/StorageSystemQueryCache.h
@@ -5,12 +5,12 @@
namespace DB
{
-class StorageSystemQueryResultCache final : public IStorageSystemOneBlock<StorageSystemQueryResultCache>
+class StorageSystemQueryCache final : public IStorageSystemOneBlock<StorageSystemQueryCache>
{
public:
- explicit StorageSystemQueryResultCache(const StorageID & table_id_);
+ explicit StorageSystemQueryCache(const StorageID & table_id_);
- std::string getName() const override { return "SystemQueryResultCache"; }
+ std::string getName() const override { return "SystemQueryCache"; }
static NamesAndTypesList getNamesAndTypes();
diff --git a/src/Storages/System/attachSystemTables.cpp b/src/Storages/System/attachSystemTables.cpp
index eeb08d7e3d4..07db151069f 100644
--- a/src/Storages/System/attachSystemTables.cpp
+++ b/src/Storages/System/attachSystemTables.cpp
@@ -73,7 +73,7 @@
#include
#include
#include
-#include <Storages/System/StorageSystemQueryResultCache.h>
+#include <Storages/System/StorageSystemQueryCache.h>
#include
#include
#include
@@ -176,7 +176,7 @@ void attachSystemTablesServer(ContextPtr context, IDatabase & system_database, b
attach(context, system_database, "part_moves_between_shards");
attach(context, system_database, "asynchronous_inserts");
attach(context, system_database, "filesystem_cache");
- attach(context, system_database, "query_result_cache");
+ attach(context, system_database, "query_cache");
attach(context, system_database, "remote_data_paths");
attach(context, system_database, "certificates");
attach(context, system_database, "named_collections");
diff --git a/src/TableFunctions/TableFunctionFormat.cpp b/src/TableFunctions/TableFunctionFormat.cpp
index f2a92b41560..1e37775f574 100644
--- a/src/TableFunctions/TableFunctionFormat.cpp
+++ b/src/TableFunctions/TableFunctionFormat.cpp
@@ -4,6 +4,7 @@
#include
#include
+#include <Interpreters/parseColumnsListForTableFunction.h>
#include
@@ -38,23 +39,29 @@ void TableFunctionFormat::parseArguments(const ASTPtr & ast_function, ContextPtr
ASTs & args = args_func.at(0)->children;
- if (args.size() != 2)
- throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Table function '{}' requires 2 arguments: format and data", getName());
+ if (args.size() != 2 && args.size() != 3)
+ throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Table function '{}' requires 2 or 3 arguments: format, [structure], data", getName());
for (auto & arg : args)
arg = evaluateConstantExpressionOrIdentifierAsLiteral(arg, context);
format = checkAndGetLiteralArgument<String>(args[0], "format");
- data = checkAndGetLiteralArgument<String>(args[1], "data");
+ data = checkAndGetLiteralArgument<String>(args.back(), "data");
+ if (args.size() == 3)
+ structure = checkAndGetLiteralArgument<String>(args[1], "structure");
}
ColumnsDescription TableFunctionFormat::getActualTableStructure(ContextPtr context) const
{
- ReadBufferIterator read_buffer_iterator = [&](ColumnsDescription &)
+ if (structure == "auto")
{
- return std::make_unique<ReadBufferFromString>(data);
- };
- return readSchemaFromFormat(format, std::nullopt, read_buffer_iterator, false, context);
+ ReadBufferIterator read_buffer_iterator = [&](ColumnsDescription &)
+ {
+ return std::make_unique<ReadBufferFromString>(data);
+ };
+ return readSchemaFromFormat(format, std::nullopt, read_buffer_iterator, false, context);
+ }
+ return parseColumnsListFromString(structure, context);
}
Block TableFunctionFormat::parseData(ColumnsDescription columns, ContextPtr context) const
diff --git a/src/TableFunctions/TableFunctionFormat.h b/src/TableFunctions/TableFunctionFormat.h
index c6db322343b..d64ab14cb64 100644
--- a/src/TableFunctions/TableFunctionFormat.h
+++ b/src/TableFunctions/TableFunctionFormat.h
@@ -28,6 +28,7 @@ private:
String format;
String data;
+ String structure = "auto";
};
}
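The optional middle argument lets callers pin the schema instead of inferring it: format(JSONEachRow, data) keeps inference, while format(JSONEachRow, 'x UInt32, y String', data) uses the given structure. The argument mapping reduces to the sketch below (a condensed view of the parseArguments hunk, not additional behavior):

    // 2 args: (format, data); 3 args: (format, structure, data).
    // structure defaults to "auto", which preserves schema inference.
    format = checkAndGetLiteralArgument<String>(args[0], "format");
    data = checkAndGetLiteralArgument<String>(args.back(), "data");
    if (args.size() == 3)
        structure = checkAndGetLiteralArgument<String>(args[1], "structure");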
diff --git a/tests/ci/clickhouse_helper.py b/tests/ci/clickhouse_helper.py
index f914bb42d99..654cd6869dc 100644
--- a/tests/ci/clickhouse_helper.py
+++ b/tests/ci/clickhouse_helper.py
@@ -183,6 +183,7 @@ def prepare_tests_results_for_clickhouse(
current_row["test_duration_ms"] = int(test_time * 1000)
current_row["test_name"] = test_name
current_row["test_status"] = test_status
+ current_row["test_context_raw"] = test_result.raw_logs or ""
result.append(current_row)
return result
diff --git a/tests/ci/report.py b/tests/ci/report.py
index da04411632d..d33ed4a9d91 100644
--- a/tests/ci/report.py
+++ b/tests/ci/report.py
@@ -211,7 +211,7 @@ def read_test_results(results_path: Path, with_raw_logs: bool = True) -> TestRes
name = line[0]
status = line[1]
time = None
- if len(line) >= 3 and line[2]:
+ if len(line) >= 3 and line[2] and line[2] != "\\N":
# The value can be empty, but when it's not,
# it's the time spent on the test
try:
diff --git a/tests/ci/stress_check.py b/tests/ci/stress_check.py
index c8edf44b16b..fb38969cb23 100644
--- a/tests/ci/stress_check.py
+++ b/tests/ci/stress_check.py
@@ -95,7 +95,7 @@ def process_results(
try:
results_path = Path(result_folder) / "test_results.tsv"
- test_results = read_test_results(results_path, False)
+ test_results = read_test_results(results_path, True)
if len(test_results) == 0:
raise Exception("Empty results")
except Exception as e:
diff --git a/tests/integration/test_mask_sensitive_info/configs/named_collections.xml b/tests/integration/test_mask_sensitive_info/configs/named_collections.xml
new file mode 100644
index 00000000000..ee923a90171
--- /dev/null
+++ b/tests/integration/test_mask_sensitive_info/configs/named_collections.xml
@@ -0,0 +1,10 @@
+<clickhouse>
+    <named_collections>
+        <named_collection_1/>
+        <named_collection_2/>
+        <named_collection_3/>
+        <named_collection_4/>
+        <named_collection_5/>
+        <named_collection_6/>
+    </named_collections>
+</clickhouse>
diff --git a/tests/integration/test_mask_sensitive_info/test.py b/tests/integration/test_mask_sensitive_info/test.py
index f938148e5a0..3f71b047213 100644
--- a/tests/integration/test_mask_sensitive_info/test.py
+++ b/tests/integration/test_mask_sensitive_info/test.py
@@ -4,7 +4,13 @@ from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
cluster = ClickHouseCluster(__file__)
-node = cluster.add_instance("node", with_zookeeper=True)
+node = cluster.add_instance(
+ "node",
+ main_configs=[
+ "configs/named_collections.xml",
+ ],
+ with_zookeeper=True,
+)
@pytest.fixture(scope="module", autouse=True)
@@ -116,6 +122,12 @@ def test_create_table():
f"S3('http://minio1:9001/root/data/test3.csv.gz', 'CSV', 'gzip')",
f"S3('http://minio1:9001/root/data/test4.csv', 'minio', '{password}', 'CSV')",
f"S3('http://minio1:9001/root/data/test5.csv.gz', 'minio', '{password}', 'CSV', 'gzip')",
+ f"MySQL(named_collection_1, host = 'mysql57', port = 3306, database = 'mysql_db', table = 'mysql_table', user = 'mysql_user', password = '{password}')",
+ f"MySQL(named_collection_2, database = 'mysql_db', host = 'mysql57', port = 3306, password = '{password}', table = 'mysql_table', user = 'mysql_user')",
+ f"MySQL(named_collection_3, database = 'mysql_db', host = 'mysql57', port = 3306, table = 'mysql_table')",
+ f"PostgreSQL(named_collection_4, host = 'postgres1', port = 5432, database = 'postgres_db', table = 'postgres_table', user = 'postgres_user', password = '{password}')",
+ f"MongoDB(named_collection_5, host = 'mongo1', port = 5432, database = 'mongo_db', collection = 'mongo_col', user = 'mongo_user', password = '{password}')",
+ f"S3(named_collection_6, url = 'http://minio1:9001/root/data/test8.csv', access_key_id = 'minio', secret_access_key = '{password}', format = 'CSV')",
]
for i, table_engine in enumerate(table_engines):
@@ -147,6 +159,12 @@ def test_create_table():
"CREATE TABLE table5 (x int) ENGINE = S3('http://minio1:9001/root/data/test3.csv.gz', 'CSV', 'gzip')",
"CREATE TABLE table6 (`x` int) ENGINE = S3('http://minio1:9001/root/data/test4.csv', 'minio', '[HIDDEN]', 'CSV')",
"CREATE TABLE table7 (`x` int) ENGINE = S3('http://minio1:9001/root/data/test5.csv.gz', 'minio', '[HIDDEN]', 'CSV', 'gzip')",
+ "CREATE TABLE table8 (`x` int) ENGINE = MySQL(named_collection_1, host = 'mysql57', port = 3306, database = 'mysql_db', table = 'mysql_table', user = 'mysql_user', password = '[HIDDEN]')",
+ "CREATE TABLE table9 (`x` int) ENGINE = MySQL(named_collection_2, database = 'mysql_db', host = 'mysql57', port = 3306, password = '[HIDDEN]', table = 'mysql_table', user = 'mysql_user')",
+ "CREATE TABLE table10 (x int) ENGINE = MySQL(named_collection_3, database = 'mysql_db', host = 'mysql57', port = 3306, table = 'mysql_table')",
+ "CREATE TABLE table11 (`x` int) ENGINE = PostgreSQL(named_collection_4, host = 'postgres1', port = 5432, database = 'postgres_db', table = 'postgres_table', user = 'postgres_user', password = '[HIDDEN]')",
+ "CREATE TABLE table12 (`x` int) ENGINE = MongoDB(named_collection_5, host = 'mongo1', port = 5432, database = 'mongo_db', collection = 'mongo_col', user = 'mongo_user', password = '[HIDDEN]'",
+ "CREATE TABLE table13 (`x` int) ENGINE = S3(named_collection_6, url = 'http://minio1:9001/root/data/test8.csv', access_key_id = 'minio', secret_access_key = '[HIDDEN]', format = 'CSV')",
],
must_not_contain=[password],
)
@@ -160,6 +178,7 @@ def test_create_database():
database_engines = [
f"MySQL('localhost:3306', 'mysql_db', 'mysql_user', '{password}') SETTINGS connect_timeout=1, connection_max_tries=1",
+ f"MySQL(named_collection_1, host = 'localhost', port = 3306, database = 'mysql_db', user = 'mysql_user', password = '{password}') SETTINGS connect_timeout=1, connection_max_tries=1",
# f"PostgreSQL('localhost:5432', 'postgres_db', 'postgres_user', '{password}')",
]
@@ -173,7 +192,8 @@ def test_create_database():
check_logs(
must_contain=[
"CREATE DATABASE database0 ENGINE = MySQL('localhost:3306', 'mysql_db', 'mysql_user', '[HIDDEN]')",
- # "CREATE DATABASE database1 ENGINE = PostgreSQL('localhost:5432', 'postgres_db', 'postgres_user', '[HIDDEN]')",
+ "CREATE DATABASE database1 ENGINE = MySQL(named_collection_1, host = 'localhost', port = 3306, database = 'mysql_db', user = 'mysql_user', password = '[HIDDEN]')",
+ # "CREATE DATABASE database2 ENGINE = PostgreSQL('localhost:5432', 'postgres_db', 'postgres_user', '[HIDDEN]')",
],
must_not_contain=[password],
)
@@ -211,6 +231,11 @@ def test_table_functions():
f"remote('127.{{2..11}}', numbers(10), 'remote_user', '{password}', rand())",
f"remoteSecure('127.{{2..11}}', 'default', 'remote_table', 'remote_user', '{password}')",
f"remoteSecure('127.{{2..11}}', 'default', 'remote_table', 'remote_user', rand())",
+ f"mysql(named_collection_1, host = 'mysql57', port = 3306, database = 'mysql_db', table = 'mysql_table', user = 'mysql_user', password = '{password}')",
+ f"postgresql(named_collection_2, password = '{password}', host = 'postgres1', port = 5432, database = 'postgres_db', table = 'postgres_table', user = 'postgres_user')",
+ f"s3(named_collection_3, url = 'http://minio1:9001/root/data/test4.csv', access_key_id = 'minio', secret_access_key = '{password}')",
+ f"remote(named_collection_4, addresses_expr = '127.{{2..11}}', database = 'default', table = 'remote_table', user = 'remote_user', password = '{password}', sharding_key = rand())",
+ f"remoteSecure(named_collection_5, addresses_expr = '127.{{2..11}}', database = 'default', table = 'remote_table', user = 'remote_user', password = '{password}')",
]
for i, table_function in enumerate(table_functions):
@@ -259,6 +284,11 @@ def test_table_functions():
"CREATE TABLE tablefunc22 (`x` int) AS remote('127.{2..11}', numbers(10), 'remote_user', '[HIDDEN]', rand())",
"CREATE TABLE tablefunc23 (`x` int) AS remoteSecure('127.{2..11}', 'default', 'remote_table', 'remote_user', '[HIDDEN]')",
"CREATE TABLE tablefunc24 (x int) AS remoteSecure('127.{2..11}', 'default', 'remote_table', 'remote_user', rand())",
+ "CREATE TABLE tablefunc25 (`x` int) AS mysql(named_collection_1, host = 'mysql57', port = 3306, database = 'mysql_db', table = 'mysql_table', user = 'mysql_user', password = '[HIDDEN]')",
+ "CREATE TABLE tablefunc26 (`x` int) AS postgresql(named_collection_2, password = '[HIDDEN]', host = 'postgres1', port = 5432, database = 'postgres_db', table = 'postgres_table', user = 'postgres_user')",
+ "CREATE TABLE tablefunc27 (`x` int) AS s3(named_collection_3, url = 'http://minio1:9001/root/data/test4.csv', access_key_id = 'minio', secret_access_key = '[HIDDEN]')",
+ "CREATE TABLE tablefunc28 (`x` int) AS remote(named_collection_4, addresses_expr = '127.{2..11}', database = 'default', table = 'remote_table', user = 'remote_user', password = '[HIDDEN]', sharding_key = rand())",
+ "CREATE TABLE tablefunc29 (`x` int) AS remoteSecure(named_collection_5, addresses_expr = '127.{2..11}', database = 'default', table = 'remote_table', user = 'remote_user', password = '[HIDDEN]')",
],
must_not_contain=[password],
)
diff --git a/tests/integration/test_replicated_database/test.py b/tests/integration/test_replicated_database/test.py
index d3fcc89561a..8160a6b47a7 100644
--- a/tests/integration/test_replicated_database/test.py
+++ b/tests/integration/test_replicated_database/test.py
@@ -212,6 +212,48 @@ def test_simple_alter_table(started_cluster, engine):
competing_node.query("DROP DATABASE testdb SYNC")
+@pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"])
+def test_delete_from_table(started_cluster, engine):
+ main_node.query(
+ "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');"
+ )
+ dummy_node.query(
+ "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard2', 'replica1');"
+ )
+
+ name = "testdb.delete_test_{}".format(engine)
+ main_node.query(
+ "CREATE TABLE {} "
+ "(id UInt64, value String) "
+ "ENGINE = {} PARTITION BY id%2 ORDER BY (id);".format(name, engine)
+ )
+ main_node.query("INSERT INTO TABLE {} VALUES(1, 'aaaa');".format(name))
+ main_node.query("INSERT INTO TABLE {} VALUES(2, 'aaaa');".format(name))
+ dummy_node.query("INSERT INTO TABLE {} VALUES(1, 'bbbb');".format(name))
+ dummy_node.query("INSERT INTO TABLE {} VALUES(2, 'bbbb');".format(name))
+
+ main_node.query(
+ "SET allow_experimental_lightweight_delete=1; DELETE FROM {} WHERE id=2;".format(
+ name
+ )
+ )
+
+ expected = "1\taaaa\n1\tbbbb"
+
+ table_for_select = name
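+ # A plain MergeTree table is not replicated, so read via cluster() to check the rows on both shards.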
+ if not "Replicated" in engine:
+ table_for_select = "cluster('testdb', {})".format(name)
+ for node in [main_node, dummy_node]:
+ assert_eq_with_retry(
+ node,
+ "SELECT * FROM {} ORDER BY id, value;".format(table_for_select),
+ expected,
+ )
+
+ main_node.query("DROP DATABASE testdb SYNC")
+ dummy_node.query("DROP DATABASE testdb SYNC")
+
+
def get_table_uuid(database, name):
return main_node.query(
f"SELECT uuid FROM system.tables WHERE database = '{database}' and name = '{name}'"
diff --git a/tests/queries/0_stateless/00304_http_external_data.sh b/tests/queries/0_stateless/00304_http_external_data.sh
index 4a097249cca..def17bc5cd1 100755
--- a/tests/queries/0_stateless/00304_http_external_data.sh
+++ b/tests/queries/0_stateless/00304_http_external_data.sh
@@ -6,4 +6,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
echo -ne '1,Hello\n2,World\n' | ${CLICKHOUSE_CURL} -sSF 'file=@-' "${CLICKHOUSE_URL}&query=SELECT+*+FROM+file&file_format=CSV&file_types=UInt8,String";
echo -ne '1@Hello\n2@World\n' | ${CLICKHOUSE_CURL} -sSF 'file=@-' "${CLICKHOUSE_URL}&query=SELECT+*+FROM+file&file_format=CSV&file_types=UInt8,String&format_csv_delimiter=@";
-echo -ne '\x01\x00\x00\x00\x02\x00\x00\x00' | ${CLICKHOUSE_CURL} -sSF "tmp=@-" "${CLICKHOUSE_URL}&query=SELECT+*+FROM+tmp&tmp_structure=TaskID+UInt32&tmp_format=RowBinary";
+
+# use big-endian version of binary data for s390x, whose native byte order is big-endian
+if uname -a | grep -q s390x; then
+ echo -ne '\x00\x00\x00\x01\x00\x00\x00\x02' | ${CLICKHOUSE_CURL} -sSF "tmp=@-" "${CLICKHOUSE_URL}&query=SELECT+*+FROM+tmp&tmp_structure=TaskID+UInt32&tmp_format=RowBinary";
+else
+ echo -ne '\x01\x00\x00\x00\x02\x00\x00\x00' | ${CLICKHOUSE_CURL} -sSF "tmp=@-" "${CLICKHOUSE_URL}&query=SELECT+*+FROM+tmp&tmp_structure=TaskID+UInt32&tmp_format=RowBinary";
+fi
diff --git a/tests/queries/0_stateless/01271_show_privileges.reference b/tests/queries/0_stateless/01271_show_privileges.reference
index 5d46fd8585f..79399589533 100644
--- a/tests/queries/0_stateless/01271_show_privileges.reference
+++ b/tests/queries/0_stateless/01271_show_privileges.reference
@@ -96,7 +96,7 @@ SYSTEM DROP DNS CACHE ['SYSTEM DROP DNS','DROP DNS CACHE','DROP DNS'] GLOBAL SYS
SYSTEM DROP MARK CACHE ['SYSTEM DROP MARK','DROP MARK CACHE','DROP MARKS'] GLOBAL SYSTEM DROP CACHE
SYSTEM DROP UNCOMPRESSED CACHE ['SYSTEM DROP UNCOMPRESSED','DROP UNCOMPRESSED CACHE','DROP UNCOMPRESSED'] GLOBAL SYSTEM DROP CACHE
SYSTEM DROP MMAP CACHE ['SYSTEM DROP MMAP','DROP MMAP CACHE','DROP MMAP'] GLOBAL SYSTEM DROP CACHE
-SYSTEM DROP QUERY RESULT CACHE ['SYSTEM DROP QUERY RESULT','DROP QUERY RESULT CACHE','DROP QUERY RESULT'] GLOBAL SYSTEM DROP CACHE
+SYSTEM DROP QUERY CACHE ['SYSTEM DROP QUERY','DROP QUERY CACHE','DROP QUERY'] GLOBAL SYSTEM DROP CACHE
SYSTEM DROP COMPILED EXPRESSION CACHE ['SYSTEM DROP COMPILED EXPRESSION','DROP COMPILED EXPRESSION CACHE','DROP COMPILED EXPRESSIONS'] GLOBAL SYSTEM DROP CACHE
SYSTEM DROP FILESYSTEM CACHE ['SYSTEM DROP FILESYSTEM CACHE','DROP FILESYSTEM CACHE'] GLOBAL SYSTEM DROP CACHE
SYSTEM DROP SCHEMA CACHE ['SYSTEM DROP SCHEMA CACHE','DROP SCHEMA CACHE'] GLOBAL SYSTEM DROP CACHE
diff --git a/tests/queries/0_stateless/01508_query_obfuscator.reference b/tests/queries/0_stateless/01508_query_obfuscator.reference
index 7d910734dbd..9268b444d90 100644
--- a/tests/queries/0_stateless/01508_query_obfuscator.reference
+++ b/tests/queries/0_stateless/01508_query_obfuscator.reference
@@ -1,16 +1,16 @@
-SELECT 116, 'Qqfu://2020-02-10isqkc1203 sp 2000-05-27T18:38:01', 13e100, Obsidian_id_diverTeam, sweets(Workplace), avgIf(remote('128.0.0.1'))
-SELECT treasury_mammoth_hazelnut between nutmeg and span, case when chive >= 116 then switching else null end
+SELECT 116, 'Qqfu://2020-02-10isqkc1203 sp 2000-05-27T18:38:01', 13e100, Jewelry_id_studyBeast, algebra(Stable), avgIf(remote('128.0.0.1'))
+SELECT surfboard_solitaire_crunch between understanding and populist, case when instrument >= 116 then poverty else null end
SELECT
- EarthquakeID,
- Workout.ID, Workout.CoupleThrill,
- MedalEMPIRE,
- HOPE.ListingName, HOPE.ListingBomb, HOPE.ListingRamen, HOPE.ListingResult, HOPE.CoupleThrill, HOPE.Smile
-FROM merge.marsh_agreement
+ BugleID,
+ Reliability.ID, Reliability.ExperiencePrevalence,
+ DepressiveTURKEY,
+ SPARK.RainmakerName, SPARK.RainmakerReligion, SPARK.RainmakerMisfit, SPARK.RainmakerAardvark, SPARK.ExperiencePrevalence, SPARK.Type
+FROM merge.invader_schizophrenic
WHERE
- RecapitulationLeaver >= '2020-10-13' AND RecapitulationLeaver <= '2020-10-21'
- AND MasonryID = 30750384
- AND intHash32(EyeballID) = 448362928 AND intHash64(EyeballID) = 12572659331310383983
- AND EarthquakeID IN (8195672321757027078, 7079643623150622129, 5057006826979676478, 7886875230160484653, 7494974311229040743)
- AND Aide = 1
+ PortraitInvasion >= '2020-10-13' AND PortraitInvasion <= '2020-10-21'
+ AND FrownID = 30750384
+ AND intHash32(HaversackID) = 448362928 AND intHash64(HaversackID) = 12572659331310383983
+ AND BugleID IN (8195672321757027078, 7079643623150622129, 5057006826979676478, 7886875230160484653, 7494974311229040743)
+ AND Hostel = 1
diff --git a/tests/queries/0_stateless/02016_aggregation_spark_bar.sql b/tests/queries/0_stateless/02016_aggregation_spark_bar.sql
index 8b5b62305ec..5237f832d25 100644
--- a/tests/queries/0_stateless/02016_aggregation_spark_bar.sql
+++ b/tests/queries/0_stateless/02016_aggregation_spark_bar.sql
@@ -35,3 +35,10 @@ SELECT sparkbar(5,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FRO
DROP TABLE IF EXISTS spark_bar_test;
WITH number DIV 50 AS k, number % 50 AS value SELECT k, sparkbar(50, 0, 99)(number, value) FROM numbers(100) GROUP BY k ORDER BY k;
+
+-- OOM guard
+DROP TABLE IF EXISTS spark_bar_oom;
+CREATE TABLE spark_bar_oom (x UInt64, y UInt8) Engine=MergeTree ORDER BY tuple();
+INSERT INTO spark_bar_oom VALUES (18446744073709551615,255),(0,0),(0,0),(4036797895307271799,163);
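+-- 241 = MEMORY_LIMIT_EXCEEDED: the huge x range must trip the memory limit instead of exhausting the server.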
+SELECT sparkbar(9)(x,y) FROM spark_bar_oom SETTINGS max_memory_usage = 100000000; -- { serverError 241 }
+DROP TABLE IF EXISTS spark_bar_oom;
diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference
index beff51fb294..80fe6aeefec 100644
--- a/tests/queries/0_stateless/02117_show_create_table_system.reference
+++ b/tests/queries/0_stateless/02117_show_create_table_system.reference
@@ -288,7 +288,7 @@ CREATE TABLE system.grants
(
`user_name` Nullable(String),
`role_name` Nullable(String),
- `access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE FUNCTION' = 53, 'CREATE NAMED COLLECTION' = 54, 'CREATE' = 55, 'DROP DATABASE' = 56, 'DROP TABLE' = 57, 'DROP VIEW' = 58, 'DROP DICTIONARY' = 59, 'DROP FUNCTION' = 60, 'DROP NAMED COLLECTION' = 61, 'DROP' = 62, 'TRUNCATE' = 63, 'OPTIMIZE' = 64, 'BACKUP' = 65, 'KILL QUERY' = 66, 'KILL TRANSACTION' = 67, 'MOVE PARTITION BETWEEN SHARDS' = 68, 'CREATE USER' = 69, 'ALTER USER' = 70, 'DROP USER' = 71, 'CREATE ROLE' = 72, 'ALTER ROLE' = 73, 'DROP ROLE' = 74, 'ROLE ADMIN' = 75, 'CREATE ROW POLICY' = 76, 'ALTER ROW POLICY' = 77, 'DROP ROW POLICY' = 78, 'CREATE QUOTA' = 79, 'ALTER QUOTA' = 80, 'DROP QUOTA' = 81, 'CREATE SETTINGS PROFILE' = 82, 'ALTER SETTINGS PROFILE' = 83, 'DROP SETTINGS PROFILE' = 84, 'SHOW USERS' = 85, 'SHOW ROLES' = 86, 'SHOW ROW POLICIES' = 87, 'SHOW QUOTAS' = 88, 'SHOW SETTINGS PROFILES' = 89, 'SHOW ACCESS' = 90, 'SHOW NAMED COLLECTIONS' = 91, 'ACCESS MANAGEMENT' = 92, 'SYSTEM SHUTDOWN' = 93, 'SYSTEM DROP DNS CACHE' = 94, 'SYSTEM DROP MARK CACHE' = 95, 'SYSTEM DROP UNCOMPRESSED CACHE' = 96, 'SYSTEM DROP MMAP CACHE' = 97, 'SYSTEM DROP QUERY RESULT CACHE' = 98, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 99, 'SYSTEM DROP FILESYSTEM CACHE' = 100, 'SYSTEM DROP SCHEMA CACHE' = 101, 'SYSTEM DROP CACHE' = 102, 'SYSTEM RELOAD CONFIG' = 103, 'SYSTEM RELOAD USERS' = 104, 'SYSTEM RELOAD SYMBOLS' = 105, 'SYSTEM RELOAD DICTIONARY' = 106, 'SYSTEM RELOAD MODEL' = 107, 'SYSTEM RELOAD FUNCTION' = 108, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 109, 'SYSTEM RELOAD' = 110, 'SYSTEM RESTART DISK' = 111, 'SYSTEM MERGES' = 112, 'SYSTEM TTL MERGES' = 113, 'SYSTEM FETCHES' = 114, 'SYSTEM MOVES' = 115, 'SYSTEM DISTRIBUTED SENDS' = 116, 'SYSTEM REPLICATED SENDS' = 117, 'SYSTEM SENDS' = 118, 'SYSTEM REPLICATION QUEUES' = 119, 'SYSTEM DROP REPLICA' = 120, 'SYSTEM SYNC REPLICA' = 121, 'SYSTEM RESTART REPLICA' = 122, 'SYSTEM RESTORE REPLICA' = 123, 'SYSTEM WAIT LOADING PARTS' = 124, 'SYSTEM SYNC DATABASE REPLICA' = 125, 'SYSTEM SYNC TRANSACTION LOG' = 126, 'SYSTEM SYNC FILE CACHE' = 127, 'SYSTEM FLUSH DISTRIBUTED' = 128, 'SYSTEM FLUSH LOGS' = 129, 'SYSTEM FLUSH' = 130, 'SYSTEM THREAD FUZZER' = 131, 'SYSTEM UNFREEZE' = 132, 'SYSTEM' = 133, 'dictGet' = 134, 'addressToLine' = 135, 'addressToLineWithInlines' = 136, 'addressToSymbol' = 137, 'demangle' = 138, 'INTROSPECTION' = 139, 'FILE' = 140, 'URL' = 141, 'REMOTE' = 142, 'MONGO' = 143, 'MEILISEARCH' = 144, 'MYSQL' = 145, 'POSTGRES' = 146, 'SQLITE' = 147, 'ODBC' = 148, 'JDBC' = 149, 'HDFS' = 150, 'S3' = 151, 'HIVE' = 152, 'SOURCES' = 153, 'CLUSTER' = 154, 'ALL' = 155, 'NONE' = 156),
+ `access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE FUNCTION' = 53, 'CREATE NAMED COLLECTION' = 54, 'CREATE' = 55, 'DROP DATABASE' = 56, 'DROP TABLE' = 57, 'DROP VIEW' = 58, 'DROP DICTIONARY' = 59, 'DROP FUNCTION' = 60, 'DROP NAMED COLLECTION' = 61, 'DROP' = 62, 'TRUNCATE' = 63, 'OPTIMIZE' = 64, 'BACKUP' = 65, 'KILL QUERY' = 66, 'KILL TRANSACTION' = 67, 'MOVE PARTITION BETWEEN SHARDS' = 68, 'CREATE USER' = 69, 'ALTER USER' = 70, 'DROP USER' = 71, 'CREATE ROLE' = 72, 'ALTER ROLE' = 73, 'DROP ROLE' = 74, 'ROLE ADMIN' = 75, 'CREATE ROW POLICY' = 76, 'ALTER ROW POLICY' = 77, 'DROP ROW POLICY' = 78, 'CREATE QUOTA' = 79, 'ALTER QUOTA' = 80, 'DROP QUOTA' = 81, 'CREATE SETTINGS PROFILE' = 82, 'ALTER SETTINGS PROFILE' = 83, 'DROP SETTINGS PROFILE' = 84, 'SHOW USERS' = 85, 'SHOW ROLES' = 86, 'SHOW ROW POLICIES' = 87, 'SHOW QUOTAS' = 88, 'SHOW SETTINGS PROFILES' = 89, 'SHOW ACCESS' = 90, 'SHOW NAMED COLLECTIONS' = 91, 'ACCESS MANAGEMENT' = 92, 'SYSTEM SHUTDOWN' = 93, 'SYSTEM DROP DNS CACHE' = 94, 'SYSTEM DROP MARK CACHE' = 95, 'SYSTEM DROP UNCOMPRESSED CACHE' = 96, 'SYSTEM DROP MMAP CACHE' = 97, 'SYSTEM DROP QUERY CACHE' = 98, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 99, 'SYSTEM DROP FILESYSTEM CACHE' = 100, 'SYSTEM DROP SCHEMA CACHE' = 101, 'SYSTEM DROP CACHE' = 102, 'SYSTEM RELOAD CONFIG' = 103, 'SYSTEM RELOAD USERS' = 104, 'SYSTEM RELOAD SYMBOLS' = 105, 'SYSTEM RELOAD DICTIONARY' = 106, 'SYSTEM RELOAD MODEL' = 107, 'SYSTEM RELOAD FUNCTION' = 108, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 109, 'SYSTEM RELOAD' = 110, 'SYSTEM RESTART DISK' = 111, 'SYSTEM MERGES' = 112, 'SYSTEM TTL MERGES' = 113, 'SYSTEM FETCHES' = 114, 'SYSTEM MOVES' = 115, 'SYSTEM DISTRIBUTED SENDS' = 116, 'SYSTEM REPLICATED SENDS' = 117, 'SYSTEM SENDS' = 118, 'SYSTEM REPLICATION QUEUES' = 119, 'SYSTEM DROP REPLICA' = 120, 'SYSTEM SYNC REPLICA' = 121, 'SYSTEM RESTART REPLICA' = 122, 'SYSTEM RESTORE REPLICA' = 123, 'SYSTEM WAIT LOADING PARTS' = 124, 'SYSTEM SYNC DATABASE REPLICA' = 125, 'SYSTEM SYNC TRANSACTION LOG' = 126, 'SYSTEM SYNC FILE CACHE' = 127, 'SYSTEM FLUSH DISTRIBUTED' = 128, 'SYSTEM FLUSH LOGS' = 129, 'SYSTEM FLUSH' = 130, 'SYSTEM THREAD FUZZER' = 131, 'SYSTEM UNFREEZE' = 132, 'SYSTEM' = 133, 'dictGet' = 134, 'addressToLine' = 135, 'addressToLineWithInlines' = 136, 'addressToSymbol' = 137, 'demangle' = 138, 'INTROSPECTION' = 139, 'FILE' = 140, 'URL' = 141, 'REMOTE' = 142, 'MONGO' = 143, 'MEILISEARCH' = 144, 'MYSQL' = 145, 'POSTGRES' = 146, 'SQLITE' = 147, 'ODBC' = 148, 'JDBC' = 149, 'HDFS' = 150, 'S3' = 151, 'HIVE' = 152, 'SOURCES' = 153, 'CLUSTER' = 154, 'ALL' = 155, 'NONE' = 156),
`database` Nullable(String),
`table` Nullable(String),
`column` Nullable(String),
@@ -569,10 +569,10 @@ ENGINE = SystemPartsColumns
COMMENT 'SYSTEM TABLE is built on the fly.'
CREATE TABLE system.privileges
(
- `privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE FUNCTION' = 53, 'CREATE NAMED COLLECTION' = 54, 'CREATE' = 55, 'DROP DATABASE' = 56, 'DROP TABLE' = 57, 'DROP VIEW' = 58, 'DROP DICTIONARY' = 59, 'DROP FUNCTION' = 60, 'DROP NAMED COLLECTION' = 61, 'DROP' = 62, 'TRUNCATE' = 63, 'OPTIMIZE' = 64, 'BACKUP' = 65, 'KILL QUERY' = 66, 'KILL TRANSACTION' = 67, 'MOVE PARTITION BETWEEN SHARDS' = 68, 'CREATE USER' = 69, 'ALTER USER' = 70, 'DROP USER' = 71, 'CREATE ROLE' = 72, 'ALTER ROLE' = 73, 'DROP ROLE' = 74, 'ROLE ADMIN' = 75, 'CREATE ROW POLICY' = 76, 'ALTER ROW POLICY' = 77, 'DROP ROW POLICY' = 78, 'CREATE QUOTA' = 79, 'ALTER QUOTA' = 80, 'DROP QUOTA' = 81, 'CREATE SETTINGS PROFILE' = 82, 'ALTER SETTINGS PROFILE' = 83, 'DROP SETTINGS PROFILE' = 84, 'SHOW USERS' = 85, 'SHOW ROLES' = 86, 'SHOW ROW POLICIES' = 87, 'SHOW QUOTAS' = 88, 'SHOW SETTINGS PROFILES' = 89, 'SHOW ACCESS' = 90, 'SHOW NAMED COLLECTIONS' = 91, 'ACCESS MANAGEMENT' = 92, 'SYSTEM SHUTDOWN' = 93, 'SYSTEM DROP DNS CACHE' = 94, 'SYSTEM DROP MARK CACHE' = 95, 'SYSTEM DROP UNCOMPRESSED CACHE' = 96, 'SYSTEM DROP MMAP CACHE' = 97, 'SYSTEM DROP QUERY RESULT CACHE' = 98, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 99, 'SYSTEM DROP FILESYSTEM CACHE' = 100, 'SYSTEM DROP SCHEMA CACHE' = 101, 'SYSTEM DROP CACHE' = 102, 'SYSTEM RELOAD CONFIG' = 103, 'SYSTEM RELOAD USERS' = 104, 'SYSTEM RELOAD SYMBOLS' = 105, 'SYSTEM RELOAD DICTIONARY' = 106, 'SYSTEM RELOAD MODEL' = 107, 'SYSTEM RELOAD FUNCTION' = 108, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 109, 'SYSTEM RELOAD' = 110, 'SYSTEM RESTART DISK' = 111, 'SYSTEM MERGES' = 112, 'SYSTEM TTL MERGES' = 113, 'SYSTEM FETCHES' = 114, 'SYSTEM MOVES' = 115, 'SYSTEM DISTRIBUTED SENDS' = 116, 'SYSTEM REPLICATED SENDS' = 117, 'SYSTEM SENDS' = 118, 'SYSTEM REPLICATION QUEUES' = 119, 'SYSTEM DROP REPLICA' = 120, 'SYSTEM SYNC REPLICA' = 121, 'SYSTEM RESTART REPLICA' = 122, 'SYSTEM RESTORE REPLICA' = 123, 'SYSTEM WAIT LOADING PARTS' = 124, 'SYSTEM SYNC DATABASE REPLICA' = 125, 'SYSTEM SYNC TRANSACTION LOG' = 126, 'SYSTEM SYNC FILE CACHE' = 127, 'SYSTEM FLUSH DISTRIBUTED' = 128, 'SYSTEM FLUSH LOGS' = 129, 'SYSTEM FLUSH' = 130, 'SYSTEM THREAD FUZZER' = 131, 'SYSTEM UNFREEZE' = 132, 'SYSTEM' = 133, 'dictGet' = 134, 'addressToLine' = 135, 'addressToLineWithInlines' = 136, 'addressToSymbol' = 137, 'demangle' = 138, 'INTROSPECTION' = 139, 'FILE' = 140, 'URL' = 141, 'REMOTE' = 142, 'MONGO' = 143, 'MEILISEARCH' = 144, 'MYSQL' = 145, 'POSTGRES' = 146, 'SQLITE' = 147, 'ODBC' = 148, 'JDBC' = 149, 'HDFS' = 150, 'S3' = 151, 'HIVE' = 152, 'SOURCES' = 153, 'CLUSTER' = 154, 'ALL' = 155, 'NONE' = 156),
+ `privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE FUNCTION' = 53, 'CREATE NAMED COLLECTION' = 54, 'CREATE' = 55, 'DROP DATABASE' = 56, 'DROP TABLE' = 57, 'DROP VIEW' = 58, 'DROP DICTIONARY' = 59, 'DROP FUNCTION' = 60, 'DROP NAMED COLLECTION' = 61, 'DROP' = 62, 'TRUNCATE' = 63, 'OPTIMIZE' = 64, 'BACKUP' = 65, 'KILL QUERY' = 66, 'KILL TRANSACTION' = 67, 'MOVE PARTITION BETWEEN SHARDS' = 68, 'CREATE USER' = 69, 'ALTER USER' = 70, 'DROP USER' = 71, 'CREATE ROLE' = 72, 'ALTER ROLE' = 73, 'DROP ROLE' = 74, 'ROLE ADMIN' = 75, 'CREATE ROW POLICY' = 76, 'ALTER ROW POLICY' = 77, 'DROP ROW POLICY' = 78, 'CREATE QUOTA' = 79, 'ALTER QUOTA' = 80, 'DROP QUOTA' = 81, 'CREATE SETTINGS PROFILE' = 82, 'ALTER SETTINGS PROFILE' = 83, 'DROP SETTINGS PROFILE' = 84, 'SHOW USERS' = 85, 'SHOW ROLES' = 86, 'SHOW ROW POLICIES' = 87, 'SHOW QUOTAS' = 88, 'SHOW SETTINGS PROFILES' = 89, 'SHOW ACCESS' = 90, 'SHOW NAMED COLLECTIONS' = 91, 'ACCESS MANAGEMENT' = 92, 'SYSTEM SHUTDOWN' = 93, 'SYSTEM DROP DNS CACHE' = 94, 'SYSTEM DROP MARK CACHE' = 95, 'SYSTEM DROP UNCOMPRESSED CACHE' = 96, 'SYSTEM DROP MMAP CACHE' = 97, 'SYSTEM DROP QUERY CACHE' = 98, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 99, 'SYSTEM DROP FILESYSTEM CACHE' = 100, 'SYSTEM DROP SCHEMA CACHE' = 101, 'SYSTEM DROP CACHE' = 102, 'SYSTEM RELOAD CONFIG' = 103, 'SYSTEM RELOAD USERS' = 104, 'SYSTEM RELOAD SYMBOLS' = 105, 'SYSTEM RELOAD DICTIONARY' = 106, 'SYSTEM RELOAD MODEL' = 107, 'SYSTEM RELOAD FUNCTION' = 108, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 109, 'SYSTEM RELOAD' = 110, 'SYSTEM RESTART DISK' = 111, 'SYSTEM MERGES' = 112, 'SYSTEM TTL MERGES' = 113, 'SYSTEM FETCHES' = 114, 'SYSTEM MOVES' = 115, 'SYSTEM DISTRIBUTED SENDS' = 116, 'SYSTEM REPLICATED SENDS' = 117, 'SYSTEM SENDS' = 118, 'SYSTEM REPLICATION QUEUES' = 119, 'SYSTEM DROP REPLICA' = 120, 'SYSTEM SYNC REPLICA' = 121, 'SYSTEM RESTART REPLICA' = 122, 'SYSTEM RESTORE REPLICA' = 123, 'SYSTEM WAIT LOADING PARTS' = 124, 'SYSTEM SYNC DATABASE REPLICA' = 125, 'SYSTEM SYNC TRANSACTION LOG' = 126, 'SYSTEM SYNC FILE CACHE' = 127, 'SYSTEM FLUSH DISTRIBUTED' = 128, 'SYSTEM FLUSH LOGS' = 129, 'SYSTEM FLUSH' = 130, 'SYSTEM THREAD FUZZER' = 131, 'SYSTEM UNFREEZE' = 132, 'SYSTEM' = 133, 'dictGet' = 134, 'addressToLine' = 135, 'addressToLineWithInlines' = 136, 'addressToSymbol' = 137, 'demangle' = 138, 'INTROSPECTION' = 139, 'FILE' = 140, 'URL' = 141, 'REMOTE' = 142, 'MONGO' = 143, 'MEILISEARCH' = 144, 'MYSQL' = 145, 'POSTGRES' = 146, 'SQLITE' = 147, 'ODBC' = 148, 'JDBC' = 149, 'HDFS' = 150, 'S3' = 151, 'HIVE' = 152, 'SOURCES' = 153, 'CLUSTER' = 154, 'ALL' = 155, 'NONE' = 156),
`aliases` Array(String),
`level` Nullable(Enum8('GLOBAL' = 0, 'DATABASE' = 1, 'TABLE' = 2, 'DICTIONARY' = 3, 'VIEW' = 4, 'COLUMN' = 5)),
- `parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE FUNCTION' = 53, 'CREATE NAMED COLLECTION' = 54, 'CREATE' = 55, 'DROP DATABASE' = 56, 'DROP TABLE' = 57, 'DROP VIEW' = 58, 'DROP DICTIONARY' = 59, 'DROP FUNCTION' = 60, 'DROP NAMED COLLECTION' = 61, 'DROP' = 62, 'TRUNCATE' = 63, 'OPTIMIZE' = 64, 'BACKUP' = 65, 'KILL QUERY' = 66, 'KILL TRANSACTION' = 67, 'MOVE PARTITION BETWEEN SHARDS' = 68, 'CREATE USER' = 69, 'ALTER USER' = 70, 'DROP USER' = 71, 'CREATE ROLE' = 72, 'ALTER ROLE' = 73, 'DROP ROLE' = 74, 'ROLE ADMIN' = 75, 'CREATE ROW POLICY' = 76, 'ALTER ROW POLICY' = 77, 'DROP ROW POLICY' = 78, 'CREATE QUOTA' = 79, 'ALTER QUOTA' = 80, 'DROP QUOTA' = 81, 'CREATE SETTINGS PROFILE' = 82, 'ALTER SETTINGS PROFILE' = 83, 'DROP SETTINGS PROFILE' = 84, 'SHOW USERS' = 85, 'SHOW ROLES' = 86, 'SHOW ROW POLICIES' = 87, 'SHOW QUOTAS' = 88, 'SHOW SETTINGS PROFILES' = 89, 'SHOW ACCESS' = 90, 'SHOW NAMED COLLECTIONS' = 91, 'ACCESS MANAGEMENT' = 92, 'SYSTEM SHUTDOWN' = 93, 'SYSTEM DROP DNS CACHE' = 94, 'SYSTEM DROP MARK CACHE' = 95, 'SYSTEM DROP UNCOMPRESSED CACHE' = 96, 'SYSTEM DROP MMAP CACHE' = 97, 'SYSTEM DROP QUERY RESULT CACHE' = 98, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 99, 'SYSTEM DROP FILESYSTEM CACHE' = 100, 'SYSTEM DROP SCHEMA CACHE' = 101, 'SYSTEM DROP CACHE' = 102, 'SYSTEM RELOAD CONFIG' = 103, 'SYSTEM RELOAD USERS' = 104, 'SYSTEM RELOAD SYMBOLS' = 105, 'SYSTEM RELOAD DICTIONARY' = 106, 'SYSTEM RELOAD MODEL' = 107, 'SYSTEM RELOAD FUNCTION' = 108, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 109, 'SYSTEM RELOAD' = 110, 'SYSTEM RESTART DISK' = 111, 'SYSTEM MERGES' = 112, 'SYSTEM TTL MERGES' = 113, 'SYSTEM FETCHES' = 114, 'SYSTEM MOVES' = 115, 'SYSTEM DISTRIBUTED SENDS' = 116, 'SYSTEM REPLICATED SENDS' = 117, 'SYSTEM SENDS' = 118, 'SYSTEM REPLICATION QUEUES' = 119, 'SYSTEM DROP REPLICA' = 120, 'SYSTEM SYNC REPLICA' = 121, 'SYSTEM RESTART REPLICA' = 122, 'SYSTEM RESTORE REPLICA' = 123, 'SYSTEM WAIT LOADING PARTS' = 124, 'SYSTEM SYNC DATABASE REPLICA' = 125, 'SYSTEM SYNC TRANSACTION LOG' = 126, 'SYSTEM SYNC FILE CACHE' = 127, 'SYSTEM FLUSH DISTRIBUTED' = 128, 'SYSTEM FLUSH LOGS' = 129, 'SYSTEM FLUSH' = 130, 'SYSTEM THREAD FUZZER' = 131, 'SYSTEM UNFREEZE' = 132, 'SYSTEM' = 133, 'dictGet' = 134, 'addressToLine' = 135, 'addressToLineWithInlines' = 136, 'addressToSymbol' = 137, 'demangle' = 138, 'INTROSPECTION' = 139, 'FILE' = 140, 'URL' = 141, 'REMOTE' = 142, 'MONGO' = 143, 'MEILISEARCH' = 144, 'MYSQL' = 145, 'POSTGRES' = 146, 'SQLITE' = 147, 'ODBC' = 148, 'JDBC' = 149, 'HDFS' = 150, 'S3' = 151, 'HIVE' = 152, 'SOURCES' = 153, 'CLUSTER' = 154, 'ALL' = 155, 'NONE' = 156))
+ `parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE FUNCTION' = 53, 'CREATE NAMED COLLECTION' = 54, 'CREATE' = 55, 'DROP DATABASE' = 56, 'DROP TABLE' = 57, 'DROP VIEW' = 58, 'DROP DICTIONARY' = 59, 'DROP FUNCTION' = 60, 'DROP NAMED COLLECTION' = 61, 'DROP' = 62, 'TRUNCATE' = 63, 'OPTIMIZE' = 64, 'BACKUP' = 65, 'KILL QUERY' = 66, 'KILL TRANSACTION' = 67, 'MOVE PARTITION BETWEEN SHARDS' = 68, 'CREATE USER' = 69, 'ALTER USER' = 70, 'DROP USER' = 71, 'CREATE ROLE' = 72, 'ALTER ROLE' = 73, 'DROP ROLE' = 74, 'ROLE ADMIN' = 75, 'CREATE ROW POLICY' = 76, 'ALTER ROW POLICY' = 77, 'DROP ROW POLICY' = 78, 'CREATE QUOTA' = 79, 'ALTER QUOTA' = 80, 'DROP QUOTA' = 81, 'CREATE SETTINGS PROFILE' = 82, 'ALTER SETTINGS PROFILE' = 83, 'DROP SETTINGS PROFILE' = 84, 'SHOW USERS' = 85, 'SHOW ROLES' = 86, 'SHOW ROW POLICIES' = 87, 'SHOW QUOTAS' = 88, 'SHOW SETTINGS PROFILES' = 89, 'SHOW ACCESS' = 90, 'SHOW NAMED COLLECTIONS' = 91, 'ACCESS MANAGEMENT' = 92, 'SYSTEM SHUTDOWN' = 93, 'SYSTEM DROP DNS CACHE' = 94, 'SYSTEM DROP MARK CACHE' = 95, 'SYSTEM DROP UNCOMPRESSED CACHE' = 96, 'SYSTEM DROP MMAP CACHE' = 97, 'SYSTEM DROP QUERY CACHE' = 98, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 99, 'SYSTEM DROP FILESYSTEM CACHE' = 100, 'SYSTEM DROP SCHEMA CACHE' = 101, 'SYSTEM DROP CACHE' = 102, 'SYSTEM RELOAD CONFIG' = 103, 'SYSTEM RELOAD USERS' = 104, 'SYSTEM RELOAD SYMBOLS' = 105, 'SYSTEM RELOAD DICTIONARY' = 106, 'SYSTEM RELOAD MODEL' = 107, 'SYSTEM RELOAD FUNCTION' = 108, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 109, 'SYSTEM RELOAD' = 110, 'SYSTEM RESTART DISK' = 111, 'SYSTEM MERGES' = 112, 'SYSTEM TTL MERGES' = 113, 'SYSTEM FETCHES' = 114, 'SYSTEM MOVES' = 115, 'SYSTEM DISTRIBUTED SENDS' = 116, 'SYSTEM REPLICATED SENDS' = 117, 'SYSTEM SENDS' = 118, 'SYSTEM REPLICATION QUEUES' = 119, 'SYSTEM DROP REPLICA' = 120, 'SYSTEM SYNC REPLICA' = 121, 'SYSTEM RESTART REPLICA' = 122, 'SYSTEM RESTORE REPLICA' = 123, 'SYSTEM WAIT LOADING PARTS' = 124, 'SYSTEM SYNC DATABASE REPLICA' = 125, 'SYSTEM SYNC TRANSACTION LOG' = 126, 'SYSTEM SYNC FILE CACHE' = 127, 'SYSTEM FLUSH DISTRIBUTED' = 128, 'SYSTEM FLUSH LOGS' = 129, 'SYSTEM FLUSH' = 130, 'SYSTEM THREAD FUZZER' = 131, 'SYSTEM UNFREEZE' = 132, 'SYSTEM' = 133, 'dictGet' = 134, 'addressToLine' = 135, 'addressToLineWithInlines' = 136, 'addressToSymbol' = 137, 'demangle' = 138, 'INTROSPECTION' = 139, 'FILE' = 140, 'URL' = 141, 'REMOTE' = 142, 'MONGO' = 143, 'MEILISEARCH' = 144, 'MYSQL' = 145, 'POSTGRES' = 146, 'SQLITE' = 147, 'ODBC' = 148, 'JDBC' = 149, 'HDFS' = 150, 'S3' = 151, 'HIVE' = 152, 'SOURCES' = 153, 'CLUSTER' = 154, 'ALL' = 155, 'NONE' = 156))
)
ENGINE = SystemPrivileges
COMMENT 'SYSTEM TABLE is built on the fly.'
diff --git a/tests/queries/0_stateless/02234_cast_to_ip_address.reference b/tests/queries/0_stateless/02234_cast_to_ip_address.reference
index 96aae2a978c..9023b36a9bf 100644
--- a/tests/queries/0_stateless/02234_cast_to_ip_address.reference
+++ b/tests/queries/0_stateless/02234_cast_to_ip_address.reference
@@ -31,6 +31,9 @@ IPv6 functions
::ffff:127.0.0.1
::ffff:127.0.0.1
::ffff:127.0.0.1
+::
+\N
+100000000
--
::ffff:127.0.0.1
--
diff --git a/tests/queries/0_stateless/02234_cast_to_ip_address.sql b/tests/queries/0_stateless/02234_cast_to_ip_address.sql
index 436f232e441..6c65fe86cc9 100644
--- a/tests/queries/0_stateless/02234_cast_to_ip_address.sql
+++ b/tests/queries/0_stateless/02234_cast_to_ip_address.sql
@@ -56,6 +56,12 @@ SELECT toIPv6('::ffff:127.0.0.1');
SELECT toIPv6OrDefault('::ffff:127.0.0.1');
SELECT toIPv6OrNull('::ffff:127.0.0.1');
+SELECT toIPv6('::.1.2.3'); --{serverError CANNOT_PARSE_IPV6}
+SELECT toIPv6OrDefault('::.1.2.3');
+SELECT toIPv6OrNull('::.1.2.3');
+
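+-- toIPv6OrZero must never throw on malformed input; NOT ignore(...) is true for every row, so count() returns 100000000.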
+SELECT count() FROM numbers_mt(100000000) WHERE NOT ignore(toIPv6OrZero(randomString(8)));
+
SELECT '--';
SELECT cast('test' , 'IPv6'); --{serverError CANNOT_PARSE_IPV6}
diff --git a/tests/queries/0_stateless/02494_query_result_cache_case_agnostic_matching.reference b/tests/queries/0_stateless/02494_query_cache_case_agnostic_matching.reference
similarity index 100%
rename from tests/queries/0_stateless/02494_query_result_cache_case_agnostic_matching.reference
rename to tests/queries/0_stateless/02494_query_cache_case_agnostic_matching.reference
diff --git a/tests/queries/0_stateless/02494_query_cache_case_agnostic_matching.sql b/tests/queries/0_stateless/02494_query_cache_case_agnostic_matching.sql
new file mode 100644
index 00000000000..9440a1fd9c0
--- /dev/null
+++ b/tests/queries/0_stateless/02494_query_cache_case_agnostic_matching.sql
@@ -0,0 +1,29 @@
+-- Tags: no-parallel
+-- Tag no-parallel: Messes with internal cache
+
+SET allow_experimental_query_cache = true;
+
+-- Start with empty query cache (QC) and query log
+SYSTEM DROP QUERY CACHE;
+DROP TABLE system.query_log SYNC;
+
+-- Insert an entry into the query cache.
+SELECT 1 SETTINGS use_query_cache = true;
+-- Check that entry in QC exists
+SELECT COUNT(*) FROM system.query_cache;
+
+-- Run the same SELECT but with different case (--> select). We want its result to be served from the QC.
+SELECT '---';
+select 1 SETTINGS use_query_cache = true;
+
+-- There should still be just one entry in the QC
+SELECT COUNT(*) FROM system.query_cache;
+
+-- The second query should cause a QC hit.
+SYSTEM FLUSH LOGS;
+SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses']
+FROM system.query_log
+WHERE type = 'QueryFinish'
+ AND query = 'select 1 SETTINGS use_query_cache = true;';
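+-- (expected: QueryCacheHits = 1, QueryCacheMisses = 0 for the lower-case run)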
+
+SYSTEM DROP QUERY CACHE;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_drop_cache.reference b/tests/queries/0_stateless/02494_query_cache_drop_cache.reference
similarity index 100%
rename from tests/queries/0_stateless/02494_query_result_cache_drop_cache.reference
rename to tests/queries/0_stateless/02494_query_cache_drop_cache.reference
diff --git a/tests/queries/0_stateless/02494_query_cache_drop_cache.sql b/tests/queries/0_stateless/02494_query_cache_drop_cache.sql
new file mode 100644
index 00000000000..1f61472fcb0
--- /dev/null
+++ b/tests/queries/0_stateless/02494_query_cache_drop_cache.sql
@@ -0,0 +1,12 @@
+-- Tags: no-parallel
+-- Tag no-parallel: Messes with internal cache
+
+SET allow_experimental_query_cache = true;
+
+-- Cache query result in query cache
+SELECT 1 SETTINGS use_query_cache = true;
+SELECT count(*) FROM system.query_cache;
+
+-- No query results are cached after DROP
+SYSTEM DROP QUERY CACHE;
+SELECT count(*) FROM system.query_cache;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_eligible_queries.reference b/tests/queries/0_stateless/02494_query_cache_eligible_queries.reference
similarity index 100%
rename from tests/queries/0_stateless/02494_query_result_cache_eligible_queries.reference
rename to tests/queries/0_stateless/02494_query_cache_eligible_queries.reference
diff --git a/tests/queries/0_stateless/02494_query_cache_eligible_queries.sql b/tests/queries/0_stateless/02494_query_cache_eligible_queries.sql
new file mode 100644
index 00000000000..b4bc9e2c258
--- /dev/null
+++ b/tests/queries/0_stateless/02494_query_cache_eligible_queries.sql
@@ -0,0 +1,68 @@
+-- Tags: no-parallel
+-- Tag no-parallel: Messes with internal cache
+
+SET allow_experimental_query_cache = true;
+
+SYSTEM DROP QUERY CACHE;
+DROP TABLE IF EXISTS eligible_test;
+DROP TABLE IF EXISTS eligible_test2;
+
+-- enable query cache session-wide but also force it individually in each of the statements below
+SET use_query_cache = true;
+
+-- check that SELECT statements create entries in the query cache ...
+SELECT 1 SETTINGS use_query_cache = true;
+SELECT COUNT(*) FROM system.query_cache;
+
+SYSTEM DROP QUERY CACHE;
+
+-- ... whereas all other statement types should not create entries:
+
+-- CREATE
+CREATE TABLE eligible_test (a String) ENGINE=MergeTree ORDER BY a; -- SETTINGS use_query_cache = true; -- SETTINGS rejected as unknown
+SELECT COUNT(*) FROM system.query_cache;
+
+-- ALTER
+ALTER TABLE eligible_test ADD COLUMN b String SETTINGS use_query_cache = true;
+SELECT COUNT(*) FROM system.query_cache;
+
+-- INSERT
+INSERT INTO eligible_test VALUES('a', 'b'); -- SETTINGS use_query_cache = true; -- SETTINGS rejected as unknown
+SELECT COUNT(*) FROM system.query_cache;
+INSERT INTO eligible_test SELECT * FROM eligible_test SETTINGS use_query_cache = true;
+SELECT COUNT(*) FROM system.query_cache;
+
+-- SHOW
+SHOW TABLES SETTINGS use_query_cache = true;
+SELECT COUNT(*) FROM system.query_cache;
+
+-- CHECK
+CHECK TABLE eligible_test SETTINGS use_query_cache = true;
+SELECT COUNT(*) FROM system.query_cache;
+
+-- DESCRIBE
+DESCRIBE TABLE eligible_test SETTINGS use_query_cache = true;
+SELECT COUNT(*) FROM system.query_cache;
+
+-- EXISTS
+EXISTS TABLE eligible_test SETTINGS use_query_cache = true;
+SELECT COUNT(*) FROM system.query_cache;
+
+-- KILL
+KILL QUERY WHERE query_id='3-857d-4a57-9ee0-3c7da5d60a90' SETTINGS use_query_cache = true;
+SELECT COUNT(*) FROM system.query_cache;
+
+-- OPTIMIZE
+OPTIMIZE TABLE eligible_test FINAL SETTINGS use_query_cache = true;
+SELECT COUNT(*) FROM system.query_cache;
+
+-- TRUNCATE
+TRUNCATE TABLE eligible_test SETTINGS use_query_cache = true;
+SELECT COUNT(*) FROM system.query_cache;
+
+-- RENAME
+RENAME TABLE eligible_test TO eligible_test2 SETTINGS use_query_cache = true;
+SELECT COUNT(*) FROM system.query_cache;
+
+SYSTEM DROP QUERY CACHE;
+DROP TABLE eligible_test2;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_events.reference b/tests/queries/0_stateless/02494_query_cache_events.reference
similarity index 100%
rename from tests/queries/0_stateless/02494_query_result_cache_events.reference
rename to tests/queries/0_stateless/02494_query_cache_events.reference
diff --git a/tests/queries/0_stateless/02494_query_cache_events.sql b/tests/queries/0_stateless/02494_query_cache_events.sql
new file mode 100644
index 00000000000..d775467d525
--- /dev/null
+++ b/tests/queries/0_stateless/02494_query_cache_events.sql
@@ -0,0 +1,32 @@
+-- Tags: no-parallel
+-- Tag no-parallel: Messes with internal cache
+
+SET allow_experimental_query_cache = true;
+
+-- Start with empty query cache (QC) and query log
+SYSTEM DROP QUERY CACHE;
+DROP TABLE system.query_log SYNC;
+
+-- Run a query with QC on. The first execution is a QC miss.
+SELECT '---';
+SELECT 1 SETTINGS use_query_cache = true;
+
+SYSTEM FLUSH LOGS;
+SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses']
+FROM system.query_log
+WHERE type = 'QueryFinish'
+ AND query = 'SELECT 1 SETTINGS use_query_cache = true;';
+
+
+-- Run previous query again with query cache on
+SELECT '---';
+SELECT 1 SETTINGS use_query_cache = true;
+
+DROP TABLE system.query_log SYNC;
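+-- (the first run was flushed and then dropped with the table above, so after this flush only the second run is logged)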
+SYSTEM FLUSH LOGS;
+SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses']
+FROM system.query_log
+WHERE type = 'QueryFinish'
+ AND query = 'SELECT 1 SETTINGS use_query_cache = true;';
+
+SYSTEM DROP QUERY CACHE;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_exception_handling.reference b/tests/queries/0_stateless/02494_query_cache_exception_handling.reference
similarity index 100%
rename from tests/queries/0_stateless/02494_query_result_cache_exception_handling.reference
rename to tests/queries/0_stateless/02494_query_cache_exception_handling.reference
diff --git a/tests/queries/0_stateless/02494_query_cache_exception_handling.sql b/tests/queries/0_stateless/02494_query_cache_exception_handling.sql
new file mode 100644
index 00000000000..4d686d81ed3
--- /dev/null
+++ b/tests/queries/0_stateless/02494_query_cache_exception_handling.sql
@@ -0,0 +1,12 @@
+-- Tags: no-parallel
+-- Tag no-parallel: Messes with internal cache
+
+SET allow_experimental_query_cache = true;
+
+SYSTEM DROP QUERY CACHE;
+
+-- If an exception is thrown during query execution, no entry must be created in the query cache
+SELECT throwIf(1) SETTINGS use_query_cache = true; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO }
+SELECT COUNT(*) FROM system.query_cache;
+
+SYSTEM DROP QUERY CACHE;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_explain.reference b/tests/queries/0_stateless/02494_query_cache_explain.reference
similarity index 100%
rename from tests/queries/0_stateless/02494_query_result_cache_explain.reference
rename to tests/queries/0_stateless/02494_query_cache_explain.reference
diff --git a/tests/queries/0_stateless/02494_query_result_cache_explain.sql b/tests/queries/0_stateless/02494_query_cache_explain.sql
similarity index 54%
rename from tests/queries/0_stateless/02494_query_result_cache_explain.sql
rename to tests/queries/0_stateless/02494_query_cache_explain.sql
index 0daed9df151..67717efde13 100644
--- a/tests/queries/0_stateless/02494_query_result_cache_explain.sql
+++ b/tests/queries/0_stateless/02494_query_cache_explain.sql
@@ -1,23 +1,23 @@
-- Tags: no-parallel
-- Tag no-parallel: Messes with internal cache
-SET allow_experimental_query_result_cache = true;
+SET allow_experimental_query_cache = true;
-SYSTEM DROP QUERY RESULT CACHE;
+SYSTEM DROP QUERY CACHE;
--- Run a silly query with a non-trivial plan and put the result into the query result cache (QRC)
-SELECT 1 + number from system.numbers LIMIT 1 SETTINGS use_query_result_cache = true;
-SELECT count(*) FROM system.query_result_cache;
+-- Run a silly query with a non-trivial plan and put the result into the query cache (QC)
+SELECT 1 + number from system.numbers LIMIT 1 SETTINGS use_query_cache = true;
+SELECT count(*) FROM system.query_cache;
--- EXPLAIN PLAN should show the same regardless if the result is calculated or read from the QRC
+-- EXPLAIN PLAN should show the same regardless of whether the result is computed or read from the QC
EXPLAIN PLAN SELECT 1 + number from system.numbers LIMIT 1;
-EXPLAIN PLAN SELECT 1 + number from system.numbers LIMIT 1 SETTINGS use_query_result_cache = true; -- (*)
+EXPLAIN PLAN SELECT 1 + number from system.numbers LIMIT 1 SETTINGS use_query_cache = true; -- (*)
--- EXPLAIN PIPELINE should show the same regardless if the result is calculated or read from the QRC
+-- EXPLAIN PIPELINE should show the same regardless of whether the result is computed or read from the QC
EXPLAIN PIPELINE SELECT 1 + number from system.numbers LIMIT 1;
-EXPLAIN PIPELINE SELECT 1 + number from system.numbers LIMIT 1 SETTINGS use_query_result_cache = true; -- (*)
+EXPLAIN PIPELINE SELECT 1 + number from system.numbers LIMIT 1 SETTINGS use_query_cache = true; -- (*)
--- Statements (*) must not cache their results into the QRC
-SELECT count(*) FROM system.query_result_cache;
+-- Statements (*) must not cache their results into the QC
+SELECT count(*) FROM system.query_cache;
-SYSTEM DROP QUERY RESULT CACHE;
+SYSTEM DROP QUERY CACHE;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_min_query_duration.reference b/tests/queries/0_stateless/02494_query_cache_min_query_duration.reference
similarity index 100%
rename from tests/queries/0_stateless/02494_query_result_cache_min_query_duration.reference
rename to tests/queries/0_stateless/02494_query_cache_min_query_duration.reference
diff --git a/tests/queries/0_stateless/02494_query_cache_min_query_duration.sql b/tests/queries/0_stateless/02494_query_cache_min_query_duration.sql
new file mode 100644
index 00000000000..7d759c86130
--- /dev/null
+++ b/tests/queries/0_stateless/02494_query_cache_min_query_duration.sql
@@ -0,0 +1,20 @@
+-- Tags: no-parallel
+-- Tag no-parallel: Messes with internal cache
+
+SET allow_experimental_query_cache = true;
+
+SYSTEM DROP QUERY CACHE;
+
+-- This creates an entry in the query cache ...
+SELECT 1 SETTINGS use_query_cache = true;
+SELECT COUNT(*) FROM system.query_cache;
+
+SYSTEM DROP QUERY CACHE;
+
+SELECT '---';
+
+-- ... but this does not because the query executes much faster than the specified minimum query duration for caching the result
+SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_duration = 10000;
+SELECT COUNT(*) FROM system.query_cache;
+
+SYSTEM DROP QUERY CACHE;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_min_query_runs.reference b/tests/queries/0_stateless/02494_query_cache_min_query_runs.reference
similarity index 100%
rename from tests/queries/0_stateless/02494_query_result_cache_min_query_runs.reference
rename to tests/queries/0_stateless/02494_query_cache_min_query_runs.reference
diff --git a/tests/queries/0_stateless/02494_query_cache_min_query_runs.sql b/tests/queries/0_stateless/02494_query_cache_min_query_runs.sql
new file mode 100644
index 00000000000..2401beeab13
--- /dev/null
+++ b/tests/queries/0_stateless/02494_query_cache_min_query_runs.sql
@@ -0,0 +1,34 @@
+-- Tags: no-parallel
+-- Tag no-parallel: Messes with internal cache
+
+SET allow_experimental_query_cache = true;
+
+SYSTEM DROP QUERY CACHE;
+
+-- Cache the query after the 1st query invocation
+SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_runs = 0;
+SELECT COUNT(*) FROM system.query_cache;
+
+SELECT '---';
+
+SYSTEM DROP QUERY CACHE;
+
+-- Cache the query result after the 2nd query invocation
+SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_runs = 1;
+SELECT COUNT(*) FROM system.query_cache;
+SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_runs = 1;
+SELECT COUNT(*) FROM system.query_cache;
+
+SELECT '---';
+
+SYSTEM DROP QUERY CACHE;
+
+-- Cache the query result after the 3rd query invocation
+SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_runs = 2;
+SELECT COUNT(*) FROM system.query_cache;
+SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_runs = 2;
+SELECT COUNT(*) FROM system.query_cache;
+SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_runs = 2;
+SELECT COUNT(*) FROM system.query_cache;
+
+SYSTEM DROP QUERY CACHE;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_nondeterministic_functions.reference b/tests/queries/0_stateless/02494_query_cache_nondeterministic_functions.reference
similarity index 100%
rename from tests/queries/0_stateless/02494_query_result_cache_nondeterministic_functions.reference
rename to tests/queries/0_stateless/02494_query_cache_nondeterministic_functions.reference
diff --git a/tests/queries/0_stateless/02494_query_cache_nondeterministic_functions.sql b/tests/queries/0_stateless/02494_query_cache_nondeterministic_functions.sql
new file mode 100644
index 00000000000..534d63aa427
--- /dev/null
+++ b/tests/queries/0_stateless/02494_query_cache_nondeterministic_functions.sql
@@ -0,0 +1,18 @@
+-- Tags: no-parallel
+-- Tag no-parallel: Messes with internal cache
+
+SET allow_experimental_query_cache = true;
+
+SYSTEM DROP QUERY CACHE;
+
+-- rand() is non-deterministic; with default settings, no entry should be created in the query cache
+SELECT COUNT(rand(1)) SETTINGS use_query_cache = true;
+SELECT COUNT(*) FROM system.query_cache;
+
+SELECT '---';
+
+-- But an entry can be forced using a setting
+SELECT COUNT(RAND(1)) SETTINGS use_query_cache = true, query_cache_store_results_of_queries_with_nondeterministic_functions = true;
+SELECT COUNT(*) FROM system.query_cache;
+
+SYSTEM DROP QUERY CACHE;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_normalize_ast.reference b/tests/queries/0_stateless/02494_query_cache_normalize_ast.reference
similarity index 100%
rename from tests/queries/0_stateless/02494_query_result_cache_normalize_ast.reference
rename to tests/queries/0_stateless/02494_query_cache_normalize_ast.reference
diff --git a/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql b/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql
new file mode 100644
index 00000000000..7e3cd273312
--- /dev/null
+++ b/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql
@@ -0,0 +1,31 @@
+-- Tags: no-parallel
+-- Tag no-parallel: Messes with internal cache
+
+SET allow_experimental_query_cache = true;
+
+-- Start with empty query cache (QC) and query log.
+SYSTEM DROP QUERY CACHE;
+DROP TABLE system.query_log SYNC;
+
+-- Run query whose result gets cached in the query cache.
+-- Besides "use_query_cache", pass two more knobs (one QC-specific knob and one non-QC-specific knob). We just care
+-- *that* they are passed and not about their effect.
+SELECT 1 SETTINGS use_query_cache = true, query_cache_store_results_of_queries_with_nondeterministic_functions = true, max_threads = 16;
+
+-- Check that entry in QC exists
+SELECT COUNT(*) FROM system.query_cache;
+
+-- Run the same SELECT but with different SETTINGS. We want its result to be served from the QC (--> passive mode, achieve it by
+-- disabling active mode)
+SELECT '---';
+SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false, max_threads = 16;
+
+-- Technically, both SELECT queries have different ASTs, leading to different QC keys. QC does some AST normalization (erase all
+-- QC-related settings) such that the keys match regardless. Verify by checking that the second query caused a QC hit.
+SYSTEM FLUSH LOGS;
+SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses']
+FROM system.query_log
+WHERE type = 'QueryFinish'
+ AND query = 'SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false, max_threads = 16;';
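+-- (expected: QueryCacheHits = 1, QueryCacheMisses = 0)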
+
+SYSTEM DROP QUERY CACHE;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_passive_usage.reference b/tests/queries/0_stateless/02494_query_cache_passive_usage.reference
similarity index 100%
rename from tests/queries/0_stateless/02494_query_result_cache_passive_usage.reference
rename to tests/queries/0_stateless/02494_query_cache_passive_usage.reference
diff --git a/tests/queries/0_stateless/02494_query_cache_passive_usage.sql b/tests/queries/0_stateless/02494_query_cache_passive_usage.sql
new file mode 100644
index 00000000000..8f1e3972b6d
--- /dev/null
+++ b/tests/queries/0_stateless/02494_query_cache_passive_usage.sql
@@ -0,0 +1,41 @@
+-- Tags: no-parallel
+-- Tag no-parallel: Messes with internal cache
+
+SET allow_experimental_query_cache = true;
+
+-- Start with empty query cache (QC).
+SYSTEM DROP QUERY CACHE;
+
+-- By default, don't write query result into QC.
+SELECT 1;
+SELECT COUNT(*) FROM system.query_cache;
+
+SELECT '-----';
+
+-- Try to retrieve query from empty QC using the passive mode. Do this by disabling the active mode. The cache should still be empty (no insert).
+SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false;
+SELECT COUNT(*) FROM system.query_cache;
+
+SELECT '-----';
+
+-- Put query into cache.
+SELECT 1 SETTINGS use_query_cache = true;
+SELECT COUNT(*) FROM system.query_cache;
+
+SELECT '-----';
+
+-- Run same query with passive mode again. There must still be one entry in the QC and we must have a QC hit.
+
+-- Get rid of the query log entries of the previous SELECTs
+DROP TABLE system.query_log SYNC;
+
+SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false;
+SELECT COUNT(*) FROM system.query_cache;
+
+SYSTEM FLUSH LOGS;
+SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses']
+FROM system.query_log
+WHERE type = 'QueryFinish'
+ AND query = 'SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false;';
+
+SYSTEM DROP QUERY CACHE;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_secrets.reference b/tests/queries/0_stateless/02494_query_cache_secrets.reference
similarity index 100%
rename from tests/queries/0_stateless/02494_query_result_cache_secrets.reference
rename to tests/queries/0_stateless/02494_query_cache_secrets.reference
diff --git a/tests/queries/0_stateless/02494_query_cache_secrets.sql b/tests/queries/0_stateless/02494_query_cache_secrets.sql
new file mode 100644
index 00000000000..99a972b003c
--- /dev/null
+++ b/tests/queries/0_stateless/02494_query_cache_secrets.sql
@@ -0,0 +1,15 @@
+-- Tags: no-parallel, no-fasttest
+-- Tag no-fasttest: Depends on OpenSSL
+-- Tag no-parallel: Messes with internal cache
+
+SET allow_experimental_query_cache = true;
+
+SYSTEM DROP QUERY CACHE;
+
+-- Cache a result of a query with secret in the query cache
+SELECT hex(encrypt('aes-128-ecb', 'plaintext', 'passwordpassword')) SETTINGS use_query_cache = true;
+
+-- The secret should not be revealed in system.query_cache
+SELECT query FROM system.query_cache;
+
+SYSTEM DROP QUERY CACHE;
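
A sketch of how the masking could be checked more explicitly; the exact placeholder ClickHouse substitutes for secret arguments is an assumption here:

    -- The plaintext key must not appear in the cached query text ...
    SELECT count() FROM system.query_cache WHERE query LIKE '%passwordpassword%'; -- expected: 0
    -- ... it is assumed to be replaced by a marker such as '[HIDDEN]':
    SELECT count() FROM system.query_cache WHERE query LIKE '%HIDDEN%'; -- expected: 1 (assumption)
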
diff --git a/tests/queries/0_stateless/02494_query_result_cache_ttl_long.reference b/tests/queries/0_stateless/02494_query_cache_ttl_long.reference
similarity index 100%
rename from tests/queries/0_stateless/02494_query_result_cache_ttl_long.reference
rename to tests/queries/0_stateless/02494_query_cache_ttl_long.reference
diff --git a/tests/queries/0_stateless/02494_query_cache_ttl_long.sql b/tests/queries/0_stateless/02494_query_cache_ttl_long.sql
new file mode 100644
index 00000000000..135ddf2195c
--- /dev/null
+++ b/tests/queries/0_stateless/02494_query_cache_ttl_long.sql
@@ -0,0 +1,31 @@
+-- Tags: no-fasttest, no-parallel, long
+-- Tag no-fasttest: Test runtime is > 6 sec
+-- Tag long: Test runtime is > 6 sec
+-- Tag no-parallel: Messes with internal cache
+
+SET allow_experimental_query_cache = true;
+
+SYSTEM DROP QUERY CACHE;
+
+-- Cache the query result in the query cache with a TTL of 3 sec
+SELECT 1 SETTINGS use_query_cache = true, query_cache_ttl = 3;
+
+-- Expect one non-stale cache entry
+SELECT COUNT(*) FROM system.query_cache;
+SELECT stale FROM system.query_cache;
+
+-- Wait until the entry has expired
+SELECT sleep(3);
+SELECT sleep(3);
+SELECT stale FROM system.query_cache;
+
+SELECT '---';
+
+-- Run the same query as before
+SELECT 1 SETTINGS use_query_cache = true, query_cache_ttl = 3;
+
+-- The entry should have been refreshed (non-stale)
+SELECT COUNT(*) FROM system.query_cache;
+SELECT stale FROM system.query_cache;
+
+SYSTEM DROP QUERY CACHE;
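
A short sketch of the TTL mechanics the test depends on: query_cache_ttl is a per-query setting, and system.query_cache exposes expiry through the stale column (names taken from the test above):

    -- Expired entries are marked stale rather than dropped immediately:
    SELECT 1 SETTINGS use_query_cache = true, query_cache_ttl = 60;
    SELECT query, stale FROM system.query_cache; -- stale = 0 until 60 sec have passed
    -- Re-running the query after expiry recomputes the result and refreshes the entry.
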
diff --git a/tests/queries/0_stateless/02494_query_result_cache_case_agnostic_matching.sql b/tests/queries/0_stateless/02494_query_result_cache_case_agnostic_matching.sql
deleted file mode 100644
index e37c0a9cb3f..00000000000
--- a/tests/queries/0_stateless/02494_query_result_cache_case_agnostic_matching.sql
+++ /dev/null
@@ -1,29 +0,0 @@
--- Tags: no-parallel
--- Tag no-parallel: Messes with internal cache
-
-SET allow_experimental_query_result_cache = true;
-
--- Start with empty query result cache (QRC) and query log
-SYSTEM DROP QUERY RESULT CACHE;
-DROP TABLE system.query_log SYNC;
-
--- Insert an entry into the query result cache.
-SELECT 1 SETTINGS use_query_result_cache = true;
--- Check that entry in QRC exists
-SELECT COUNT(*) FROM system.query_result_cache;
-
--- Run the same SELECT but with different case (--> select). We want its result to be served from the QRC.
-SELECT '---';
-select 1 SETTINGS use_query_result_cache = true;
-
--- There should still be just one entry in the QRC
-SELECT COUNT(*) FROM system.query_result_cache;
-
--- The second query should cause a QRC hit.
-SYSTEM FLUSH LOGS;
-SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses']
-FROM system.query_log
-WHERE type = 'QueryFinish'
- AND query = 'select 1 SETTINGS use_query_result_cache = true;';
-
-SYSTEM DROP QUERY RESULT CACHE;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_drop_cache.sql b/tests/queries/0_stateless/02494_query_result_cache_drop_cache.sql
deleted file mode 100644
index 49e4298bd76..00000000000
--- a/tests/queries/0_stateless/02494_query_result_cache_drop_cache.sql
+++ /dev/null
@@ -1,12 +0,0 @@
--- Tags: no-parallel
--- Tag no-parallel: Messes with internal cache
-
-SET allow_experimental_query_result_cache = true;
-
--- Cache query result in query result cache
-SELECT 1 SETTINGS use_query_result_cache = true;
-SELECT count(*) FROM system.query_result_cache;
-
--- No query results are cached after DROP
-SYSTEM DROP QUERY RESULT CACHE;
-SELECT count(*) FROM system.query_result_cache;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_eligible_queries.sql b/tests/queries/0_stateless/02494_query_result_cache_eligible_queries.sql
deleted file mode 100644
index 23a869f9df7..00000000000
--- a/tests/queries/0_stateless/02494_query_result_cache_eligible_queries.sql
+++ /dev/null
@@ -1,68 +0,0 @@
--- Tags: no-parallel
--- Tag no-parallel: Messes with internal cache
-
-SET allow_experimental_query_result_cache = true;
-
-SYSTEM DROP QUERY RESULT CACHE;
-DROP TABLE IF EXISTS eligible_test;
-DROP TABLE IF EXISTS eligible_test2;
-
--- enable query result cache session-wide but also force it individually in each of below statements
-SET use_query_result_cache = true;
-
--- check that SELECT statements create entries in the query result cache ...
-SELECT 1 SETTINGS use_query_result_cache = true;
-SELECT COUNT(*) FROM system.query_result_cache;
-
-SYSTEM DROP QUERY RESULT CACHE;
-
--- ... and all other statements also should not create entries:
-
--- CREATE
-CREATE TABLE eligible_test (a String) ENGINE=MergeTree ORDER BY a; -- SETTINGS use_query_result_cache = true; -- SETTINGS rejected as unknown
-SELECT COUNT(*) FROM system.query_result_cache;
-
--- ALTER
-ALTER TABLE eligible_test ADD COLUMN b String SETTINGS use_query_result_cache = true;
-SELECT COUNT(*) FROM system.query_result_cache;
-
--- INSERT
-INSERT INTO eligible_test VALUES('a', 'b'); -- SETTINGS use_query_result_cache = true; -- SETTINGS rejected as unknown
-SELECT COUNT(*) FROM system.query_result_cache;
-INSERT INTO eligible_test SELECT * FROM eligible_test SETTINGS use_query_result_cache = true;
-SELECT COUNT(*) FROM system.query_result_cache;
-
--- SHOW
-SHOW TABLES SETTINGS use_query_result_cache = true;
-SELECT COUNT(*) FROM system.query_result_cache;
-
--- CHECK
-CHECK TABLE eligible_test SETTINGS use_query_result_cache = true;
-SELECT COUNT(*) FROM system.query_result_cache;
-
--- DESCRIBE
-DESCRIBE TABLE eligible_test SETTINGS use_query_result_cache = true;
-SELECT COUNT(*) FROM system.query_result_cache;
-
--- EXISTS
-EXISTS TABLE eligible_test SETTINGS use_query_result_cache = true;
-SELECT COUNT(*) FROM system.query_result_cache;
-
--- KILL
-KILL QUERY WHERE query_id='3-857d-4a57-9ee0-3c7da5d60a90' SETTINGS use_query_result_cache = true;
-SELECT COUNT(*) FROM system.query_result_cache;
-
--- OPTIMIZE
-OPTIMIZE TABLE eligible_test FINAL SETTINGS use_query_result_cache = true;
-SELECT COUNT(*) FROM system.query_result_cache;
-
--- TRUNCATE
-TRUNCATE TABLE eligible_test SETTINGS use_query_result_cache = true;
-SELECT COUNT(*) FROM system.query_result_cache;
-
--- RENAME
-RENAME TABLE eligible_test TO eligible_test2 SETTINGS use_query_result_cache = true;
-SELECT COUNT(*) FROM system.query_result_cache;
-
-SYSTEM DROP QUERY RESULT CACHE;
-DROP TABLE eligible_test2;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_events.sql b/tests/queries/0_stateless/02494_query_result_cache_events.sql
deleted file mode 100644
index 73f95ef8f36..00000000000
--- a/tests/queries/0_stateless/02494_query_result_cache_events.sql
+++ /dev/null
@@ -1,32 +0,0 @@
--- Tags: no-parallel
--- Tag no-parallel: Messes with internal cache
-
-SET allow_experimental_query_result_cache = true;
-
--- Start with empty query result cache (QRC) and query log
-SYSTEM DROP QUERY RESULT CACHE;
-DROP TABLE system.query_log SYNC;
-
--- Run a query with QRC on. The first execution is a QRC miss.
-SELECT '---';
-SELECT 1 SETTINGS use_query_result_cache = true;
-
-SYSTEM FLUSH LOGS;
-SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses']
-FROM system.query_log
-WHERE type = 'QueryFinish'
- AND query = 'SELECT 1 SETTINGS use_query_result_cache = true;';
-
-
--- Run previous query again with query result cache on
-SELECT '---';
-SELECT 1 SETTINGS use_query_result_cache = true;
-
-DROP TABLE system.query_log SYNC;
-SYSTEM FLUSH LOGS;
-SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses']
-FROM system.query_log
-WHERE type = 'QueryFinish'
- AND query = 'SELECT 1 SETTINGS use_query_result_cache = true;';
-
-SYSTEM DROP QUERY RESULT CACHE;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_exception_handling.sql b/tests/queries/0_stateless/02494_query_result_cache_exception_handling.sql
deleted file mode 100644
index 4ba3b73ad2f..00000000000
--- a/tests/queries/0_stateless/02494_query_result_cache_exception_handling.sql
+++ /dev/null
@@ -1,12 +0,0 @@
--- Tags: no-parallel
--- Tag no-parallel: Messes with internal cache
-
-SET allow_experimental_query_result_cache = true;
-
-SYSTEM DROP QUERY RESULT CACHE;
-
--- If an exception is thrown during query execution, no entry must be created in the query result cache
-SELECT throwIf(1) SETTINGS use_query_result_cache = true; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO }
-SELECT COUNT(*) FROM system.query_result_cache;
-
-SYSTEM DROP QUERY RESULT CACHE;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_min_query_duration.sql b/tests/queries/0_stateless/02494_query_result_cache_min_query_duration.sql
deleted file mode 100644
index 37cd4590396..00000000000
--- a/tests/queries/0_stateless/02494_query_result_cache_min_query_duration.sql
+++ /dev/null
@@ -1,20 +0,0 @@
--- Tags: no-parallel
--- Tag no-parallel: Messes with internal cache
-
-SET allow_experimental_query_result_cache = true;
-
-SYSTEM DROP QUERY RESULT CACHE;
-
--- This creates an entry in the query result cache ...
-SELECT 1 SETTINGS use_query_result_cache = true;
-SELECT COUNT(*) FROM system.query_result_cache;
-
-SYSTEM DROP QUERY RESULT CACHE;
-
-SELECT '---';
-
--- ... but this does not because the query executes much faster than the specified minumum query duration for caching the result
-SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_duration = 10000;
-SELECT COUNT(*) FROM system.query_result_cache;
-
-SYSTEM DROP QUERY RESULT CACHE;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_min_query_runs.sql b/tests/queries/0_stateless/02494_query_result_cache_min_query_runs.sql
deleted file mode 100644
index 4a93ee507ab..00000000000
--- a/tests/queries/0_stateless/02494_query_result_cache_min_query_runs.sql
+++ /dev/null
@@ -1,34 +0,0 @@
--- Tags: no-parallel
--- Tag no-parallel: Messes with internal cache
-
-SET allow_experimental_query_result_cache = true;
-
-SYSTEM DROP QUERY RESULT CACHE;
-
--- Cache the query result after the 1st query invocation
-SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_runs = 0;
-SELECT COUNT(*) FROM system.query_result_cache;
-
-SELECT '---';
-
-SYSTEM DROP QUERY RESULT CACHE;
-
--- Cache the query result after the 2nd query invocation
-SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_runs = 1;
-SELECT COUNT(*) FROM system.query_result_cache;
-SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_runs = 1;
-SELECT COUNT(*) FROM system.query_result_cache;
-
-SELECT '---';
-
-SYSTEM DROP QUERY RESULT CACHE;
-
--- Cache the query result after the 3rd query invocation
-SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_runs = 2;
-SELECT COUNT(*) FROM system.query_result_cache;
-SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_runs = 2;
-SELECT COUNT(*) FROM system.query_result_cache;
-SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_min_query_runs = 2;
-SELECT COUNT(*) FROM system.query_result_cache;
-
-SYSTEM DROP QUERY RESULT CACHE;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_nondeterministic_functions.sql b/tests/queries/0_stateless/02494_query_result_cache_nondeterministic_functions.sql
deleted file mode 100644
index 87dc04c9919..00000000000
--- a/tests/queries/0_stateless/02494_query_result_cache_nondeterministic_functions.sql
+++ /dev/null
@@ -1,18 +0,0 @@
--- Tags: no-parallel
--- Tag no-parallel: Messes with internal cache
-
-SET allow_experimental_query_result_cache = true;
-
-SYSTEM DROP QUERY RESULT CACHE;
-
--- rand() is non-deterministic, with default settings no entry in the query result cache should be created
-SELECT COUNT(rand(1)) SETTINGS use_query_result_cache = true;
-SELECT COUNT(*) FROM system.query_result_cache;
-
-SELECT '---';
-
--- But an entry can be forced using a setting
-SELECT COUNT(RAND(1)) SETTINGS use_query_result_cache = true, query_result_cache_store_results_of_queries_with_nondeterministic_functions = true;
-SELECT COUNT(*) FROM system.query_result_cache;
-
-SYSTEM DROP QUERY RESULT CACHE;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_normalize_ast.sql b/tests/queries/0_stateless/02494_query_result_cache_normalize_ast.sql
deleted file mode 100644
index 15bab7e5584..00000000000
--- a/tests/queries/0_stateless/02494_query_result_cache_normalize_ast.sql
+++ /dev/null
@@ -1,31 +0,0 @@
--- Tags: no-parallel
--- Tag no-parallel: Messes with internal cache
-
-SET allow_experimental_query_result_cache = true;
-
--- Start with empty query result cache (QRC) and query log.
-SYSTEM DROP QUERY RESULT CACHE;
-DROP TABLE system.query_log SYNC;
-
--- Run query whose result gets cached in the query result cache.
--- Besides "use_query_result_cache", pass two more knobs (one QRC-specific knob and one non-QRC-specific knob). We just care
--- *that* they are passed and not about their effect.
-SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_store_results_of_queries_with_nondeterministic_functions = true, max_threads = 16;
-
--- Check that entry in QRC exists
-SELECT COUNT(*) FROM system.query_result_cache;
-
--- Run the same SELECT but with different SETTINGS. We want its result to be served from the QRC (--> passive mode, achieve it by
--- disabling active mode)
-SELECT '---';
-SELECT 1 SETTINGS use_query_result_cache = true, enable_writes_to_query_result_cache = false, max_threads = 16;
-
--- Technically, both SELECT queries have different ASTs, leading to different QRC keys. QRC does some AST normalization (erase all
--- QRC-related settings) such that the keys match regardless. Verify by checking that the second query caused a QRC hit.
-SYSTEM FLUSH LOGS;
-SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses']
-FROM system.query_log
-WHERE type = 'QueryFinish'
- AND query = 'SELECT 1 SETTINGS use_query_result_cache = true, enable_writes_to_query_result_cache = false, max_threads = 16;';
-
-SYSTEM DROP QUERY RESULT CACHE;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_passive_usage.sql b/tests/queries/0_stateless/02494_query_result_cache_passive_usage.sql
deleted file mode 100644
index 86c06461463..00000000000
--- a/tests/queries/0_stateless/02494_query_result_cache_passive_usage.sql
+++ /dev/null
@@ -1,41 +0,0 @@
--- Tags: no-parallel
--- Tag no-parallel: Messes with internal cache
-
-SET allow_experimental_query_result_cache = true;
-
--- Start with empty query result cache (QRC).
-SYSTEM DROP QUERY RESULT CACHE;
-
--- By default, don't write query result into query result cache (QRC).
-SELECT 1;
-SELECT COUNT(*) FROM system.query_result_cache;
-
-SELECT '-----';
-
--- Try to retrieve query result from empty QRC using the passive mode. Do this by disabling the active mode. The cache should still be empty (no insert).
-SELECT 1 SETTINGS use_query_result_cache = true, enable_writes_to_query_result_cache = false;
-SELECT COUNT(*) FROM system.query_result_cache;
-
-SELECT '-----';
-
--- Put query result into cache.
-SELECT 1 SETTINGS use_query_result_cache = true;
-SELECT COUNT(*) FROM system.query_result_cache;
-
-SELECT '-----';
-
--- Run same query with passive mode again. There must still be one entry in the QRC and we must have a QRC hit.
-
--- Get rid of log of previous SELECT
-DROP TABLE system.query_log SYNC;
-
-SELECT 1 SETTINGS use_query_result_cache = true, enable_writes_to_query_result_cache = false;
-SELECT COUNT(*) FROM system.query_result_cache;
-
-SYSTEM FLUSH LOGS;
-SELECT ProfileEvents['QueryResultCacheHits'], ProfileEvents['QueryResultCacheMisses']
-FROM system.query_log
-WHERE type = 'QueryFinish'
- AND query = 'SELECT 1 SETTINGS use_query_result_cache = true, enable_writes_to_query_result_cache = false;';
-
-SYSTEM DROP QUERY RESULT CACHE;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_secrets.sql b/tests/queries/0_stateless/02494_query_result_cache_secrets.sql
deleted file mode 100644
index b45db639efb..00000000000
--- a/tests/queries/0_stateless/02494_query_result_cache_secrets.sql
+++ /dev/null
@@ -1,15 +0,0 @@
--- Tags: no-parallel, no-fasttest
--- Tag no-fasttest: Depends on OpenSSL
--- Tag no-parallel: Messes with internal cache
-
-SET allow_experimental_query_result_cache = true;
-
-SYSTEM DROP QUERY RESULT CACHE;
-
--- Cache a result of a query with secret in the query result cache
-SELECT hex(encrypt('aes-128-ecb', 'plaintext', 'passwordpassword')) SETTINGS use_query_result_cache = true;
-
--- The secret should not be revealed in system.query_result_cache
-SELECT query FROM system.query_result_cache;
-
-SYSTEM DROP QUERY RESULT CACHE;
diff --git a/tests/queries/0_stateless/02494_query_result_cache_ttl_long.sql b/tests/queries/0_stateless/02494_query_result_cache_ttl_long.sql
deleted file mode 100644
index 7acee9b2a5b..00000000000
--- a/tests/queries/0_stateless/02494_query_result_cache_ttl_long.sql
+++ /dev/null
@@ -1,31 +0,0 @@
--- Tags: no-fasttest, no-parallel, long
--- Tag no-fasttest: Test runtime is > 6 sec
--- Tag long: Test runtime is > 6 sec
--- Tag no-parallel: Messes with internal cache
-
-SET allow_experimental_query_result_cache = true;
-
-SYSTEM DROP QUERY RESULT CACHE;
-
--- Cache query result into query result cache with a TTL of 3 sec
-SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_ttl = 3;
-
--- Expect one non-stale cache entry
-SELECT COUNT(*) FROM system.query_result_cache;
-SELECT stale FROM system.query_result_cache;
-
--- Wait until entry is expired
-SELECT sleep(3);
-SELECT sleep(3);
-SELECT stale FROM system.query_result_cache;
-
-SELECT '---';
-
--- Run same query as before
-SELECT 1 SETTINGS use_query_result_cache = true, query_result_cache_ttl = 3;
-
--- The entry should have been refreshed (non-stale)
-SELECT COUNT(*) FROM system.query_result_cache;
-SELECT stale FROM system.query_result_cache;
-
-SYSTEM DROP QUERY RESULT CACHE;
diff --git a/tests/queries/0_stateless/025335_analyzer_limit.reference b/tests/queries/0_stateless/025335_analyzer_limit.reference
new file mode 100644
index 00000000000..ea7e98aa8ab
--- /dev/null
+++ b/tests/queries/0_stateless/025335_analyzer_limit.reference
@@ -0,0 +1,10 @@
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
diff --git a/tests/queries/0_stateless/025335_analyzer_limit.sql b/tests/queries/0_stateless/025335_analyzer_limit.sql
new file mode 100644
index 00000000000..8f98d823e5c
--- /dev/null
+++ b/tests/queries/0_stateless/025335_analyzer_limit.sql
@@ -0,0 +1,3 @@
+SET allow_experimental_analyzer = 1;
+
+SELECT number FROM numbers(100) LIMIT 10 OFFSET 10;
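
For reference, the same ten rows (10..19) can be selected with the combined MySQL-style form that ClickHouse also accepts:

    SELECT number FROM numbers(100) LIMIT 10, 10; -- LIMIT <offset>, <count>
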
diff --git a/tests/queries/0_stateless/025334_keyed_siphash.reference b/tests/queries/0_stateless/02534_keyed_siphash.reference
similarity index 100%
rename from tests/queries/0_stateless/025334_keyed_siphash.reference
rename to tests/queries/0_stateless/02534_keyed_siphash.reference
diff --git a/tests/queries/0_stateless/025334_keyed_siphash.sql b/tests/queries/0_stateless/02534_keyed_siphash.sql
similarity index 100%
rename from tests/queries/0_stateless/025334_keyed_siphash.sql
rename to tests/queries/0_stateless/02534_keyed_siphash.sql
diff --git a/tests/queries/0_stateless/02536_date_from_number_inference_fix.reference b/tests/queries/0_stateless/02536_date_from_number_inference_fix.reference
new file mode 100644
index 00000000000..3fb7eba4357
--- /dev/null
+++ b/tests/queries/0_stateless/02536_date_from_number_inference_fix.reference
@@ -0,0 +1,3 @@
+x Nullable(Int64)
+20000101
+19000101
diff --git a/tests/queries/0_stateless/02536_date_from_number_inference_fix.sql b/tests/queries/0_stateless/02536_date_from_number_inference_fix.sql
new file mode 100644
index 00000000000..912057265e7
--- /dev/null
+++ b/tests/queries/0_stateless/02536_date_from_number_inference_fix.sql
@@ -0,0 +1,4 @@
+desc format(JSONEachRow, '{"x" : "20000101"}');
+select * from format(JSONEachRow, '{"x" : "20000101"}');
+select * from format(JSONEachRow, '{"x" : "19000101"}');
+
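
The fix above concerns all-digit strings no longer being inferred as dates; for contrast, a hedged sketch with an unambiguous date string, assuming date inference is enabled (controlled by settings such as input_format_try_infer_dates):

    desc format(JSONEachRow, '{"x" : "2000-01-01"}');
    -- expected: x Nullable(Date) when date inference is on (assumption)
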
diff --git a/tests/queries/0_stateless/02540_duplicate_primary_key.sql b/tests/queries/0_stateless/02540_duplicate_primary_key.sql
index 5934f597334..322b6d74845 100644
--- a/tests/queries/0_stateless/02540_duplicate_primary_key.sql
+++ b/tests/queries/0_stateless/02540_duplicate_primary_key.sql
@@ -4,103 +4,102 @@ set allow_suspicious_low_cardinality_types = 1;
CREATE TABLE test
(
- `timestamp` DateTime,
- `latitude` Nullable(Float32) CODEC(Gorilla, ZSTD(1)),
- `longitude` Nullable(Float32) CODEC(Gorilla, ZSTD(1)),
- `m_registered` UInt8,
- `m_mcc` Nullable(Int16),
- `m_mnc` Nullable(Int16),
- `m_ci` Nullable(Int32),
- `m_tac` Nullable(Int32),
- `enb_id` Nullable(Int32),
- `ci` Nullable(Int32),
- `m_earfcn` Int32,
- `rsrp` Nullable(Int16),
- `rsrq` Nullable(Int16),
- `cqi` Nullable(Int16),
- `source` String,
- `gps_accuracy` Nullable(Float32),
- `operator_name` String,
- `band` Nullable(String),
- `NAME_2` String,
- `NAME_1` String,
- `quadkey_19_key` FixedString(19),
- `quadkey_17_key` FixedString(17),
- `manipulation` UInt8,
- `ss_rsrp` Nullable(Int16),
- `ss_rsrq` Nullable(Int16),
- `ss_sinr` Nullable(Int16),
- `csi_rsrp` Nullable(Int16),
- `csi_rsrq` Nullable(Int16),
- `csi_sinr` Nullable(Int16),
- `altitude` Nullable(Float32),
- `access_technology` Nullable(String),
- `buildingtype` String,
- `LocationType` String,
- `carrier_name` Nullable(String),
- `CustomPolygonName` String,
- `h3_10_pixel` UInt64,
- `stc_cluster` Nullable(String),
- PROJECTION cumsum_projection_simple
+ `coverage` DateTime,
+ `haunt` Nullable(Float32) CODEC(Gorilla, ZSTD(1)),
+ `sail` Nullable(Float32) CODEC(Gorilla, ZSTD(1)),
+ `empowerment_turnstile` UInt8,
+ `empowerment_haversack` Nullable(Int16),
+ `empowerment_function` Nullable(Int16),
+ `empowerment_guidance` Nullable(Int32),
+ `empowerment_high` Nullable(Int32),
+ `trading_id` Nullable(Int32),
+ `guidance` Nullable(Int32),
+ `empowerment_rawhide` Int32,
+ `memo` Nullable(Int16),
+ `oeuvre` Nullable(Int16),
+ `bun` Nullable(Int16),
+ `tramp` String,
+ `anthropology_total` Nullable(Float32),
+ `situation_name` String,
+ `timing` Nullable(String),
+ `NAME_cockroach` String,
+ `NAME_toe` String,
+ `business_error_methane` FixedString(110),
+ `business_instrumentation_methane` FixedString(15),
+ `market` UInt8,
+ `crew_memo` Nullable(Int16),
+ `crew_oeuvre` Nullable(Int16),
+ `crew_fortnight` Nullable(Int16),
+ `princess_memo` Nullable(Int16),
+ `princess_oeuvre` Nullable(Int16),
+ `princess_fortnight` Nullable(Int16),
+ `emerald` Nullable(Float32),
+ `cannon_crate` Nullable(String),
+ `thinking` String,
+ `SectorMen` String,
+ `rage_name` Nullable(String),
+ `DevelopmentalLigandName` String,
+ `chard_heavy_quadrant` UInt64,
+ `poster_effective` Nullable(String),
+ PROJECTION chrysalis_trapezium_ham
(
SELECT
- m_registered,
- toStartOfInterval(timestamp, toIntervalMonth(1)),
- toStartOfWeek(timestamp, 8),
- toStartOfInterval(timestamp, toIntervalDay(1)),
- NAME_1,
- NAME_2,
- operator_name,
- rsrp,
- rsrq,
- ss_rsrp,
- ss_rsrq,
- cqi,
- sum(multiIf(ss_rsrp IS NULL, 0, 1)),
- sum(multiIf(ss_rsrq IS NULL, 0, 1)),
- sum(multiIf(ss_sinr IS NULL, 0, 1)),
- max(toStartOfInterval(timestamp, toIntervalDay(1))),
- max(CAST(CAST(toStartOfInterval(timestamp, toIntervalDay(1)), 'Nullable(DATE)'), 'Nullable(TIMESTAMP)')),
- min(toStartOfInterval(timestamp, toIntervalDay(1))),
- min(CAST(CAST(toStartOfInterval(timestamp, toIntervalDay(1)), 'Nullable(DATE)'), 'Nullable(TIMESTAMP)')),
+ empowerment_turnstile,
+ toStartOfInterval(coverage, toIntervalMonth(1)),
+ toStartOfWeek(coverage, 10),
+ toStartOfInterval(coverage, toIntervalDay(1)),
+ NAME_toe,
+ NAME_cockroach,
+ situation_name,
+ memo,
+ oeuvre,
+ crew_memo,
+ crew_oeuvre,
+ bun,
+ sum(multiIf(crew_memo IS NULL, 0, 1)),
+ sum(multiIf(crew_oeuvre IS NULL, 0, 1)),
+ sum(multiIf(crew_fortnight IS NULL, 0, 1)),
+ max(toStartOfInterval(coverage, toIntervalDay(1))),
+ max(CAST(CAST(toStartOfInterval(coverage, toIntervalDay(1)), 'Nullable(DATE)'), 'Nullable(TIMESTAMP)')),
+ min(toStartOfInterval(coverage, toIntervalDay(1))),
+ min(CAST(CAST(toStartOfInterval(coverage, toIntervalDay(1)), 'Nullable(DATE)'), 'Nullable(TIMESTAMP)')),
count(),
sum(1)
GROUP BY
- m_registered,
- toStartOfInterval(timestamp, toIntervalMonth(1)),
- toStartOfWeek(timestamp, 8),
- toStartOfInterval(timestamp, toIntervalDay(1)),
- m_registered,
- toStartOfInterval(timestamp, toIntervalMonth(1)),
- toStartOfWeek(timestamp, 8),
- toStartOfInterval(timestamp, toIntervalDay(1)),
- NAME_1,
- NAME_2,
- operator_name,
- rsrp,
- rsrq,
- ss_rsrp,
- ss_rsrq,
- cqi
+ empowerment_turnstile,
+ toStartOfInterval(coverage, toIntervalMonth(1)),
+ toStartOfWeek(coverage, 10),
+ toStartOfInterval(coverage, toIntervalDay(1)),
+ empowerment_turnstile,
+ toStartOfInterval(coverage, toIntervalMonth(1)),
+ toStartOfWeek(coverage, 10),
+ toStartOfInterval(coverage, toIntervalDay(1)),
+ NAME_toe,
+ NAME_cockroach,
+ situation_name,
+ memo,
+ oeuvre,
+ crew_memo,
+ crew_oeuvre,
+ bun
)
)
ENGINE = MergeTree
-PARTITION BY toYYYYMM(timestamp)
-ORDER BY (timestamp, operator_name, NAME_1, NAME_2)
-SETTINGS index_granularity = 8192;
+PARTITION BY toYYYYMM(coverage)
+ORDER BY (coverage, situation_name, NAME_toe, NAME_cockroach);
insert into test select * from generateRandom() limit 10;
-with tt as (
- select cast(toStartOfInterval(timestamp, INTERVAL 1 day) as Date) as dd, count() as samples
+with dissonance as (
+ select cast(toStartOfInterval(coverage, INTERVAL 1 day) as Date) as flour, count() as regulation
from test
- group by dd having dd >= toDate(now())-100
+ group by flour having flour >= toDate(now())-100
),
-tt2 as (
- select dd, samples from tt
- union distinct
- select toDate(now())-1, ifnull((select samples from tt where dd = toDate(now())-1),0) as samples
-)
-select dd, samples from tt2 order by dd with fill step 1 limit 100 format Null;
+cheetah as (
+ select flour, regulation from dissonance
+ union distinct
+ select toDate(now())-1, ifnull((select regulation from dissonance where flour = toDate(now())-1),0) as regulation
+)
+select flour, regulation from cheetah order by flour with fill step 1 limit 100 format Null;
drop table test;
diff --git a/tests/queries/0_stateless/02541_lightweight_delete_on_cluster.reference b/tests/queries/0_stateless/02541_lightweight_delete_on_cluster.reference
new file mode 100644
index 00000000000..02801a64d21
--- /dev/null
+++ b/tests/queries/0_stateless/02541_lightweight_delete_on_cluster.reference
@@ -0,0 +1,13 @@
+localhost 9000 0 0 0
+-- { echoOn }
+
+SELECT * FROM t1_local ORDER BY tc1, tc2;
+partition1 1 1
+partition2 1 2
+partition1 2 3
+partition2 2 4
+DELETE FROM t1_local ON CLUSTER test_shard_localhost WHERE tc1 = 1;
+localhost 9000 0 0 0
+SELECT * FROM t1_local ORDER BY tc1, tc2;
+partition1 2 3
+partition2 2 4
diff --git a/tests/queries/0_stateless/02541_lightweight_delete_on_cluster.sql b/tests/queries/0_stateless/02541_lightweight_delete_on_cluster.sql
new file mode 100644
index 00000000000..5d3da88d727
--- /dev/null
+++ b/tests/queries/0_stateless/02541_lightweight_delete_on_cluster.sql
@@ -0,0 +1,23 @@
+-- Tags: distributed, no-replicated-database
+-- Tag no-replicated-database: ON CLUSTER is not allowed
+
+SET distributed_ddl_output_mode='throw';
+
+CREATE TABLE t1_local ON CLUSTER test_shard_localhost (partition_col_1 String, tc1 int, tc2 int) ENGINE=MergeTree() PARTITION BY partition_col_1 ORDER BY tc1;
+
+INSERT INTO t1_local VALUES('partition1', 1,1);
+INSERT INTO t1_local VALUES('partition2', 1,2);
+INSERT INTO t1_local VALUES('partition1', 2,3);
+INSERT INTO t1_local VALUES('partition2', 2,4);
+
+SET allow_experimental_lightweight_delete=1;
+
+-- { echoOn }
+
+SELECT * FROM t1_local ORDER BY tc1, tc2;
+
+DELETE FROM t1_local ON CLUSTER test_shard_localhost WHERE tc1 = 1;
+
+SELECT * FROM t1_local ORDER BY tc1, tc2;
+
+-- { echoOff }
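
A lightweight DELETE marks rows as deleted instead of rewriting data parts; one hedged way to observe this is via system.mutations, assuming the delete is recorded there as a mutation on the row-existence mask:

    SELECT command, is_done FROM system.mutations WHERE table = 't1_local';
    -- expected to show something like: UPDATE _row_exists = 0 WHERE tc1 = 1 (assumption)
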
diff --git a/tests/queries/0_stateless/02542_table_function_format.reference b/tests/queries/0_stateless/02542_table_function_format.reference
new file mode 100644
index 00000000000..c8488967144
--- /dev/null
+++ b/tests/queries/0_stateless/02542_table_function_format.reference
@@ -0,0 +1,22 @@
+a Nullable(String)
+b Nullable(Int64)
+a String
+b Int64
+Hello 111
+World 123
+Hello 111
+World 123
+c1 Nullable(Int64)
+c2 Nullable(Int64)
+c3 Array(Nullable(Int64))
+c4 Array(Array(Nullable(String)))
+a1 Int32
+a2 UInt64
+a3 Array(Int32)
+a4 Array(Array(String))
+1 2 [1,2,3] [['abc'],[],['d','e']]
+1 2 [1,2,3] [['abc'],[],['d','e']]
+20210129005809043707
+123456789
+987654321
+cust_id UInt128
diff --git a/tests/queries/0_stateless/02542_table_function_format.sql b/tests/queries/0_stateless/02542_table_function_format.sql
new file mode 100644
index 00000000000..e32e9001b9f
--- /dev/null
+++ b/tests/queries/0_stateless/02542_table_function_format.sql
@@ -0,0 +1,36 @@
+desc format(JSONEachRow,
+$$
+{"a": "Hello", "b": 111}
+{"a": "World", "b": 123}
+{"a": "Hello", "b": 111}
+{"a": "World", "b": 123}
+$$);
+
+desc format(JSONEachRow, 'a String, b Int64',
+$$
+{"a": "Hello", "b": 111}
+{"a": "World", "b": 123}
+{"a": "Hello", "b": 111}
+{"a": "World", "b": 123}
+$$);
+
+select * from format(JSONEachRow, 'a String, b Int64',
+$$
+{"a": "Hello", "b": 111}
+{"a": "World", "b": 123}
+{"a": "Hello", "b": 111}
+{"a": "World", "b": 123}
+$$);
+
+desc format(CSV, '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"');
+desc format(CSV, 'a1 Int32, a2 UInt64, a3 Array(Int32), a4 Array(Array(String))', '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"');
+select * from format(CSV, '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"');
+select * from format(CSV, 'a1 Int32, a2 UInt64, a3 Array(Int32), a4 Array(Array(String))', '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"');
+
+drop table if exists test;
+
+create table test as format(TSV, 'cust_id UInt128', '20210129005809043707\n123456789\n987654321');
+
+select * from test;
+desc table test;
+drop table test;
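
Beyond desc, select, and create table ... as, the format() table function composes with an ordinary INSERT ... SELECT; a small self-contained sketch:

    create table dest (a String, b Int64) engine = Memory;
    insert into dest select * from format(JSONEachRow, 'a String, b Int64', '{"a": "x", "b": 1}');
    select * from dest; -- returns: x, 1
    drop table dest;
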
diff --git a/tests/queries/0_stateless/02551_obfuscator_keywords.reference b/tests/queries/0_stateless/02551_obfuscator_keywords.reference
new file mode 100644
index 00000000000..fc1d5e81bc8
--- /dev/null
+++ b/tests/queries/0_stateless/02551_obfuscator_keywords.reference
@@ -0,0 +1,3 @@
+select 1 order by 1 with fill step 1
+SELECT id, untuple(id) FROM id
+SELECT 1 IS NULL
diff --git a/tests/queries/0_stateless/02551_obfuscator_keywords.sh b/tests/queries/0_stateless/02551_obfuscator_keywords.sh
new file mode 100755
index 00000000000..a17ad670f0a
--- /dev/null
+++ b/tests/queries/0_stateless/02551_obfuscator_keywords.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CUR_DIR"/../shell_config.sh
+
+obf="$CLICKHOUSE_FORMAT --obfuscate"
+
+echo "select 1 order by 1 with fill step 1" | $obf
+echo "SELECT id, untuple(id) FROM id" | $obf
+echo "SELECT 1 IS NULL" | $obf
diff --git a/tests/queries/0_stateless/add-test b/tests/queries/0_stateless/add-test
index 39f6742f71c..e8e68cf174e 100755
--- a/tests/queries/0_stateless/add-test
+++ b/tests/queries/0_stateless/add-test
@@ -27,5 +27,12 @@ set -x
touch ${TESTS_PATH}/${NEW_TEST_NO}_${FILENAME}.${FILEEXT}
if [[ $FILEEXT == "sh" ]] ; then
chmod +x ${TESTS_PATH}/${NEW_TEST_NO}_${FILENAME}.${FILEEXT}
+ # shellcheck disable=SC2016
+ echo '#!/usr/bin/env bash
+
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CUR_DIR"/../shell_config.sh
+' >> ${TESTS_PATH}/${NEW_TEST_NO}_${FILENAME}.${FILEEXT}
fi
touch ${TESTS_PATH}/${NEW_TEST_NO}_${FILENAME}.reference
diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt
index 1ee45f5874d..75c693bc1a8 100644
--- a/utils/check-style/aspell-ignore/en/aspell-dict.txt
+++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt
@@ -127,8 +127,8 @@ PrettySpaceNoEscapesMonoBlock
Protobuf
ProtobufSingle
QTCreator
-QueryResultCacheHits
-QueryResultCacheMisses
+QueryCacheHits
+QueryCacheMisses
RBAC
RawBLOB
RedHat
diff --git a/utils/keeper-data-dumper/main.cpp b/utils/keeper-data-dumper/main.cpp
index 0ea6371b49f..e82b21079fe 100644
--- a/utils/keeper-data-dumper/main.cpp
+++ b/utils/keeper-data-dumper/main.cpp
@@ -69,7 +69,8 @@ int main(int argc, char *argv[])
LOG_INFO(logger, "Last committed index: {}", last_commited_index);
- DB::KeeperLogStore changelog(argv[2], 10000000, true, settings->compress_logs);
+ DB::KeeperLogStore changelog(
+ argv[2], LogFileSettings{.force_sync = true, .compress_logs = settings->compress_logs, .rotate_interval = 10000000});
changelog.init(last_commited_index, 10000000000UL); /// collect all logs
if (changelog.size() == 0)
LOG_INFO(logger, "Changelog empty");