Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-10 01:25:21 +00:00)

Commit 401747eaa7: Merge branch 'master' into sse2-special-build
@ -15,7 +15,7 @@
*
* Allow to search for next character from the set of 'symbols...' in a string.
* It is similar to 'strpbrk', 'strcspn' (and 'strchr', 'memchr' in the case of one symbol and '\0'),
* but with the following differencies:
* but with the following differences:
* - works with any memory ranges, including containing zero bytes;
* - doesn't require terminating zero byte: end of memory range is passed explicitly;
* - if not found, returns pointer to end instead of nullptr;
@ -63,7 +63,7 @@
* Very large size of memcpy typically indicates suboptimal (not cache friendly) algorithms in code or unrealistic scenarios,
* so we don't pay attention to using non-temporary stores.
*
* On recent Intel CPUs, the presence of "erms" makes "rep movsb" the most benefitial,
* On recent Intel CPUs, the presence of "erms" makes "rep movsb" the most beneficial,
* even comparing to non-temporary aligned unrolled stores even with the most wide registers.
*
* memcpy can be written in asm, C or C++. The latter can also use inline asm.
@ -214,4 +214,3 @@ tail:
    return ret;
}
@ -101,7 +101,7 @@
#endif

/*
* The pcg_extras namespace contains some support code that is likley to
* The pcg_extras namespace contains some support code that is likely to
* be useful for a variety of RNGs, including:
* - 128-bit int support for platforms where it isn't available natively
* - bit twiddling operations
@ -22,7 +22,7 @@
/*
* This code provides a a C++ class that can provide 128-bit (or higher)
* integers. To produce 2K-bit integers, it uses two K-bit integers,
* placed in a union that allowes the code to also see them as four K/2 bit
* placed in a union that allows the code to also see them as four K/2 bit
* integers (and access them either directly name, or by index).
*
* It may seem like we're reinventing the wheel here, because several
contrib/NuRaft (vendored)
@ -1 +1 @@
Subproject commit bdba298189e29995892de78dcecf64d127444e81
Subproject commit 1be805e7cb2494aa8170015493474379b0362dfc

contrib/datasketches-cpp (vendored)
@ -1 +1 @@
Subproject commit 7d73d7610db31d4e1ecde0fb3a7ee90ef371207f
Subproject commit 7abd49bb2e72bf9a5029993d31dcb1872da88292
@ -54,9 +54,8 @@ set(SRCS
add_library(cxx ${SRCS})
set_target_properties(cxx PROPERTIES FOLDER "contrib/libcxx-cmake")

target_include_directories(cxx SYSTEM BEFORE PUBLIC
    $<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/include>
    $<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}>/src)
target_include_directories(cxx SYSTEM BEFORE PRIVATE $<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/src>)
target_include_directories(cxx SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/include>)
target_compile_definitions(cxx PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI)

# Enable capturing stack traces for all exceptions.
@ -83,5 +83,8 @@ RUN export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
    --yes --no-install-recommends \
    && apt-get clean

# for external_symbolizer_path
RUN ln -s /usr/bin/llvm-symbolizer-15 /usr/bin/llvm-symbolizer

COPY build.sh /
CMD ["bash", "-c", "/build.sh 2>&1"]
@ -31,9 +31,6 @@ ARG deb_location_url=""

# set non-empty single_binary_location_url to create docker image
# from a single binary url (useful for non-standard builds - with sanitizers, for arm64).
# for example (run on aarch64 server):
# docker build . --network host --build-arg single_binary_location_url="https://builds.clickhouse.com/master/aarch64/clickhouse" -t altinity/clickhouse-server:master-testing-arm
# note: clickhouse-odbc-bridge is not supported there.
ARG single_binary_location_url=""

# user/group precreated explicitly with fixed uid/gid on purpose.
@ -37,7 +37,6 @@ if [ -n "$ERROR_LOG_PATH" ]; then ERROR_LOG_DIR="$(dirname "$ERROR_LOG_PATH")";
FORMAT_SCHEMA_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=format_schema_path || true)"

# There could be many disks declared in config
readarray -t FILESYSTEM_CACHE_PATHS < <(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key='storage_configuration.disks.*.data_cache_path' || true)
readarray -t DISKS_PATHS < <(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key='storage_configuration.disks.*.path' || true)

CLICKHOUSE_USER="${CLICKHOUSE_USER:-default}"
@ -51,7 +50,6 @@ for dir in "$DATA_DIR" \
    "$TMP_DIR" \
    "$USER_PATH" \
    "$FORMAT_SCHEMA_PATH" \
    "${FILESYSTEM_CACHE_PATHS[@]}" \
    "${DISKS_PATHS[@]}"
do
    # check if variable not empty
@ -40,6 +40,7 @@ function configure()
{
    # install test configs
    export USE_DATABASE_ORDINARY=1
    export EXPORT_S3_STORAGE_POLICIES=1
    /usr/share/clickhouse-test/config/install.sh

    # we mount tests folder from repo to /usr/share
@ -183,11 +184,11 @@ install_packages package_folder
configure

azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
./setup_minio.sh stateful # to have a proper environment
./setup_minio.sh stateless # to have a proper environment

start

# shellcheck disable=SC2086 # No quotes because I want to split it into words.
/s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS
chmod 777 -R /var/lib/clickhouse
clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordinary"
@ -200,12 +201,36 @@ start

clickhouse-client --query "SHOW TABLES FROM datasets"
clickhouse-client --query "SHOW TABLES FROM test"
clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits"
clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
clickhouse-client --query "CREATE TABLE test.hits (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
clickhouse-client --query "CREATE TABLE test.visits (CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8, VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32, Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String, EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32, SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32, SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16, UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16, FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8, Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32), WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64, ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32, ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32, ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32, ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16, ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32, OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime, PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8, PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16), CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64, StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64, OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64, UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32, ParsedParams 
Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32, DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16)) ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"

clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"

clickhouse-client --query "DROP TABLE datasets.visits_v1 SYNC"
clickhouse-client --query "DROP TABLE datasets.hits_v1 SYNC"

clickhouse-client --query "SHOW TABLES FROM test"

clickhouse-client --query "SYSTEM STOP THREAD FUZZER"

stop

# Let's enable S3 storage by default
export USE_S3_STORAGE_FOR_MERGE_TREE=1
configure

# But we still need default disk because some tables loaded only into it
sudo cat /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml | sed "s|<disk>s3</disk>|<disk>s3</disk><disk>default</disk>|" > /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp
mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml

start

./stress --hung-check --drop-databases --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION" \
    && echo -e 'Test script exit code\tOK' >> /test_output/test_results.tsv \
    || echo -e 'Test script failed\tFAIL' >> /test_output/test_results.tsv
@ -255,6 +280,14 @@ zgrep -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-serve
# Remove file logical_errors.txt if it's empty
[ -s /test_output/logical_errors.txt ] || rm /test_output/logical_errors.txt

# No such key errors
zgrep -Ea "Code: 499.*The specified key does not exist" /var/log/clickhouse-server/clickhouse-server*.log > /test_output/no_such_key_errors.txt \
    && echo -e 'S3_ERROR No such key thrown (see clickhouse-server.log or no_such_key_errors.txt)\tFAIL' >> /test_output/test_results.tsv \
    || echo -e 'No lost s3 keys\tOK' >> /test_output/test_results.tsv

# Remove file no_such_key_errors.txt if it's empty
[ -s /test_output/no_such_key_errors.txt ] || rm /test_output/no_such_key_errors.txt

# Crash
zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server*.log > /dev/null \
    && echo -e 'Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \
@ -17,7 +17,7 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
    python3-pip \
    shellcheck \
    yamllint \
    && pip3 install black boto3 codespell dohq-artifactory PyGithub unidiff pylint==2.6.2 \
    && pip3 install black==22.8.0 boto3 codespell==2.2.1 dohq-artifactory PyGithub unidiff pylint==2.6.2 \
    && apt-get clean \
    && rm -rf /root/.cache/pip
@ -37,7 +37,7 @@ sudo xcode-select --install

``` bash
brew update
brew install cmake ninja libtool gettext llvm gcc binutils grep findutils
brew install ccache cmake ninja libtool gettext llvm gcc binutils grep findutils
```

## Checkout ClickHouse Sources {#checkout-clickhouse-sources}
@ -12,7 +12,7 @@ One ClickHouse server can have multiple replicated databases running and updatin

## Creating a Database {#creating-a-database}
``` sql
CREATE DATABASE testdb ENGINE = Replicated('zoo_path', 'shard_name', 'replica_name') [SETTINGS ...]
```

**Engine Parameters**
@ -21,9 +21,7 @@ One ClickHouse server can have multiple replicated databases running and updatin
- `shard_name` — Shard name. Database replicas are grouped into shards by `shard_name`.
- `replica_name` — Replica name. Replica names must be different for all replicas of the same shard.

:::warning
For [ReplicatedMergeTree](../table-engines/mergetree-family/replication.md#table_engines-replication) tables if no arguments provided, then default arguments are used: `/clickhouse/tables/{uuid}/{shard}` and `{replica}`. These can be changed in the server settings [default_replica_path](../../operations/server-configuration-parameters/settings.md#default_replica_path) and [default_replica_name](../../operations/server-configuration-parameters/settings.md#default_replica_name). Macro `{uuid}` is unfolded to table's uuid, `{shard}` and `{replica}` are unfolded to values from server config, not from database engine arguments. But in the future, it will be possible to use `shard_name` and `replica_name` of Replicated database.
:::
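As a hedged illustration of the engine syntax above (the database name and ZooKeeper path are hypothetical placeholders, not taken from this commit), a Replicated database is usually created with macros so the same statement can be run on every server, which is what groups the replicas into shards by `shard_name`:

```sql
-- Hypothetical name and path; '{shard}' and '{replica}' are substituted from the server's macros config.
CREATE DATABASE replicated_demo
ENGINE = Replicated('/clickhouse/databases/replicated_demo', '{shard}', '{replica}');
```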

## Specifics and Recommendations {#specifics-and-recommendations}
@ -16,12 +16,14 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
    ...
) ENGINE = EmbeddedRocksDB([ttl]) PRIMARY KEY(primary_key_name)
) ENGINE = EmbeddedRocksDB([ttl, rocksdb_dir, read_only]) PRIMARY KEY(primary_key_name)
```

Engine parameters:

- `ttl` - time to live for values. TTL is accepted in seconds. If TTL is 0, regular RocksDB instance is used (without TTL).
- `rocksdb_dir` - path to the directory of an existing RocksDB or the destination path of the created RocksDB. Open the table with the specified `rocksdb_dir`.
- `read_only` - when `read_only` is set to true, read-only mode is used. For storage with TTL, compaction will not be triggered (neither manual nor automatic), so no expired entries are removed.
- `primary_key_name` – any column name in the column list.
- `primary key` must be specified, it supports only one column in the primary key. The primary key will be serialized in binary as a `rocksdb key`.
- columns other than the primary key will be serialized in binary as `rocksdb` value in corresponding order.
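A minimal sketch tying the parameters above together (the table name and `rocksdb_dir` path are hypothetical, not part of this diff): `ttl = 0` disables expiration and `read_only = 0` opens the store for writes.

```sql
-- ttl = 0 (no TTL), hypothetical rocksdb_dir, read_only = 0
CREATE TABLE rocksdb_demo
(
    key String,
    value UInt32
)
ENGINE = EmbeddedRocksDB(0, '/var/lib/clickhouse/store/rocksdb_demo', 0)
PRIMARY KEY(key);
```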
@ -15,7 +15,7 @@ Usage examples:
## Usage in ClickHouse Server {#usage-in-clickhouse-server}

``` sql
ENGINE = GenerateRandom(random_seed, max_string_length, max_array_length)
ENGINE = GenerateRandom([random_seed] [,max_string_length] [,max_array_length])
```

The `max_array_length` and `max_string_length` parameters specify maximum length of all
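A hedged example of the relaxed syntax above (the table and column names are made up for illustration): all three arguments are optional, but passing them pins the seed and caps string and array sizes.

```sql
-- Hypothetical schema; arguments are (random_seed, max_string_length, max_array_length).
CREATE TABLE generate_demo (name String, value UInt32, tags Array(String))
ENGINE = GenerateRandom(1, 10, 3);

SELECT * FROM generate_demo LIMIT 3;
```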
@ -6,19 +6,28 @@ title: "UK Property Price Paid"
---

The dataset contains data about prices paid for real-estate property in England and Wales. The data is available since 1995.
The size of the dataset in uncompressed form is about 4 GiB and it will take about 270 MiB in ClickHouse.
The size of the dataset in uncompressed form is about 4 GiB and it will take about 278 MiB in ClickHouse.

Source: https://www.gov.uk/government/statistical-data-sets/price-paid-data-downloads <br/>
Source: https://www.gov.uk/government/statistical-data-sets/price-paid-data-downloads
Description of the fields: https://www.gov.uk/guidance/about-the-price-paid-data

Contains HM Land Registry data © Crown copyright and database right 2021. This data is licensed under the Open Government Licence v3.0.

## Download the Dataset {#download-dataset}

Run the command:

```bash
wget http://prod.publicdata.landregistry.gov.uk.s3-website-eu-west-1.amazonaws.com/pp-complete.csv
```

Download will take about 2 minutes with a good internet connection.

## Create the Table {#create-table}

```sql
CREATE TABLE uk_price_paid
(
    uuid UUID,
    price UInt32,
    date Date,
    postcode1 LowCardinality(String),
@ -33,68 +42,65 @@ CREATE TABLE uk_price_paid
    town LowCardinality(String),
    district LowCardinality(String),
    county LowCardinality(String),
    category UInt8,
    category2 UInt8
) ORDER BY (postcode1, postcode2, addr1, addr2);
    category UInt8
) ENGINE = MergeTree ORDER BY (postcode1, postcode2, addr1, addr2);
```

## Preprocess and Import Data {#preprocess-import-data}

In this example, we define the structure of source data from the CSV file and specify a query to preprocess the data with either `clickhouse-client` or the web based Play UI.
We will use `clickhouse-local` tool for data preprocessing and `clickhouse-client` to upload it.

In this example, we define the structure of source data from the CSV file and specify a query to preprocess the data with `clickhouse-local`.

The preprocessing is:
- splitting the postcode to two different columns `postcode1` and `postcode2` that are better for storage and queries;
- splitting the postcode to two different columns `postcode1` and `postcode2` that is better for storage and queries;
- converting the `time` field to date as it only contains 00:00 time;
- ignoring the [UUid](../../sql-reference/data-types/uuid.md) field because we don't need it for analysis;
- transforming `type` and `duration` to more readable Enum fields with function [transform](../../sql-reference/functions/other-functions.md#transform);
- transforming `is_new` and `category` fields from single-character string (`Y`/`N` and `A`/`B`) to [UInt8](../../sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-uint256-int8-int16-int32-int64-int128-int256) field with 0 and 1.

Preprocessed data is piped directly to `clickhouse-client` to be inserted into ClickHouse table in streaming fashion.

```bash
INSERT INTO uk_price_paid
WITH
    splitByChar(' ', postcode) AS p
SELECT
    replaceRegexpAll(uuid_string, '{|}','') AS uuid,
    toUInt32(price_string) AS price,
    parseDateTimeBestEffortUS(time) AS date,
    p[1] AS postcode1,
    p[2] AS postcode2,
    transform(a, ['T', 'S', 'D', 'F', 'O'], ['terraced', 'semi-detached', 'detached', 'flat', 'other']) AS type,
    b = 'Y' AS is_new,
    transform(c, ['F', 'L', 'U'], ['freehold', 'leasehold', 'unknown']) AS duration,
    addr1,
    addr2,
    street,
    locality,
    town,
    district,
    county,
    d = 'B' AS category,
    e = 'B' AS category2
FROM url(
    'http://prod.publicdata.landregistry.gov.uk.s3-website-eu-west-1.amazonaws.com/pp-complete.csv',
    'CSV',
    'uuid_string String,
    price_string String,
    time String,
    postcode String,
    a String,
    b String,
    c String,
    addr1 String,
    addr2 String,
    street String,
    locality String,
    town String,
    district String,
    county String,
    d String,
    e String'
)
SETTINGS max_http_get_redirects=1;
clickhouse-local --input-format CSV --structure '
    uuid String,
    price UInt32,
    time DateTime,
    postcode String,
    a String,
    b String,
    c String,
    addr1 String,
    addr2 String,
    street String,
    locality String,
    town String,
    district String,
    county String,
    d String,
    e String
' --query "
    WITH splitByChar(' ', postcode) AS p
    SELECT
        price,
        toDate(time) AS date,
        p[1] AS postcode1,
        p[2] AS postcode2,
        transform(a, ['T', 'S', 'D', 'F', 'O'], ['terraced', 'semi-detached', 'detached', 'flat', 'other']) AS type,
        b = 'Y' AS is_new,
        transform(c, ['F', 'L', 'U'], ['freehold', 'leasehold', 'unknown']) AS duration,
        addr1,
        addr2,
        street,
        locality,
        town,
        district,
        county,
        d = 'B' AS category
    FROM table" --date_time_input_format best_effort < pp-complete.csv | clickhouse-client --query "INSERT INTO uk_price_paid FORMAT TSV"
```

It will take about 2 minutes depending on where you are in the world, and where your ClickHouse servers are. Almost all of the time is the download time of the CSV file from the UK government server.
It will take about 40 seconds.

## Validate the Data {#validate-data}
@ -106,13 +112,13 @@ SELECT count() FROM uk_price_paid;

Result:

```response
```text
┌──count()─┐
│ 27450499 │
│ 26321785 │
└──────────┘
```

The size of dataset in ClickHouse is just 540 MiB, check it.
The size of dataset in ClickHouse is just 278 MiB, check it.

Query:

@ -124,14 +130,10 @@ Result:

```text
┌─formatReadableSize(total_bytes)─┐
│ 545.04 MiB                      │
│ 278.80 MiB                      │
└─────────────────────────────────┘
```

:::note
The above size is for a replicated table; if you are using this dataset with a single instance, the size will be half.
:::

## Run Some Queries {#run-queries}

### Query 1. Average Price Per Year {#average-price}
@ -144,7 +146,7 @@ SELECT toYear(date) AS year, round(avg(price)) AS price, bar(price, 0, 1000000,

Result:

```response
```text
┌─year─┬──price─┬─bar(round(avg(price)), 0, 1000000, 80)─┐
│ 1995 │  67932 │ █████▍                                 │
│ 1996 │  71505 │ █████▋                                 │
@ -175,6 +175,10 @@ You can also choose to use [HTTP compression](https://en.wikipedia.org/wiki/HTTP
- `br`
- `deflate`
- `xz`
- `zstd`
- `lz4`
- `bz2`
- `snappy`

To send a compressed `POST` request, append the request header `Content-Encoding: compression_method`.
In order for ClickHouse to compress the response, enable compression with [enable_http_compression](../operations/settings/settings.md#settings-enable_http_compression) setting and append `Accept-Encoding: compression_method` header to the request. You can configure the data compression level in the [http_zlib_compression_level](../operations/settings/settings.md#settings-http_zlib_compression_level) setting for all compression methods.
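For the response side, a minimal sketch of the two settings named in the paragraph above (the values are arbitrary examples); the request headers themselves are still set by the HTTP client:

```sql
-- Enable compressed HTTP responses and pick a compression level for the zlib-based methods.
SET enable_http_compression = 1;
SET http_zlib_compression_level = 3;
```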
@ -94,6 +94,21 @@ It is also possible for `Flat`, `Hashed`, `ComplexKeyHashed` dictionaries to onl
- If the source is HTTP then `update_field` will be added as a query parameter with the last update time as the parameter value.
- If the source is Executable then `update_field` will be added as an executable script argument with the last update time as the argument value.
- If the source is ClickHouse, MySQL, PostgreSQL, ODBC there will be an additional part of `WHERE`, where `update_field` is compared as greater or equal with the last update time.
- By default, this `WHERE`-condition is checked at the highest level of the SQL-Query. Alternatively, the condition can be checked in any other `WHERE`-clause within the query using the `{condition}`-keyword. Example:
```sql
...
SOURCE(CLICKHOUSE(...
    update_field 'added_time'
    QUERY '
        SELECT my_arr.1 AS x, my_arr.2 AS y, creation_time
        FROM (
            SELECT arrayZip(x_arr, y_arr) AS my_arr, creation_time
            FROM dictionary_source
            WHERE {condition}
        )'
))
...
```
If `update_field` option is set, additional option `update_lag` can be set. Value of `update_lag` option is subtracted from previous update time before request updated data.
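As a hedged sketch of combining the two options (a source fragment only; the field name and lag value are hypothetical), `update_lag` widens the window that the `update_field` comparison looks back over:

```sql
...
SOURCE(CLICKHOUSE(...
    update_field 'added_time'
    update_lag 15
))
...
```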
@ -267,7 +267,7 @@ Result:
└────────────────┘
```

:::Attention
:::note
The return type of `toStartOf*`, `toLastDayOfMonth`, `toMonday` functions described below is `Date` or `DateTime`.
Though these functions can take values of the extended types `Date32` and `DateTime64` as an argument, passing them a time outside the normal range (year 1970 to 2149 for `Date` / 2106 for `DateTime`) will produce wrong results.
In case argument is out of normal range:
@ -430,5 +430,119 @@ Result:
└────────────────────────────┘
```

## mapApply

**Syntax**

```sql
mapApply(func, map)
```

**Parameters**

- `func` - [Lambda function](../../sql-reference/functions/index.md#higher-order-functions---operator-and-lambdaparams-expr-function).
- `map` — [Map](../../sql-reference/data-types/map.md).

**Returned value**

- Returns a map obtained from the original map by application of `func(map1[i], …, mapN[i])` for each element.

**Example**

Query:

```sql
SELECT mapApply((k, v) -> (k, v * 10), _map) AS r
FROM
(
    SELECT map('key1', number, 'key2', number * 2) AS _map
    FROM numbers(3)
)
```

Result:

```text
┌─r─────────────────────┐
│ {'key1':0,'key2':0}   │
│ {'key1':10,'key2':20} │
│ {'key1':20,'key2':40} │
└───────────────────────┘
```

## mapFilter

**Syntax**

```sql
mapFilter(func, map)
```

**Parameters**

- `func` - [Lambda function](../../sql-reference/functions/index.md#higher-order-functions---operator-and-lambdaparams-expr-function).
- `map` — [Map](../../sql-reference/data-types/map.md).

**Returned value**

- Returns a map containing only the elements in `map` for which `func(map1[i], …, mapN[i])` returns something other than 0.

**Example**

Query:

```sql
SELECT mapFilter((k, v) -> ((v % 2) = 0), _map) AS r
FROM
(
    SELECT map('key1', number, 'key2', number * 2) AS _map
    FROM numbers(3)
)
```

Result:

```text
┌─r───────────────────┐
│ {'key1':0,'key2':0} │
│ {'key2':2}          │
│ {'key1':2,'key2':4} │
└─────────────────────┘
```

## mapUpdate

**Syntax**

```sql
mapUpdate(map1, map2)
```

**Parameters**

- `map1` [Map](../../sql-reference/data-types/map.md).
- `map2` [Map](../../sql-reference/data-types/map.md).

**Returned value**

- Returns `map1` with values updated by the values for the corresponding keys in `map2`.

**Example**

Query:

```sql
SELECT mapUpdate(map('key1', 0, 'key3', 0), map('key1', 10, 'key2', 10)) AS map;
```

Result:

```text
┌─map────────────────────────────┐
│ {'key3':0,'key1':10,'key2':10} │
└────────────────────────────────┘
```

[Original article](https://clickhouse.com/docs/en/sql-reference/functions/tuple-map-functions/) <!--hide-->
docs/en/sql-reference/functions/uniqtheta-functions.md (new file)
@ -0,0 +1,94 @@
---
slug: /en/sql-reference/functions/uniqtheta-functions
---

# uniqTheta Functions

uniqTheta functions work on two uniqThetaSketch objects and perform set operation calculations such as ∪ / ∩ / × (union/intersect/not), returning a new uniqThetaSketch object that contains the result.

A uniqThetaSketch object is constructed by the aggregation function uniqTheta with the -State combinator.

uniqThetaSketch is a data structure that stores an approximate set of values.
For more information on RoaringBitmap, see: [Theta Sketch Framework](https://datasketches.apache.org/docs/Theta/ThetaSketchFramework.html).
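A hedged sketch of building the sketches with the `-State` combinator directly in a query (the `numbers` source is just for illustration); `finalizeAggregation` turns the resulting sketch back into a cardinality estimate:

```sql
SELECT finalizeAggregation(uniqThetaUnion(a, b)) AS union_cardinality
FROM
(
    SELECT
        uniqThetaState(number)     AS a,  -- sketch over 0..9
        uniqThetaState(number * 2) AS b   -- sketch over 0, 2, ..., 18
    FROM numbers(10)
);
```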
## uniqThetaUnion

Performs a union calculation (set operation ∪) on two uniqThetaSketch objects; the result is a new uniqThetaSketch.

``` sql
uniqThetaUnion(uniqThetaSketch,uniqThetaSketch)
```

**Arguments**

- `uniqThetaSketch` – uniqThetaSketch object.

**Example**

``` sql
select finalizeAggregation(uniqThetaUnion(a, b)) as a_union_b, finalizeAggregation(a) as a_cardinality, finalizeAggregation(b) as b_cardinality
from
(select arrayReduce('uniqThetaState',[1,2]) as a, arrayReduce('uniqThetaState',[2,3,4]) as b );
```

``` text
┌─a_union_b─┬─a_cardinality─┬─b_cardinality─┐
│         4 │             2 │             3 │
└───────────┴───────────────┴───────────────┘
```

## uniqThetaIntersect

Performs an intersect calculation (set operation ∩) on two uniqThetaSketch objects; the result is a new uniqThetaSketch.

``` sql
uniqThetaIntersect(uniqThetaSketch,uniqThetaSketch)
```

**Arguments**

- `uniqThetaSketch` – uniqThetaSketch object.

**Example**

``` sql
select finalizeAggregation(uniqThetaIntersect(a, b)) as a_intersect_b, finalizeAggregation(a) as a_cardinality, finalizeAggregation(b) as b_cardinality
from
(select arrayReduce('uniqThetaState',[1,2]) as a, arrayReduce('uniqThetaState',[2,3,4]) as b );
```

``` text
┌─a_intersect_b─┬─a_cardinality─┬─b_cardinality─┐
│             1 │             2 │             3 │
└───────────────┴───────────────┴───────────────┘
```

## uniqThetaNot

Performs an a_not_b calculation (set operation ×) on two uniqThetaSketch objects; the result is a new uniqThetaSketch.

``` sql
uniqThetaNot(uniqThetaSketch,uniqThetaSketch)
```

**Arguments**

- `uniqThetaSketch` – uniqThetaSketch object.

**Example**

``` sql
select finalizeAggregation(uniqThetaNot(a, b)) as a_not_b, finalizeAggregation(a) as a_cardinality, finalizeAggregation(b) as b_cardinality
from
(select arrayReduce('uniqThetaState',[2,3,4]) as a, arrayReduce('uniqThetaState',[1,2]) as b );
```

``` text
┌─a_not_b─┬─a_cardinality─┬─b_cardinality─┐
│       2 │             3 │             2 │
└─────────┴───────────────┴───────────────┘
```

**See Also**

- [uniqThetaSketch](../../sql-reference/aggregate-functions/reference/uniqthetasketch.md#agg_function-uniqthetasketch)
@ -303,7 +303,7 @@ SHOW USERS

## SHOW ROLES

Returns a list of [roles](../../operations/access-rights.md#role-management). To view another parameters, see system tables [system.roles](../../operations/system-tables/roles.md#system_tables-roles) and [system.role-grants](../../operations/system-tables/role-grants.md#system_tables-role_grants).
Returns a list of [roles](../../operations/access-rights.md#role-management). To view another parameters, see system tables [system.roles](../../operations/system-tables/roles.md#system_tables-roles) and [system.role_grants](../../operations/system-tables/role-grants.md#system_tables-role_grants).

### Syntax
@ -267,7 +267,7 @@ SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp;
└────────────────┘
```

:::Attention
:::note
The return type of the `toStartOf*`, `toMonday` functions described below is `Date` or `DateTime`.
Although these functions can take values of type `Date32` or `DateTime64` as an argument, passing an argument outside the normal range (`1970` - `2148` for `Date` and `1970-01-01 00:00:00` - `2106-02-07 08:28:15` for `DateTime`) will produce an incorrect result.
Return values for arguments outside the normal range:
@ -277,7 +277,7 @@ SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp;
* `2149-05-31` will be the result of `toLastDayOfMonth` for an argument greater than `2149-05-31`.
:::

:::Attention
:::note
The return type of the `toStartOf*`, `toLastDayOfMonth`, `toMonday` functions described below is `Date` or `DateTime`.
Although these functions can take values of type `Date32` or `DateTime64` as an argument, passing an argument outside the normal range (`1970` - `2148` for `Date` and `1970-01-01 00:00:00` - `2106-02-07 08:28:15` for `DateTime`) will produce an incorrect result.
Return values for arguments outside the normal range:
@ -305,7 +305,7 @@ SHOW USERS

## SHOW ROLES {#show-roles-statement}

Returns a list of [roles](../../operations/access-rights.md#role-management). To view role parameters, see the system tables [system.roles](../../operations/system-tables/roles.md#system_tables-roles) and [system.role-grants](../../operations/system-tables/role-grants.md#system_tables-role_grants).
Returns a list of [roles](../../operations/access-rights.md#role-management). To view role parameters, see the system tables [system.roles](../../operations/system-tables/roles.md#system_tables-roles) and [system.role_grants](../../operations/system-tables/role-grants.md#system_tables-role_grants).

### Syntax {#show-roles-syntax}
@ -1,338 +1,297 @@
|
||||
---
|
||||
slug: /zh/development/tests
|
||||
slug: /en/development/tests
|
||||
sidebar_position: 70
|
||||
sidebar_label: Testing
|
||||
title: ClickHouse Testing
|
||||
description: Most of ClickHouse features can be tested with functional tests and they are mandatory to use for every change in ClickHouse code that can be tested that way.
|
||||
---
|
||||
# ClickHouse 测试 {#clickhouse-testing}
|
||||
|
||||
## 功能测试 {#functional-tests}
|
||||
## Functional Tests
|
||||
|
||||
功能测试使用起来最简单方便. 大多数 ClickHouse 特性都可以通过功能测试进行测试, 并且对于可以通过功能测试进行测试的 ClickHouse 代码的每一个更改, 都必须使用这些特性
|
||||
Functional tests are the most simple and convenient to use. Most of ClickHouse features can be tested with functional tests and they are mandatory to use for every change in ClickHouse code that can be tested that way.
|
||||
|
||||
每个功能测试都会向正在运行的 ClickHouse 服务器发送一个或多个查询, 并将结果与参考进行比较.
|
||||
Each functional test sends one or multiple queries to the running ClickHouse server and compares the result with reference.
|
||||
|
||||
测试位于 `查询` 目录中. 有两个子目录: `无状态` 和 `有状态`. 无状态测试在没有任何预加载测试数据的情况下运行查询 - 它们通常在测试本身内即时创建小型合成数据集. 状态测试需要来自 Yandex.Metrica 的预加载测试数据, 它对公众开放.
|
||||
Tests are located in `queries` directory. There are two subdirectories: `stateless` and `stateful`. Stateless tests run queries without any preloaded test data - they often create small synthetic datasets on the fly, within the test itself. Stateful tests require preloaded test data from ClickHouse and it is available to general public.
|
||||
|
||||
每个测试可以是两种类型之一: `.sql` 和 `.sh`. `.sql` 测试是简单的 SQL 脚本, 它通过管道传输到 `clickhouse-client --multiquery --testmode`. `.sh` 测试是一个自己运行的脚本. SQL 测试通常比 `.sh` 测试更可取. 仅当您必须测试某些无法从纯 SQL 中执行的功能时才应使用 `.sh` 测试, 例如将一些输入数据传送到 `clickhouse-client` 或测试 `clickhouse-local`.
|
||||
Each test can be one of two types: `.sql` and `.sh`. `.sql` test is the simple SQL script that is piped to `clickhouse-client --multiquery`. `.sh` test is a script that is run by itself. SQL tests are generally preferable to `.sh` tests. You should use `.sh` tests only when you have to test some feature that cannot be exercised from pure SQL, such as piping some input data into `clickhouse-client` or testing `clickhouse-local`.
|
||||
|
||||
### 在本地运行测试 {#functional-test-locally}
|
||||
### Running a Test Locally {#functional-test-locally}
|
||||
|
||||
在本地启动ClickHouse服务器, 监听默认端口(9000). 例如, 要运行测试 `01428_hash_set_nan_key`, 请切换到存储库文件夹并运行以下命令:
|
||||
Start the ClickHouse server locally, listening on the default port (9000). To
|
||||
run, for example, the test `01428_hash_set_nan_key`, change to the repository
|
||||
folder and run the following command:
|
||||
|
||||
```
|
||||
PATH=$PATH:<path to clickhouse-client> tests/clickhouse-test 01428_hash_set_nan_key
|
||||
```
|
||||
|
||||
有关更多选项, 请参阅`tests/clickhouse-test --help`. 您可以简单地运行所有测试或运行由测试名称中的子字符串过滤的测试子集:`./clickhouse-test substring`. 还有并行或随机顺序运行测试的选项.
|
||||
For more options, see `tests/clickhouse-test --help`. You can simply run all tests or run subset of tests filtered by substring in test name: `./clickhouse-test substring`. There are also options to run tests in parallel or in randomized order.
|
||||
|
||||
### 添加新测试 {#adding-new-test}
|
||||
### Adding a New Test
|
||||
|
||||
添加新的测试, 在 `queries/0_stateless` 目录下创建 `.sql` 或 `.sh` 文件, 手动检查, 然后通过以下方式生成`.reference`文件:`clickhouse-client -n --testmode < 00000_test.sql > 00000_test.reference` 或 `./00000_test.sh > ./00000_test.reference`.
|
||||
To add new test, create a `.sql` or `.sh` file in `queries/0_stateless` directory, check it manually and then generate `.reference` file in the following way: `clickhouse-client --multiquery < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`.
|
||||
|
||||
测试应仅使用(创建、删除等)`test` 数据库中假定已预先创建的表; 测试也可以使用临时表.
|
||||
Tests should use (create, drop, etc) only tables in `test` database that is assumed to be created beforehand; also tests can use temporary tables.
|
||||
|
||||
### 选择测试名称 {#choosing-test-name}
|
||||
### Choosing the Test Name
|
||||
|
||||
测试名称以五位数前缀开头, 后跟描述性名称, 例如 `00422_hash_function_constexpr.sql`. 要选择前缀, 请找到目录中已存在的最大前缀, 并将其加一. 在此期间, 可能会添加一些具有相同数字前缀的其他测试, 但这没关系并且不会导致任何问题, 您以后不必更改它.
|
||||
The name of the test starts with a five-digit prefix followed by a descriptive name, such as `00422_hash_function_constexpr.sql`. To choose the prefix, find the largest prefix already present in the directory, and increment it by one. In the meantime, some other tests might be added with the same numeric prefix, but this is OK and does not lead to any problems, you don't have to change it later.
|
||||
|
||||
一些测试的名称中标有 `zookeeper`、`shard` 或 `long` . `zookeeper` 用于使用 ZooKeeper 的测试. `shard` 用于需要服务器监听 `127.0.0.*` 的测试; `distributed` 或 `global` 具有相同的含义. `long` 用于运行时间稍长于一秒的测试. Yo你可以分别使用 `--no-zookeeper`、`--no-shard` 和 `--no-long` 选项禁用这些测试组. 如果需要 ZooKeeper 或分布式查询,请确保为您的测试名称添加适当的前缀.
|
||||
Some tests are marked with `zookeeper`, `shard` or `long` in their names. `zookeeper` is for tests that are using ZooKeeper. `shard` is for tests that requires server to listen `127.0.0.*`; `distributed` or `global` have the same meaning. `long` is for tests that run slightly longer that one second. You can disable these groups of tests using `--no-zookeeper`, `--no-shard` and `--no-long` options, respectively. Make sure to add a proper prefix to your test name if it needs ZooKeeper or distributed queries.
|
||||
|
||||
### 检查必须发生的错误 {#checking-error-must-occur}
|
||||
### Checking for an Error that Must Occur
|
||||
|
||||
有时您想测试是否因不正确的查询而发生服务器错误. 我们支持在 SQL 测试中对此进行特殊注释, 形式如下:
|
||||
Sometimes you want to test that a server error occurs for an incorrect query. We support special annotations for this in SQL tests, in the following form:
|
||||
```
|
||||
select x; -- { serverError 49 }
|
||||
```
|
||||
此测试确保服务器返回关于未知列“x”的错误代码为 49. 如果没有错误, 或者错误不同, 则测试失败. 如果您想确保错误发生在客户端, 请改用 `clientError` 注释.
|
||||
This test ensures that the server returns an error with code 49 about unknown column `x`. If there is no error, or the error is different, the test will fail. If you want to ensure that an error occurs on the client side, use `clientError` annotation instead.
|
||||
|
||||
不要检查错误消息的特定措辞, 它将来可能会发生变化, 并且测试将不必要地中断. 只检查错误代码. 如果现有的错误代码不足以满足您的需求, 请考虑添加一个新的.
|
||||
Do not check for a particular wording of error message, it may change in the future, and the test will needlessly break. Check only the error code. If the existing error code is not precise enough for your needs, consider adding a new one.
|
||||
|
||||
### 测试分布式查询 {#testing-distributed-query}
|
||||
### Testing a Distributed Query
|
||||
|
||||
如果你想在功能测试中使用分布式查询, 你可以使用 `127.0.0.{1..2}` 的地址, 以便服务器查询自己; 或者您可以在服务器配置文件中使用预定义的测试集群, 例如`test_shard_localhost`. 请记住在测试名称中添加 `shard` 或 `distributed` 字样, 以便它以正确的配置在 CI 中运行, 其中服务器配置为支持分布式查询.
|
||||
If you want to use distributed queries in functional tests, you can leverage `remote` table function with `127.0.0.{1..2}` addresses for the server to query itself; or you can use predefined test clusters in server configuration file like `test_shard_localhost`. Remember to add the words `shard` or `distributed` to the test name, so that it is run in CI in correct configurations, where the server is configured to support distributed queries.
|
||||
|
||||
|
||||
## 已知错误 {#known-bugs}
|
||||
## Known Bugs {#known-bugs}
|
||||
|
||||
如果我们知道一些可以通过功能测试轻松重现的错误, 我们将准备好的功能测试放在 `tests/queries/bugs` 目录中. 修复错误后, 这些测试将移至 `tests/queries/0_stateless` .
|
||||
If we know some bugs that can be easily reproduced by functional tests, we place prepared functional tests in `tests/queries/bugs` directory. These tests will be moved to `tests/queries/0_stateless` when bugs are fixed.
|
||||
|
||||
## 集成测试 {#integration-tests}
|
||||
## Integration Tests {#integration-tests}
|
||||
|
||||
集成测试允许在集群配置中测试 ClickHouse 以及 ClickHouse 与其他服务器(如 MySQL、Postgres、MongoDB)的交互. 它们可以用来模拟网络分裂、丢包等情况. 这些测试在Docker下运行, 并使用各种软件创建多个容器.
|
||||
Integration tests allow testing ClickHouse in clustered configuration and ClickHouse interaction with other servers like MySQL, Postgres, MongoDB. They are useful to emulate network splits, packet drops, etc. These tests are run under Docker and create multiple containers with various software.
|
||||
|
||||
有关如何运行这些测试, 请参阅 `tests/integration/README.md` .
|
||||
See `tests/integration/README.md` on how to run these tests.
|
||||
|
||||
注意, ClickHouse与第三方驱动程序的集成没有经过测试. 另外, 我们目前还没有JDBC和ODBC驱动程序的集成测试.
|
||||
Note that integration of ClickHouse with third-party drivers is not tested. Also, we currently do not have integration tests with our JDBC and ODBC drivers.
|
||||
|
||||
## 单元测试 {#unit-tests}
|
||||
## Unit Tests {#unit-tests}
|
||||
|
||||
当您想测试的不是 ClickHouse 整体, 而是单个独立库或类时,单元测试很有用. 您可以使用 `ENABLE_TESTS` CMake 选项启用或禁用测试构建. 单元测试(和其他测试程序)位于代码中的 `tests` 子目录中. 要运行单元测试, 请键入 `ninja test` 。有些测试使用 `gtest` , 但有些程序在测试失败时会返回非零退出码.
|
||||
Unit tests are useful when you want to test not the ClickHouse as a whole, but a single isolated library or class. You can enable or disable build of tests with `ENABLE_TESTS` CMake option. Unit tests (and other test programs) are located in `tests` subdirectories across the code. To run unit tests, type `ninja test`. Some tests use `gtest`, but some are just programs that return non-zero exit code on test failure.
|
||||
|
||||
如果代码已经被功能测试覆盖了, 就没有必要进行单元测试(而且功能测试通常更易于使用).
|
||||
It’s not necessary to have unit tests if the code is already covered by functional tests (and functional tests are usually much more simple to use).
|
||||
|
||||
例如, 您可以通过直接调用可执行文件来运行单独的 gtest 检查:
|
||||
You can run individual gtest checks by calling the executable directly, for example:
|
||||
|
||||
```bash
|
||||
$ ./src/unit_tests_dbms --gtest_filter=LocalAddress*
|
||||
```
|
||||
|
||||

## Performance Tests {#performance-tests}

Performance tests allow measuring and comparing the performance of some isolated part of ClickHouse on synthetic queries. Performance tests are located at `tests/performance/`. Each test is represented by an `.xml` file with a description of the test case. Tests are run with the `docker/test/performance-comparison` tool. See its readme file for invocation. Each test runs one or multiple queries (possibly with combinations of parameters) in a loop. Some tests can contain preconditions on a preloaded test dataset.

If you want to improve the performance of ClickHouse in some scenario, and if improvements can be observed on simple queries, it is highly recommended to write a performance test. Also, it is recommended to write performance tests when you add or modify SQL functions which are relatively isolated and not too obscure. It always makes sense to use `perf top` or other `perf` tools during your tests.
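
For instance, while a performance test (or a manual query loop) is running, you can watch where the server spends CPU time:

```bash
# Sample the hottest functions of the running server process.
sudo perf top -p "$(pidof clickhouse-server)"
```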

## Test Tools and Scripts {#test-tools-and-scripts}

Some programs in the `tests` directory are not prepared tests, but test tools. For example, for `Lexer` there is a tool `src/Parsers/tests/lexer` that just tokenizes stdin and writes the colorized result to stdout. You can use these kinds of tools as code examples and for exploration and manual testing.
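
For example, the lexer tool mentioned above can be fed a query on stdin (run it from the build directory where the tool target is built):

```bash
# Tokenize a query and print the colorized token stream to stdout.
echo "SELECT 1 AS x" | ./src/Parsers/tests/lexer
```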

## Miscellaneous Tests {#miscellaneous-tests}

There are tests for machine-learned models in `tests/external_models`. These tests are not updated and must be transferred to integration tests.

There is a separate test for quorum inserts. This test runs a ClickHouse cluster on separate servers and emulates various failure cases: network split, packet drop (between ClickHouse nodes, between ClickHouse and ZooKeeper, between the ClickHouse server and client, etc.), `kill -9`, `kill -STOP` and `kill -CONT`, like [Jepsen](https://aphyr.com/tags/Jepsen). Then the test checks that all acknowledged inserts were written and all rejected inserts were not.

The quorum test was written by a separate team before ClickHouse was open-sourced. This team no longer works with ClickHouse. The test was accidentally written in Java. For these reasons, the quorum test must be rewritten and moved to integration tests.

## Manual Testing {#manual-testing}

When you develop a new feature, it is reasonable to also test it manually. You can do it with the following steps:

Build ClickHouse. Run ClickHouse from the terminal: change directory to `programs/clickhouse-server` and run it with `./clickhouse-server`. It will use configuration (`config.xml`, `users.xml` and files within `config.d` and `users.d` directories) from the current directory by default. To connect to the ClickHouse server, run `programs/clickhouse-client/clickhouse-client`.

Note that all clickhouse tools (server, client, etc.) are just symlinks to a single binary named `clickhouse`. You can find this binary at `programs/clickhouse`. All tools can also be invoked as `clickhouse tool` instead of `clickhouse-tool`.

Alternatively, you can install the ClickHouse package: either a stable release from the ClickHouse repository, or you can build a package for yourself with `./release` in the ClickHouse sources root. Then start the server with `sudo clickhouse start` (or `stop` to stop the server). Look for logs at `/etc/clickhouse-server/clickhouse-server.log`.

When ClickHouse is already installed on your system, you can build a new `clickhouse` binary and replace the existing binary:

``` bash
$ sudo clickhouse stop
$ sudo cp ./clickhouse /usr/bin/
$ sudo clickhouse start
```

Also you can stop the system clickhouse-server and run your own with the same configuration but with logging to the terminal:

``` bash
$ sudo clickhouse stop
$ sudo -u clickhouse /usr/bin/clickhouse server --config-file /etc/clickhouse-server/config.xml
```

Example with gdb:

``` bash
$ sudo -u clickhouse gdb --args /usr/bin/clickhouse server --config-file /etc/clickhouse-server/config.xml
```

If the system clickhouse-server is already running and you do not want to stop it, you can change port numbers in your `config.xml` (or override them in a file in the `config.d` directory), provide an appropriate data path, and run it.

The `clickhouse` binary has almost no dependencies and works across a wide range of Linux distributions. To quick and dirty test your changes on a server, you can simply `scp` your freshly built `clickhouse` binary to your server and then run it as in the examples above.

## Testing Environment {#testing-environment}

Before publishing a release as stable, we deploy it on a testing environment. The testing environment is a cluster that processes 1/39 of the [Yandex.Metrica](https://metrica.yandex.com/) data. We share our testing environment with the Yandex.Metrica team. ClickHouse is upgraded without downtime on top of the existing data. We check first that the data is processed successfully without lagging behind real time, that replication continues to work, and that no issues are visible to the Yandex.Metrica team. The first check can be done in the following way:

``` sql
SELECT hostName() AS h, any(version()), any(uptime()), max(UTCEventTime), count() FROM remote('example01-01-{1..3}t', merge, hits) WHERE EventDate >= today() - 2 GROUP BY h ORDER BY h;
```

In some cases we also deploy to the testing environments of our friend teams in Yandex: Market, Cloud, etc. Also we have some hardware servers that are used for development purposes.

## Load Testing {#load-testing}

After deploying to the testing environment, we run load testing with queries from the production cluster. This is done manually.

Make sure you have enabled `query_log` on your production cluster.

Collect the query log for a day or more:

``` bash
$ clickhouse-client --query="SELECT DISTINCT query FROM system.query_log WHERE event_date = today() AND query LIKE '%ym:%' AND query NOT LIKE '%system.query_log%' AND type = 2 AND is_initial_query" > queries.tsv
```

This is a somewhat complicated example. `type = 2` filters queries that were executed successfully. `query LIKE '%ym:%'` selects relevant queries from Yandex.Metrica. `is_initial_query` selects only queries that were initiated by the client, not by ClickHouse itself (as parts of distributed query processing).

`scp` this log to your testing cluster and run it as follows:

``` bash
$ clickhouse benchmark --concurrency 16 < queries.tsv
```

(probably you also want to specify a `--user`)

Then leave it for a night or a weekend and go take a rest.

You should check that `clickhouse-server` does not crash, that the memory footprint is bounded, and that performance does not degrade over time.

Precise query execution timings are not recorded and not compared due to the high variability of queries and environment.

## Build Tests {#build-tests}

Build tests allow checking that the build is not broken on various alternative configurations and on some foreign systems. These tests are automated as well.

Examples:
- cross-compile for Darwin x86_64 (Mac OS X)
- cross-compile for FreeBSD x86_64
- cross-compile for Linux AArch64 (a build sketch follows at the end of this section)
- build on Ubuntu with libraries from system packages (discouraged)
- build with shared linking of libraries (discouraged)

For example, the build with system packages is bad practice, because we cannot guarantee what exact version of packages a system will have. But this is really needed by Debian maintainers. For this reason we at least have to support this variant of build. Another example: shared linking is a common source of trouble, but it is needed for some enthusiasts.

Though we cannot run all tests on all variants of builds, we want to check at least that various build variants are not broken. For this purpose we use build tests.

We also test that there are no translation units that are too long to compile or require too much RAM.

We also test that there are no too large stack frames.
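
A sketch of one such variant, cross-compiling for Linux AArch64 with the toolchain file shipped in the repository; the exact file name and required packages can differ between versions, so check the build documentation:

```bash
# Configure a separate build directory with the AArch64 cross-toolchain and build the main binary.
mkdir -p build-arm64 && cd build-arm64
cmake .. -G Ninja -DCMAKE_TOOLCHAIN_FILE=../cmake/linux/toolchain-aarch64.cmake
ninja clickhouse
```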

## Testing for Protocol Compatibility {#testing-for-protocol-compatibility}

When we extend the ClickHouse network protocol, we test manually that the old clickhouse-client works with the new clickhouse-server and that the new clickhouse-client works with the old clickhouse-server (simply by running binaries from the corresponding packages).

We also test some cases automatically with integration tests:
- whether data written by an old version of ClickHouse can be successfully read by the new version;
- whether distributed queries work in a cluster with different ClickHouse versions.

## Help from the Compiler {#help-from-the-compiler}

Main ClickHouse code (that is located in the `dbms` directory) is built with `-Wall -Wextra -Werror` and with some additional enabled warnings. Although these options are not enabled for third-party libraries.

Clang has even more useful warnings - you can look for them with `-Weverything` and pick something for the default build.

For production builds, clang is used, but we also test gcc builds. For development, clang is usually more convenient to use. You can build on your own machine with debug mode (to save the battery of your laptop), but please note that the compiler is able to generate more warnings with `-O3` due to better control flow and inter-procedure analysis. When building with clang in debug mode, the debug version of `libc++` is used that allows catching more errors at runtime.

## Sanitizers {#sanitizers}

### Address sanitizer
We run functional, integration, stress and unit tests under ASan on per-commit basis.

### Thread sanitizer
We run functional, integration, stress and unit tests under TSan on per-commit basis.

### Memory sanitizer
We run functional, integration, stress and unit tests under MSan on per-commit basis.

### Undefined behaviour sanitizer
We run functional, integration, stress and unit tests under UBSan on per-commit basis. The code of some third-party libraries is not sanitized for UB.
### Valgrind (Memcheck)
We used to run functional tests under Valgrind overnight, but don't do it anymore. It takes multiple hours. Currently there is one known false positive in the `re2` library, see [this article](https://research.swtch.com/sparse).
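
To reproduce a sanitizer report locally you can build ClickHouse with the corresponding instrumentation. A sketch, assuming the `SANITIZE` CMake option used by the ClickHouse build (accepted values include `address`, `thread`, `memory` and `undefined`):

```bash
# Build an ASan-instrumented binary in a dedicated build directory.
mkdir -p build-asan && cd build-asan
cmake .. -G Ninja -DCMAKE_BUILD_TYPE=RelWithDebInfo -DSANITIZE=address
ninja clickhouse
```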

## Fuzzing {#fuzzing}

ClickHouse fuzzing is implemented both using [libFuzzer](https://llvm.org/docs/LibFuzzer.html) and with random SQL queries.
All the fuzz testing should be performed with sanitizers (Address and Undefined).

LibFuzzer is used for isolated fuzz testing of library code. Fuzzers are implemented as part of the test code and have "_fuzzer" name postfixes.
A fuzzer example can be found at `src/Parsers/fuzzers/lexer_fuzzer.cpp`. LibFuzzer-specific configs, dictionaries and corpora are stored at `tests/fuzz`.
We encourage you to write fuzz tests for every functionality that handles user input.

Fuzzers are not built by default. To build fuzzers, both the `-DENABLE_FUZZING=1` and `-DENABLE_TESTS=1` options should be set.
We recommend disabling Jemalloc while building fuzzers. The configuration used to integrate ClickHouse fuzzing into Google OSS-Fuzz can be found at `docker/fuzz`.

We also use a simple fuzz test that generates random SQL queries and checks that the server does not die executing them.
You can find it in `00746_sql_fuzzy.pl`. This test should be run continuously (overnight and longer).

We also use a sophisticated AST-based query fuzzer that is able to find huge amounts of corner cases. It does random permutations and substitutions in the query AST. It remembers AST nodes from previous tests to use them for fuzzing of subsequent tests, while processing them in random order. You can learn more about this fuzzer in [this blog article](https://clickhouse.com/blog/en/2021/fuzzing-clickhouse/).
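
Once built with `-DENABLE_FUZZING=1`, an individual fuzzer is a normal libFuzzer binary. A sketch of a short local run; the binary path inside the build directory and the corpus directory name are illustrative, while `-max_len` and `-runs` are standard libFuzzer flags:

```bash
# Run the lexer fuzzer for a bounded number of iterations, growing a local corpus.
mkdir -p lexer_corpus
./src/Parsers/fuzzers/lexer_fuzzer lexer_corpus -max_len=8192 -runs=100000
```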

## Stress test {#stress-test}

Stress tests are another case of fuzzing. They run all functional tests in parallel in random order with a single server. Results of the tests are not checked.

It is checked that:
- the server does not crash, and no debug or sanitizer traps are triggered;
- there are no deadlocks;
- the database structure is consistent;
- the server can successfully stop after the test and start again without exceptions.

There are five variants (Debug, ASan, TSan, MSan, UBSan).

## Thread Fuzzer {#thread-fuzzer}

Thread Fuzzer (please do not mix it up with Thread Sanitizer) is another kind of fuzzing that allows randomizing the order of thread execution. It helps to find even more special cases.

## Security Audit {#security-audit}

Our Security Team did a basic overview of ClickHouse capabilities from the security standpoint.

## Static Analyzers {#static-analyzers}

We run `clang-tidy` on per-commit basis. `clang-static-analyzer` checks are also enabled. `clang-tidy` is also used for some style checks.

We have evaluated `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`, `CodeQL`. You will find instructions for usage in the `tests/instructions/` directory. You can also read [the article in Russian](https://habr.com/company/yandex/blog/342018/).

If you use `CLion` as an IDE, you can leverage some `clang-tidy` checks out of the box.

We also use `shellcheck` for static analysis of shell scripts.
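
A sketch of a quick local pass with the two lightweight tools from this section; the file names are placeholders, and `clang-tidy` is assumed to find a `compile_commands.json` in your build directory via `-p`:

```bash
# Static analysis of a shell script.
shellcheck tests/queries/shell_config.sh

# Run clang-tidy on a single translation unit using the compilation database of the build.
clang-tidy -p build src/Common/Exception.cpp
```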

## Hardening {#hardening}

In debug builds we are using a custom allocator that does ASLR of user-level allocations.

We also manually protect memory regions that are expected to be read-only after allocation.

In debug builds we also involve a customization of libc that ensures that no "harmful" (obsolete, insecure, not thread-safe) functions are called.

Debug assertions are used extensively.

In debug builds, if an exception with a "logical error" code (which implies a bug) is thrown, the program is terminated prematurely. This allows using exceptions in release builds while making them an assertion in debug builds.

The debug version of jemalloc is used for debug builds.
The debug version of libc++ is used for debug builds.

## Runtime Integrity Checks

Data stored on disk is checksummed. Data in MergeTree tables is checksummed in three ways simultaneously\* (compressed data blocks, uncompressed data blocks, the total checksum across blocks). Data transferred over the network between client and server or between servers is also checksummed. Replication ensures bit-identical data on replicas.

It is required to protect from faulty hardware (bit rot on storage media, bit flips in RAM on the server, bit flips in RAM of the network controller, bit flips in RAM of a network switch, bit flips in RAM of the client, bit flips on the wire). Note that bit flips are common and likely to occur even with ECC RAM and in the presence of TCP checksums (if you manage to run thousands of servers processing petabytes of data each day). [See the video (in Russian)](https://www.youtube.com/watch?v=ooBAQIe0KlQ).

ClickHouse provides diagnostics that will help ops engineers to find faulty hardware.

\* and it is not slow.
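
On the SQL level you can also ask the server to verify the checksums of a table's data explicitly (the table name below is a placeholder):

```bash
# CHECK TABLE re-reads the data and validates its checksums; the result column is 1 when the check passes.
clickhouse-client --query "CHECK TABLE my_database.my_table"
```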

## Code Style {#code-style}

Code style rules are described [here](style.md).

To check for some common style violations, you can use the `utils/check-style` script.

To force the proper style of your code, you can use `clang-format`. The file `.clang-format` is located at the sources root. It mostly corresponds to our actual code style. But it is not recommended to apply `clang-format` to existing files because it makes the formatting worse. You can use the `clang-format-diff` tool that you can find in the clang source repository.

Alternatively, you can try the `uncrustify` tool to reformat your code. The configuration is in `uncrustify.cfg` in the sources root. It is less tested than `clang-format`.

`CLion` has its own code formatter that has to be tuned for our code style.

We also use `codespell` to find typos in code. It is automated as well.
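
A sketch of the usual way to apply `clang-format-diff` only to the lines you changed; the script may be installed as `clang-format-diff.py` or with a version suffix, depending on your clang packaging:

```bash
# Reformat only the lines touched by your uncommitted changes.
git diff -U0 --no-color | clang-format-diff.py -p1 -i
```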

## Metrica B2B Tests {#metrica-b2b-tests}

Each ClickHouse release is tested with the Yandex Metrica and AppMetrica engines. Testing and stable versions of ClickHouse are deployed on VMs and run with a small copy of the Metrica engine that processes a fixed sample of input data. Then the results of the two Metrica engine instances are compared together.

These tests are automated by a separate team. Due to the high number of moving parts, the tests fail most of the time for completely unrelated reasons that are very difficult to figure out. Quite likely these tests have negative value for us. Nevertheless, they have proved to be useful in a handful of cases out of hundreds of runs.

## Test Coverage {#test-coverage}

We also track test coverage, but only for functional tests and only for clickhouse-server. It is performed on a daily basis.

## Tests for Tests

There is an automated check for flaky tests. It runs all new tests 100 times (for functional tests) or 10 times (for integration tests). If at least a single run fails, the test is considered flaky.
## Testflows

[Testflows](https://testflows.com/) is an enterprise-grade open-source testing framework, which is used to test a subset of ClickHouse. Altinity uses it for some of the tests, and we run those tests in our CI.

## Yandex Checks (only for Yandex employees)

These checks import ClickHouse code into the internal Yandex monorepository, so the ClickHouse codebase can be used as a library by other Yandex products (YT and YDB). Note that clickhouse-server itself is not built from the internal repository; Yandex applications use unmodified open-source builds.

## Test Automation {#test-automation}

We run tests with [GitHub Actions](https://github.com/features/actions).

Build jobs and tests are run on per-commit basis. Resulting packages and test results are published in GitHub and can be downloaded by direct links. Artifacts are stored for several months. When you send a pull request on GitHub, we tag it as "can be tested" and our CI system will build ClickHouse packages (release, debug, with address sanitizer, etc.) for you.

We do not use Travis CI due to the limit on time and computational power.
We do not use Jenkins. It was used before and now we are happy we are not using Jenkins.

[Original article](https://clickhouse.com/docs/en/development/tests/) <!--hide-->
@ -55,6 +55,5 @@ ORDER BY id

## See Also

- [Efficient Low-Cardinality Types](https://www.altinity.com/blog/2019/3/27/low-cardinality).
- [Reducing ClickHouse storage cost with the LowCardinality type – lessons from an Instana engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/).
- [String optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/ClickHouse/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf).
@ -121,8 +121,6 @@ ENGINE = <Engine>
...
```

If codecs are specified, the default codec does not apply. Codecs can be combined in a pipeline, for example, `CODEC(Delta, ZSTD)`. To select the best codec combination for your project, run benchmarks similar to the ones described in the Altinity article [New Encodings to Improve ClickHouse Efficiency](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse).

!!! warning "Warning"
    You cannot decompress ClickHouse database files with external utilities like `lz4`. Instead, use the special [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/programs/compressor) utility.
@ -723,7 +723,7 @@ bool Client::processWithFuzzing(const String & full_query)
|
||||
// queries, for lack of a better solution.
|
||||
// There is also a problem that fuzzer substitutes positive Int64
|
||||
// literals or Decimal literals, which are then parsed back as
|
||||
// UInt64, and suddenly duplicate alias substitition starts or stops
|
||||
// UInt64, and suddenly duplicate alias substitution starts or stops
|
||||
// working (ASTWithAlias::formatImpl) or something like that.
|
||||
// So we compare not even the first and second formatting of the
|
||||
// query, but second and third.
|
||||
|
programs/disks/CommandMkDir.cpp (new file)
@ -0,0 +1,67 @@
|
||||
#pragma once
|
||||
|
||||
#include "ICommand.h"
|
||||
#include <Interpreters/Context.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int BAD_ARGUMENTS;
|
||||
}
|
||||
|
||||
class CommandMkDir : public ICommand
|
||||
{
|
||||
public:
|
||||
CommandMkDir()
|
||||
{
|
||||
command_name = "mkdir";
|
||||
command_option_description.emplace(createOptionsDescription("Allowed options", getTerminalWidth()));
|
||||
description = "Create directory or directories recursively";
|
||||
usage = "mkdir [OPTION]... <PATH>";
|
||||
command_option_description->add_options()
|
||||
("recursive", "recursively create directories")
|
||||
;
|
||||
}
|
||||
|
||||
void processOptions(
|
||||
Poco::Util::LayeredConfiguration & config,
|
||||
po::variables_map & options) const override
|
||||
{
|
||||
if (options.count("recursive"))
|
||||
config.setBool("recursive", true);
|
||||
}
|
||||
|
||||
void execute(
|
||||
const std::vector<String> & command_arguments,
|
||||
DB::ContextMutablePtr & global_context,
|
||||
Poco::Util::LayeredConfiguration & config) override
|
||||
{
|
||||
if (command_arguments.size() != 1)
|
||||
{
|
||||
printHelpMessage();
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
}
|
||||
|
||||
String disk_name = config.getString("disk", "default");
|
||||
|
||||
String path = command_arguments[0];
|
||||
|
||||
DiskPtr disk = global_context->getDisk(disk_name);
|
||||
|
||||
String full_path = fullPathWithValidate(disk, path);
|
||||
bool recursive = config.getBool("recursive", false);
|
||||
|
||||
if (recursive)
|
||||
disk->createDirectories(full_path);
|
||||
else
|
||||
disk->createDirectory(full_path);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
std::unique_ptr <DB::ICommand> makeCommandMkDir()
|
||||
{
|
||||
return std::make_unique<DB::CommandMkDir>();
|
||||
}
|
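
For context, a rough idea of how the new command is meant to be invoked from the `clickhouse-disks` tool; the option spelling (`--disk`, `--recursive`) is inferred from the code above and should be checked against `clickhouse-disks --help`:

```bash
# Create a nested directory on the default disk (a sketch; verify options with --help).
clickhouse-disks --disk default mkdir --recursive store/example/dir
```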
@ -63,7 +63,7 @@ void DisksApp::addOptions(
|
||||
|
||||
positional_options_description.add("command_name", 1);
|
||||
|
||||
supported_commands = {"list-disks", "list", "move", "remove", "link", "copy", "write", "read"};
|
||||
supported_commands = {"list-disks", "list", "move", "remove", "link", "copy", "write", "read", "mkdir"};
|
||||
|
||||
command_descriptions.emplace("list-disks", makeCommandListDisks());
|
||||
command_descriptions.emplace("list", makeCommandList());
|
||||
@ -73,6 +73,7 @@ void DisksApp::addOptions(
|
||||
command_descriptions.emplace("copy", makeCommandCopy());
|
||||
command_descriptions.emplace("write", makeCommandWrite());
|
||||
command_descriptions.emplace("read", makeCommandRead());
|
||||
command_descriptions.emplace("mkdir", makeCommandMkDir());
|
||||
}
|
||||
|
||||
void DisksApp::processOptions()
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include "CommandLink.cpp"
|
||||
#include "CommandList.cpp"
|
||||
#include "CommandListDisks.cpp"
|
||||
#include "CommandMkDir.cpp"
|
||||
#include "CommandMove.cpp"
|
||||
#include "CommandRead.cpp"
|
||||
#include "CommandRemove.cpp"
|
||||
|
@ -65,3 +65,4 @@ std::unique_ptr <DB::ICommand> makeCommandMove();
|
||||
std::unique_ptr <DB::ICommand> makeCommandRead();
|
||||
std::unique_ptr <DB::ICommand> makeCommandRemove();
|
||||
std::unique_ptr <DB::ICommand> makeCommandWrite();
|
||||
std::unique_ptr <DB::ICommand> makeCommandMkDir();
|
||||
|
@ -67,7 +67,7 @@ Run this tool inside your git repository. It will create .tsv files that can be
|
||||
The tool can process large enough repositories in a reasonable time.
|
||||
It has been tested on:
|
||||
- ClickHouse: 31 seconds; 3 million rows;
|
||||
- LLVM: 8 minues; 62 million rows;
|
||||
- LLVM: 8 minutes; 62 million rows;
|
||||
- Linux - 12 minutes; 85 million rows;
|
||||
- Chromium - 67 minutes; 343 million rows;
|
||||
(the numbers as of Sep 2020)
|
||||
|
@ -9,6 +9,8 @@
|
||||
#include <base/StringRef.h>
|
||||
#include <theta_sketch.hpp>
|
||||
#include <theta_union.hpp>
|
||||
#include <theta_intersection.hpp>
|
||||
#include <theta_a_not_b.hpp>
|
||||
|
||||
|
||||
namespace DB
|
||||
@ -80,6 +82,58 @@ public:
|
||||
u->update(rhs.sk_union->get_result());
|
||||
}
|
||||
|
||||
void intersect(const ThetaSketchData & rhs)
|
||||
{
|
||||
datasketches::theta_union * u = getSkUnion();
|
||||
|
||||
if (sk_update)
|
||||
{
|
||||
u->update(*sk_update);
|
||||
sk_update.reset(nullptr);
|
||||
}
|
||||
|
||||
datasketches::theta_intersection theta_intersection;
|
||||
|
||||
theta_intersection.update(u->get_result());
|
||||
|
||||
if (rhs.sk_update)
|
||||
theta_intersection.update(*rhs.sk_update);
|
||||
else if (rhs.sk_union)
|
||||
theta_intersection.update(rhs.sk_union->get_result());
|
||||
|
||||
sk_union.reset(nullptr);
|
||||
u = getSkUnion();
|
||||
u->update(theta_intersection.get_result());
|
||||
}
|
||||
|
||||
void aNotB(const ThetaSketchData & rhs)
|
||||
{
|
||||
datasketches::theta_union * u = getSkUnion();
|
||||
|
||||
if (sk_update)
|
||||
{
|
||||
u->update(*sk_update);
|
||||
sk_update.reset(nullptr);
|
||||
}
|
||||
|
||||
datasketches::theta_a_not_b a_not_b;
|
||||
|
||||
if (rhs.sk_update)
|
||||
{
|
||||
datasketches::compact_theta_sketch result = a_not_b.compute(u->get_result(), *rhs.sk_update);
|
||||
sk_union.reset(nullptr);
|
||||
u = getSkUnion();
|
||||
u->update(result);
|
||||
}
|
||||
else if (rhs.sk_union)
|
||||
{
|
||||
datasketches::compact_theta_sketch result = a_not_b.compute(u->get_result(), rhs.sk_union->get_result());
|
||||
sk_union.reset(nullptr);
|
||||
u = getSkUnion();
|
||||
u->update(result);
|
||||
}
|
||||
}
|
||||
|
||||
/// You can only call for an empty object.
|
||||
void read(DB::ReadBuffer & in)
|
||||
{
|
||||
|
@ -557,7 +557,7 @@ void Connection::sendQuery(
|
||||
/// Send correct hash only for !INITIAL_QUERY, due to:
|
||||
/// - this will avoid extra protocol complexity for simplest cases
|
||||
/// - there is no need in hash for the INITIAL_QUERY anyway
|
||||
/// (since there is no secure/unsecure changes)
|
||||
/// (since there is no secure/non-secure changes)
|
||||
if (client_info && !cluster_secret.empty() && client_info->query_kind != ClientInfo::QueryKind::INITIAL_QUERY)
|
||||
{
|
||||
#if USE_SSL
|
||||
|
@ -41,7 +41,7 @@ HedgedConnectionsFactory::HedgedConnectionsFactory(
|
||||
HedgedConnectionsFactory::~HedgedConnectionsFactory()
|
||||
{
|
||||
/// Stop anything that maybe in progress,
|
||||
/// to avoid interfer with the subsequent connections.
|
||||
/// to avoid interference with the subsequent connections.
|
||||
///
|
||||
/// I.e. some replcas may be in the establishing state,
|
||||
/// this means that hedged connection is waiting for TablesStatusResponse,
|
||||
|
@ -15,8 +15,8 @@ namespace DB
|
||||
|
||||
static void callback(void * arg, int status, int, struct hostent * host)
|
||||
{
|
||||
auto * ptr_records = reinterpret_cast<std::unordered_set<std::string>*>(arg);
|
||||
if (status == ARES_SUCCESS && host->h_aliases)
|
||||
auto * ptr_records = static_cast<std::unordered_set<std::string>*>(arg);
|
||||
if (ptr_records && status == ARES_SUCCESS)
|
||||
{
|
||||
/*
|
||||
* In some cases (e.g /etc/hosts), hostent::h_name is filled and hostent::h_aliases is empty.
|
||||
@ -28,11 +28,14 @@ namespace DB
|
||||
ptr_records->insert(ptr_record);
|
||||
}
|
||||
|
||||
int i = 0;
|
||||
while (auto * ptr_record = host->h_aliases[i])
|
||||
if (host->h_aliases)
|
||||
{
|
||||
ptr_records->insert(ptr_record);
|
||||
i++;
|
||||
int i = 0;
|
||||
while (auto * ptr_record = host->h_aliases[i])
|
||||
{
|
||||
ptr_records->insert(ptr_record);
|
||||
i++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -52,15 +52,10 @@ void CurrentMemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceeded)
|
||||
if (current_thread)
|
||||
{
|
||||
Int64 will_be = current_thread->untracked_memory + size;
|
||||
Int64 limit = current_thread->untracked_memory_limit + current_thread->untracked_memory_limit_increase;
|
||||
|
||||
if (will_be > limit)
|
||||
if (will_be > current_thread->untracked_memory_limit)
|
||||
{
|
||||
/// Increase limit before track. If tracker throws out-of-limit we would be able to alloc up to untracked_memory_limit bytes
|
||||
/// more. It could be useful to enlarge Exception message in rethrow logic.
|
||||
current_thread->untracked_memory_limit_increase = current_thread->untracked_memory_limit;
|
||||
memory_tracker->allocImpl(will_be, throw_if_memory_exceeded);
|
||||
current_thread->untracked_memory_limit_increase = 0;
|
||||
current_thread->untracked_memory = 0;
|
||||
}
|
||||
else
|
||||
|
@ -64,7 +64,7 @@ struct IntervalKind
|
||||
const char * toNameOfFunctionExtractTimePart() const;
|
||||
|
||||
/// Converts the string representation of an interval kind to its IntervalKind equivalent.
|
||||
/// Returns false if the conversion unsucceeded.
|
||||
/// Returns false if the conversion did not succeed.
|
||||
/// For example, `IntervalKind::tryParseString('second', result)` returns `result` equals `IntervalKind::Kind::Second`.
|
||||
static bool tryParseString(const std::string & kind, IntervalKind::Kind & result);
|
||||
};
|
||||
|
@ -33,7 +33,7 @@ public:
|
||||
* max_protected_size shows how many of the most frequently used entries will not be evicted after a sequential scan.
|
||||
* max_protected_size == 0 means that the default protected size is equal to half of the total max size.
|
||||
*/
|
||||
/// TODO: construct from special struct with cache policy parametrs (also with max_protected_size).
|
||||
/// TODO: construct from special struct with cache policy parameters (also with max_protected_size).
|
||||
SLRUCachePolicy(size_t max_size_, size_t max_elements_size_ = 0, double size_ratio = 0.5, OnWeightLossFunction on_weight_loss_function_ = {})
|
||||
: max_protected_size(max_size_ * std::min(1.0, size_ratio))
|
||||
, max_size(max_size_)
|
||||
|
@ -31,7 +31,7 @@ inline UInt64 clock_gettime_ns_adjusted(UInt64 prev_time, clockid_t clock_type =
|
||||
}
|
||||
|
||||
/** Differs from Poco::Stopwatch only by using 'clock_gettime' instead of 'gettimeofday',
|
||||
* returns nanoseconds instead of microseconds, and also by other minor differencies.
|
||||
* returns nanoseconds instead of microseconds, and also by other minor differences.
|
||||
*/
|
||||
class Stopwatch
|
||||
{
|
||||
@ -152,4 +152,3 @@ private:
|
||||
/// Most significant bit is a lock. When it is set, compareAndRestartDeferred method will return false.
|
||||
UInt64 nanoseconds(UInt64 prev_time) const { return clock_gettime_ns_adjusted(prev_time, clock_type) & 0x7FFFFFFFFFFFFFFFULL; }
|
||||
};
|
||||
|
||||
|
@ -133,8 +133,6 @@ public:
|
||||
Int64 untracked_memory = 0;
|
||||
/// Each thread could new/delete memory in range of (-untracked_memory_limit, untracked_memory_limit) without access to common counters.
|
||||
Int64 untracked_memory_limit = 4 * 1024 * 1024;
|
||||
/// Increase limit in case of exception.
|
||||
Int64 untracked_memory_limit_increase = 0;
|
||||
|
||||
/// Statistics of read and write rows/bytes
|
||||
Progress progress_in;
|
||||
|
@ -497,7 +497,7 @@ private:
|
||||
/// last index of offsets that was not processed
|
||||
size_t last;
|
||||
|
||||
/// limit for adding to hashtable. In worst case with case insentive search, the table will be filled at most as half
|
||||
/// limit for adding to hashtable. In worst case with case insensitive search, the table will be filled at most as half
|
||||
static constexpr size_t small_limit = VolnitskyTraits::hash_size / 8;
|
||||
|
||||
public:
|
||||
|
@ -58,7 +58,7 @@ Fuzzing data consists of:
|
||||
else:
|
||||
read_key()
|
||||
if (7):
|
||||
read_nonce (simillar to read_key)
|
||||
read_nonce (similar to read_key)
|
||||
if (8):
|
||||
set current_key
|
||||
|
||||
|
@ -27,7 +27,7 @@ enum SnapshotVersion : uint8_t
|
||||
|
||||
static constexpr auto CURRENT_SNAPSHOT_VERSION = SnapshotVersion::V5;
|
||||
|
||||
/// What is stored in binary shapsnot
|
||||
/// What is stored in binary snapshot
|
||||
struct SnapshotDeserializationResult
|
||||
{
|
||||
/// Storage
|
||||
|
@ -2192,7 +2192,7 @@ void KeeperStorage::rollbackRequest(int64_t rollback_zxid, bool allow_missing)
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
LOG_FATAL(&Poco::Logger::get("KeeperStorage"), "Failed to rollback log. Terminating to avoid incosistencies");
|
||||
LOG_FATAL(&Poco::Logger::get("KeeperStorage"), "Failed to rollback log. Terminating to avoid inconsistencies");
|
||||
std::terminate();
|
||||
}
|
||||
}
|
||||
|
@ -53,7 +53,7 @@ public:
|
||||
/// Session was actually removed
|
||||
bool remove(int64_t session_id);
|
||||
|
||||
/// Update session expiry time (must be called on hearbeats)
|
||||
/// Update session expiry time (must be called on heartbeats)
|
||||
void addNewSessionOrUpdate(int64_t session_id, int64_t timeout_ms);
|
||||
|
||||
/// Get all expired sessions
|
||||
|
@ -1339,7 +1339,7 @@ void testLogAndStateMachine(Coordination::CoordinationSettingsPtr settings, uint
|
||||
nuraft::async_result<bool>::handler_type when_done = [&snapshot_created] (bool & ret, nuraft::ptr<std::exception> &/*exception*/)
|
||||
{
|
||||
snapshot_created = ret;
|
||||
std::cerr << "Snapshot finised\n";
|
||||
std::cerr << "Snapshot finished\n";
|
||||
};
|
||||
|
||||
state_machine->create_snapshot(s, when_done);
|
||||
|
@ -149,7 +149,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
|
||||
\
|
||||
M(UInt64, parallel_distributed_insert_select, 0, "Process distributed INSERT SELECT query in the same cluster on local tables on every shard; if set to 1 - SELECT is executed on each shard; if set to 2 - SELECT and INSERT are executed on each shard", 0) \
|
||||
M(UInt64, distributed_group_by_no_merge, 0, "If 1, Do not merge aggregation states from different servers for distributed queries (shards will process query up to the Complete stage, initiator just proxies the data from the shards). If 2 the initiator will apply ORDER BY and LIMIT stages (it is not in case when shard process query up to the Complete stage)", 0) \
|
||||
M(UInt64, distributed_push_down_limit, 1, "If 1, LIMIT will be applied on each shard separatelly. Usually you don't need to use it, since this will be done automatically if it is possible, i.e. for simple query SELECT FROM LIMIT.", 0) \
|
||||
M(UInt64, distributed_push_down_limit, 1, "If 1, LIMIT will be applied on each shard separately. Usually you don't need to use it, since this will be done automatically if it is possible, i.e. for simple query SELECT FROM LIMIT.", 0) \
|
||||
M(Bool, optimize_distributed_group_by_sharding_key, true, "Optimize GROUP BY sharding_key queries (by avoiding costly aggregation on the initiator server).", 0) \
|
||||
M(UInt64, optimize_skip_unused_shards_limit, 1000, "Limit for number of sharding key values, turns off optimize_skip_unused_shards if the limit is reached", 0) \
|
||||
M(Bool, optimize_skip_unused_shards, false, "Assumes that data is distributed by sharding_key. Optimization to skip unused shards if SELECT query filters by sharding_key.", 0) \
|
||||
@ -366,6 +366,8 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
|
||||
M(UInt64, partial_merge_join_left_table_buffer_bytes, 0, "If not 0 group left table blocks in bigger ones for left-side table in partial merge join. It uses up to 2x of specified memory per joining thread.", 0) \
|
||||
M(UInt64, partial_merge_join_rows_in_right_blocks, 65536, "Split right-hand joining data in blocks of specified size. It's a portion of data indexed by min-max values and possibly unloaded on disk.", 0) \
|
||||
M(UInt64, join_on_disk_max_files_to_merge, 64, "For MergeJoin on disk set how much files it's allowed to sort simultaneously. Then this value bigger then more memory used and then less disk I/O needed. Minimum is 2.", 0) \
|
||||
M(UInt64, max_rows_in_set_to_optimize_join, 100'000, "Maximal size of the set to filter joined tables by each other row sets before joining. 0 - disable.", 0) \
|
||||
\
|
||||
M(Bool, compatibility_ignore_collation_in_create_table, true, "Compatibility ignore collation in create table", 0) \
|
||||
\
|
||||
M(String, temporary_files_codec, "LZ4", "Set compression codec for temporary files (sort and join on disk). I.e. LZ4, NONE.", 0) \
|
||||
|
@ -89,7 +89,7 @@ static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> sett
|
||||
{"22.3", {{"cast_ipv4_ipv6_default_on_conversion_error", true, false, "Make functions cast(value, 'IPv4') and cast(value, 'IPv6') behave same as toIPv4 and toIPv6 functions"}}},
|
||||
{"21.12", {{"stream_like_engine_allow_direct_select", true, false, "Do not allow direct select for Kafka/RabbitMQ/FileLog by default"}}},
|
||||
{"21.9", {{"output_format_decimal_trailing_zeros", true, false, "Do not output trailing zeros in text representation of Decimal types by default for better looking output"},
|
||||
{"use_hedged_requests", false, true, "Enable Hedged Requests feature bu default"}}},
|
||||
{"use_hedged_requests", false, true, "Enable Hedged Requests feature by default"}}},
|
||||
{"21.7", {{"legacy_column_name_of_tuple_literal", true, false, "Add this setting only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher"}}},
|
||||
{"21.5", {{"async_socket_for_remote", false, true, "Fix all problems and turn on asynchronous reads from socket for remote queries by default again"}}},
|
||||
{"21.3", {{"async_socket_for_remote", true, false, "Turn off asynchronous reads from socket for remote queries because of some problems"},
|
||||
|
@ -153,7 +153,7 @@ enum class HandleKafkaErrorMode
|
||||
{
|
||||
DEFAULT = 0, // Ignore errors with threshold.
|
||||
STREAM, // Put errors to stream in the virtual column named ``_error.
|
||||
/*FIXED_SYSTEM_TABLE, Put errors to in a fixed system table likey system.kafka_errors. This is not implemented now. */
|
||||
/*FIXED_SYSTEM_TABLE, Put errors to in a fixed system table likely system.kafka_errors. This is not implemented now. */
|
||||
/*CUSTOM_SYSTEM_TABLE, Put errors to in a custom system table. This is not implemented now. */
|
||||
};
|
||||
|
||||
|
@ -180,7 +180,7 @@ namespace detail
|
||||
|
||||
/** Returns array with UInt8 represent if key from in_keys array is in hierarchy of key from keys column.
|
||||
* If value in result array is 1 that means key from in_keys array is in hierarchy of key from
|
||||
* keys array with same index, 0 therwise.
|
||||
* keys array with same index, 0 otherwise.
|
||||
* For getting hierarchy implementation uses getKeysHierarchy function.
|
||||
*
|
||||
* Not: keys size must be equal to in_keys_size.
|
||||
|
@ -118,7 +118,7 @@ MongoDBDictionarySource::MongoDBDictionarySource(
|
||||
Poco::URI poco_uri(uri);
|
||||
|
||||
// Parse database from URI. This is required for correctness -- the
|
||||
// cursor is created using database name and colleciton name, so we have
|
||||
// cursor is created using database name and collection name, so we have
|
||||
// to specify them properly.
|
||||
db = poco_uri.getPath();
|
||||
// getPath() may return a leading slash, remove it.
|
||||
|
@ -244,7 +244,7 @@ void buildAttributeExpressionIfNeeded(
|
||||
root->appendChild(expression_element);
|
||||
}
|
||||
|
||||
/** Transofrms single dictionary attribute to configuration
|
||||
/** Transforms single dictionary attribute to configuration
|
||||
* third_column UInt8 DEFAULT 2 EXPRESSION rand() % 100 * 77
|
||||
* to
|
||||
* <attribute>
|
||||
|
@ -253,6 +253,13 @@ void DiskObjectStorage::removeSharedFile(const String & path, bool delete_metada
|
||||
transaction->commit();
|
||||
}
|
||||
|
||||
void DiskObjectStorage::removeSharedFiles(const RemoveBatchRequest & files, bool keep_all_batch_data, const NameSet & file_names_remove_metadata_only)
|
||||
{
|
||||
auto transaction = createObjectStorageTransaction();
|
||||
transaction->removeSharedFiles(files, keep_all_batch_data, file_names_remove_metadata_only);
|
||||
transaction->commit();
|
||||
}
|
||||
|
||||
UInt32 DiskObjectStorage::getRefCount(const String & path) const
|
||||
{
|
||||
return metadata_storage->getHardlinkCount(path);
|
||||
|
@ -92,6 +92,8 @@ public:
|
||||
|
||||
void removeSharedRecursive(const String & path, bool keep_all_batch_data, const NameSet & file_names_remove_metadata_only) override;
|
||||
|
||||
void removeSharedFiles(const RemoveBatchRequest & files, bool keep_all_batch_data, const NameSet & file_names_remove_metadata_only) override;
|
||||
|
||||
MetadataStoragePtr getMetadataStorage() override { return metadata_storage; }
|
||||
|
||||
UInt32 getRefCount(const String & path) const override;
|
||||
|
@ -5,7 +5,6 @@
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Common/Exception.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
@ -139,6 +138,87 @@ struct RemoveObjectStorageOperation final : public IDiskObjectStorageOperation
|
||||
}
|
||||
};
|
||||
|
||||
struct RemoveManyObjectStorageOperation final : public IDiskObjectStorageOperation
|
||||
{
|
||||
RemoveBatchRequest remove_paths;
|
||||
bool keep_all_batch_data;
|
||||
NameSet file_names_remove_metadata_only;
|
||||
StoredObjects objects_to_remove;
|
||||
bool remove_from_cache = false;
|
||||
|
||||
RemoveManyObjectStorageOperation(
|
||||
IObjectStorage & object_storage_,
|
||||
IMetadataStorage & metadata_storage_,
|
||||
const RemoveBatchRequest & remove_paths_,
|
||||
bool keep_all_batch_data_,
|
||||
const NameSet & file_names_remove_metadata_only_)
|
||||
: IDiskObjectStorageOperation(object_storage_, metadata_storage_)
|
||||
, remove_paths(remove_paths_)
|
||||
, keep_all_batch_data(keep_all_batch_data_)
|
||||
, file_names_remove_metadata_only(file_names_remove_metadata_only_)
|
||||
{}
|
||||
|
||||
std::string getInfoForLog() const override
|
||||
{
|
||||
return fmt::format("RemoveManyObjectStorageOperation (paths size: {}, keep all batch {}, files to keep {})", remove_paths.size(), keep_all_batch_data, fmt::join(file_names_remove_metadata_only, ", "));
|
||||
}
|
||||
|
||||
void execute(MetadataTransactionPtr tx) override
|
||||
{
|
||||
for (const auto & [path, if_exists] : remove_paths)
|
||||
{
|
||||
|
||||
if (!metadata_storage.exists(path))
|
||||
{
|
||||
if (if_exists)
|
||||
continue;
|
||||
|
||||
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Metadata path '{}' doesn't exist", path);
|
||||
}
|
||||
|
||||
if (!metadata_storage.isFile(path))
|
||||
throw Exception(ErrorCodes::BAD_FILE_TYPE, "Path '{}' is not a regular file", path);
|
||||
|
||||
try
|
||||
{
|
||||
uint32_t hardlink_count = metadata_storage.getHardlinkCount(path);
|
||||
auto objects = metadata_storage.getStorageObjects(path);
|
||||
|
||||
tx->unlinkMetadata(path);
|
||||
|
||||
/// File is really redundant
|
||||
if (hardlink_count == 0 && !keep_all_batch_data && !file_names_remove_metadata_only.contains(fs::path(path).filename()))
|
||||
objects_to_remove.insert(objects_to_remove.end(), objects.begin(), objects.end());
|
||||
}
|
||||
catch (const Exception & e)
|
||||
{
|
||||
/// If it's impossible to read meta - just remove it from FS.
|
||||
if (e.code() == ErrorCodes::UNKNOWN_FORMAT
|
||||
|| e.code() == ErrorCodes::ATTEMPT_TO_READ_AFTER_EOF
|
||||
|| e.code() == ErrorCodes::CANNOT_READ_ALL_DATA
|
||||
|| e.code() == ErrorCodes::CANNOT_OPEN_FILE)
|
||||
{
|
||||
tx->unlinkFile(path);
|
||||
}
|
||||
else
|
||||
throw;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void undo() override
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
void finalize() override
|
||||
{
|
||||
if (!objects_to_remove.empty())
|
||||
object_storage.removeObjects(objects_to_remove);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
struct RemoveRecursiveObjectStorageOperation final : public IDiskObjectStorageOperation
|
||||
{
|
||||
std::string path;
|
||||
@ -480,14 +560,8 @@ void DiskObjectStorageTransaction::removeFileIfExists(const std::string & path)
|
||||
void DiskObjectStorageTransaction::removeSharedFiles(
|
||||
const RemoveBatchRequest & files, bool keep_all_batch_data, const NameSet & file_names_remove_metadata_only)
|
||||
{
|
||||
for (const auto & file : files)
|
||||
{
|
||||
bool keep_file = keep_all_batch_data || file_names_remove_metadata_only.contains(fs::path(file.path).filename());
|
||||
if (file.if_exists)
|
||||
removeSharedFileIfExists(file.path, keep_file);
|
||||
else
|
||||
removeSharedFile(file.path, keep_file);
|
||||
}
|
||||
auto operation = std::make_unique<RemoveManyObjectStorageOperation>(object_storage, metadata_storage, files, keep_all_batch_data, file_names_remove_metadata_only);
|
||||
operations_to_execute.emplace_back(std::move(operation));
|
||||
}
|
||||
|
||||
namespace
|
||||
|
@ -124,7 +124,7 @@ public:
|
||||
|
||||
virtual ~IMetadataStorage() = default;
|
||||
|
||||
/// ==== More specefic methods. Previous were almost general purpose. ====
|
||||
/// ==== More specific methods. Previous were almost general purpose. ====
|
||||
|
||||
/// Read multiple metadata files into strings and return mapping from file_path -> metadata
|
||||
virtual std::unordered_map<std::string, std::string> getSerializedMetadata(const std::vector<String> & file_paths) const = 0;
|
||||
|
@ -245,6 +245,8 @@ void S3ObjectStorage::removeObjectImpl(const StoredObject & object, bool if_exis
|
||||
auto outcome = client_ptr->DeleteObject(request);
|
||||
|
||||
throwIfUnexpectedError(outcome, if_exists);
|
||||
|
||||
LOG_TRACE(log, "Object with path {} was removed from S3", object.absolute_path);
|
||||
}
|
||||
|
||||
void S3ObjectStorage::removeObjectsImpl(const StoredObjects & objects, bool if_exists)
|
||||
@ -288,6 +290,8 @@ void S3ObjectStorage::removeObjectsImpl(const StoredObjects & objects, bool if_e
|
||||
auto outcome = client_ptr->DeleteObjects(request);
|
||||
|
||||
throwIfUnexpectedError(outcome, if_exists);
|
||||
|
||||
LOG_TRACE(log, "Objects with paths [{}] were removed from S3", keys);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include <aws/s3/model/ListObjectsV2Result.h>
|
||||
#include <Storages/StorageS3Settings.h>
|
||||
#include <Common/MultiVersion.h>
|
||||
#include <Common/logger_useful.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
@ -180,6 +181,7 @@ private:
|
||||
|
||||
const String version_id;
|
||||
|
||||
Poco::Logger * log = &Poco::Logger::get("S3ObjectStorage");
|
||||
DataSourceDescription data_source_description;
|
||||
};
|
||||
|
||||
|
@ -92,6 +92,11 @@ list (APPEND OBJECT_LIBS $<TARGET_OBJECTS:clickhouse_functions_url>)
|
||||
add_subdirectory(array)
|
||||
list (APPEND OBJECT_LIBS $<TARGET_OBJECTS:clickhouse_functions_array>)
|
||||
|
||||
if (TARGET ch_contrib::datasketches)
|
||||
add_subdirectory(UniqTheta)
|
||||
list (APPEND OBJECT_LIBS $<TARGET_OBJECTS:clickhouse_functions_uniqtheta>)
|
||||
endif()
|
||||
|
||||
add_subdirectory(JSONPath)
|
||||
list (APPEND PRIVATE_LIBS clickhouse_functions_jsonpath)
|
||||
|
||||
|
@ -134,7 +134,7 @@ using FunctionArgumentDescriptors = std::vector<FunctionArgumentDescriptor>;
|
||||
* (e.g. depending on result type or other trait).
|
||||
* First, checks that number of arguments is as expected (including optional arguments).
|
||||
* Second, checks that mandatory args present and have valid type.
|
||||
* Third, checks optional arguents types, skipping ones that are missing.
|
||||
* Third, checks optional arguments types, skipping ones that are missing.
|
||||
*
|
||||
* Please note that if you have several optional arguments, like f([a, b, c]),
|
||||
* only these calls are considered valid:
|
||||
|
src/Functions/UniqTheta/CMakeLists.txt (new file)
@ -0,0 +1,9 @@
|
||||
include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake")
|
||||
|
||||
add_library(clickhouse_functions_uniqtheta FunctionsUniqTheta.cpp)
|
||||
|
||||
target_link_libraries(clickhouse_functions_uniqtheta PRIVATE dbms)
|
||||
|
||||
if (TARGET ch_contrib::datasketches)
|
||||
target_link_libraries (clickhouse_functions_uniqtheta PRIVATE ch_contrib::datasketches)
|
||||
endif ()
|
src/Functions/UniqTheta/FunctionsUniqTheta.cpp (new file)
@ -0,0 +1,68 @@
|
||||
#include <Functions/FunctionFactory.h>
|
||||
|
||||
#include "FunctionsUniqTheta.h"
|
||||
|
||||
#if USE_DATASKETCHES
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
REGISTER_FUNCTION(UniqTheta)
|
||||
{
|
||||
factory.registerFunction<FunctionUniqThetaIntersect>(
|
||||
{
|
||||
R"(
|
||||
Two uniqThetaSketch objects to do intersect calculation(set operation ∩), the result is a new uniqThetaSketch.
|
||||
|
||||
A uniqThetaSketch object is to be constructed by aggregation function uniqTheta with -State.
|
||||
|
||||
UniqThetaSketch is a data structure storage of approximate values set.
|
||||
For more information on RoaringBitmap, see: [Theta Sketch Framework](https://datasketches.apache.org/docs/Theta/ThetaSketchFramework.html).
|
||||
|
||||
Typical usage:
|
||||
[example:typical]
|
||||
)",
|
||||
Documentation::Examples{
|
||||
{"typical", "select finalizeAggregation(uniqThetaIntersect(arrayReduce('uniqThetaState',[1,2]), arrayReduce('uniqThetaState',[2,3,4])));"}},
|
||||
Documentation::Categories{"uniqTheta"}
|
||||
});
|
||||
|
||||
factory.registerFunction<FunctionUniqThetaUnion>(
|
||||
{
|
||||
R"(
|
||||
Two uniqThetaSketch objects to do union calculation(set operation ∪), the result is a new uniqThetaSketch.
|
||||
|
||||
A uniqThetaSketch object is to be constructed by aggregation function uniqTheta with -State.
|
||||
|
||||
UniqThetaSketch is a data structure storage of approximate values set.
|
||||
For more information on RoaringBitmap, see: [Theta Sketch Framework](https://datasketches.apache.org/docs/Theta/ThetaSketchFramework.html).
|
||||
|
||||
Typical usage:
|
||||
[example:typical]
|
||||
)",
|
||||
Documentation::Examples{
|
||||
{"typical", "select finalizeAggregation(uniqThetaUnion(arrayReduce('uniqThetaState',[1,2]), arrayReduce('uniqThetaState',[2,3,4])));"}},
|
||||
Documentation::Categories{"uniqTheta"}
|
||||
});
|
||||
factory.registerFunction<FunctionUniqThetaNot>(
|
||||
{
|
||||
R"(
|
||||
Two uniqThetaSketch objects to do a_not_b calculation(set operation ×), the result is a new uniqThetaSketch.
|
||||
|
||||
A uniqThetaSketch object is to be constructed by aggregation function uniqTheta with -State.
|
||||
|
||||
UniqThetaSketch is a data structure storage of approximate values set.
|
||||
For more information on RoaringBitmap, see: [Theta Sketch Framework](https://datasketches.apache.org/docs/Theta/ThetaSketchFramework.html).
|
||||
|
||||
Typical usage:
|
||||
[example:typical]
|
||||
)",
|
||||
Documentation::Examples{
|
||||
{"typical", "select finalizeAggregation(uniqThetaNot(arrayReduce('uniqThetaState',[1,2]), arrayReduce('uniqThetaState',[2,3,4])));"}},
|
||||
Documentation::Categories{"uniqTheta"}
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
|
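
The registration strings above already carry usage examples; for convenience, the same intersect example as a one-liner against a running server:

```bash
clickhouse-client --query "SELECT finalizeAggregation(uniqThetaIntersect(arrayReduce('uniqThetaState',[1,2]), arrayReduce('uniqThetaState',[2,3,4])))"
```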
src/Functions/UniqTheta/FunctionsUniqTheta.h (new file)
@ -0,0 +1,176 @@
|
||||
#pragma once
|
||||
|
||||
#include <Common/config.h>
|
||||
|
||||
#if USE_DATASKETCHES
|
||||
|
||||
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
||||
#include <Columns/ColumnAggregateFunction.h>
|
||||
#include <Columns/ColumnArray.h>
|
||||
#include <Columns/ColumnConst.h>
|
||||
#include <Columns/ColumnString.h>
|
||||
#include <Columns/ColumnVector.h>
|
||||
#include <Columns/ColumnsNumber.h>
|
||||
#include <DataTypes/DataTypeAggregateFunction.h>
|
||||
#include <DataTypes/DataTypeArray.h>
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <Functions/FunctionHelpers.h>
|
||||
#include <Functions/IFunction.h>
|
||||
#include <Interpreters/castColumn.h>
|
||||
#include <Common/assert_cast.h>
|
||||
#include <Common/typeid_cast.h>
|
||||
|
||||
#include <AggregateFunctions/AggregateFunctionUniq.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||
}
|
||||
|
||||
struct UniqThetaIntersectImpl
|
||||
{
|
||||
static void apply(AggregateFunctionUniqThetaData & sketch_data_1, const AggregateFunctionUniqThetaData & sketch_data_2)
|
||||
{
|
||||
sketch_data_1.set.intersect(sketch_data_2.set);
|
||||
}
|
||||
};
|
||||
|
||||
struct UniqThetaUnionImpl
|
||||
{
|
||||
static void apply(AggregateFunctionUniqThetaData & sketch_data_1, const AggregateFunctionUniqThetaData & sketch_data_2)
|
||||
{
|
||||
sketch_data_1.set.merge(sketch_data_2.set);
|
||||
}
|
||||
};
|
||||
|
||||
struct UniqThetaNotImpl
|
||||
{
|
||||
static void apply(AggregateFunctionUniqThetaData & sketch_data_1, const AggregateFunctionUniqThetaData & sketch_data_2)
|
||||
{
|
||||
sketch_data_1.set.aNotB(sketch_data_2.set);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename Impl, typename Name>
|
||||
class FunctionUniqTheta : public IFunction
|
||||
{
|
||||
public:
|
||||
static constexpr auto name = Name::name;
|
||||
|
||||
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionUniqTheta>(); }
|
||||
|
||||
String getName() const override { return name; }
|
||||
|
||||
bool isVariadic() const override { return false; }
|
||||
|
||||
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
|
||||
|
||||
size_t getNumberOfArguments() const override { return 2; }
|
||||
|
||||
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
|
||||
{
|
||||
const auto * sketch_type0 = typeid_cast<const DataTypeAggregateFunction *>(arguments[0].get());
|
||||
if (!(sketch_type0 && sketch_type0->getFunctionName() == "uniqTheta"))
|
||||
throw Exception(
|
||||
"First argument for function " + getName() + " must be a uniqTheta but it has type " + arguments[0]->getName(),
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
|
||||
const auto * sketch_type1 = typeid_cast<const DataTypeAggregateFunction *>(arguments[1].get());
|
||||
if (!(sketch_type1 && sketch_type1->getFunctionName() == "uniqTheta"))
|
||||
throw Exception(
|
||||
"Second argument for function " + getName() + " must be a uniqTheta but it has type " + arguments[1]->getName(),
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
|
||||
const DataTypes & arg_data_types0 = sketch_type0->getArgumentsDataTypes();
|
||||
const DataTypes & arg_data_types1 = sketch_type1->getArgumentsDataTypes();
|
||||
|
||||
if (arg_data_types0.size() != arg_data_types1.size())
|
||||
throw Exception(
|
||||
"The nested type in uniqThetas must be the same length, but one is " + std::to_string(arg_data_types0.size())
|
||||
+ ", and the other is " + std::to_string(arg_data_types1.size()),
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
|
||||
size_t types_size = arg_data_types0.size();
|
||||
for (size_t i = 0; i < types_size; ++i)
|
||||
{
|
||||
if (!arg_data_types0[i]->equals(*arg_data_types1[i]))
|
||||
throw Exception(
|
||||
"The " + std::to_string(i) + "th nested type in uniqThetas must be the same, but one is " + arg_data_types0[i]->getName()
|
||||
+ ", and the other is " + arg_data_types1[i]->getName(),
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
}
|
||||
|
||||
|
||||
return arguments[0];
|
||||
}
|
||||
|
||||
bool useDefaultImplementationForConstants() const override { return true; }
|
||||
|
||||
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
|
||||
{
|
||||
const ColumnAggregateFunction * column_ptrs[2];
|
||||
bool is_column_const[2];
|
||||
for (size_t i = 0; i < 2; ++i)
|
||||
{
|
||||
if (const auto * argument_column_const = typeid_cast<const ColumnConst *>(arguments[i].column.get()))
|
||||
{
|
||||
column_ptrs[i] = typeid_cast<const ColumnAggregateFunction *>(argument_column_const->getDataColumnPtr().get());
|
||||
is_column_const[i] = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
column_ptrs[i] = typeid_cast<const ColumnAggregateFunction *>(arguments[i].column.get());
|
||||
is_column_const[i] = false;
|
||||
}
|
||||
}
|
||||
|
||||
auto col_to = ColumnAggregateFunction::create(column_ptrs[0]->getAggregateFunction());
|
||||
|
||||
col_to->reserve(input_rows_count);
|
||||
|
||||
const PaddedPODArray<AggregateDataPtr> & container0 = column_ptrs[0]->getData();
|
||||
const PaddedPODArray<AggregateDataPtr> & container1 = column_ptrs[1]->getData();
|
||||
|
||||
for (size_t i = 0; i < input_rows_count; ++i)
|
||||
{
|
||||
const AggregateDataPtr data_ptr_0 = is_column_const[0] ? container0[0] : container0[i];
|
||||
const AggregateDataPtr data_ptr_1 = is_column_const[1] ? container1[0] : container1[i];
|
||||
|
||||
col_to->insertFrom(data_ptr_0);
|
||||
AggregateFunctionUniqThetaData & sketch_data_1 = *reinterpret_cast<AggregateFunctionUniqThetaData *>(col_to->getData()[i]);
|
||||
const AggregateFunctionUniqThetaData & sketch_data_2
|
||||
= *reinterpret_cast<const AggregateFunctionUniqThetaData *>(data_ptr_1);
|
||||
Impl::apply(sketch_data_1, sketch_data_2);
|
||||
}
|
||||
return col_to;
|
||||
}
|
||||
};
|
||||
|
||||
struct NameUniqThetaIntersect
|
||||
{
|
||||
static constexpr auto name = "uniqThetaIntersect";
|
||||
};
|
||||
|
||||
struct NameUniqThetaUnion
|
||||
{
|
||||
static constexpr auto name = "uniqThetaUnion";
|
||||
};
|
||||
|
||||
struct NameUniqThetaNot
|
||||
{
|
||||
static constexpr auto name = "uniqThetaNot";
|
||||
};
|
||||
|
||||
using FunctionUniqThetaIntersect = FunctionUniqTheta<UniqThetaIntersectImpl, NameUniqThetaIntersect>;
|
||||
using FunctionUniqThetaUnion = FunctionUniqTheta<UniqThetaUnionImpl, NameUniqThetaUnion>;
|
||||
using FunctionUniqThetaNot = FunctionUniqTheta<UniqThetaNotImpl, NameUniqThetaNot>;
|
||||
|
||||
}
|
||||
|
||||
|
||||
#endif
|
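The header above routes all three functions through a single FunctionUniqTheta template parameterized by an Impl and a Name struct. As a purely hypothetical illustration of that design (not part of this commit), one more set operation would only need a new Impl/Name pair; the argument validation and const-column handling in FunctionUniqTheta are reused unchanged.

#include "FunctionsUniqTheta.h"

#if USE_DATASKETCHES

namespace DB
{

/// Hypothetical example: another alias of the union operation. Only the Impl/Name pair is new.
struct UniqThetaMergeImpl
{
    static void apply(AggregateFunctionUniqThetaData & sketch_data_1, const AggregateFunctionUniqThetaData & sketch_data_2)
    {
        sketch_data_1.set.merge(sketch_data_2.set);
    }
};

struct NameUniqThetaMerge
{
    static constexpr auto name = "uniqThetaMerge"; /// hypothetical name, not registered by this commit
};

using FunctionUniqThetaMerge = FunctionUniqTheta<UniqThetaMergeImpl, NameUniqThetaMerge>;

}

#endif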
@ -24,7 +24,7 @@
|
||||
# include <aws/core/utils/UUID.h>
|
||||
# include <aws/core/http/HttpClientFactory.h>
|
||||
# include <aws/s3/S3Client.h>
|
||||
# include <aws/s3/model/HeadObjectRequest.h> // Y_IGNORE
|
||||
# include <aws/s3/model/HeadObjectRequest.h>
|
||||
|
||||
# include <IO/S3/PocoHTTPClientFactory.h>
|
||||
# include <IO/S3/PocoHTTPClient.h>
|
||||
|
@ -40,7 +40,7 @@ namespace ErrorCodes
|
||||
struct WriteBufferFromS3::UploadPartTask
|
||||
{
|
||||
Aws::S3::Model::UploadPartRequest req;
|
||||
bool is_finised = false;
|
||||
bool is_finished = false;
|
||||
std::string tag;
|
||||
std::exception_ptr exception;
|
||||
};
|
||||
@ -48,7 +48,7 @@ struct WriteBufferFromS3::UploadPartTask
|
||||
struct WriteBufferFromS3::PutObjectTask
|
||||
{
|
||||
Aws::S3::Model::PutObjectRequest req;
|
||||
bool is_finised = false;
|
||||
bool is_finished = false;
|
||||
std::exception_ptr exception;
|
||||
};
|
||||
|
||||
@ -64,10 +64,10 @@ WriteBufferFromS3::WriteBufferFromS3(
|
||||
: BufferWithOwnMemory<WriteBuffer>(buffer_size_, nullptr, 0)
|
||||
, bucket(bucket_)
|
||||
, key(key_)
|
||||
, client_ptr(std::move(client_ptr_))
|
||||
, upload_part_size(s3_settings_.min_upload_part_size)
|
||||
, s3_settings(s3_settings_)
|
||||
, client_ptr(std::move(client_ptr_))
|
||||
, object_metadata(std::move(object_metadata_))
|
||||
, upload_part_size(s3_settings_.min_upload_part_size)
|
||||
, schedule(std::move(schedule_))
|
||||
, write_settings(write_settings_)
|
||||
{
|
||||
@ -218,7 +218,7 @@ void WriteBufferFromS3::writePart()
|
||||
return;
|
||||
}
|
||||
|
||||
if (part_tags.size() == S3_WARN_MAX_PARTS)
|
||||
if (TSA_SUPPRESS_WARNING_FOR_READ(part_tags).size() == S3_WARN_MAX_PARTS)
|
||||
{
|
||||
// Don't throw exception here by ourselves but leave the decision to take by S3 server.
|
||||
LOG_WARNING(log, "Maximum part number in S3 protocol has reached (too many parts). Server may not accept this whole upload.");
|
||||
@ -231,6 +231,7 @@ void WriteBufferFromS3::writePart()
|
||||
int part_number;
|
||||
{
|
||||
std::lock_guard lock(bg_tasks_mutex);
|
||||
|
||||
task = &upload_object_tasks.emplace_back();
|
||||
++num_added_bg_tasks;
|
||||
part_number = num_added_bg_tasks;
|
||||
@ -240,7 +241,7 @@ void WriteBufferFromS3::writePart()
|
||||
auto task_finish_notify = [&, task]()
|
||||
{
|
||||
std::lock_guard lock(bg_tasks_mutex);
|
||||
task->is_finised = true;
|
||||
task->is_finished = true;
|
||||
++num_finished_bg_tasks;
|
||||
|
||||
/// Notification under mutex is important here.
|
||||
@ -276,9 +277,11 @@ void WriteBufferFromS3::writePart()
|
||||
else
|
||||
{
|
||||
UploadPartTask task;
|
||||
fillUploadRequest(task.req, part_tags.size() + 1);
|
||||
auto & tags = TSA_SUPPRESS_WARNING_FOR_WRITE(part_tags); /// Suppress warning because schedule == false.
|
||||
|
||||
fillUploadRequest(task.req, tags.size() + 1);
|
||||
processUploadRequest(task);
|
||||
part_tags.push_back(task.tag);
|
||||
tags.push_back(task.tag);
|
||||
}
|
||||
}
|
||||
|
||||
@ -302,6 +305,7 @@ void WriteBufferFromS3::processUploadRequest(UploadPartTask & task)
|
||||
if (outcome.IsSuccess())
|
||||
{
|
||||
task.tag = outcome.GetResult().GetETag();
|
||||
std::lock_guard lock(bg_tasks_mutex); /// Protect part_tags from race
|
||||
LOG_TRACE(log, "Writing part finished. Bucket: {}, Key: {}, Upload_id: {}, Etag: {}, Parts: {}", bucket, key, multipart_upload_id, task.tag, part_tags.size());
|
||||
}
|
||||
else
|
||||
@ -312,9 +316,11 @@ void WriteBufferFromS3::processUploadRequest(UploadPartTask & task)
|
||||
|
||||
void WriteBufferFromS3::completeMultipartUpload()
|
||||
{
|
||||
LOG_TRACE(log, "Completing multipart upload. Bucket: {}, Key: {}, Upload_id: {}, Parts: {}", bucket, key, multipart_upload_id, part_tags.size());
|
||||
const auto & tags = TSA_SUPPRESS_WARNING_FOR_READ(part_tags);
|
||||
|
||||
if (part_tags.empty())
|
||||
LOG_TRACE(log, "Completing multipart upload. Bucket: {}, Key: {}, Upload_id: {}, Parts: {}", bucket, key, multipart_upload_id, tags.size());
|
||||
|
||||
if (tags.empty())
|
||||
throw Exception("Failed to complete multipart upload. No parts have uploaded", ErrorCodes::S3_ERROR);
|
||||
|
||||
Aws::S3::Model::CompleteMultipartUploadRequest req;
|
||||
@ -323,10 +329,10 @@ void WriteBufferFromS3::completeMultipartUpload()
|
||||
req.SetUploadId(multipart_upload_id);
|
||||
|
||||
Aws::S3::Model::CompletedMultipartUpload multipart_upload;
|
||||
for (size_t i = 0; i < part_tags.size(); ++i)
|
||||
for (size_t i = 0; i < tags.size(); ++i)
|
||||
{
|
||||
Aws::S3::Model::CompletedPart part;
|
||||
multipart_upload.AddParts(part.WithETag(part_tags[i]).WithPartNumber(i + 1));
|
||||
multipart_upload.AddParts(part.WithETag(tags[i]).WithPartNumber(i + 1));
|
||||
}
|
||||
|
||||
req.SetMultipartUpload(multipart_upload);
|
||||
@ -334,12 +340,12 @@ void WriteBufferFromS3::completeMultipartUpload()
|
||||
auto outcome = client_ptr->CompleteMultipartUpload(req);
|
||||
|
||||
if (outcome.IsSuccess())
|
||||
LOG_TRACE(log, "Multipart upload has completed. Bucket: {}, Key: {}, Upload_id: {}, Parts: {}", bucket, key, multipart_upload_id, part_tags.size());
|
||||
LOG_TRACE(log, "Multipart upload has completed. Bucket: {}, Key: {}, Upload_id: {}, Parts: {}", bucket, key, multipart_upload_id, tags.size());
|
||||
else
|
||||
{
|
||||
throw Exception(ErrorCodes::S3_ERROR, "{} Tags:{}",
|
||||
outcome.GetError().GetMessage(),
|
||||
fmt::join(part_tags.begin(), part_tags.end(), " "));
|
||||
fmt::join(tags.begin(), tags.end(), " "));
|
||||
}
|
||||
}
|
||||
|
||||
@ -364,7 +370,7 @@ void WriteBufferFromS3::makeSinglepartUpload()
|
||||
auto task_notify_finish = [&]()
|
||||
{
|
||||
std::lock_guard lock(bg_tasks_mutex);
|
||||
put_object_task->is_finised = true;
|
||||
put_object_task->is_finished = true;
|
||||
|
||||
/// Notification under mutex is important here.
|
||||
/// Othervies, WriteBuffer could be destroyed in between
|
||||
@ -417,7 +423,7 @@ void WriteBufferFromS3::fillPutRequest(Aws::S3::Model::PutObjectRequest & req)
|
||||
req.SetContentType("binary/octet-stream");
|
||||
}
|
||||
|
||||
void WriteBufferFromS3::processPutRequest(PutObjectTask & task)
|
||||
void WriteBufferFromS3::processPutRequest(const PutObjectTask & task)
|
||||
{
|
||||
auto outcome = client_ptr->PutObject(task.req);
|
||||
bool with_pool = static_cast<bool>(schedule);
|
||||
@ -431,23 +437,25 @@ void WriteBufferFromS3::waitForReadyBackGroundTasks()
|
||||
{
|
||||
if (schedule)
|
||||
{
|
||||
std::lock_guard lock(bg_tasks_mutex);
|
||||
std::unique_lock lock(bg_tasks_mutex);
|
||||
|
||||
/// Suppress warnings because bg_tasks_mutex is actually hold, but tsa annotations do not understand std::unique_lock
|
||||
auto & tasks = TSA_SUPPRESS_WARNING_FOR_WRITE(upload_object_tasks);
|
||||
|
||||
while (!tasks.empty() && tasks.front().is_finished)
|
||||
{
|
||||
while (!upload_object_tasks.empty() && upload_object_tasks.front().is_finised)
|
||||
auto & task = tasks.front();
|
||||
auto exception = task.exception;
|
||||
auto tag = std::move(task.tag);
|
||||
tasks.pop_front();
|
||||
|
||||
if (exception)
|
||||
{
|
||||
auto & task = upload_object_tasks.front();
|
||||
auto exception = task.exception;
|
||||
auto tag = std::move(task.tag);
|
||||
upload_object_tasks.pop_front();
|
||||
|
||||
if (exception)
|
||||
{
|
||||
waitForAllBackGroundTasks();
|
||||
std::rethrow_exception(exception);
|
||||
}
|
||||
|
||||
part_tags.push_back(tag);
|
||||
waitForAllBackGroundTasksUnlocked(lock);
|
||||
std::rethrow_exception(exception);
|
||||
}
|
||||
|
||||
TSA_SUPPRESS_WARNING_FOR_WRITE(part_tags).push_back(tag);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -457,22 +465,33 @@ void WriteBufferFromS3::waitForAllBackGroundTasks()
|
||||
if (schedule)
|
||||
{
|
||||
std::unique_lock lock(bg_tasks_mutex);
|
||||
bg_tasks_condvar.wait(lock, [this]() { return num_added_bg_tasks == num_finished_bg_tasks; });
|
||||
waitForAllBackGroundTasksUnlocked(lock);
|
||||
}
|
||||
}
|
||||
|
||||
while (!upload_object_tasks.empty())
|
||||
void WriteBufferFromS3::waitForAllBackGroundTasksUnlocked(std::unique_lock<std::mutex> & bg_tasks_lock)
|
||||
{
|
||||
if (schedule)
|
||||
{
|
||||
bg_tasks_condvar.wait(bg_tasks_lock, [this]() {return TSA_SUPPRESS_WARNING_FOR_READ(num_added_bg_tasks) == TSA_SUPPRESS_WARNING_FOR_READ(num_finished_bg_tasks); });
|
||||
|
||||
/// Suppress warnings because bg_tasks_mutex is actually hold, but tsa annotations do not understand std::unique_lock
|
||||
auto & tasks = TSA_SUPPRESS_WARNING_FOR_WRITE(upload_object_tasks);
|
||||
while (!tasks.empty())
|
||||
{
|
||||
auto & task = upload_object_tasks.front();
|
||||
auto & task = tasks.front();
|
||||
|
||||
if (task.exception)
|
||||
std::rethrow_exception(task.exception);
|
||||
|
||||
part_tags.push_back(task.tag);
|
||||
TSA_SUPPRESS_WARNING_FOR_WRITE(part_tags).push_back(task.tag);
|
||||
|
||||
upload_object_tasks.pop_front();
|
||||
tasks.pop_front();
|
||||
}
|
||||
|
||||
if (put_object_task)
|
||||
{
|
||||
bg_tasks_condvar.wait(lock, [this]() { return put_object_task->is_finised; });
|
||||
bg_tasks_condvar.wait(bg_tasks_lock, [this]() { return put_object_task->is_finished; });
|
||||
if (put_object_task->exception)
|
||||
std::rethrow_exception(put_object_task->exception);
|
||||
}
|
||||
|
@ -80,37 +80,39 @@ private:
|
||||
|
||||
struct PutObjectTask;
|
||||
void fillPutRequest(Aws::S3::Model::PutObjectRequest & req);
|
||||
void processPutRequest(PutObjectTask & task);
|
||||
void processPutRequest(const PutObjectTask & task);
|
||||
|
||||
void waitForReadyBackGroundTasks();
|
||||
void waitForAllBackGroundTasks();
|
||||
void waitForAllBackGroundTasksUnlocked(std::unique_lock<std::mutex> & bg_tasks_lock);
|
||||
|
||||
const String bucket;
|
||||
const String key;
|
||||
const S3Settings::ReadWriteSettings s3_settings;
|
||||
const std::shared_ptr<const Aws::S3::S3Client> client_ptr;
|
||||
const std::optional<std::map<String, String>> object_metadata;
|
||||
|
||||
String bucket;
|
||||
String key;
|
||||
std::shared_ptr<const Aws::S3::S3Client> client_ptr;
|
||||
size_t upload_part_size = 0;
|
||||
S3Settings::ReadWriteSettings s3_settings;
|
||||
std::optional<std::map<String, String>> object_metadata;
|
||||
|
||||
/// Buffer to accumulate data.
|
||||
std::shared_ptr<Aws::StringStream> temporary_buffer;
|
||||
std::shared_ptr<Aws::StringStream> temporary_buffer; /// Buffer to accumulate data.
|
||||
size_t last_part_size = 0;
|
||||
std::atomic<size_t> total_parts_uploaded = 0;
|
||||
|
||||
/// Upload in S3 is made in parts.
|
||||
/// We initiate upload, then upload each part and get ETag as a response, and then finalizeImpl() upload with listing all our parts.
|
||||
String multipart_upload_id;
|
||||
std::vector<String> part_tags;
|
||||
std::vector<String> TSA_GUARDED_BY(bg_tasks_mutex) part_tags;
|
||||
|
||||
bool is_prefinalized = false;
|
||||
|
||||
/// Following fields are for background uploads in thread pool (if specified).
|
||||
/// We use std::function to avoid dependency of Interpreters
|
||||
ScheduleFunc schedule;
|
||||
std::unique_ptr<PutObjectTask> put_object_task;
|
||||
std::list<UploadPartTask> upload_object_tasks;
|
||||
size_t num_added_bg_tasks = 0;
|
||||
size_t num_finished_bg_tasks = 0;
|
||||
const ScheduleFunc schedule;
|
||||
|
||||
std::unique_ptr<PutObjectTask> put_object_task; /// Does not need protection by mutex because of the logic around is_finished field.
|
||||
std::list<UploadPartTask> TSA_GUARDED_BY(bg_tasks_mutex) upload_object_tasks;
|
||||
size_t num_added_bg_tasks TSA_GUARDED_BY(bg_tasks_mutex) = 0;
|
||||
size_t num_finished_bg_tasks TSA_GUARDED_BY(bg_tasks_mutex) = 0;
|
||||
|
||||
std::mutex bg_tasks_mutex;
|
||||
std::condition_variable bg_tasks_condvar;
|
||||
|
||||
|
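The WriteBufferFromS3 changes above move part_tags and the background-task counters under TSA_GUARDED_BY(bg_tasks_mutex) and wrap several accesses in TSA_SUPPRESS_WARNING_FOR_READ/WRITE. The sketch below is a minimal, self-contained illustration of the underlying clang -Wthread-safety pattern, with illustrative names and plain attributes standing in for the ClickHouse TSA_* macros (an assumption about how they are defined): the analysis models std::lock_guard, but not std::unique_lock, which is why condition-variable waits need an explicit escape hatch.

#include <condition_variable>
#include <mutex>
#include <string>
#include <vector>

// Plain clang attributes; the TSA_* macros above are assumed to wrap something equivalent.
#define GUARDED_BY(m) __attribute__((guarded_by(m)))
#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))

class PartTags
{
public:
    void add(std::string tag)
    {
        std::lock_guard lock(mutex);   // modeled by the analysis: access to tags is proven safe
        tags.push_back(std::move(tag));
        cv.notify_all();
    }

    size_t waitForAtLeast(size_t n)
    {
        std::unique_lock lock(mutex);  // required by condition_variable, but not modeled
        cv.wait(lock, [this] { return sizeUnlocked() >= n; });
        return sizeUnlocked();
    }

private:
    // Escape hatch in the spirit of TSA_SUPPRESS_WARNING_FOR_READ: the mutex really is held
    // by every caller, the analysis just cannot see it through std::unique_lock.
    size_t sizeUnlocked() const NO_THREAD_SAFETY_ANALYSIS { return tags.size(); }

    std::mutex mutex;
    std::condition_variable cv;
    std::vector<std::string> tags GUARDED_BY(mutex);
};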
113
src/Interpreters/AggregationUtils.cpp
Normal file
@ -0,0 +1,113 @@
#include <Interpreters/AggregationUtils.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
OutputBlockColumns prepareOutputBlockColumns(
|
||||
const Aggregator::Params & params,
|
||||
const Aggregator::AggregateFunctionsPlainPtrs & aggregate_functions,
|
||||
const Block & res_header,
|
||||
Arenas & aggregates_pools,
|
||||
bool final,
|
||||
size_t rows)
|
||||
{
|
||||
MutableColumns key_columns(params.keys_size);
|
||||
MutableColumns aggregate_columns(params.aggregates_size);
|
||||
MutableColumns final_aggregate_columns(params.aggregates_size);
|
||||
Aggregator::AggregateColumnsData aggregate_columns_data(params.aggregates_size);
|
||||
|
||||
for (size_t i = 0; i < params.keys_size; ++i)
|
||||
{
|
||||
key_columns[i] = res_header.safeGetByPosition(i).type->createColumn();
|
||||
key_columns[i]->reserve(rows);
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < params.aggregates_size; ++i)
|
||||
{
|
||||
if (!final)
|
||||
{
|
||||
const auto & aggregate_column_name = params.aggregates[i].column_name;
|
||||
aggregate_columns[i] = res_header.getByName(aggregate_column_name).type->createColumn();
|
||||
|
||||
/// The ColumnAggregateFunction column captures the shared ownership of the arena with the aggregate function states.
|
||||
ColumnAggregateFunction & column_aggregate_func = assert_cast<ColumnAggregateFunction &>(*aggregate_columns[i]);
|
||||
|
||||
for (auto & pool : aggregates_pools)
|
||||
column_aggregate_func.addArena(pool);
|
||||
|
||||
aggregate_columns_data[i] = &column_aggregate_func.getData();
|
||||
aggregate_columns_data[i]->reserve(rows);
|
||||
}
|
||||
else
|
||||
{
|
||||
final_aggregate_columns[i] = aggregate_functions[i]->getReturnType()->createColumn();
|
||||
final_aggregate_columns[i]->reserve(rows);
|
||||
|
||||
if (aggregate_functions[i]->isState())
|
||||
{
|
||||
/// The ColumnAggregateFunction column captures the shared ownership of the arena with aggregate function states.
|
||||
if (auto * column_aggregate_func = typeid_cast<ColumnAggregateFunction *>(final_aggregate_columns[i].get()))
|
||||
for (auto & pool : aggregates_pools)
|
||||
column_aggregate_func->addArena(pool);
|
||||
|
||||
/// Aggregate state can be wrapped into array if aggregate function ends with -Resample combinator.
|
||||
final_aggregate_columns[i]->forEachSubcolumn(
|
||||
[&aggregates_pools](auto & subcolumn)
|
||||
{
|
||||
if (auto * column_aggregate_func = typeid_cast<ColumnAggregateFunction *>(subcolumn.get()))
|
||||
for (auto & pool : aggregates_pools)
|
||||
column_aggregate_func->addArena(pool);
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (key_columns.size() != params.keys_size)
|
||||
throw Exception{"Aggregate. Unexpected key columns size.", ErrorCodes::LOGICAL_ERROR};
|
||||
|
||||
std::vector<IColumn *> raw_key_columns;
|
||||
raw_key_columns.reserve(key_columns.size());
|
||||
for (auto & column : key_columns)
|
||||
raw_key_columns.push_back(column.get());
|
||||
|
||||
return {
|
||||
.key_columns = std::move(key_columns),
|
||||
.raw_key_columns = std::move(raw_key_columns),
|
||||
.aggregate_columns = std::move(aggregate_columns),
|
||||
.final_aggregate_columns = std::move(final_aggregate_columns),
|
||||
.aggregate_columns_data = std::move(aggregate_columns_data),
|
||||
};
|
||||
}
|
||||
|
||||
Block finalizeBlock(const Aggregator::Params & params, const Block & res_header, OutputBlockColumns && out_cols, bool final, size_t rows)
|
||||
{
|
||||
auto && [key_columns, raw_key_columns, aggregate_columns, final_aggregate_columns, aggregate_columns_data] = out_cols;
|
||||
|
||||
Block res = res_header.cloneEmpty();
|
||||
|
||||
for (size_t i = 0; i < params.keys_size; ++i)
|
||||
res.getByPosition(i).column = std::move(key_columns[i]);
|
||||
|
||||
for (size_t i = 0; i < params.aggregates_size; ++i)
|
||||
{
|
||||
const auto & aggregate_column_name = params.aggregates[i].column_name;
|
||||
if (final)
|
||||
res.getByName(aggregate_column_name).column = std::move(final_aggregate_columns[i]);
|
||||
else
|
||||
res.getByName(aggregate_column_name).column = std::move(aggregate_columns[i]);
|
||||
}
|
||||
|
||||
/// Change the size of the columns-constants in the block.
|
||||
size_t columns = res_header.columns();
|
||||
for (size_t i = 0; i < columns; ++i)
|
||||
if (isColumnConst(*res.getByPosition(i).column))
|
||||
res.getByPosition(i).column = res.getByPosition(i).column->cut(0, rows);
|
||||
|
||||
return res;
|
||||
}
|
||||
}
|
27
src/Interpreters/AggregationUtils.h
Normal file
@ -0,0 +1,27 @@
#pragma once

#include <Interpreters/Aggregator.h>

namespace DB
{

struct OutputBlockColumns
{
    MutableColumns key_columns;
    std::vector<IColumn *> raw_key_columns;
    MutableColumns aggregate_columns;
    MutableColumns final_aggregate_columns;
    Aggregator::AggregateColumnsData aggregate_columns_data;
};


OutputBlockColumns prepareOutputBlockColumns(
    const Aggregator::Params & params,
    const Aggregator::AggregateFunctionsPlainPtrs & aggregate_functions,
    const Block & res_header,
    Arenas & aggregates_pools,
    bool final,
    size_t rows);

Block finalizeBlock(const Aggregator::Params & params, const Block & res_header, OutputBlockColumns && out_cols, bool final, size_t rows);
}
@ -34,6 +34,8 @@
|
||||
|
||||
#include <Parsers/ASTSelectQuery.h>
|
||||
|
||||
#include <Interpreters/AggregationUtils.h>
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event ExternalAggregationWritePart;
|
||||
@ -1587,16 +1589,10 @@ Block Aggregator::convertOneBucketToBlock(
|
||||
bool final,
|
||||
size_t bucket) const
|
||||
{
|
||||
Block block = prepareBlockAndFill(data_variants, final, method.data.impls[bucket].size(),
|
||||
[bucket, &method, arena, this] (
|
||||
MutableColumns & key_columns,
|
||||
AggregateColumnsData & aggregate_columns,
|
||||
MutableColumns & final_aggregate_columns,
|
||||
bool final_)
|
||||
{
|
||||
convertToBlockImpl(method, method.data.impls[bucket],
|
||||
key_columns, aggregate_columns, final_aggregate_columns, arena, final_);
|
||||
});
|
||||
// Used in ConvertingAggregatedToChunksSource -> ConvertingAggregatedToChunksTransform (expects single chunk for each bucket_id).
|
||||
constexpr bool return_single_block = true;
|
||||
Block block = convertToBlockImpl<return_single_block>(
|
||||
method, method.data.impls[bucket], arena, data_variants.aggregates_pools, final, method.data.impls[bucket].size());
|
||||
|
||||
block.info.bucket_num = bucket;
|
||||
return block;
|
||||
@ -1702,26 +1698,17 @@ bool Aggregator::checkLimits(size_t result_size, bool & no_more_keys) const
|
||||
}
|
||||
|
||||
|
||||
template <typename Method, typename Table>
|
||||
void Aggregator::convertToBlockImpl(
|
||||
Method & method,
|
||||
Table & data,
|
||||
MutableColumns & key_columns,
|
||||
AggregateColumnsData & aggregate_columns,
|
||||
MutableColumns & final_aggregate_columns,
|
||||
Arena * arena,
|
||||
bool final) const
|
||||
template <bool return_single_block, typename Method, typename Table>
|
||||
Aggregator::ConvertToBlockRes<return_single_block>
|
||||
Aggregator::convertToBlockImpl(Method & method, Table & data, Arena * arena, Arenas & aggregates_pools, bool final, size_t rows) const
|
||||
{
|
||||
if (data.empty())
|
||||
return;
|
||||
{
|
||||
auto && out_cols = prepareOutputBlockColumns(params, aggregate_functions, getHeader(final), aggregates_pools, final, rows);
|
||||
return {finalizeBlock(params, getHeader(final), std::move(out_cols), final, rows)};
|
||||
}
|
||||
|
||||
if (key_columns.size() != params.keys_size)
|
||||
throw Exception{"Aggregate. Unexpected key columns size.", ErrorCodes::LOGICAL_ERROR};
|
||||
|
||||
std::vector<IColumn *> raw_key_columns;
|
||||
raw_key_columns.reserve(key_columns.size());
|
||||
for (auto & column : key_columns)
|
||||
raw_key_columns.push_back(column.get());
|
||||
ConvertToBlockRes<return_single_block> res;
|
||||
|
||||
if (final)
|
||||
{
|
||||
@ -1729,20 +1716,23 @@ void Aggregator::convertToBlockImpl(
|
||||
if (compiled_aggregate_functions_holder)
|
||||
{
|
||||
static constexpr bool use_compiled_functions = !Method::low_cardinality_optimization;
|
||||
convertToBlockImplFinal<Method, use_compiled_functions>(method, data, std::move(raw_key_columns), final_aggregate_columns, arena);
|
||||
res = convertToBlockImplFinal<Method, use_compiled_functions, return_single_block>(method, data, arena, aggregates_pools, rows);
|
||||
}
|
||||
else
|
||||
#endif
|
||||
{
|
||||
convertToBlockImplFinal<Method, false>(method, data, std::move(raw_key_columns), final_aggregate_columns, arena);
|
||||
res = convertToBlockImplFinal<Method, false, return_single_block>(method, data, arena, aggregates_pools, rows);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
convertToBlockImplNotFinal(method, data, std::move(raw_key_columns), aggregate_columns);
|
||||
res = convertToBlockImplNotFinal<return_single_block>(method, data, aggregates_pools, rows);
|
||||
}
|
||||
|
||||
/// In order to release memory early.
|
||||
data.clearAndShrink();
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
@ -1811,38 +1801,9 @@ inline void Aggregator::insertAggregatesIntoColumns(Mapped & mapped, MutableColu
|
||||
}
|
||||
|
||||
|
||||
template <typename Method, bool use_compiled_functions, typename Table>
|
||||
void NO_INLINE Aggregator::convertToBlockImplFinal(
|
||||
Method & method,
|
||||
Table & data,
|
||||
std::vector<IColumn *> key_columns,
|
||||
MutableColumns & final_aggregate_columns,
|
||||
Arena * arena) const
|
||||
template <bool use_compiled_functions>
|
||||
Block Aggregator::insertResultsIntoColumns(PaddedPODArray<AggregateDataPtr> & places, OutputBlockColumns && out_cols, Arena * arena) const
|
||||
{
|
||||
if constexpr (Method::low_cardinality_optimization)
|
||||
{
|
||||
if (data.hasNullKeyData())
|
||||
{
|
||||
key_columns[0]->insertDefault();
|
||||
insertAggregatesIntoColumns(data.getNullKeyData(), final_aggregate_columns, arena);
|
||||
}
|
||||
}
|
||||
|
||||
auto shuffled_key_sizes = method.shuffleKeyColumns(key_columns, key_sizes);
|
||||
const auto & key_sizes_ref = shuffled_key_sizes ? *shuffled_key_sizes : key_sizes;
|
||||
|
||||
PaddedPODArray<AggregateDataPtr> places;
|
||||
places.reserve(data.size());
|
||||
|
||||
data.forEachValue([&](const auto & key, auto & mapped)
|
||||
{
|
||||
method.insertKeyIntoColumns(key, key_columns, key_sizes_ref);
|
||||
places.emplace_back(mapped);
|
||||
|
||||
/// Mark the cell as destroyed so it will not be destroyed in destructor.
|
||||
mapped = nullptr;
|
||||
});
|
||||
|
||||
std::exception_ptr exception;
|
||||
size_t aggregate_functions_destroy_index = 0;
|
||||
|
||||
@ -1863,7 +1824,7 @@ void NO_INLINE Aggregator::convertToBlockImplFinal(
|
||||
if (!is_aggregate_function_compiled[i])
|
||||
continue;
|
||||
|
||||
auto & final_aggregate_column = final_aggregate_columns[i];
|
||||
auto & final_aggregate_column = out_cols.final_aggregate_columns[i];
|
||||
final_aggregate_column = final_aggregate_column->cloneResized(places.size());
|
||||
columns_data.emplace_back(getColumnData(final_aggregate_column.get()));
|
||||
}
|
||||
@ -1884,7 +1845,7 @@ void NO_INLINE Aggregator::convertToBlockImplFinal(
|
||||
}
|
||||
}
|
||||
|
||||
auto & final_aggregate_column = final_aggregate_columns[aggregate_functions_destroy_index];
|
||||
auto & final_aggregate_column = out_cols.final_aggregate_columns[aggregate_functions_destroy_index];
|
||||
size_t offset = offsets_of_aggregate_states[aggregate_functions_destroy_index];
|
||||
|
||||
/** We increase aggregate_functions_destroy_index because by function contract if insertResultIntoBatch
|
||||
@ -1898,7 +1859,8 @@ void NO_INLINE Aggregator::convertToBlockImplFinal(
|
||||
bool is_state = aggregate_functions[destroy_index]->isState();
|
||||
bool destroy_place_after_insert = !is_state;
|
||||
|
||||
aggregate_functions[destroy_index]->insertResultIntoBatch(0, places.size(), places.data(), offset, *final_aggregate_column, arena, destroy_place_after_insert);
|
||||
aggregate_functions[destroy_index]->insertResultIntoBatch(
|
||||
0, places.size(), places.data(), offset, *final_aggregate_column, arena, destroy_place_after_insert);
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
@ -1923,125 +1885,155 @@ void NO_INLINE Aggregator::convertToBlockImplFinal(
|
||||
|
||||
if (exception)
|
||||
std::rethrow_exception(exception);
|
||||
|
||||
return finalizeBlock(params, getHeader(/* final */ true), std::move(out_cols), /* final */ true, places.size());
|
||||
}
|
||||
|
||||
template <typename Method, typename Table>
|
||||
void NO_INLINE Aggregator::convertToBlockImplNotFinal(
|
||||
Method & method,
|
||||
Table & data,
|
||||
std::vector<IColumn *> key_columns,
|
||||
AggregateColumnsData & aggregate_columns) const
|
||||
template <typename Method, bool use_compiled_functions, bool return_single_block, typename Table>
|
||||
Aggregator::ConvertToBlockRes<return_single_block> NO_INLINE
|
||||
Aggregator::convertToBlockImplFinal(Method & method, Table & data, Arena * arena, Arenas & aggregates_pools, size_t) const
|
||||
{
|
||||
if constexpr (Method::low_cardinality_optimization)
|
||||
const size_t max_block_size = params.max_block_size;
|
||||
const bool final = true;
|
||||
ConvertToBlockRes<return_single_block> res;
|
||||
|
||||
std::optional<OutputBlockColumns> out_cols;
|
||||
std::optional<Sizes> shuffled_key_sizes;
|
||||
PaddedPODArray<AggregateDataPtr> places;
|
||||
|
||||
auto init_out_cols = [&]()
|
||||
{
|
||||
if (data.hasNullKeyData())
|
||||
out_cols = prepareOutputBlockColumns(params, aggregate_functions, getHeader(final), aggregates_pools, final, max_block_size);
|
||||
|
||||
if constexpr (Method::low_cardinality_optimization)
|
||||
{
|
||||
key_columns[0]->insertDefault();
|
||||
|
||||
for (size_t i = 0; i < params.aggregates_size; ++i)
|
||||
aggregate_columns[i]->push_back(data.getNullKeyData() + offsets_of_aggregate_states[i]);
|
||||
|
||||
data.getNullKeyData() = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
auto shuffled_key_sizes = method.shuffleKeyColumns(key_columns, key_sizes);
|
||||
const auto & key_sizes_ref = shuffled_key_sizes ? *shuffled_key_sizes : key_sizes;
|
||||
|
||||
data.forEachValue([&](const auto & key, auto & mapped)
|
||||
{
|
||||
method.insertKeyIntoColumns(key, key_columns, key_sizes_ref);
|
||||
|
||||
/// reserved, so push_back does not throw exceptions
|
||||
for (size_t i = 0; i < params.aggregates_size; ++i)
|
||||
aggregate_columns[i]->push_back(mapped + offsets_of_aggregate_states[i]);
|
||||
|
||||
mapped = nullptr;
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
template <typename Filler>
|
||||
Block Aggregator::prepareBlockAndFill(
|
||||
AggregatedDataVariants & data_variants,
|
||||
bool final,
|
||||
size_t rows,
|
||||
Filler && filler) const
|
||||
{
|
||||
MutableColumns key_columns(params.keys_size);
|
||||
MutableColumns aggregate_columns(params.aggregates_size);
|
||||
MutableColumns final_aggregate_columns(params.aggregates_size);
|
||||
AggregateColumnsData aggregate_columns_data(params.aggregates_size);
|
||||
|
||||
Block res_header = getHeader(final);
|
||||
|
||||
for (size_t i = 0; i < params.keys_size; ++i)
|
||||
{
|
||||
key_columns[i] = res_header.safeGetByPosition(i).type->createColumn();
|
||||
key_columns[i]->reserve(rows);
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < params.aggregates_size; ++i)
|
||||
{
|
||||
if (!final)
|
||||
{
|
||||
const auto & aggregate_column_name = params.aggregates[i].column_name;
|
||||
aggregate_columns[i] = res_header.getByName(aggregate_column_name).type->createColumn();
|
||||
|
||||
/// The ColumnAggregateFunction column captures the shared ownership of the arena with the aggregate function states.
|
||||
ColumnAggregateFunction & column_aggregate_func = assert_cast<ColumnAggregateFunction &>(*aggregate_columns[i]);
|
||||
|
||||
for (auto & pool : data_variants.aggregates_pools)
|
||||
column_aggregate_func.addArena(pool);
|
||||
|
||||
aggregate_columns_data[i] = &column_aggregate_func.getData();
|
||||
aggregate_columns_data[i]->reserve(rows);
|
||||
}
|
||||
else
|
||||
{
|
||||
final_aggregate_columns[i] = aggregate_functions[i]->getReturnType()->createColumn();
|
||||
final_aggregate_columns[i]->reserve(rows);
|
||||
|
||||
if (aggregate_functions[i]->isState())
|
||||
if (data.hasNullKeyData())
|
||||
{
|
||||
/// The ColumnAggregateFunction column captures the shared ownership of the arena with aggregate function states.
|
||||
if (auto * column_aggregate_func = typeid_cast<ColumnAggregateFunction *>(final_aggregate_columns[i].get()))
|
||||
for (auto & pool : data_variants.aggregates_pools)
|
||||
column_aggregate_func->addArena(pool);
|
||||
|
||||
/// Aggregate state can be wrapped into array if aggregate function ends with -Resample combinator.
|
||||
final_aggregate_columns[i]->forEachSubcolumn([&data_variants](auto & subcolumn)
|
||||
{
|
||||
if (auto * column_aggregate_func = typeid_cast<ColumnAggregateFunction *>(subcolumn.get()))
|
||||
for (auto & pool : data_variants.aggregates_pools)
|
||||
column_aggregate_func->addArena(pool);
|
||||
});
|
||||
out_cols->key_columns[0]->insertDefault();
|
||||
insertAggregatesIntoColumns(data.getNullKeyData(), out_cols->final_aggregate_columns, arena);
|
||||
data.hasNullKeyData() = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
filler(key_columns, aggregate_columns_data, final_aggregate_columns, final);
|
||||
shuffled_key_sizes = method.shuffleKeyColumns(out_cols->raw_key_columns, key_sizes);
|
||||
|
||||
Block res = res_header.cloneEmpty();
|
||||
places.reserve(max_block_size);
|
||||
};
|
||||
|
||||
for (size_t i = 0; i < params.keys_size; ++i)
|
||||
res.getByPosition(i).column = std::move(key_columns[i]);
|
||||
// should be invoked at least once, because null data might be the only content of the `data`
|
||||
init_out_cols();
|
||||
|
||||
for (size_t i = 0; i < params.aggregates_size; ++i)
|
||||
data.forEachValue(
|
||||
[&](const auto & key, auto & mapped)
|
||||
{
|
||||
if (!out_cols.has_value())
|
||||
init_out_cols();
|
||||
|
||||
const auto & key_sizes_ref = shuffled_key_sizes ? *shuffled_key_sizes : key_sizes;
|
||||
method.insertKeyIntoColumns(key, out_cols->raw_key_columns, key_sizes_ref);
|
||||
places.emplace_back(mapped);
|
||||
|
||||
/// Mark the cell as destroyed so it will not be destroyed in destructor.
|
||||
mapped = nullptr;
|
||||
|
||||
if constexpr (!return_single_block)
|
||||
{
|
||||
if (places.size() >= max_block_size)
|
||||
{
|
||||
res.emplace_back(insertResultsIntoColumns<use_compiled_functions>(places, std::move(out_cols.value()), arena));
|
||||
places.clear();
|
||||
out_cols.reset();
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
if constexpr (return_single_block)
|
||||
{
|
||||
const auto & aggregate_column_name = params.aggregates[i].column_name;
|
||||
if (final)
|
||||
res.getByName(aggregate_column_name).column = std::move(final_aggregate_columns[i]);
|
||||
else
|
||||
res.getByName(aggregate_column_name).column = std::move(aggregate_columns[i]);
|
||||
return insertResultsIntoColumns<use_compiled_functions>(places, std::move(out_cols.value()), arena);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (out_cols.has_value())
|
||||
res.emplace_back(insertResultsIntoColumns<use_compiled_functions>(places, std::move(out_cols.value()), arena));
|
||||
return res;
|
||||
}
|
||||
}
|
||||
|
||||
/// Change the size of the columns-constants in the block.
|
||||
size_t columns = res_header.columns();
|
||||
for (size_t i = 0; i < columns; ++i)
|
||||
if (isColumnConst(*res.getByPosition(i).column))
|
||||
res.getByPosition(i).column = res.getByPosition(i).column->cut(0, rows);
|
||||
template <bool return_single_block, typename Method, typename Table>
|
||||
Aggregator::ConvertToBlockRes<return_single_block> NO_INLINE
|
||||
Aggregator::convertToBlockImplNotFinal(Method & method, Table & data, Arenas & aggregates_pools, size_t) const
|
||||
{
|
||||
const size_t max_block_size = params.max_block_size;
|
||||
const bool final = false;
|
||||
ConvertToBlockRes<return_single_block> res;
|
||||
|
||||
std::optional<OutputBlockColumns> out_cols;
|
||||
std::optional<Sizes> shuffled_key_sizes;
|
||||
|
||||
auto init_out_cols = [&]()
|
||||
{
|
||||
out_cols = prepareOutputBlockColumns(params, aggregate_functions, getHeader(final), aggregates_pools, final, max_block_size);
|
||||
|
||||
if constexpr (Method::low_cardinality_optimization)
|
||||
{
|
||||
if (data.hasNullKeyData())
|
||||
{
|
||||
out_cols->raw_key_columns[0]->insertDefault();
|
||||
|
||||
for (size_t i = 0; i < params.aggregates_size; ++i)
|
||||
out_cols->aggregate_columns_data[i]->push_back(data.getNullKeyData() + offsets_of_aggregate_states[i]);
|
||||
|
||||
data.getNullKeyData() = nullptr;
|
||||
data.hasNullKeyData() = false;
|
||||
}
|
||||
}
|
||||
|
||||
shuffled_key_sizes = method.shuffleKeyColumns(out_cols->raw_key_columns, key_sizes);
|
||||
};
|
||||
|
||||
// should be invoked at least once, because null data might be the only content of the `data`
|
||||
init_out_cols();
|
||||
|
||||
size_t rows_in_current_block = 0;
|
||||
|
||||
data.forEachValue(
|
||||
[&](const auto & key, auto & mapped)
|
||||
{
|
||||
if (!out_cols.has_value())
|
||||
init_out_cols();
|
||||
|
||||
const auto & key_sizes_ref = shuffled_key_sizes ? *shuffled_key_sizes : key_sizes;
|
||||
method.insertKeyIntoColumns(key, out_cols->raw_key_columns, key_sizes_ref);
|
||||
|
||||
/// reserved, so push_back does not throw exceptions
|
||||
for (size_t i = 0; i < params.aggregates_size; ++i)
|
||||
out_cols->aggregate_columns_data[i]->push_back(mapped + offsets_of_aggregate_states[i]);
|
||||
|
||||
mapped = nullptr;
|
||||
|
||||
++rows_in_current_block;
|
||||
|
||||
if constexpr (!return_single_block)
|
||||
{
|
||||
if (rows_in_current_block >= max_block_size)
|
||||
{
|
||||
res.emplace_back(finalizeBlock(params, getHeader(final), std::move(out_cols.value()), final, rows_in_current_block));
|
||||
out_cols.reset();
|
||||
rows_in_current_block = 0;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
if constexpr (return_single_block)
|
||||
{
|
||||
return finalizeBlock(params, getHeader(final), std::move(out_cols).value(), final, rows_in_current_block);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (rows_in_current_block)
|
||||
res.emplace_back(finalizeBlock(params, getHeader(final), std::move(out_cols).value(), final, rows_in_current_block));
|
||||
return res;
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
@ -2105,39 +2097,35 @@ void Aggregator::createStatesAndFillKeyColumnsWithSingleKey(
|
||||
Block Aggregator::prepareBlockAndFillWithoutKey(AggregatedDataVariants & data_variants, bool final, bool is_overflows) const
|
||||
{
|
||||
size_t rows = 1;
|
||||
auto && out_cols
|
||||
= prepareOutputBlockColumns(params, aggregate_functions, getHeader(final), data_variants.aggregates_pools, final, rows);
|
||||
auto && [key_columns, raw_key_columns, aggregate_columns, final_aggregate_columns, aggregate_columns_data] = out_cols;
|
||||
|
||||
auto filler = [&data_variants, this](
|
||||
MutableColumns & key_columns,
|
||||
AggregateColumnsData & aggregate_columns,
|
||||
MutableColumns & final_aggregate_columns,
|
||||
bool final_)
|
||||
if (data_variants.type == AggregatedDataVariants::Type::without_key || params.overflow_row)
|
||||
{
|
||||
if (data_variants.type == AggregatedDataVariants::Type::without_key || params.overflow_row)
|
||||
AggregatedDataWithoutKey & data = data_variants.without_key;
|
||||
|
||||
if (!data)
|
||||
throw Exception("Wrong data variant passed.", ErrorCodes::LOGICAL_ERROR);
|
||||
|
||||
if (!final)
|
||||
{
|
||||
AggregatedDataWithoutKey & data = data_variants.without_key;
|
||||
|
||||
if (!data)
|
||||
throw Exception("Wrong data variant passed.", ErrorCodes::LOGICAL_ERROR);
|
||||
|
||||
if (!final_)
|
||||
{
|
||||
for (size_t i = 0; i < params.aggregates_size; ++i)
|
||||
aggregate_columns[i]->push_back(data + offsets_of_aggregate_states[i]);
|
||||
data = nullptr;
|
||||
}
|
||||
else
|
||||
{
|
||||
/// Always single-thread. It's safe to pass current arena from 'aggregates_pool'.
|
||||
insertAggregatesIntoColumns(data, final_aggregate_columns, data_variants.aggregates_pool);
|
||||
}
|
||||
|
||||
if (params.overflow_row)
|
||||
for (size_t i = 0; i < params.keys_size; ++i)
|
||||
key_columns[i]->insertDefault();
|
||||
for (size_t i = 0; i < params.aggregates_size; ++i)
|
||||
aggregate_columns_data[i]->push_back(data + offsets_of_aggregate_states[i]);
|
||||
data = nullptr;
|
||||
}
|
||||
else
|
||||
{
|
||||
/// Always single-thread. It's safe to pass current arena from 'aggregates_pool'.
|
||||
insertAggregatesIntoColumns(data, final_aggregate_columns, data_variants.aggregates_pool);
|
||||
}
|
||||
};
|
||||
|
||||
Block block = prepareBlockAndFill(data_variants, final, rows, filler);
|
||||
if (params.overflow_row)
|
||||
for (size_t i = 0; i < params.keys_size; ++i)
|
||||
key_columns[i]->insertDefault();
|
||||
}
|
||||
|
||||
Block block = finalizeBlock(params, getHeader(final), std::move(out_cols), final, rows);
|
||||
|
||||
if (is_overflows)
|
||||
block.info.is_overflows = true;
|
||||
@ -2148,29 +2136,22 @@ Block Aggregator::prepareBlockAndFillWithoutKey(AggregatedDataVariants & data_va
|
||||
return block;
|
||||
}
|
||||
|
||||
Block Aggregator::prepareBlockAndFillSingleLevel(AggregatedDataVariants & data_variants, bool final) const
|
||||
template <bool return_single_block>
|
||||
Aggregator::ConvertToBlockRes<return_single_block>
|
||||
Aggregator::prepareBlockAndFillSingleLevel(AggregatedDataVariants & data_variants, bool final) const
|
||||
{
|
||||
size_t rows = data_variants.sizeWithoutOverflowRow();
|
||||
const size_t rows = data_variants.sizeWithoutOverflowRow();
|
||||
#define M(NAME) \
|
||||
else if (data_variants.type == AggregatedDataVariants::Type::NAME) \
|
||||
{ \
|
||||
return convertToBlockImpl<return_single_block>( \
|
||||
*data_variants.NAME, data_variants.NAME->data, data_variants.aggregates_pool, data_variants.aggregates_pools, final, rows); \
|
||||
}
|
||||
|
||||
auto filler = [&data_variants, this](
|
||||
MutableColumns & key_columns,
|
||||
AggregateColumnsData & aggregate_columns,
|
||||
MutableColumns & final_aggregate_columns,
|
||||
bool final_)
|
||||
{
|
||||
#define M(NAME) \
|
||||
else if (data_variants.type == AggregatedDataVariants::Type::NAME) \
|
||||
convertToBlockImpl(*data_variants.NAME, data_variants.NAME->data, \
|
||||
key_columns, aggregate_columns, final_aggregate_columns, data_variants.aggregates_pool, final_);
|
||||
|
||||
if (false) {} // NOLINT
|
||||
APPLY_FOR_VARIANTS_SINGLE_LEVEL(M)
|
||||
#undef M
|
||||
else
|
||||
throw Exception("Unknown aggregated data variant.", ErrorCodes::UNKNOWN_AGGREGATED_DATA_VARIANT);
|
||||
};
|
||||
|
||||
return prepareBlockAndFill(data_variants, final, rows, filler);
|
||||
if (false) {} // NOLINT
|
||||
APPLY_FOR_VARIANTS_SINGLE_LEVEL(M)
|
||||
#undef M
|
||||
else throw Exception("Unknown aggregated data variant.", ErrorCodes::UNKNOWN_AGGREGATED_DATA_VARIANT);
|
||||
}
|
||||
|
||||
|
||||
@ -2292,7 +2273,7 @@ BlocksList Aggregator::convertToBlocks(AggregatedDataVariants & data_variants, b
|
||||
if (data_variants.type != AggregatedDataVariants::Type::without_key)
|
||||
{
|
||||
if (!data_variants.isTwoLevel())
|
||||
blocks.emplace_back(prepareBlockAndFillSingleLevel(data_variants, final));
|
||||
blocks.splice(blocks.end(), prepareBlockAndFillSingleLevel</* return_single_block */ false>(data_variants, final));
|
||||
else
|
||||
blocks.splice(blocks.end(), prepareBlocksAndFillTwoLevel(data_variants, final, thread_pool.get()));
|
||||
}
|
||||
@ -3044,9 +3025,15 @@ Block Aggregator::mergeBlocks(BlocksList & blocks, bool final)
|
||||
|
||||
Block block;
|
||||
if (result.type == AggregatedDataVariants::Type::without_key || is_overflows)
|
||||
{
|
||||
block = prepareBlockAndFillWithoutKey(result, final, is_overflows);
|
||||
}
|
||||
else
|
||||
block = prepareBlockAndFillSingleLevel(result, final);
|
||||
{
|
||||
// Used during memory efficient merging (SortingAggregatedTransform expects single chunk for each bucket_id).
|
||||
constexpr bool return_single_block = true;
|
||||
block = prepareBlockAndFillSingleLevel<return_single_block>(result, final);
|
||||
}
|
||||
/// NOTE: two-level data is not possible here - chooseAggregationMethod chooses only among single-level methods.
|
||||
|
||||
if (!final)
|
||||
@ -3247,4 +3234,6 @@ void Aggregator::destroyAllAggregateStates(AggregatedDataVariants & result) cons
|
||||
}
|
||||
|
||||
|
||||
template Aggregator::ConvertToBlockRes<false>
|
||||
Aggregator::prepareBlockAndFillSingleLevel<false>(AggregatedDataVariants & data_variants, bool final) const;
|
||||
}
|
||||
|
@ -1,8 +1,9 @@
|
||||
#pragma once
|
||||
|
||||
#include <mutex>
|
||||
#include <memory>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <type_traits>
|
||||
|
||||
#include <Common/logger_useful.h>
|
||||
|
||||
@ -872,6 +873,7 @@ using ManyAggregatedDataVariantsPtr = std::shared_ptr<ManyAggregatedDataVariants
|
||||
|
||||
class CompiledAggregateFunctionsHolder;
|
||||
class NativeWriter;
|
||||
struct OutputBlockColumns;
|
||||
|
||||
/** How are "total" values calculated with WITH TOTALS?
|
||||
* (For more details, see TotalsHavingTransform.)
|
||||
@ -933,6 +935,8 @@ public:
|
||||
bool compile_aggregate_expressions;
|
||||
size_t min_count_to_compile_aggregate_expression;
|
||||
|
||||
size_t max_block_size;
|
||||
|
||||
bool only_merge;
|
||||
|
||||
struct StatsCollectingParams
|
||||
@ -969,6 +973,7 @@ public:
|
||||
size_t min_free_disk_space_,
|
||||
bool compile_aggregate_expressions_,
|
||||
size_t min_count_to_compile_aggregate_expression_,
|
||||
size_t max_block_size_,
|
||||
bool only_merge_ = false, // true for projections
|
||||
const StatsCollectingParams & stats_collecting_params_ = {})
|
||||
: keys(keys_)
|
||||
@ -987,15 +992,16 @@ public:
|
||||
, min_free_disk_space(min_free_disk_space_)
|
||||
, compile_aggregate_expressions(compile_aggregate_expressions_)
|
||||
, min_count_to_compile_aggregate_expression(min_count_to_compile_aggregate_expression_)
|
||||
, max_block_size(max_block_size_)
|
||||
, only_merge(only_merge_)
|
||||
, stats_collecting_params(stats_collecting_params_)
|
||||
{
|
||||
}
|
||||
|
||||
/// Only parameters that matter during merge.
|
||||
Params(const Names & keys_, const AggregateDescriptions & aggregates_, bool overflow_row_, size_t max_threads_)
|
||||
Params(const Names & keys_, const AggregateDescriptions & aggregates_, bool overflow_row_, size_t max_threads_, size_t max_block_size_)
|
||||
: Params(
|
||||
keys_, aggregates_, overflow_row_, 0, OverflowMode::THROW, 0, 0, 0, false, nullptr, max_threads_, 0, false, 0, true, {})
|
||||
keys_, aggregates_, overflow_row_, 0, OverflowMode::THROW, 0, 0, 0, false, nullptr, max_threads_, 0, false, 0, max_block_size_, true, {})
|
||||
{
|
||||
}
|
||||
|
||||
@ -1277,15 +1283,12 @@ private:
|
||||
void mergeSingleLevelDataImpl(
|
||||
ManyAggregatedDataVariants & non_empty_data) const;
|
||||
|
||||
template <typename Method, typename Table>
|
||||
void convertToBlockImpl(
|
||||
Method & method,
|
||||
Table & data,
|
||||
MutableColumns & key_columns,
|
||||
AggregateColumnsData & aggregate_columns,
|
||||
MutableColumns & final_aggregate_columns,
|
||||
Arena * arena,
|
||||
bool final) const;
|
||||
template <bool return_single_block>
|
||||
using ConvertToBlockRes = std::conditional_t<return_single_block, Block, BlocksList>;
|
||||
|
||||
template <bool return_single_block, typename Method, typename Table>
|
||||
ConvertToBlockRes<return_single_block>
|
||||
convertToBlockImpl(Method & method, Table & data, Arena * arena, Arenas & aggregates_pools, bool final, size_t rows) const;
|
||||
|
||||
template <typename Mapped>
|
||||
void insertAggregatesIntoColumns(
|
||||
@ -1293,27 +1296,16 @@ private:
|
||||
MutableColumns & final_aggregate_columns,
|
||||
Arena * arena) const;
|
||||
|
||||
template <typename Method, bool use_compiled_functions, typename Table>
|
||||
void convertToBlockImplFinal(
|
||||
Method & method,
|
||||
Table & data,
|
||||
std::vector<IColumn *> key_columns,
|
||||
MutableColumns & final_aggregate_columns,
|
||||
Arena * arena) const;
|
||||
template <bool use_compiled_functions>
|
||||
Block insertResultsIntoColumns(PaddedPODArray<AggregateDataPtr> & places, OutputBlockColumns && out_cols, Arena * arena) const;
|
||||
|
||||
template <typename Method, typename Table>
|
||||
void convertToBlockImplNotFinal(
|
||||
Method & method,
|
||||
Table & data,
|
||||
std::vector<IColumn *> key_columns,
|
||||
AggregateColumnsData & aggregate_columns) const;
|
||||
template <typename Method, bool use_compiled_functions, bool return_single_block, typename Table>
|
||||
ConvertToBlockRes<return_single_block>
|
||||
convertToBlockImplFinal(Method & method, Table & data, Arena * arena, Arenas & aggregates_pools, size_t rows) const;
|
||||
|
||||
template <typename Filler>
|
||||
Block prepareBlockAndFill(
|
||||
AggregatedDataVariants & data_variants,
|
||||
bool final,
|
||||
size_t rows,
|
||||
Filler && filler) const;
|
||||
template <bool return_single_block, typename Method, typename Table>
|
||||
ConvertToBlockRes<return_single_block>
|
||||
convertToBlockImplNotFinal(Method & method, Table & data, Arenas & aggregates_pools, size_t rows) const;
|
||||
|
||||
template <typename Method>
|
||||
Block convertOneBucketToBlock(
|
||||
@ -1331,9 +1323,11 @@ private:
|
||||
std::atomic<bool> * is_cancelled = nullptr) const;
|
||||
|
||||
Block prepareBlockAndFillWithoutKey(AggregatedDataVariants & data_variants, bool final, bool is_overflows) const;
|
||||
Block prepareBlockAndFillSingleLevel(AggregatedDataVariants & data_variants, bool final) const;
|
||||
BlocksList prepareBlocksAndFillTwoLevel(AggregatedDataVariants & data_variants, bool final, ThreadPool * thread_pool) const;
|
||||
|
||||
template <bool return_single_block>
|
||||
ConvertToBlockRes<return_single_block> prepareBlockAndFillSingleLevel(AggregatedDataVariants & data_variants, bool final) const;
|
||||
|
||||
template <typename Method>
|
||||
BlocksList prepareBlocksAndFillTwoLevelImpl(
|
||||
AggregatedDataVariants & data_variants,
|
||||
|
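The new convertToBlockImpl* family above is parameterized by return_single_block, with ConvertToBlockRes selecting the return type via std::conditional_t, and only the <false> variant of prepareBlockAndFillSingleLevel being explicitly instantiated in Aggregator.cpp. The following standalone sketch uses stand-in types rather than the real DB::Block machinery and shows the same two techniques: a bool template parameter choosing between one block and a list of blocks split by max_block_size, plus an explicit instantiation of the variant other translation units rely on.

#include <list>
#include <string>
#include <type_traits>
#include <vector>

using Block = std::vector<std::string>;  /// stand-in for DB::Block
using BlocksList = std::list<Block>;     /// stand-in for DB::BlocksList

template <bool return_single_block>
using ConvertToBlockRes = std::conditional_t<return_single_block, Block, BlocksList>;

template <bool return_single_block>
ConvertToBlockRes<return_single_block> convert(const std::vector<std::string> & rows, size_t max_block_size)
{
    if constexpr (return_single_block)
    {
        /// Single-block mode: everything goes into one block, as convertOneBucketToBlock expects.
        return Block(rows.begin(), rows.end());
    }
    else
    {
        /// Multi-block mode: split into blocks of at most max_block_size rows.
        BlocksList res;
        Block current;
        for (const auto & row : rows)
        {
            current.push_back(row);
            if (current.size() >= max_block_size)
            {
                res.emplace_back(std::move(current));
                current.clear();
            }
        }
        if (!current.empty())
            res.emplace_back(std::move(current));
        return res;
    }
}

/// Explicit instantiation of the only variant needed elsewhere, mirroring the
/// `template Aggregator::ConvertToBlockRes<false> ...` line near the end of Aggregator.cpp above.
template ConvertToBlockRes<false> convert<false>(const std::vector<std::string> & rows, size_t max_block_size);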
@ -122,6 +122,7 @@ void FileCache::initialize()
|
||||
fs::create_directories(cache_base_path);
|
||||
}
|
||||
|
||||
status_file = make_unique<StatusFile>(fs::path(cache_base_path) / "status", StatusFile::write_full_info);
|
||||
is_initialized = true;
|
||||
}
|
||||
}
|
||||
@ -963,12 +964,19 @@ void FileCache::loadCacheInfoIntoMemory(std::lock_guard<std::mutex> & cache_lock
|
||||
fs::directory_iterator key_prefix_it{cache_base_path};
|
||||
for (; key_prefix_it != fs::directory_iterator(); ++key_prefix_it)
|
||||
{
|
||||
if (!key_prefix_it->is_directory())
|
||||
{
|
||||
if (key_prefix_it->path().filename() != "status")
|
||||
LOG_DEBUG(log, "Unexpected file {} (not a directory), will skip it", key_prefix_it->path().string());
|
||||
continue;
|
||||
}
|
||||
|
||||
fs::directory_iterator key_it{key_prefix_it->path()};
|
||||
for (; key_it != fs::directory_iterator(); ++key_it)
|
||||
{
|
||||
if (!key_it->is_directory())
|
||||
{
|
||||
LOG_WARNING(log, "Unexpected file: {}. Expected a directory", key_it->path().string());
|
||||
LOG_DEBUG(log, "Unexpected file {} (not a directory), will skip it", key_it->path().string());
|
||||
continue;
|
||||
}
|
||||
|
||||
|
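The FileCache change above introduces a status file at the top of the cache directory and downgrades the "unexpected file" messages while skipping non-directories during the scan. A minimal standalone sketch of that scan pattern, using only std::filesystem and illustrative names (not the real FileCache API), looks like this:

#include <filesystem>
#include <iostream>

namespace fs = std::filesystem;

/// Walk <cache>/<key_prefix>/<key>/..., skipping stray files; the known "status" file at the
/// top level is expected and therefore skipped silently.
void scanCacheDirectory(const fs::path & cache_base_path)
{
    for (fs::directory_iterator key_prefix_it{cache_base_path}; key_prefix_it != fs::directory_iterator(); ++key_prefix_it)
    {
        if (!key_prefix_it->is_directory())
        {
            if (key_prefix_it->path().filename() != "status")
                std::cerr << "Unexpected file " << key_prefix_it->path() << " (not a directory), will skip it\n";
            continue;
        }

        for (fs::directory_iterator key_it{key_prefix_it->path()}; key_it != fs::directory_iterator(); ++key_it)
        {
            if (!key_it->is_directory())
            {
                std::cerr << "Unexpected file " << key_it->path() << " (not a directory), will skip it\n";
                continue;
            }

            /// ... the real code loads the cached file segments found under this key directory ...
        }
    }
}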
@ -18,6 +18,7 @@
|
||||
#include <Interpreters/Cache/IFileCachePriority.h>
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Interpreters/Cache/FileCacheKey.h>
|
||||
#include <Common/StatusFile.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -143,6 +144,7 @@ private:
|
||||
|
||||
bool is_initialized = false;
|
||||
std::exception_ptr initialization_exception;
|
||||
std::unique_ptr<StatusFile> status_file;
|
||||
|
||||
mutable std::mutex mutex;
|
||||
|
||||
|
@ -16,6 +16,7 @@ namespace DB
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int NOT_IMPLEMENTED;
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
InterpreterRenameQuery::InterpreterRenameQuery(const ASTPtr & query_ptr_, ContextPtr context_)
|
||||
@ -31,11 +32,11 @@ BlockIO InterpreterRenameQuery::execute()
|
||||
if (!rename.cluster.empty())
|
||||
{
|
||||
DDLQueryOnClusterParams params;
|
||||
params.access_to_check = getRequiredAccess();
|
||||
params.access_to_check = getRequiredAccess(rename.database ? RenameType::RenameDatabase : RenameType::RenameTable);
|
||||
return executeDDLQueryOnCluster(query_ptr, getContext(), params);
|
||||
}
|
||||
|
||||
getContext()->checkAccess(getRequiredAccess());
|
||||
getContext()->checkAccess(getRequiredAccess(rename.database ? RenameType::RenameDatabase : RenameType::RenameTable));
|
||||
|
||||
String path = getContext()->getPath();
|
||||
String current_database = getContext()->getCurrentDatabase();
|
||||
@ -165,18 +166,30 @@ BlockIO InterpreterRenameQuery::executeToDatabase(const ASTRenameQuery &, const
|
||||
return {};
|
||||
}
|
||||
|
||||
AccessRightsElements InterpreterRenameQuery::getRequiredAccess() const
|
||||
AccessRightsElements InterpreterRenameQuery::getRequiredAccess(InterpreterRenameQuery::RenameType type) const
|
||||
{
|
||||
AccessRightsElements required_access;
|
||||
const auto & rename = query_ptr->as<const ASTRenameQuery &>();
|
||||
for (const auto & elem : rename.elements)
|
||||
{
|
||||
required_access.emplace_back(AccessType::SELECT | AccessType::DROP_TABLE, elem.from.database, elem.from.table);
|
||||
required_access.emplace_back(AccessType::CREATE_TABLE | AccessType::INSERT, elem.to.database, elem.to.table);
|
||||
if (rename.exchange)
|
||||
if (type == RenameType::RenameTable)
|
||||
{
|
||||
required_access.emplace_back(AccessType::CREATE_TABLE | AccessType::INSERT, elem.from.database, elem.from.table);
|
||||
required_access.emplace_back(AccessType::SELECT | AccessType::DROP_TABLE, elem.to.database, elem.to.table);
|
||||
required_access.emplace_back(AccessType::SELECT | AccessType::DROP_TABLE, elem.from.database, elem.from.table);
|
||||
required_access.emplace_back(AccessType::CREATE_TABLE | AccessType::INSERT, elem.to.database, elem.to.table);
|
||||
if (rename.exchange)
|
||||
{
|
||||
required_access.emplace_back(AccessType::CREATE_TABLE | AccessType::INSERT , elem.from.database, elem.from.table);
|
||||
required_access.emplace_back(AccessType::SELECT | AccessType::DROP_TABLE, elem.to.database, elem.to.table);
|
||||
}
|
||||
}
|
||||
else if (type == RenameType::RenameDatabase)
|
||||
{
|
||||
required_access.emplace_back(AccessType::SELECT | AccessType::DROP_DATABASE, elem.from.database);
|
||||
required_access.emplace_back(AccessType::CREATE_DATABASE | AccessType::INSERT, elem.to.database);
|
||||
}
|
||||
else
|
||||
{
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown type of rename query");
|
||||
}
|
||||
}
|
||||
return required_access;
|
||||
|
@ -63,7 +63,13 @@ private:
|
||||
BlockIO executeToTables(const ASTRenameQuery & rename, const RenameDescriptions & descriptions, TableGuards & ddl_guards);
|
||||
BlockIO executeToDatabase(const ASTRenameQuery & rename, const RenameDescriptions & descriptions);
|
||||
|
||||
AccessRightsElements getRequiredAccess() const;
|
||||
enum class RenameType
|
||||
{
|
||||
RenameTable,
|
||||
RenameDatabase
|
||||
};
|
||||
|
||||
AccessRightsElements getRequiredAccess(RenameType type) const;
|
||||
|
||||
ASTPtr query_ptr;
|
||||
bool renamed_instead_of_exchange{false};
|
||||
|
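The InterpreterRenameQuery change above splits the required access rights by a new `RenameType`: renaming a table needs table-level rights on both sides (and in both directions for EXCHANGE), while renaming a database needs database-level rights. A toy illustration of that dispatch follows; the strings are made-up stand-ins for the real `AccessType` flags.

```cpp
#include <iostream>
#include <string>
#include <vector>

enum class RenameType { RenameTable, RenameDatabase };

// String stand-ins for the real AccessType flags, for illustration only.
std::vector<std::string> requiredAccess(RenameType type, bool exchange)
{
    if (type == RenameType::RenameDatabase)
        return {"SELECT | DROP_DATABASE on source db", "CREATE_DATABASE | INSERT on target db"};

    std::vector<std::string> res = {"SELECT | DROP_TABLE on source table",
                                    "CREATE_TABLE | INSERT on target table"};
    if (exchange) // EXCHANGE moves data in both directions, so both sides need both kinds of rights
    {
        res.emplace_back("CREATE_TABLE | INSERT on source table");
        res.emplace_back("SELECT | DROP_TABLE on target table");
    }
    return res;
}

int main()
{
    for (const auto & right : requiredAccess(RenameType::RenameDatabase, /*exchange=*/ false))
        std::cout << right << '\n';
}
```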
@ -39,6 +39,7 @@
|
||||
#include <QueryPipeline/Pipe.h>
|
||||
#include <Processors/QueryPlan/AggregatingStep.h>
|
||||
#include <Processors/QueryPlan/ArrayJoinStep.h>
|
||||
#include <Processors/QueryPlan/CreateSetAndFilterOnTheFlyStep.h>
|
||||
#include <Processors/QueryPlan/CreatingSetsStep.h>
|
||||
#include <Processors/QueryPlan/CubeStep.h>
|
||||
#include <Processors/QueryPlan/DistinctStep.h>
|
||||
@ -1436,7 +1437,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional<P
|
||||
if (!joined_plan)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no joined plan for query");
|
||||
|
||||
auto add_sorting = [&settings, this] (QueryPlan & plan, const Names & key_names, bool is_right)
|
||||
auto add_sorting = [&settings, this] (QueryPlan & plan, const Names & key_names, JoinTableSide join_pos)
|
||||
{
|
||||
SortDescription order_descr;
|
||||
order_descr.reserve(key_names.size());
|
||||
@ -1455,15 +1456,43 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional<P
|
||||
this->context->getTemporaryVolume(),
|
||||
settings.min_free_disk_space_for_temporary_data,
|
||||
settings.optimize_sorting_by_input_stream_properties);
|
||||
sorting_step->setStepDescription(fmt::format("Sort {} before JOIN", is_right ? "right" : "left"));
|
||||
sorting_step->setStepDescription(fmt::format("Sort {} before JOIN", join_pos));
|
||||
plan.addStep(std::move(sorting_step));
|
||||
};
|
||||
|
||||
auto crosswise_connection = CreateSetAndFilterOnTheFlyStep::createCrossConnection();
|
||||
auto add_create_set = [&settings, crosswise_connection](QueryPlan & plan, const Names & key_names, JoinTableSide join_pos)
|
||||
{
|
||||
auto creating_set_step = std::make_unique<CreateSetAndFilterOnTheFlyStep>(
|
||||
plan.getCurrentDataStream(), key_names, settings.max_rows_in_set_to_optimize_join, crosswise_connection, join_pos);
|
||||
creating_set_step->setStepDescription(fmt::format("Create set and filter {} joined stream", join_pos));
|
||||
|
||||
auto * step_raw_ptr = creating_set_step.get();
|
||||
plan.addStep(std::move(creating_set_step));
|
||||
return step_raw_ptr;
|
||||
};
|
||||
|
||||
if (expressions.join->pipelineType() == JoinPipelineType::YShaped)
|
||||
{
|
||||
const auto & join_clause = expressions.join->getTableJoin().getOnlyClause();
|
||||
add_sorting(query_plan, join_clause.key_names_left, false);
|
||||
add_sorting(*joined_plan, join_clause.key_names_right, true);
|
||||
const auto & table_join = expressions.join->getTableJoin();
|
||||
const auto & join_clause = table_join.getOnlyClause();
|
||||
|
||||
auto join_kind = table_join.kind();
|
||||
bool kind_allows_filtering = isInner(join_kind) || isLeft(join_kind) || isRight(join_kind);
|
||||
if (settings.max_rows_in_set_to_optimize_join > 0 && kind_allows_filtering)
|
||||
{
|
||||
auto * left_set = add_create_set(query_plan, join_clause.key_names_left, JoinTableSide::Left);
|
||||
auto * right_set = add_create_set(*joined_plan, join_clause.key_names_right, JoinTableSide::Right);
|
||||
|
||||
if (isInnerOrLeft(join_kind))
|
||||
right_set->setFiltering(left_set->getSet());
|
||||
|
||||
if (isInnerOrRight(join_kind))
|
||||
left_set->setFiltering(right_set->getSet());
|
||||
}
|
||||
|
||||
add_sorting(query_plan, join_clause.key_names_left, JoinTableSide::Left);
|
||||
add_sorting(*joined_plan, join_clause.key_names_right, JoinTableSide::Right);
|
||||
}
|
||||
|
||||
QueryPlanStepPtr join_step = std::make_unique<JoinStep>(
|
||||
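The block above wires `CreateSetAndFilterOnTheFlyStep` into both inputs of a full sorting join when `max_rows_in_set_to_optimize_join` is enabled: while one side streams, its join keys are collected into a set that is then used to drop non-matching rows from the other side before the expensive sort/merge. A standalone toy of that filtering idea, using plain containers instead of the query-plan machinery (all names invented):

```cpp
#include <algorithm>
#include <iostream>
#include <iterator>
#include <unordered_set>
#include <vector>

int main()
{
    // Join keys of the two inputs of an INNER join.
    std::vector<int> left_keys  = {1, 2, 3, 5, 8};
    std::vector<int> right_keys = {2, 4, 5, 6, 8, 9};

    // "Create set on the fly" for the left side...
    std::unordered_set<int> left_set(left_keys.begin(), left_keys.end());

    // ...and use it to filter the right side before the expensive sort/merge.
    std::vector<int> filtered_right;
    std::copy_if(right_keys.begin(), right_keys.end(), std::back_inserter(filtered_right),
                 [&](int key) { return left_set.count(key) > 0; });

    for (int key : filtered_right)
        std::cout << key << ' ';   // prints "2 5 8", the only rows that can match
    std::cout << '\n';
}
```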
@ -1734,7 +1763,7 @@ static void executeMergeAggregatedImpl(
|
||||
* but it can work more slowly.
|
||||
*/
|
||||
|
||||
Aggregator::Params params(keys, aggregates, overflow_row, settings.max_threads);
|
||||
Aggregator::Params params(keys, aggregates, overflow_row, settings.max_threads, settings.max_block_size);
|
||||
|
||||
auto merging_aggregated = std::make_unique<MergingAggregatedStep>(
|
||||
query_plan.getCurrentDataStream(),
|
||||
@ -2330,6 +2359,7 @@ static Aggregator::Params getAggregatorParams(
|
||||
settings.min_free_disk_space_for_temporary_data,
|
||||
settings.compile_aggregate_expressions,
|
||||
settings.min_count_to_compile_aggregate_expression,
|
||||
settings.max_block_size,
|
||||
/* only_merge */ false,
|
||||
stats_collecting_params
|
||||
};
|
||||
|
@ -1,16 +1,17 @@
|
||||
#include <Common/typeid_cast.h>
|
||||
#include <Common/quoteString.h>
|
||||
#include <Columns/IColumn.h>
|
||||
#include <DataTypes/IDataType.h>
|
||||
#include <DataTypes/DataTypeFactory.h>
|
||||
#include <DataTypes/IDataType.h>
|
||||
#include <Formats/FormatSettings.h>
|
||||
#include <IO/ReadBufferFromString.h>
|
||||
#include <Parsers/ASTIdentifier.h>
|
||||
#include <Parsers/ASTLiteral.h>
|
||||
#include <Parsers/ASTQueryParameter.h>
|
||||
#include <Interpreters/IdentifierSemantic.h>
|
||||
#include <Interpreters/ReplaceQueryParameterVisitor.h>
|
||||
#include <Interpreters/addTypeConversionToAST.h>
|
||||
#include <Parsers/ASTIdentifier.h>
|
||||
#include <Parsers/ASTLiteral.h>
|
||||
#include <Parsers/ASTQueryParameter.h>
|
||||
#include <Parsers/TablePropertiesQueriesASTs.h>
|
||||
#include <Common/quoteString.h>
|
||||
#include <Common/typeid_cast.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
@ -30,7 +31,12 @@ void ReplaceQueryParameterVisitor::visit(ASTPtr & ast)
|
||||
else if (ast->as<ASTIdentifier>() || ast->as<ASTTableIdentifier>())
|
||||
visitIdentifier(ast);
|
||||
else
|
||||
visitChildren(ast);
|
||||
{
|
||||
if (auto * describe_query = dynamic_cast<ASTDescribeQuery *>(ast.get()); describe_query && describe_query->table_expression)
|
||||
visitChildren(describe_query->table_expression);
|
||||
else
|
||||
visitChildren(ast);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
@ -22,6 +22,8 @@
|
||||
#include <Interpreters/castColumn.h>
|
||||
#include <Interpreters/Context.h>
|
||||
|
||||
#include <Processors/Chunk.h>
|
||||
|
||||
#include <Storages/MergeTree/KeyCondition.h>
|
||||
|
||||
#include <base/range.h>
|
||||
@ -162,8 +164,16 @@ void Set::setHeader(const ColumnsWithTypeAndName & header)
|
||||
data.init(data.chooseMethod(key_columns, key_sizes));
|
||||
}
|
||||
|
||||
|
||||
bool Set::insertFromBlock(const ColumnsWithTypeAndName & columns)
|
||||
{
|
||||
Columns cols;
|
||||
cols.reserve(columns.size());
|
||||
for (const auto & column : columns)
|
||||
cols.emplace_back(column.column);
|
||||
return insertFromBlock(cols);
|
||||
}
|
||||
|
||||
bool Set::insertFromBlock(const Columns & columns)
|
||||
{
|
||||
std::lock_guard<std::shared_mutex> lock(rwlock);
|
||||
|
||||
@ -179,11 +189,11 @@ bool Set::insertFromBlock(const ColumnsWithTypeAndName & columns)
|
||||
/// Remember the columns we will work with
|
||||
for (size_t i = 0; i < keys_size; ++i)
|
||||
{
|
||||
materialized_columns.emplace_back(columns.at(i).column->convertToFullIfNeeded());
|
||||
materialized_columns.emplace_back(columns.at(i)->convertToFullIfNeeded());
|
||||
key_columns.emplace_back(materialized_columns.back().get());
|
||||
}
|
||||
|
||||
size_t rows = columns.at(0).column->size();
|
||||
size_t rows = columns.at(0)->size();
|
||||
|
||||
/// We will insert to the Set only keys, where all components are not NULL.
|
||||
ConstNullMapPtr null_map{};
|
||||
@ -393,7 +403,13 @@ void Set::checkColumnsNumber(size_t num_key_columns) const
|
||||
|
||||
bool Set::areTypesEqual(size_t set_type_idx, const DataTypePtr & other_type) const
|
||||
{
|
||||
return removeNullable(recursiveRemoveLowCardinality(data_types[set_type_idx]))->equals(*removeNullable(recursiveRemoveLowCardinality(other_type)));
|
||||
/// Out-of-bounds access can happen when the same set expression is built with different columns.
/// The caller may use this method to make sure that the set is indeed the one they want
/// without being aware of data_types.size().
|
||||
if (set_type_idx >= data_types.size())
|
||||
return false;
|
||||
return removeNullable(recursiveRemoveLowCardinality(data_types[set_type_idx]))
|
||||
->equals(*removeNullable(recursiveRemoveLowCardinality(other_type)));
|
||||
}
|
||||
|
||||
void Set::checkTypesEqual(size_t set_type_idx, const DataTypePtr & other_type) const
|
||||
|
@ -20,6 +20,7 @@ class Context;
|
||||
class IFunctionBase;
|
||||
using FunctionBasePtr = std::shared_ptr<IFunctionBase>;
|
||||
|
||||
class Chunk;
|
||||
|
||||
/** Data structure for implementation of IN expression.
|
||||
*/
|
||||
@ -45,11 +46,14 @@ public:
|
||||
void setHeader(const ColumnsWithTypeAndName & header);
|
||||
|
||||
/// Returns false, if some limit was exceeded and no need to insert more data.
|
||||
bool insertFromBlock(const Columns & columns);
|
||||
bool insertFromBlock(const ColumnsWithTypeAndName & columns);
|
||||
|
||||
/// Call after all blocks were inserted. To get the information that set is already created.
|
||||
void finishInsert() { is_created = true; }
|
||||
|
||||
bool isCreated() const { return is_created; }
|
||||
/// finishInsert and isCreated are thread-safe
|
||||
bool isCreated() const { return is_created.load(); }
|
||||
|
||||
/** For columns of 'block', check belonging of corresponding rows to the set.
|
||||
* Return UInt8 column with the result.
|
||||
@ -111,7 +115,7 @@ private:
|
||||
bool transform_null_in;
|
||||
|
||||
/// Check if set contains all the data.
|
||||
bool is_created = false;
|
||||
std::atomic<bool> is_created = false;
|
||||
|
||||
/// If in the left part columns contains the same types as the elements of the set.
|
||||
void executeOrdinary(
|
||||
|
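The header change above turns `is_created` into `std::atomic<bool>` so that `finishInsert()` and `isCreated()` are safe to call from different threads without taking the set's lock. A self-contained sketch of that pattern (the class and names are mine, not ClickHouse's):

```cpp
#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>

struct ToySet
{
    std::atomic<bool> is_created{false};

    void finishInsert() { is_created.store(true); }        // writer thread, after the last block
    bool isCreated() const { return is_created.load(); }   // reader threads, no lock required
};

int main()
{
    ToySet set;

    std::thread filler([&]
    {
        std::this_thread::sleep_for(std::chrono::milliseconds(10)); // pretend to insert blocks
        set.finishInsert();
    });

    while (!set.isCreated())
        std::this_thread::yield();

    std::cout << "set is ready\n";
    filler.join();
}
```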
@ -73,16 +73,32 @@ public:
|
||||
return key_names_right.size();
|
||||
}
|
||||
|
||||
String formatDebug() const
|
||||
String formatDebug(bool short_format = false) const
|
||||
{
|
||||
return fmt::format("Left keys: [{}] Right keys [{}] Condition columns: '{}', '{}'",
|
||||
fmt::join(key_names_left, ", "), fmt::join(key_names_right, ", "),
|
||||
condColumnNames().first, condColumnNames().second);
|
||||
const auto & [left_cond, right_cond] = condColumnNames();
|
||||
|
||||
if (short_format)
|
||||
{
|
||||
return fmt::format("({}) = ({}){}{}", fmt::join(key_names_left, ", "), fmt::join(key_names_right, ", "),
|
||||
!left_cond.empty() ? " AND " + left_cond : "", !right_cond.empty() ? " AND " + right_cond : "");
|
||||
}
|
||||
|
||||
return fmt::format(
|
||||
"Left keys: [{}] Right keys [{}] Condition columns: '{}', '{}'",
|
||||
fmt::join(key_names_left, ", "), fmt::join(key_names_right, ", "), left_cond, right_cond);
|
||||
}
|
||||
};
|
||||
|
||||
using Clauses = std::vector<JoinOnClause>;
|
||||
|
||||
static std::string formatClauses(const Clauses & clauses, bool short_format = false)
|
||||
{
|
||||
std::vector<std::string> res;
|
||||
for (const auto & clause : clauses)
|
||||
res.push_back("[" + clause.formatDebug(short_format) + "]");
|
||||
return fmt::format("{}", fmt::join(res, "; "));
|
||||
}
|
||||
|
||||
private:
|
||||
/** Query of the form `SELECT expr(x) AS k FROM t1 ANY LEFT JOIN (SELECT expr(x) AS k FROM t2) USING k`
|
||||
* The join is made by column k.
|
||||
|
@ -453,7 +453,7 @@ void optimizeMonotonousFunctionsInOrderBy(ASTSelectQuery * select_query, Context
|
||||
return;
|
||||
|
||||
/// Do not apply optimization for Distributed and Merge storages,
|
||||
/// because we can't get the sorting key of their undelying tables
|
||||
/// because we can't get the sorting key of their underlying tables
|
||||
/// and we can break the matching of the sorting key for `read_in_order`
|
||||
/// optimization by removing monotonous functions from the prefix of key.
|
||||
if (result.is_remote_storage || (result.storage && result.storage->getName() == "Merge"))
|
||||
|
@ -521,10 +521,15 @@ void removeUnneededColumnsFromSelectClause(ASTSelectQuery * select_query, const
|
||||
++new_elements_size;
|
||||
}
|
||||
/// removing aggregation can change number of rows, so `count()` result in outer sub-query would be wrong
|
||||
if (func && AggregateUtils::isAggregateFunction(*func) && !select_query->groupBy())
|
||||
if (func && !select_query->groupBy())
|
||||
{
|
||||
new_elements[result_index] = elem;
|
||||
++new_elements_size;
|
||||
GetAggregatesVisitor::Data data = {};
|
||||
GetAggregatesVisitor(data).visit(elem);
|
||||
if (!data.aggregates.empty())
|
||||
{
|
||||
new_elements[result_index] = elem;
|
||||
++new_elements_size;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -89,7 +89,6 @@ TEST(FileCache, get)
|
||||
{
|
||||
if (fs::exists(cache_base_path))
|
||||
fs::remove_all(cache_base_path);
|
||||
fs::create_directories(cache_base_path);
|
||||
|
||||
DB::ThreadStatus thread_status;
|
||||
|
||||
@ -103,373 +102,376 @@ TEST(FileCache, get)
|
||||
DB::FileCacheSettings settings;
|
||||
settings.max_size = 30;
|
||||
settings.max_elements = 5;
|
||||
auto cache = DB::FileCache(cache_base_path, settings);
|
||||
cache.initialize();
|
||||
auto key = cache.hash("key1");
|
||||
|
||||
{
|
||||
auto holder = cache.getOrSet(key, 0, 10, false); /// Add range [0, 9]
|
||||
auto segments = fromHolder(holder);
|
||||
/// Range was not present in cache. It should be added in cache as one whole file segment.
|
||||
ASSERT_EQ(segments.size(), 1);
|
||||
auto cache = DB::FileCache(cache_base_path, settings);
|
||||
cache.initialize();
|
||||
auto key = cache.hash("key1");
|
||||
|
||||
assertRange(1, segments[0], DB::FileSegment::Range(0, 9), DB::FileSegment::State::EMPTY);
|
||||
{
|
||||
auto holder = cache.getOrSet(key, 0, 10, false); /// Add range [0, 9]
|
||||
auto segments = fromHolder(holder);
|
||||
/// Range was not present in cache. It should be added in cache as one whole file segment.
|
||||
ASSERT_EQ(segments.size(), 1);
|
||||
|
||||
/// Exception because space not reserved.
|
||||
/// EXPECT_THROW(download(segments[0]), DB::Exception);
|
||||
/// Exception because space can be reserved only by downloader
|
||||
/// EXPECT_THROW(segments[0]->reserve(segments[0]->range().size()), DB::Exception);
|
||||
assertRange(1, segments[0], DB::FileSegment::Range(0, 9), DB::FileSegment::State::EMPTY);
|
||||
|
||||
ASSERT_TRUE(segments[0]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(segments[0]->reserve(segments[0]->range().size()));
|
||||
assertRange(2, segments[0], DB::FileSegment::Range(0, 9), DB::FileSegment::State::DOWNLOADING);
|
||||
/// Exception because space not reserved.
|
||||
/// EXPECT_THROW(download(segments[0]), DB::Exception);
|
||||
/// Exception because space can be reserved only by downloader
|
||||
/// EXPECT_THROW(segments[0]->reserve(segments[0]->range().size()), DB::Exception);
|
||||
|
||||
download(segments[0]);
|
||||
segments[0]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(3, segments[0], DB::FileSegment::Range(0, 9), DB::FileSegment::State::DOWNLOADED);
|
||||
}
|
||||
ASSERT_TRUE(segments[0]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(segments[0]->reserve(segments[0]->range().size()));
|
||||
assertRange(2, segments[0], DB::FileSegment::Range(0, 9), DB::FileSegment::State::DOWNLOADING);
|
||||
|
||||
/// Current cache: [__________]
|
||||
/// ^ ^
|
||||
/// 0 9
|
||||
ASSERT_EQ(cache.getFileSegmentsNum(), 1);
|
||||
ASSERT_EQ(cache.getUsedCacheSize(), 10);
|
||||
download(segments[0]);
|
||||
segments[0]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(3, segments[0], DB::FileSegment::Range(0, 9), DB::FileSegment::State::DOWNLOADED);
|
||||
}
|
||||
|
||||
{
|
||||
/// Want range [5, 14], but [0, 9] already in cache, so only [10, 14] will be put in cache.
|
||||
auto holder = cache.getOrSet(key, 5, 10, false);
|
||||
auto segments = fromHolder(holder);
|
||||
ASSERT_EQ(segments.size(), 2);
|
||||
/// Current cache: [__________]
|
||||
/// ^ ^
|
||||
/// 0 9
|
||||
ASSERT_EQ(cache.getFileSegmentsNum(), 1);
|
||||
ASSERT_EQ(cache.getUsedCacheSize(), 10);
|
||||
|
||||
assertRange(4, segments[0], DB::FileSegment::Range(0, 9), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(5, segments[1], DB::FileSegment::Range(10, 14), DB::FileSegment::State::EMPTY);
|
||||
{
|
||||
/// Want range [5, 14], but [0, 9] already in cache, so only [10, 14] will be put in cache.
|
||||
auto holder = cache.getOrSet(key, 5, 10, false);
|
||||
auto segments = fromHolder(holder);
|
||||
ASSERT_EQ(segments.size(), 2);
|
||||
|
||||
ASSERT_TRUE(segments[1]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
prepareAndDownload(segments[1]);
|
||||
segments[1]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(6, segments[1], DB::FileSegment::Range(10, 14), DB::FileSegment::State::DOWNLOADED);
|
||||
}
|
||||
assertRange(4, segments[0], DB::FileSegment::Range(0, 9), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(5, segments[1], DB::FileSegment::Range(10, 14), DB::FileSegment::State::EMPTY);
|
||||
|
||||
/// Current cache: [__________][_____]
|
||||
/// ^ ^^ ^
|
||||
/// 0 910 14
|
||||
ASSERT_EQ(cache.getFileSegmentsNum(), 2);
|
||||
ASSERT_EQ(cache.getUsedCacheSize(), 15);
|
||||
ASSERT_TRUE(segments[1]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
prepareAndDownload(segments[1]);
|
||||
segments[1]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(6, segments[1], DB::FileSegment::Range(10, 14), DB::FileSegment::State::DOWNLOADED);
|
||||
}
|
||||
|
||||
{
|
||||
auto holder = cache.getOrSet(key, 9, 1, false); /// Get [9, 9]
|
||||
auto segments = fromHolder(holder);
|
||||
ASSERT_EQ(segments.size(), 1);
|
||||
assertRange(7, segments[0], DB::FileSegment::Range(0, 9), DB::FileSegment::State::DOWNLOADED);
|
||||
}
|
||||
/// Current cache: [__________][_____]
|
||||
/// ^ ^^ ^
|
||||
/// 0 910 14
|
||||
ASSERT_EQ(cache.getFileSegmentsNum(), 2);
|
||||
ASSERT_EQ(cache.getUsedCacheSize(), 15);
|
||||
|
||||
{
|
||||
auto holder = cache.getOrSet(key, 9, 2, false); /// Get [9, 10]
|
||||
auto segments = fromHolder(holder);
|
||||
ASSERT_EQ(segments.size(), 2);
|
||||
assertRange(8, segments[0], DB::FileSegment::Range(0, 9), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(9, segments[1], DB::FileSegment::Range(10, 14), DB::FileSegment::State::DOWNLOADED);
|
||||
}
|
||||
{
|
||||
auto holder = cache.getOrSet(key, 9, 1, false); /// Get [9, 9]
|
||||
auto segments = fromHolder(holder);
|
||||
ASSERT_EQ(segments.size(), 1);
|
||||
assertRange(7, segments[0], DB::FileSegment::Range(0, 9), DB::FileSegment::State::DOWNLOADED);
|
||||
}
|
||||
|
||||
{
|
||||
auto holder = cache.getOrSet(key, 10, 1, false); /// Get [10, 10]
|
||||
auto segments = fromHolder(holder);
|
||||
ASSERT_EQ(segments.size(), 1);
|
||||
assertRange(10, segments[0], DB::FileSegment::Range(10, 14), DB::FileSegment::State::DOWNLOADED);
|
||||
}
|
||||
{
|
||||
auto holder = cache.getOrSet(key, 9, 2, false); /// Get [9, 10]
|
||||
auto segments = fromHolder(holder);
|
||||
ASSERT_EQ(segments.size(), 2);
|
||||
assertRange(8, segments[0], DB::FileSegment::Range(0, 9), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(9, segments[1], DB::FileSegment::Range(10, 14), DB::FileSegment::State::DOWNLOADED);
|
||||
}
|
||||
|
||||
complete(cache.getOrSet(key, 17, 4, false)); /// Get [17, 20]
|
||||
complete(cache.getOrSet(key, 24, 3, false)); /// Get [24, 26]
|
||||
/// complete(cache.getOrSet(key, 27, 1, false)); /// Get [27, 27]
|
||||
{
|
||||
auto holder = cache.getOrSet(key, 10, 1, false); /// Get [10, 10]
|
||||
auto segments = fromHolder(holder);
|
||||
ASSERT_EQ(segments.size(), 1);
|
||||
assertRange(10, segments[0], DB::FileSegment::Range(10, 14), DB::FileSegment::State::DOWNLOADED);
|
||||
}
|
||||
|
||||
/// Current cache: [__________][_____] [____] [___][]
|
||||
/// ^ ^^ ^ ^ ^ ^ ^^^
|
||||
/// 0 910 14 17 20 24 2627
|
||||
///
|
||||
ASSERT_EQ(cache.getFileSegmentsNum(), 4);
|
||||
ASSERT_EQ(cache.getUsedCacheSize(), 22);
|
||||
complete(cache.getOrSet(key, 17, 4, false)); /// Get [17, 20]
|
||||
complete(cache.getOrSet(key, 24, 3, false)); /// Get [24, 26]
|
||||
/// complete(cache.getOrSet(key, 27, 1, false)); /// Get [27, 27]
|
||||
|
||||
{
|
||||
auto holder = cache.getOrSet(key, 0, 26, false); /// Get [0, 25]
|
||||
auto segments = fromHolder(holder);
|
||||
ASSERT_EQ(segments.size(), 6);
|
||||
|
||||
assertRange(11, segments[0], DB::FileSegment::Range(0, 9), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(12, segments[1], DB::FileSegment::Range(10, 14), DB::FileSegment::State::DOWNLOADED);
|
||||
|
||||
/// Missing [15, 16] should be added in cache.
|
||||
assertRange(13, segments[2], DB::FileSegment::Range(15, 16), DB::FileSegment::State::EMPTY);
|
||||
|
||||
ASSERT_TRUE(segments[2]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
prepareAndDownload(segments[2]);
|
||||
|
||||
segments[2]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
|
||||
assertRange(14, segments[3], DB::FileSegment::Range(17, 20), DB::FileSegment::State::DOWNLOADED);
|
||||
|
||||
/// New [21, 23], but will not be added in cache because of elements limit (5)
|
||||
assertRange(15, segments[4], DB::FileSegment::Range(21, 23), DB::FileSegment::State::EMPTY);
|
||||
ASSERT_TRUE(segments[4]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_FALSE(segments[4]->reserve(1));
|
||||
|
||||
assertRange(16, segments[5], DB::FileSegment::Range(24, 26), DB::FileSegment::State::DOWNLOADED);
|
||||
|
||||
/// Current cache: [__________][_____][ ][____] [___]
|
||||
/// ^ ^ ^
|
||||
/// 0 20 24
|
||||
/// Current cache: [__________][_____] [____] [___][]
|
||||
/// ^ ^^ ^ ^ ^ ^ ^^^
|
||||
/// 0 910 14 17 20 24 2627
|
||||
///
|
||||
ASSERT_EQ(cache.getFileSegmentsNum(), 4);
|
||||
ASSERT_EQ(cache.getUsedCacheSize(), 22);
|
||||
|
||||
/// Range [27, 27] must be evicted in previous getOrSet [0, 25].
|
||||
/// Let's not invalidate pointers to returned segments from range [0, 25] and
|
||||
/// as max elements size is reached, next attempt to put something in cache should fail.
|
||||
/// This will also check that [27, 27] was indeed evicted.
|
||||
{
|
||||
auto holder = cache.getOrSet(key, 0, 26, false); /// Get [0, 25]
|
||||
auto segments = fromHolder(holder);
|
||||
ASSERT_EQ(segments.size(), 6);
|
||||
|
||||
auto holder1 = cache.getOrSet(key, 27, 1, false);
|
||||
auto segments_1 = fromHolder(holder1); /// Get [27, 27]
|
||||
ASSERT_EQ(segments_1.size(), 1);
|
||||
assertRange(17, segments_1[0], DB::FileSegment::Range(27, 27), DB::FileSegment::State::EMPTY);
|
||||
}
|
||||
assertRange(11, segments[0], DB::FileSegment::Range(0, 9), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(12, segments[1], DB::FileSegment::Range(10, 14), DB::FileSegment::State::DOWNLOADED);
|
||||
|
||||
{
|
||||
auto holder = cache.getOrSet(key, 12, 10, false); /// Get [12, 21]
|
||||
auto segments = fromHolder(holder);
|
||||
ASSERT_EQ(segments.size(), 4);
|
||||
/// Missing [15, 16] should be added in cache.
|
||||
assertRange(13, segments[2], DB::FileSegment::Range(15, 16), DB::FileSegment::State::EMPTY);
|
||||
|
||||
assertRange(18, segments[0], DB::FileSegment::Range(10, 14), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(19, segments[1], DB::FileSegment::Range(15, 16), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(20, segments[2], DB::FileSegment::Range(17, 20), DB::FileSegment::State::DOWNLOADED);
|
||||
ASSERT_TRUE(segments[2]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
prepareAndDownload(segments[2]);
|
||||
|
||||
assertRange(21, segments[3], DB::FileSegment::Range(21, 21), DB::FileSegment::State::EMPTY);
|
||||
segments[2]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
|
||||
ASSERT_TRUE(segments[3]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
prepareAndDownload(segments[3]);
|
||||
assertRange(14, segments[3], DB::FileSegment::Range(17, 20), DB::FileSegment::State::DOWNLOADED);
|
||||
|
||||
segments[3]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
ASSERT_TRUE(segments[3]->state() == DB::FileSegment::State::DOWNLOADED);
|
||||
}
|
||||
/// New [21, 23], but will not be added in cache because of elements limit (5)
|
||||
assertRange(15, segments[4], DB::FileSegment::Range(21, 23), DB::FileSegment::State::EMPTY);
|
||||
ASSERT_TRUE(segments[4]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_FALSE(segments[4]->reserve(1));
|
||||
|
||||
/// Current cache: [_____][__][____][_] [___]
|
||||
/// ^ ^ ^ ^ ^
|
||||
/// 10 17 21 24 26
|
||||
assertRange(16, segments[5], DB::FileSegment::Range(24, 26), DB::FileSegment::State::DOWNLOADED);
|
||||
|
||||
ASSERT_EQ(cache.getFileSegmentsNum(), 5);
|
||||
/// Current cache: [__________][_____][ ][____] [___]
|
||||
/// ^ ^ ^
|
||||
/// 0 20 24
|
||||
///
|
||||
|
||||
{
|
||||
auto holder = cache.getOrSet(key, 23, 5, false); /// Get [23, 28]
|
||||
auto segments = fromHolder(holder);
|
||||
ASSERT_EQ(segments.size(), 3);
|
||||
/// Range [27, 27] must be evicted in previous getOrSet [0, 25].
|
||||
/// Let's not invalidate pointers to returned segments from range [0, 25] and
|
||||
/// as max elements size is reached, next attempt to put something in cache should fail.
|
||||
/// This will also check that [27, 27] was indeed evicted.
|
||||
|
||||
assertRange(22, segments[0], DB::FileSegment::Range(23, 23), DB::FileSegment::State::EMPTY);
|
||||
assertRange(23, segments[1], DB::FileSegment::Range(24, 26), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(24, segments[2], DB::FileSegment::Range(27, 27), DB::FileSegment::State::EMPTY);
|
||||
auto holder1 = cache.getOrSet(key, 27, 1, false);
|
||||
auto segments_1 = fromHolder(holder1); /// Get [27, 27]
|
||||
ASSERT_EQ(segments_1.size(), 1);
|
||||
assertRange(17, segments_1[0], DB::FileSegment::Range(27, 27), DB::FileSegment::State::EMPTY);
|
||||
}
|
||||
|
||||
ASSERT_TRUE(segments[0]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(segments[2]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
prepareAndDownload(segments[0]);
|
||||
prepareAndDownload(segments[2]);
|
||||
segments[0]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
segments[2]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
}
|
||||
{
|
||||
auto holder = cache.getOrSet(key, 12, 10, false); /// Get [12, 21]
|
||||
auto segments = fromHolder(holder);
|
||||
ASSERT_EQ(segments.size(), 4);
|
||||
|
||||
/// Current cache: [____][_] [][___][__]
|
||||
/// ^ ^ ^^^ ^^ ^
|
||||
/// 17 21 2324 26 28
|
||||
assertRange(18, segments[0], DB::FileSegment::Range(10, 14), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(19, segments[1], DB::FileSegment::Range(15, 16), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(20, segments[2], DB::FileSegment::Range(17, 20), DB::FileSegment::State::DOWNLOADED);
|
||||
|
||||
{
|
||||
auto holder5 = cache.getOrSet(key, 2, 3,false); /// Get [2, 4]
|
||||
auto s5 = fromHolder(holder5);
|
||||
ASSERT_EQ(s5.size(), 1);
|
||||
assertRange(25, s5[0], DB::FileSegment::Range(2, 4), DB::FileSegment::State::EMPTY);
|
||||
assertRange(21, segments[3], DB::FileSegment::Range(21, 21), DB::FileSegment::State::EMPTY);
|
||||
|
||||
auto holder1 = cache.getOrSet(key, 30, 2, false); /// Get [30, 31]
|
||||
auto s1 = fromHolder(holder1);
|
||||
ASSERT_EQ(s1.size(), 1);
|
||||
assertRange(26, s1[0], DB::FileSegment::Range(30, 31), DB::FileSegment::State::EMPTY);
|
||||
ASSERT_TRUE(segments[3]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
prepareAndDownload(segments[3]);
|
||||
|
||||
ASSERT_TRUE(s5[0]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(s1[0]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
prepareAndDownload(s5[0]);
|
||||
prepareAndDownload(s1[0]);
|
||||
s5[0]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
s1[0]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
segments[3]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
ASSERT_TRUE(segments[3]->state() == DB::FileSegment::State::DOWNLOADED);
|
||||
}
|
||||
|
||||
/// Current cache: [_____][__][____][_] [___]
|
||||
/// ^ ^ ^ ^ ^
|
||||
/// 10 17 21 24 26
|
||||
|
||||
ASSERT_EQ(cache.getFileSegmentsNum(), 5);
|
||||
|
||||
{
|
||||
auto holder = cache.getOrSet(key, 23, 5, false); /// Get [23, 28]
|
||||
auto segments = fromHolder(holder);
|
||||
ASSERT_EQ(segments.size(), 3);
|
||||
|
||||
assertRange(22, segments[0], DB::FileSegment::Range(23, 23), DB::FileSegment::State::EMPTY);
|
||||
assertRange(23, segments[1], DB::FileSegment::Range(24, 26), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(24, segments[2], DB::FileSegment::Range(27, 27), DB::FileSegment::State::EMPTY);
|
||||
|
||||
ASSERT_TRUE(segments[0]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(segments[2]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
prepareAndDownload(segments[0]);
|
||||
prepareAndDownload(segments[2]);
|
||||
segments[0]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
segments[2]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
}
|
||||
|
||||
/// Current cache: [____][_] [][___][__]
|
||||
/// ^ ^ ^^^ ^^ ^
|
||||
/// 17 21 2324 26 28
|
||||
|
||||
{
|
||||
auto holder5 = cache.getOrSet(key, 2, 3,false); /// Get [2, 4]
|
||||
auto s5 = fromHolder(holder5);
|
||||
ASSERT_EQ(s5.size(), 1);
|
||||
assertRange(25, s5[0], DB::FileSegment::Range(2, 4), DB::FileSegment::State::EMPTY);
|
||||
|
||||
auto holder1 = cache.getOrSet(key, 30, 2, false); /// Get [30, 31]
|
||||
auto s1 = fromHolder(holder1);
|
||||
ASSERT_EQ(s1.size(), 1);
|
||||
assertRange(26, s1[0], DB::FileSegment::Range(30, 31), DB::FileSegment::State::EMPTY);
|
||||
|
||||
ASSERT_TRUE(s5[0]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(s1[0]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
prepareAndDownload(s5[0]);
|
||||
prepareAndDownload(s1[0]);
|
||||
s5[0]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
s1[0]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
|
||||
/// Current cache: [___] [_][___][_] [__]
|
||||
/// ^ ^ ^ ^ ^ ^ ^ ^
|
||||
/// 2 4 23 24 26 27 30 31
|
||||
|
||||
auto holder2 = cache.getOrSet(key, 23, 1, false); /// Get [23, 23]
|
||||
auto s2 = fromHolder(holder2);
|
||||
ASSERT_EQ(s2.size(), 1);
|
||||
|
||||
auto holder3 = cache.getOrSet(key, 24, 3, false); /// Get [24, 26]
|
||||
auto s3 = fromHolder(holder3);
|
||||
ASSERT_EQ(s3.size(), 1);
|
||||
|
||||
auto holder4 = cache.getOrSet(key, 27, 1, false); /// Get [27, 27]
|
||||
auto s4 = fromHolder(holder4);
|
||||
ASSERT_EQ(s4.size(), 1);
|
||||
|
||||
/// All cache is now unreleasable because pointers are still held
|
||||
auto holder6 = cache.getOrSet(key, 0, 40, false);
|
||||
auto f = fromHolder(holder6);
|
||||
ASSERT_EQ(f.size(), 9);
|
||||
|
||||
assertRange(27, f[0], DB::FileSegment::Range(0, 1), DB::FileSegment::State::EMPTY);
|
||||
assertRange(28, f[2], DB::FileSegment::Range(5, 22), DB::FileSegment::State::EMPTY);
|
||||
assertRange(29, f[6], DB::FileSegment::Range(28, 29), DB::FileSegment::State::EMPTY);
|
||||
assertRange(30, f[8], DB::FileSegment::Range(32, 39), DB::FileSegment::State::EMPTY);
|
||||
|
||||
ASSERT_TRUE(f[0]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(f[2]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(f[6]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(f[8]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
|
||||
ASSERT_FALSE(f[0]->reserve(1));
|
||||
ASSERT_FALSE(f[2]->reserve(1));
|
||||
ASSERT_FALSE(f[6]->reserve(1));
|
||||
ASSERT_FALSE(f[8]->reserve(1));
|
||||
}
|
||||
|
||||
{
|
||||
auto holder = cache.getOrSet(key, 2, 3, false); /// Get [2, 4]
|
||||
auto segments = fromHolder(holder);
|
||||
ASSERT_EQ(segments.size(), 1);
|
||||
assertRange(31, segments[0], DB::FileSegment::Range(2, 4), DB::FileSegment::State::DOWNLOADED);
|
||||
}
|
||||
|
||||
/// Current cache: [___] [_][___][_] [__]
|
||||
/// ^ ^ ^ ^ ^ ^ ^ ^
|
||||
/// 2 4 23 24 26 27 30 31
|
||||
|
||||
auto holder2 = cache.getOrSet(key, 23, 1, false); /// Get [23, 23]
|
||||
auto s2 = fromHolder(holder2);
|
||||
ASSERT_EQ(s2.size(), 1);
|
||||
|
||||
auto holder3 = cache.getOrSet(key, 24, 3, false); /// Get [24, 26]
|
||||
auto s3 = fromHolder(holder3);
|
||||
ASSERT_EQ(s3.size(), 1);
|
||||
|
||||
auto holder4 = cache.getOrSet(key, 27, 1, false); /// Get [27, 27]
|
||||
auto s4 = fromHolder(holder4);
|
||||
ASSERT_EQ(s4.size(), 1);
|
||||
|
||||
/// All cache is now unreleasable because pointers are still held
|
||||
auto holder6 = cache.getOrSet(key, 0, 40, false);
|
||||
auto f = fromHolder(holder6);
|
||||
ASSERT_EQ(f.size(), 9);
|
||||
|
||||
assertRange(27, f[0], DB::FileSegment::Range(0, 1), DB::FileSegment::State::EMPTY);
|
||||
assertRange(28, f[2], DB::FileSegment::Range(5, 22), DB::FileSegment::State::EMPTY);
|
||||
assertRange(29, f[6], DB::FileSegment::Range(28, 29), DB::FileSegment::State::EMPTY);
|
||||
assertRange(30, f[8], DB::FileSegment::Range(32, 39), DB::FileSegment::State::EMPTY);
|
||||
|
||||
ASSERT_TRUE(f[0]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(f[2]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(f[6]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(f[8]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
|
||||
ASSERT_FALSE(f[0]->reserve(1));
|
||||
ASSERT_FALSE(f[2]->reserve(1));
|
||||
ASSERT_FALSE(f[6]->reserve(1));
|
||||
ASSERT_FALSE(f[8]->reserve(1));
|
||||
}
|
||||
|
||||
{
|
||||
auto holder = cache.getOrSet(key, 2, 3, false); /// Get [2, 4]
|
||||
auto segments = fromHolder(holder);
|
||||
ASSERT_EQ(segments.size(), 1);
|
||||
assertRange(31, segments[0], DB::FileSegment::Range(2, 4), DB::FileSegment::State::DOWNLOADED);
|
||||
}
|
||||
|
||||
/// Current cache: [___] [_][___][_] [__]
|
||||
/// ^ ^ ^ ^ ^ ^ ^ ^
|
||||
/// 2 4 23 24 26 27 30 31
|
||||
|
||||
{
|
||||
auto holder = cache.getOrSet(key, 25, 5, false); /// Get [25, 29]
|
||||
auto segments = fromHolder(holder);
|
||||
ASSERT_EQ(segments.size(), 3);
|
||||
|
||||
assertRange(32, segments[0], DB::FileSegment::Range(24, 26), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(33, segments[1], DB::FileSegment::Range(27, 27), DB::FileSegment::State::DOWNLOADED);
|
||||
|
||||
assertRange(34, segments[2], DB::FileSegment::Range(28, 29), DB::FileSegment::State::EMPTY);
|
||||
ASSERT_TRUE(segments[2]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(segments[2]->state() == DB::FileSegment::State::DOWNLOADING);
|
||||
|
||||
bool lets_start_download = false;
|
||||
std::mutex mutex;
|
||||
std::condition_variable cv;
|
||||
|
||||
std::thread other_1([&]
|
||||
{
|
||||
DB::ThreadStatus thread_status_1;
|
||||
auto query_context_1 = DB::Context::createCopy(getContext().context);
|
||||
query_context_1->makeQueryContext();
|
||||
query_context_1->setCurrentQueryId("query_id_1");
|
||||
DB::CurrentThread::QueryScope query_scope_holder_1(query_context_1);
|
||||
thread_status_1.attachQueryContext(query_context_1);
|
||||
|
||||
auto holder_2 = cache.getOrSet(key, 25, 5, false); /// Get [25, 29] once again.
|
||||
auto segments_2 = fromHolder(holder_2);
|
||||
auto holder = cache.getOrSet(key, 25, 5, false); /// Get [25, 29]
|
||||
auto segments = fromHolder(holder);
|
||||
ASSERT_EQ(segments.size(), 3);
|
||||
|
||||
assertRange(35, segments_2[0], DB::FileSegment::Range(24, 26), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(36, segments_2[1], DB::FileSegment::Range(27, 27), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(37, segments_2[2], DB::FileSegment::Range(28, 29), DB::FileSegment::State::DOWNLOADING);
|
||||
assertRange(32, segments[0], DB::FileSegment::Range(24, 26), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(33, segments[1], DB::FileSegment::Range(27, 27), DB::FileSegment::State::DOWNLOADED);
|
||||
|
||||
ASSERT_TRUE(segments[2]->getOrSetDownloader() != DB::FileSegment::getCallerId());
|
||||
assertRange(34, segments[2], DB::FileSegment::Range(28, 29), DB::FileSegment::State::EMPTY);
|
||||
ASSERT_TRUE(segments[2]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(segments[2]->state() == DB::FileSegment::State::DOWNLOADING);
|
||||
|
||||
bool lets_start_download = false;
|
||||
std::mutex mutex;
|
||||
std::condition_variable cv;
|
||||
|
||||
std::thread other_1([&]
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
lets_start_download = true;
|
||||
}
|
||||
cv.notify_one();
|
||||
DB::ThreadStatus thread_status_1;
|
||||
auto query_context_1 = DB::Context::createCopy(getContext().context);
|
||||
query_context_1->makeQueryContext();
|
||||
query_context_1->setCurrentQueryId("query_id_1");
|
||||
DB::CurrentThread::QueryScope query_scope_holder_1(query_context_1);
|
||||
thread_status_1.attachQueryContext(query_context_1);
|
||||
|
||||
segments_2[2]->wait();
|
||||
ASSERT_TRUE(segments_2[2]->state() == DB::FileSegment::State::DOWNLOADED);
|
||||
});
|
||||
auto holder_2 = cache.getOrSet(key, 25, 5, false); /// Get [25, 29] once again.
|
||||
auto segments_2 = fromHolder(holder_2);
|
||||
ASSERT_EQ(segments.size(), 3);
|
||||
|
||||
{
|
||||
std::unique_lock lock(mutex);
|
||||
cv.wait(lock, [&]{ return lets_start_download; });
|
||||
}
|
||||
assertRange(35, segments_2[0], DB::FileSegment::Range(24, 26), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(36, segments_2[1], DB::FileSegment::Range(27, 27), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(37, segments_2[2], DB::FileSegment::Range(28, 29), DB::FileSegment::State::DOWNLOADING);
|
||||
|
||||
prepareAndDownload(segments[2]);
|
||||
segments[2]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
ASSERT_TRUE(segments[2]->state() == DB::FileSegment::State::DOWNLOADED);
|
||||
ASSERT_TRUE(segments[2]->getOrSetDownloader() != DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(segments[2]->state() == DB::FileSegment::State::DOWNLOADING);
|
||||
|
||||
other_1.join();
|
||||
}
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
lets_start_download = true;
|
||||
}
|
||||
cv.notify_one();
|
||||
|
||||
/// Current cache: [___] [___][_][__][__]
|
||||
/// ^ ^ ^ ^ ^^ ^^ ^
|
||||
/// 2 4 24 26 27 2930 31
|
||||
|
||||
{
|
||||
/// Now let's check the similar case but getting ERROR state after segment->wait(), when
|
||||
/// state is changed not manually via segment->complete(state) but from destructor of holder
|
||||
/// and notify_all() is also called from destructor of holder.
|
||||
|
||||
std::optional<DB::FileSegmentsHolder> holder;
|
||||
holder.emplace(cache.getOrSet(key, 3, 23, false)); /// Get [3, 25]
|
||||
|
||||
auto segments = fromHolder(*holder);
|
||||
ASSERT_EQ(segments.size(), 3);
|
||||
|
||||
assertRange(38, segments[0], DB::FileSegment::Range(2, 4), DB::FileSegment::State::DOWNLOADED);
|
||||
|
||||
assertRange(39, segments[1], DB::FileSegment::Range(5, 23), DB::FileSegment::State::EMPTY);
|
||||
ASSERT_TRUE(segments[1]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(segments[1]->state() == DB::FileSegment::State::DOWNLOADING);
|
||||
|
||||
assertRange(40, segments[2], DB::FileSegment::Range(24, 26), DB::FileSegment::State::DOWNLOADED);
|
||||
|
||||
bool lets_start_download = false;
|
||||
std::mutex mutex;
|
||||
std::condition_variable cv;
|
||||
|
||||
std::thread other_1([&]
|
||||
{
|
||||
DB::ThreadStatus thread_status_1;
|
||||
auto query_context_1 = DB::Context::createCopy(getContext().context);
|
||||
query_context_1->makeQueryContext();
|
||||
query_context_1->setCurrentQueryId("query_id_1");
|
||||
DB::CurrentThread::QueryScope query_scope_holder_1(query_context_1);
|
||||
thread_status_1.attachQueryContext(query_context_1);
|
||||
|
||||
auto holder_2 = cache.getOrSet(key, 3, 23, false); /// Get [3, 25] once again
|
||||
auto segments_2 = fromHolder(*holder);
|
||||
ASSERT_EQ(segments_2.size(), 3);
|
||||
|
||||
assertRange(41, segments_2[0], DB::FileSegment::Range(2, 4), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(42, segments_2[1], DB::FileSegment::Range(5, 23), DB::FileSegment::State::DOWNLOADING);
|
||||
assertRange(43, segments_2[2], DB::FileSegment::Range(24, 26), DB::FileSegment::State::DOWNLOADED);
|
||||
|
||||
ASSERT_TRUE(segments_2[1]->getDownloader() != DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(segments_2[1]->state() == DB::FileSegment::State::DOWNLOADING);
|
||||
segments_2[2]->wait();
|
||||
ASSERT_TRUE(segments_2[2]->state() == DB::FileSegment::State::DOWNLOADED);
|
||||
});
|
||||
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
lets_start_download = true;
|
||||
std::unique_lock lock(mutex);
|
||||
cv.wait(lock, [&]{ return lets_start_download; });
|
||||
}
|
||||
cv.notify_one();
|
||||
|
||||
segments_2[1]->wait();
|
||||
printRanges(segments_2);
|
||||
ASSERT_TRUE(segments_2[1]->state() == DB::FileSegment::State::PARTIALLY_DOWNLOADED);
|
||||
prepareAndDownload(segments[2]);
|
||||
segments[2]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
ASSERT_TRUE(segments[2]->state() == DB::FileSegment::State::DOWNLOADED);
|
||||
|
||||
ASSERT_TRUE(segments_2[1]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
prepareAndDownload(segments_2[1]);
|
||||
segments_2[1]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
});
|
||||
|
||||
{
|
||||
std::unique_lock lock(mutex);
|
||||
cv.wait(lock, [&]{ return lets_start_download; });
|
||||
other_1.join();
|
||||
}
|
||||
|
||||
holder.reset();
|
||||
other_1.join();
|
||||
printRanges(segments);
|
||||
ASSERT_TRUE(segments[1]->state() == DB::FileSegment::State::DOWNLOADED);
|
||||
/// Current cache: [___] [___][_][__][__]
|
||||
/// ^ ^ ^ ^ ^^ ^^ ^
|
||||
/// 2 4 24 26 27 2930 31
|
||||
|
||||
{
|
||||
/// Now let's check the similar case but getting ERROR state after segment->wait(), when
|
||||
/// state is changed not manually via segment->complete(state) but from destructor of holder
|
||||
/// and notify_all() is also called from destructor of holder.
|
||||
|
||||
std::optional<DB::FileSegmentsHolder> holder;
|
||||
holder.emplace(cache.getOrSet(key, 3, 23, false)); /// Get [3, 25]
|
||||
|
||||
auto segments = fromHolder(*holder);
|
||||
ASSERT_EQ(segments.size(), 3);
|
||||
|
||||
assertRange(38, segments[0], DB::FileSegment::Range(2, 4), DB::FileSegment::State::DOWNLOADED);
|
||||
|
||||
assertRange(39, segments[1], DB::FileSegment::Range(5, 23), DB::FileSegment::State::EMPTY);
|
||||
ASSERT_TRUE(segments[1]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(segments[1]->state() == DB::FileSegment::State::DOWNLOADING);
|
||||
|
||||
assertRange(40, segments[2], DB::FileSegment::Range(24, 26), DB::FileSegment::State::DOWNLOADED);
|
||||
|
||||
bool lets_start_download = false;
|
||||
std::mutex mutex;
|
||||
std::condition_variable cv;
|
||||
|
||||
std::thread other_1([&]
|
||||
{
|
||||
DB::ThreadStatus thread_status_1;
|
||||
auto query_context_1 = DB::Context::createCopy(getContext().context);
|
||||
query_context_1->makeQueryContext();
|
||||
query_context_1->setCurrentQueryId("query_id_1");
|
||||
DB::CurrentThread::QueryScope query_scope_holder_1(query_context_1);
|
||||
thread_status_1.attachQueryContext(query_context_1);
|
||||
|
||||
auto holder_2 = cache.getOrSet(key, 3, 23, false); /// Get [3, 25] once again
|
||||
auto segments_2 = fromHolder(*holder);
|
||||
ASSERT_EQ(segments_2.size(), 3);
|
||||
|
||||
assertRange(41, segments_2[0], DB::FileSegment::Range(2, 4), DB::FileSegment::State::DOWNLOADED);
|
||||
assertRange(42, segments_2[1], DB::FileSegment::Range(5, 23), DB::FileSegment::State::DOWNLOADING);
|
||||
assertRange(43, segments_2[2], DB::FileSegment::Range(24, 26), DB::FileSegment::State::DOWNLOADED);
|
||||
|
||||
ASSERT_TRUE(segments_2[1]->getDownloader() != DB::FileSegment::getCallerId());
|
||||
ASSERT_TRUE(segments_2[1]->state() == DB::FileSegment::State::DOWNLOADING);
|
||||
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
lets_start_download = true;
|
||||
}
|
||||
cv.notify_one();
|
||||
|
||||
segments_2[1]->wait();
|
||||
printRanges(segments_2);
|
||||
ASSERT_TRUE(segments_2[1]->state() == DB::FileSegment::State::PARTIALLY_DOWNLOADED);
|
||||
|
||||
ASSERT_TRUE(segments_2[1]->getOrSetDownloader() == DB::FileSegment::getCallerId());
|
||||
prepareAndDownload(segments_2[1]);
|
||||
segments_2[1]->completeWithState(DB::FileSegment::State::DOWNLOADED);
|
||||
});
|
||||
|
||||
{
|
||||
std::unique_lock lock(mutex);
|
||||
cv.wait(lock, [&]{ return lets_start_download; });
|
||||
}
|
||||
|
||||
holder.reset();
|
||||
other_1.join();
|
||||
printRanges(segments);
|
||||
ASSERT_TRUE(segments[1]->state() == DB::FileSegment::State::DOWNLOADED);
|
||||
}
|
||||
}
|
||||
|
||||
/// Current cache: [___][ ][___][_][__]
|
||||
@ -481,6 +483,7 @@ TEST(FileCache, get)
|
||||
|
||||
auto cache2 = DB::FileCache(cache_base_path, settings);
|
||||
cache2.initialize();
|
||||
auto key = cache2.hash("key1");
|
||||
|
||||
auto holder1 = cache2.getOrSet(key, 2, 28, false); /// Get [2, 29]
|
||||
|
||||
@ -501,6 +504,7 @@ TEST(FileCache, get)
|
||||
settings2.max_file_segment_size = 10;
|
||||
auto cache2 = DB::FileCache(caches_dir / "cache2", settings2);
|
||||
cache2.initialize();
|
||||
auto key = cache2.hash("key1");
|
||||
|
||||
auto holder1 = cache2.getOrSet(key, 0, 25, false); /// Get [0, 24]
|
||||
auto segments1 = fromHolder(holder1);
|
||||
|
@ -55,7 +55,7 @@ void InsertQuerySettingsPushDownMatcher::visit(ASTSelectQuery & select_query, AS
|
||||
insert_settings.push_back(setting);
|
||||
else
|
||||
{
|
||||
/// Do not ovewrite setting that was passed for INSERT
|
||||
/// Do not overwrite setting that was passed for INSERT
|
||||
/// by settings that was passed for SELECT
|
||||
}
|
||||
}
|
||||
|
@ -11,7 +11,7 @@ struct SettingChange;
|
||||
class SettingsChanges;
|
||||
|
||||
/// Pushdown SETTINGS clause that goes after FORMAT to the SELECT query:
|
||||
/// (since settings after FORMAT parsed separatelly not in the ParserSelectQuery but in ParserQueryWithOutput)
|
||||
/// (since settings after FORMAT parsed separately not in the ParserSelectQuery but in ParserQueryWithOutput)
|
||||
///
|
||||
/// SELECT 1 FORMAT Null SETTINGS max_block_size = 1 ->
|
||||
/// SELECT 1 SETTINGS max_block_size = 1 FORMAT Null SETTINGS max_block_size = 1
|
||||
|
198
src/Processors/PingPongProcessor.cpp
Normal file
@ -0,0 +1,198 @@
|
||||
#include <Processors/PingPongProcessor.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
/// Create a list with `num_ports` regular ports and 1 auxiliary port with an empty header.
|
||||
template <typename T> requires std::is_same_v<T, InputPorts> || std::is_same_v<T, OutputPorts>
|
||||
static T createPortsWithSpecial(const Block & header, size_t num_ports)
|
||||
{
|
||||
T res(num_ports, header);
|
||||
res.emplace_back(Block());
|
||||
return res;
|
||||
}
|
||||
|
||||
PingPongProcessor::PingPongProcessor(const Block & header, size_t num_ports, Order order_)
|
||||
: IProcessor(createPortsWithSpecial<InputPorts>(header, num_ports),
|
||||
createPortsWithSpecial<OutputPorts>(header, num_ports))
|
||||
, aux_in_port(inputs.back())
|
||||
, aux_out_port(outputs.back())
|
||||
, order(order_)
|
||||
{
|
||||
assert(order == First || order == Second);
|
||||
|
||||
port_pairs.resize(num_ports);
|
||||
|
||||
auto input_it = inputs.begin();
|
||||
auto output_it = outputs.begin();
|
||||
for (size_t i = 0; i < num_ports; ++i)
|
||||
{
|
||||
port_pairs[i].input_port = &*input_it;
|
||||
++input_it;
|
||||
|
||||
port_pairs[i].output_port = &*output_it;
|
||||
++output_it;
|
||||
}
|
||||
}
|
||||
|
||||
void PingPongProcessor::finishPair(PortsPair & pair)
|
||||
{
|
||||
if (!pair.is_finished)
|
||||
{
|
||||
pair.output_port->finish();
|
||||
pair.input_port->close();
|
||||
|
||||
pair.is_finished = true;
|
||||
++num_finished_pairs;
|
||||
}
|
||||
}
|
||||
|
||||
bool PingPongProcessor::processPair(PortsPair & pair)
|
||||
{
|
||||
if (pair.output_port->isFinished())
|
||||
{
|
||||
finishPair(pair);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (pair.input_port->isFinished())
|
||||
{
|
||||
finishPair(pair);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!pair.output_port->canPush())
|
||||
{
|
||||
pair.input_port->setNotNeeded();
|
||||
return false;
|
||||
}
|
||||
|
||||
pair.input_port->setNeeded();
|
||||
if (pair.input_port->hasData())
|
||||
{
|
||||
Chunk chunk = pair.input_port->pull(true);
|
||||
ready_to_send |= consume(chunk);
|
||||
pair.output_port->push(std::move(chunk));
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PingPongProcessor::isPairsFinished() const
|
||||
{
|
||||
return num_finished_pairs == port_pairs.size();
|
||||
}
|
||||
|
||||
IProcessor::Status PingPongProcessor::processRegularPorts()
|
||||
{
|
||||
if (isPairsFinished())
|
||||
return Status::Finished;
|
||||
|
||||
bool need_data = false;
|
||||
|
||||
for (auto & pair : port_pairs)
|
||||
need_data = processPair(pair) || need_data;
|
||||
|
||||
if (isPairsFinished())
|
||||
return Status::Finished;
|
||||
|
||||
if (need_data)
|
||||
return Status::NeedData;
|
||||
|
||||
return Status::PortFull;
|
||||
}
|
||||
|
||||
bool PingPongProcessor::sendPing()
|
||||
{
|
||||
if (aux_out_port.canPush())
|
||||
{
|
||||
Chunk chunk(aux_out_port.getHeader().cloneEmpty().getColumns(), 0);
|
||||
aux_out_port.push(std::move(chunk));
|
||||
is_send = true;
|
||||
aux_out_port.finish();
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PingPongProcessor::recievePing()
|
||||
{
|
||||
if (aux_in_port.hasData())
|
||||
{
|
||||
aux_in_port.pull();
|
||||
is_received = true;
|
||||
aux_in_port.close();
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool PingPongProcessor::canSend() const
|
||||
{
|
||||
return !is_send && (ready_to_send || isPairsFinished());
|
||||
}
|
||||
|
||||
IProcessor::Status PingPongProcessor::prepare()
|
||||
{
|
||||
if (!set_needed_once && !is_received && !aux_in_port.isFinished())
|
||||
{
|
||||
set_needed_once = true;
|
||||
aux_in_port.setNeeded();
|
||||
}
|
||||
|
||||
if (order == First || is_send)
|
||||
{
|
||||
if (!is_received)
|
||||
{
|
||||
bool received = recievePing();
|
||||
if (!received)
|
||||
{
|
||||
return Status::NeedData;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (order == Second || is_received)
|
||||
{
|
||||
if (!is_send && canSend())
|
||||
{
|
||||
bool sent = sendPing();
|
||||
if (!sent)
|
||||
return Status::PortFull;
|
||||
}
|
||||
}
|
||||
|
||||
auto status = processRegularPorts();
|
||||
if (status == Status::Finished)
|
||||
{
|
||||
if (order == First || is_send)
|
||||
{
|
||||
if (!is_received)
|
||||
{
|
||||
bool received = recievePing();
|
||||
if (!received)
|
||||
{
|
||||
return Status::NeedData;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (order == Second || is_received)
|
||||
{
|
||||
if (!is_send && canSend())
|
||||
{
|
||||
bool sent = sendPing();
|
||||
if (!sent)
|
||||
return Status::PortFull;
|
||||
}
|
||||
}
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
std::pair<InputPort *, OutputPort *> PingPongProcessor::getAuxPorts()
|
||||
{
|
||||
return std::make_pair(&aux_in_port, &aux_out_port);
|
||||
}
|
||||
|
||||
}
|
105
src/Processors/PingPongProcessor.h
Normal file
@ -0,0 +1,105 @@
|
||||
#pragma once
|
||||
|
||||
#include <Processors/IProcessor.h>
|
||||
#include <base/unit.h>
|
||||
#include <Processors/Chunk.h>
|
||||
#include <Common/logger_useful.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
/*
|
||||
* Processor with N inputs and N outputs. Moves data from i-th input to i-th output as is.
|
||||
* It has a pair of auxiliary ports to notify another instance by sending empty chunk after some condition holds.
|
||||
* You should use this processor in a pair of instances and connect the auxiliary ports crosswise.
|
||||
*
|
||||
* ╭─┴───┴───┴───┴───┴─╮ ╭─┴───┴───┴───┴───┴─╮
|
||||
* │ ├─ aux ⟶│ │
|
||||
* │ PingPongProcessor │ │ PingPongProcessor │
|
||||
* │ │⟵ aux ─┤ │
|
||||
* ╰─┬───┬───┬───┬───┬─╯ ╰─┬───┬───┬───┬───┬─╯
|
||||
*
|
||||
* One of the processors starts processing data, and another waits for notification.
|
||||
* When `consume` returns true, the first stops processing, sends a ping to the other instance and waits for notification.
* After that, the second one also processes data until `consume`, then sends a notification back to the first one.
|
||||
* After this roundtrip, processors bypass data from regular inputs to outputs.
|
||||
*/
|
||||
class PingPongProcessor : public IProcessor
|
||||
{
|
||||
public:
|
||||
enum class Order : uint8_t
|
||||
{
|
||||
/// Processor that starts processing data.
|
||||
First,
|
||||
/// Processor that waits for notification.
|
||||
Second,
|
||||
};
|
||||
|
||||
using enum Order;
|
||||
|
||||
PingPongProcessor(const Block & header, size_t num_ports, Order order_);
|
||||
|
||||
Status prepare() override;
|
||||
|
||||
std::pair<InputPort *, OutputPort *> getAuxPorts();
|
||||
|
||||
/// Returns `true` when enough data consumed
|
||||
virtual bool consume(const Chunk & chunk) = 0;
|
||||
|
||||
protected:
|
||||
struct PortsPair
|
||||
{
|
||||
InputPort * input_port = nullptr;
|
||||
OutputPort * output_port = nullptr;
|
||||
bool is_finished = false;
|
||||
};
|
||||
|
||||
bool sendPing();
|
||||
bool recievePing();
|
||||
bool canSend() const;
|
||||
|
||||
bool isPairsFinished() const;
|
||||
bool processPair(PortsPair & pair);
|
||||
void finishPair(PortsPair & pair);
|
||||
Status processRegularPorts();
|
||||
|
||||
std::vector<PortsPair> port_pairs;
|
||||
size_t num_finished_pairs = 0;
|
||||
|
||||
InputPort & aux_in_port;
|
||||
OutputPort & aux_out_port;
|
||||
|
||||
bool is_send = false;
|
||||
bool is_received = false;
|
||||
|
||||
bool ready_to_send = false;
|
||||
|
||||
/// Used to set 'needed' flag once for auxiliary input at first `prepare` call.
|
||||
bool set_needed_once = false;
|
||||
|
||||
Order order;
|
||||
};
|
||||
|
||||
/// Reads first N rows from two streams evenly.
|
||||
class ReadHeadBalancedProcessor : public PingPongProcessor
|
||||
{
|
||||
public:
|
||||
ReadHeadBalancedProcessor(const Block & header, size_t num_ports, size_t size_to_wait_, Order order_)
|
||||
: PingPongProcessor(header, num_ports, order_) , data_consumed(0) , size_to_wait(size_to_wait_)
|
||||
{
|
||||
}
|
||||
|
||||
String getName() const override { return "ReadHeadBalancedProcessor"; }
|
||||
|
||||
bool consume(const Chunk & chunk) override
|
||||
{
|
||||
data_consumed += chunk.getNumRows();
|
||||
return data_consumed > size_to_wait;
|
||||
}
|
||||
|
||||
private:
|
||||
size_t data_consumed;
|
||||
size_t size_to_wait;
|
||||
};
|
||||
|
||||
}
|
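
Not part of the diff — a minimal sketch, assuming the header above and the connect() helper declared in Processors/Port.h, of how two such processors are meant to be paired; connectCrosswise and the variable names are illustrative, not part of this commit.

    #include <Processors/PingPongProcessor.h>

    using namespace DB;

    /// Wire two processors so that each one's auxiliary output feeds the
    /// other's auxiliary input (the "crosswise" connection described above).
    static void connectCrosswise(PingPongProcessor & left, PingPongProcessor & right)
    {
        auto [left_in, left_out] = left.getAuxPorts();
        auto [right_in, right_out] = right.getAuxPorts();

        connect(*left_out, *right_in);   /// left pings right
        connect(*right_out, *left_in);   /// right pings left
    }

    /// Usage sketch: one side starts processing (Order::First), the other waits (Order::Second).
    /// auto left  = std::make_shared<ReadHeadBalancedProcessor>(header, num_ports, rows_to_wait, PingPongProcessor::First);
    /// auto right = std::make_shared<ReadHeadBalancedProcessor>(header, num_ports, rows_to_wait, PingPongProcessor::Second);
    /// connectCrosswise(*left, *right);
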
@ -8,18 +8,18 @@ namespace ErrorCodes
    extern const int LOGICAL_ERROR;
}

void connect(OutputPort & output, InputPort & input)
void connect(OutputPort & output, InputPort & input, bool reconnect)
{
    if (input.state)
    if (!reconnect && input.state)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Port is already connected, (header: [{}])", input.header.dumpStructure());

    if (output.state)
    if (!reconnect && output.state)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Port is already connected, (header: [{}])", output.header.dumpStructure());

    auto out_name = output.getProcessor().getName();
    auto in_name = input.getProcessor().getName();
    auto out_name = output.processor ? output.getProcessor().getName() : "null";
    auto in_name = input.processor ? input.getProcessor().getName() : "null";

    assertCompatibleHeader(output.getHeader(), input.getHeader(), fmt::format(" function connect between {} and {}", out_name, in_name));
    assertCompatibleHeader(output.getHeader(), input.getHeader(), fmt::format("function connect between {} and {}", out_name, in_name));

    input.output_port = &output;
    output.input_port = &input;

@ -25,7 +25,7 @@ namespace ErrorCodes

class Port
{
    friend void connect(OutputPort &, InputPort &);
    friend void connect(OutputPort &, InputPort &, bool);
    friend class IProcessor;

public:
@ -267,7 +267,7 @@ protected:
/// * You can pull only if port hasData().
class InputPort : public Port
{
    friend void connect(OutputPort &, InputPort &);
    friend void connect(OutputPort &, InputPort &, bool);

private:
    OutputPort * output_port = nullptr;
@ -390,7 +390,7 @@ public:
/// * You can push only if port doesn't hasData().
class OutputPort : public Port
{
    friend void connect(OutputPort &, InputPort &);
    friend void connect(OutputPort &, InputPort &, bool);

private:
    InputPort * input_port = nullptr;
@ -483,6 +483,6 @@ using InputPorts = std::list<InputPort>;
using OutputPorts = std::list<OutputPort>;


void connect(OutputPort & output, InputPort & input);
void connect(OutputPort & output, InputPort & input, bool reconnect = false);

}
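
Not part of the diff — a minimal sketch, assuming the ClickHouse source tree and the reconnect overload introduced above, of why the flag exists: an output can be parked on a placeholder input and later re-pointed at its real consumer, which is what the dummy_input_port in CreateSetAndFilterOnTheFlyStep (further down) relies on. parkOutput/reattachOutput and their parameters are illustrative names.

    #include <Processors/Port.h>
    #include <memory>

    using namespace DB;

    /// Park `out` on a placeholder input owned by `dummy_holder` (kept alive by
    /// the caller, as CrosswiseConnection does with its member), so the pipeline
    /// is fully connected even before the real consumer exists.
    void parkOutput(OutputPort & out, std::unique_ptr<InputPort> & dummy_holder, IProcessor * owner)
    {
        dummy_holder = std::make_unique<InputPort>(out.getHeader(), owner);
        connect(out, *dummy_holder);
    }

    /// Later, when the real consumer appears, re-point the parked output to it.
    /// Without `reconnect = true` this would throw LOGICAL_ERROR, because `out`
    /// already carries connection state from the dummy port.
    void reattachOutput(OutputPort & out, InputPort & real_in)
    {
        connect(out, real_in, /* reconnect= */ true);
    }
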
@ -182,6 +182,7 @@ void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B
            transform_params->params.min_free_disk_space,
            transform_params->params.compile_aggregate_expressions,
            transform_params->params.min_count_to_compile_aggregate_expression,
            transform_params->params.max_block_size,
            /* only_merge */ false,
            transform_params->params.stats_collecting_params};
        auto transform_params_for_set = std::make_shared<AggregatingTransformParams>(src_header, std::move(params_for_set), final);
@ -376,16 +377,15 @@ void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B
        });

        /// We add the explicit resize here, but not in case of aggregating in order, since AIO doesn't use two-level hash tables and thus returns only buckets with bucket_number = -1.
        pipeline.resize(should_produce_results_in_order_of_bucket_number ? 1 : pipeline.getNumStreams(), true /* force */);
        pipeline.resize(should_produce_results_in_order_of_bucket_number ? 1 : params.max_threads, true /* force */);

        aggregating = collector.detachProcessors(0);
    }
    else
    {
        pipeline.addSimpleTransform([&](const Block & header)
        {
            return std::make_shared<AggregatingTransform>(header, transform_params);
        });
        pipeline.addSimpleTransform([&](const Block & header) { return std::make_shared<AggregatingTransform>(header, transform_params); });

        pipeline.resize(should_produce_results_in_order_of_bucket_number ? 1 : params.max_threads, false /* force */);

        aggregating = collector.detachProcessors(0);
    }

205
src/Processors/QueryPlan/CreateSetAndFilterOnTheFlyStep.cpp
Normal file
@ -0,0 +1,205 @@
#include <Processors/QueryPlan/CreateSetAndFilterOnTheFlyStep.h>
#include <Processors/Transforms/CreateSetAndFilterOnTheFlyTransform.h>

#include <QueryPipeline/QueryPipelineBuilder.h>
#include <IO/Operators.h>
#include <Common/JSONBuilder.h>
#include <Core/ColumnWithTypeAndName.h>
#include <Core/ColumnsWithTypeAndName.h>
#include <Processors/IProcessor.h>
#include <Processors/PingPongProcessor.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
}

static void connectAllInputs(OutputPortRawPtrs ports, InputPorts & inputs, size_t num_ports)
{
    auto input_it = inputs.begin();
    for (size_t i = 0; i < num_ports; ++i)
    {
        connect(*ports[i], *input_it);
        input_it++;
    }
}

static ColumnsWithTypeAndName getColumnSubset(const Block & block, const Names & column_names)
{
    ColumnsWithTypeAndName result;
    for (const auto & name : column_names)
        result.emplace_back(block.getByName(name));
    return result;
}

static ITransformingStep::Traits getTraits()
{
    return ITransformingStep::Traits
    {
        {
            .preserves_distinct_columns = true,
            .returns_single_stream = false,
            .preserves_number_of_streams = true,
            .preserves_sorting = true,
        },
        {
            .preserves_number_of_rows = false,
        }
    };
}

class CreateSetAndFilterOnTheFlyStep::CrosswiseConnection : public boost::noncopyable
{
public:
    using PortPair = std::pair<InputPort *, OutputPort *>;

    /// Remember ports passed on the first call and connect with ones from second call.
    /// Thread-safe.
    void connectPorts(PortPair rhs_ports, IProcessor * proc)
    {
        assert(!rhs_ports.first->isConnected() && !rhs_ports.second->isConnected());

        std::lock_guard<std::mutex> lock(mux);
        if (input_port || output_port)
        {
            assert(input_port && output_port);
            assert(!input_port->isConnected());
            connect(*rhs_ports.second, *input_port);
            connect(*output_port, *rhs_ports.first, /* reconnect= */ true);
        }
        else
        {
            std::tie(input_port, output_port) = rhs_ports;
            assert(input_port && output_port);
            assert(!input_port->isConnected() && !output_port->isConnected());

            dummy_input_port = std::make_unique<InputPort>(output_port->getHeader(), proc);
            connect(*output_port, *dummy_input_port);
        }
    }

private:
    std::mutex mux;
    InputPort * input_port = nullptr;
    OutputPort * output_port = nullptr;

    /// Output ports should always be connected, and we can't add a step to the pipeline without them.
    /// So, connect the port from the first processor to this dummy port and then reconnect to the second processor.
    std::unique_ptr<InputPort> dummy_input_port;
};

CreateSetAndFilterOnTheFlyStep::CrosswiseConnectionPtr CreateSetAndFilterOnTheFlyStep::createCrossConnection()
{
    return std::make_shared<CreateSetAndFilterOnTheFlyStep::CrosswiseConnection>();
}

CreateSetAndFilterOnTheFlyStep::CreateSetAndFilterOnTheFlyStep(
    const DataStream & input_stream_,
    const Names & column_names_,
    size_t max_rows_in_set_,
    CrosswiseConnectionPtr crosswise_connection_,
    JoinTableSide position_)
    : ITransformingStep(input_stream_, input_stream_.header, getTraits())
    , column_names(column_names_)
    , max_rows_in_set(max_rows_in_set_)
    , own_set(std::make_shared<SetWithState>(SizeLimits(max_rows_in_set, 0, OverflowMode::BREAK), false, true))
    , filtering_set(nullptr)
    , crosswise_connection(crosswise_connection_)
    , position(position_)
{
    if (crosswise_connection == nullptr)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Crosswise connection is not initialized");

    if (input_streams.size() != 1)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Step requires exactly one input stream, got {}", input_streams.size());

    own_set->setHeader(getColumnSubset(input_streams[0].header, column_names));
}

void CreateSetAndFilterOnTheFlyStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &)
{
    size_t num_streams = pipeline.getNumStreams();
    pipeline.addSimpleTransform([this, num_streams](const Block & header, QueryPipelineBuilder::StreamType stream_type) -> ProcessorPtr
    {
        if (stream_type != QueryPipelineBuilder::StreamType::Main)
            return nullptr;
        auto res = std::make_shared<CreatingSetsOnTheFlyTransform>(header, column_names, num_streams, own_set);
        res->setDescription(this->getStepDescription());
        return res;
    });

    Block input_header = pipeline.getHeader();
    auto pipeline_transform = [&input_header, this](OutputPortRawPtrs ports)
    {
        Processors result_transforms;

        size_t num_ports = ports.size();

        /// Add balancing transform
        auto idx = position == JoinTableSide::Left ? PingPongProcessor::First : PingPongProcessor::Second;
        auto stream_balancer = std::make_shared<ReadHeadBalancedProcessor>(input_header, num_ports, max_rows_in_set, idx);
        stream_balancer->setDescription(getStepDescription());

        /// Regular inputs just bypass data for respective ports
        connectAllInputs(ports, stream_balancer->getInputs(), num_ports);

        /// Connect auxiliary ports
        crosswise_connection->connectPorts(stream_balancer->getAuxPorts(), stream_balancer.get());

        if (!filtering_set)
        {
            LOG_DEBUG(log, "Skip filtering {} stream", position);
            result_transforms.emplace_back(std::move(stream_balancer));
            return result_transforms;
        }

        /// Add filtering transform, ports just connected respectively
        auto & outputs = stream_balancer->getOutputs();
        auto output_it = outputs.begin();
        for (size_t i = 0; i < outputs.size() - 1; ++i)
        {
            auto & port = *output_it++;
            auto transform = std::make_shared<FilterBySetOnTheFlyTransform>(port.getHeader(), column_names, filtering_set);
            transform->setDescription(this->getStepDescription());
            connect(port, transform->getInputPort());
            result_transforms.emplace_back(std::move(transform));
        }
        assert(output_it == std::prev(outputs.end()));
        result_transforms.emplace_back(std::move(stream_balancer));

        return result_transforms;
    };

    /// Auxiliary port stream_balancer can be connected later (by crosswise_connection).
    /// So, use unsafe `transform` with `check_ports = false` to avoid assertions
    pipeline.transform(std::move(pipeline_transform), /* check_ports= */ false);
}

void CreateSetAndFilterOnTheFlyStep::describeActions(JSONBuilder::JSONMap & map) const
{
    map.add(getName(), true);
}

void CreateSetAndFilterOnTheFlyStep::describeActions(FormatSettings & settings) const
{
    String prefix(settings.offset, ' ');
    settings.out << prefix << getName();

    settings.out << '\n';
}

void CreateSetAndFilterOnTheFlyStep::updateOutputStream()
{
    if (input_streams.size() != 1)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "{} requires exactly one input stream, got {}", getName(), input_streams.size());

    own_set->setHeader(getColumnSubset(input_streams[0].header, column_names));

    output_stream = input_streams[0];
}

}
59
src/Processors/QueryPlan/CreateSetAndFilterOnTheFlyStep.h
Normal file
@ -0,0 +1,59 @@
#pragma once
#include <Processors/QueryPlan/ITransformingStep.h>
#include <Processors/Transforms/CreateSetAndFilterOnTheFlyTransform.h>
#include <Processors/DelayedPortsProcessor.h>


namespace DB
{

/*
 * Used to optimize JOIN when a small table is joined with a large one.
 * Currently applied only for the full sorting join.
 * It tries to build a set for each stream.
 * Once one stream is finished, it starts to filter the other stream with this set.
 */
class CreateSetAndFilterOnTheFlyStep : public ITransformingStep
{
public:
    /// Two instances of this step need some shared state to connect processors crosswise.
    class CrosswiseConnection;
    using CrosswiseConnectionPtr = std::shared_ptr<CrosswiseConnection>;
    static CrosswiseConnectionPtr createCrossConnection();

    CreateSetAndFilterOnTheFlyStep(
        const DataStream & input_stream_,
        const Names & column_names_,
        size_t max_rows_in_set_,
        CrosswiseConnectionPtr crosswise_connection_,
        JoinTableSide position_);

    String getName() const override { return "CreateSetAndFilterOnTheFlyStep"; }
    void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) override;

    void describeActions(JSONBuilder::JSONMap & map) const override;
    void describeActions(FormatSettings & settings) const override;

    SetWithStatePtr getSet() const { return own_set; }

    /// Set built for the other stream.
    void setFiltering(SetWithStatePtr filtering_set_) { filtering_set = filtering_set_; }

private:
    void updateOutputStream() override;

    Names column_names;

    size_t max_rows_in_set;

    SetWithStatePtr own_set;
    SetWithStatePtr filtering_set;

    CrosswiseConnectionPtr crosswise_connection;

    JoinTableSide position;

    Poco::Logger * log = &Poco::Logger::get("CreateSetAndFilterOnTheFlyStep");
};

}
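
Not part of the diff — a sketch of how a pair of these steps might be wired into the left and right sub-plans of a full sorting join, assuming the usual QueryPlan::getCurrentDataStream()/addStep() API; the function name, left_plan/right_plan, the key-name lists and max_rows_in_set are placeholders, only the step's own API is taken from the code above.

    #include <Processors/QueryPlan/CreateSetAndFilterOnTheFlyStep.h>
    #include <Processors/QueryPlan/QueryPlan.h>
    #include <memory>

    using namespace DB;

    /// Assumed context: `left_plan` / `right_plan` are the two sub-plans of the join,
    /// `left_keys` / `right_keys` are the join key column names on each side.
    void addRowFilteringSteps(QueryPlan & left_plan, QueryPlan & right_plan,
                              const Names & left_keys, const Names & right_keys,
                              size_t max_rows_in_set)
    {
        /// One shared connection object, so the two steps can cross their aux ports.
        auto crosswise_connection = CreateSetAndFilterOnTheFlyStep::createCrossConnection();

        auto add_set_step = [&](QueryPlan & plan, const Names & keys, JoinTableSide side)
        {
            auto step = std::make_unique<CreateSetAndFilterOnTheFlyStep>(
                plan.getCurrentDataStream(), keys, max_rows_in_set, crosswise_connection, side);
            auto * raw = step.get();
            plan.addStep(std::move(step));
            return raw;
        };

        auto * left_set = add_set_step(left_plan, left_keys, JoinTableSide::Left);
        auto * right_set = add_set_step(right_plan, right_keys, JoinTableSide::Right);

        /// Each side filters by the set that the opposite side is building.
        left_set->setFiltering(right_set->getSet());
        right_set->setFiltering(left_set->getSet());
    }
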
@ -108,7 +108,7 @@ void DistinctStep::transformPipeline(QueryPipelineBuilder & pipeline, const Buil
        return;
    }
    /// final distinct for sorted stream (sorting inside and among chunks)
    if (input_stream.sort_mode == DataStream::SortMode::Stream)
    if (input_stream.sort_scope == DataStream::SortScope::Global)
    {
        assert(input_stream.has_single_port);

@ -31,18 +31,18 @@ public:
    /// QueryPipeline has single port. Totals or extremes ports are not counted.
    bool has_single_port = false;

    /// How data is sorted.
    enum class SortMode
    /// Sorting scope
    enum class SortScope
    {
        None,
        Chunk, /// Separate chunks are sorted
        Port, /// Data from each port is sorted
        Stream, /// Data is globally sorted
        Stream, /// Each data stream is sorted
        Global, /// Data is globally sorted
    };

    /// It is not guaranteed that header has columns from sort_description.
    SortDescription sort_description = {};
    SortMode sort_mode = SortMode::None;
    SortScope sort_scope = SortScope::None;

    /// Things which may be added:
    /// * limit
@ -54,7 +54,7 @@ public:
        return distinct_columns == other.distinct_columns
            && has_single_port == other.has_single_port
            && sort_description == other.sort_description
            && (sort_description.empty() || sort_mode == other.sort_mode);
            && (sort_description.empty() || sort_scope == other.sort_scope);
    }

    bool hasEqualHeaderWith(const DataStream & other) const

@ -29,7 +29,7 @@ DataStream ITransformingStep::createOutputStream(
    if (stream_traits.preserves_sorting)
    {
        output_stream.sort_description = input_stream.sort_description;
        output_stream.sort_mode = input_stream.sort_mode;
        output_stream.sort_scope = input_stream.sort_scope;
    }

    return output_stream;

@ -34,8 +34,12 @@ QueryPipelineBuilderPtr JoinStep::updatePipeline(QueryPipelineBuilders pipelines
        throw Exception(ErrorCodes::LOGICAL_ERROR, "JoinStep expect two input steps");

    if (join->pipelineType() == JoinPipelineType::YShaped)
        return QueryPipelineBuilder::joinPipelinesYShaped(
    {
        auto joined_pipeline = QueryPipelineBuilder::joinPipelinesYShaped(
            std::move(pipelines[0]), std::move(pipelines[1]), join, output_stream->header, max_block_size, &processors);
        joined_pipeline->resize(max_streams);
        return joined_pipeline;
    }

    return QueryPipelineBuilder::joinPipelinesRightLeft(
        std::move(pipelines[0]),

@ -8,6 +8,7 @@
#include <Processors/QueryPlan/Optimizations/Optimizations.h>
#include <Processors/QueryPlan/ITransformingStep.h>
#include <Processors/QueryPlan/FilterStep.h>
#include <Processors/QueryPlan/CreateSetAndFilterOnTheFlyStep.h>
#include <Processors/QueryPlan/AggregatingStep.h>
#include <Processors/QueryPlan/ExpressionStep.h>
#include <Processors/QueryPlan/JoinStep.h>
@ -22,6 +23,7 @@
#include <Interpreters/ActionsDAG.h>
#include <Interpreters/ArrayJoinAction.h>
#include <Interpreters/TableJoin.h>
#include <fmt/format.h>

namespace DB::ErrorCodes
{
@ -134,10 +136,24 @@ tryAddNewFilterStep(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes, con

static size_t
tryAddNewFilterStep(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes, const Names & allowed_inputs,
    bool can_remove_filter = true)
    bool can_remove_filter = true, size_t child_idx = 0)
{
    if (auto split_filter = splitFilter(parent_node, allowed_inputs, 0))
        return tryAddNewFilterStep(parent_node, nodes, split_filter, can_remove_filter, 0);
    if (auto split_filter = splitFilter(parent_node, allowed_inputs, child_idx))
        return tryAddNewFilterStep(parent_node, nodes, split_filter, can_remove_filter, child_idx);
    return 0;
}


/// Push down filter through specified type of step
template <typename Step>
static size_t simplePushDownOverStep(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes, QueryPlanStepPtr & child)
{
    if (typeid_cast<Step *>(child.get()))
    {
        Names allowed_inputs = child->getOutputStream().header.getNames();
        if (auto updated_steps = tryAddNewFilterStep(parent_node, nodes, allowed_inputs))
            return updated_steps;
    }
    return 0;
}

@ -234,12 +250,8 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes
        return updated_steps;
    }

    if (auto * distinct = typeid_cast<DistinctStep *>(child.get()))
    {
        Names allowed_inputs = distinct->getOutputStream().header.getNames();
        if (auto updated_steps = tryAddNewFilterStep(parent_node, nodes, allowed_inputs))
            return updated_steps;
    }
    if (auto updated_steps = simplePushDownOverStep<DistinctStep>(parent_node, nodes, child))
        return updated_steps;

    if (auto * join = typeid_cast<JoinStep *>(child.get()))
    {
@ -290,7 +302,7 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes
        const size_t updated_steps = tryAddNewFilterStep(parent_node, nodes, split_filter, can_remove_filter, child_idx);
        if (updated_steps > 0)
        {
            LOG_DEBUG(&Poco::Logger::get("QueryPlanOptimizations"), "Pushed down filter to {} side of join", kind);
            LOG_DEBUG(&Poco::Logger::get("QueryPlanOptimizations"), "Pushed down filter {} to the {} side of join", split_filter_column_name, kind);
        }
        return updated_steps;
    };
@ -321,12 +333,11 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes
    // {
    // }

    if (typeid_cast<SortingStep *>(child.get()))
    {
        Names allowed_inputs = child->getOutputStream().header.getNames();
        if (auto updated_steps = tryAddNewFilterStep(parent_node, nodes, allowed_inputs))
            return updated_steps;
    }
    if (auto updated_steps = simplePushDownOverStep<SortingStep>(parent_node, nodes, child))
        return updated_steps;

    if (auto updated_steps = simplePushDownOverStep<CreateSetAndFilterOnTheFlyStep>(parent_node, nodes, child))
        return updated_steps;

    if (auto * union_step = typeid_cast<UnionStep *>(child.get()))
    {

@ -333,8 +333,8 @@ static void explainStep(
{
    if (step.hasOutputStream())
    {
        settings.out << prefix << "Sorting (" << step.getOutputStream().sort_mode << ")";
        if (step.getOutputStream().sort_mode != DataStream::SortMode::None)
        settings.out << prefix << "Sorting (" << step.getOutputStream().sort_scope << ")";
        if (step.getOutputStream().sort_scope != DataStream::SortScope::None)
        {
            settings.out << ": ";
            dumpSortDescription(step.getOutputStream().sort_description, settings.out);

Some files were not shown because too many files have changed in this diff.