Merge branch 'master' of github.com:ClickHouse/ClickHouse into query_plan_for_merge

Alexander Gololobov 2024-09-10 18:27:59 +02:00
commit bb82465a5f
215 changed files with 3760 additions and 1091 deletions

CITATION.cff (new file, 31 lines added)

@ -0,0 +1,31 @@
# This CITATION.cff file was generated with cffinit.
cff-version: 1.2.0
title: "ClickHouse"
message: "If you use this software, please cite it as below."
type: software
authors:
- family-names: "Milovidov"
given-names: "Alexey"
repository-code: 'https://github.com/ClickHouse/ClickHouse'
url: 'https://clickhouse.com'
license: Apache-2.0
preferred-citation:
type: article
authors:
- family-names: "Schulze"
given-names: "Robert"
- family-names: "Schreiber"
given-names: "Tom"
- family-names: "Yatsishin"
given-names: "Ilya"
- family-names: "Dahimene"
given-names: "Ryadh"
- family-names: "Milovidov"
given-names: "Alexey"
journal: "Proceedings of the VLDB Endowment"
title: "ClickHouse - Lightning Fast Analytics for Everyone"
year: 2024
volume: 17
issue: 12
doi: 10.14778/3685800.3685802


@ -42,21 +42,19 @@ Keep an eye out for upcoming meetups and events around the world. Somewhere else
The following upcoming meetups are featuring the creator of ClickHouse & CTO, Alexey Milovidov:
* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25
* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5
* [Raleigh Meetup (Deutsche Bank)](https://www.meetup.com/triangletechtalks/events/302723486/) - September 9
* [New York Meetup (Rokt)](https://www.meetup.com/clickhouse-new-york-user-group/events/302575342) - September 10
* [Chicago Meetup (Jump Capital)](https://lu.ma/43tvmrfw) - September 12
Other upcoming meetups
* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27
* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27
* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5
* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5
* [Toronto Meetup (Shopify)](https://www.meetup.com/clickhouse-toronto-user-group/events/301490855/) - September 10
* [Austin Meetup](https://www.meetup.com/clickhouse-austin-user-group/events/302558689/) - September 17
* [London Meetup](https://www.meetup.com/clickhouse-london-user-group/events/302977267) - September 17
* [Bangalore Meetup](https://www.meetup.com/clickhouse-bangalore-user-group/events/303208274/) - September 18
* [Tel Aviv Meetup](https://www.meetup.com/clickhouse-meetup-israel/events/303095121) - September 22
* [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1
* [Singapore Meetup](https://www.meetup.com/clickhouse-singapore-meetup-group/events/303212064/) - October 3
* [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22
* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - October 29
* [Oslo Meetup](https://www.meetup.com/open-source-real-time-data-warehouse-real-time-analytics/events/302938622) - October 31
@ -64,7 +62,13 @@ Other upcoming meetups
* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
* [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26
Recently completed events
* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25
* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27
* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27
* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5
* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5
* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5
## Recent Recordings
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"

contrib/curl vendored (2 lines changed)

@ -1 +1 @@
Subproject commit de7b3e89218467159a7af72d58cea8425946e97d
Subproject commit 83bedbd730d62b83744cc26fa0433d3f6e2e4cd6

contrib/grpc vendored (2 lines changed)

@ -1 +1 @@
Subproject commit 1716359d2e28d304a250f9df0e6c0ccad03de8db
Subproject commit 7bc3abe952aba1dc7bce7f2f790dc781cb51a41e


@ -15,162 +15,64 @@ set(ICUDATA_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icudata/")
# These lists of sources were generated from build log of the original ICU build system (configure + make).
set(ICUUC_SOURCES
"${ICU_SOURCE_DIR}/common/errorcode.cpp"
"${ICU_SOURCE_DIR}/common/putil.cpp"
"${ICU_SOURCE_DIR}/common/umath.cpp"
"${ICU_SOURCE_DIR}/common/utypes.cpp"
"${ICU_SOURCE_DIR}/common/uinvchar.cpp"
"${ICU_SOURCE_DIR}/common/umutex.cpp"
"${ICU_SOURCE_DIR}/common/ucln_cmn.cpp"
"${ICU_SOURCE_DIR}/common/uinit.cpp"
"${ICU_SOURCE_DIR}/common/uobject.cpp"
"${ICU_SOURCE_DIR}/common/cmemory.cpp"
"${ICU_SOURCE_DIR}/common/charstr.cpp"
"${ICU_SOURCE_DIR}/common/cstr.cpp"
"${ICU_SOURCE_DIR}/common/udata.cpp"
"${ICU_SOURCE_DIR}/common/ucmndata.cpp"
"${ICU_SOURCE_DIR}/common/udatamem.cpp"
"${ICU_SOURCE_DIR}/common/umapfile.cpp"
"${ICU_SOURCE_DIR}/common/udataswp.cpp"
"${ICU_SOURCE_DIR}/common/utrie_swap.cpp"
"${ICU_SOURCE_DIR}/common/ucol_swp.cpp"
"${ICU_SOURCE_DIR}/common/utrace.cpp"
"${ICU_SOURCE_DIR}/common/uhash.cpp"
"${ICU_SOURCE_DIR}/common/uhash_us.cpp"
"${ICU_SOURCE_DIR}/common/uenum.cpp"
"${ICU_SOURCE_DIR}/common/ustrenum.cpp"
"${ICU_SOURCE_DIR}/common/uvector.cpp"
"${ICU_SOURCE_DIR}/common/ustack.cpp"
"${ICU_SOURCE_DIR}/common/uvectr32.cpp"
"${ICU_SOURCE_DIR}/common/uvectr64.cpp"
"${ICU_SOURCE_DIR}/common/ucnv.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_bld.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_io.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_cb.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_err.cpp"
"${ICU_SOURCE_DIR}/common/ucnvlat1.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u7.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u8.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u16.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u32.cpp"
"${ICU_SOURCE_DIR}/common/ucnvscsu.cpp"
"${ICU_SOURCE_DIR}/common/ucnvbocu.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_ext.cpp"
"${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp"
"${ICU_SOURCE_DIR}/common/ucnv2022.cpp"
"${ICU_SOURCE_DIR}/common/ucnvhz.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp"
"${ICU_SOURCE_DIR}/common/ucnvisci.cpp"
"${ICU_SOURCE_DIR}/common/ucnvdisp.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_set.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_ct.cpp"
"${ICU_SOURCE_DIR}/common/resource.cpp"
"${ICU_SOURCE_DIR}/common/uresbund.cpp"
"${ICU_SOURCE_DIR}/common/ures_cnv.cpp"
"${ICU_SOURCE_DIR}/common/uresdata.cpp"
"${ICU_SOURCE_DIR}/common/resbund.cpp"
"${ICU_SOURCE_DIR}/common/resbund_cnv.cpp"
"${ICU_SOURCE_DIR}/common/ucurr.cpp"
"${ICU_SOURCE_DIR}/common/localebuilder.cpp"
"${ICU_SOURCE_DIR}/common/localeprioritylist.cpp"
"${ICU_SOURCE_DIR}/common/messagepattern.cpp"
"${ICU_SOURCE_DIR}/common/ucat.cpp"
"${ICU_SOURCE_DIR}/common/locmap.cpp"
"${ICU_SOURCE_DIR}/common/uloc.cpp"
"${ICU_SOURCE_DIR}/common/locid.cpp"
"${ICU_SOURCE_DIR}/common/locutil.cpp"
"${ICU_SOURCE_DIR}/common/locavailable.cpp"
"${ICU_SOURCE_DIR}/common/locdispnames.cpp"
"${ICU_SOURCE_DIR}/common/locdspnm.cpp"
"${ICU_SOURCE_DIR}/common/loclikely.cpp"
"${ICU_SOURCE_DIR}/common/locresdata.cpp"
"${ICU_SOURCE_DIR}/common/lsr.cpp"
"${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp"
"${ICU_SOURCE_DIR}/common/locdistance.cpp"
"${ICU_SOURCE_DIR}/common/localematcher.cpp"
"${ICU_SOURCE_DIR}/common/bytestream.cpp"
"${ICU_SOURCE_DIR}/common/stringpiece.cpp"
"${ICU_SOURCE_DIR}/common/bytesinkutil.cpp"
"${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp"
"${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp"
"${ICU_SOURCE_DIR}/common/bytestrie.cpp"
"${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp"
"${ICU_SOURCE_DIR}/common/ucharstrie.cpp"
"${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp"
"${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp"
"${ICU_SOURCE_DIR}/common/dictionarydata.cpp"
"${ICU_SOURCE_DIR}/common/edits.cpp"
"${ICU_SOURCE_DIR}/common/appendable.cpp"
"${ICU_SOURCE_DIR}/common/ustr_cnv.cpp"
"${ICU_SOURCE_DIR}/common/unistr_cnv.cpp"
"${ICU_SOURCE_DIR}/common/unistr.cpp"
"${ICU_SOURCE_DIR}/common/unistr_case.cpp"
"${ICU_SOURCE_DIR}/common/unistr_props.cpp"
"${ICU_SOURCE_DIR}/common/utf_impl.cpp"
"${ICU_SOURCE_DIR}/common/ustring.cpp"
"${ICU_SOURCE_DIR}/common/ustrcase.cpp"
"${ICU_SOURCE_DIR}/common/ucasemap.cpp"
"${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp"
"${ICU_SOURCE_DIR}/common/cstring.cpp"
"${ICU_SOURCE_DIR}/common/ustrfmt.cpp"
"${ICU_SOURCE_DIR}/common/ustrtrns.cpp"
"${ICU_SOURCE_DIR}/common/ustr_wcs.cpp"
"${ICU_SOURCE_DIR}/common/utext.cpp"
"${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp"
"${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp"
"${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp"
"${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp"
"${ICU_SOURCE_DIR}/common/normalizer2impl.cpp"
"${ICU_SOURCE_DIR}/common/normalizer2.cpp"
"${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp"
"${ICU_SOURCE_DIR}/common/normlzr.cpp"
"${ICU_SOURCE_DIR}/common/unorm.cpp"
"${ICU_SOURCE_DIR}/common/unormcmp.cpp"
"${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp"
"${ICU_SOURCE_DIR}/common/chariter.cpp"
"${ICU_SOURCE_DIR}/common/schriter.cpp"
"${ICU_SOURCE_DIR}/common/uchriter.cpp"
"${ICU_SOURCE_DIR}/common/uiter.cpp"
"${ICU_SOURCE_DIR}/common/patternprops.cpp"
"${ICU_SOURCE_DIR}/common/uchar.cpp"
"${ICU_SOURCE_DIR}/common/uprops.cpp"
"${ICU_SOURCE_DIR}/common/ucase.cpp"
"${ICU_SOURCE_DIR}/common/propname.cpp"
"${ICU_SOURCE_DIR}/common/ubidi_props.cpp"
"${ICU_SOURCE_DIR}/common/characterproperties.cpp"
"${ICU_SOURCE_DIR}/common/ubidi.cpp"
"${ICU_SOURCE_DIR}/common/ubidiwrt.cpp"
"${ICU_SOURCE_DIR}/common/ubidiln.cpp"
"${ICU_SOURCE_DIR}/common/ushape.cpp"
"${ICU_SOURCE_DIR}/common/uscript.cpp"
"${ICU_SOURCE_DIR}/common/uscript_props.cpp"
"${ICU_SOURCE_DIR}/common/usc_impl.cpp"
"${ICU_SOURCE_DIR}/common/unames.cpp"
"${ICU_SOURCE_DIR}/common/utrie.cpp"
"${ICU_SOURCE_DIR}/common/utrie2.cpp"
"${ICU_SOURCE_DIR}/common/utrie2_builder.cpp"
"${ICU_SOURCE_DIR}/common/ucptrie.cpp"
"${ICU_SOURCE_DIR}/common/umutablecptrie.cpp"
"${ICU_SOURCE_DIR}/common/bmpset.cpp"
"${ICU_SOURCE_DIR}/common/unisetspan.cpp"
"${ICU_SOURCE_DIR}/common/uset_props.cpp"
"${ICU_SOURCE_DIR}/common/uniset_props.cpp"
"${ICU_SOURCE_DIR}/common/uniset_closure.cpp"
"${ICU_SOURCE_DIR}/common/uset.cpp"
"${ICU_SOURCE_DIR}/common/uniset.cpp"
"${ICU_SOURCE_DIR}/common/usetiter.cpp"
"${ICU_SOURCE_DIR}/common/ruleiter.cpp"
"${ICU_SOURCE_DIR}/common/caniter.cpp"
"${ICU_SOURCE_DIR}/common/unifilt.cpp"
"${ICU_SOURCE_DIR}/common/unifunct.cpp"
"${ICU_SOURCE_DIR}/common/uarrsort.cpp"
"${ICU_SOURCE_DIR}/common/brkiter.cpp"
"${ICU_SOURCE_DIR}/common/ubrk.cpp"
"${ICU_SOURCE_DIR}/common/brkeng.cpp"
"${ICU_SOURCE_DIR}/common/brkiter.cpp"
"${ICU_SOURCE_DIR}/common/bytesinkutil.cpp"
"${ICU_SOURCE_DIR}/common/bytestream.cpp"
"${ICU_SOURCE_DIR}/common/bytestrie.cpp"
"${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp"
"${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp"
"${ICU_SOURCE_DIR}/common/caniter.cpp"
"${ICU_SOURCE_DIR}/common/characterproperties.cpp"
"${ICU_SOURCE_DIR}/common/chariter.cpp"
"${ICU_SOURCE_DIR}/common/charstr.cpp"
"${ICU_SOURCE_DIR}/common/cmemory.cpp"
"${ICU_SOURCE_DIR}/common/cstr.cpp"
"${ICU_SOURCE_DIR}/common/cstring.cpp"
"${ICU_SOURCE_DIR}/common/cwchar.cpp"
"${ICU_SOURCE_DIR}/common/dictbe.cpp"
"${ICU_SOURCE_DIR}/common/dictionarydata.cpp"
"${ICU_SOURCE_DIR}/common/dtintrv.cpp"
"${ICU_SOURCE_DIR}/common/edits.cpp"
"${ICU_SOURCE_DIR}/common/emojiprops.cpp"
"${ICU_SOURCE_DIR}/common/errorcode.cpp"
"${ICU_SOURCE_DIR}/common/filteredbrk.cpp"
"${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp"
"${ICU_SOURCE_DIR}/common/icudataver.cpp"
"${ICU_SOURCE_DIR}/common/icuplug.cpp"
"${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp"
"${ICU_SOURCE_DIR}/common/localebuilder.cpp"
"${ICU_SOURCE_DIR}/common/localematcher.cpp"
"${ICU_SOURCE_DIR}/common/localeprioritylist.cpp"
"${ICU_SOURCE_DIR}/common/locavailable.cpp"
"${ICU_SOURCE_DIR}/common/locbased.cpp"
"${ICU_SOURCE_DIR}/common/locdispnames.cpp"
"${ICU_SOURCE_DIR}/common/locdistance.cpp"
"${ICU_SOURCE_DIR}/common/locdspnm.cpp"
"${ICU_SOURCE_DIR}/common/locid.cpp"
"${ICU_SOURCE_DIR}/common/loclikely.cpp"
"${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp"
"${ICU_SOURCE_DIR}/common/locmap.cpp"
"${ICU_SOURCE_DIR}/common/locresdata.cpp"
"${ICU_SOURCE_DIR}/common/locutil.cpp"
"${ICU_SOURCE_DIR}/common/lsr.cpp"
"${ICU_SOURCE_DIR}/common/lstmbe.cpp"
"${ICU_SOURCE_DIR}/common/messagepattern.cpp"
"${ICU_SOURCE_DIR}/common/mlbe.cpp"
"${ICU_SOURCE_DIR}/common/normalizer2.cpp"
"${ICU_SOURCE_DIR}/common/normalizer2impl.cpp"
"${ICU_SOURCE_DIR}/common/normlzr.cpp"
"${ICU_SOURCE_DIR}/common/parsepos.cpp"
"${ICU_SOURCE_DIR}/common/patternprops.cpp"
"${ICU_SOURCE_DIR}/common/pluralmap.cpp"
"${ICU_SOURCE_DIR}/common/propname.cpp"
"${ICU_SOURCE_DIR}/common/propsvec.cpp"
"${ICU_SOURCE_DIR}/common/punycode.cpp"
"${ICU_SOURCE_DIR}/common/putil.cpp"
"${ICU_SOURCE_DIR}/common/rbbi.cpp"
"${ICU_SOURCE_DIR}/common/rbbi_cache.cpp"
"${ICU_SOURCE_DIR}/common/rbbidata.cpp"
"${ICU_SOURCE_DIR}/common/rbbinode.cpp"
"${ICU_SOURCE_DIR}/common/rbbirb.cpp"
@ -178,166 +80,180 @@ set(ICUUC_SOURCES
"${ICU_SOURCE_DIR}/common/rbbisetb.cpp"
"${ICU_SOURCE_DIR}/common/rbbistbl.cpp"
"${ICU_SOURCE_DIR}/common/rbbitblb.cpp"
"${ICU_SOURCE_DIR}/common/rbbi_cache.cpp"
"${ICU_SOURCE_DIR}/common/resbund.cpp"
"${ICU_SOURCE_DIR}/common/resbund_cnv.cpp"
"${ICU_SOURCE_DIR}/common/resource.cpp"
"${ICU_SOURCE_DIR}/common/restrace.cpp"
"${ICU_SOURCE_DIR}/common/ruleiter.cpp"
"${ICU_SOURCE_DIR}/common/schriter.cpp"
"${ICU_SOURCE_DIR}/common/serv.cpp"
"${ICU_SOURCE_DIR}/common/servnotf.cpp"
"${ICU_SOURCE_DIR}/common/servls.cpp"
"${ICU_SOURCE_DIR}/common/servlk.cpp"
"${ICU_SOURCE_DIR}/common/servlkf.cpp"
"${ICU_SOURCE_DIR}/common/servls.cpp"
"${ICU_SOURCE_DIR}/common/servnotf.cpp"
"${ICU_SOURCE_DIR}/common/servrbf.cpp"
"${ICU_SOURCE_DIR}/common/servslkf.cpp"
"${ICU_SOURCE_DIR}/common/uidna.cpp"
"${ICU_SOURCE_DIR}/common/usprep.cpp"
"${ICU_SOURCE_DIR}/common/uts46.cpp"
"${ICU_SOURCE_DIR}/common/punycode.cpp"
"${ICU_SOURCE_DIR}/common/util.cpp"
"${ICU_SOURCE_DIR}/common/util_props.cpp"
"${ICU_SOURCE_DIR}/common/parsepos.cpp"
"${ICU_SOURCE_DIR}/common/locbased.cpp"
"${ICU_SOURCE_DIR}/common/cwchar.cpp"
"${ICU_SOURCE_DIR}/common/wintz.cpp"
"${ICU_SOURCE_DIR}/common/dtintrv.cpp"
"${ICU_SOURCE_DIR}/common/ucnvsel.cpp"
"${ICU_SOURCE_DIR}/common/propsvec.cpp"
"${ICU_SOURCE_DIR}/common/ulist.cpp"
"${ICU_SOURCE_DIR}/common/uloc_tag.cpp"
"${ICU_SOURCE_DIR}/common/icudataver.cpp"
"${ICU_SOURCE_DIR}/common/icuplug.cpp"
"${ICU_SOURCE_DIR}/common/sharedobject.cpp"
"${ICU_SOURCE_DIR}/common/simpleformatter.cpp"
"${ICU_SOURCE_DIR}/common/unifiedcache.cpp"
"${ICU_SOURCE_DIR}/common/uloc_keytype.cpp"
"${ICU_SOURCE_DIR}/common/ubiditransform.cpp"
"${ICU_SOURCE_DIR}/common/pluralmap.cpp"
"${ICU_SOURCE_DIR}/common/static_unicode_sets.cpp"
"${ICU_SOURCE_DIR}/common/restrace.cpp"
"${ICU_SOURCE_DIR}/common/emojiprops.cpp"
"${ICU_SOURCE_DIR}/common/lstmbe.cpp")
"${ICU_SOURCE_DIR}/common/stringpiece.cpp"
"${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp"
"${ICU_SOURCE_DIR}/common/uarrsort.cpp"
"${ICU_SOURCE_DIR}/common/ubidi.cpp"
"${ICU_SOURCE_DIR}/common/ubidi_props.cpp"
"${ICU_SOURCE_DIR}/common/ubidiln.cpp"
"${ICU_SOURCE_DIR}/common/ubiditransform.cpp"
"${ICU_SOURCE_DIR}/common/ubidiwrt.cpp"
"${ICU_SOURCE_DIR}/common/ubrk.cpp"
"${ICU_SOURCE_DIR}/common/ucase.cpp"
"${ICU_SOURCE_DIR}/common/ucasemap.cpp"
"${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp"
"${ICU_SOURCE_DIR}/common/ucat.cpp"
"${ICU_SOURCE_DIR}/common/uchar.cpp"
"${ICU_SOURCE_DIR}/common/ucharstrie.cpp"
"${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp"
"${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp"
"${ICU_SOURCE_DIR}/common/uchriter.cpp"
"${ICU_SOURCE_DIR}/common/ucln_cmn.cpp"
"${ICU_SOURCE_DIR}/common/ucmndata.cpp"
"${ICU_SOURCE_DIR}/common/ucnv.cpp"
"${ICU_SOURCE_DIR}/common/ucnv2022.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_bld.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_cb.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_ct.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_err.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_ext.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_io.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_set.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u16.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u32.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u7.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u8.cpp"
"${ICU_SOURCE_DIR}/common/ucnvbocu.cpp"
"${ICU_SOURCE_DIR}/common/ucnvdisp.cpp"
"${ICU_SOURCE_DIR}/common/ucnvhz.cpp"
"${ICU_SOURCE_DIR}/common/ucnvisci.cpp"
"${ICU_SOURCE_DIR}/common/ucnvlat1.cpp"
"${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp"
"${ICU_SOURCE_DIR}/common/ucnvscsu.cpp"
"${ICU_SOURCE_DIR}/common/ucnvsel.cpp"
"${ICU_SOURCE_DIR}/common/ucol_swp.cpp"
"${ICU_SOURCE_DIR}/common/ucptrie.cpp"
"${ICU_SOURCE_DIR}/common/ucurr.cpp"
"${ICU_SOURCE_DIR}/common/udata.cpp"
"${ICU_SOURCE_DIR}/common/udatamem.cpp"
"${ICU_SOURCE_DIR}/common/udataswp.cpp"
"${ICU_SOURCE_DIR}/common/uenum.cpp"
"${ICU_SOURCE_DIR}/common/uhash.cpp"
"${ICU_SOURCE_DIR}/common/uhash_us.cpp"
"${ICU_SOURCE_DIR}/common/uidna.cpp"
"${ICU_SOURCE_DIR}/common/uinit.cpp"
"${ICU_SOURCE_DIR}/common/uinvchar.cpp"
"${ICU_SOURCE_DIR}/common/uiter.cpp"
"${ICU_SOURCE_DIR}/common/ulist.cpp"
"${ICU_SOURCE_DIR}/common/uloc.cpp"
"${ICU_SOURCE_DIR}/common/uloc_keytype.cpp"
"${ICU_SOURCE_DIR}/common/uloc_tag.cpp"
"${ICU_SOURCE_DIR}/common/ulocale.cpp"
"${ICU_SOURCE_DIR}/common/ulocbuilder.cpp"
"${ICU_SOURCE_DIR}/common/umapfile.cpp"
"${ICU_SOURCE_DIR}/common/umath.cpp"
"${ICU_SOURCE_DIR}/common/umutablecptrie.cpp"
"${ICU_SOURCE_DIR}/common/umutex.cpp"
"${ICU_SOURCE_DIR}/common/unames.cpp"
"${ICU_SOURCE_DIR}/common/unifiedcache.cpp"
"${ICU_SOURCE_DIR}/common/unifilt.cpp"
"${ICU_SOURCE_DIR}/common/unifunct.cpp"
"${ICU_SOURCE_DIR}/common/uniset.cpp"
"${ICU_SOURCE_DIR}/common/uniset_closure.cpp"
"${ICU_SOURCE_DIR}/common/uniset_props.cpp"
"${ICU_SOURCE_DIR}/common/unisetspan.cpp"
"${ICU_SOURCE_DIR}/common/unistr.cpp"
"${ICU_SOURCE_DIR}/common/unistr_case.cpp"
"${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp"
"${ICU_SOURCE_DIR}/common/unistr_cnv.cpp"
"${ICU_SOURCE_DIR}/common/unistr_props.cpp"
"${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp"
"${ICU_SOURCE_DIR}/common/unorm.cpp"
"${ICU_SOURCE_DIR}/common/unormcmp.cpp"
"${ICU_SOURCE_DIR}/common/uobject.cpp"
"${ICU_SOURCE_DIR}/common/uprops.cpp"
"${ICU_SOURCE_DIR}/common/ures_cnv.cpp"
"${ICU_SOURCE_DIR}/common/uresbund.cpp"
"${ICU_SOURCE_DIR}/common/uresdata.cpp"
"${ICU_SOURCE_DIR}/common/usc_impl.cpp"
"${ICU_SOURCE_DIR}/common/uscript.cpp"
"${ICU_SOURCE_DIR}/common/uscript_props.cpp"
"${ICU_SOURCE_DIR}/common/uset.cpp"
"${ICU_SOURCE_DIR}/common/uset_props.cpp"
"${ICU_SOURCE_DIR}/common/usetiter.cpp"
"${ICU_SOURCE_DIR}/common/ushape.cpp"
"${ICU_SOURCE_DIR}/common/usprep.cpp"
"${ICU_SOURCE_DIR}/common/ustack.cpp"
"${ICU_SOURCE_DIR}/common/ustr_cnv.cpp"
"${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp"
"${ICU_SOURCE_DIR}/common/ustr_wcs.cpp"
"${ICU_SOURCE_DIR}/common/ustrcase.cpp"
"${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp"
"${ICU_SOURCE_DIR}/common/ustrenum.cpp"
"${ICU_SOURCE_DIR}/common/ustrfmt.cpp"
"${ICU_SOURCE_DIR}/common/ustring.cpp"
"${ICU_SOURCE_DIR}/common/ustrtrns.cpp"
"${ICU_SOURCE_DIR}/common/utext.cpp"
"${ICU_SOURCE_DIR}/common/utf_impl.cpp"
"${ICU_SOURCE_DIR}/common/util.cpp"
"${ICU_SOURCE_DIR}/common/util_props.cpp"
"${ICU_SOURCE_DIR}/common/utrace.cpp"
"${ICU_SOURCE_DIR}/common/utrie.cpp"
"${ICU_SOURCE_DIR}/common/utrie2.cpp"
"${ICU_SOURCE_DIR}/common/utrie2_builder.cpp"
"${ICU_SOURCE_DIR}/common/utrie_swap.cpp"
"${ICU_SOURCE_DIR}/common/uts46.cpp"
"${ICU_SOURCE_DIR}/common/utypes.cpp"
"${ICU_SOURCE_DIR}/common/uvector.cpp"
"${ICU_SOURCE_DIR}/common/uvectr32.cpp"
"${ICU_SOURCE_DIR}/common/uvectr64.cpp"
"${ICU_SOURCE_DIR}/common/wintz.cpp")
set(ICUI18N_SOURCES
"${ICU_SOURCE_DIR}/i18n/ucln_in.cpp"
"${ICU_SOURCE_DIR}/i18n/fmtable.cpp"
"${ICU_SOURCE_DIR}/i18n/format.cpp"
"${ICU_SOURCE_DIR}/i18n/msgfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/umsg.cpp"
"${ICU_SOURCE_DIR}/i18n/numfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/unum.cpp"
"${ICU_SOURCE_DIR}/i18n/decimfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp"
"${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp"
"${ICU_SOURCE_DIR}/i18n/choicfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/datefmt.cpp"
"${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp"
"${ICU_SOURCE_DIR}/i18n/udat.cpp"
"${ICU_SOURCE_DIR}/i18n/dtptngen.cpp"
"${ICU_SOURCE_DIR}/i18n/udatpg.cpp"
"${ICU_SOURCE_DIR}/i18n/nfrs.cpp"
"${ICU_SOURCE_DIR}/i18n/nfrule.cpp"
"${ICU_SOURCE_DIR}/i18n/nfsubs.cpp"
"${ICU_SOURCE_DIR}/i18n/rbnf.cpp"
"${ICU_SOURCE_DIR}/i18n/numsys.cpp"
"${ICU_SOURCE_DIR}/i18n/unumsys.cpp"
"${ICU_SOURCE_DIR}/i18n/ucsdet.cpp"
"${ICU_SOURCE_DIR}/i18n/ucal.cpp"
"${ICU_SOURCE_DIR}/i18n/calendar.cpp"
"${ICU_SOURCE_DIR}/i18n/gregocal.cpp"
"${ICU_SOURCE_DIR}/i18n/timezone.cpp"
"${ICU_SOURCE_DIR}/i18n/simpletz.cpp"
"${ICU_SOURCE_DIR}/i18n/olsontz.cpp"
"${ICU_SOURCE_DIR}/i18n/alphaindex.cpp"
"${ICU_SOURCE_DIR}/i18n/anytrans.cpp"
"${ICU_SOURCE_DIR}/i18n/astro.cpp"
"${ICU_SOURCE_DIR}/i18n/taiwncal.cpp"
"${ICU_SOURCE_DIR}/i18n/basictz.cpp"
"${ICU_SOURCE_DIR}/i18n/bocsu.cpp"
"${ICU_SOURCE_DIR}/i18n/brktrans.cpp"
"${ICU_SOURCE_DIR}/i18n/buddhcal.cpp"
"${ICU_SOURCE_DIR}/i18n/persncal.cpp"
"${ICU_SOURCE_DIR}/i18n/islamcal.cpp"
"${ICU_SOURCE_DIR}/i18n/japancal.cpp"
"${ICU_SOURCE_DIR}/i18n/gregoimp.cpp"
"${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp"
"${ICU_SOURCE_DIR}/i18n/indiancal.cpp"
"${ICU_SOURCE_DIR}/i18n/chnsecal.cpp"
"${ICU_SOURCE_DIR}/i18n/calendar.cpp"
"${ICU_SOURCE_DIR}/i18n/casetrn.cpp"
"${ICU_SOURCE_DIR}/i18n/cecal.cpp"
"${ICU_SOURCE_DIR}/i18n/coptccal.cpp"
"${ICU_SOURCE_DIR}/i18n/dangical.cpp"
"${ICU_SOURCE_DIR}/i18n/ethpccal.cpp"
"${ICU_SOURCE_DIR}/i18n/chnsecal.cpp"
"${ICU_SOURCE_DIR}/i18n/choicfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/coleitr.cpp"
"${ICU_SOURCE_DIR}/i18n/coll.cpp"
"${ICU_SOURCE_DIR}/i18n/sortkey.cpp"
"${ICU_SOURCE_DIR}/i18n/bocsu.cpp"
"${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp"
"${ICU_SOURCE_DIR}/i18n/ucol.cpp"
"${ICU_SOURCE_DIR}/i18n/ucol_res.cpp"
"${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp"
"${ICU_SOURCE_DIR}/i18n/collation.cpp"
"${ICU_SOURCE_DIR}/i18n/collationsettings.cpp"
"${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/collationcompare.cpp"
"${ICU_SOURCE_DIR}/i18n/collationdata.cpp"
"${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp"
"${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/collationdatareader.cpp"
"${ICU_SOURCE_DIR}/i18n/collationdatawriter.cpp"
"${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp"
"${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/collationfcd.cpp"
"${ICU_SOURCE_DIR}/i18n/collationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/collationsets.cpp"
"${ICU_SOURCE_DIR}/i18n/collationcompare.cpp"
"${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp"
"${ICU_SOURCE_DIR}/i18n/collationkeys.cpp"
"${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp"
"${ICU_SOURCE_DIR}/i18n/collationroot.cpp"
"${ICU_SOURCE_DIR}/i18n/collationrootelements.cpp"
"${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/collationweights.cpp"
"${ICU_SOURCE_DIR}/i18n/collationruleparser.cpp"
"${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/listformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/strmatch.cpp"
"${ICU_SOURCE_DIR}/i18n/usearch.cpp"
"${ICU_SOURCE_DIR}/i18n/search.cpp"
"${ICU_SOURCE_DIR}/i18n/stsearch.cpp"
"${ICU_SOURCE_DIR}/i18n/translit.cpp"
"${ICU_SOURCE_DIR}/i18n/utrans.cpp"
"${ICU_SOURCE_DIR}/i18n/esctrn.cpp"
"${ICU_SOURCE_DIR}/i18n/unesctrn.cpp"
"${ICU_SOURCE_DIR}/i18n/funcrepl.cpp"
"${ICU_SOURCE_DIR}/i18n/strrepl.cpp"
"${ICU_SOURCE_DIR}/i18n/tridpars.cpp"
"${ICU_SOURCE_DIR}/i18n/collationsets.cpp"
"${ICU_SOURCE_DIR}/i18n/collationsettings.cpp"
"${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp"
"${ICU_SOURCE_DIR}/i18n/collationweights.cpp"
"${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp"
"${ICU_SOURCE_DIR}/i18n/coptccal.cpp"
"${ICU_SOURCE_DIR}/i18n/cpdtrans.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_data.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_set.cpp"
"${ICU_SOURCE_DIR}/i18n/nultrans.cpp"
"${ICU_SOURCE_DIR}/i18n/remtrans.cpp"
"${ICU_SOURCE_DIR}/i18n/casetrn.cpp"
"${ICU_SOURCE_DIR}/i18n/titletrn.cpp"
"${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp"
"${ICU_SOURCE_DIR}/i18n/toupptrn.cpp"
"${ICU_SOURCE_DIR}/i18n/anytrans.cpp"
"${ICU_SOURCE_DIR}/i18n/name2uni.cpp"
"${ICU_SOURCE_DIR}/i18n/uni2name.cpp"
"${ICU_SOURCE_DIR}/i18n/nortrans.cpp"
"${ICU_SOURCE_DIR}/i18n/quant.cpp"
"${ICU_SOURCE_DIR}/i18n/transreg.cpp"
"${ICU_SOURCE_DIR}/i18n/brktrans.cpp"
"${ICU_SOURCE_DIR}/i18n/regexcmp.cpp"
"${ICU_SOURCE_DIR}/i18n/rematch.cpp"
"${ICU_SOURCE_DIR}/i18n/repattrn.cpp"
"${ICU_SOURCE_DIR}/i18n/regexst.cpp"
"${ICU_SOURCE_DIR}/i18n/regextxt.cpp"
"${ICU_SOURCE_DIR}/i18n/regeximp.cpp"
"${ICU_SOURCE_DIR}/i18n/uregex.cpp"
"${ICU_SOURCE_DIR}/i18n/uregexc.cpp"
"${ICU_SOURCE_DIR}/i18n/ulocdata.cpp"
"${ICU_SOURCE_DIR}/i18n/measfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/currfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/curramt.cpp"
"${ICU_SOURCE_DIR}/i18n/currunit.cpp"
"${ICU_SOURCE_DIR}/i18n/measure.cpp"
"${ICU_SOURCE_DIR}/i18n/utmscale.cpp"
"${ICU_SOURCE_DIR}/i18n/csdetect.cpp"
"${ICU_SOURCE_DIR}/i18n/csmatch.cpp"
"${ICU_SOURCE_DIR}/i18n/csr2022.cpp"
@ -346,60 +262,80 @@ set(ICUI18N_SOURCES
"${ICU_SOURCE_DIR}/i18n/csrsbcs.cpp"
"${ICU_SOURCE_DIR}/i18n/csrucode.cpp"
"${ICU_SOURCE_DIR}/i18n/csrutf8.cpp"
"${ICU_SOURCE_DIR}/i18n/inputext.cpp"
"${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp"
"${ICU_SOURCE_DIR}/i18n/windtfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/basictz.cpp"
"${ICU_SOURCE_DIR}/i18n/dtrule.cpp"
"${ICU_SOURCE_DIR}/i18n/rbtz.cpp"
"${ICU_SOURCE_DIR}/i18n/tzrule.cpp"
"${ICU_SOURCE_DIR}/i18n/tztrans.cpp"
"${ICU_SOURCE_DIR}/i18n/vtzone.cpp"
"${ICU_SOURCE_DIR}/i18n/zonemeta.cpp"
"${ICU_SOURCE_DIR}/i18n/standardplural.cpp"
"${ICU_SOURCE_DIR}/i18n/upluralrules.cpp"
"${ICU_SOURCE_DIR}/i18n/plurrule.cpp"
"${ICU_SOURCE_DIR}/i18n/plurfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/selfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/curramt.cpp"
"${ICU_SOURCE_DIR}/i18n/currfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/currpinf.cpp"
"${ICU_SOURCE_DIR}/i18n/currunit.cpp"
"${ICU_SOURCE_DIR}/i18n/dangical.cpp"
"${ICU_SOURCE_DIR}/i18n/datefmt.cpp"
"${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp"
"${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp"
"${ICU_SOURCE_DIR}/i18n/decContext.cpp"
"${ICU_SOURCE_DIR}/i18n/decNumber.cpp"
"${ICU_SOURCE_DIR}/i18n/decimfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/displayoptions.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp"
"${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp"
"${ICU_SOURCE_DIR}/i18n/dtitvfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/dtitvinf.cpp"
"${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp"
"${ICU_SOURCE_DIR}/i18n/tmunit.cpp"
"${ICU_SOURCE_DIR}/i18n/tmutamt.cpp"
"${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/currpinf.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp"
"${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp"
"${ICU_SOURCE_DIR}/i18n/ztrans.cpp"
"${ICU_SOURCE_DIR}/i18n/zrule.cpp"
"${ICU_SOURCE_DIR}/i18n/vzone.cpp"
"${ICU_SOURCE_DIR}/i18n/dtptngen.cpp"
"${ICU_SOURCE_DIR}/i18n/dtrule.cpp"
"${ICU_SOURCE_DIR}/i18n/erarules.cpp"
"${ICU_SOURCE_DIR}/i18n/esctrn.cpp"
"${ICU_SOURCE_DIR}/i18n/ethpccal.cpp"
"${ICU_SOURCE_DIR}/i18n/fmtable.cpp"
"${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp"
"${ICU_SOURCE_DIR}/i18n/format.cpp"
"${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp"
"${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp"
"${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp"
"${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp"
"${ICU_SOURCE_DIR}/i18n/fphdlimp.cpp"
"${ICU_SOURCE_DIR}/i18n/fpositer.cpp"
"${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp"
"${ICU_SOURCE_DIR}/i18n/decNumber.cpp"
"${ICU_SOURCE_DIR}/i18n/decContext.cpp"
"${ICU_SOURCE_DIR}/i18n/alphaindex.cpp"
"${ICU_SOURCE_DIR}/i18n/tznames.cpp"
"${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/tzgnames.cpp"
"${ICU_SOURCE_DIR}/i18n/tzfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp"
"${ICU_SOURCE_DIR}/i18n/funcrepl.cpp"
"${ICU_SOURCE_DIR}/i18n/gender.cpp"
"${ICU_SOURCE_DIR}/i18n/region.cpp"
"${ICU_SOURCE_DIR}/i18n/scriptset.cpp"
"${ICU_SOURCE_DIR}/i18n/uregion.cpp"
"${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp"
"${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/gregocal.cpp"
"${ICU_SOURCE_DIR}/i18n/gregoimp.cpp"
"${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp"
"${ICU_SOURCE_DIR}/i18n/indiancal.cpp"
"${ICU_SOURCE_DIR}/i18n/inputext.cpp"
"${ICU_SOURCE_DIR}/i18n/islamcal.cpp"
"${ICU_SOURCE_DIR}/i18n/iso8601cal.cpp"
"${ICU_SOURCE_DIR}/i18n/japancal.cpp"
"${ICU_SOURCE_DIR}/i18n/listformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/measfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/measunit.cpp"
"${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp"
"${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp"
"${ICU_SOURCE_DIR}/i18n/measure.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_arguments.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_checker.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_data_model.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_errors.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_evaluation.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_formattable.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_formatter.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_function_registry.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_parser.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_serializer.cpp"
"${ICU_SOURCE_DIR}/i18n/msgfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/name2uni.cpp"
"${ICU_SOURCE_DIR}/i18n/nfrs.cpp"
"${ICU_SOURCE_DIR}/i18n/nfrule.cpp"
"${ICU_SOURCE_DIR}/i18n/nfsubs.cpp"
"${ICU_SOURCE_DIR}/i18n/nortrans.cpp"
"${ICU_SOURCE_DIR}/i18n/nultrans.cpp"
"${ICU_SOURCE_DIR}/i18n/number_affixutils.cpp"
"${ICU_SOURCE_DIR}/i18n/number_asformat.cpp"
"${ICU_SOURCE_DIR}/i18n/number_capi.cpp"
"${ICU_SOURCE_DIR}/i18n/number_compact.cpp"
"${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp"
"${ICU_SOURCE_DIR}/i18n/number_decimalquantity.cpp"
"${ICU_SOURCE_DIR}/i18n/number_decimfmtprops.cpp"
"${ICU_SOURCE_DIR}/i18n/number_fluent.cpp"
@ -407,7 +343,9 @@ set(ICUI18N_SOURCES
"${ICU_SOURCE_DIR}/i18n/number_grouping.cpp"
"${ICU_SOURCE_DIR}/i18n/number_integerwidth.cpp"
"${ICU_SOURCE_DIR}/i18n/number_longnames.cpp"
"${ICU_SOURCE_DIR}/i18n/number_mapper.cpp"
"${ICU_SOURCE_DIR}/i18n/number_modifiers.cpp"
"${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp"
"${ICU_SOURCE_DIR}/i18n/number_notation.cpp"
"${ICU_SOURCE_DIR}/i18n/number_output.cpp"
"${ICU_SOURCE_DIR}/i18n/number_padding.cpp"
@ -415,46 +353,125 @@ set(ICUI18N_SOURCES
"${ICU_SOURCE_DIR}/i18n/number_patternstring.cpp"
"${ICU_SOURCE_DIR}/i18n/number_rounding.cpp"
"${ICU_SOURCE_DIR}/i18n/number_scientific.cpp"
"${ICU_SOURCE_DIR}/i18n/number_utils.cpp"
"${ICU_SOURCE_DIR}/i18n/number_asformat.cpp"
"${ICU_SOURCE_DIR}/i18n/number_mapper.cpp"
"${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp"
"${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp"
"${ICU_SOURCE_DIR}/i18n/number_simple.cpp"
"${ICU_SOURCE_DIR}/i18n/number_skeletons.cpp"
"${ICU_SOURCE_DIR}/i18n/number_capi.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp"
"${ICU_SOURCE_DIR}/i18n/string_segment.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp"
"${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp"
"${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/erarules.cpp"
"${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp"
"${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp"
"${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp"
"${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp"
"${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp"
"${ICU_SOURCE_DIR}/i18n/number_symbolswrapper.cpp"
"${ICU_SOURCE_DIR}/i18n/number_usageprefs.cpp"
"${ICU_SOURCE_DIR}/i18n/number_utils.cpp"
"${ICU_SOURCE_DIR}/i18n/numfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp"
"${ICU_SOURCE_DIR}/i18n/numrange_capi.cpp"
"${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp"
"${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/numsys.cpp"
"${ICU_SOURCE_DIR}/i18n/olsontz.cpp"
"${ICU_SOURCE_DIR}/i18n/persncal.cpp"
"${ICU_SOURCE_DIR}/i18n/pluralranges.cpp"
"${ICU_SOURCE_DIR}/i18n/plurfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/plurrule.cpp"
"${ICU_SOURCE_DIR}/i18n/quant.cpp"
"${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/rbnf.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_data.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_set.cpp"
"${ICU_SOURCE_DIR}/i18n/rbtz.cpp"
"${ICU_SOURCE_DIR}/i18n/regexcmp.cpp"
"${ICU_SOURCE_DIR}/i18n/regeximp.cpp"
"${ICU_SOURCE_DIR}/i18n/regexst.cpp"
"${ICU_SOURCE_DIR}/i18n/regextxt.cpp"
"${ICU_SOURCE_DIR}/i18n/region.cpp"
"${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp"
"${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/rematch.cpp"
"${ICU_SOURCE_DIR}/i18n/remtrans.cpp"
"${ICU_SOURCE_DIR}/i18n/repattrn.cpp"
"${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp"
"${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/scriptset.cpp"
"${ICU_SOURCE_DIR}/i18n/search.cpp"
"${ICU_SOURCE_DIR}/i18n/selfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/simpletz.cpp"
"${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp"
"${ICU_SOURCE_DIR}/i18n/sortkey.cpp"
"${ICU_SOURCE_DIR}/i18n/standardplural.cpp"
"${ICU_SOURCE_DIR}/i18n/string_segment.cpp"
"${ICU_SOURCE_DIR}/i18n/strmatch.cpp"
"${ICU_SOURCE_DIR}/i18n/strrepl.cpp"
"${ICU_SOURCE_DIR}/i18n/stsearch.cpp"
"${ICU_SOURCE_DIR}/i18n/taiwncal.cpp"
"${ICU_SOURCE_DIR}/i18n/timezone.cpp"
"${ICU_SOURCE_DIR}/i18n/titletrn.cpp"
"${ICU_SOURCE_DIR}/i18n/tmunit.cpp"
"${ICU_SOURCE_DIR}/i18n/tmutamt.cpp"
"${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp"
"${ICU_SOURCE_DIR}/i18n/toupptrn.cpp"
"${ICU_SOURCE_DIR}/i18n/translit.cpp"
"${ICU_SOURCE_DIR}/i18n/transreg.cpp"
"${ICU_SOURCE_DIR}/i18n/tridpars.cpp"
"${ICU_SOURCE_DIR}/i18n/tzfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/tzgnames.cpp"
"${ICU_SOURCE_DIR}/i18n/tznames.cpp"
"${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/tzrule.cpp"
"${ICU_SOURCE_DIR}/i18n/tztrans.cpp"
"${ICU_SOURCE_DIR}/i18n/ucal.cpp"
"${ICU_SOURCE_DIR}/i18n/ucln_in.cpp"
"${ICU_SOURCE_DIR}/i18n/ucol.cpp"
"${ICU_SOURCE_DIR}/i18n/ucol_res.cpp"
"${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp"
"${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp"
"${ICU_SOURCE_DIR}/i18n/ucsdet.cpp"
"${ICU_SOURCE_DIR}/i18n/udat.cpp"
"${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp"
"${ICU_SOURCE_DIR}/i18n/udatpg.cpp"
"${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp"
"${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/ulocdata.cpp"
"${ICU_SOURCE_DIR}/i18n/umsg.cpp"
"${ICU_SOURCE_DIR}/i18n/unesctrn.cpp"
"${ICU_SOURCE_DIR}/i18n/uni2name.cpp"
"${ICU_SOURCE_DIR}/i18n/units_complexconverter.cpp"
"${ICU_SOURCE_DIR}/i18n/units_converter.cpp"
"${ICU_SOURCE_DIR}/i18n/units_data.cpp"
"${ICU_SOURCE_DIR}/i18n/units_router.cpp")
"${ICU_SOURCE_DIR}/i18n/units_router.cpp"
"${ICU_SOURCE_DIR}/i18n/unum.cpp"
"${ICU_SOURCE_DIR}/i18n/unumsys.cpp"
"${ICU_SOURCE_DIR}/i18n/upluralrules.cpp"
"${ICU_SOURCE_DIR}/i18n/uregex.cpp"
"${ICU_SOURCE_DIR}/i18n/uregexc.cpp"
"${ICU_SOURCE_DIR}/i18n/uregion.cpp"
"${ICU_SOURCE_DIR}/i18n/usearch.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/utmscale.cpp"
"${ICU_SOURCE_DIR}/i18n/utrans.cpp"
"${ICU_SOURCE_DIR}/i18n/vtzone.cpp"
"${ICU_SOURCE_DIR}/i18n/vzone.cpp"
"${ICU_SOURCE_DIR}/i18n/windtfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp"
"${ICU_SOURCE_DIR}/i18n/zonemeta.cpp"
"${ICU_SOURCE_DIR}/i18n/zrule.cpp"
"${ICU_SOURCE_DIR}/i18n/ztrans.cpp")
file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ")
enable_language(ASM)

contrib/libarchive vendored (2 lines changed)

@ -1 +1 @@
Subproject commit ee45796171324519f0c0bfd012018dd099296336
Subproject commit 313aa1fa10b657de791e3202c168a6c833bc3543


@ -157,7 +157,7 @@ if (TARGET ch_contrib::zlib)
endif()
if (TARGET ch_contrib::zstd)
target_compile_definitions(_libarchive PUBLIC HAVE_ZSTD_H=1 HAVE_LIBZSTD=1 HAVE_LIBZSTD_COMPRESSOR=1)
target_compile_definitions(_libarchive PUBLIC HAVE_ZSTD_H=1 HAVE_LIBZSTD=1 HAVE_ZSTD_compressStream=1)
target_link_libraries(_libarchive PRIVATE ch_contrib::zstd)
endif()


@ -334,13 +334,16 @@ typedef uint64_t uintmax_t;
/* #undef ARCHIVE_XATTR_LINUX */
/* Version number of bsdcpio */
#define BSDCPIO_VERSION_STRING "3.7.0"
#define BSDCPIO_VERSION_STRING "3.7.4"
/* Version number of bsdtar */
#define BSDTAR_VERSION_STRING "3.7.0"
#define BSDTAR_VERSION_STRING "3.7.4"
/* Version number of bsdcat */
#define BSDCAT_VERSION_STRING "3.7.0"
#define BSDCAT_VERSION_STRING "3.7.4"
/* Version number of bsdunzip */
#define BSDUNZIP_VERSION_STRING "3.7.4"
/* Define to 1 if you have the `acl_create_entry' function. */
/* #undef HAVE_ACL_CREATE_ENTRY */
@ -642,8 +645,8 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `getgrnam_r' function. */
#define HAVE_GETGRNAM_R 1
/* Define to 1 if platform uses `optreset` to reset `getopt` */
#define HAVE_GETOPT_OPTRESET 1
/* Define to 1 if you have the `getline' function. */
#define HAVE_GETLINE 1
/* Define to 1 if you have the `getpid' function. */
#define HAVE_GETPID 1
@ -750,6 +753,12 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `pcreposix' library (-lpcreposix). */
/* #undef HAVE_LIBPCREPOSIX */
/* Define to 1 if you have the `pcre2-8' library (-lpcre2-8). */
/* #undef HAVE_LIBPCRE2 */
/* Define to 1 if you have the `pcreposix' library (-lpcre2posix). */
/* #undef HAVE_LIBPCRE2POSIX */
/* Define to 1 if you have the `xml2' library (-lxml2). */
#define HAVE_LIBXML2 1
@ -765,9 +774,8 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `zstd' library (-lzstd). */
/* #undef HAVE_LIBZSTD */
/* Define to 1 if you have the `zstd' library (-lzstd) with compression
support. */
/* #undef HAVE_LIBZSTD_COMPRESSOR */
/* Define to 1 if you have the ZSTD_compressStream function. */
/* #undef HAVE_ZSTD_compressStream */
/* Define to 1 if you have the <limits.h> header file. */
#define HAVE_LIMITS_H 1
@ -923,6 +931,9 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the <pcreposix.h> header file. */
/* #undef HAVE_PCREPOSIX_H */
/* Define to 1 if you have the <pcre2posix.h> header file. */
/* #undef HAVE_PCRE2POSIX_H */
/* Define to 1 if you have the `pipe' function. */
#define HAVE_PIPE 1
@ -1029,6 +1040,12 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `strrchr' function. */
#define HAVE_STRRCHR 1
/* Define to 1 if the system has the type `struct statfs'. */
/* #undef HAVE_STRUCT_STATFS */
/* Define to 1 if `f_iosize' is a member of `struct statfs'. */
/* #undef HAVE_STRUCT_STATFS_F_IOSIZE */
/* Define to 1 if `f_namemax' is a member of `struct statfs'. */
/* #undef HAVE_STRUCT_STATFS_F_NAMEMAX */
@ -1077,6 +1094,9 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `symlink' function. */
#define HAVE_SYMLINK 1
/* Define to 1 if you have the `sysconf' function. */
#define HAVE_SYSCONF 1
/* Define to 1 if you have the <sys/acl.h> header file. */
/* #undef HAVE_SYS_ACL_H */
@ -1276,10 +1296,10 @@ typedef uint64_t uintmax_t;
#define ICONV_CONST
/* Version number of libarchive as a single integer */
#define LIBARCHIVE_VERSION_NUMBER "3007000"
#define LIBARCHIVE_VERSION_NUMBER "3007004"
/* Version number of libarchive */
#define LIBARCHIVE_VERSION_STRING "3.7.0"
#define LIBARCHIVE_VERSION_STRING "3.7.4"
/* Define to 1 if `lstat' dereferences a symlink specified with a trailing
slash. */
@ -1333,7 +1353,7 @@ typedef uint64_t uintmax_t;
#endif /* SAFE_TO_DEFINE_EXTENSIONS */
/* Version number of package */
#define VERSION "3.7.0"
#define VERSION "3.7.4"
/* Number of bits in a file offset, on hosts where this is settable. */
/* #undef _FILE_OFFSET_BITS */

contrib/libuv vendored (2 lines changed)

@ -1 +1 @@
Subproject commit 4482964660c77eec1166cd7d14fb915e3dbd774a
Subproject commit 714b58b9849568211ade86b44dd91d37f8a2175e


@ -10,6 +10,7 @@ set(uv_sources
src/random.c
src/strscpy.c
src/strtok.c
src/thread-common.c
src/threadpool.c
src/timer.c
src/uv-common.c
@ -70,10 +71,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
list(APPEND uv_defines _GNU_SOURCE _POSIX_C_SOURCE=200112)
list(APPEND uv_libraries rt)
list(APPEND uv_sources
src/unix/epoll.c
src/unix/linux-core.c
src/unix/linux-inotify.c
src/unix/linux-syscalls.c
src/unix/linux.c
src/unix/procfs-exepath.c
src/unix/random-getrandom.c
src/unix/random-sysctl-linux.c)

contrib/openssl vendored (2 lines changed)

@ -1 +1 @@
Subproject commit 66deddc1e53cda8706604a019777259372d1bd62
Subproject commit b3e62c440f390e12e77c80675f883af82ad3d5ed


@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.8.3.59"
ARG VERSION="24.8.4.13"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""


@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.8.3.59"
ARG VERSION="24.8.4.13"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""


@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="24.8.3.59"
ARG VERSION="24.8.4.13"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
#docker-official-library:off


@ -3,6 +3,8 @@
FROM alpine:3.18
RUN apk add --no-cache -U iproute2 \
&& for bin in iptables iptables-restore iptables-save; \
&& for bin in \
iptables iptables-restore iptables-save \
ip6tables ip6tables-restore ip6tables-save; \
do ln -sf xtables-nft-multi "/sbin/$bin"; \
done


@ -0,0 +1,17 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.3.11.7-lts (28795d0a47e) FIXME as compared to v24.3.10.33-lts (37b6502ebf0)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#67479](https://github.com/ClickHouse/ClickHouse/issues/67479): In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
* Backported in [#69243](https://github.com/ClickHouse/ClickHouse/issues/69243): `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and led to a LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#69221](https://github.com/ClickHouse/ClickHouse/issues/69221): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).


@ -0,0 +1,18 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.5.8.10-stable (f11729638ea) FIXME as compared to v24.5.7.31-stable (6c185e9aec1)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#69295](https://github.com/ClickHouse/ClickHouse/issues/69295): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#69245](https://github.com/ClickHouse/ClickHouse/issues/69245): `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and led to a LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix crash when using `s3` table function with GLOB paths and filters. [#69176](https://github.com/ClickHouse/ClickHouse/pull/69176) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#69223](https://github.com/ClickHouse/ClickHouse/issues/69223): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).


@ -0,0 +1,16 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.6.6.6-stable (a4c4580e639) FIXME as compared to v24.6.5.30-stable (e6e196c92d6)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#69197](https://github.com/ClickHouse/ClickHouse/issues/69197): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#69225](https://github.com/ClickHouse/ClickHouse/issues/69225): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).


@ -0,0 +1,17 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.7.6.8-stable (7779883593a) FIXME as compared to v24.7.5.37-stable (f2533ca97be)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#69198](https://github.com/ClickHouse/ClickHouse/issues/69198): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#69249](https://github.com/ClickHouse/ClickHouse/issues/69249): `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and led to a LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#69227](https://github.com/ClickHouse/ClickHouse/issues/69227): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).


@ -0,0 +1,22 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.8.4.13-lts (53195bc189b) FIXME as compared to v24.8.3.59-lts (e729b9fa40e)
#### Improvement
* Backported in [#68699](https://github.com/ClickHouse/ClickHouse/issues/68699): Delete the old named-collections code from dictionaries and substitute it with the new one, which allows using DDL-created named collections in dictionaries. Closes [#60936](https://github.com/ClickHouse/ClickHouse/issues/60936), closes [#36890](https://github.com/ClickHouse/ClickHouse/issues/36890). [#68412](https://github.com/ClickHouse/ClickHouse/pull/68412) ([Kseniia Sumarokova](https://github.com/kssenii)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#69231](https://github.com/ClickHouse/ClickHouse/issues/69231): Fix parsing error when null should be inserted as default in some cases during JSON type parsing. [#68955](https://github.com/ClickHouse/ClickHouse/pull/68955) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69251](https://github.com/ClickHouse/ClickHouse/issues/69251): `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and led to a LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#69189](https://github.com/ClickHouse/ClickHouse/issues/69189): Don't create Object type if use_json_alias_for_old_object_type=1 but allow_experimental_object_type=0. [#69150](https://github.com/ClickHouse/ClickHouse/pull/69150) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69229](https://github.com/ClickHouse/ClickHouse/issues/69229): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).
* Backported in [#69219](https://github.com/ClickHouse/ClickHouse/issues/69219): Disable perf-like test with sanitizers. [#69194](https://github.com/ClickHouse/ClickHouse/pull/69194) ([alesapin](https://github.com/alesapin)).


@ -989,19 +989,52 @@ ALTER TABLE tab DROP STATISTICS a;
These lightweight statistics aggregate information about the distribution of values in columns. Statistics are stored in every part and updated on every insert.
They can be used for prewhere optimization only if the setting `allow_statistics_optimize = 1` is enabled.
#### Available Types of Column Statistics {#available-types-of-column-statistics}
### Available Types of Column Statistics {#available-types-of-column-statistics}
- `MinMax`
The minimum and maximum column values, which allow estimating the selectivity of range filters on numeric columns.
Syntax: `minmax`
- `TDigest`
[TDigest](https://github.com/tdunning/t-digest) sketches which allow computing approximate percentiles (e.g. the 90th percentile) for numeric columns.
Syntax: `tdigest`
- `Uniq`
[HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) sketches which provide an estimate of how many distinct values a column contains.
- `count_min`
Syntax: `uniq`
- `CountMin`
[CountMin](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) sketches which provide an approximate count of the frequency of each value in a column.
Syntax: `countmin`
### Supported Data Types {#supported-data-types}
| | (U)Int*, Float*, Decimal(*), Date*, Boolean, Enum* | String or FixedString |
|-----------|----------------------------------------------------|-----------------------|
| CountMin | ✔ | ✔ |
| MinMax | ✔ | ✗ |
| TDigest | ✔ | ✗ |
| Uniq | ✔ | ✔ |
### Supported Operations {#supported-operations}
| | Equality filters (==) | Range filters (>, >=, <, <=) |
|-----------|-----------------------|------------------------------|
| CountMin | ✔ | ✗ |
| MinMax | ✗ | ✔ |
| TDigest | ✗ | ✔ |
| Uniq | ✔ | ✗ |
[Count-min](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) sketches which provide an approximate count of the frequency of each value in a column.
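To make the above concrete, here is a minimal sketch of how such statistics could be declared, using the syntax names listed above. The table and column names are hypothetical, and the exact clause and setting names should be verified against the current ClickHouse documentation:

```sql
-- Minimal sketch (hypothetical table): declaring column statistics
-- with the type names documented above.
SET allow_experimental_statistics = 1;  -- assumed to be required while statistics are experimental
SET allow_statistics_optimize = 1;      -- enables the prewhere optimization mentioned above

CREATE TABLE tab
(
    a Int64  STATISTICS(minmax, tdigest),
    b String STATISTICS(uniq, countmin)
)
ENGINE = MergeTree
ORDER BY a;

-- Statistics can also be added to or dropped from an existing column:
ALTER TABLE tab ADD STATISTICS a TYPE uniq;
ALTER TABLE tab DROP STATISTICS a;
```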
## Column-level Settings {#column-level-settings}


@ -39,6 +39,7 @@ The supported formats are:
| [JSONCompact](#jsoncompact) | ✔ | ✔ |
| [JSONCompactStrings](#jsoncompactstrings) | ✗ | ✔ |
| [JSONCompactColumns](#jsoncompactcolumns) | ✔ | ✔ |
| [JSONCompactWithProgress](#jsoncompactwithprogress) | ✗ | ✔ |
| [JSONEachRow](#jsoneachrow) | ✔ | ✔ |
| [PrettyJSONEachRow](#prettyjsoneachrow) | ✗ | ✔ |
| [JSONEachRowWithProgress](#jsoneachrowwithprogress) | ✗ | ✔ |
@ -988,6 +989,59 @@ Example:
Columns that are not present in the block will be filled with default values (you can use [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) setting here)
## JSONCompactWithProgress {#jsoncompactwithprogress}
In this format, ClickHouse outputs each row as a separate, newline-delimited JSON object.
Each row is either a metadata object, a data object, a progress information object, a statistics object, an exception object, or a totals object:
1. **Metadata Object (`meta`)**
- Describes the structure of the data rows.
- Fields: `name` (column name), `type` (data type, e.g., `UInt32`, `String`, etc.).
- Example: `{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]}`
- Appears before any data objects.
2. **Data Object (`data`)**
- Represents a row of query results.
- Fields: An array with values corresponding to the columns defined in the metadata.
- Example: `{"data":["1", "John Doe"]}`
- Appears after the metadata object, one per row.
3. **Progress Information Object (`progress`)**
- Provides real-time progress feedback during query execution.
- Fields: `read_rows`, `read_bytes`, `written_rows`, `written_bytes`, `total_rows_to_read`, `result_rows`, `result_bytes`, `elapsed_ns`.
- Example: `{"progress":{"read_rows":"8","read_bytes":"168"}}`
- May appear intermittently.
4. **Statistics Object (`statistics`)**
- Summarizes query execution statistics.
- Fields: `rows`, `rows_before_limit_at_least`, `elapsed`, `rows_read`, `bytes_read`.
- Example: `{"statistics": {"rows":2, "elapsed":0.001995, "rows_read":8}}`
- Appears at the end.
5. **Exception Object (`exception`)**
- Represents an error that occurred during query execution.
- Fields: A single text field containing the error message.
- Example: `{"exception": "Code: 395. DB::Exception: Value passed to 'throwIf' function is non-zero..."}`
- Appears when an error is encountered.
6. **Totals Object (`totals`)**
- Provides the totals for each numeric column in the result set.
- Fields: An array with total values corresponding to the columns defined in the metadata.
- Example: `{"totals": ["", "3"]}`
- Appears at the end of the data rows, if applicable.
Example:
```json
{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]}
{"progress":{"read_rows":"8","read_bytes":"168","written_rows":"0","written_bytes":"0","total_rows_to_read":"2","result_rows":"0","result_bytes":"0","elapsed_ns":"0"}}
{"data":["1", "John Doe"]}
{"data":["2", "Joe Doe"]}
{"statistics": {"rows":2, "rows_before_limit_at_least":8, "elapsed":0.001995, "rows_read":8, "bytes_read":168}}
```
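For reference, this output is requested like any other format via the `FORMAT` clause; a hypothetical query producing output like the example above might look as follows (table and column names are illustrative only):

```sql
-- Hypothetical query: any SELECT can ask for this format via the FORMAT clause.
SELECT id, name
FROM users
FORMAT JSONCompactWithProgress;
```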
## JSONEachRow {#jsoneachrow}
In this format, ClickHouse outputs each row as a separate, newline-delimited JSON object.


@ -233,6 +233,16 @@ Features:
- Useful tools: Zookeeper data exploration, query EXPLAIN, kill queries, etc.
- Visualization metric charts: queries and resource usage, number of merges/mutation, merge performance, query performance, etc.
### CKibana {#ckibana}
[CKibana](https://github.com/TongchengOpenSource/ckibana) is a lightweight service that allows you to effortlessly search, explore, and visualize ClickHouse data using the native Kibana UI.
Features:
- Translates chart requests from the native Kibana UI into ClickHouse query syntax.
- Supports advanced features such as sampling and caching to enhance query performance.
- Minimizes the learning cost for users after migrating from Elasticsearch to ClickHouse.
## Commercial {#commercial}
### DataGrip {#datagrip}

View File

@ -1463,26 +1463,29 @@ Examples:
## logger {#logger}
Logging settings.
The location and format of log messages.
Keys:
- `level` Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`.
- `log` The log file. Contains all the entries according to `level`.
- `errorlog` Error log file.
- `size` Size of the file. Applies to `log` and `errorlog`. Once the file reaches `size`, ClickHouse archives and renames it, and creates a new log file in its place.
- `count` The number of archived log files that ClickHouse stores.
- `console` Send `log` and `errorlog` to the console instead of file. To enable, set to `1` or `true`.
- `console_log_level` Logging level for console. Defaults to `level`.
- `use_syslog` - Log to syslog as well.
- `syslog_level` - Logging level for logging to syslog.
- `stream_compress` Compress `log` and `errorlog` with `lz4` stream compression. To enable, set to `1` or `true`.
- `formatting` Specify log format to be printed in console log (currently only `json` supported).
- `level` Log level. Acceptable values: `none` (turn logging off), `fatal`, `critical`, `error`, `warning`, `notice`, `information`,
`debug`, `trace`, `test`
- `log` The path to the log file.
- `errorlog` The path to the error log file.
- `size` Rotation policy: Maximum size of the log files in bytes. Once the log file size exceeds this threshold, it is renamed and archived, and a new log file is created.
- `count` Rotation policy: The maximum number of archived log files ClickHouse keeps.
- `stream_compress` Compress log messages using LZ4. Set to `1` or `true` to enable.
- `console` Do not write log messages to log files; print them to the console instead. Set to `1` or `true` to enable. Default is
`1` if ClickHouse does not run in daemon mode, `0` otherwise.
- `console_log_level` Log level for console output. Defaults to `level`.
- `formatting` Log format for console output. Currently, only `json` is supported.
- `use_syslog` - Also forward log output to syslog.
- `syslog_level` - Log level for logging to syslog.
Both log and error log file names (only file names, not directories) support date and time format specifiers.
**Log format specifiers**
**Format specifiers**
Using the following format specifiers, you can define a pattern for the resulting file name. “Example” column shows possible results for `2023-07-06 18:32:07`.
File names in `log` and `errorlog` paths support the format specifiers below for the resulting file name (the directory part does not support them).
Column “Example” shows the output at `2023-07-06 18:32:07`.
| Specifier | Description | Example |
|-------------|---------------------------------------------------------------------------------------------------------------------|--------------------------|
@ -1537,18 +1540,37 @@ Using the following format specifiers, you can define a pattern for the resultin
</logger>
```
Writing to the console can be configured. Config example:
To print log messages only to the console:
``` xml
<logger>
<level>information</level>
<console>1</console>
<console>true</console>
</logger>
```
**Per-level Overrides**
The log level of individual loggers can be overridden. For example, to mute all messages from the loggers "Backup" and "RBAC":
```xml
<logger>
<levels>
<logger>
<name>Backup</name>
<level>none</level>
</logger>
<logger>
<name>RBAC</name>
<level>none</level>
</logger>
</levels>
</logger>
```
### syslog
Writing to the syslog is also supported. Config example:
To write log messages additionally to syslog:
``` xml
<logger>
@ -1562,14 +1584,12 @@ Writing to the syslog is also supported. Config example:
</logger>
```
Keys for syslog:
Keys for `<syslog>`:
- use_syslog — Required setting if you want to write to the syslog.
- address — The host\[:port\] of syslogd. If omitted, the local daemon is used.
- hostname — Optional. The name of the host that logs are sent from.
- facility — [The syslog facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility) in uppercase letters with the “LOG_” prefix: (`LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, and so on).
Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
- format Message format. Possible values: `bsd` and `syslog`.
- `address` — The address of syslog in format `host[:port]`. If omitted, the local daemon is used.
- `hostname` — The name of the host from which logs are sent. Optional.
- `facility` — The syslog [facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility). Must be specified in uppercase with a “LOG_” prefix, e.g. `LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, etc. Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
- `format` Log message format. Possible values: `bsd` and `syslog`.
### Log formats
@ -1588,6 +1608,7 @@ You can specify the log format that will be outputted in the console log. Curren
"source_line": "192"
}
```
To enable JSON logging support, use the following snippet:
```xml

View File

@ -47,6 +47,8 @@ keeper foo bar
- `ls '[path]'` -- Lists the nodes for the given path (default: cwd)
- `cd '[path]'` -- Changes the working path (default `.`)
- `cp '<src>' '<dest>'` -- Copies 'src' node to 'dest' path
- `mv '<src>' '<dest>'` -- Moves 'src' node to the 'dest' path
- `exists '<path>'` -- Returns `1` if node exists, `0` otherwise
- `set '<path>' <value> [version]` -- Updates the node's value. Only updates if version matches (default: -1)
- `create '<path>' <value> [mode]` -- Creates new node with the set value

View File

@ -1617,45 +1617,348 @@ The calculation is performed relative to specific points in time:
If unit `WEEK` was specified, `toStartOfInterval` assumes that weeks start on Monday. Note that this behavior is different from that of function `toStartOfWeek` in which weeks start by default on Sunday.
**See Also**
**Syntax**
```sql
toStartOfInterval(value, INTERVAL x unit[, time_zone])
toStartOfInterval(value, INTERVAL x unit[, origin[, time_zone]])
```
The second overload emulates TimescaleDB's `time_bucket()` function and PostgreSQL's `date_bin()` function, e.g.
```sql
SELECT toStartOfInterval(toDateTime('2023-01-01 14:45:00'), INTERVAL 1 MINUTE, toDateTime('2023-01-01 14:35:30'));
```
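Assuming `date_bin`-style semantics, buckets are aligned to the given origin rather than to the epoch. A small sketch (the expected value is an inference from that description, not reference output):

```sql
-- Sketch: with a 10-minute interval and origin 14:35:30, the next bucket
-- starts at 14:45:30, so 14:45:00 is expected to map to 14:35:30.
SELECT toStartOfInterval(
    toDateTime('2023-01-01 14:45:00'),
    INTERVAL 10 MINUTE,
    toDateTime('2023-01-01 14:35:30')) AS bucket;
```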
**See Also**
- [date_trunc](#date_trunc)
## toTime
Converts a date with time to a certain fixed date, while preserving the time.
**Syntax**
```sql
toTime(date[,timezone])
```
**Arguments**
- `date` — Date to convert to a time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
- `timezone` (optional) — Timezone for the returned value. [String](../data-types/string.md).
**Returned value**
- DateTime with date equated to `1970-01-02` while preserving the time. [DateTime](../data-types/datetime.md).
:::note
If the `date` input argument contained sub-second components,
they will be dropped in the returned `DateTime` value with second-accuracy.
:::
**Example**
Query:
```sql
SELECT toTime(toDateTime64('1970-12-10 01:20:30.3000',3)) AS result, toTypeName(result);
```
Result:
```response
┌──────────────result─┬─toTypeName(result)─┐
│ 1970-01-02 01:20:30 │ DateTime │
└─────────────────────┴────────────────────┘
```
## toRelativeYearNum
Converts a date, or date with time, to the number of the year, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of years elapsed since a certain fixed point in the past.
**Syntax**
```sql
toRelativeYearNum(date)
```
**Arguments**
- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Returned value**
- The number of years from a fixed reference point in the past. [UInt16](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT
toRelativeYearNum(toDate('2002-12-08')) AS y1,
toRelativeYearNum(toDate('2010-10-26')) AS y2
```
Result:
```response
┌───y1─┬───y2─┐
│ 2002 │ 2010 │
└──────┴──────┘
```
## toRelativeQuarterNum
Converts a date, or date with time, to the number of the quarter, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of quarters elapsed since a certain fixed point in the past.
**Syntax**
```sql
toRelativeQuarterNum(date)
```
**Arguments**
- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Returned value**
- The number of quarters from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT
toRelativeQuarterNum(toDate('1993-11-25')) AS q1,
toRelativeQuarterNum(toDate('2005-01-05')) AS q2
```
Result:
```response
┌───q1─┬───q2─┐
│ 7975 │ 8020 │
└──────┴──────┘
```
## toRelativeMonthNum
Converts a date, or date with time, to the number of the month, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of months elapsed since a certain fixed point in the past.
**Syntax**
```sql
toRelativeMonthNum(date)
```
**Arguments**
- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Returned value**
- The number of months from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT
toRelativeMonthNum(toDate('2001-04-25')) AS m1,
toRelativeMonthNum(toDate('2009-07-08')) AS m2
```
Result:
```response
┌────m1─┬────m2─┐
│ 24016 │ 24115 │
└───────┴───────┘
```
## toRelativeWeekNum
Converts a date, or date with time, to the number of the week, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of weeks elapsed since a certain fixed point in the past.
**Syntax**
```sql
toRelativeWeekNum(date)
```
**Arguments**
- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Returned value**
- The number of weeks from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT
toRelativeWeekNum(toDate('2000-02-29')) AS w1,
toRelativeWeekNum(toDate('2001-01-12')) AS w2
```
Result:
```response
┌───w1─┬───w2─┐
│ 1574 │ 1619 │
└──────┴──────┘
```
## toRelativeDayNum
Converts a date, or date with time, to the number of the day, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of days elapsed since a certain fixed point in the past.
**Syntax**
```sql
toRelativeDayNum(date)
```
**Arguments**
- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Returned value**
- The number of days from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT
toRelativeDayNum(toDate('1993-10-05')) AS d1,
toRelativeDayNum(toDate('2000-09-20')) AS d2
```
Result:
```response
┌───d1─┬────d2─┐
│ 8678 │ 11220 │
└──────┴───────┘
```
## toRelativeHourNum
Converts a date, or date with time, to the number of the hour, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of hours elapsed since a certain fixed point in the past.
**Syntax**
```sql
toRelativeHourNum(date)
```
**Arguments**
- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Returned value**
- The number of hours from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT
toRelativeHourNum(toDateTime('1993-10-05 05:20:36')) AS h1,
toRelativeHourNum(toDateTime('2000-09-20 14:11:29')) AS h2
```
Result:
```response
┌─────h1─┬─────h2─┐
│ 208276 │ 269292 │
└────────┴────────┘
```
## toRelativeMinuteNum
Converts a date, or date with time, to the number of the minute, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of minutes elapsed since a certain fixed point in the past.
**Syntax**
```sql
toRelativeMinuteNum(date)
```
**Arguments**
- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Returned value**
- The number of minutes from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT
toRelativeMinuteNum(toDateTime('1993-10-05 05:20:36')) AS m1,
toRelativeMinuteNum(toDateTime('2000-09-20 14:11:29')) AS m2
```
Result:
```response
┌───────m1─┬───────m2─┐
│ 12496580 │ 16157531 │
└──────────┴──────────┘
```
## toRelativeSecondNum
Converts a date, or date with time, to the number of the second, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of seconds elapsed since a certain fixed point in the past.
**Syntax**
```sql
toRelativeSecondNum(date)
```
**Arguments**
- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Returned value**
- The number of seconds from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT
toRelativeSecondNum(toDateTime('1993-10-05 05:20:36')) AS s1,
toRelativeSecondNum(toDateTime('2000-09-20 14:11:29')) AS s2
```
Result:
```response
┌────────s1─┬────────s2─┐
│ 749794836 │ 969451889 │
└───────────┴───────────┘
```
## toISOYear
@ -3884,19 +4187,29 @@ Result:
└───────────────────────────────────────────────────────────────────────┘
```
## timeSlots(StartTime, Duration,\[, Size\])
## timeSlots
For a time interval starting at `StartTime` and continuing for `Duration` seconds, it returns an array of moments in time, consisting of points from this interval rounded down to `Size` seconds. `Size` is an optional parameter set to 1800 (30 minutes) by default.
This is necessary, for example, when searching for pageviews in the corresponding session.
Accepts `DateTime` and `DateTime64` as the `StartTime` argument. For `DateTime`, the `Duration` and `Size` arguments must be `UInt32`. For `DateTime64`, they must be `Decimal64`.
Returns an array of `DateTime`/`DateTime64` (the return type matches the type of `StartTime`). For `DateTime64`, the return value's scale can differ from the scale of `StartTime`: the highest scale among all given arguments is taken.
Example:
**Syntax**
```sql
timeSlots(StartTime, Duration[, Size])
```
**Example**
```sql
SELECT timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600));
SELECT timeSlots(toDateTime('1980-12-12 21:01:02', 'UTC'), toUInt32(600), 299);
SELECT timeSlots(toDateTime64('1980-12-12 21:01:02.1234', 4, 'UTC'), toDecimal64(600.1, 1), toDecimal64(299, 0));
```
Result:
``` text
┌─timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600))─┐
│ ['2012-01-01 12:00:00','2012-01-01 12:30:00'] │

View File

@ -20,10 +20,10 @@ overlay(s, replace, offset[, length])
**Parameters**
- `input`: A string type [String](../data-types/string.md).
- `s`: A string type [String](../data-types/string.md).
- `replace`: A string type [String](../data-types/string.md).
- `offset`: An integer type [Int](../data-types/int-uint.md). If `offset` is negative, it is counted from the end of the `input` string.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within input to be replaced. If `length` is not specified, the number of bytes removed from `input` equals the length of `replace`; otherwise `length` bytes are removed.
- `offset`: An integer type [Int](../data-types/int-uint.md) (1-based). If `offset` is negative, it is counted from the end of the string `s`.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within the input string `s` to be replaced. If `length` is not specified, the number of bytes removed from `s` equals the length of `replace`; otherwise `length` bytes are removed.
**Returned value**
@ -32,22 +32,35 @@ overlay(s, replace, offset[, length])
**Example**
```sql
SELECT overlay('ClickHouse SQL', 'CORE', 12) AS res;
SELECT overlay('My father is from Mexico.', 'mother', 4) AS res;
```
Result:
```text
┌─res─────────────┐
│ ClickHouse CORE │
└─────────────────┘
┌─res──────────────────────┐
│ My mother is from Mexico.│
└──────────────────────────┘
```
```sql
SELECT overlay('My father is from Mexico.', 'dad', 4, 6) AS res;
```
Result:
```text
┌─res───────────────────┐
│ My dad is from Mexico.│
└───────────────────────┘
```
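A negative `offset` counts backwards from the end of `s`. The following sketch illustrates this; the expected result is inferred from the parameter description above rather than copied from reference output:

```sql
-- Sketch: offset -7 points at the 'M' of 'Mexico.' counted from the end,
-- so the last 7 bytes are expected to become 'Canada.'.
SELECT overlay('My father is from Mexico.', 'Canada.', -7) AS res;
-- expected: My father is from Canada.
```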
## overlayUTF8
Replace part of the string `s` with another string `replace`, starting at the 1-based index `offset`.
Assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.
Assumes that the string contains valid UTF-8 encoded text.
If this assumption is violated, no exception is thrown and the result is undefined.
**Syntax**
@ -59,8 +72,8 @@ overlayUTF8(s, replace, offset[, length])
- `s`: A string type [String](../data-types/string.md).
- `replace`: A string type [String](../data-types/string.md).
- `offset`: An integer type [Int](../data-types/int-uint.md). If `offset` is negative, it is counted from the end of the `input` string.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within input to be replaced. If `length` is not specified, the number of characters removed from `input` equals the length of `replace`; otherwise `length` characters are removed.
- `offset`: An integer type [Int](../data-types/int-uint.md) (1-based). If `offset` is negative, it is counted from the end of the input string `s`.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within the input string `s` to be replaced. If `length` is not specified, the number of characters removed from `s` equals the length of `replace`; otherwise `length` characters are removed.
**Returned value**
@ -69,15 +82,15 @@ overlayUTF8(s, replace, offset[, length])
**Example**
```sql
SELECT overlayUTF8('ClickHouse是一款OLAP数据库', '开源', 12, 2) AS res;
SELECT overlayUTF8('Mein Vater ist aus Österreich.', 'der Türkei', 20) AS res;
```
Result:
```text
┌─res────────────────────────┐
ClickHouse是开源OLAP数据库
└────────────────────────────┘
┌─res────────────────────────────┐
│ Mein Vater ist aus der Türkei. │
└────────────────────────────────┘
```
## replaceOne

View File

@ -3906,7 +3906,7 @@ Result:
## toDateTime64
Converts the argument to the [DateTime64](../data-types/datetime64.md) data type.
Converts an input value to a value of type [DateTime64](../data-types/datetime64.md).
**Syntax**
@ -3918,7 +3918,7 @@ toDateTime64(expr, scale, [timezone])
- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` - Time zone of the specified datetime64 object.
- `timezone` (optional) - Time zone of the specified datetime64 object.
**Returned value**
@ -3977,10 +3977,137 @@ SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul') AS value, toTypeN
## toDateTime64OrZero
Like [toDateTime64](#todatetime64), this function converts an input value to a value of type [DateTime64](../data-types/datetime64.md) but returns the min value of [DateTime64](../data-types/datetime64.md) if an invalid argument is received.
**Syntax**
``` sql
toDateTime64OrZero(expr, scale, [timezone])
```
**Arguments**
- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` (optional) - Time zone of the specified DateTime64 object.
**Returned value**
- A calendar date and time of day, with sub-second precision, otherwise the minimum value of `DateTime64`: `1970-01-01 01:00:00.000`. [DateTime64](../data-types/datetime64.md).
**Example**
Query:
```sql
SELECT toDateTime64OrZero('2008-10-12 00:00:00 00:30:30', 3) AS invalid_arg
```
Result:
```response
┌─────────────invalid_arg─┐
│ 1970-01-01 01:00:00.000 │
└─────────────────────────┘
```
**See also**
- [toDateTime64](#todatetime64).
- [toDateTime64OrNull](#todatetime64ornull).
- [toDateTime64OrDefault](#todatetime64ordefault).
## toDateTime64OrNull
Like [toDateTime64](#todatetime64), this function converts an input value to a value of type [DateTime64](../data-types/datetime64.md) but returns `NULL` if an invalid argument is received.
**Syntax**
``` sql
toDateTime64OrNull(expr, scale, [timezone])
```
**Arguments**
- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` (optional) - Time zone of the specified DateTime64 object.
**Returned value**
- A calendar date and time of day, with sub-second precision, otherwise `NULL`. [DateTime64](../data-types/datetime64.md)/[NULL](../data-types/nullable.md).
**Example**
Query:
```sql
SELECT
toDateTime64OrNull('1976-10-18 00:00:00.30', 3) AS valid_arg,
toDateTime64OrNull('1976-10-18 00:00:00 30', 3) AS invalid_arg
```
Result:
```response
┌───────────────valid_arg─┬─invalid_arg─┐
│ 1976-10-18 00:00:00.300 │ ᴺᵁᴸᴸ │
└─────────────────────────┴─────────────┘
```
**See also**
- [toDateTime64](#todatetime64).
- [toDateTime64OrZero](#todatetime64orzero).
- [toDateTime64OrDefault](#todatetime64ordefault).
## toDateTime64OrDefault
Like [toDateTime64](#todatetime64), this function converts an input value to a value of type [DateTime64](../data-types/datetime64.md),
but returns either the minimum value of [DateTime64](../data-types/datetime64.md)
or the provided default if an invalid argument is received.
**Syntax**
``` sql
toDateTime64OrDefault(expr, scale, [timezone, default])
```
**Arguments**
- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` (optional) - Time zone of the specified DateTime64 object.
- `default` (optional) - Default value to return if an invalid argument is received. [DateTime64](../data-types/datetime64.md).
**Returned value**
- A calendar date and time of day, with sub-second precision, otherwise the minimum value of `DateTime64` or the `default` value if provided. [DateTime64](../data-types/datetime64.md).
**Example**
Query:
```sql
SELECT
toDateTime64OrDefault('1976-10-18 00:00:00 30', 3) AS invalid_arg,
toDateTime64OrDefault('1976-10-18 00:00:00 30', 3, 'UTC', toDateTime64('2001-01-01 00:00:00.00',3)) AS invalid_arg_with_default
```
Result:
```response
┌─────────────invalid_arg─┬─invalid_arg_with_default─┐
│ 1970-01-01 01:00:00.000 │ 2000-12-31 23:00:00.000 │
└─────────────────────────┴──────────────────────────┘
```
**See also**
- [toDateTime64](#todatetime64).
- [toDateTime64OrZero](#todatetime64orzero).
- [toDateTime64OrNull](#todatetime64ornull).
## toDecimal32
Converts an input value to a value of type [`Decimal(9, S)`](../data-types/decimal.md) with scale of `S`. Throws an exception in case of an error.

View File

@ -24,9 +24,11 @@ DELETE FROM hits WHERE Title LIKE '%hello%';
## Lightweight `DELETE` does not delete data immediately
Lightweight `DELETE` is implemented as a [mutation](/en/sql-reference/statements/alter#mutations), which is executed asynchronously in the background by default. The statement is going to return almost immediately, but the data can still be visible to queries until the mutation is finished.
Lightweight `DELETE` is implemented as a [mutation](/en/sql-reference/statements/alter#mutations) that marks rows as deleted but does not immediately physically delete them.
The mutation marks rows as deleted, and at that point, they will no longer show up in query results. It does not physically delete the data, this will happen during the next merge. As a result, it is possible that for an unspecified period, data is not actually deleted from storage and is only marked as deleted.
By default, `DELETE` statements wait until marking the rows as deleted is completed before returning. This can take a long time if the amount of data is large. Alternatively, you can run it asynchronously in the background using the setting [`lightweight_deletes_sync`](/en/operations/settings/settings#lightweight_deletes_sync). If disabled, the `DELETE` statement returns immediately, but the data can still be visible to queries until the background mutation is finished.
The mutation does not physically delete the rows that have been marked as deleted; this only happens during the next merge. As a result, it is possible that for an unspecified period, data is not actually deleted from storage and is only marked as deleted.
If you need to guarantee that your data is deleted from storage in a predictable time, consider using the table setting [`min_age_to_force_merge_seconds`](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings#min_age_to_force_merge_seconds). Or you can use the [ALTER TABLE ... DELETE](/en/sql-reference/statements/alter/delete) command. Note that deleting data using `ALTER TABLE ... DELETE` may consume significant resources as it recreates all affected parts.
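As a sketch of the two approaches (reusing the `hits` table from the example above; the setting value is illustrative):

```sql
-- Run the lightweight delete asynchronously; the statement returns immediately
-- and the rows disappear from query results once the mutation marks them.
DELETE FROM hits WHERE Title LIKE '%hello%' SETTINGS lightweight_deletes_sync = 0;

-- Encourage background merges so marked rows are physically removed within a
-- more predictable time frame (value in seconds; illustrative).
ALTER TABLE hits MODIFY SETTING min_age_to_force_merge_seconds = 3600;
```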

View File

@ -15,7 +15,14 @@ The `FROM` clause specifies the source to read data from:
A subquery is another `SELECT` query that may be specified in parentheses inside the `FROM` clause.
`FROM` clause can contain multiple data sources, separated by commas, which is equivalent of performing [CROSS JOIN](../../../sql-reference/statements/select/join.md) on them.
The `FROM` clause can contain multiple data sources, separated by commas, which is equivalent to performing a [CROSS JOIN](../../../sql-reference/statements/select/join.md) on them.
`FROM` can optionally appear before a `SELECT` clause. This is a ClickHouse-specific extension of standard SQL which makes `SELECT` statements easier to read. Example:
```sql
FROM table
SELECT *
```
## FINAL Modifier
@ -45,19 +52,19 @@ As an alternative to using `FINAL`, it is sometimes possible to use different qu
### Example Usage
**Using the `FINAL` keyword**
Using the `FINAL` keyword
```sql
SELECT x, y FROM mytable FINAL WHERE x > 1;
```
**Using `FINAL` as a query-level setting**
Using `FINAL` as a query-level setting
```sql
SELECT x, y FROM mytable WHERE x > 1 SETTINGS final = 1;
```
**Using `FINAL` as a session-level setting**
Using `FINAL` as a session-level setting
```sql
SET final = 1;

View File

@ -459,6 +459,8 @@ public:
bool isParallelizeMergePrepareNeeded() const override { return is_parallelize_merge_prepare_needed; }
constexpr static bool parallelizeMergeWithKey() { return true; }
void parallelizeMergePrepare(AggregateDataPtrs & places, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled) const override
{
if constexpr (is_parallelize_merge_prepare_needed)

View File

@ -145,6 +145,8 @@ public:
virtual bool isParallelizeMergePrepareNeeded() const { return false; }
constexpr static bool parallelizeMergeWithKey() { return false; }
virtual void parallelizeMergePrepare(AggregateDataPtrs & /*places*/, ThreadPool & /*thread_pool*/, std::atomic<bool> & /*is_cancelled*/) const
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "parallelizeMergePrepare() with thread pool parameter isn't implemented for {} ", getName());
@ -169,7 +171,7 @@ public:
/// Merges states (on which src places points to) with other states (on which dst places points to) of current aggregation function
/// then destroy states (on which src places points to).
virtual void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * src_places, size_t size, size_t offset, Arena * arena) const = 0;
virtual void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * src_places, size_t size, size_t offset, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled, Arena * arena) const = 0;
/// Serializes state (to transmit it over the network, for example).
virtual void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> version = std::nullopt) const = 0; /// NOLINT
@ -499,11 +501,15 @@ public:
static_cast<const Derived *>(this)->merge(places[i] + place_offset, rhs[i], arena);
}
void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * rhs_places, size_t size, size_t offset, Arena * arena) const override
void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * rhs_places, size_t size, size_t offset, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled, Arena * arena) const override
{
for (size_t i = 0; i < size; ++i)
{
if constexpr (Derived::parallelizeMergeWithKey())
static_cast<const Derived *>(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, thread_pool, is_cancelled, arena);
else
static_cast<const Derived *>(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, arena);
static_cast<const Derived *>(this)->destroy(rhs_places[i] + offset);
}
}

View File

@ -101,6 +101,13 @@ public:
auto merge(const UniqExactSet & other, ThreadPool * thread_pool = nullptr, std::atomic<bool> * is_cancelled = nullptr)
{
/// If the size is large, we may convert the singleLevelHash to twoLevelHash and merge in parallel.
if (other.size() > 40000)
{
if (isSingleLevel())
convertToTwoLevel();
}
if (isSingleLevel() && other.isTwoLevel())
convertToTwoLevel();

View File

@ -913,11 +913,15 @@ void RestorerFromBackup::createTable(const QualifiedTableName & table_name)
table_info.database = DatabaseCatalog::instance().getDatabase(table_name.database);
DatabasePtr database = table_info.database;
auto query_context = Context::createCopy(context);
query_context->setSetting("database_replicated_allow_explicit_uuid", 3);
query_context->setSetting("database_replicated_allow_replicated_engine_arguments", 3);
/// Execute CREATE TABLE query (we call IDatabase::createTableRestoredFromBackup() to allow the database to do some
/// database-specific things).
database->createTableRestoredFromBackup(
create_table_query,
context,
query_context,
restore_coordination,
std::chrono::duration_cast<std::chrono::milliseconds>(create_table_timeout).count());
}

View File

@ -1896,6 +1896,21 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
/// Temporarily apply query settings to context.
std::optional<Settings> old_settings;
SCOPE_EXIT_SAFE({
try
{
/// We need to park ParallelFormatting threads,
/// because they can use settings from global context
/// and it can lead to data race with `setSettings`
resetOutput();
}
catch (...)
{
if (!have_error)
{
client_exception = std::make_unique<Exception>(getCurrentExceptionMessageAndPattern(print_stack_trace), getCurrentExceptionCode());
have_error = true;
}
}
if (old_settings)
client_context->setSettings(*old_settings);
});

View File

@ -168,7 +168,7 @@ std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::g
{ return tryGetEntry(pool, timeouts, fail_message, settings, &table_to_check, /*async_callback=*/ {}); };
return getManyImpl(settings, pool_mode, try_get_entry,
/*skip_unavailable_endpoints=*/ std::nullopt,
/*skip_unavailable_endpoints=*/ false, /// skip_unavailable_endpoints is used to get the min number of entries, and we need at least one
/*priority_func=*/ {},
settings.distributed_insert_skip_read_only_replicas);
}

View File

@ -42,7 +42,7 @@ public:
size_t max_error_cap = DBMS_CONNECTION_POOL_WITH_FAILOVER_MAX_ERROR_COUNT);
using Entry = IConnectionPool::Entry;
using PoolWithFailoverBase<IConnectionPool>::isTryResultInvalid;
using PoolWithFailoverBase<IConnectionPool>::getValidTryResult;
/** Allocates connection to work. */
Entry get(const ConnectionTimeouts & timeouts) override;
@ -98,7 +98,7 @@ public:
std::vector<Base::ShuffledPool> getShuffledPools(const Settings & settings, GetPriorityFunc priority_func = {}, bool use_slowdown_count = false);
size_t getMaxErrorCup() const { return Base::max_error_cap; }
size_t getMaxErrorCap() const { return Base::max_error_cap; }
void updateSharedError(std::vector<ShuffledPool> & shuffled_pools)
{

View File

@ -327,7 +327,7 @@ HedgedConnectionsFactory::State HedgedConnectionsFactory::processFinishedConnect
ShuffledPool & shuffled_pool = shuffled_pools[index];
LOG_INFO(log, "Connection failed at try №{}, reason: {}", (shuffled_pool.error_count + 1), fail_message);
shuffled_pool.error_count = std::min(pool->getMaxErrorCup(), shuffled_pool.error_count + 1);
shuffled_pool.error_count = std::min(pool->getMaxErrorCap(), shuffled_pool.error_count + 1);
shuffled_pool.slowdown_count = 0;
if (shuffled_pool.error_count >= max_tries)

View File

@ -122,6 +122,20 @@ public:
return result.entry.isNull() || !result.is_usable || (skip_read_only_replicas && result.is_readonly);
}
TryResult getValidTryResult(const std::vector<TryResult> & results, bool skip_read_only_replicas) const
{
if (results.empty())
throw DB::Exception(DB::ErrorCodes::ALL_CONNECTION_TRIES_FAILED, "Cannot get any valid connection because all connection tries failed");
auto result = results.front();
if (isTryResultInvalid(result, skip_read_only_replicas))
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR,
"Got an invalid connection result: entry.isNull {}, is_usable {}, is_up_to_date {}, delay {}, is_readonly {}, skip_read_only_replicas {}",
result.entry.isNull(), result.is_usable, result.is_up_to_date, result.delay, result.is_readonly, skip_read_only_replicas);
return result;
}
size_t getPoolSize() const { return nested_pools.size(); }
protected:

View File

@ -67,10 +67,18 @@ std::string SigsegvErrorString(const siginfo_t & info, [[maybe_unused]] const uc
= info.si_addr == nullptr ? "NULL pointer"s : (shouldShowAddress(info.si_addr) ? fmt::format("{}", info.si_addr) : ""s);
const std::string_view access =
#if defined(__x86_64__) && !defined(OS_FREEBSD) && !defined(OS_DARWIN) && !defined(__arm__) && !defined(__powerpc__)
(context.uc_mcontext.gregs[REG_ERR] & 0x02) ? "write" : "read";
#if defined(__arm__)
"<not available on ARM>";
#elif defined(__powerpc__)
"<not available on PowerPC>";
#elif defined(OS_DARWIN)
"<not available on Darwin>";
#elif defined(OS_FREEBSD)
"<not available on FreeBSD>";
#elif !defined(__x86_64__)
"<not available>";
#else
"";
(context.uc_mcontext.gregs[REG_ERR] & 0x02) ? "write" : "read";
#endif
std::string_view message;

View File

@ -55,10 +55,29 @@ void CompressedWriteBuffer::nextImpl()
out.write(compressed_buffer.data(), compressed_size);
}
/// Increase buffer size for next data if adaptive buffer size is used and nextImpl was called because of end of buffer.
if (!available() && use_adaptive_buffer_size && memory.size() < adaptive_buffer_max_size)
{
memory.resize(std::min(memory.size() * 2, adaptive_buffer_max_size));
BufferBase::set(memory.data(), memory.size(), 0);
}
}
CompressedWriteBuffer::CompressedWriteBuffer(WriteBuffer & out_, CompressionCodecPtr codec_, size_t buf_size)
: BufferWithOwnMemory<WriteBuffer>(buf_size), out(out_), codec(std::move(codec_))
void CompressedWriteBuffer::finalizeImpl()
{
/// Don't try to resize buffer in nextImpl.
use_adaptive_buffer_size = false;
next();
}
CompressedWriteBuffer::CompressedWriteBuffer(
WriteBuffer & out_, CompressionCodecPtr codec_, size_t buf_size, bool use_adaptive_buffer_size_, size_t adaptive_buffer_initial_size)
: BufferWithOwnMemory<WriteBuffer>(use_adaptive_buffer_size_ ? adaptive_buffer_initial_size : buf_size)
, out(out_)
, codec(std::move(codec_))
, use_adaptive_buffer_size(use_adaptive_buffer_size_)
, adaptive_buffer_max_size(buf_size)
{
}

View File

@ -19,7 +19,9 @@ public:
explicit CompressedWriteBuffer(
WriteBuffer & out_,
CompressionCodecPtr codec_ = CompressionCodecFactory::instance().getDefaultCodec(),
size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE);
size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE,
bool use_adaptive_buffer_size_ = false,
size_t adaptive_buffer_initial_size = DBMS_DEFAULT_INITIAL_ADAPTIVE_BUFFER_SIZE);
~CompressedWriteBuffer() override;
@ -45,10 +47,17 @@ public:
private:
void nextImpl() override;
void finalizeImpl() override;
WriteBuffer & out;
CompressionCodecPtr codec;
/// If true, the size of internal buffer will be exponentially increased up to
/// adaptive_buffer_max_size after each nextImpl call. It can be used to avoid
/// large buffer allocation when actual size of written data is small.
bool use_adaptive_buffer_size;
size_t adaptive_buffer_max_size;
PODArray<char> compressed_buffer;
};

View File

@ -20,6 +20,9 @@ static constexpr auto DBMS_DEFAULT_POLL_INTERVAL = 10;
/// The size of the I/O buffer by default.
static constexpr auto DBMS_DEFAULT_BUFFER_SIZE = 1048576ULL;
/// The initial size of adaptive I/O buffer by default.
static constexpr auto DBMS_DEFAULT_INITIAL_ADAPTIVE_BUFFER_SIZE = 16384ULL;
static constexpr auto PADDING_FOR_SIMD = 64;
/** Which blocks by default read the data (by number of rows).
@ -40,7 +43,7 @@ static constexpr auto SHOW_CHARS_ON_SYNTAX_ERROR = ptrdiff_t(160);
/// each period reduces the error counter by 2 times
/// too short a period can cause errors to disappear immediately after creation.
static constexpr auto DBMS_CONNECTION_POOL_WITH_FAILOVER_DEFAULT_DECREASE_ERROR_PERIOD = 60;
/// replica error max cap, this is to prevent replica from accumulating too many errors and taking to long to recover.
/// replica error max cap, this is to prevent replica from accumulating too many errors and taking too long to recover.
static constexpr auto DBMS_CONNECTION_POOL_WITH_FAILOVER_MAX_ERROR_COUNT = 1000;
/// The boundary on which the blocks for asynchronous file operations should be aligned.

View File

@ -710,7 +710,8 @@ class IColumn;
M(UInt64, max_distributed_depth, 5, "Maximum distributed query depth", 0) \
M(Bool, database_replicated_always_detach_permanently, false, "Execute DETACH TABLE as DETACH TABLE PERMANENTLY if database engine is Replicated", 0) \
M(Bool, database_replicated_allow_only_replicated_engine, false, "Allow to create only Replicated tables in database with engine Replicated", 0) \
M(Bool, database_replicated_allow_replicated_engine_arguments, true, "Allow to create only Replicated tables in database with engine Replicated with explicit arguments", 0) \
M(UInt64, database_replicated_allow_replicated_engine_arguments, 0, "0 - Don't allow to explicitly specify ZooKeeper path and replica name for *MergeTree tables in Replicated databases. 1 - Allow. 2 - Allow, but ignore the specified path and use default one instead.", 0) \
M(UInt64, database_replicated_allow_explicit_uuid, 0, "0 - Don't allow to explicitly specify UUIDs for tables in Replicated databases. 1 - Allow. 2 - Allow, but ignore the specified UUID and generate a random one instead.", 0) \
M(Bool, database_replicated_allow_heavy_create, false, "Allow long-running DDL queries (CREATE AS SELECT and POPULATE) in Replicated database engine. Note that it can block DDL queue for a long time.", 0) \
M(Bool, cloud_mode, false, "Only available in ClickHouse Cloud", 0) \
M(UInt64, cloud_mode_engine, 1, "Only available in ClickHouse Cloud", 0) \

View File

@ -76,12 +76,14 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"create_if_not_exists", false, false, "New setting."},
{"allow_materialized_view_with_bad_select", true, true, "Support (but not enable yet) stricter validation in CREATE MATERIALIZED VIEW"},
{"output_format_always_quote_identifiers", false, false, "New setting."},
{"output_format_identifier_quoting_style", "Backticks", "Backticks", "New setting."}
{"output_format_identifier_quoting_style", "Backticks", "Backticks", "New setting."},
{"database_replicated_allow_replicated_engine_arguments", 1, 0, "Don't allow explicit arguments by default"},
{"database_replicated_allow_explicit_uuid", 0, 0, "Added a new setting to disallow explicitly specifying table UUID"},
}
},
{"24.8",
{
{"rows_before_aggregation", true, true, "Provide exact value for rows_before_aggregation statistic, represents the number of rows read before aggregation"},
{"rows_before_aggregation", false, false, "Provide exact value for rows_before_aggregation statistic, represents the number of rows read before aggregation"},
{"restore_replace_external_table_functions_to_null", false, false, "New setting."},
{"restore_replace_external_engines_to_null", false, false, "New setting."},
{"input_format_json_max_depth", 1000000, 1000, "It was unlimited in previous versions, but that was unsafe."},

View File

@ -420,6 +420,21 @@ bool ISerialization::isEphemeralSubcolumn(const DB::ISerialization::SubstreamPat
return path[last_elem].type == Substream::VariantElementNullMap;
}
bool ISerialization::isDynamicSubcolumn(const DB::ISerialization::SubstreamPath & path, size_t prefix_len)
{
if (prefix_len == 0 || prefix_len > path.size())
return false;
for (size_t i = 0; i != prefix_len; ++i)
{
if (path[i].type == SubstreamType::DynamicData || path[i].type == SubstreamType::DynamicStructure
|| path[i].type == SubstreamType::ObjectData || path[i].type == SubstreamType::ObjectStructure)
return true;
}
return false;
}
ISerialization::SubstreamData ISerialization::createFromPath(const SubstreamPath & path, size_t prefix_len)
{
assert(prefix_len <= path.size());

View File

@ -457,6 +457,9 @@ public:
/// for writing/reading data. For example, it's a null-map subcolumn of Variant type (it's always constructed from discriminators);.
static bool isEphemeralSubcolumn(const SubstreamPath & path, size_t prefix_len);
/// Returns true if stream with specified path corresponds to dynamic subcolumn.
static bool isDynamicSubcolumn(const SubstreamPath & path, size_t prefix_len);
protected:
template <typename State, typename StatePtr>
State * checkAndGetState(const StatePtr & state) const;

View File

@ -441,7 +441,8 @@ void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(LoadingStrictnessL
bool is_create_query = mode == LoadingStrictnessLevel::CREATE;
String replica_host_id;
if (current_zookeeper->tryGet(replica_path, replica_host_id))
bool replica_exists_in_zk = current_zookeeper->tryGet(replica_path, replica_host_id);
if (replica_exists_in_zk)
{
if (replica_host_id == DROPPED_MARK && !is_create_query)
{
@ -454,7 +455,7 @@ void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(LoadingStrictnessL
String host_id = getHostID(getContext(), db_uuid, cluster_auth_info.cluster_secure_connection);
String host_id_default = getHostID(getContext(), db_uuid, false);
if (is_create_query || (replica_host_id != host_id && replica_host_id != host_id_default))
if (replica_host_id != host_id && replica_host_id != host_id_default)
{
throw Exception(
ErrorCodes::REPLICA_ALREADY_EXISTS,
@ -484,13 +485,20 @@ void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(LoadingStrictnessL
current_zookeeper->set(replica_path + "/replica_group", replica_group_name, -1);
createEmptyLogEntry(current_zookeeper);
}
/// Needed to mark all the queries
/// in the range (max log ptr at replica ZooKeeper nodes creation, max log ptr after replica recovery] as successful.
String max_log_ptr_at_creation_str;
if (current_zookeeper->tryGet(replica_path + "/max_log_ptr_at_creation", max_log_ptr_at_creation_str))
max_log_ptr_at_creation = parse<UInt32>(max_log_ptr_at_creation_str);
}
else if (is_create_query)
if (is_create_query)
{
/// Create new replica. Throws if replica with the same name already exists
/// Create replica nodes in ZooKeeper. If newly initialized nodes already exist, reuse them.
createReplicaNodesInZooKeeper(current_zookeeper);
}
else
else if (!replica_exists_in_zk)
{
/// It's not CREATE query, but replica does not exist. Probably it was dropped.
/// Do not create anything, continue as readonly.
@ -606,37 +614,84 @@ void DatabaseReplicated::createReplicaNodesInZooKeeper(const zkutil::ZooKeeperPt
"already contains some data and it does not look like Replicated database path.", zookeeper_path);
/// Write host name to replica_path, it will protect from multiple replicas with the same name
auto host_id = getHostID(getContext(), db_uuid, cluster_auth_info.cluster_secure_connection);
const auto host_id = getHostID(getContext(), db_uuid, cluster_auth_info.cluster_secure_connection);
const std::vector<String> check_paths = {
replica_path,
replica_path + "/replica_group",
replica_path + "/digest",
};
bool nodes_exist = true;
auto check_responses = current_zookeeper->tryGet(check_paths);
for (size_t i = 0; i < check_responses.size(); ++i)
{
const auto response = check_responses[i];
if (response.error == Coordination::Error::ZNONODE)
{
nodes_exist = false;
break;
} else if (response.error != Coordination::Error::ZOK)
{
throw zkutil::KeeperException::fromPath(response.error, check_paths[i]);
}
}
if (nodes_exist)
{
const std::vector<String> expected_data = {
host_id,
replica_group_name,
"0",
};
for (size_t i = 0; i != expected_data.size(); ++i)
{
if (check_responses[i].data != expected_data[i])
{
throw Exception(
ErrorCodes::REPLICA_ALREADY_EXISTS,
"Replica node {} in ZooKeeper already exists and contains unexpected value: {}",
quoteString(check_paths[i]), quoteString(check_responses[i].data));
}
}
LOG_DEBUG(log, "Newly initialized replica nodes found in ZooKeeper, reusing them");
createEmptyLogEntry(current_zookeeper);
return;
}
for (int attempts = 10; attempts > 0; --attempts)
{
Coordination::Stat stat;
String max_log_ptr_str = current_zookeeper->get(zookeeper_path + "/max_log_ptr", &stat);
const String max_log_ptr_str = current_zookeeper->get(zookeeper_path + "/max_log_ptr", &stat);
Coordination::Requests ops;
ops.emplace_back(zkutil::makeCreateRequest(replica_path, host_id, zkutil::CreateMode::Persistent));
ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/log_ptr", "0", zkutil::CreateMode::Persistent));
ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/digest", "0", zkutil::CreateMode::Persistent));
ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/replica_group", replica_group_name, zkutil::CreateMode::Persistent));
/// In addition to creating the replica nodes, we record the max_log_ptr at the instant where
/// we declared ourself as an existing replica. We'll need this during recoverLostReplica to
/// notify other nodes that issued new queries while this node was recovering.
ops.emplace_back(zkutil::makeCheckRequest(zookeeper_path + "/max_log_ptr", stat.version));
const Coordination::Requests ops = {
zkutil::makeCreateRequest(replica_path, host_id, zkutil::CreateMode::Persistent),
zkutil::makeCreateRequest(replica_path + "/log_ptr", "0", zkutil::CreateMode::Persistent),
zkutil::makeCreateRequest(replica_path + "/digest", "0", zkutil::CreateMode::Persistent),
zkutil::makeCreateRequest(replica_path + "/replica_group", replica_group_name, zkutil::CreateMode::Persistent),
/// Previously, this method was not idempotent and max_log_ptr_at_creation could be stored in memory.
/// We need to store max_log_ptr_at_creation in ZooKeeper to make this method idempotent during replica creation.
zkutil::makeCreateRequest(replica_path + "/max_log_ptr_at_creation", max_log_ptr_str, zkutil::CreateMode::Persistent),
zkutil::makeCheckRequest(zookeeper_path + "/max_log_ptr", stat.version),
};
Coordination::Responses ops_responses;
const auto code = current_zookeeper->tryMulti(ops, ops_responses);
Coordination::Responses responses;
const auto code = current_zookeeper->tryMulti(ops, responses);
if (code == Coordination::Error::ZOK)
{
max_log_ptr_at_creation = parse<UInt32>(max_log_ptr_str);
break;
}
else if (code == Coordination::Error::ZNODEEXISTS || attempts == 1)
{
/// If it's our last attempt, or if the replica already exists, fail immediately.
zkutil::KeeperMultiException::check(code, ops, responses);
}
}
createEmptyLogEntry(current_zookeeper);
return;
}
if (attempts == 1)
{
zkutil::KeeperMultiException::check(code, ops, ops_responses);
}
}
}
void DatabaseReplicated::beforeLoadingMetadata(ContextMutablePtr context_, LoadingStrictnessLevel mode)
@ -852,18 +907,6 @@ void DatabaseReplicated::checkTableEngine(const ASTCreateQuery & query, ASTStora
bool maybe_replica_macros = info.expanded_other;
bool enable_functional_tests_helper = getContext()->getConfigRef().has("_functional_tests_helper_database_replicated_replace_args_macros");
if (!enable_functional_tests_helper)
{
if (query_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments)
LOG_WARNING(log, "It's not recommended to explicitly specify zookeeper_path and replica_name in ReplicatedMergeTree arguments");
else
throw Exception(ErrorCodes::INCORRECT_QUERY,
"It's not allowed to specify explicit zookeeper_path and replica_name "
"for ReplicatedMergeTree arguments in Replicated database. If you really want to "
"specify them explicitly, enable setting "
"database_replicated_allow_replicated_engine_arguments.");
}
if (maybe_shard_macros && maybe_replica_macros)
return;
@ -876,6 +919,8 @@ void DatabaseReplicated::checkTableEngine(const ASTCreateQuery & query, ASTStora
return;
}
/// We will replace it with default arguments if the setting is 2
if (query_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments != 2)
throw Exception(ErrorCodes::INCORRECT_QUERY,
"Explicit zookeeper_path and replica_name are specified in ReplicatedMergeTree arguments. "
"If you really want to specify it explicitly, then you should use some macros "
@ -1145,6 +1190,9 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep
/// so we need to allow experimental features that can be used in a CREATE query
enableAllExperimentalSettings(query_context);
query_context->setSetting("database_replicated_allow_explicit_uuid", 3);
query_context->setSetting("database_replicated_allow_replicated_engine_arguments", 3);
auto txn = std::make_shared<ZooKeeperMetadataTransaction>(current_zookeeper, zookeeper_path, false, "");
query_context->initZooKeeperMetadataTransaction(txn);
return query_context;

View File

@ -339,7 +339,15 @@ DiskLocal::writeFile(const String & path, size_t buf_size, WriteMode mode, const
{
int flags = (mode == WriteMode::Append) ? (O_APPEND | O_CREAT | O_WRONLY) : -1;
return std::make_unique<WriteBufferFromFile>(
fs::path(disk_path) / path, buf_size, flags, settings.local_throttler);
fs::path(disk_path) / path,
buf_size,
flags,
settings.local_throttler,
0666,
nullptr,
0,
settings.use_adaptive_write_buffer,
settings.adaptive_write_buffer_initial_size);
}
std::vector<String> DiskLocal::getBlobPath(const String & path) const

View File

@ -59,7 +59,7 @@ WriteBufferFromAzureBlobStorage::WriteBufferFromAzureBlobStorage(
const WriteSettings & write_settings_,
std::shared_ptr<const AzureBlobStorage::RequestSettings> settings_,
ThreadPoolCallbackRunnerUnsafe<void> schedule_)
: WriteBufferFromFileBase(buf_size_, nullptr, 0)
: WriteBufferFromFileBase(std::min(buf_size_, static_cast<size_t>(DBMS_DEFAULT_BUFFER_SIZE)), nullptr, 0)
, log(getLogger("WriteBufferFromAzureBlobStorage"))
, buffer_allocation_policy(createBufferAllocationPolicy(*settings_))
, max_single_part_upload_size(settings_->max_single_part_upload_size)
@ -244,11 +244,21 @@ void WriteBufferFromAzureBlobStorage::allocateBuffer()
buffer_allocation_policy->nextBuffer();
chassert(0 == hidden_size);
auto size = buffer_allocation_policy->getBufferSize();
/// First buffer was already allocated in BufferWithOwnMemory constructor with buffer size provided in constructor.
/// It will be reallocated in subsequent nextImpl calls up to the desired buffer size from buffer_allocation_policy.
if (buffer_allocation_policy->getBufferNumber() == 1)
size = std::min(size_t(DBMS_DEFAULT_BUFFER_SIZE), size);
{
/// Reduce memory size if initial size was larger than desired size from buffer_allocation_policy.
/// Usually it doesn't happen but we have it in unit tests.
if (memory.size() > buffer_allocation_policy->getBufferSize())
{
memory.resize(buffer_allocation_policy->getBufferSize());
WriteBuffer::set(memory.data(), memory.size());
}
return;
}
auto size = buffer_allocation_policy->getBufferSize();
memory = Memory(size);
WriteBuffer::set(memory.data(), memory.size());
}

View File

@ -289,7 +289,7 @@ std::unique_ptr<WriteBufferFromFileBase> AzureObjectStorage::writeObject( /// NO
return std::make_unique<WriteBufferFromAzureBlobStorage>(
client.get(),
object.remote_path,
buf_size,
write_settings.use_adaptive_write_buffer ? write_settings.adaptive_write_buffer_initial_size : buf_size,
patchSettings(write_settings),
settings.get(),
std::move(scheduler));

View File

@ -282,7 +282,7 @@ std::unique_ptr<WriteBufferFromFileBase> S3ObjectStorage::writeObject( /// NOLIN
client.get(),
uri.bucket,
object.remote_path,
buf_size,
write_settings.use_adaptive_write_buffer ? write_settings.adaptive_write_buffer_initial_size : buf_size,
request_settings,
std::move(blob_storage_log),
attributes,

View File

@ -483,6 +483,33 @@ namespace JSONUtils
writeArrayEnd(out, 1);
}
void writeCompactMetadata(const Names & names, const DataTypes & types, const FormatSettings & settings, WriteBuffer & out)
{
writeCompactArrayStart(out, 0, "meta");
for (size_t i = 0; i < names.size(); ++i)
{
writeCompactObjectStart(out);
writeTitle("name", out, 0, "");
/// The field names are pre-escaped to be put into JSON string literal.
writeChar('"', out);
writeString(names[i], out);
writeChar('"', out);
writeFieldCompactDelimiter(out);
writeTitle("type", out, 0, "");
writeJSONString(types[i]->getName(), out, settings);
writeCompactObjectEnd(out);
if (i + 1 < names.size())
writeFieldCompactDelimiter(out);
}
writeCompactArrayEnd(out);
}
void writeAdditionalInfo(
size_t rows,
size_t rows_before_limit,
@ -530,6 +557,45 @@ namespace JSONUtils
}
}
void writeCompactAdditionalInfo(
size_t rows,
size_t rows_before_limit,
bool applied_limit,
const Stopwatch & watch,
const Progress & progress,
bool write_statistics,
WriteBuffer & out)
{
writeCompactObjectStart(out);
writeCompactObjectStart(out, 0, "statistics");
writeTitle("rows", out, 0, "");
writeIntText(rows, out);
if (applied_limit)
{
writeFieldCompactDelimiter(out);
writeTitle("rows_before_limit_at_least", out, 0, "");
writeIntText(rows_before_limit, out);
}
if (write_statistics)
{
writeFieldCompactDelimiter(out);
writeTitle("elapsed", out, 0, "");
writeText(watch.elapsedSeconds(), out);
writeFieldCompactDelimiter(out);
writeTitle("rows_read", out, 0, "");
writeText(progress.read_rows.load(), out);
writeFieldCompactDelimiter(out);
writeTitle("bytes_read", out, 0, "");
writeText(progress.read_bytes.load(), out);
}
writeCompactObjectEnd(out);
writeCompactObjectEnd(out);
}
void writeException(const String & exception_message, WriteBuffer & out, const FormatSettings & settings, size_t indent)
{
writeTitle("exception", out, indent, " ");

View File

@ -99,6 +99,7 @@ namespace JSONUtils
WriteBuffer & out);
void writeMetadata(const Names & names, const DataTypes & types, const FormatSettings & settings, WriteBuffer & out);
void writeCompactMetadata(const Names & names, const DataTypes & types, const FormatSettings & settings, WriteBuffer & out);
void writeAdditionalInfo(
size_t rows,
@ -111,6 +112,15 @@ namespace JSONUtils
bool write_statistics,
WriteBuffer & out);
void writeCompactAdditionalInfo(
size_t rows,
size_t rows_before_limit,
bool applied_limit,
const Stopwatch & watch,
const Progress & progress,
bool write_statistics,
WriteBuffer & out);
void writeException(const String & exception_message, WriteBuffer & out, const FormatSettings & settings, size_t indent = 0);
void skipColon(ReadBuffer & in);

View File

@ -95,6 +95,7 @@ void registerOutputFormatMarkdown(FormatFactory & factory);
void registerOutputFormatPostgreSQLWire(FormatFactory & factory);
void registerOutputFormatPrometheus(FormatFactory & factory);
void registerOutputFormatSQLInsert(FormatFactory & factory);
void registerOutputFormatJSONCompactWithProgress(FormatFactory & factory);
/// Input only formats.
@ -242,6 +243,7 @@ void registerFormats()
registerOutputFormatCapnProto(factory);
registerOutputFormatPrometheus(factory);
registerOutputFormatSQLInsert(factory);
registerOutputFormatJSONCompactWithProgress(factory);
registerInputFormatRegexp(factory);
registerInputFormatJSONAsString(factory);

View File

@ -492,7 +492,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Nanosecond>
{
throwDateTimeIsNotSupported(TO_START_OF_INTERVAL_NAME);
}
static Int64 execute(Int64 t, Int64 nanoseconds, const DateLUTImpl &, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 nanoseconds, const DateLUTImpl &, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
if (scale_multiplier < 1000000000)
{
@ -527,7 +527,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Microsecond>
{
throwDateTimeIsNotSupported(TO_START_OF_INTERVAL_NAME);
}
static Int64 execute(Int64 t, Int64 microseconds, const DateLUTImpl &, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 microseconds, const DateLUTImpl &, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
if (scale_multiplier < 1000000)
{
@ -570,7 +570,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Millisecond>
{
throwDateTimeIsNotSupported(TO_START_OF_INTERVAL_NAME);
}
static Int64 execute(Int64 t, Int64 milliseconds, const DateLUTImpl &, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 milliseconds, const DateLUTImpl &, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
if (scale_multiplier < 1000)
{
@ -613,7 +613,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Second>
{
return time_zone.toStartOfSecondInterval(t, seconds);
}
static Int64 execute(Int64 t, Int64 seconds, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 seconds, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
return time_zone.toStartOfSecondInterval(t / scale_multiplier, seconds);
}
@ -634,7 +634,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Minute>
{
return time_zone.toStartOfMinuteInterval(t, minutes);
}
static Int64 execute(Int64 t, Int64 minutes, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 minutes, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
return time_zone.toStartOfMinuteInterval(t / scale_multiplier, minutes);
}
@ -655,7 +655,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Hour>
{
return time_zone.toStartOfHourInterval(t, hours);
}
static Int64 execute(Int64 t, Int64 hours, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 hours, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
return time_zone.toStartOfHourInterval(t / scale_multiplier, hours);
}
@ -676,7 +676,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Day>
{
return static_cast<UInt32>(time_zone.toStartOfDayInterval(time_zone.toDayNum(t), days));
}
static Int64 execute(Int64 t, Int64 days, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 days, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
return time_zone.toStartOfDayInterval(time_zone.toDayNum(t / scale_multiplier), days);
}
@ -697,9 +697,13 @@ struct ToStartOfInterval<IntervalKind::Kind::Week>
{
return time_zone.toStartOfWeekInterval(time_zone.toDayNum(t), weeks);
}
static UInt16 execute(Int64 t, Int64 weeks, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 weeks, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 origin = 0)
{
if (origin == 0)
return time_zone.toStartOfWeekInterval(time_zone.toDayNum(t / scale_multiplier), weeks);
else
return ToStartOfInterval<IntervalKind::Kind::Day>::execute(t, weeks * 7, time_zone, scale_multiplier, origin);
}
};
@ -718,9 +722,23 @@ struct ToStartOfInterval<IntervalKind::Kind::Month>
{
return time_zone.toStartOfMonthInterval(time_zone.toDayNum(t), months);
}
static UInt16 execute(Int64 t, Int64 months, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 months, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 origin = 0)
{
return time_zone.toStartOfMonthInterval(time_zone.toDayNum(t / scale_multiplier), months);
const Int64 scaled_time = t / scale_multiplier;
if (origin == 0)
return time_zone.toStartOfMonthInterval(time_zone.toDayNum(scaled_time), months);
else
{
const Int64 scaled_origin = origin / scale_multiplier;
const Int64 days = time_zone.toDayOfMonth(scaled_time + scaled_origin) - time_zone.toDayOfMonth(scaled_origin);
Int64 months_to_add = time_zone.toMonth(scaled_time + scaled_origin) - time_zone.toMonth(scaled_origin);
const Int64 years = time_zone.toYear(scaled_time + scaled_origin) - time_zone.toYear(scaled_origin);
months_to_add = days < 0 ? months_to_add - 1 : months_to_add;
months_to_add += years * 12;
Int64 month_multiplier = (months_to_add / months) * months;
return (time_zone.addMonths(time_zone.toDate(scaled_origin), month_multiplier) - time_zone.toDate(scaled_origin));
}
}
};
@ -739,9 +757,12 @@ struct ToStartOfInterval<IntervalKind::Kind::Quarter>
{
return time_zone.toStartOfQuarterInterval(time_zone.toDayNum(t), quarters);
}
static UInt16 execute(Int64 t, Int64 quarters, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 quarters, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 origin = 0)
{
if (origin == 0)
return time_zone.toStartOfQuarterInterval(time_zone.toDayNum(t / scale_multiplier), quarters);
else
return ToStartOfInterval<IntervalKind::Kind::Month>::execute(t, quarters * 3, time_zone, scale_multiplier, origin);
}
};
@ -760,9 +781,12 @@ struct ToStartOfInterval<IntervalKind::Kind::Year>
{
return time_zone.toStartOfYearInterval(time_zone.toDayNum(t), years);
}
static UInt16 execute(Int64 t, Int64 years, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 years, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 origin = 0)
{
if (origin == 0)
return time_zone.toStartOfYearInterval(time_zone.toDayNum(t / scale_multiplier), years);
else
return ToStartOfInterval<IntervalKind::Kind::Month>::execute(t, years * 12, time_zone, scale_multiplier, origin);
}
};

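All of these execute() overloads gain an origin parameter: with origin == 0 they keep the old "start of the Nth calendar interval" behaviour, otherwise the bucket boundaries are anchored at the origin. A minimal standalone sketch of that anchored rounding for second-like units (no time zones, no DateTime64 scale, made-up function name):

#include <cassert>
#include <cstdint>

/// Round `t` down to the start of an `interval`-sized bucket anchored at `origin`,
/// all in seconds. Calendar units (weeks/months/quarters/years) additionally delegate
/// to day/month arithmetic, as the specializations above do.
int64_t toStartOfIntervalWithOrigin(int64_t t, int64_t interval, int64_t origin)
{
    assert(interval > 0 && t >= origin); /// the function throws BAD_ARGUMENTS when origin > t
    return origin + (t - origin) / interval * interval;
}

int main()
{
    /// Buckets of 90 seconds anchored at origin = 100: [100, 190), [190, 280), ...
    assert(toStartOfIntervalWithOrigin(100, 90, 100) == 100);
    assert(toStartOfIntervalWithOrigin(189, 90, 100) == 100);
    assert(toStartOfIntervalWithOrigin(190, 90, 100) == 190);
}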
View File

@ -6,6 +6,7 @@
# include <Columns/ColumnString.h>
# include <Functions/LowerUpperImpl.h>
# include <base/scope_guard.h>
# include <unicode/ucasemap.h>
# include <unicode/unistr.h>
# include <unicode/urename.h>
@ -49,6 +50,11 @@ struct LowerUpperUTF8Impl
if (U_FAILURE(error_code))
throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Error calling ucasemap_open: {}", u_errorName(error_code));
SCOPE_EXIT(
{
ucasemap_close(case_map);
});
size_t curr_offset = 0;
for (size_t row_i = 0; row_i < input_rows_count; ++row_i)
{

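The SCOPE_EXIT added above guarantees ucasemap_close(case_map) runs on every exit path of the function, including exceptions thrown while processing rows. A generic stand-in for such a guard (not the ClickHouse SCOPE_EXIT macro itself) is a small RAII wrapper around a lambda:

#include <cstdio>
#include <cstdlib>
#include <utility>

/// Minimal scope guard: runs the stored callable when the enclosing scope ends,
/// whether it ends normally or via an exception.
template <typename F>
struct ScopeGuard
{
    F fn;
    ~ScopeGuard() { fn(); }
};

template <typename F>
ScopeGuard<F> makeScopeGuard(F fn) { return ScopeGuard<F>{std::move(fn)}; }

int main()
{
    void * case_map = std::malloc(16); /// stand-in for ucasemap_open()
    auto guard = makeScopeGuard([&] { std::free(case_map); std::puts("case map closed"); });
    /// ... work that may throw or return early; the guard still frees case_map ...
}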
View File

@ -1,11 +1,15 @@
#include <Functions/IFunction.h>
#include <Functions/FunctionFactory.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnDecimal.h>
#include <Columns/ColumnFixedString.h>
#include <Columns/ColumnNullable.h>
#include <Columns/ColumnString.h>
#include <Core/Settings.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/getLeastSupertype.h>
#include <Columns/ColumnArray.h>
#include <Core/Settings.h>
#include <Interpreters/castColumn.h>
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <Interpreters/Context.h>
#include <Interpreters/castColumn.h>
namespace DB
@ -44,11 +48,13 @@ public:
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
{
size_t num_elements = arguments.size();
const size_t num_elements = arguments.size();
if (num_elements == 0)
{
/// We should return constant empty array.
return result_type->createColumnConstWithDefaultValue(input_rows_count);
}
const DataTypePtr & elem_type = static_cast<const DataTypeArray &>(*result_type).getNestedType();
@ -60,7 +66,6 @@ public:
Columns columns_holder(num_elements);
ColumnRawPtrs column_ptrs(num_elements);
for (size_t i = 0; i < num_elements; ++i)
{
const auto & arg = arguments[i];
@ -77,35 +82,199 @@ public:
}
/// Create and fill the result array.
auto out = ColumnArray::create(elem_type->createColumn());
IColumn & out_data = out->getData();
IColumn::Offsets & out_offsets = out->getOffsets();
out_data.reserve(input_rows_count * num_elements);
out_offsets.resize(input_rows_count);
/// Fill out_offsets
out_offsets.resize_exact(input_rows_count);
IColumn::Offset current_offset = 0;
for (size_t i = 0; i < input_rows_count; ++i)
{
for (size_t j = 0; j < num_elements; ++j)
out_data.insertFrom(*column_ptrs[j], i);
current_offset += num_elements;
out_offsets[i] = current_offset;
}
/// Fill out_data
out_data.reserve(input_rows_count * num_elements);
if (num_elements == 1)
out_data.insertRangeFrom(*column_ptrs[0], 0, input_rows_count);
else
execute(column_ptrs, out_data, input_rows_count);
return out;
}
private:
bool execute(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
{
return executeNumber<UInt8>(columns, out_data, input_rows_count) || executeNumber<UInt16>(columns, out_data, input_rows_count)
|| executeNumber<UInt32>(columns, out_data, input_rows_count) || executeNumber<UInt64>(columns, out_data, input_rows_count)
|| executeNumber<UInt128>(columns, out_data, input_rows_count) || executeNumber<UInt256>(columns, out_data, input_rows_count)
|| executeNumber<Int8>(columns, out_data, input_rows_count) || executeNumber<Int16>(columns, out_data, input_rows_count)
|| executeNumber<Int32>(columns, out_data, input_rows_count) || executeNumber<Int64>(columns, out_data, input_rows_count)
|| executeNumber<Int128>(columns, out_data, input_rows_count) || executeNumber<Int256>(columns, out_data, input_rows_count)
|| executeNumber<Float32>(columns, out_data, input_rows_count) || executeNumber<Float64>(columns, out_data, input_rows_count)
|| executeNumber<Decimal32>(columns, out_data, input_rows_count)
|| executeNumber<Decimal64>(columns, out_data, input_rows_count)
|| executeNumber<Decimal128>(columns, out_data, input_rows_count)
|| executeNumber<Decimal256>(columns, out_data, input_rows_count)
|| executeNumber<DateTime64>(columns, out_data, input_rows_count) || executeString(columns, out_data, input_rows_count)
|| executeNullable(columns, out_data, input_rows_count) || executeTuple(columns, out_data, input_rows_count)
|| executeFixedString(columns, out_data, input_rows_count) || executeGeneric(columns, out_data, input_rows_count);
}
template <typename T>
bool executeNumber(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
{
using Container = ColumnVectorOrDecimal<T>::Container;
std::vector<const Container *> containers(columns.size(), nullptr);
for (size_t i = 0; i < columns.size(); ++i)
{
const ColumnVectorOrDecimal<T> * concrete_column = checkAndGetColumn<ColumnVectorOrDecimal<T>>(columns[i]);
if (!concrete_column)
return false;
containers[i] = &concrete_column->getData();
}
ColumnVectorOrDecimal<T> & concrete_out_data = assert_cast<ColumnVectorOrDecimal<T> &>(out_data);
Container & out_container = concrete_out_data.getData();
out_container.resize_exact(columns.size() * input_rows_count);
for (size_t row_i = 0; row_i < input_rows_count; ++row_i)
{
const size_t base = row_i * columns.size();
for (size_t col_i = 0; col_i < columns.size(); ++col_i)
out_container[base + col_i] = (*containers[col_i])[row_i];
}
return true;
}
bool executeString(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
{
size_t total_bytes = 0;
std::vector<const ColumnString *> concrete_columns(columns.size(), nullptr);
for (size_t i = 0; i < columns.size(); ++i)
{
const ColumnString * concrete_column = checkAndGetColumn<ColumnString>(columns[i]);
if (!concrete_column)
return false;
total_bytes += concrete_column->getChars().size();
concrete_columns[i] = concrete_column;
}
ColumnString & concrete_out_data = assert_cast<ColumnString &>(out_data);
auto & out_chars = concrete_out_data.getChars();
auto & out_offsets = concrete_out_data.getOffsets();
out_chars.resize_exact(total_bytes);
out_offsets.resize_exact(input_rows_count * columns.size());
size_t cur_out_offset = 0;
for (size_t row_i = 0; row_i < input_rows_count; ++row_i)
{
const size_t base = row_i * columns.size();
for (size_t col_i = 0; col_i < columns.size(); ++col_i)
{
StringRef ref = concrete_columns[col_i]->getDataAt(row_i);
memcpySmallAllowReadWriteOverflow15(&out_chars[cur_out_offset], ref.data, ref.size);
out_chars[cur_out_offset + ref.size] = 0;
cur_out_offset += ref.size + 1;
out_offsets[base + col_i] = cur_out_offset;
}
}
return true;
}
bool executeFixedString(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
{
std::vector<const ColumnFixedString *> concrete_columns(columns.size(), nullptr);
for (size_t i = 0; i < columns.size(); ++i)
{
const ColumnFixedString * concrete_column = checkAndGetColumn<ColumnFixedString>(columns[i]);
if (!concrete_column)
return false;
concrete_columns[i] = concrete_column;
}
ColumnFixedString & concrete_out_data = assert_cast<ColumnFixedString &>(out_data);
auto & out_chars = concrete_out_data.getChars();
const size_t n = concrete_out_data.getN();
size_t total_bytes = n * columns.size() * input_rows_count;
out_chars.resize_exact(total_bytes);
size_t curr_out_offset = 0;
for (size_t row_i = 0; row_i < input_rows_count; ++row_i)
{
for (size_t col_i = 0; col_i < columns.size(); ++col_i)
{
StringRef ref = concrete_columns[col_i]->getDataAt(row_i);
memcpySmallAllowReadWriteOverflow15(&out_chars[curr_out_offset], ref.data, n);
curr_out_offset += n;
}
}
return true;
}
bool executeNullable(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
{
ColumnRawPtrs null_maps(columns.size(), nullptr);
ColumnRawPtrs nested_columns(columns.size(), nullptr);
for (size_t i = 0; i < columns.size(); ++i)
{
const ColumnNullable * concrete_column = checkAndGetColumn<ColumnNullable>(columns[i]);
if (!concrete_column)
return false;
null_maps[i] = &concrete_column->getNullMapColumn();
nested_columns[i] = &concrete_column->getNestedColumn();
}
ColumnNullable & concrete_out_data = assert_cast<ColumnNullable &>(out_data);
auto & out_null_map = concrete_out_data.getNullMapColumn();
auto & out_nested_column = concrete_out_data.getNestedColumn();
execute(null_maps, out_null_map, input_rows_count);
execute(nested_columns, out_nested_column, input_rows_count);
return true;
}
bool executeTuple(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
{
ColumnTuple * concrete_out_data = typeid_cast<ColumnTuple *>(&out_data);
if (!concrete_out_data)
return false;
const size_t tuple_size = concrete_out_data->tupleSize();
for (size_t i = 0; i < tuple_size; ++i)
{
ColumnRawPtrs elem_columns(columns.size(), nullptr);
for (size_t j = 0; j < columns.size(); ++j)
{
const ColumnTuple * concrete_column = assert_cast<const ColumnTuple *>(columns[j]);
elem_columns[j] = &concrete_column->getColumn(i);
}
execute(elem_columns, concrete_out_data->getColumn(i), input_rows_count);
}
return true;
}
bool executeGeneric(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
{
for (size_t i = 0; i < input_rows_count; ++i)
for (const auto * column : columns)
out_data.insertFrom(*column, i);
return true;
}
String getName() const override
{
return name;
}
bool addField(DataTypePtr type_res, const Field & f, Array & arr) const;
bool use_variant_as_common_type = false;
};

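The new specialized paths (executeNumber, executeString, executeFixedString, ...) all produce the same layout as the old insertFrom loop: for every row, the value from each argument column is appended back to back. A standalone sketch of that row-major interleave for plain integers (hypothetical name, no ClickHouse column types):

#include <cassert>
#include <cstdint>
#include <vector>

/// Interleave N equally sized input columns into one flat output, row-major:
/// out = [c0[0], c1[0], ..., cN-1[0], c0[1], c1[1], ...]
std::vector<int64_t> interleaveColumns(const std::vector<std::vector<int64_t>> & columns, size_t rows)
{
    std::vector<int64_t> out(columns.size() * rows);
    for (size_t row = 0; row < rows; ++row)
        for (size_t col = 0; col < columns.size(); ++col)
            out[row * columns.size() + col] = columns[col][row];
    return out;
}

int main()
{
    /// array(a, b) over two rows: a = {1, 2}, b = {10, 20} -> [1, 10, 2, 20]
    auto out = interleaveColumns({{1, 2}, {10, 20}}, 2);
    assert((out == std::vector<int64_t>{1, 10, 2, 20}));
}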
View File

@ -2,6 +2,8 @@
#include <Columns/ColumnMap.h>
#include <Columns/ColumnNullable.h>
#include <Columns/ColumnsCommon.h>
#include <Columns/ColumnsNumber.h>
#include <Core/Settings.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeMap.h>
#include <DataTypes/DataTypeTuple.h>
@ -13,7 +15,6 @@
#include <Interpreters/Context.h>
#include <Interpreters/castColumn.h>
#include <Common/HashTable/HashSet.h>
#include <Core/Settings.h>
namespace DB
@ -36,11 +37,18 @@ class FunctionMap : public IFunction
public:
static constexpr auto name = "map";
explicit FunctionMap(bool use_variant_as_common_type_) : use_variant_as_common_type(use_variant_as_common_type_) {}
explicit FunctionMap(ContextPtr context_)
: context(context_)
, use_variant_as_common_type(
context->getSettingsRef().allow_experimental_variant_type && context->getSettingsRef().use_variant_as_common_type)
, function_array(FunctionFactory::instance().get("array", context))
, function_map_from_arrays(FunctionFactory::instance().get("mapFromArrays", context))
{
}
static FunctionPtr create(ContextPtr context)
{
return std::make_shared<FunctionMap>(context->getSettingsRef().allow_experimental_variant_type && context->getSettingsRef().use_variant_as_common_type);
return std::make_shared<FunctionMap>(context);
}
String getName() const override
@ -101,62 +109,38 @@ public:
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
{
size_t num_elements = arguments.size();
if (num_elements == 0)
return result_type->createColumnConstWithDefaultValue(input_rows_count);
ColumnsWithTypeAndName key_args;
ColumnsWithTypeAndName value_args;
for (size_t i = 0; i < num_elements; i += 2)
{
key_args.emplace_back(arguments[i]);
value_args.emplace_back(arguments[i+1]);
}
const auto & result_type_map = static_cast<const DataTypeMap &>(*result_type);
const DataTypePtr & key_type = result_type_map.getKeyType();
const DataTypePtr & value_type = result_type_map.getValueType();
const DataTypePtr & key_array_type = std::make_shared<DataTypeArray>(key_type);
const DataTypePtr & value_array_type = std::make_shared<DataTypeArray>(value_type);
Columns columns_holder(num_elements);
ColumnRawPtrs column_ptrs(num_elements);
/// key_array = array(args[0], args[2]...)
ColumnPtr key_array = function_array->build(key_args)->execute(key_args, key_array_type, input_rows_count);
/// value_array = array(args[1], args[3]...)
ColumnPtr value_array = function_array->build(value_args)->execute(value_args, value_array_type, input_rows_count);
for (size_t i = 0; i < num_elements; ++i)
{
const auto & arg = arguments[i];
const auto to_type = i % 2 == 0 ? key_type : value_type;
ColumnPtr preprocessed_column = castColumn(arg, to_type);
preprocessed_column = preprocessed_column->convertToFullColumnIfConst();
columns_holder[i] = std::move(preprocessed_column);
column_ptrs[i] = columns_holder[i].get();
}
/// Create and fill the result map.
MutableColumnPtr keys_data = key_type->createColumn();
MutableColumnPtr values_data = value_type->createColumn();
MutableColumnPtr offsets = DataTypeNumber<IColumn::Offset>().createColumn();
size_t total_elements = input_rows_count * num_elements / 2;
keys_data->reserve(total_elements);
values_data->reserve(total_elements);
offsets->reserve(input_rows_count);
IColumn::Offset current_offset = 0;
for (size_t i = 0; i < input_rows_count; ++i)
{
for (size_t j = 0; j < num_elements; j += 2)
{
keys_data->insertFrom(*column_ptrs[j], i);
values_data->insertFrom(*column_ptrs[j + 1], i);
}
current_offset += num_elements / 2;
offsets->insert(current_offset);
}
auto nested_column = ColumnArray::create(
ColumnTuple::create(Columns{std::move(keys_data), std::move(values_data)}),
std::move(offsets));
return ColumnMap::create(nested_column);
/// result = mapFromArrays(key_array, value_array)
ColumnsWithTypeAndName map_args{{key_array, key_array_type, ""}, {value_array, value_array_type, ""}};
return function_map_from_arrays->build(map_args)->execute(map_args, result_type, input_rows_count);
}
private:
ContextPtr context;
bool use_variant_as_common_type = false;
FunctionOverloadResolverPtr function_array;
FunctionOverloadResolverPtr function_map_from_arrays;
};
/// mapFromArrays(keys, values) is a function that allows you to make key-value pair from a pair of arrays or maps
@ -173,6 +157,7 @@ public:
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
bool useDefaultImplementationForNulls() const override { return false; }
bool useDefaultImplementationForConstants() const override { return true; }
bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
{

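FunctionMap now builds the result by splitting the interleaved arguments into key and value groups, evaluating array() on each group, and handing both arrays to mapFromArrays(). A tiny standalone sketch of just that argument split (made-up name, plain strings instead of columns):

#include <cassert>
#include <string>
#include <utility>
#include <vector>

/// Split map(k0, v0, k1, v1, ...) arguments into the two groups that are fed to
/// array(...) and then to mapFromArrays(key_array, value_array).
std::pair<std::vector<std::string>, std::vector<std::string>>
splitMapArguments(const std::vector<std::string> & args)
{
    assert(args.size() % 2 == 0);
    std::vector<std::string> keys, values;
    for (size_t i = 0; i < args.size(); i += 2)
    {
        keys.push_back(args[i]);
        values.push_back(args[i + 1]);
    }
    return {keys, values};
}

int main()
{
    auto [keys, values] = splitMapArguments({"k1", "1", "k2", "2"});
    assert((keys == std::vector<std::string>{"k1", "k2"}));
    assert((values == std::vector<std::string>{"1", "2"}));
}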
View File

@ -2,7 +2,7 @@
#include <Functions/IFunction.h>
#include <Functions/FunctionFactory.h>
#include <Columns/ColumnLowCardinality.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <Columns/ColumnSparse.h>
namespace DB
{
@ -18,11 +18,6 @@ public:
return std::make_shared<FunctionMaterialize>();
}
bool useDefaultImplementationForNulls() const override
{
return false;
}
/// Get the function name.
String getName() const override
{
@ -34,8 +29,16 @@ public:
return true;
}
bool useDefaultImplementationForNulls() const override { return false; }
bool useDefaultImplementationForNothing() const override { return false; }
bool useDefaultImplementationForConstants() const override { return false; }
bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }
bool useDefaultImplementationForSparseColumns() const override { return false; }
bool isSuitableForConstantFolding() const override { return false; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
@ -52,7 +55,7 @@ public:
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override
{
return arguments[0].column->convertToFullColumnIfConst();
return recursiveRemoveSparse(arguments[0].column->convertToFullColumnIfConst());
}
bool hasInformationAboutMonotonicity() const override { return true; }

View File

@ -1,12 +1,12 @@
#include <Columns/ColumnConst.h>
#include <Columns/ColumnString.h>
#include <Common/StringUtils.h>
#include <Common/UTF8Helpers.h>
#include <DataTypes/DataTypeString.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionHelpers.h>
#include <Functions/GatherUtils/Sources.h>
#include <Functions/IFunction.h>
#include <Common/StringUtils.h>
#include <Common/UTF8Helpers.h>
namespace DB
{
@ -16,8 +16,8 @@ namespace
/// If 'is_utf8' - measure offset and length in code points instead of bytes.
/// Syntax:
/// - overlay(input, replace, offset[, length])
/// - overlayUTF8(input, replace, offset[, length]) - measure offset and length in code points instead of bytes
/// - overlay(s, replace, offset[, length])
/// - overlayUTF8(s, replace, offset[, length]) - measure offset and length in code points instead of bytes
template <bool is_utf8>
class FunctionOverlay : public IFunction
{
@ -34,7 +34,7 @@ public:
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
FunctionArgumentDescriptors mandatory_args{
{"input", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"},
{"s", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"},
{"replace", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"},
{"offset", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isNativeInteger), nullptr, "(U)Int8/16/32/64"},
};
@ -100,7 +100,6 @@ public:
res_data.reserve(col_input_string->getChars().size());
}
#define OVERLAY_EXECUTE_CASE(HAS_FOUR_ARGS, OFFSET_IS_CONST, LENGTH_IS_CONST) \
if (input_is_const && replace_is_const) \
constantConstant<HAS_FOUR_ARGS, OFFSET_IS_CONST, LENGTH_IS_CONST>( \
@ -186,7 +185,6 @@ public:
return res_col;
}
private:
/// input offset is 1-based, maybe negative
/// output result is 0-based valid offset, within [0, input_size]
@ -229,6 +227,7 @@ private:
ColumnString::Chars & res_data,
ColumnString::Offsets & res_offsets) const
{
/// Free us from handling negative length in the code below
if (has_four_args && length_is_const && const_length < 0)
{
constantConstant<true, offset_is_const, false>(
@ -343,6 +342,7 @@ private:
ColumnString::Chars & res_data,
ColumnString::Offsets & res_offsets) const
{
/// Free us from handling negative length in the code below
if (has_four_args && length_is_const && const_length < 0)
{
vectorConstant<true, offset_is_const, false>(
@ -461,6 +461,7 @@ private:
ColumnString::Chars & res_data,
ColumnString::Offsets & res_offsets) const
{
/// Free us from handling negative length in the code below
if (has_four_args && length_is_const && const_length < 0)
{
constantVector<true, offset_is_const, false>(
@ -577,6 +578,7 @@ private:
ColumnString::Chars & res_data,
ColumnString::Offsets & res_offsets) const
{
/// Free us from handling negative length in the code below
if (has_four_args && length_is_const && const_length < 0)
{
vectorVector<true, offset_is_const, false>(

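The private helpers referenced by the "1-based, maybe negative" comment above normalize the SQL offset into a 0-based position clamped to [0, input_size]. A plausible standalone version of that conversion (assumed semantics, not the exact ClickHouse helper):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>

/// Convert a 1-based, possibly negative offset into a 0-based offset within [0, input_size].
/// Positive offsets count from the beginning (1 -> 0); negative offsets count from the end
/// (-1 -> last byte). Out-of-range values are clamped.
size_t normalizeOffset(int64_t offset, size_t input_size)
{
    if (offset > 0)
        return std::min<size_t>(static_cast<size_t>(offset) - 1, input_size);
    if (offset < 0)
    {
        size_t from_end = static_cast<size_t>(-offset);
        return from_end >= input_size ? 0 : input_size - from_end;
    }
    return 0; /// offset == 0 behaves like 1 in this sketch
}

int main()
{
    assert(normalizeOffset(1, 10) == 0);
    assert(normalizeOffset(11, 10) == 10);  /// clamped to input_size
    assert(normalizeOffset(-1, 10) == 9);
    assert(normalizeOffset(-100, 10) == 0); /// clamped to the beginning
}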
View File

@ -10,21 +10,31 @@
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <IO/WriteHelpers.h>
#include <algorithm>
namespace DB
{
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int ARGUMENT_OUT_OF_BOUND;
extern const int BAD_ARGUMENTS;
extern const int ILLEGAL_COLUMN;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int ARGUMENT_OUT_OF_BOUND;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}
class FunctionToStartOfInterval : public IFunction
{
private:
enum class Overload
{
Default, /// toStartOfInterval(time, interval) or toStartOfInterval(time, interval, timezone)
Origin /// toStartOfInterval(time, interval, origin) or toStartOfInterval(time, interval, origin, timezone)
};
mutable Overload overload;
public:
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionToStartOfInterval>(); }
@ -34,7 +44,7 @@ public:
size_t getNumberOfArguments() const override { return 0; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
bool useDefaultImplementationForConstants() const override { return true; }
ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1, 2}; }
ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1, 2, 3}; }
bool hasInformationAboutMonotonicity() const override { return true; }
Monotonicity getMonotonicityForRange(const IDataType &, const Field &, const Field &) const override { return { .is_monotonic = true, .is_always_monotonic = true }; }
@ -72,6 +82,9 @@ public:
"Illegal type {} of 2nd argument of function {}, expected a time interval",
type_arg2->getName(), getName());
overload = Overload::Default;
/// Determine result type for default overload (no origin)
switch (interval_type->getKind()) // NOLINT(bugprone-switch-missing-default-case)
{
case IntervalKind::Kind::Nanosecond:
@ -97,13 +110,49 @@ public:
auto check_third_argument = [&]
{
const DataTypePtr & type_arg3 = arguments[2].type;
if (!isString(type_arg3))
if (isString(type_arg3))
{
if (value_is_date && result_type == ResultType::Date)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal type {} of 3rd argument of function {}, expected a constant timezone string",
"A timezone argument of function {} with interval type {} is allowed only when the 1st argument has the type DateTime or DateTime64",
getName(), interval_type->getKind().toString());
}
else if (isDateOrDate32OrDateTimeOrDateTime64(type_arg3))
{
overload = Overload::Origin;
const DataTypePtr & type_arg1 = arguments[0].type;
if (isDate(type_arg1) && isDate(type_arg3))
result_type = ResultType::Date;
else if (isDate32(type_arg1) && isDate32(type_arg3))
result_type = ResultType::Date32;
else if (isDateTime(type_arg1) && isDateTime(type_arg3))
result_type = ResultType::DateTime;
else if (isDateTime64(type_arg1) && isDateTime64(type_arg3))
result_type = ResultType::DateTime64;
else
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Datetime argument and origin argument for function {} must have the same type", getName());
}
else
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of 3rd argument of function {}. "
"This argument is optional and must be a constant String with timezone name or a Date/Date32/DateTime/DateTime64 with a constant origin",
type_arg3->getName(), getName());
if (value_is_date && result_type == ResultType::Date) /// weird why this is && instead of || but too afraid to change it
};
auto check_fourth_argument = [&]
{
if (overload != Overload::Origin) /// sanity check
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of 3rd argument of function {}. "
"The third argument must a Date/Date32/DateTime/DateTime64 with a constant origin",
arguments[2].type->getName(), getName());
const DataTypePtr & type_arg4 = arguments[3].type;
if (!isString(type_arg4))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of 4th argument of function {}. "
"This argument is optional and must be a constant String with timezone name",
type_arg4->getName(), getName());
if (value_is_date && result_type == ResultType::Date)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"The timezone argument of function {} with interval type {} is allowed only when the 1st argument has type DateTime or DateTimt64",
"A timezone argument of function {} with interval type {} is allowed only when the 1st argument has the type DateTime or DateTime64",
getName(), interval_type->getKind().toString());
};
@ -118,10 +167,17 @@ public:
check_second_argument();
check_third_argument();
}
else if (arguments.size() == 4)
{
check_first_argument();
check_second_argument();
check_third_argument();
check_fourth_argument();
}
else
{
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Number of arguments for function {} doesn't match: passed {}, should be 2 or 3",
"Number of arguments for function {} doesn't match: passed {}, must be 2, 3 or 4",
getName(), arguments.size());
}
@ -132,10 +188,19 @@ public:
case ResultType::Date32:
return std::make_shared<DataTypeDate32>();
case ResultType::DateTime:
return std::make_shared<DataTypeDateTime>(extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false));
{
const size_t time_zone_arg_num = (overload == Overload::Default) ? 2 : 3;
return std::make_shared<DataTypeDateTime>(extractTimeZoneNameFromFunctionArguments(arguments, time_zone_arg_num, 0, false));
}
case ResultType::DateTime64:
{
UInt32 scale = 0;
if (isDateTime64(arguments[0].type) && overload == Overload::Origin)
{
scale = assert_cast<const DataTypeDateTime64 &>(*arguments[0].type.get()).getScale();
if (assert_cast<const DataTypeDateTime64 &>(*arguments[2].type.get()).getScale() != scale)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Datetime argument and origin argument for function {} must have the same scale", getName());
}
if (interval_type->getKind() == IntervalKind::Kind::Nanosecond)
scale = 9;
else if (interval_type->getKind() == IntervalKind::Kind::Microsecond)
@ -143,69 +208,103 @@ public:
else if (interval_type->getKind() == IntervalKind::Kind::Millisecond)
scale = 3;
return std::make_shared<DataTypeDateTime64>(scale, extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false));
const size_t time_zone_arg_num = (overload == Overload::Default) ? 2 : 3;
return std::make_shared<DataTypeDateTime64>(scale, extractTimeZoneNameFromFunctionArguments(arguments, time_zone_arg_num, 0, false));
}
}
std::unreachable();
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /* input_rows_count */) const override
{
const auto & time_column = arguments[0];
const auto & interval_column = arguments[1];
const auto & time_zone = extractTimeZoneFromFunctionArguments(arguments, 2, 0);
auto result_column = dispatchForTimeColumn(time_column, interval_column, result_type, time_zone, input_rows_count);
ColumnWithTypeAndName origin_column;
if (overload == Overload::Origin)
origin_column = arguments[2];
const size_t time_zone_arg_num = (overload == Overload::Default) ? 2 : 3;
const auto & time_zone = extractTimeZoneFromFunctionArguments(arguments, time_zone_arg_num, 0);
ColumnPtr result_column;
if (isDate(result_type))
result_column = dispatchForTimeColumn<DataTypeDate>(time_column, interval_column, origin_column, result_type, time_zone);
else if (isDate32(result_type))
result_column = dispatchForTimeColumn<DataTypeDate32>(time_column, interval_column, origin_column, result_type, time_zone);
else if (isDateTime(result_type))
result_column = dispatchForTimeColumn<DataTypeDateTime>(time_column, interval_column, origin_column, result_type, time_zone);
else if (isDateTime64(result_type))
result_column = dispatchForTimeColumn<DataTypeDateTime64>(time_column, interval_column, origin_column, result_type, time_zone);
return result_column;
}
private:
template <typename ReturnType>
ColumnPtr dispatchForTimeColumn(
const ColumnWithTypeAndName & time_column, const ColumnWithTypeAndName & interval_column,
const DataTypePtr & result_type, const DateLUTImpl & time_zone,
size_t input_rows_count) const
const ColumnWithTypeAndName & time_column, const ColumnWithTypeAndName & interval_column, const ColumnWithTypeAndName & origin_column, const DataTypePtr & result_type, const DateLUTImpl & time_zone) const
{
const auto & time_column_type = *time_column.type.get();
const auto & time_column_col = *time_column.column.get();
if (isDateTime64(time_column_type))
{
const auto * time_column_vec = checkAndGetColumn<ColumnDateTime64>(&time_column_col);
auto scale = assert_cast<const DataTypeDateTime64 &>(time_column_type).getScale();
if (time_column_vec)
return dispatchForIntervalColumn(assert_cast<const DataTypeDateTime64 &>(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count, scale);
}
else if (isDateTime(time_column_type))
{
const auto * time_column_vec = checkAndGetColumn<ColumnDateTime>(&time_column_col);
if (time_column_vec)
return dispatchForIntervalColumn(assert_cast<const DataTypeDateTime &>(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count);
}
else if (isDate(time_column_type))
if (isDate(time_column_type))
{
const auto * time_column_vec = checkAndGetColumn<ColumnDate>(&time_column_col);
if (time_column_vec)
return dispatchForIntervalColumn(assert_cast<const DataTypeDate &>(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count);
return dispatchForIntervalColumn<ReturnType, DataTypeDate, ColumnDate>(assert_cast<const DataTypeDate &>(time_column_type), *time_column_vec, interval_column, origin_column, result_type, time_zone);
}
else if (isDate32(time_column_type))
{
const auto * time_column_vec = checkAndGetColumn<ColumnDate32>(&time_column_col);
if (time_column_vec)
return dispatchForIntervalColumn(assert_cast<const DataTypeDate32 &>(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count);
return dispatchForIntervalColumn<ReturnType, DataTypeDate32, ColumnDate32>(assert_cast<const DataTypeDate32 &>(time_column_type), *time_column_vec, interval_column, origin_column, result_type, time_zone);
}
else if (isDateTime(time_column_type))
{
const auto * time_column_vec = checkAndGetColumn<ColumnDateTime>(&time_column_col);
if (time_column_vec)
return dispatchForIntervalColumn<ReturnType, DataTypeDateTime, ColumnDateTime>(assert_cast<const DataTypeDateTime &>(time_column_type), *time_column_vec, interval_column, origin_column, result_type, time_zone);
}
else if (isDateTime64(time_column_type))
{
const auto * time_column_vec = checkAndGetColumn<ColumnDateTime64>(&time_column_col);
auto scale = assert_cast<const DataTypeDateTime64 &>(time_column_type).getScale();
if (time_column_vec)
return dispatchForIntervalColumn<ReturnType, DataTypeDateTime64, ColumnDateTime64>(assert_cast<const DataTypeDateTime64 &>(time_column_type), *time_column_vec, interval_column, origin_column, result_type, time_zone, scale);
}
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal column for 1st argument of function {}, expected a Date, Date32, DateTime or DateTime64", getName());
}
template <typename TimeDataType, typename TimeColumnType>
template <typename ReturnType, typename TimeDataType, typename TimeColumnType>
ColumnPtr dispatchForIntervalColumn(
const TimeDataType & time_data_type, const TimeColumnType & time_column, const ColumnWithTypeAndName & interval_column,
const DataTypePtr & result_type, const DateLUTImpl & time_zone, size_t input_rows_count, UInt16 scale = 1) const
const TimeDataType & time_data_type, const TimeColumnType & time_column, const ColumnWithTypeAndName & interval_column, const ColumnWithTypeAndName & origin_column,
const DataTypePtr & result_type, const DateLUTImpl & time_zone, UInt16 scale = 1) const
{
const auto * interval_type = checkAndGetDataType<DataTypeInterval>(interval_column.type.get());
if (!interval_type)
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column for 2nd argument of function {}, must be a time interval", getName());
switch (interval_type->getKind()) // NOLINT(bugprone-switch-missing-default-case)
{
case IntervalKind::Kind::Nanosecond:
case IntervalKind::Kind::Microsecond:
case IntervalKind::Kind::Millisecond:
if (isDateOrDate32(time_data_type) || isDateTime(time_data_type))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal interval kind for argument data type {}", isDate(time_data_type) ? "Date" : "DateTime");
break;
case IntervalKind::Kind::Second:
case IntervalKind::Kind::Minute:
case IntervalKind::Kind::Hour:
if (isDateOrDate32(time_data_type))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal interval kind for argument data type Date");
break;
default:
break;
}
const auto * interval_column_const_int64 = checkAndGetColumnConst<ColumnInt64>(interval_column.column.get());
if (!interval_column_const_int64)
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column for 2nd argument of function {}, must be a const time interval", getName());
@ -217,51 +316,102 @@ private:
switch (interval_type->getKind()) // NOLINT(bugprone-switch-missing-default-case)
{
case IntervalKind::Kind::Nanosecond:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime64, IntervalKind::Kind::Nanosecond>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Nanosecond>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Microsecond:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime64, IntervalKind::Kind::Microsecond>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Microsecond>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Millisecond:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime64, IntervalKind::Kind::Millisecond>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Millisecond>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Second:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime, IntervalKind::Kind::Second>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Second>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Minute:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime, IntervalKind::Kind::Minute>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Minute>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Hour:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime, IntervalKind::Kind::Hour>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Hour>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Day:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime, IntervalKind::Kind::Day>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Day>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Week:
return execute<TimeDataType, TimeColumnType, DataTypeDate, IntervalKind::Kind::Week>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Week>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Month:
return execute<TimeDataType, TimeColumnType, DataTypeDate, IntervalKind::Kind::Month>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Month>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Quarter:
return execute<TimeDataType, TimeColumnType, DataTypeDate, IntervalKind::Kind::Quarter>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Quarter>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Year:
return execute<TimeDataType, TimeColumnType, DataTypeDate, IntervalKind::Kind::Year>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Year>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
}
std::unreachable();
}
template <typename TimeDataType, typename TimeColumnType, typename ResultDataType, IntervalKind::Kind unit>
ColumnPtr execute(
const TimeDataType &, const TimeColumnType & time_column_type, Int64 num_units,
const DataTypePtr & result_type, const DateLUTImpl & time_zone, size_t input_rows_count, UInt16 scale) const
template <typename ResultDataType, typename TimeDataType, typename TimeColumnType, IntervalKind::Kind unit>
ColumnPtr execute(const TimeDataType &, const TimeColumnType & time_column_type, Int64 num_units, const ColumnWithTypeAndName & origin_column, const DataTypePtr & result_type, const DateLUTImpl & time_zone, UInt16 scale) const
{
using ResultColumnType = typename ResultDataType::ColumnType;
using ResultFieldType = typename ResultDataType::FieldType;
const auto & time_data = time_column_type.getData();
size_t size = time_data.size();
auto result_col = result_type->createColumn();
auto * col_to = assert_cast<ResultColumnType *>(result_col.get());
auto & result_data = col_to->getData();
result_data.resize(input_rows_count);
result_data.resize(size);
Int64 scale_multiplier = DecimalUtils::scaleMultiplier<DateTime64>(scale);
for (size_t i = 0; i != input_rows_count; ++i)
result_data[i] = static_cast<ResultFieldType>(ToStartOfInterval<unit>::execute(time_data[i], num_units, time_zone, scale_multiplier));
if (origin_column.column) // Overload: Origin
{
const bool is_small_interval = (unit == IntervalKind::Kind::Nanosecond || unit == IntervalKind::Kind::Microsecond || unit == IntervalKind::Kind::Millisecond);
const bool is_result_date = isDateOrDate32(result_type);
Int64 result_scale = scale_multiplier;
Int64 origin_scale = 1;
if (isDateTime64(result_type)) /// We only have an origin scale if the arguments are DateTime64.
origin_scale = assert_cast<const DataTypeDateTime64 &>(*origin_column.type).getScaleMultiplier();
else if (!is_small_interval) /// For a large interval, when the arguments are not DateTime64, the result should not have a scale.
result_scale = 1;
if (is_small_interval)
result_scale = assert_cast<const DataTypeDateTime64 &>(*result_type).getScaleMultiplier();
/// If the scale of the result differs from the scale of the origin, we need to calculate the difference between them
/// to get the right precision for the result. For large intervals there should be no scale difference.
Int64 scale_diff = is_small_interval ? std::max(result_scale / origin_scale, origin_scale / result_scale) : 1;
static constexpr Int64 SECONDS_PER_DAY = 86'400;
UInt64 origin = origin_column.column->get64(0);
for (size_t i = 0; i != size; ++i)
{
UInt64 time_arg = time_data[i];
if (origin > static_cast<size_t>(time_arg))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "The origin must be before the end date / date with time");
if (is_result_date) /// All internal calculations of ToStartOfInterval<...> expect arguments to be seconds or milli-, micro-, nanoseconds.
{
time_arg *= SECONDS_PER_DAY;
origin *= SECONDS_PER_DAY;
}
Int64 offset = ToStartOfInterval<unit>::execute(time_arg - origin, num_units, time_zone, result_scale, origin);
/// If the arguments are DateTime64 with a large interval, we should apply the scale to the offset.
offset *= (!is_small_interval) ? result_scale : 1;
if (is_result_date) /// Convert back to date after calculations.
{
offset /= SECONDS_PER_DAY;
origin /= SECONDS_PER_DAY;
}
result_data[i] = 0;
result_data[i] += (result_scale < origin_scale) ? (origin + offset) / scale_diff : (origin + offset) * scale_diff;
}
}
else // Overload: Default
{
for (size_t i = 0; i != size; ++i)
result_data[i] = static_cast<typename ResultDataType::FieldType>(ToStartOfInterval<unit>::execute(time_data[i], num_units, time_zone, scale_multiplier));
}
return result_col;
}

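For Date/Date32 inputs the origin branch above converts day numbers to seconds, buckets relative to the origin, and converts the result back to days. A compressed standalone restatement of that round trip (ignoring time zones and DateTime64 scales, made-up name):

#include <cassert>
#include <cstdint>

static constexpr int64_t SECONDS_PER_DAY = 86'400;

/// Day-number variant of the origin bucketing: all interval math is done in seconds,
/// like the is_result_date branch above, then converted back to day numbers.
int64_t toStartOfDayIntervalWithOrigin(int64_t day, int64_t interval_days, int64_t origin_day)
{
    int64_t t = day * SECONDS_PER_DAY;
    int64_t origin = origin_day * SECONDS_PER_DAY;
    int64_t offset = (t - origin) / (interval_days * SECONDS_PER_DAY) * (interval_days * SECONDS_PER_DAY);
    return (origin + offset) / SECONDS_PER_DAY;
}

int main()
{
    /// 10-day buckets anchored at day 3: [3, 13), [13, 23), ...
    assert(toStartOfDayIntervalWithOrigin(3, 10, 3) == 3);
    assert(toStartOfDayIntervalWithOrigin(12, 10, 3) == 3);
    assert(toStartOfDayIntervalWithOrigin(13, 10, 3) == 13);
}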
View File

@ -32,8 +32,10 @@ WriteBufferFromFile::WriteBufferFromFile(
ThrottlerPtr throttler_,
mode_t mode,
char * existing_memory,
size_t alignment)
: WriteBufferFromFileDescriptor(-1, buf_size, existing_memory, throttler_, alignment, file_name_)
size_t alignment,
bool use_adaptive_buffer_size_,
size_t adaptive_buffer_initial_size)
: WriteBufferFromFileDescriptor(-1, buf_size, existing_memory, throttler_, alignment, file_name_, use_adaptive_buffer_size_, adaptive_buffer_initial_size)
{
ProfileEvents::increment(ProfileEvents::FileOpen);
@ -66,8 +68,10 @@ WriteBufferFromFile::WriteBufferFromFile(
size_t buf_size,
ThrottlerPtr throttler_,
char * existing_memory,
size_t alignment)
: WriteBufferFromFileDescriptor(fd_, buf_size, existing_memory, throttler_, alignment, original_file_name)
size_t alignment,
bool use_adaptive_buffer_size_,
size_t adaptive_buffer_initial_size)
: WriteBufferFromFileDescriptor(fd_, buf_size, existing_memory, throttler_, alignment, original_file_name, use_adaptive_buffer_size_, adaptive_buffer_initial_size)
{
fd_ = -1;
}

View File

@ -36,7 +36,9 @@ public:
ThrottlerPtr throttler_ = {},
mode_t mode = 0666,
char * existing_memory = nullptr,
size_t alignment = 0);
size_t alignment = 0,
bool use_adaptive_buffer_size_ = false,
size_t adaptive_buffer_initial_size = DBMS_DEFAULT_INITIAL_ADAPTIVE_BUFFER_SIZE);
/// Use pre-opened file descriptor.
explicit WriteBufferFromFile(
@ -45,7 +47,9 @@ public:
size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE,
ThrottlerPtr throttler_ = {},
char * existing_memory = nullptr,
size_t alignment = 0);
size_t alignment = 0,
bool use_adaptive_buffer_size_ = false,
size_t adaptive_buffer_initial_size = DBMS_DEFAULT_INITIAL_ADAPTIVE_BUFFER_SIZE);
~WriteBufferFromFile() override;

View File

@ -83,6 +83,13 @@ void WriteBufferFromFileDescriptor::nextImpl()
ProfileEvents::increment(ProfileEvents::DiskWriteElapsedMicroseconds, watch.elapsedMicroseconds());
ProfileEvents::increment(ProfileEvents::WriteBufferFromFileDescriptorWriteBytes, bytes_written);
/// Increase the buffer size for the next data if adaptive buffer sizing is used and nextImpl was called because the buffer was full.
if (!available() && use_adaptive_buffer_size && memory.size() < adaptive_max_buffer_size)
{
memory.resize(std::min(memory.size() * 2, adaptive_max_buffer_size));
BufferBase::set(memory.data(), memory.size(), 0);
}
}
/// NOTE: This class can be used as a very low-level building block, for example
@ -94,11 +101,15 @@ WriteBufferFromFileDescriptor::WriteBufferFromFileDescriptor(
char * existing_memory,
ThrottlerPtr throttler_,
size_t alignment,
std::string file_name_)
: WriteBufferFromFileBase(buf_size, existing_memory, alignment)
std::string file_name_,
bool use_adaptive_buffer_size_,
size_t adaptive_buffer_initial_size)
: WriteBufferFromFileBase(use_adaptive_buffer_size_ ? adaptive_buffer_initial_size : buf_size, existing_memory, alignment)
, fd(fd_)
, throttler(throttler_)
, file_name(std::move(file_name_))
, use_adaptive_buffer_size(use_adaptive_buffer_size_)
, adaptive_max_buffer_size(buf_size)
{
}
@ -124,6 +135,7 @@ void WriteBufferFromFileDescriptor::finalizeImpl()
return;
}
use_adaptive_buffer_size = false;
next();
}

View File

@ -18,7 +18,9 @@ public:
char * existing_memory = nullptr,
ThrottlerPtr throttler_ = {},
size_t alignment = 0,
std::string file_name_ = "");
std::string file_name_ = "",
bool use_adaptive_buffer_size_ = false,
size_t adaptive_buffer_initial_size = DBMS_DEFAULT_INITIAL_ADAPTIVE_BUFFER_SIZE);
/** Could be used before initialization if needed 'fd' was not passed to constructor.
* It's not possible to change 'fd' during work.
@ -56,6 +58,12 @@ protected:
/// If file has name contains filename, otherwise contains string "(fd=...)"
std::string file_name;
/// If true, the size of the internal buffer will be exponentially increased up to
/// adaptive_max_buffer_size after each nextImpl call. It can be used to avoid
/// a large buffer allocation when the actual size of the written data is small.
bool use_adaptive_buffer_size;
size_t adaptive_max_buffer_size;
void finalizeImpl() override;
};

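The adaptive sizing described in the comment above starts from a small initial allocation and doubles after every flush of a full buffer, capped at the configured maximum. A standalone sketch of that growth policy (hypothetical names, plain std::vector instead of Memory):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

/// Grows an internal buffer exponentially up to max_size, the way the adaptive write
/// buffer does after each nextImpl() call that filled the buffer completely.
struct AdaptiveBuffer
{
    std::vector<char> memory;
    size_t max_size;

    AdaptiveBuffer(size_t initial_size, size_t max_size_) : memory(initial_size), max_size(max_size_) {}

    void onFlushedFull()
    {
        if (memory.size() < max_size)
            memory.resize(std::min(memory.size() * 2, max_size));
    }
};

int main()
{
    AdaptiveBuffer buf(/*initial_size=*/ 16 * 1024, /*max_size=*/ 1024 * 1024);
    buf.onFlushedFull();
    assert(buf.memory.size() == 32 * 1024);
    for (int i = 0; i < 20; ++i)
        buf.onFlushedFull();
    assert(buf.memory.size() == 1024 * 1024); /// capped at max_size
}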
View File

@ -95,7 +95,7 @@ WriteBufferFromS3::WriteBufferFromS3(
std::optional<std::map<String, String>> object_metadata_,
ThreadPoolCallbackRunnerUnsafe<void> schedule_,
const WriteSettings & write_settings_)
: WriteBufferFromFileBase(buf_size_, nullptr, 0)
: WriteBufferFromFileBase(std::min(buf_size_, static_cast<size_t>(DBMS_DEFAULT_BUFFER_SIZE)), nullptr, 0)
, bucket(bucket_)
, key(key_)
, request_settings(request_settings_)
@ -351,9 +351,17 @@ void WriteBufferFromS3::allocateBuffer()
buffer_allocation_policy->nextBuffer();
chassert(0 == hidden_size);
/// The first buffer was already allocated in the BufferWithOwnMemory constructor with the buffer size provided to it.
/// It will be reallocated in subsequent nextImpl calls up to the desired buffer size from buffer_allocation_policy.
if (buffer_allocation_policy->getBufferNumber() == 1)
{
allocateFirstBuffer();
/// Reduce the memory size if the initial size was larger than the desired size from buffer_allocation_policy.
/// Usually this doesn't happen, but it does in unit tests.
if (memory.size() > buffer_allocation_policy->getBufferSize())
{
memory.resize(buffer_allocation_policy->getBufferSize());
WriteBuffer::set(memory.data(), memory.size());
}
return;
}
@ -361,14 +369,6 @@ void WriteBufferFromS3::allocateBuffer()
WriteBuffer::set(memory.data(), memory.size());
}
void WriteBufferFromS3::allocateFirstBuffer()
{
const auto max_first_buffer = buffer_allocation_policy->getBufferSize();
const auto size = std::min(size_t(DBMS_DEFAULT_BUFFER_SIZE), max_first_buffer);
memory = Memory(size);
WriteBuffer::set(memory.data(), memory.size());
}
void WriteBufferFromS3::setFakeBufferWhenPreFinalized()
{
WriteBuffer::set(fake_buffer_when_prefinalized, sizeof(fake_buffer_when_prefinalized));

View File

@ -64,7 +64,6 @@ private:
void reallocateFirstBuffer();
void detachBuffer();
void allocateBuffer();
void allocateFirstBuffer();
void setFakeBufferWhenPreFinalized();
S3::UploadPartRequest getUploadRequest(size_t part_number, PartData & data);

View File

@ -54,7 +54,7 @@ inline void WriteBufferValidUTF8::putReplacement()
}
inline void WriteBufferValidUTF8::putValid(char *data, size_t len)
inline void WriteBufferValidUTF8::putValid(const char *data, size_t len)
{
if (len == 0)
return;
@ -149,9 +149,34 @@ void WriteBufferValidUTF8::finalizeImpl()
/// Write all complete sequences from buffer.
nextImpl();
/// If unfinished sequence at end, then write replacement.
/// Handle remaining bytes if we have an incomplete sequence
if (working_buffer.begin() != memory.data())
{
const char * p = memory.data();
while (p < pos)
{
UInt8 len = length_of_utf8_sequence[static_cast<const unsigned char>(*p)];
if (p + len > pos)
{
/// Incomplete sequence. Skip one byte.
putReplacement();
++p;
}
else if (Poco::UTF8Encoding::isLegal(reinterpret_cast<const unsigned char *>(p), len))
{
/// Valid sequence
putValid(p, len);
p += len;
}
else
{
/// Invalid sequence, skip first byte.
putReplacement();
++p;
}
}
}
}
}

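finalizeImpl() now re-scans whatever is left in the internal buffer and writes a replacement for invalid or incomplete trailing sequences instead of silently dropping them. A standalone sketch of such a tail scan, using '?' as the replacement and only checking lead bytes and sequence lengths (the real class uses a length table, Poco's isLegal check, and U+FFFD):

#include <cstddef>
#include <iostream>
#include <string>

/// Length of a UTF-8 sequence from its lead byte (0 means an invalid lead / continuation byte).
static size_t utf8SequenceLength(unsigned char lead)
{
    if (lead < 0x80) return 1;
    if ((lead & 0xE0) == 0xC0) return 2;
    if ((lead & 0xF0) == 0xE0) return 3;
    if ((lead & 0xF8) == 0xF0) return 4;
    return 0;
}

/// Re-scan the tail of a buffer: copy complete sequences, replace incomplete or invalid ones.
/// This only checks sequence lengths; the real writer also validates the continuation bytes.
std::string flushTail(const std::string & tail)
{
    std::string out;
    size_t i = 0;
    while (i < tail.size())
    {
        size_t len = utf8SequenceLength(static_cast<unsigned char>(tail[i]));
        if (len == 0 || i + len > tail.size())
        {
            out += '?'; /// replacement for an invalid or incomplete sequence
            ++i;
        }
        else
        {
            out.append(tail, i, len);
            i += len;
        }
    }
    return out;
}

int main()
{
    /// "é" is 0xC3 0xA9; a lone 0xC3 at the end is an incomplete sequence.
    std::cout << flushTail(std::string("ab\xC3\xA9") + '\xC3') << "\n"; /// prints "abé?"
}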
View File

@ -26,7 +26,7 @@ public:
private:
void putReplacement();
void putValid(char * data, size_t len);
void putValid(const char * data, size_t len);
void nextImpl() override;
void finalizeImpl() override;

View File

@ -24,6 +24,9 @@ struct WriteSettings
bool s3_allow_parallel_part_upload = true;
bool azure_allow_parallel_part_upload = true;
bool use_adaptive_write_buffer = false;
size_t adaptive_write_buffer_initial_size = 16 * 1024;
bool operator==(const WriteSettings & other) const = default;
};

View File

@ -2371,7 +2371,7 @@ void NO_INLINE Aggregator::mergeDataNullKey(
template <typename Method, typename Table>
void NO_INLINE Aggregator::mergeDataImpl(
Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions [[maybe_unused]], bool prefetch) const
Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions [[maybe_unused]], bool prefetch, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled) const
{
if constexpr (Method::low_cardinality_optimization || Method::one_key_nullable_optimization)
mergeDataNullKey<Method, Table>(table_dst, table_src, arena);
@ -2410,7 +2410,7 @@ void NO_INLINE Aggregator::mergeDataImpl(
{
if (!is_aggregate_function_compiled[i])
aggregate_functions[i]->mergeAndDestroyBatch(
dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], arena);
dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], thread_pool, is_cancelled, arena);
}
return;
@ -2420,7 +2420,7 @@ void NO_INLINE Aggregator::mergeDataImpl(
for (size_t i = 0; i < params.aggregates_size; ++i)
{
aggregate_functions[i]->mergeAndDestroyBatch(
dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], arena);
dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], thread_pool, is_cancelled, arena);
}
}
@ -2535,8 +2535,10 @@ void NO_INLINE Aggregator::mergeWithoutKeyDataImpl(
template <typename Method>
void NO_INLINE Aggregator::mergeSingleLevelDataImpl(
ManyAggregatedDataVariants & non_empty_data) const
ManyAggregatedDataVariants & non_empty_data, std::atomic<bool> & is_cancelled) const
{
ThreadPool thread_pool{CurrentMetrics::AggregatorThreads, CurrentMetrics::AggregatorThreadsActive, CurrentMetrics::AggregatorThreadsScheduled, params.max_threads};
AggregatedDataVariantsPtr & res = non_empty_data[0];
bool no_more_keys = false;
@ -2557,13 +2559,13 @@ void NO_INLINE Aggregator::mergeSingleLevelDataImpl(
if (compiled_aggregate_functions_holder)
{
mergeDataImpl<Method>(
getDataVariant<Method>(*res).data, getDataVariant<Method>(current).data, res->aggregates_pool, true, prefetch);
getDataVariant<Method>(*res).data, getDataVariant<Method>(current).data, res->aggregates_pool, true, prefetch, thread_pool, is_cancelled);
}
else
#endif
{
mergeDataImpl<Method>(
getDataVariant<Method>(*res).data, getDataVariant<Method>(current).data, res->aggregates_pool, false, prefetch);
getDataVariant<Method>(*res).data, getDataVariant<Method>(current).data, res->aggregates_pool, false, prefetch, thread_pool, is_cancelled);
}
}
else if (res->without_key)
@ -2589,7 +2591,7 @@ void NO_INLINE Aggregator::mergeSingleLevelDataImpl(
#define M(NAME) \
template void NO_INLINE Aggregator::mergeSingleLevelDataImpl<decltype(AggregatedDataVariants::NAME)::element_type>( \
ManyAggregatedDataVariants & non_empty_data) const;
ManyAggregatedDataVariants & non_empty_data, std::atomic<bool> & is_cancelled) const;
APPLY_FOR_VARIANTS_SINGLE_LEVEL(M)
#undef M
@ -2597,6 +2599,8 @@ template <typename Method>
void NO_INLINE Aggregator::mergeBucketImpl(
ManyAggregatedDataVariants & data, Int32 bucket, Arena * arena, std::atomic<bool> & is_cancelled) const
{
ThreadPool thread_pool{CurrentMetrics::AggregatorThreads, CurrentMetrics::AggregatorThreadsActive, CurrentMetrics::AggregatorThreadsScheduled, params.max_threads};
/// We merge all aggregation results to the first.
AggregatedDataVariantsPtr & res = data[0];
@ -2613,7 +2617,7 @@ void NO_INLINE Aggregator::mergeBucketImpl(
if (compiled_aggregate_functions_holder)
{
mergeDataImpl<Method>(
getDataVariant<Method>(*res).data.impls[bucket], getDataVariant<Method>(current).data.impls[bucket], arena, true, prefetch);
getDataVariant<Method>(*res).data.impls[bucket], getDataVariant<Method>(current).data.impls[bucket], arena, true, prefetch, thread_pool, is_cancelled);
}
else
#endif
@ -2623,7 +2627,9 @@ void NO_INLINE Aggregator::mergeBucketImpl(
getDataVariant<Method>(current).data.impls[bucket],
arena,
false,
prefetch);
prefetch,
thread_pool,
is_cancelled);
}
}
}
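
These Aggregator.cpp hunks thread a ThreadPool and an is_cancelled flag down to mergeAndDestroyBatch, so state merging can be split across threads and abandoned early when the query is cancelled. A minimal, self-contained sketch of that shape, using plain std::thread and a hypothetical per-element merge_one callback rather than the actual ClickHouse interfaces:

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <thread>
#include <vector>

/// Merge src[i] into dst[i] in parallel chunks; stop early if `is_cancelled` is set.
/// `merge_one` stands in for a per-state merge such as mergeAndDestroyBatch.
template <typename Place, typename MergeOne>
void mergeBatchParallel(
    std::vector<Place> & dst,
    std::vector<Place> & src,
    MergeOne merge_one,
    std::atomic<bool> & is_cancelled,
    size_t num_threads)
{
    num_threads = std::max<size_t>(num_threads, 1);
    const size_t total = dst.size();
    const size_t chunk = (total + num_threads - 1) / num_threads;

    std::vector<std::thread> workers;
    for (size_t t = 0; t < num_threads; ++t)
    {
        const size_t begin = t * chunk;
        const size_t end = std::min(total, begin + chunk);
        if (begin >= end)
            break;
        workers.emplace_back([&, begin, end]
        {
            for (size_t i = begin; i < end; ++i)
            {
                if (is_cancelled.load(std::memory_order_relaxed))
                    return; /// cooperative cancellation, checked between elements
                merge_one(dst[i], src[i]);
            }
        });
    }
    for (auto & w : workers)
        w.join();
}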

View File

@ -467,7 +467,7 @@ private:
/// Merge data from hash table `src` into `dst`.
template <typename Method, typename Table>
void mergeDataImpl(Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions, bool prefetch) const;
void mergeDataImpl(Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions, bool prefetch, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled) const;
/// Merge data from hash table `src` into `dst`, but only for keys that already exist in dst. In other cases, merge the data into `overflows`.
template <typename Method, typename Table>
@ -490,7 +490,7 @@ private:
template <typename Method>
void mergeSingleLevelDataImpl(
ManyAggregatedDataVariants & non_empty_data) const;
ManyAggregatedDataVariants & non_empty_data, std::atomic<bool> & is_cancelled) const;
template <bool return_single_block>
using ConvertToBlockRes = std::conditional_t<return_single_block, Block, BlocksList>;

View File

@ -228,8 +228,8 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
metadata_path = metadata_path / "store" / DatabaseCatalog::getPathForUUID(create.uuid);
if (!create.attach && fs::exists(metadata_path))
throw Exception(ErrorCodes::DATABASE_ALREADY_EXISTS, "Metadata directory {} already exists", metadata_path.string());
if (!create.attach && fs::exists(metadata_path) && !fs::is_empty(metadata_path))
throw Exception(ErrorCodes::DATABASE_ALREADY_EXISTS, "Metadata directory {} already exists and is not empty", metadata_path.string());
}
else if (create.storage->engine->name == "MaterializeMySQL"
|| create.storage->engine->name == "MaterializedMySQL")
@ -329,6 +329,9 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
writeChar('\n', statement_buf);
String statement = statement_buf.str();
/// Needed to make database creation retriable if it fails after the file is created
fs::remove(metadata_file_tmp_path);
/// The exclusive flag guarantees that the database is not being created right now in another thread.
WriteBufferFromFile out(metadata_file_tmp_path, statement.size(), O_WRONLY | O_CREAT | O_EXCL);
writeString(statement, out);
@ -350,13 +353,6 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
DatabaseCatalog::instance().attachDatabase(database_name, database);
added = true;
if (need_write_metadata)
{
/// Prevents overwriting the metadata of a detached database
renameNoReplace(metadata_file_tmp_path, metadata_file_path);
renamed = true;
}
if (!load_database_without_tables)
{
/// We use global context here, because storages lifetime is bigger than query context lifetime
@ -368,6 +364,13 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
/// Only then prioritize, schedule and wait all the startup tasks
waitLoad(currentPoolOr(TablesLoaderForegroundPoolId), startup_tasks);
}
if (need_write_metadata)
{
/// Prevents overwriting the metadata of a detached database
renameNoReplace(metadata_file_tmp_path, metadata_file_path);
renamed = true;
}
}
catch (...)
{
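
Taken together, the two hunks above make database creation retriable: a temporary metadata file left behind by a previous failed attempt is removed before the O_EXCL create, and publishing the metadata under its final name is deferred until the tables have loaded, so a half-finished CREATE never clobbers the metadata of a detached database. A rough POSIX-style sketch of the pattern, with hypothetical paths and link/unlink standing in for ClickHouse's renameNoReplace:

#include <fcntl.h>
#include <unistd.h>
#include <filesystem>
#include <stdexcept>
#include <string>

void writeMetadataRetriable(const std::string & tmp_path, const std::string & final_path, const std::string & statement)
{
    /// A previous failed attempt may have left the tmp file behind;
    /// remove it so the exclusive create below can succeed on retry.
    std::filesystem::remove(tmp_path);

    /// O_EXCL guarantees no other thread is creating the same database right now.
    int fd = ::open(tmp_path.c_str(), O_WRONLY | O_CREAT | O_EXCL, 0644);
    if (fd < 0)
        throw std::runtime_error("cannot create " + tmp_path);
    if (::write(fd, statement.data(), statement.size()) != static_cast<ssize_t>(statement.size()))
    {
        ::close(fd);
        throw std::runtime_error("short write to " + tmp_path);
    }
    ::fsync(fd);
    ::close(fd);

    /// ... attach the database and load its tables here; only if that succeeds ...

    /// Publish the metadata last, refusing to replace an existing file
    /// (the real code uses renameNoReplace; plain rename would silently overwrite).
    if (::link(tmp_path.c_str(), final_path.c_str()) != 0)
        throw std::runtime_error("metadata file already exists: " + final_path);
    ::unlink(tmp_path.c_str());
}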
@ -781,14 +784,14 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti
const auto & settings = getContext()->getSettingsRef();
if (index_desc.type == FULL_TEXT_INDEX_NAME && !settings.allow_experimental_full_text_index)
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Experimental full-text index feature is not enabled (the setting 'allow_experimental_full_text_index')");
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Experimental full-text index feature is disabled. Turn on setting 'allow_experimental_full_text_index'");
/// ----
/// Temporary check during a transition period. Please remove at the end of 2024.
if (index_desc.type == INVERTED_INDEX_NAME && !settings.allow_experimental_inverted_index)
throw Exception(ErrorCodes::ILLEGAL_INDEX, "Please use index type 'full_text' instead of 'inverted'");
/// ----
if (index_desc.type == "vector_similarity" && !settings.allow_experimental_vector_similarity_index)
throw Exception(ErrorCodes::INCORRECT_QUERY, "Vector similarity index is disabled. Turn on allow_experimental_vector_similarity_index");
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Experimental vector similarity index is disabled. Turn on setting 'allow_experimental_vector_similarity_index'");
properties.indices.push_back(index_desc);
}
@ -1226,6 +1229,27 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data
bool from_path = create.attach_from_path.has_value();
bool is_on_cluster = getContext()->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY;
if (database->getEngineName() == "Replicated" && create.uuid != UUIDHelpers::Nil && !is_replicated_database_internal && !is_on_cluster && !create.attach)
{
if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 0)
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "It's not allowed to explicitly specify UUIDs for tables in Replicated databases, "
"see database_replicated_allow_explicit_uuid");
}
else if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 1)
{
LOG_WARNING(&Poco::Logger::get("InterpreterCreateQuery"), "It's not recommended to explicitly specify UUIDs for tables in Replicated databases");
}
else if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 2)
{
UUID old_uuid = create.uuid;
create.uuid = UUIDHelpers::Nil;
create.generateRandomUUIDs();
LOG_WARNING(&Poco::Logger::get("InterpreterCreateQuery"), "Replaced a user-provided UUID ({}) with a random one ({}) "
"to make sure it's unique", old_uuid, create.uuid);
}
}
if (is_replicated_database_internal && !internal)
{
if (create.uuid == UUIDHelpers::Nil)

View File

@ -164,7 +164,7 @@ Field convertDecimalType(const Field & from, const To & type)
}
Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const IDataType * from_type_hint)
Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const IDataType * from_type_hint, const FormatSettings & format_settings)
{
if (from_type_hint && from_type_hint->equals(type))
{
@ -359,7 +359,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
Array res(src_arr_size);
for (size_t i = 0; i < src_arr_size; ++i)
{
res[i] = convertFieldToType(src_arr[i], element_type);
res[i] = convertFieldToType(src_arr[i], element_type, nullptr, format_settings);
if (res[i].isNull() && !canContainNull(element_type))
{
// See the comment for Tuples below.
@ -387,7 +387,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
for (size_t i = 0; i < dst_tuple_size; ++i)
{
const auto & element_type = *(type_tuple->getElements()[i]);
res[i] = convertFieldToType(src_tuple[i], element_type);
res[i] = convertFieldToType(src_tuple[i], element_type, nullptr, format_settings);
if (res[i].isNull() && !canContainNull(element_type))
{
/*
@ -435,12 +435,12 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
Tuple updated_entry(2);
updated_entry[0] = convertFieldToType(key, key_type);
updated_entry[0] = convertFieldToType(key, key_type, nullptr, format_settings);
if (updated_entry[0].isNull() && !canContainNull(key_type))
have_unconvertible_element = true;
updated_entry[1] = convertFieldToType(value, value_type);
updated_entry[1] = convertFieldToType(value, value_type, nullptr, format_settings);
if (updated_entry[1].isNull() && !canContainNull(value_type))
have_unconvertible_element = true;
@ -551,7 +551,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
ReadBufferFromString in_buffer(src.safeGet<String>());
try
{
type_to_parse->getDefaultSerialization()->deserializeWholeText(*col, in_buffer, FormatSettings{});
type_to_parse->getDefaultSerialization()->deserializeWholeText(*col, in_buffer, format_settings);
}
catch (Exception & e)
{
@ -563,7 +563,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
}
Field parsed = (*col)[0];
return convertFieldToType(parsed, type, from_type_hint);
return convertFieldToType(parsed, type, from_type_hint, format_settings);
}
throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch in IN or VALUES section. Expected: {}. Got: {}",
@ -573,7 +573,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
}
Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint)
Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint, const FormatSettings & format_settings)
{
if (from_value.isNull())
return from_value;
@ -582,7 +582,7 @@ Field convertFieldToType(const Field & from_value, const IDataType & to_type, co
return from_value;
if (const auto * low_cardinality_type = typeid_cast<const DataTypeLowCardinality *>(&to_type))
return convertFieldToType(from_value, *low_cardinality_type->getDictionaryType(), from_type_hint);
return convertFieldToType(from_value, *low_cardinality_type->getDictionaryType(), from_type_hint, format_settings);
else if (const auto * nullable_type = typeid_cast<const DataTypeNullable *>(&to_type))
{
const IDataType & nested_type = *nullable_type->getNestedType();
@ -593,20 +593,20 @@ Field convertFieldToType(const Field & from_value, const IDataType & to_type, co
if (from_type_hint && from_type_hint->equals(nested_type))
return from_value;
return convertFieldToTypeImpl(from_value, nested_type, from_type_hint);
return convertFieldToTypeImpl(from_value, nested_type, from_type_hint, format_settings);
}
else
return convertFieldToTypeImpl(from_value, to_type, from_type_hint);
return convertFieldToTypeImpl(from_value, to_type, from_type_hint, format_settings);
}
Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint)
Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint, const FormatSettings & format_settings)
{
bool is_null = from_value.isNull();
if (is_null && !canContainNull(to_type))
throw Exception(ErrorCodes::TYPE_MISMATCH, "Cannot convert NULL to {}", to_type.getName());
Field converted = convertFieldToType(from_value, to_type, from_type_hint);
Field converted = convertFieldToType(from_value, to_type, from_type_hint, format_settings);
if (!is_null && converted.isNull())
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND,
@ -626,9 +626,9 @@ static bool decimalEqualsFloat(Field field, Float64 float_value)
return decimal_to_float == float_value;
}
std::optional<Field> convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type)
std::optional<Field> convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type, const FormatSettings & format_settings)
{
Field result_value = convertFieldToType(from_value, to_type, &from_type);
Field result_value = convertFieldToType(from_value, to_type, &from_type, format_settings);
if (Field::isDecimal(from_value.getType()) && Field::isDecimal(result_value.getType()))
{

View File

@ -1,6 +1,7 @@
#pragma once
#include <Core/Field.h>
#include <Formats/FormatSettings.h>
namespace DB
@ -15,13 +16,13 @@ class IDataType;
* Checks type compatibility, checks that the value falls within the valid range of the type, and performs the type conversion.
* If the value does not fall into the range, returns Null.
*/
Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr);
Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr, const FormatSettings & format_settings = {});
/// Does the same, but throws ARGUMENT_OUT_OF_BOUND if value does not fall into the range.
Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr);
Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr, const FormatSettings & format_settings = {});
/// Applies stricter rules than convertFieldToType, doesn't allow loss of precision converting to Decimal.
/// Returns `Field` if the conversion was successful and the result is equal to the original value, otherwise returns nullopt.
std::optional<Field> convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type);
std::optional<Field> convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type, const FormatSettings & format_settings = {});
}
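
The convertFieldToType family now carries a FormatSettings argument and forwards it into every recursive conversion (array and tuple elements, map keys and values, LowCardinality and Nullable wrappers) and into deserializeWholeText, instead of silently falling back to a default-constructed FormatSettings{}. A toy sketch of the same forwarding pattern, with hypothetical ParseOptions and parseScalar names standing in for FormatSettings and the serialization call:

#include <cstdint>
#include <string>
#include <vector>

struct ParseOptions { bool best_effort_datetime = false; };

/// A nested input value: either a scalar string or an array of children.
struct Value
{
    std::string scalar;
    std::vector<Value> children;
};

/// Converted result with the same nesting.
struct Converted
{
    int64_t scalar = 0;
    std::vector<Converted> children;
};

int64_t parseScalar(const std::string & s, const ParseOptions & opts)
{
    (void)opts;            /// stands in for deserializeWholeText(..., format_settings)
    return std::stoll(s);
}

Converted convert(const Value & src, const ParseOptions & opts)
{
    Converted res;
    if (src.children.empty())
    {
        res.scalar = parseScalar(src.scalar, opts);
        return res;
    }
    res.children.reserve(src.children.size());
    for (const auto & elem : src.children)
        res.children.push_back(convert(elem, opts));   /// forward opts; never rebuild defaults here
    return res;
}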

View File

@ -198,6 +198,7 @@ FiltersForTableExpressionMap collectFiltersForAnalysis(const QueryTreeNodePtr &
auto & result_query_plan = planner.getQueryPlan();
auto optimization_settings = QueryPlanOptimizationSettings::fromContext(query_context);
optimization_settings.build_sets = false; // no need to build sets to collect filters
result_query_plan.optimize(optimization_settings);
FiltersForTableExpressionMap res;

View File

@ -0,0 +1,154 @@
#include <Formats/FormatFactory.h>
#include <Formats/JSONUtils.h>
#include <Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h>
#include <IO/WriteHelpers.h>
namespace DB
{
JSONCompactWithProgressRowOutputFormat::JSONCompactWithProgressRowOutputFormat(
WriteBuffer & out_, const Block & header, const FormatSettings & settings_, bool yield_strings_)
: JSONRowOutputFormat(out_, header, settings_, yield_strings_)
{
}
void JSONCompactWithProgressRowOutputFormat::writePrefix()
{
JSONUtils::writeCompactObjectStart(*ostr);
JSONUtils::writeCompactMetadata(names, types, settings, *ostr);
JSONUtils::writeCompactObjectEnd(*ostr);
writeCString("\n", *ostr);
}
void JSONCompactWithProgressRowOutputFormat::writeField(const IColumn & column, const ISerialization & serialization, size_t row_num)
{
JSONUtils::writeFieldFromColumn(column, serialization, row_num, yield_strings, settings, *ostr);
++field_number;
}
void JSONCompactWithProgressRowOutputFormat::writeFieldDelimiter()
{
JSONUtils::writeFieldCompactDelimiter(*ostr);
}
void JSONCompactWithProgressRowOutputFormat::writeRowStartDelimiter()
{
if (has_progress)
writeProgress();
writeCString("{\"data\":", *ostr);
JSONUtils::writeCompactArrayStart(*ostr);
}
void JSONCompactWithProgressRowOutputFormat::writeRowEndDelimiter()
{
JSONUtils::writeCompactArrayEnd(*ostr);
writeCString("}\n", *ostr);
field_number = 0;
++row_count;
}
void JSONCompactWithProgressRowOutputFormat::writeRowBetweenDelimiter()
{
}
void JSONCompactWithProgressRowOutputFormat::writeBeforeTotals()
{
JSONUtils::writeCompactObjectStart(*ostr);
JSONUtils::writeCompactArrayStart(*ostr, 0, "totals");
}
void JSONCompactWithProgressRowOutputFormat::writeTotals(const Columns & columns, size_t row_num)
{
JSONUtils::writeCompactColumns(columns, serializations, row_num, yield_strings, settings, *ostr);
}
void JSONCompactWithProgressRowOutputFormat::writeAfterTotals()
{
JSONUtils::writeCompactArrayEnd(*ostr);
JSONUtils::writeCompactObjectEnd(*ostr);
writeCString("\n", *ostr);
}
void JSONCompactWithProgressRowOutputFormat::writeExtremesElement(const char * title, const Columns & columns, size_t row_num)
{
JSONUtils::writeCompactArrayStart(*ostr, 2, title);
JSONUtils::writeCompactColumns(columns, serializations, row_num, yield_strings, settings, *ostr);
JSONUtils::writeCompactArrayEnd(*ostr);
}
void JSONCompactWithProgressRowOutputFormat::onProgress(const Progress & value)
{
statistics.progress.incrementPiecewiseAtomically(value);
String progress_line;
WriteBufferFromString buf(progress_line);
writeCString("{\"progress\":", buf);
statistics.progress.writeJSON(buf);
writeCString("}\n", buf);
buf.finalize();
std::lock_guard lock(progress_lines_mutex);
progress_lines.emplace_back(std::move(progress_line));
has_progress = true;
}
void JSONCompactWithProgressRowOutputFormat::flush()
{
if (has_progress)
writeProgress();
JSONRowOutputFormat::flush();
}
void JSONCompactWithProgressRowOutputFormat::writeSuffix()
{
if (has_progress)
writeProgress();
}
void JSONCompactWithProgressRowOutputFormat::writeProgress()
{
std::lock_guard lock(progress_lines_mutex);
for (const auto & progress_line : progress_lines)
writeString(progress_line, *ostr);
progress_lines.clear();
has_progress = false;
}
void JSONCompactWithProgressRowOutputFormat::finalizeImpl()
{
if (exception_message.empty())
{
JSONUtils::writeCompactAdditionalInfo(
row_count,
statistics.rows_before_limit,
statistics.applied_limit,
statistics.watch,
statistics.progress,
settings.write_statistics,
*ostr);
}
else
{
JSONUtils::writeCompactObjectStart(*ostr);
JSONUtils::writeException(exception_message, *ostr, settings, 0);
JSONUtils::writeCompactObjectEnd(*ostr);
}
writeCString("\n", *ostr);
ostr->next();
}
void registerOutputFormatJSONCompactWithProgress(FormatFactory & factory)
{
factory.registerOutputFormat(
"JSONCompactWithProgress",
[](WriteBuffer & buf, const Block & sample, const FormatSettings & format_settings)
{ return std::make_shared<JSONCompactWithProgressRowOutputFormat>(buf, sample, format_settings, false); });
factory.registerOutputFormat(
"JSONCompactWithProgressStrings",
[](WriteBuffer & buf, const Block & sample, const FormatSettings & format_settings)
{ return std::make_shared<JSONCompactWithProgressRowOutputFormat>(buf, sample, format_settings, true); });
}
}

View File

@ -0,0 +1,50 @@
#pragma once
#include <Core/Block.h>
#include <IO/WriteBuffer.h>
#include <IO/WriteBufferValidUTF8.h>
#include <Processors/Formats/Impl/JSONRowOutputFormat.h>
namespace DB
{
struct FormatSettings;
class JSONCompactWithProgressRowOutputFormat final : public JSONRowOutputFormat
{
public:
JSONCompactWithProgressRowOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & settings_, bool yield_strings_);
String getName() const override { return "JSONCompactWithProgressRowOutputFormat"; }
void onProgress(const Progress & value) override;
void flush() override;
private:
void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override;
void writeFieldDelimiter() override;
void writeRowStartDelimiter() override;
void writeRowEndDelimiter() override;
void writeRowBetweenDelimiter() override;
bool supportTotals() const override { return true; }
bool supportExtremes() const override { return true; }
void writeBeforeTotals() override;
void writeAfterTotals() override;
void writeExtremesElement(const char * title, const Columns & columns, size_t row_num) override;
void writeTotals(const Columns & columns, size_t row_num) override;
void writeProgress();
void writePrefix() override;
void writeSuffix() override;
void finalizeImpl() override;
std::vector<String> progress_lines;
std::mutex progress_lines_mutex;
/// To avoid locking the mutex and checking progress_lines on every row,
/// we use an atomic flag indicating that progress_lines is not empty.
std::atomic_bool has_progress = false;
};
}
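
In this format, onProgress can fire while rows are being written, so progress events are serialized to {"progress":...} lines, buffered under a mutex, and flushed ahead of the next {"data":[...]} row; the atomic has_progress flag keeps the per-row path from taking the lock when nothing is pending. A minimal sketch of that buffering pattern, with illustrative names rather than the actual format class:

#include <atomic>
#include <mutex>
#include <ostream>
#include <string>
#include <vector>

class ProgressBuffer
{
public:
    /// May be called concurrently with row writing.
    void onProgress(std::string line)
    {
        std::lock_guard lock(mutex);
        lines.push_back(std::move(line));
        has_progress = true;
    }

    /// Called before each row and on flush.
    void flushIfAny(std::ostream & out)
    {
        if (!has_progress.load(std::memory_order_relaxed))
            return;                          /// cheap check, no lock on the common path
        std::lock_guard lock(mutex);
        for (const auto & l : lines)
            out << l;
        lines.clear();
        has_progress = false;
    }

private:
    std::vector<std::string> lines;
    std::mutex mutex;
    std::atomic_bool has_progress{false};
};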

View File

@ -542,7 +542,7 @@ bool ValuesBlockInputFormat::parseExpression(IColumn & column, size_t column_idx
if (format_settings.null_as_default)
tryToReplaceNullFieldsInComplexTypesWithDefaultValues(expression_value, type);
Field value = convertFieldToType(expression_value, type, value_raw.second.get());
Field value = convertFieldToType(expression_value, type, value_raw.second.get(), format_settings);
/// Check that we are indeed allowed to insert a NULL.
if (value.isNull() && !type.isNullable() && !type.isLowCardinalityNullable())

View File

@ -16,7 +16,7 @@ void optimizeTreeFirstPass(const QueryPlanOptimizationSettings & settings, Query
void optimizeTreeSecondPass(const QueryPlanOptimizationSettings & optimization_settings, QueryPlan::Node & root, QueryPlan::Nodes & nodes);
/// Third pass is used to apply filters such as key conditions and skip indexes to the storages that support them.
/// After that it adds CreateSetsStep for the subqueries that have not been used in the filters.
void optimizeTreeThirdPass(QueryPlan & plan, QueryPlan::Node & root, QueryPlan::Nodes & nodes);
void addStepsToBuildSets(QueryPlan & plan, QueryPlan::Node & root, QueryPlan::Nodes & nodes);
/// Optimization (first pass) is a function applied to QueryPlan::Node.
/// It can read and update subtree of specified node.

View File

@ -75,6 +75,8 @@ struct QueryPlanOptimizationSettings
String force_projection_name;
bool optimize_use_implicit_projections = false;
bool build_sets = true;
static QueryPlanOptimizationSettings fromSettings(const Settings & from);
static QueryPlanOptimizationSettings fromContext(ContextPtr from);
};

View File

@ -216,7 +216,7 @@ void optimizeTreeSecondPass(const QueryPlanOptimizationSettings & optimization_s
optimization_settings.force_projection_name);
}
void optimizeTreeThirdPass(QueryPlan & plan, QueryPlan::Node & root, QueryPlan::Nodes & nodes)
void addStepsToBuildSets(QueryPlan & plan, QueryPlan::Node & root, QueryPlan::Nodes & nodes)
{
Stack stack;
stack.push_back({.node = &root});

View File

@ -50,6 +50,9 @@ bool isSafePrimaryDataKeyType(const IDataType & data_type)
case TypeIndex::Float64:
case TypeIndex::Nullable:
case TypeIndex::ObjectDeprecated:
case TypeIndex::Object:
case TypeIndex::Variant:
case TypeIndex::Dynamic:
return false;
case TypeIndex::Array:
{
@ -76,16 +79,6 @@ bool isSafePrimaryDataKeyType(const IDataType & data_type)
const auto & data_type_map = static_cast<const DataTypeMap &>(data_type);
return isSafePrimaryDataKeyType(*data_type_map.getKeyType()) && isSafePrimaryDataKeyType(*data_type_map.getValueType());
}
case TypeIndex::Variant:
{
const auto & data_type_variant = static_cast<const DataTypeVariant &>(data_type);
const auto & data_type_variant_elements = data_type_variant.getVariants();
for (const auto & data_type_variant_element : data_type_variant_elements)
if (!isSafePrimaryDataKeyType(*data_type_variant_element))
return false;
return false;
}
default:
{
break;

View File

@ -504,7 +504,8 @@ void QueryPlan::optimize(const QueryPlanOptimizationSettings & optimization_sett
QueryPlanOptimizations::optimizeTreeFirstPass(optimization_settings, *root, nodes);
QueryPlanOptimizations::optimizeTreeSecondPass(optimization_settings, *root, nodes);
QueryPlanOptimizations::optimizeTreeThirdPass(*this, *root, nodes);
if (optimization_settings.build_sets)
QueryPlanOptimizations::addStepsToBuildSets(*this, *root, nodes);
updateDataStreams(*root);
}

View File

@ -486,7 +486,7 @@ private:
#define M(NAME) \
else if (first->type == AggregatedDataVariants::Type::NAME) \
params->aggregator.mergeSingleLevelDataImpl<decltype(first->NAME)::element_type>(*data);
params->aggregator.mergeSingleLevelDataImpl<decltype(first->NAME)::element_type>(*data, shared_data->is_cancelled);
if (false) {} // NOLINT
APPLY_FOR_VARIANTS_SINGLE_LEVEL(M)
#undef M

View File

@ -1142,6 +1142,16 @@ bool AlterCommands::hasFullTextIndex(const StorageInMemoryMetadata & metadata)
return false;
}
bool AlterCommands::hasVectorSimilarityIndex(const StorageInMemoryMetadata & metadata)
{
for (const auto & index : metadata.secondary_indices)
{
if (index.type == "vector_similarity")
return true;
}
return false;
}
void AlterCommands::apply(StorageInMemoryMetadata & metadata, ContextPtr context) const
{
if (!prepared)

View File

@ -237,6 +237,9 @@ public:
/// Check if commands have any full-text index
static bool hasFullTextIndex(const StorageInMemoryMetadata & metadata);
/// Check if commands have any vector similarity index
static bool hasVectorSimilarityIndex(const StorageInMemoryMetadata & metadata);
};
}

View File

@ -28,7 +28,6 @@ namespace ErrorCodes
extern const int TOO_MANY_PARTITIONS;
extern const int DISTRIBUTED_TOO_MANY_PENDING_BYTES;
extern const int ARGUMENT_OUT_OF_BOUND;
extern const int LOGICAL_ERROR;
}
/// Can the batch be split and send files from batch one-by-one instead?
@ -243,10 +242,7 @@ void DistributedAsyncInsertBatch::sendBatch(const SettingsChanges & settings_cha
auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(insert_settings);
auto results = parent.pool->getManyCheckedForInsert(timeouts, insert_settings, PoolMode::GET_ONE, parent.storage.remote_storage.getQualifiedName());
auto result = results.front();
if (parent.pool->isTryResultInvalid(result, insert_settings.distributed_insert_skip_read_only_replicas))
throw Exception(ErrorCodes::LOGICAL_ERROR, "Got an invalid connection result");
auto result = parent.pool->getValidTryResult(results, insert_settings.distributed_insert_skip_read_only_replicas);
connection = std::move(result.entry);
compression_expected = connection->getCompression() == Protocol::Compression::Enable;
@ -305,10 +301,7 @@ void DistributedAsyncInsertBatch::sendSeparateFiles(const SettingsChanges & sett
auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(insert_settings);
auto results = parent.pool->getManyCheckedForInsert(timeouts, insert_settings, PoolMode::GET_ONE, parent.storage.remote_storage.getQualifiedName());
auto result = results.front();
if (parent.pool->isTryResultInvalid(result, insert_settings.distributed_insert_skip_read_only_replicas))
throw Exception(ErrorCodes::LOGICAL_ERROR, "Got an invalid connection result");
auto result = parent.pool->getValidTryResult(results, insert_settings.distributed_insert_skip_read_only_replicas);
auto connection = std::move(result.entry);
bool compression_expected = connection->getCompression() == Protocol::Compression::Enable;

View File

@ -415,10 +415,7 @@ void DistributedAsyncInsertDirectoryQueue::processFile(std::string & file_path,
auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(insert_settings);
auto results = pool->getManyCheckedForInsert(timeouts, insert_settings, PoolMode::GET_ONE, storage.remote_storage.getQualifiedName());
auto result = results.front();
if (pool->isTryResultInvalid(result, insert_settings.distributed_insert_skip_read_only_replicas))
throw Exception(ErrorCodes::LOGICAL_ERROR, "Got an invalid connection result");
auto result = pool->getValidTryResult(results, insert_settings.distributed_insert_skip_read_only_replicas);
auto connection = std::move(result.entry);
LOG_DEBUG(log, "Sending `{}` to {} ({} rows, {} bytes)",

View File

@ -347,7 +347,7 @@ DistributedSink::runWritingJob(JobReplica & job, const Block & current_block, si
}
const Block & shard_block = (num_shards > 1) ? job.current_shard_block : current_block;
const Settings & settings = context->getSettingsRef();
const Settings settings = context->getSettingsCopy();
size_t rows = shard_block.rows();
@ -377,10 +377,7 @@ DistributedSink::runWritingJob(JobReplica & job, const Block & current_block, si
/// NOTE: INSERT will also take into account max_replica_delay_for_distributed_queries
/// (anyway fallback_to_stale_replicas_for_distributed_queries=true by default)
auto results = shard_info.pool->getManyCheckedForInsert(timeouts, settings, PoolMode::GET_ONE, storage.remote_storage.getQualifiedName());
auto result = results.front();
if (shard_info.pool->isTryResultInvalid(result, settings.distributed_insert_skip_read_only_replicas))
throw Exception(ErrorCodes::LOGICAL_ERROR, "Got an invalid connection result");
auto result = shard_info.pool->getValidTryResult(results, settings.distributed_insert_skip_read_only_replicas);
job.connection_entry = std::move(result.entry);
}
else

View File

@ -3230,6 +3230,10 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED,
"Experimental full-text index feature is not enabled (turn on setting 'allow_experimental_full_text_index')");
if (AlterCommands::hasVectorSimilarityIndex(new_metadata) && !settings.allow_experimental_vector_similarity_index)
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED,
"Experimental vector similarity index is disabled (turn on setting 'allow_experimental_vector_similarity_index')");
for (const auto & disk : getDisks())
if (!disk->supportsHardLinks() && !commands.isSettingsAlter() && !commands.isCommentAlter())
throw Exception(

View File

@ -85,11 +85,11 @@ MergeTreeDataPartWriterOnDisk::Stream<false>::Stream(
marks_file_extension{marks_file_extension_},
plain_file(data_part_storage->writeFile(data_path_ + data_file_extension, max_compress_block_size_, query_write_settings)),
plain_hashing(*plain_file),
compressor(plain_hashing, compression_codec_, max_compress_block_size_),
compressor(plain_hashing, compression_codec_, max_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size),
compressed_hashing(compressor),
marks_file(data_part_storage->writeFile(marks_path_ + marks_file_extension, 4096, query_write_settings)),
marks_hashing(*marks_file),
marks_compressor(marks_hashing, marks_compression_codec_, marks_compress_block_size_),
marks_compressor(marks_hashing, marks_compression_codec_, marks_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size),
marks_compressed_hashing(marks_compressor),
compress_marks(MarkType(marks_file_extension).compressed)
{
@ -108,7 +108,7 @@ MergeTreeDataPartWriterOnDisk::Stream<true>::Stream(
data_file_extension{data_file_extension_},
plain_file(data_part_storage->writeFile(data_path_ + data_file_extension, max_compress_block_size_, query_write_settings)),
plain_hashing(*plain_file),
compressor(plain_hashing, compression_codec_, max_compress_block_size_),
compressor(plain_hashing, compression_codec_, max_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size),
compressed_hashing(compressor),
compress_marks(false)
{

View File

@ -177,6 +177,10 @@ void MergeTreeDataPartWriterWide::addStreams(
if (!max_compress_block_size)
max_compress_block_size = settings.max_compress_block_size;
WriteSettings query_write_settings = settings.query_write_settings;
query_write_settings.use_adaptive_write_buffer = settings.use_adaptive_write_buffer_for_dynamic_subcolumns && ISerialization::isDynamicSubcolumn(substream_path, substream_path.size());
query_write_settings.adaptive_write_buffer_initial_size = settings.adaptive_write_buffer_initial_size;
column_streams[stream_name] = std::make_unique<Stream<false>>(
stream_name,
data_part_storage,
@ -186,7 +190,7 @@ void MergeTreeDataPartWriterWide::addStreams(
max_compress_block_size,
marks_compression_codec,
settings.marks_compress_block_size,
settings.query_write_settings);
query_write_settings);
full_name_to_stream_name.emplace(full_stream_name, stream_name);
stream_name_to_full_name.emplace(stream_name, full_stream_name);

View File

@ -30,6 +30,8 @@ MergeTreeWriterSettings::MergeTreeWriterSettings(
, low_cardinality_max_dictionary_size(global_settings.low_cardinality_max_dictionary_size)
, low_cardinality_use_single_dictionary_for_part(global_settings.low_cardinality_use_single_dictionary_for_part != 0)
, use_compact_variant_discriminators_serialization(storage_settings->use_compact_variant_discriminators_serialization)
, use_adaptive_write_buffer_for_dynamic_subcolumns(storage_settings->use_adaptive_write_buffer_for_dynamic_subcolumns)
, adaptive_write_buffer_initial_size(storage_settings->adaptive_write_buffer_initial_size)
{
}

View File

@ -80,6 +80,8 @@ struct MergeTreeWriterSettings
size_t low_cardinality_max_dictionary_size;
bool low_cardinality_use_single_dictionary_for_part;
bool use_compact_variant_discriminators_serialization;
bool use_adaptive_write_buffer_for_dynamic_subcolumns;
size_t adaptive_write_buffer_initial_size;
};
}

View File

@ -195,7 +195,7 @@ void MergeTreeIndexGranuleVectorSimilarity::serializeBinary(WriteBuffer & ostr)
LOG_TRACE(logger, "Start writing vector similarity index");
if (empty())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to write empty minmax index {}", backQuote(index_name));
throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to write empty vector similarity index {}", backQuote(index_name));
writeIntBinary(FILE_FORMAT_VERSION, ostr);

View File

@ -99,6 +99,8 @@ struct Settings;
M(Bool, add_implicit_sign_column_constraint_for_collapsing_engine, false, "If true, add implicit constraint for sign column for CollapsingMergeTree engine.", 0) \
M(Milliseconds, sleep_before_commit_local_part_in_replicated_table_ms, 0, "For testing. Do not change it.", 0) \
M(Bool, optimize_row_order, false, "Allow reshuffling of rows during part inserts and merges to improve the compressibility of the new part", 0) \
M(Bool, use_adaptive_write_buffer_for_dynamic_subcolumns, true, "Allow using adaptive write buffers when writing dynamic subcolumns to reduce memory usage", 0) \
M(UInt64, adaptive_write_buffer_initial_size, 16 * 1024, "Initial size of an adaptive write buffer", 0) \
\
/* Part removal settings. */ \
M(UInt64, simultaneous_parts_removal_limit, 0, "Maximum number of parts to remove during one CleanupThread iteration (0 means unlimited).", 0) \
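
The new use_adaptive_write_buffer_for_dynamic_subcolumns and adaptive_write_buffer_initial_size settings, together with the extra constructor arguments passed to the compressors in MergeTreeDataPartWriterOnDisk, let each stream start with a small buffer (16 KiB by default) that grows on demand instead of allocating a full compress block up front, which matters when a part with many dynamic subcolumns opens many streams at once. A rough sketch of the growth idea, with illustrative names and policy, not the actual CompressedWriteBuffer logic:

#include <algorithm>
#include <cstddef>
#include <vector>

class AdaptiveWriteBuffer
{
public:
    AdaptiveWriteBuffer(size_t initial_size, size_t max_size)
        : max_size_(max_size) { data_.reserve(initial_size); }

    void write(const char * p, size_t n)
    {
        /// Grow geometrically, but never beyond the configured block size.
        if (data_.size() + n > data_.capacity() && data_.capacity() < max_size_)
            data_.reserve(std::min(max_size_, std::max(data_.capacity() * 2, data_.size() + n)));
        data_.insert(data_.end(), p, p + n);
        if (data_.size() >= max_size_)
            flushBlock();
    }

private:
    void flushBlock()
    {
        /// Compress data_ and hand it to the underlying file stream here.
        data_.clear();
    }

    std::vector<char> data_;
    size_t max_size_;
};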

View File

@ -40,10 +40,12 @@ void extractReferenceVectorFromLiteral(std::vector<Float64> & reference_vector,
}
}
VectorSimilarityCondition::Info::DistanceFunction stringToDistanceFunction(std::string_view distance_function)
VectorSimilarityCondition::Info::DistanceFunction stringToDistanceFunction(const String & distance_function)
{
if (distance_function == "L2Distance")
return VectorSimilarityCondition::Info::DistanceFunction::L2;
else if (distance_function == "cosineDistance")
return VectorSimilarityCondition::Info::DistanceFunction::Cosine;
else
return VectorSimilarityCondition::Info::DistanceFunction::Unknown;
}
@ -57,7 +59,7 @@ VectorSimilarityCondition::VectorSimilarityCondition(const SelectQueryInfo & que
, index_is_useful(checkQueryStructure(query_info))
{}
bool VectorSimilarityCondition::alwaysUnknownOrTrue(String distance_function) const
bool VectorSimilarityCondition::alwaysUnknownOrTrue(const String & distance_function) const
{
if (!index_is_useful)
return true; /// query isn't supported

View File

@ -57,7 +57,8 @@ public:
enum class DistanceFunction : uint8_t
{
Unknown,
L2
L2,
Cosine
};
std::vector<Float64> reference_vector;
@ -68,7 +69,7 @@ public:
};
/// Returns false if the query can be sped up by an ANN index, true otherwise.
bool alwaysUnknownOrTrue(String distance_function) const;
bool alwaysUnknownOrTrue(const String & distance_function) const;
std::vector<Float64> getReferenceVector() const;
size_t getDimensions() const;
@ -141,18 +142,12 @@ private:
/// Traverses the AST of the ORDER BY section
void traverseOrderByAST(const ASTPtr & node, RPN & rpn);
/// Returns true and stores ANNExpr if the query has valid WHERE section
static bool matchRPNWhere(RPN & rpn, Info & info);
/// Returns true and stores ANNExpr if the query has valid ORDERBY section
static bool matchRPNOrderBy(RPN & rpn, Info & info);
/// Returns true and stores Length if we have valid LIMIT clause in query
static bool matchRPNLimit(RPNElement & rpn, UInt64 & limit);
/// Matches dist function, reference vector, column name
static bool matchMainParts(RPN::iterator & iter, const RPN::iterator & end, Info & info);
/// Gets float or int from AST node
static float getFloatOrIntLiteralOrPanic(const RPN::iterator& iter);
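
This change adds Cosine alongside L2 as a distance function recognized by the vector similarity condition ("cosineDistance" in queries). For reference, cosine distance is one minus the cosine of the angle between the two vectors; a plain illustrative implementation, not ClickHouse's actual cosineDistance function:

#include <cmath>
#include <cstddef>
#include <vector>

/// Cosine distance: 1 - dot(a, b) / (|a| * |b|). Assumes equal, non-zero-length vectors.
double cosineDistance(const std::vector<double> & a, const std::vector<double> & b)
{
    double dot = 0.0, na = 0.0, nb = 0.0;
    for (size_t i = 0; i < a.size(); ++i)
    {
        dot += a[i] * b[i];
        na += a[i] * a[i];
        nb += b[i] * b[i];
    }
    return 1.0 - dot / (std::sqrt(na) * std::sqrt(nb));
}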

Some files were not shown because too many files have changed in this diff.