Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-12-05 14:02:21 +00:00

Merge branch 'master' into vdimir/from-10931

Commit 31bf9a2a0b
CITATION.cff (new file, 31 lines)
@@ -0,0 +1,31 @@
# This CITATION.cff file was generated with cffinit.

cff-version: 1.2.0
title: "ClickHouse"
message: "If you use this software, please cite it as below."
type: software
authors:
  - family-names: "Milovidov"
    given-names: "Alexey"
repository-code: 'https://github.com/ClickHouse/ClickHouse'
url: 'https://clickhouse.com'
license: Apache-2.0
preferred-citation:
  type: article
  authors:
    - family-names: "Schulze"
      given-names: "Robert"
    - family-names: "Schreiber"
      given-names: "Tom"
    - family-names: "Yatsishin"
      given-names: "Ilya"
    - family-names: "Dahimene"
      given-names: "Ryadh"
    - family-names: "Milovidov"
      given-names: "Alexey"
  journal: "Proceedings of the VLDB Endowment"
  title: "ClickHouse - Lightning Fast Analytics for Everyone"
  year: 2024
  volume: 17
  issue: 12
  doi: 10.14778/3685800.3685802
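The citation metadata above is machine-readable. If you want to check it, or turn it into a BibTeX entry, the `cffconvert` tool can do both; a minimal sketch, assuming `cffconvert` is installed from PyPI (the tool is not part of this repository, and flags may differ between versions):

```sh
# Validate CITATION.cff, then render it as BibTeX (assumes cffconvert is installed).
pip install cffconvert
cffconvert --validate -i CITATION.cff
cffconvert -f bibtex -i CITATION.cff
```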
README.md (18 lines changed)
@@ -42,21 +42,19 @@ Keep an eye out for upcoming meetups and events around the world. Somewhere else

The following upcoming meetups are featuring the creator of ClickHouse & CTO, Alexey Milovidov:

* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25
* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5
* [Raleigh Meetup (Deutsche Bank)](https://www.meetup.com/triangletechtalks/events/302723486/) - September 9
* [New York Meetup (Rokt)](https://www.meetup.com/clickhouse-new-york-user-group/events/302575342) - September 10
* [Chicago Meetup (Jump Capital)](https://lu.ma/43tvmrfw) - September 12

Other upcoming meetups
* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27
* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27
* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5
* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5

* [Toronto Meetup (Shopify)](https://www.meetup.com/clickhouse-toronto-user-group/events/301490855/) - September 10
* [Austin Meetup](https://www.meetup.com/clickhouse-austin-user-group/events/302558689/) - September 17
* [London Meetup](https://www.meetup.com/clickhouse-london-user-group/events/302977267) - September 17
* [Bangalore Meetup](https://www.meetup.com/clickhouse-bangalore-user-group/events/303208274/) - September 18
* [Tel Aviv Meetup](https://www.meetup.com/clickhouse-meetup-israel/events/303095121) - September 22
* [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1
* [Singapore Meetup](https://www.meetup.com/clickhouse-singapore-meetup-group/events/303212064/) - October 3
* [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22
* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - October 29
* [Oslo Meetup](https://www.meetup.com/open-source-real-time-data-warehouse-real-time-analytics/events/302938622) - October 31
@@ -64,7 +62,13 @@ Other upcoming meetups
* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
* [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26

Recently completed events
* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25
* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27
* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27
* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5
* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5
* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5

## Recent Recordings
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
contrib/curl vendored (2 lines changed)
@@ -1 +1 @@
-Subproject commit de7b3e89218467159a7af72d58cea8425946e97d
+Subproject commit 83bedbd730d62b83744cc26fa0433d3f6e2e4cd6
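For context, a vendored-submodule bump like the one above is normally produced by checking out the new commit inside the submodule and committing the updated gitlink; a sketch of the usual steps (not necessarily the exact commands used for this commit):

```sh
# Point the contrib/curl submodule at the new upstream commit.
git submodule update --init contrib/curl
cd contrib/curl
git fetch origin
git checkout 83bedbd730d62b83744cc26fa0433d3f6e2e4cd6
cd ../..
git add contrib/curl   # stages the new submodule pointer
git commit -m "Update contrib/curl"
```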
contrib/grpc vendored (2 lines changed)
@@ -1 +1 @@
-Subproject commit 1716359d2e28d304a250f9df0e6c0ccad03de8db
+Subproject commit 7bc3abe952aba1dc7bce7f2f790dc781cb51a41e
@@ -15,162 +15,64 @@ set(ICUDATA_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icudata/")
# These lists of sources were generated from build log of the original ICU build system (configure + make).

set(ICUUC_SOURCES
    "${ICU_SOURCE_DIR}/common/errorcode.cpp"
    "${ICU_SOURCE_DIR}/common/putil.cpp"
    "${ICU_SOURCE_DIR}/common/umath.cpp"
    "${ICU_SOURCE_DIR}/common/utypes.cpp"
    "${ICU_SOURCE_DIR}/common/uinvchar.cpp"
    "${ICU_SOURCE_DIR}/common/umutex.cpp"
    "${ICU_SOURCE_DIR}/common/ucln_cmn.cpp"
    "${ICU_SOURCE_DIR}/common/uinit.cpp"
    "${ICU_SOURCE_DIR}/common/uobject.cpp"
    "${ICU_SOURCE_DIR}/common/cmemory.cpp"
    "${ICU_SOURCE_DIR}/common/charstr.cpp"
    "${ICU_SOURCE_DIR}/common/cstr.cpp"
    "${ICU_SOURCE_DIR}/common/udata.cpp"
    "${ICU_SOURCE_DIR}/common/ucmndata.cpp"
    "${ICU_SOURCE_DIR}/common/udatamem.cpp"
    "${ICU_SOURCE_DIR}/common/umapfile.cpp"
    "${ICU_SOURCE_DIR}/common/udataswp.cpp"
    "${ICU_SOURCE_DIR}/common/utrie_swap.cpp"
    "${ICU_SOURCE_DIR}/common/ucol_swp.cpp"
    "${ICU_SOURCE_DIR}/common/utrace.cpp"
    "${ICU_SOURCE_DIR}/common/uhash.cpp"
    "${ICU_SOURCE_DIR}/common/uhash_us.cpp"
    "${ICU_SOURCE_DIR}/common/uenum.cpp"
    "${ICU_SOURCE_DIR}/common/ustrenum.cpp"
    "${ICU_SOURCE_DIR}/common/uvector.cpp"
    "${ICU_SOURCE_DIR}/common/ustack.cpp"
    "${ICU_SOURCE_DIR}/common/uvectr32.cpp"
    "${ICU_SOURCE_DIR}/common/uvectr64.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_bld.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_io.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_cb.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_err.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvlat1.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_u7.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_u8.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_u16.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_u32.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvscsu.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvbocu.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_ext.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv2022.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvhz.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvisci.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvdisp.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_set.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_ct.cpp"
    "${ICU_SOURCE_DIR}/common/resource.cpp"
    "${ICU_SOURCE_DIR}/common/uresbund.cpp"
    "${ICU_SOURCE_DIR}/common/ures_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/uresdata.cpp"
    "${ICU_SOURCE_DIR}/common/resbund.cpp"
    "${ICU_SOURCE_DIR}/common/resbund_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/ucurr.cpp"
    "${ICU_SOURCE_DIR}/common/localebuilder.cpp"
    "${ICU_SOURCE_DIR}/common/localeprioritylist.cpp"
    "${ICU_SOURCE_DIR}/common/messagepattern.cpp"
    "${ICU_SOURCE_DIR}/common/ucat.cpp"
    "${ICU_SOURCE_DIR}/common/locmap.cpp"
    "${ICU_SOURCE_DIR}/common/uloc.cpp"
    "${ICU_SOURCE_DIR}/common/locid.cpp"
    "${ICU_SOURCE_DIR}/common/locutil.cpp"
    "${ICU_SOURCE_DIR}/common/locavailable.cpp"
    "${ICU_SOURCE_DIR}/common/locdispnames.cpp"
    "${ICU_SOURCE_DIR}/common/locdspnm.cpp"
    "${ICU_SOURCE_DIR}/common/loclikely.cpp"
    "${ICU_SOURCE_DIR}/common/locresdata.cpp"
    "${ICU_SOURCE_DIR}/common/lsr.cpp"
    "${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp"
    "${ICU_SOURCE_DIR}/common/locdistance.cpp"
    "${ICU_SOURCE_DIR}/common/localematcher.cpp"
    "${ICU_SOURCE_DIR}/common/bytestream.cpp"
    "${ICU_SOURCE_DIR}/common/stringpiece.cpp"
    "${ICU_SOURCE_DIR}/common/bytesinkutil.cpp"
    "${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp"
    "${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp"
    "${ICU_SOURCE_DIR}/common/bytestrie.cpp"
    "${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp"
    "${ICU_SOURCE_DIR}/common/ucharstrie.cpp"
    "${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp"
    "${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp"
    "${ICU_SOURCE_DIR}/common/dictionarydata.cpp"
    "${ICU_SOURCE_DIR}/common/edits.cpp"
    "${ICU_SOURCE_DIR}/common/appendable.cpp"
    "${ICU_SOURCE_DIR}/common/ustr_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/unistr.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_case.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_props.cpp"
    "${ICU_SOURCE_DIR}/common/utf_impl.cpp"
    "${ICU_SOURCE_DIR}/common/ustring.cpp"
    "${ICU_SOURCE_DIR}/common/ustrcase.cpp"
    "${ICU_SOURCE_DIR}/common/ucasemap.cpp"
    "${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp"
    "${ICU_SOURCE_DIR}/common/cstring.cpp"
    "${ICU_SOURCE_DIR}/common/ustrfmt.cpp"
    "${ICU_SOURCE_DIR}/common/ustrtrns.cpp"
    "${ICU_SOURCE_DIR}/common/ustr_wcs.cpp"
    "${ICU_SOURCE_DIR}/common/utext.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp"
    "${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp"
    "${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp"
    "${ICU_SOURCE_DIR}/common/normalizer2impl.cpp"
    "${ICU_SOURCE_DIR}/common/normalizer2.cpp"
    "${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp"
    "${ICU_SOURCE_DIR}/common/normlzr.cpp"
    "${ICU_SOURCE_DIR}/common/unorm.cpp"
    "${ICU_SOURCE_DIR}/common/unormcmp.cpp"
    "${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp"
    "${ICU_SOURCE_DIR}/common/chariter.cpp"
    "${ICU_SOURCE_DIR}/common/schriter.cpp"
    "${ICU_SOURCE_DIR}/common/uchriter.cpp"
    "${ICU_SOURCE_DIR}/common/uiter.cpp"
    "${ICU_SOURCE_DIR}/common/patternprops.cpp"
    "${ICU_SOURCE_DIR}/common/uchar.cpp"
    "${ICU_SOURCE_DIR}/common/uprops.cpp"
    "${ICU_SOURCE_DIR}/common/ucase.cpp"
    "${ICU_SOURCE_DIR}/common/propname.cpp"
    "${ICU_SOURCE_DIR}/common/ubidi_props.cpp"
    "${ICU_SOURCE_DIR}/common/characterproperties.cpp"
    "${ICU_SOURCE_DIR}/common/ubidi.cpp"
    "${ICU_SOURCE_DIR}/common/ubidiwrt.cpp"
    "${ICU_SOURCE_DIR}/common/ubidiln.cpp"
    "${ICU_SOURCE_DIR}/common/ushape.cpp"
    "${ICU_SOURCE_DIR}/common/uscript.cpp"
    "${ICU_SOURCE_DIR}/common/uscript_props.cpp"
    "${ICU_SOURCE_DIR}/common/usc_impl.cpp"
    "${ICU_SOURCE_DIR}/common/unames.cpp"
    "${ICU_SOURCE_DIR}/common/utrie.cpp"
    "${ICU_SOURCE_DIR}/common/utrie2.cpp"
    "${ICU_SOURCE_DIR}/common/utrie2_builder.cpp"
    "${ICU_SOURCE_DIR}/common/ucptrie.cpp"
    "${ICU_SOURCE_DIR}/common/umutablecptrie.cpp"
    "${ICU_SOURCE_DIR}/common/bmpset.cpp"
    "${ICU_SOURCE_DIR}/common/unisetspan.cpp"
    "${ICU_SOURCE_DIR}/common/uset_props.cpp"
    "${ICU_SOURCE_DIR}/common/uniset_props.cpp"
    "${ICU_SOURCE_DIR}/common/uniset_closure.cpp"
    "${ICU_SOURCE_DIR}/common/uset.cpp"
    "${ICU_SOURCE_DIR}/common/uniset.cpp"
    "${ICU_SOURCE_DIR}/common/usetiter.cpp"
    "${ICU_SOURCE_DIR}/common/ruleiter.cpp"
    "${ICU_SOURCE_DIR}/common/caniter.cpp"
    "${ICU_SOURCE_DIR}/common/unifilt.cpp"
    "${ICU_SOURCE_DIR}/common/unifunct.cpp"
    "${ICU_SOURCE_DIR}/common/uarrsort.cpp"
    "${ICU_SOURCE_DIR}/common/brkiter.cpp"
    "${ICU_SOURCE_DIR}/common/ubrk.cpp"
    "${ICU_SOURCE_DIR}/common/brkeng.cpp"
    "${ICU_SOURCE_DIR}/common/brkiter.cpp"
    "${ICU_SOURCE_DIR}/common/bytesinkutil.cpp"
    "${ICU_SOURCE_DIR}/common/bytestream.cpp"
    "${ICU_SOURCE_DIR}/common/bytestrie.cpp"
    "${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp"
    "${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp"
    "${ICU_SOURCE_DIR}/common/caniter.cpp"
    "${ICU_SOURCE_DIR}/common/characterproperties.cpp"
    "${ICU_SOURCE_DIR}/common/chariter.cpp"
    "${ICU_SOURCE_DIR}/common/charstr.cpp"
    "${ICU_SOURCE_DIR}/common/cmemory.cpp"
    "${ICU_SOURCE_DIR}/common/cstr.cpp"
    "${ICU_SOURCE_DIR}/common/cstring.cpp"
    "${ICU_SOURCE_DIR}/common/cwchar.cpp"
    "${ICU_SOURCE_DIR}/common/dictbe.cpp"
    "${ICU_SOURCE_DIR}/common/dictionarydata.cpp"
    "${ICU_SOURCE_DIR}/common/dtintrv.cpp"
    "${ICU_SOURCE_DIR}/common/edits.cpp"
    "${ICU_SOURCE_DIR}/common/emojiprops.cpp"
    "${ICU_SOURCE_DIR}/common/errorcode.cpp"
    "${ICU_SOURCE_DIR}/common/filteredbrk.cpp"
    "${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp"
    "${ICU_SOURCE_DIR}/common/icudataver.cpp"
    "${ICU_SOURCE_DIR}/common/icuplug.cpp"
    "${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp"
    "${ICU_SOURCE_DIR}/common/localebuilder.cpp"
    "${ICU_SOURCE_DIR}/common/localematcher.cpp"
    "${ICU_SOURCE_DIR}/common/localeprioritylist.cpp"
    "${ICU_SOURCE_DIR}/common/locavailable.cpp"
    "${ICU_SOURCE_DIR}/common/locbased.cpp"
    "${ICU_SOURCE_DIR}/common/locdispnames.cpp"
    "${ICU_SOURCE_DIR}/common/locdistance.cpp"
    "${ICU_SOURCE_DIR}/common/locdspnm.cpp"
    "${ICU_SOURCE_DIR}/common/locid.cpp"
    "${ICU_SOURCE_DIR}/common/loclikely.cpp"
    "${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp"
    "${ICU_SOURCE_DIR}/common/locmap.cpp"
    "${ICU_SOURCE_DIR}/common/locresdata.cpp"
    "${ICU_SOURCE_DIR}/common/locutil.cpp"
    "${ICU_SOURCE_DIR}/common/lsr.cpp"
    "${ICU_SOURCE_DIR}/common/lstmbe.cpp"
    "${ICU_SOURCE_DIR}/common/messagepattern.cpp"
    "${ICU_SOURCE_DIR}/common/mlbe.cpp"
    "${ICU_SOURCE_DIR}/common/normalizer2.cpp"
    "${ICU_SOURCE_DIR}/common/normalizer2impl.cpp"
    "${ICU_SOURCE_DIR}/common/normlzr.cpp"
    "${ICU_SOURCE_DIR}/common/parsepos.cpp"
    "${ICU_SOURCE_DIR}/common/patternprops.cpp"
    "${ICU_SOURCE_DIR}/common/pluralmap.cpp"
    "${ICU_SOURCE_DIR}/common/propname.cpp"
    "${ICU_SOURCE_DIR}/common/propsvec.cpp"
    "${ICU_SOURCE_DIR}/common/punycode.cpp"
    "${ICU_SOURCE_DIR}/common/putil.cpp"
    "${ICU_SOURCE_DIR}/common/rbbi.cpp"
    "${ICU_SOURCE_DIR}/common/rbbi_cache.cpp"
    "${ICU_SOURCE_DIR}/common/rbbidata.cpp"
    "${ICU_SOURCE_DIR}/common/rbbinode.cpp"
    "${ICU_SOURCE_DIR}/common/rbbirb.cpp"
@@ -178,166 +80,180 @@ set(ICUUC_SOURCES
    "${ICU_SOURCE_DIR}/common/rbbisetb.cpp"
    "${ICU_SOURCE_DIR}/common/rbbistbl.cpp"
    "${ICU_SOURCE_DIR}/common/rbbitblb.cpp"
    "${ICU_SOURCE_DIR}/common/rbbi_cache.cpp"
    "${ICU_SOURCE_DIR}/common/resbund.cpp"
    "${ICU_SOURCE_DIR}/common/resbund_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/resource.cpp"
    "${ICU_SOURCE_DIR}/common/restrace.cpp"
    "${ICU_SOURCE_DIR}/common/ruleiter.cpp"
    "${ICU_SOURCE_DIR}/common/schriter.cpp"
    "${ICU_SOURCE_DIR}/common/serv.cpp"
    "${ICU_SOURCE_DIR}/common/servnotf.cpp"
    "${ICU_SOURCE_DIR}/common/servls.cpp"
    "${ICU_SOURCE_DIR}/common/servlk.cpp"
    "${ICU_SOURCE_DIR}/common/servlkf.cpp"
    "${ICU_SOURCE_DIR}/common/servls.cpp"
    "${ICU_SOURCE_DIR}/common/servnotf.cpp"
    "${ICU_SOURCE_DIR}/common/servrbf.cpp"
    "${ICU_SOURCE_DIR}/common/servslkf.cpp"
    "${ICU_SOURCE_DIR}/common/uidna.cpp"
    "${ICU_SOURCE_DIR}/common/usprep.cpp"
    "${ICU_SOURCE_DIR}/common/uts46.cpp"
    "${ICU_SOURCE_DIR}/common/punycode.cpp"
    "${ICU_SOURCE_DIR}/common/util.cpp"
    "${ICU_SOURCE_DIR}/common/util_props.cpp"
    "${ICU_SOURCE_DIR}/common/parsepos.cpp"
    "${ICU_SOURCE_DIR}/common/locbased.cpp"
    "${ICU_SOURCE_DIR}/common/cwchar.cpp"
    "${ICU_SOURCE_DIR}/common/wintz.cpp"
    "${ICU_SOURCE_DIR}/common/dtintrv.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvsel.cpp"
    "${ICU_SOURCE_DIR}/common/propsvec.cpp"
    "${ICU_SOURCE_DIR}/common/ulist.cpp"
    "${ICU_SOURCE_DIR}/common/uloc_tag.cpp"
    "${ICU_SOURCE_DIR}/common/icudataver.cpp"
    "${ICU_SOURCE_DIR}/common/icuplug.cpp"
    "${ICU_SOURCE_DIR}/common/sharedobject.cpp"
    "${ICU_SOURCE_DIR}/common/simpleformatter.cpp"
    "${ICU_SOURCE_DIR}/common/unifiedcache.cpp"
    "${ICU_SOURCE_DIR}/common/uloc_keytype.cpp"
    "${ICU_SOURCE_DIR}/common/ubiditransform.cpp"
    "${ICU_SOURCE_DIR}/common/pluralmap.cpp"
    "${ICU_SOURCE_DIR}/common/static_unicode_sets.cpp"
    "${ICU_SOURCE_DIR}/common/restrace.cpp"
    "${ICU_SOURCE_DIR}/common/emojiprops.cpp"
    "${ICU_SOURCE_DIR}/common/lstmbe.cpp")
    "${ICU_SOURCE_DIR}/common/stringpiece.cpp"
    "${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp"
    "${ICU_SOURCE_DIR}/common/uarrsort.cpp"
    "${ICU_SOURCE_DIR}/common/ubidi.cpp"
    "${ICU_SOURCE_DIR}/common/ubidi_props.cpp"
    "${ICU_SOURCE_DIR}/common/ubidiln.cpp"
    "${ICU_SOURCE_DIR}/common/ubiditransform.cpp"
    "${ICU_SOURCE_DIR}/common/ubidiwrt.cpp"
    "${ICU_SOURCE_DIR}/common/ubrk.cpp"
    "${ICU_SOURCE_DIR}/common/ucase.cpp"
    "${ICU_SOURCE_DIR}/common/ucasemap.cpp"
    "${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp"
    "${ICU_SOURCE_DIR}/common/ucat.cpp"
    "${ICU_SOURCE_DIR}/common/uchar.cpp"
    "${ICU_SOURCE_DIR}/common/ucharstrie.cpp"
    "${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp"
    "${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp"
    "${ICU_SOURCE_DIR}/common/uchriter.cpp"
    "${ICU_SOURCE_DIR}/common/ucln_cmn.cpp"
    "${ICU_SOURCE_DIR}/common/ucmndata.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv2022.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_bld.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_cb.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_ct.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_err.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_ext.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_io.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_set.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_u16.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_u32.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_u7.cpp"
    "${ICU_SOURCE_DIR}/common/ucnv_u8.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvbocu.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvdisp.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvhz.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvisci.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvlat1.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvscsu.cpp"
    "${ICU_SOURCE_DIR}/common/ucnvsel.cpp"
    "${ICU_SOURCE_DIR}/common/ucol_swp.cpp"
    "${ICU_SOURCE_DIR}/common/ucptrie.cpp"
    "${ICU_SOURCE_DIR}/common/ucurr.cpp"
    "${ICU_SOURCE_DIR}/common/udata.cpp"
    "${ICU_SOURCE_DIR}/common/udatamem.cpp"
    "${ICU_SOURCE_DIR}/common/udataswp.cpp"
    "${ICU_SOURCE_DIR}/common/uenum.cpp"
    "${ICU_SOURCE_DIR}/common/uhash.cpp"
    "${ICU_SOURCE_DIR}/common/uhash_us.cpp"
    "${ICU_SOURCE_DIR}/common/uidna.cpp"
    "${ICU_SOURCE_DIR}/common/uinit.cpp"
    "${ICU_SOURCE_DIR}/common/uinvchar.cpp"
    "${ICU_SOURCE_DIR}/common/uiter.cpp"
    "${ICU_SOURCE_DIR}/common/ulist.cpp"
    "${ICU_SOURCE_DIR}/common/uloc.cpp"
    "${ICU_SOURCE_DIR}/common/uloc_keytype.cpp"
    "${ICU_SOURCE_DIR}/common/uloc_tag.cpp"
    "${ICU_SOURCE_DIR}/common/ulocale.cpp"
    "${ICU_SOURCE_DIR}/common/ulocbuilder.cpp"
    "${ICU_SOURCE_DIR}/common/umapfile.cpp"
    "${ICU_SOURCE_DIR}/common/umath.cpp"
    "${ICU_SOURCE_DIR}/common/umutablecptrie.cpp"
    "${ICU_SOURCE_DIR}/common/umutex.cpp"
    "${ICU_SOURCE_DIR}/common/unames.cpp"
    "${ICU_SOURCE_DIR}/common/unifiedcache.cpp"
    "${ICU_SOURCE_DIR}/common/unifilt.cpp"
    "${ICU_SOURCE_DIR}/common/unifunct.cpp"
    "${ICU_SOURCE_DIR}/common/uniset.cpp"
    "${ICU_SOURCE_DIR}/common/uniset_closure.cpp"
    "${ICU_SOURCE_DIR}/common/uniset_props.cpp"
    "${ICU_SOURCE_DIR}/common/unisetspan.cpp"
    "${ICU_SOURCE_DIR}/common/unistr.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_case.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_props.cpp"
    "${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp"
    "${ICU_SOURCE_DIR}/common/unorm.cpp"
    "${ICU_SOURCE_DIR}/common/unormcmp.cpp"
    "${ICU_SOURCE_DIR}/common/uobject.cpp"
    "${ICU_SOURCE_DIR}/common/uprops.cpp"
    "${ICU_SOURCE_DIR}/common/ures_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/uresbund.cpp"
    "${ICU_SOURCE_DIR}/common/uresdata.cpp"
    "${ICU_SOURCE_DIR}/common/usc_impl.cpp"
    "${ICU_SOURCE_DIR}/common/uscript.cpp"
    "${ICU_SOURCE_DIR}/common/uscript_props.cpp"
    "${ICU_SOURCE_DIR}/common/uset.cpp"
    "${ICU_SOURCE_DIR}/common/uset_props.cpp"
    "${ICU_SOURCE_DIR}/common/usetiter.cpp"
    "${ICU_SOURCE_DIR}/common/ushape.cpp"
    "${ICU_SOURCE_DIR}/common/usprep.cpp"
    "${ICU_SOURCE_DIR}/common/ustack.cpp"
    "${ICU_SOURCE_DIR}/common/ustr_cnv.cpp"
    "${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp"
    "${ICU_SOURCE_DIR}/common/ustr_wcs.cpp"
    "${ICU_SOURCE_DIR}/common/ustrcase.cpp"
    "${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp"
    "${ICU_SOURCE_DIR}/common/ustrenum.cpp"
    "${ICU_SOURCE_DIR}/common/ustrfmt.cpp"
    "${ICU_SOURCE_DIR}/common/ustring.cpp"
    "${ICU_SOURCE_DIR}/common/ustrtrns.cpp"
    "${ICU_SOURCE_DIR}/common/utext.cpp"
    "${ICU_SOURCE_DIR}/common/utf_impl.cpp"
    "${ICU_SOURCE_DIR}/common/util.cpp"
    "${ICU_SOURCE_DIR}/common/util_props.cpp"
    "${ICU_SOURCE_DIR}/common/utrace.cpp"
    "${ICU_SOURCE_DIR}/common/utrie.cpp"
    "${ICU_SOURCE_DIR}/common/utrie2.cpp"
    "${ICU_SOURCE_DIR}/common/utrie2_builder.cpp"
    "${ICU_SOURCE_DIR}/common/utrie_swap.cpp"
    "${ICU_SOURCE_DIR}/common/uts46.cpp"
    "${ICU_SOURCE_DIR}/common/utypes.cpp"
    "${ICU_SOURCE_DIR}/common/uvector.cpp"
    "${ICU_SOURCE_DIR}/common/uvectr32.cpp"
    "${ICU_SOURCE_DIR}/common/uvectr64.cpp"
    "${ICU_SOURCE_DIR}/common/wintz.cpp")

set(ICUI18N_SOURCES
    "${ICU_SOURCE_DIR}/i18n/ucln_in.cpp"
    "${ICU_SOURCE_DIR}/i18n/fmtable.cpp"
    "${ICU_SOURCE_DIR}/i18n/format.cpp"
    "${ICU_SOURCE_DIR}/i18n/msgfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/umsg.cpp"
    "${ICU_SOURCE_DIR}/i18n/numfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/unum.cpp"
    "${ICU_SOURCE_DIR}/i18n/decimfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp"
    "${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp"
    "${ICU_SOURCE_DIR}/i18n/choicfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/datefmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp"
    "${ICU_SOURCE_DIR}/i18n/udat.cpp"
    "${ICU_SOURCE_DIR}/i18n/dtptngen.cpp"
    "${ICU_SOURCE_DIR}/i18n/udatpg.cpp"
    "${ICU_SOURCE_DIR}/i18n/nfrs.cpp"
    "${ICU_SOURCE_DIR}/i18n/nfrule.cpp"
    "${ICU_SOURCE_DIR}/i18n/nfsubs.cpp"
    "${ICU_SOURCE_DIR}/i18n/rbnf.cpp"
    "${ICU_SOURCE_DIR}/i18n/numsys.cpp"
    "${ICU_SOURCE_DIR}/i18n/unumsys.cpp"
    "${ICU_SOURCE_DIR}/i18n/ucsdet.cpp"
    "${ICU_SOURCE_DIR}/i18n/ucal.cpp"
    "${ICU_SOURCE_DIR}/i18n/calendar.cpp"
    "${ICU_SOURCE_DIR}/i18n/gregocal.cpp"
    "${ICU_SOURCE_DIR}/i18n/timezone.cpp"
    "${ICU_SOURCE_DIR}/i18n/simpletz.cpp"
    "${ICU_SOURCE_DIR}/i18n/olsontz.cpp"
    "${ICU_SOURCE_DIR}/i18n/alphaindex.cpp"
    "${ICU_SOURCE_DIR}/i18n/anytrans.cpp"
    "${ICU_SOURCE_DIR}/i18n/astro.cpp"
    "${ICU_SOURCE_DIR}/i18n/taiwncal.cpp"
    "${ICU_SOURCE_DIR}/i18n/basictz.cpp"
    "${ICU_SOURCE_DIR}/i18n/bocsu.cpp"
    "${ICU_SOURCE_DIR}/i18n/brktrans.cpp"
    "${ICU_SOURCE_DIR}/i18n/buddhcal.cpp"
    "${ICU_SOURCE_DIR}/i18n/persncal.cpp"
    "${ICU_SOURCE_DIR}/i18n/islamcal.cpp"
    "${ICU_SOURCE_DIR}/i18n/japancal.cpp"
    "${ICU_SOURCE_DIR}/i18n/gregoimp.cpp"
    "${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp"
    "${ICU_SOURCE_DIR}/i18n/indiancal.cpp"
    "${ICU_SOURCE_DIR}/i18n/chnsecal.cpp"
    "${ICU_SOURCE_DIR}/i18n/calendar.cpp"
    "${ICU_SOURCE_DIR}/i18n/casetrn.cpp"
    "${ICU_SOURCE_DIR}/i18n/cecal.cpp"
    "${ICU_SOURCE_DIR}/i18n/coptccal.cpp"
    "${ICU_SOURCE_DIR}/i18n/dangical.cpp"
    "${ICU_SOURCE_DIR}/i18n/ethpccal.cpp"
    "${ICU_SOURCE_DIR}/i18n/chnsecal.cpp"
    "${ICU_SOURCE_DIR}/i18n/choicfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/coleitr.cpp"
    "${ICU_SOURCE_DIR}/i18n/coll.cpp"
    "${ICU_SOURCE_DIR}/i18n/sortkey.cpp"
    "${ICU_SOURCE_DIR}/i18n/bocsu.cpp"
    "${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp"
    "${ICU_SOURCE_DIR}/i18n/ucol.cpp"
    "${ICU_SOURCE_DIR}/i18n/ucol_res.cpp"
    "${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp"
    "${ICU_SOURCE_DIR}/i18n/collation.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationsettings.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationcompare.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationdata.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationdatareader.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationdatawriter.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationfcd.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationiterator.cpp"
    "${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp"
    "${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp"
    "${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationsets.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationcompare.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationkeys.cpp"
    "${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationroot.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationrootelements.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationweights.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationruleparser.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp"
    "${ICU_SOURCE_DIR}/i18n/listformatter.cpp"
    "${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp"
    "${ICU_SOURCE_DIR}/i18n/strmatch.cpp"
    "${ICU_SOURCE_DIR}/i18n/usearch.cpp"
    "${ICU_SOURCE_DIR}/i18n/search.cpp"
    "${ICU_SOURCE_DIR}/i18n/stsearch.cpp"
    "${ICU_SOURCE_DIR}/i18n/translit.cpp"
    "${ICU_SOURCE_DIR}/i18n/utrans.cpp"
    "${ICU_SOURCE_DIR}/i18n/esctrn.cpp"
    "${ICU_SOURCE_DIR}/i18n/unesctrn.cpp"
    "${ICU_SOURCE_DIR}/i18n/funcrepl.cpp"
    "${ICU_SOURCE_DIR}/i18n/strrepl.cpp"
    "${ICU_SOURCE_DIR}/i18n/tridpars.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationsets.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationsettings.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp"
    "${ICU_SOURCE_DIR}/i18n/collationweights.cpp"
    "${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp"
    "${ICU_SOURCE_DIR}/i18n/coptccal.cpp"
    "${ICU_SOURCE_DIR}/i18n/cpdtrans.cpp"
    "${ICU_SOURCE_DIR}/i18n/rbt.cpp"
    "${ICU_SOURCE_DIR}/i18n/rbt_data.cpp"
    "${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp"
    "${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp"
    "${ICU_SOURCE_DIR}/i18n/rbt_set.cpp"
    "${ICU_SOURCE_DIR}/i18n/nultrans.cpp"
    "${ICU_SOURCE_DIR}/i18n/remtrans.cpp"
    "${ICU_SOURCE_DIR}/i18n/casetrn.cpp"
    "${ICU_SOURCE_DIR}/i18n/titletrn.cpp"
    "${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp"
    "${ICU_SOURCE_DIR}/i18n/toupptrn.cpp"
    "${ICU_SOURCE_DIR}/i18n/anytrans.cpp"
    "${ICU_SOURCE_DIR}/i18n/name2uni.cpp"
    "${ICU_SOURCE_DIR}/i18n/uni2name.cpp"
    "${ICU_SOURCE_DIR}/i18n/nortrans.cpp"
    "${ICU_SOURCE_DIR}/i18n/quant.cpp"
    "${ICU_SOURCE_DIR}/i18n/transreg.cpp"
    "${ICU_SOURCE_DIR}/i18n/brktrans.cpp"
    "${ICU_SOURCE_DIR}/i18n/regexcmp.cpp"
    "${ICU_SOURCE_DIR}/i18n/rematch.cpp"
    "${ICU_SOURCE_DIR}/i18n/repattrn.cpp"
    "${ICU_SOURCE_DIR}/i18n/regexst.cpp"
    "${ICU_SOURCE_DIR}/i18n/regextxt.cpp"
    "${ICU_SOURCE_DIR}/i18n/regeximp.cpp"
    "${ICU_SOURCE_DIR}/i18n/uregex.cpp"
    "${ICU_SOURCE_DIR}/i18n/uregexc.cpp"
    "${ICU_SOURCE_DIR}/i18n/ulocdata.cpp"
    "${ICU_SOURCE_DIR}/i18n/measfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/currfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/curramt.cpp"
    "${ICU_SOURCE_DIR}/i18n/currunit.cpp"
    "${ICU_SOURCE_DIR}/i18n/measure.cpp"
    "${ICU_SOURCE_DIR}/i18n/utmscale.cpp"
    "${ICU_SOURCE_DIR}/i18n/csdetect.cpp"
    "${ICU_SOURCE_DIR}/i18n/csmatch.cpp"
    "${ICU_SOURCE_DIR}/i18n/csr2022.cpp"
@@ -346,60 +262,80 @@ set(ICUI18N_SOURCES
    "${ICU_SOURCE_DIR}/i18n/csrsbcs.cpp"
    "${ICU_SOURCE_DIR}/i18n/csrucode.cpp"
    "${ICU_SOURCE_DIR}/i18n/csrutf8.cpp"
    "${ICU_SOURCE_DIR}/i18n/inputext.cpp"
    "${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp"
    "${ICU_SOURCE_DIR}/i18n/windtfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/basictz.cpp"
    "${ICU_SOURCE_DIR}/i18n/dtrule.cpp"
    "${ICU_SOURCE_DIR}/i18n/rbtz.cpp"
    "${ICU_SOURCE_DIR}/i18n/tzrule.cpp"
    "${ICU_SOURCE_DIR}/i18n/tztrans.cpp"
    "${ICU_SOURCE_DIR}/i18n/vtzone.cpp"
    "${ICU_SOURCE_DIR}/i18n/zonemeta.cpp"
    "${ICU_SOURCE_DIR}/i18n/standardplural.cpp"
    "${ICU_SOURCE_DIR}/i18n/upluralrules.cpp"
    "${ICU_SOURCE_DIR}/i18n/plurrule.cpp"
    "${ICU_SOURCE_DIR}/i18n/plurfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/selfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/curramt.cpp"
    "${ICU_SOURCE_DIR}/i18n/currfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/currpinf.cpp"
    "${ICU_SOURCE_DIR}/i18n/currunit.cpp"
    "${ICU_SOURCE_DIR}/i18n/dangical.cpp"
    "${ICU_SOURCE_DIR}/i18n/datefmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp"
    "${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp"
    "${ICU_SOURCE_DIR}/i18n/decContext.cpp"
    "${ICU_SOURCE_DIR}/i18n/decNumber.cpp"
    "${ICU_SOURCE_DIR}/i18n/decimfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/displayoptions.cpp"
    "${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp"
    "${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp"
    "${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp"
    "${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp"
    "${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp"
    "${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp"
    "${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp"
    "${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp"
    "${ICU_SOURCE_DIR}/i18n/dtitvfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/dtitvinf.cpp"
    "${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp"
    "${ICU_SOURCE_DIR}/i18n/tmunit.cpp"
    "${ICU_SOURCE_DIR}/i18n/tmutamt.cpp"
    "${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/currpinf.cpp"
    "${ICU_SOURCE_DIR}/i18n/uspoof.cpp"
    "${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp"
    "${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp"
    "${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp"
    "${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp"
    "${ICU_SOURCE_DIR}/i18n/ztrans.cpp"
    "${ICU_SOURCE_DIR}/i18n/zrule.cpp"
    "${ICU_SOURCE_DIR}/i18n/vzone.cpp"
    "${ICU_SOURCE_DIR}/i18n/dtptngen.cpp"
    "${ICU_SOURCE_DIR}/i18n/dtrule.cpp"
    "${ICU_SOURCE_DIR}/i18n/erarules.cpp"
    "${ICU_SOURCE_DIR}/i18n/esctrn.cpp"
    "${ICU_SOURCE_DIR}/i18n/ethpccal.cpp"
    "${ICU_SOURCE_DIR}/i18n/fmtable.cpp"
    "${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp"
    "${ICU_SOURCE_DIR}/i18n/format.cpp"
    "${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp"
    "${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp"
    "${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp"
    "${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp"
    "${ICU_SOURCE_DIR}/i18n/fphdlimp.cpp"
    "${ICU_SOURCE_DIR}/i18n/fpositer.cpp"
    "${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp"
    "${ICU_SOURCE_DIR}/i18n/decNumber.cpp"
    "${ICU_SOURCE_DIR}/i18n/decContext.cpp"
    "${ICU_SOURCE_DIR}/i18n/alphaindex.cpp"
    "${ICU_SOURCE_DIR}/i18n/tznames.cpp"
    "${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp"
    "${ICU_SOURCE_DIR}/i18n/tzgnames.cpp"
    "${ICU_SOURCE_DIR}/i18n/tzfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp"
    "${ICU_SOURCE_DIR}/i18n/funcrepl.cpp"
    "${ICU_SOURCE_DIR}/i18n/gender.cpp"
    "${ICU_SOURCE_DIR}/i18n/region.cpp"
    "${ICU_SOURCE_DIR}/i18n/scriptset.cpp"
    "${ICU_SOURCE_DIR}/i18n/uregion.cpp"
    "${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp"
    "${ICU_SOURCE_DIR}/i18n/gregocal.cpp"
    "${ICU_SOURCE_DIR}/i18n/gregoimp.cpp"
    "${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp"
    "${ICU_SOURCE_DIR}/i18n/indiancal.cpp"
    "${ICU_SOURCE_DIR}/i18n/inputext.cpp"
    "${ICU_SOURCE_DIR}/i18n/islamcal.cpp"
    "${ICU_SOURCE_DIR}/i18n/iso8601cal.cpp"
    "${ICU_SOURCE_DIR}/i18n/japancal.cpp"
    "${ICU_SOURCE_DIR}/i18n/listformatter.cpp"
    "${ICU_SOURCE_DIR}/i18n/measfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/measunit.cpp"
    "${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp"
    "${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp"
    "${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp"
    "${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp"
    "${ICU_SOURCE_DIR}/i18n/measure.cpp"
    "${ICU_SOURCE_DIR}/i18n/messageformat2.cpp"
    "${ICU_SOURCE_DIR}/i18n/messageformat2_arguments.cpp"
    "${ICU_SOURCE_DIR}/i18n/messageformat2_checker.cpp"
    "${ICU_SOURCE_DIR}/i18n/messageformat2_data_model.cpp"
    "${ICU_SOURCE_DIR}/i18n/messageformat2_errors.cpp"
    "${ICU_SOURCE_DIR}/i18n/messageformat2_evaluation.cpp"
    "${ICU_SOURCE_DIR}/i18n/messageformat2_formattable.cpp"
    "${ICU_SOURCE_DIR}/i18n/messageformat2_formatter.cpp"
    "${ICU_SOURCE_DIR}/i18n/messageformat2_function_registry.cpp"
    "${ICU_SOURCE_DIR}/i18n/messageformat2_parser.cpp"
    "${ICU_SOURCE_DIR}/i18n/messageformat2_serializer.cpp"
    "${ICU_SOURCE_DIR}/i18n/msgfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/name2uni.cpp"
    "${ICU_SOURCE_DIR}/i18n/nfrs.cpp"
    "${ICU_SOURCE_DIR}/i18n/nfrule.cpp"
    "${ICU_SOURCE_DIR}/i18n/nfsubs.cpp"
    "${ICU_SOURCE_DIR}/i18n/nortrans.cpp"
    "${ICU_SOURCE_DIR}/i18n/nultrans.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_affixutils.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_asformat.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_capi.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_compact.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_decimalquantity.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_decimfmtprops.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_fluent.cpp"
@@ -407,7 +343,9 @@ set(ICUI18N_SOURCES
    "${ICU_SOURCE_DIR}/i18n/number_grouping.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_integerwidth.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_longnames.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_mapper.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_modifiers.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_notation.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_output.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_padding.cpp"
@@ -415,46 +353,125 @@ set(ICUI18N_SOURCES
    "${ICU_SOURCE_DIR}/i18n/number_patternstring.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_rounding.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_scientific.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_utils.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_asformat.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_mapper.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_simple.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_skeletons.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_capi.cpp"
    "${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp"
    "${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp"
    "${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp"
    "${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp"
    "${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp"
    "${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp"
    "${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp"
    "${ICU_SOURCE_DIR}/i18n/string_segment.cpp"
    "${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp"
    "${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp"
    "${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp"
    "${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp"
    "${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp"
    "${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp"
    "${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp"
    "${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp"
    "${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp"
    "${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp"
    "${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp"
    "${ICU_SOURCE_DIR}/i18n/erarules.cpp"
    "${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp"
    "${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp"
    "${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp"
    "${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp"
    "${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_symbolswrapper.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_usageprefs.cpp"
    "${ICU_SOURCE_DIR}/i18n/number_utils.cpp"
    "${ICU_SOURCE_DIR}/i18n/numfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp"
    "${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp"
    "${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp"
    "${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp"
    "${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp"
    "${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp"
    "${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp"
    "${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp"
    "${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp"
    "${ICU_SOURCE_DIR}/i18n/numrange_capi.cpp"
    "${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp"
    "${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp"
    "${ICU_SOURCE_DIR}/i18n/numsys.cpp"
    "${ICU_SOURCE_DIR}/i18n/olsontz.cpp"
    "${ICU_SOURCE_DIR}/i18n/persncal.cpp"
    "${ICU_SOURCE_DIR}/i18n/pluralranges.cpp"
    "${ICU_SOURCE_DIR}/i18n/plurfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/plurrule.cpp"
    "${ICU_SOURCE_DIR}/i18n/quant.cpp"
    "${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp"
    "${ICU_SOURCE_DIR}/i18n/rbnf.cpp"
    "${ICU_SOURCE_DIR}/i18n/rbt.cpp"
    "${ICU_SOURCE_DIR}/i18n/rbt_data.cpp"
    "${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp"
    "${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp"
    "${ICU_SOURCE_DIR}/i18n/rbt_set.cpp"
    "${ICU_SOURCE_DIR}/i18n/rbtz.cpp"
    "${ICU_SOURCE_DIR}/i18n/regexcmp.cpp"
    "${ICU_SOURCE_DIR}/i18n/regeximp.cpp"
    "${ICU_SOURCE_DIR}/i18n/regexst.cpp"
    "${ICU_SOURCE_DIR}/i18n/regextxt.cpp"
    "${ICU_SOURCE_DIR}/i18n/region.cpp"
    "${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/rematch.cpp"
    "${ICU_SOURCE_DIR}/i18n/remtrans.cpp"
    "${ICU_SOURCE_DIR}/i18n/repattrn.cpp"
    "${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp"
    "${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp"
    "${ICU_SOURCE_DIR}/i18n/scriptset.cpp"
    "${ICU_SOURCE_DIR}/i18n/search.cpp"
    "${ICU_SOURCE_DIR}/i18n/selfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp"
    "${ICU_SOURCE_DIR}/i18n/simpletz.cpp"
    "${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp"
    "${ICU_SOURCE_DIR}/i18n/sortkey.cpp"
    "${ICU_SOURCE_DIR}/i18n/standardplural.cpp"
    "${ICU_SOURCE_DIR}/i18n/string_segment.cpp"
    "${ICU_SOURCE_DIR}/i18n/strmatch.cpp"
    "${ICU_SOURCE_DIR}/i18n/strrepl.cpp"
    "${ICU_SOURCE_DIR}/i18n/stsearch.cpp"
    "${ICU_SOURCE_DIR}/i18n/taiwncal.cpp"
    "${ICU_SOURCE_DIR}/i18n/timezone.cpp"
    "${ICU_SOURCE_DIR}/i18n/titletrn.cpp"
    "${ICU_SOURCE_DIR}/i18n/tmunit.cpp"
    "${ICU_SOURCE_DIR}/i18n/tmutamt.cpp"
    "${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp"
    "${ICU_SOURCE_DIR}/i18n/toupptrn.cpp"
    "${ICU_SOURCE_DIR}/i18n/translit.cpp"
    "${ICU_SOURCE_DIR}/i18n/transreg.cpp"
    "${ICU_SOURCE_DIR}/i18n/tridpars.cpp"
    "${ICU_SOURCE_DIR}/i18n/tzfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/tzgnames.cpp"
    "${ICU_SOURCE_DIR}/i18n/tznames.cpp"
    "${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp"
    "${ICU_SOURCE_DIR}/i18n/tzrule.cpp"
    "${ICU_SOURCE_DIR}/i18n/tztrans.cpp"
    "${ICU_SOURCE_DIR}/i18n/ucal.cpp"
    "${ICU_SOURCE_DIR}/i18n/ucln_in.cpp"
    "${ICU_SOURCE_DIR}/i18n/ucol.cpp"
    "${ICU_SOURCE_DIR}/i18n/ucol_res.cpp"
    "${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp"
    "${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp"
    "${ICU_SOURCE_DIR}/i18n/ucsdet.cpp"
    "${ICU_SOURCE_DIR}/i18n/udat.cpp"
    "${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp"
    "${ICU_SOURCE_DIR}/i18n/udatpg.cpp"
    "${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp"
    "${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp"
    "${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp"
    "${ICU_SOURCE_DIR}/i18n/ulocdata.cpp"
    "${ICU_SOURCE_DIR}/i18n/umsg.cpp"
    "${ICU_SOURCE_DIR}/i18n/unesctrn.cpp"
    "${ICU_SOURCE_DIR}/i18n/uni2name.cpp"
    "${ICU_SOURCE_DIR}/i18n/units_complexconverter.cpp"
    "${ICU_SOURCE_DIR}/i18n/units_converter.cpp"
    "${ICU_SOURCE_DIR}/i18n/units_data.cpp"
    "${ICU_SOURCE_DIR}/i18n/units_router.cpp")
    "${ICU_SOURCE_DIR}/i18n/units_router.cpp"
    "${ICU_SOURCE_DIR}/i18n/unum.cpp"
    "${ICU_SOURCE_DIR}/i18n/unumsys.cpp"
    "${ICU_SOURCE_DIR}/i18n/upluralrules.cpp"
    "${ICU_SOURCE_DIR}/i18n/uregex.cpp"
    "${ICU_SOURCE_DIR}/i18n/uregexc.cpp"
    "${ICU_SOURCE_DIR}/i18n/uregion.cpp"
    "${ICU_SOURCE_DIR}/i18n/usearch.cpp"
    "${ICU_SOURCE_DIR}/i18n/uspoof.cpp"
    "${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp"
    "${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp"
    "${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp"
    "${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp"
    "${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp"
    "${ICU_SOURCE_DIR}/i18n/utmscale.cpp"
    "${ICU_SOURCE_DIR}/i18n/utrans.cpp"
    "${ICU_SOURCE_DIR}/i18n/vtzone.cpp"
    "${ICU_SOURCE_DIR}/i18n/vzone.cpp"
    "${ICU_SOURCE_DIR}/i18n/windtfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp"
    "${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp"
    "${ICU_SOURCE_DIR}/i18n/zonemeta.cpp"
    "${ICU_SOURCE_DIR}/i18n/zrule.cpp"
    "${ICU_SOURCE_DIR}/i18n/ztrans.cpp")

file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ")
enable_language(ASM)
@@ -464,6 +481,11 @@ if (ARCH_S390X)
else()
    set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt75l_dat.S" )
endif()
+# ^^ you might be confused how for different little endian platforms (x86, ARM) the same assembly files can be used.
+# These files are indeed assembly but they only contain data ('.long' directive), which makes them portable across CPUs.
+# Only the endianness and the character set (ASCII, EBCDIC) make a difference, also see
+# https://unicode-org.github.io/icu/userguide/icu_data/#sharing-icu-data-between-platforms, 'Sharing ICU Data Between Platforms'
+# (and as an experiment, try re-generating the data files on x86 vs. ARM, ... you'll get exactly the same files)

set(ICUDATA_SOURCES
    "${ICUDATA_SOURCE_FILE}"
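To make the comment above concrete: such a "data" assembly file carries no instructions, only a symbol and raw words emitted with `.long`, so it assembles identically on any little-endian target. A minimal sketch with a hypothetical symbol name and payload (not the real ICU data):

```sh
# Create and assemble a tiny data-only .S file of the same shape as icudt75l_dat.S.
cat > example_dat.S <<'EOF'
        .globl example_dat
        .section .rodata
        .balign 16
example_dat:
        .long 0x00010203        /* raw little-endian words, no CPU instructions */
        .long 0x04050607
EOF
cc -c example_dat.S -o example_dat.o   # works unchanged on x86_64 and aarch64
```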
contrib/libarchive vendored (2 lines changed)
@@ -1 +1 @@
-Subproject commit ee45796171324519f0c0bfd012018dd099296336
+Subproject commit 313aa1fa10b657de791e3202c168a6c833bc3543
@@ -1,6 +1,6 @@
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/libarchive")

-set(SRCS
+set(SRCS
    "${LIBRARY_DIR}/libarchive/archive_acl.c"
    "${LIBRARY_DIR}/libarchive/archive_blake2sp_ref.c"
    "${LIBRARY_DIR}/libarchive/archive_blake2s_ref.c"
@@ -135,7 +135,7 @@ set(SRCS
)

add_library(_libarchive ${SRCS})
-target_include_directories(_libarchive PUBLIC
+target_include_directories(_libarchive PUBLIC
    ${CMAKE_CURRENT_SOURCE_DIR}
    "${LIBRARY_DIR}/libarchive"
)
@@ -157,7 +157,7 @@ if (TARGET ch_contrib::zlib)
endif()

if (TARGET ch_contrib::zstd)
-    target_compile_definitions(_libarchive PUBLIC HAVE_ZSTD_H=1 HAVE_LIBZSTD=1 HAVE_LIBZSTD_COMPRESSOR=1)
+    target_compile_definitions(_libarchive PUBLIC HAVE_ZSTD_H=1 HAVE_LIBZSTD=1 HAVE_ZSTD_compressStream=1)
    target_link_libraries(_libarchive PRIVATE ch_contrib::zstd)
endif()

@@ -179,4 +179,4 @@ if (OS_LINUX)
)
endif()

-add_library(ch_contrib::libarchive ALIAS _libarchive)
+add_library(ch_contrib::libarchive ALIAS _libarchive)
@@ -334,13 +334,16 @@ typedef uint64_t uintmax_t;
/* #undef ARCHIVE_XATTR_LINUX */

/* Version number of bsdcpio */
-#define BSDCPIO_VERSION_STRING "3.7.0"
+#define BSDCPIO_VERSION_STRING "3.7.4"

/* Version number of bsdtar */
-#define BSDTAR_VERSION_STRING "3.7.0"
+#define BSDTAR_VERSION_STRING "3.7.4"

/* Version number of bsdcat */
-#define BSDCAT_VERSION_STRING "3.7.0"
+#define BSDCAT_VERSION_STRING "3.7.4"

+/* Version number of bsdunzip */
+#define BSDUNZIP_VERSION_STRING "3.7.4"

/* Define to 1 if you have the `acl_create_entry' function. */
/* #undef HAVE_ACL_CREATE_ENTRY */
@@ -642,8 +645,8 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `getgrnam_r' function. */
#define HAVE_GETGRNAM_R 1

-/* Define to 1 if platform uses `optreset` to reset `getopt` */
-#define HAVE_GETOPT_OPTRESET 1
+/* Define to 1 if you have the `getline' function. */
+#define HAVE_GETLINE 1

/* Define to 1 if you have the `getpid' function. */
#define HAVE_GETPID 1
@@ -750,6 +753,12 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `pcreposix' library (-lpcreposix). */
/* #undef HAVE_LIBPCREPOSIX */

+/* Define to 1 if you have the `pcre2-8' library (-lpcre2-8). */
+/* #undef HAVE_LIBPCRE2 */
+
+/* Define to 1 if you have the `pcreposix' library (-lpcre2posix). */
+/* #undef HAVE_LIBPCRE2POSIX */

/* Define to 1 if you have the `xml2' library (-lxml2). */
#define HAVE_LIBXML2 1

@@ -765,9 +774,8 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `zstd' library (-lzstd). */
/* #undef HAVE_LIBZSTD */

-/* Define to 1 if you have the `zstd' library (-lzstd) with compression
-   support. */
-/* #undef HAVE_LIBZSTD_COMPRESSOR */
+/* Define to 1 if you have the ZSTD_compressStream function. */
+/* #undef HAVE_ZSTD_compressStream */

/* Define to 1 if you have the <limits.h> header file. */
#define HAVE_LIMITS_H 1
@@ -923,6 +931,9 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the <pcreposix.h> header file. */
/* #undef HAVE_PCREPOSIX_H */

+/* Define to 1 if you have the <pcre2posix.h> header file. */
+/* #undef HAVE_PCRE2POSIX_H */

/* Define to 1 if you have the `pipe' function. */
#define HAVE_PIPE 1

@@ -1029,6 +1040,12 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `strrchr' function. */
#define HAVE_STRRCHR 1

+/* Define to 1 if the system has the type `struct statfs'. */
+/* #undef HAVE_STRUCT_STATFS */
+
/* Define to 1 if `f_iosize' is a member of `struct statfs'. */
/* #undef HAVE_STRUCT_STATFS_F_IOSIZE */

/* Define to 1 if `f_namemax' is a member of `struct statfs'. */
/* #undef HAVE_STRUCT_STATFS_F_NAMEMAX */

@@ -1077,6 +1094,9 @@ typedef uint64_t uintmax_t;
/* Define to 1 if you have the `symlink' function. */
#define HAVE_SYMLINK 1

+/* Define to 1 if you have the `sysconf' function. */
+#define HAVE_SYSCONF 1
+
/* Define to 1 if you have the <sys/acl.h> header file. */
/* #undef HAVE_SYS_ACL_H */

@@ -1273,13 +1293,13 @@ typedef uint64_t uintmax_t;
/* #undef HAVE__MKGMTIME */

/* Define as const if the declaration of iconv() needs const. */
-#define ICONV_CONST
+#define ICONV_CONST

/* Version number of libarchive as a single integer */
-#define LIBARCHIVE_VERSION_NUMBER "3007000"
+#define LIBARCHIVE_VERSION_NUMBER "3007004"

/* Version number of libarchive */
-#define LIBARCHIVE_VERSION_STRING "3.7.0"
+#define LIBARCHIVE_VERSION_STRING "3.7.4"

/* Define to 1 if `lstat' dereferences a symlink specified with a trailing
   slash. */
@@ -1333,7 +1353,7 @@ typedef uint64_t uintmax_t;
#endif /* SAFE_TO_DEFINE_EXTENSIONS */

/* Version number of package */
-#define VERSION "3.7.0"
+#define VERSION "3.7.4"

/* Number of bits in a file offset, on hosts where this is settable. */
/* #undef _FILE_OFFSET_BITS */
contrib/libuv vendored (2 lines changed)
@@ -1 +1 @@
-Subproject commit 4482964660c77eec1166cd7d14fb915e3dbd774a
+Subproject commit 714b58b9849568211ade86b44dd91d37f8a2175e
@@ -10,6 +10,7 @@ set(uv_sources
    src/random.c
    src/strscpy.c
    src/strtok.c
+   src/thread-common.c
    src/threadpool.c
    src/timer.c
    src/uv-common.c
@@ -70,10 +71,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
    list(APPEND uv_defines _GNU_SOURCE _POSIX_C_SOURCE=200112)
    list(APPEND uv_libraries rt)
    list(APPEND uv_sources
-       src/unix/epoll.c
-       src/unix/linux-core.c
-       src/unix/linux-inotify.c
-       src/unix/linux-syscalls.c
+       src/unix/linux.c
        src/unix/procfs-exepath.c
        src/unix/random-getrandom.c
        src/unix/random-sysctl-linux.c)
contrib/openssl vendored (2 lines changed)
@@ -1 +1 @@
-Subproject commit 66deddc1e53cda8706604a019777259372d1bd62
+Subproject commit b3e62c440f390e12e77c80675f883af82ad3d5ed
@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.8.3.59"
+ARG VERSION="24.8.4.13"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.8.3.59"
+ARG VERSION="24.8.4.13"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""
@@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="24.8.3.59"
+ARG VERSION="24.8.4.13"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

#docker-official-library:off
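Because the channel, repository and version are plain build arguments in these Dockerfiles, a specific release can be pinned at image build time. A sketch of overriding them (the image tag and build-context path are placeholders):

```sh
# Build a server image pinned to the version referenced in this commit.
docker build \
  --build-arg REPO_CHANNEL=stable \
  --build-arg VERSION=24.8.4.13 \
  -t local/clickhouse-server:24.8.4.13 \
  docker/server
```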
@@ -109,7 +109,7 @@ if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CL
            <networks>
                <ip>::/0</ip>
            </networks>
-           <password>${CLICKHOUSE_PASSWORD}</password>
+           <password><![CDATA[${CLICKHOUSE_PASSWORD//]]>/]]]]><![CDATA[>}]]></password>
            <quota>default</quota>
            <access_management>${CLICKHOUSE_ACCESS_MANAGEMENT}</access_management>
        </${CLICKHOUSE_USER}>
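The replacement `<password>` line is dense, so here is the idea: a literal `]]>` in the password would end the CDATA section early and break the generated XML, so the `${CLICKHOUSE_PASSWORD//]]>/]]]]><![CDATA[>}` expansion rewrites every `]]>` into `]]]]><![CDATA[>`, splitting one CDATA section into two at each dangerous spot. A standalone sketch:

```sh
#!/bin/bash
# Demonstrate the CDATA splice on a password containing the CDATA terminator.
CLICKHOUSE_PASSWORD='secret]]>payload'
echo "<password><![CDATA[${CLICKHOUSE_PASSWORD//]]>/]]]]><![CDATA[>}]]></password>"
# Output: <password><![CDATA[secret]]]]><![CDATA[>payload]]></password>
# An XML parser concatenates the two CDATA sections back into: secret]]>payload
```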
@@ -124,6 +124,8 @@ function setup_logs_replication
    check_logs_credentials || return 0
    __set_connection_args

+   echo "My hostname is ${HOSTNAME}"
+
    echo 'Create all configured system logs'
    clickhouse-client --query "SYSTEM FLUSH LOGS"

@@ -184,7 +186,17 @@ function setup_logs_replication
        /^TTL /d
    ')

-   echo -e "Creating remote destination table ${table}_${hash} with statement:\n${statement}" >&2
+   echo -e "Creating remote destination table ${table}_${hash} with statement:" >&2
+
+   echo "::group::${table}"
+   # this is the only way a big "$statement" can be printed without causing an EAGAIN error
+   # cat: write error: Resource temporarily unavailable
+   statement_print="${statement}"
+   if [ "${#statement_print}" -gt 4000 ]; then
+       statement_print="${statement::1999}\n…\n${statement:${#statement}-1999}"
+   fi
+   echo -e "$statement_print"
+   echo "::endgroup::"

    echo "$statement" | clickhouse-client --database_replicated_initial_query_timeout_sec=10 \
        --distributed_ddl_task_timeout=30 --distributed_ddl_output_mode=throw_only_active \
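The `statement_print` logic above avoids flooding the log (and the EAGAIN write error mentioned in the comment) by printing only the head and tail of very long statements. The same bash substring idiom in isolation, with illustrative sizes:

```sh
#!/bin/bash
# Keep the first and last 1999 characters of a long string, eliding the middle.
s=$(printf 'x%.0s' {1..5000})            # 5000-character test string
if [ "${#s}" -gt 4000 ]; then
    s="${s::1999}\n…\n${s:${#s}-1999}"   # literal \n and … expanded by echo -e below
fi
echo -e "$s" | wc -l                     # 3 lines: head, ellipsis, tail
```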
docs/changelogs/v24.3.11.7-lts.md (new file, 17 lines)
@@ -0,0 +1,17 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.3.11.7-lts (28795d0a47e) FIXME as compared to v24.3.10.33-lts (37b6502ebf0)

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#67479](https://github.com/ClickHouse/ClickHouse/issues/67479): In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
* Backported in [#69243](https://github.com/ClickHouse/ClickHouse/issues/69243): The `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and led to the LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#69221](https://github.com/ClickHouse/ClickHouse/issues/69221): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).
18
docs/changelogs/v24.5.8.10-stable.md
Normal file
@ -0,0 +1,18 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.5.8.10-stable (f11729638ea) FIXME as compared to v24.5.7.31-stable (6c185e9aec1)

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#69295](https://github.com/ClickHouse/ClickHouse/issues/69295): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#69245](https://github.com/ClickHouse/ClickHouse/issues/69245): A `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and led to a LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix crash when using `s3` table function with GLOB paths and filters. [#69176](https://github.com/ClickHouse/ClickHouse/pull/69176) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#69223](https://github.com/ClickHouse/ClickHouse/issues/69223): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).

16
docs/changelogs/v24.6.6.6-stable.md
Normal file
@ -0,0 +1,16 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.6.6.6-stable (a4c4580e639) FIXME as compared to v24.6.5.30-stable (e6e196c92d6)

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#69197](https://github.com/ClickHouse/ClickHouse/issues/69197): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#69225](https://github.com/ClickHouse/ClickHouse/issues/69225): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).

17
docs/changelogs/v24.7.6.8-stable.md
Normal file
@ -0,0 +1,17 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.7.6.8-stable (7779883593a) FIXME as compared to v24.7.5.37-stable (f2533ca97be)

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#69198](https://github.com/ClickHouse/ClickHouse/issues/69198): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#69249](https://github.com/ClickHouse/ClickHouse/issues/69249): A `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and led to a LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#69227](https://github.com/ClickHouse/ClickHouse/issues/69227): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).

22
docs/changelogs/v24.8.4.13-lts.md
Normal file
@ -0,0 +1,22 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.8.4.13-lts (53195bc189b) FIXME as compared to v24.8.3.59-lts (e729b9fa40e)

#### Improvement
* Backported in [#68699](https://github.com/ClickHouse/ClickHouse/issues/68699): Delete old code of named collections from dictionaries and substitute it with the new one, which allows using DDL-created named collections in dictionaries. Closes [#60936](https://github.com/ClickHouse/ClickHouse/issues/60936), closes [#36890](https://github.com/ClickHouse/ClickHouse/issues/36890). [#68412](https://github.com/ClickHouse/ClickHouse/pull/68412) ([Kseniia Sumarokova](https://github.com/kssenii)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#69231](https://github.com/ClickHouse/ClickHouse/issues/69231): Fix parsing error when null should be inserted as default in some cases during JSON type parsing. [#68955](https://github.com/ClickHouse/ClickHouse/pull/68955) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69251](https://github.com/ClickHouse/ClickHouse/issues/69251): A `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and led to a LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#69189](https://github.com/ClickHouse/ClickHouse/issues/69189): Don't create Object type if use_json_alias_for_old_object_type=1 but allow_experimental_object_type=0. [#69150](https://github.com/ClickHouse/ClickHouse/pull/69150) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#69229](https://github.com/ClickHouse/ClickHouse/issues/69229): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)).
* Backported in [#69219](https://github.com/ClickHouse/ClickHouse/issues/69219): Disable perf-like test with sanitizers. [#69194](https://github.com/ClickHouse/ClickHouse/pull/69194) ([alesapin](https://github.com/alesapin)).

72
docs/en/engines/table-engines/integrations/azure-queue.md
Normal file
@ -0,0 +1,72 @@
---
slug: /en/engines/table-engines/integrations/azure-queue
sidebar_position: 181
sidebar_label: AzureQueue
---

# AzureQueue Table Engine

This engine provides an integration with the [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs) ecosystem, allowing streaming data import.

## Create Table {#creating-a-table}

``` sql
CREATE TABLE test (name String, value UInt32)
    ENGINE = AzureQueue(...)
    [SETTINGS]
    [mode = '',]
    [after_processing = 'keep',]
    [keeper_path = '',]
    ...
```

**Engine parameters**

`AzureQueue` parameters are the same as the `AzureBlobStorage` table engine supports. See the parameters section [here](../../../engines/table-engines/integrations/azureBlobStorage.md).

**Example**

```sql
CREATE TABLE azure_queue_engine_table (name String, value UInt32)
    ENGINE = AzureQueue('DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite1:10000/devstoreaccount1/data/')
SETTINGS
    mode = 'unordered'
```

## Settings {#settings}

The set of supported settings is the same as for the `S3Queue` table engine, but without the `s3queue_` prefix. See the [full list of settings](../../../engines/table-engines/integrations/s3queue.md#settings).

## Description {#description}

`SELECT` is not particularly useful for streaming import (except for debugging), because each file can be imported only once. It is more practical to create real-time threads using [materialized views](../../../sql-reference/statements/create/view.md). To do this:

1. Use the engine to create a table for consuming from the specified path in Azure Blob Storage and consider it a data stream.
2. Create a table with the desired structure.
3. Create a materialized view that converts data from the engine and puts it into a previously created table.

When the `MATERIALIZED VIEW` joins the engine, it starts collecting data in the background.

Example:

``` sql
CREATE TABLE azure_queue_engine_table (name String, value UInt32)
    ENGINE=AzureQueue('<endpoint>', 'CSV', 'gzip')
    SETTINGS
        mode = 'unordered';

CREATE TABLE stats (name String, value UInt32)
    ENGINE = MergeTree() ORDER BY name;

CREATE MATERIALIZED VIEW consumer TO stats
    AS SELECT name, value FROM azure_queue_engine_table;

SELECT * FROM stats ORDER BY name;
```

## Virtual columns {#virtual-columns}

- `_path` — Path to the file.
- `_file` — Name of the file.

For more information about virtual columns see [here](../../../engines/table-engines/index.md#table_engines-virtual_columns).
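For instance (using the table from the example above), the virtual columns can be selected alongside the regular ones, which is handy when debugging ingestion:

```sql
SELECT _path, _file, name, value
FROM azure_queue_engine_table
LIMIT 10;
```

Note that, as with any direct `SELECT` from the engine, rows read this way are consumed from the stream.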

@ -35,7 +35,7 @@ CREATE TABLE s3_engine_table (name String, value UInt32)
[SETTINGS ...]
```

### Engine parameters
### Engine parameters {#parameters}

- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
- `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.

@ -5,6 +5,7 @@ sidebar_label: S3Queue
---

# S3Queue Table Engine

This engine provides integration with the [Amazon S3](https://aws.amazon.com/s3/) ecosystem and allows streaming import. This engine is similar to the [Kafka](../../../engines/table-engines/integrations/kafka.md) and [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) engines, but provides S3-specific features.

## Create Table {#creating-a-table}
@ -16,27 +17,25 @@ CREATE TABLE s3_queue_engine_table (name String, value UInt32)
    [mode = '',]
    [after_processing = 'keep',]
    [keeper_path = '',]
    [s3queue_loading_retries = 0,]
    [s3queue_processing_threads_num = 1,]
    [s3queue_enable_logging_to_s3queue_log = 0,]
    [s3queue_polling_min_timeout_ms = 1000,]
    [s3queue_polling_max_timeout_ms = 10000,]
    [s3queue_polling_backoff_ms = 0,]
    [s3queue_tracked_file_ttl_sec = 0,]
    [s3queue_tracked_files_limit = 1000,]
    [s3queue_cleanup_interval_min_ms = 10000,]
    [s3queue_cleanup_interval_max_ms = 30000,]
    [loading_retries = 0,]
    [processing_threads_num = 1,]
    [enable_logging_to_s3queue_log = 0,]
    [polling_min_timeout_ms = 1000,]
    [polling_max_timeout_ms = 10000,]
    [polling_backoff_ms = 0,]
    [tracked_file_ttl_sec = 0,]
    [tracked_files_limit = 1000,]
    [cleanup_interval_min_ms = 10000,]
    [cleanup_interval_max_ms = 30000,]
```

Starting with `24.7`, settings without the `s3queue_` prefix are also supported.
:::warning
Before `24.7`, it is required to use the `s3queue_` prefix for all settings apart from `mode`, `after_processing` and `keeper_path`.
:::

**Engine parameters**

- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
- `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.
- `format` — The [format](../../../interfaces/formats.md#formats) of the file.
- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are used from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).
- `compression` — Compression type. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. Parameter is optional. By default, it will autodetect compression by file extension.
`S3Queue` parameters are the same as the `S3` table engine supports. See the parameters section [here](../../../engines/table-engines/integrations/s3.md#parameters).

**Example**

@ -989,19 +989,52 @@ ALTER TABLE tab DROP STATISTICS a;
These lightweight statistics aggregate information about the distribution of values in columns. Statistics are stored in every part and updated on every insert.
They can be used for PREWHERE optimization only if the setting `allow_statistics_optimize = 1` is enabled.
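A minimal sketch of how the pieces fit together (table and column names are hypothetical; assumes the experimental statistics feature flag is enabled):

```sql
SET allow_experimental_statistics = 1;

CREATE TABLE tab
(
    a Float64 STATISTICS(tdigest, uniq),
    b Int64   STATISTICS(minmax, countmin)
)
ENGINE = MergeTree
ORDER BY tuple();

-- Let the optimizer use the statistics when moving predicates to PREWHERE.
SET allow_statistics_optimize = 1;

SELECT count() FROM tab WHERE a < 10 AND b == 42;
```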

#### Available Types of Column Statistics {#available-types-of-column-statistics}
### Available Types of Column Statistics {#available-types-of-column-statistics}

- `MinMax`

    The minimum and maximum column values, which allow estimating the selectivity of range filters on numeric columns.

    Syntax: `minmax`

- `TDigest`

    [TDigest](https://github.com/tdunning/t-digest) sketches which allow computing approximate percentiles (e.g. the 90th percentile) for numeric columns.

    Syntax: `tdigest`

- `Uniq`

    [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) sketches which provide an estimate of how many distinct values a column contains.

- `count_min`
    Syntax: `uniq`

- `CountMin`

    [CountMin](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) sketches which provide an approximate count of the frequency of each value in a column.

    Syntax: `countmin`

### Supported Data Types {#supported-data-types}

|          | (U)Int*, Float*, Decimal(*), Date*, Boolean, Enum* | String or FixedString |
|----------|----------------------------------------------------|-----------------------|
| CountMin | ✔                                                  | ✔                     |
| MinMax   | ✔                                                  | ✗                     |
| TDigest  | ✔                                                  | ✗                     |
| Uniq     | ✔                                                  | ✔                     |

### Supported Operations {#supported-operations}

|          | Equality filters (==) | Range filters (>, >=, <, <=) |
|----------|-----------------------|------------------------------|
| CountMin | ✔                     | ✗                            |
| MinMax   | ✗                     | ✔                            |
| TDigest  | ✗                     | ✔                            |
| Uniq     | ✔                     | ✗                            |

## Column-level Settings {#column-level-settings}

@ -39,6 +39,7 @@ The supported formats are:
| [JSONCompact](#jsoncompact)                         | ✔ | ✔ |
| [JSONCompactStrings](#jsoncompactstrings)           | ✗ | ✔ |
| [JSONCompactColumns](#jsoncompactcolumns)           | ✔ | ✔ |
| [JSONCompactWithProgress](#jsoncompactwithprogress) | ✗ | ✔ |
| [JSONEachRow](#jsoneachrow)                         | ✔ | ✔ |
| [PrettyJSONEachRow](#prettyjsoneachrow)             | ✗ | ✔ |
| [JSONEachRowWithProgress](#jsoneachrowwithprogress) | ✗ | ✔ |

@ -988,6 +989,59 @@ Example:

Columns that are not present in the block will be filled with default values (you can use the [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) setting here)

## JSONCompactWithProgress {#jsoncompactwithprogress}

In this format, ClickHouse outputs each row as a separate, newline-delimited JSON object.

Each row is either a metadata object, data object, progress information or statistics object:

1. **Metadata Object (`meta`)**
   - Describes the structure of the data rows.
   - Fields: `name` (column name), `type` (data type, e.g., `UInt32`, `String`, etc.).
   - Example: `{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]}`
   - Appears before any data objects.

2. **Data Object (`data`)**
   - Represents a row of query results.
   - Fields: An array with values corresponding to the columns defined in the metadata.
   - Example: `{"data":["1", "John Doe"]}`
   - Appears after the metadata object, one per row.

3. **Progress Information Object (`progress`)**
   - Provides real-time progress feedback during query execution.
   - Fields: `read_rows`, `read_bytes`, `written_rows`, `written_bytes`, `total_rows_to_read`, `result_rows`, `result_bytes`, `elapsed_ns`.
   - Example: `{"progress":{"read_rows":"8","read_bytes":"168"}}`
   - May appear intermittently.

4. **Statistics Object (`statistics`)**
   - Summarizes query execution statistics.
   - Fields: `rows`, `rows_before_limit_at_least`, `elapsed`, `rows_read`, `bytes_read`.
   - Example: `{"statistics": {"rows":2, "elapsed":0.001995, "rows_read":8}}`
   - Appears at the end.

5. **Exception Object (`exception`)**
   - Represents an error that occurred during query execution.
   - Fields: A single text field containing the error message.
   - Example: `{"exception": "Code: 395. DB::Exception: Value passed to 'throwIf' function is non-zero..."}`
   - Appears when an error is encountered.

6. **Totals Object (`totals`)**
   - Provides the totals for each numeric column in the result set.
   - Fields: An array with total values corresponding to the columns defined in the metadata.
   - Example: `{"totals": ["", "3"]}`
   - Appears at the end of the data rows, if applicable.

Example:

```json
{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]}
{"progress":{"read_rows":"8","read_bytes":"168","written_rows":"0","written_bytes":"0","total_rows_to_read":"2","result_rows":"0","result_bytes":"0","elapsed_ns":"0"}}
{"data":["1", "John Doe"]}
{"data":["2", "Joe Doe"]}
{"statistics": {"rows":2, "rows_before_limit_at_least":8, "elapsed":0.001995, "rows_read":8, "bytes_read":168}}
```

## JSONEachRow {#jsoneachrow}

In this format, ClickHouse outputs each row as a separate, newline-delimited JSON object.

@ -1342,6 +1396,7 @@ SELECT * FROM json_each_row_nested
- [input_format_json_ignore_unknown_keys_in_named_tuple](/docs/en/operations/settings/settings-formats.md/#input_format_json_ignore_unknown_keys_in_named_tuple) - ignore unknown keys in json object for named tuples. Default value - `false`.
- [input_format_json_compact_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_json_compact_allow_variable_number_of_columns) - allow variable number of columns in JSONCompact/JSONCompactEachRow format, ignore extra columns and use default values on missing columns. Default value - `false`.
- [input_format_json_throw_on_bad_escape_sequence](/docs/en/operations/settings/settings-formats.md/#input_format_json_throw_on_bad_escape_sequence) - throw an exception if JSON string contains bad escape sequence. If disabled, bad escape sequences will remain as is in the data. Default value - `true`.
- [input_format_json_empty_as_default](/docs/en/operations/settings/settings-formats.md/#input_format_json_empty_as_default) - treat empty fields in JSON input as default values. Default value - `false`. For complex default expressions [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) must be enabled too.
- [output_format_json_quote_64bit_integers](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_integers) - controls quoting of 64-bit integers in JSON output format. Default value - `true`.
- [output_format_json_quote_64bit_floats](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_floats) - controls quoting of 64-bit floats in JSON output format. Default value - `false`.
- [output_format_json_quote_denormals](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_denormals) - enables '+nan', '-nan', '+inf', '-inf' outputs in JSON output format. Default value - `false`.

10
docs/en/interfaces/third-party/gui.md
vendored
@ -233,6 +233,16 @@ Features:
- Useful tools: Zookeeper data exploration, query EXPLAIN, kill queries, etc.
- Visualization metric charts: queries and resource usage, number of merges/mutations, merge performance, query performance, etc.

### CKibana {#ckibana}

[CKibana](https://github.com/TongchengOpenSource/ckibana) is a lightweight service that allows you to effortlessly search, explore, and visualize ClickHouse data using the native Kibana UI.

Features:

- Translates chart requests from the native Kibana UI into ClickHouse query syntax.
- Supports advanced features such as sampling and caching to enhance query performance.
- Minimizes the learning cost for users after migrating from Elasticsearch to ClickHouse.

## Commercial {#commercial}

### DataGrip {#datagrip}

@ -6,7 +6,7 @@ import SelfManaged from '@site/docs/en/_snippets/_self_managed_only_no_roadmap.m

<SelfManaged />

[SSL 'strict' option](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) enables mandatory certificate validation for the incoming connections. In this case, only connections with trusted certificates can be established. Connections with untrusted certificates will be rejected. Thus, certificate validation allows to uniquely authenticate an incoming connection. `Common Name` or `subjectAltName extension` field of the certificate is used to identify the connected user. This allows to associate multiple certificates with the same user. Additionally, reissuing and revoking of the certificates does not affect the ClickHouse configuration.
[SSL 'strict' option](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) enables mandatory certificate validation for the incoming connections. In this case, only connections with trusted certificates can be established. Connections with untrusted certificates will be rejected. Thus, certificate validation allows to uniquely authenticate an incoming connection. `Common Name` or `subjectAltName extension` field of the certificate is used to identify the connected user. `subjectAltName extension` supports the usage of one wildcard '*' in the server configuration. This allows to associate multiple certificates with the same user. Additionally, reissuing and revoking of the certificates does not affect the ClickHouse configuration.

To enable SSL certificate authentication, a list of `Common Name`'s or `Subject Alt Name`'s for each ClickHouse user must be specified in the settings file `users.xml`:

@ -30,6 +30,12 @@ To enable SSL certificate authentication, a list of `Common Name`'s or `Subject
        </ssl_certificates>
        <!-- Other settings -->
    </user_name_2>
    <user_name_3>
        <ssl_certificates>
            <!-- Wildcard support -->
            <subject_alt_name>URI:spiffe://foo.com/*/bar</subject_alt_name>
        </ssl_certificates>
    </user_name_3>
</users>
</clickhouse>
```

@ -1463,26 +1463,29 @@ Examples:

## logger {#logger}

Logging settings.
The location and format of log messages.

Keys:

- `level` – Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`.
- `log` – The log file. Contains all the entries according to `level`.
- `errorlog` – Error log file.
- `size` – Size of the file. Applies to `log` and `errorlog`. Once the file reaches `size`, ClickHouse archives and renames it, and creates a new log file in its place.
- `count` – The number of archived log files that ClickHouse stores.
- `console` – Send `log` and `errorlog` to the console instead of file. To enable, set to `1` or `true`.
- `console_log_level` – Logging level for console. Default to `level`.
- `use_syslog` - Log to syslog as well.
- `syslog_level` - Logging level for logging to syslog.
- `stream_compress` – Compress `log` and `errorlog` with `lz4` stream compression. To enable, set to `1` or `true`.
- `formatting` – Specify log format to be printed in console log (currently only `json` supported).
- `level` – Log level. Acceptable values: `none` (turn logging off), `fatal`, `critical`, `error`, `warning`, `notice`, `information`, `debug`, `trace`, `test`
- `log` – The path to the log file.
- `errorlog` – The path to the error log file.
- `size` – Rotation policy: Maximum size of the log files in bytes. Once the log file size exceeds this threshold, it is renamed and archived, and a new log file is created.
- `count` – Rotation policy: How many historical log files ClickHouse keeps at most.
- `stream_compress` – Compress log messages using LZ4. Set to `1` or `true` to enable.
- `console` – Do not write log messages to log files, instead print them in the console. Set to `1` or `true` to enable. Default is `1` if ClickHouse does not run in daemon mode, `0` otherwise.
- `console_log_level` – Log level for console output. Defaults to `level`.
- `formatting` – Log format for console output. Currently, only `json` is supported.
- `use_syslog` - Also forward log output to syslog.
- `syslog_level` - Log level for logging to syslog.

Both log and error log file names (only file names, not directories) support date and time format specifiers.
**Log format specifiers**

**Format specifiers**
Using the following format specifiers, you can define a pattern for the resulting file name. “Example” column shows possible results for `2023-07-06 18:32:07`.
File names in `log` and `errorlog` paths support the format specifiers below for the resulting file name (the directory part does not support them).

Column “Example” shows the output at `2023-07-06 18:32:07`.

| Specifier   | Description                                                                                                           | Example                  |
|-------------|-----------------------------------------------------------------------------------------------------------------------|--------------------------|
@ -1537,18 +1540,37 @@ Using the following format specifiers, you can define a pattern for the resultin
</logger>
```

Writing to the console can be configured. Config example:
To print log messages only in the console:

``` xml
<logger>
    <level>information</level>
    <console>1</console>
    <console>true</console>
</logger>
```

**Per-level Overrides**

The log level of individual log names can be overridden. For example, to mute all messages of the loggers "Backup" and "RBAC":

```xml
<logger>
    <levels>
        <logger>
            <name>Backup</name>
            <level>none</level>
        </logger>
        <logger>
            <name>RBAC</name>
            <level>none</level>
        </logger>
    </levels>
</logger>
```

### syslog

Writing to the syslog is also supported. Config example:
To write log messages additionally to syslog:

``` xml
<logger>
@ -1562,14 +1584,12 @@ Writing to the syslog is also supported. Config example:
</logger>
```

Keys for syslog:
Keys for `<syslog>`:

- use_syslog — Required setting if you want to write to the syslog.
- address — The host\[:port\] of syslogd. If omitted, the local daemon is used.
- hostname — Optional. The name of the host that logs are sent from.
- facility — [The syslog facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility) in uppercase letters with the “LOG_” prefix: (`LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, and so on). Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
- format – Message format. Possible values: `bsd` and `syslog.`
- `address` — The address of syslog in format `host\[:port\]`. If omitted, the local daemon is used.
- `hostname` — The name of the host from which logs are sent. Optional.
- `facility` — The syslog [facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility). Must be specified uppercase with a “LOG_” prefix, e.g. `LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, etc. Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
- `format` – Log message format. Possible values: `bsd` and `syslog`.

### Log formats

@ -1588,6 +1608,7 @@ You can specify the log format that will be outputted in the console log. Curren
    "source_line": "192"
}
```

To enable JSON logging support, use the following snippet:

```xml

@ -752,6 +752,17 @@ Possible values:

Default value: 0.

### input_format_json_empty_as_default {#input_format_json_empty_as_default}

When enabled, replace empty input fields in JSON with default values. For complex default expressions, `input_format_defaults_for_omitted_fields` must be enabled too.

Possible values:

+ 0 — Disable.
+ 1 — Enable.

Default value: 0.
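A minimal sketch of the setting in action (table and values are hypothetical): with the setting enabled, an empty JSON field falls back to the column's default instead of producing a parsing error.

```sql
CREATE TABLE t (name String, age UInt8 DEFAULT 18) ENGINE = Memory;

SET input_format_json_empty_as_default = 1;

INSERT INTO t FORMAT JSONEachRow {"name": "Alice", "age": ""};

SELECT * FROM t;  -- expected: Alice, 18
```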

## TSV format settings {#tsv-format-settings}

### input_format_tsv_empty_as_default {#input_format_tsv_empty_as_default}

@ -47,13 +47,15 @@ keeper foo bar

- `ls '[path]'` -- Lists the nodes for the given path (default: cwd)
- `cd '[path]'` -- Changes the working path (default `.`)
- `cp '<src>' '<dest>'` -- Copies 'src' node to 'dest' path
- `mv '<src>' '<dest>'` -- Moves 'src' node to the 'dest' path
- `exists '<path>'` -- Returns `1` if node exists, `0` otherwise
- `set '<path>' <value> [version]` -- Updates the node's value. Only updates if version matches (default: -1)
- `create '<path>' <value> [mode]` -- Creates new node with the set value
- `touch '<path>'` -- Creates new node with an empty string as value. Doesn't throw an exception if the node already exists
- `get '<path>'` -- Returns the node's value
- `rm '<path>' [version]` -- Removes the node only if version matches (default: -1)
- `rmr '<path>'` -- Recursively deletes path. Confirmation required
- `rmr '<path>' [limit]` -- Recursively deletes path if the subtree size is smaller than the limit. Confirmation required (default limit = 100)
- `flwc <command>` -- Executes four-letter-word command
- `help` -- Prints this message
- `get_direct_children_number '[path]'` -- Gets the number of direct children nodes under a specific path
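As a sketch (path hypothetical), the optional `rmr` limit is passed as a second argument; the command still asks for confirmation, and refuses to delete subtrees whose size is not smaller than the limit:

```
rmr '/test/obsolete-subtree' 500
```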
@ -1617,45 +1617,348 @@ The calculation is performed relative to specific points in time:

If unit `WEEK` was specified, `toStartOfInterval` assumes that weeks start on Monday. Note that this behavior is different from that of function `toStartOfWeek` in which weeks start by default on Sunday.

**See Also**
**Syntax**

```sql
toStartOfInterval(value, INTERVAL x unit[, time_zone])
toStartOfInterval(value, INTERVAL x unit[, origin[, time_zone]])
```

The second overload emulates TimescaleDB's `time_bucket()` function and PostgreSQL's `date_bin()` function, e.g.

``` SQL
SELECT toStartOfInterval(toDateTime('2023-01-01 14:45:00'), INTERVAL 1 MINUTE, toDateTime('2023-01-01 14:35:30'));
```
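Since the origin overload follows `date_bin` semantics, the interval grid is aligned to the given origin rather than to the epoch; the query above should therefore return `2023-01-01 14:44:30`, the last one-minute boundary counted from `14:35:30` that does not exceed the input value.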

**See Also**
- [date_trunc](#date_trunc)

## toTime

Converts a date with time to a certain fixed date, while preserving the time.

**Syntax**

```sql
toTime(date[,timezone])
```

**Arguments**

- `date` — Date to convert to a time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
- `timezone` (optional) — Timezone for the returned value. [String](../data-types/string.md).

**Returned value**

- DateTime with date equated to `1970-01-02` while preserving the time. [DateTime](../data-types/datetime.md).

:::note
If the `date` input argument contained sub-second components, they will be dropped in the returned `DateTime` value with second-accuracy.
:::

**Example**

Query:

```sql
SELECT toTime(toDateTime64('1970-12-10 01:20:30.3000',3)) AS result, toTypeName(result);
```

Result:

```response
┌──────────────result─┬─toTypeName(result)─┐
│ 1970-01-02 01:20:30 │ DateTime           │
└─────────────────────┴────────────────────┘
```

## toRelativeYearNum

Converts a date, or date with time, to the number of the year, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of years elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeYearNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of years from a fixed reference point in the past. [UInt16](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeYearNum(toDate('2002-12-08')) AS y1,
    toRelativeYearNum(toDate('2010-10-26')) AS y2
```

Result:

```response
┌───y1─┬───y2─┐
│ 2002 │ 2010 │
└──────┴──────┘
```

## toRelativeQuarterNum

Converts a date, or date with time, to the number of the quarter, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of quarters elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeQuarterNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of quarters from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeQuarterNum(toDate('1993-11-25')) AS q1,
    toRelativeQuarterNum(toDate('2005-01-05')) AS q2
```

Result:

```response
┌───q1─┬───q2─┐
│ 7975 │ 8020 │
└──────┴──────┘
```

## toRelativeMonthNum

Converts a date, or date with time, to the number of the month, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of months elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeMonthNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of months from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeMonthNum(toDate('2001-04-25')) AS m1,
    toRelativeMonthNum(toDate('2009-07-08')) AS m2
```

Result:

```response
┌────m1─┬────m2─┐
│ 24016 │ 24115 │
└───────┴───────┘
```

## toRelativeWeekNum

Converts a date, or date with time, to the number of the week, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of weeks elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeWeekNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of weeks from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeWeekNum(toDate('2000-02-29')) AS w1,
    toRelativeWeekNum(toDate('2001-01-12')) AS w2
```

Result:

```response
┌───w1─┬───w2─┐
│ 1574 │ 1619 │
└──────┴──────┘
```

## toRelativeDayNum

Converts a date, or date with time, to the number of the day, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of days elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeDayNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of days from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeDayNum(toDate('1993-10-05')) AS d1,
    toRelativeDayNum(toDate('2000-09-20')) AS d2
```

Result:

```response
┌───d1─┬────d2─┐
│ 8678 │ 11220 │
└──────┴───────┘
```

## toRelativeHourNum

Converts a date, or date with time, to the number of the hour, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of hours elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeHourNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of hours from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeHourNum(toDateTime('1993-10-05 05:20:36')) AS h1,
    toRelativeHourNum(toDateTime('2000-09-20 14:11:29')) AS h2
```

Result:

```response
┌─────h1─┬─────h2─┐
│ 208276 │ 269292 │
└────────┴────────┘
```

## toRelativeMinuteNum

Converts a date, or date with time, to the number of the minute, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of minutes elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeMinuteNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of minutes from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeMinuteNum(toDateTime('1993-10-05 05:20:36')) AS m1,
    toRelativeMinuteNum(toDateTime('2000-09-20 14:11:29')) AS m2
```

Result:

```response
┌───────m1─┬───────m2─┐
│ 12496580 │ 16157531 │
└──────────┴──────────┘
```

## toRelativeSecondNum

Converts a date, or date with time, to the number of the second, starting from a certain fixed point in the past.
Converts a date, or date with time, to the number of seconds elapsed since a certain fixed point in the past.

**Syntax**

```sql
toRelativeSecondNum(date)
```

**Arguments**

- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).

**Returned value**

- The number of seconds from a fixed reference point in the past. [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
    toRelativeSecondNum(toDateTime('1993-10-05 05:20:36')) AS s1,
    toRelativeSecondNum(toDateTime('2000-09-20 14:11:29')) AS s2
```

Result:

```response
┌────────s1─┬────────s2─┐
│ 749794836 │ 969451889 │
└───────────┴───────────┘
```

## toISOYear

@ -3884,19 +4187,29 @@ Result:
└───────────────────────────────────────────────────────────────────────┘
```

## timeSlots(StartTime, Duration,\[, Size\])
## timeSlots

For a time interval starting at `StartTime` and continuing for `Duration` seconds, it returns an array of moments in time, consisting of points from this interval rounded down to the `Size` in seconds. `Size` is an optional parameter set to 1800 (30 minutes) by default.
This is necessary, for example, when searching for pageviews in the corresponding session.
Accepts DateTime and DateTime64 as the `StartTime` argument. For DateTime, the `Duration` and `Size` arguments must be `UInt32`. For DateTime64, they must be `Decimal64`.
Returns an array of DateTime/DateTime64 (the return type matches the type of `StartTime`). For DateTime64, the return value's scale can differ from the scale of `StartTime`: the highest scale among all given arguments is used.

Example:
**Syntax**

```sql
timeSlots(StartTime, Duration[, Size])
```

**Example**

```sql
SELECT timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600));
SELECT timeSlots(toDateTime('1980-12-12 21:01:02', 'UTC'), toUInt32(600), 299);
SELECT timeSlots(toDateTime64('1980-12-12 21:01:02.1234', 4, 'UTC'), toDecimal64(600.1, 1), toDecimal64(299, 0));
```

Result:

``` text
┌─timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600))─┐
│ ['2012-01-01 12:00:00','2012-01-01 12:30:00']               │
@ -20,10 +20,10 @@ overlay(s, replace, offset[, length])

**Parameters**

- `input`: A string type [String](../data-types/string.md).
- `s`: A string type [String](../data-types/string.md).
- `replace`: A string type [String](../data-types/string.md).
- `offset`: An integer type [Int](../data-types/int-uint.md). If `offset` is negative, it is counted from the end of the `input` string.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within input to be replaced. If `length` is not specified, the number of bytes removed from `input` equals the length of `replace`; otherwise `length` bytes are removed.
- `offset`: An integer type [Int](../data-types/int-uint.md) (1-based). If `offset` is negative, it is counted from the end of the string `s`.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within the input string `s` to be replaced. If `length` is not specified, the number of bytes removed from `s` equals the length of `replace`; otherwise `length` bytes are removed.

**Returned value**

@ -32,22 +32,35 @@ overlay(s, replace, offset[, length])
**Example**

```sql
SELECT overlay('ClickHouse SQL', 'CORE', 12) AS res;
SELECT overlay('My father is from Mexico.', 'mother', 4) AS res;
```

Result:

```text
┌─res─────────────┐
│ ClickHouse CORE │
└─────────────────┘
┌─res──────────────────────┐
│ My mother is from Mexico.│
└──────────────────────────┘
```

```sql
SELECT overlay('My father is from Mexico.', 'dad', 4, 6) AS res;
```

Result:

```text
┌─res───────────────────┐
│ My dad is from Mexico.│
└───────────────────────┘
```

## overlayUTF8

Replace part of the string `s` with another string `replace`, starting at the 1-based index `offset`.

Assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.
Assumes that the string contains valid UTF-8 encoded text.
If this assumption is violated, no exception is thrown and the result is undefined.

**Syntax**

@ -59,8 +72,8 @@ overlayUTF8(s, replace, offset[, length])

- `s`: A string type [String](../data-types/string.md).
- `replace`: A string type [String](../data-types/string.md).
- `offset`: An integer type [Int](../data-types/int-uint.md). If `offset` is negative, it is counted from the end of the `input` string.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within input to be replaced. If `length` is not specified, the number of characters removed from `input` equals the length of `replace`; otherwise `length` characters are removed.
- `offset`: An integer type [Int](../data-types/int-uint.md) (1-based). If `offset` is negative, it is counted from the end of the input string `s`.
- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within the input string `s` to be replaced. If `length` is not specified, the number of characters removed from `s` equals the length of `replace`; otherwise `length` characters are removed.

**Returned value**

@ -69,15 +82,15 @@ overlayUTF8(s, replace, offset[, length])
**Example**

```sql
SELECT overlayUTF8('ClickHouse是一款OLAP数据库', '开源', 12, 2) AS res;
SELECT overlayUTF8('Mein Vater ist aus Österreich.', 'der Türkei', 20) AS res;
```

Result:

```text
┌─res────────────────────────┐
│ ClickHouse是开源OLAP数据库 │
└────────────────────────────┘
┌─res───────────────────────────┐
│ Mein Vater ist aus der Türkei.│
└───────────────────────────────┘
```

## replaceOne

@ -3906,7 +3906,7 @@ Result:

## toDateTime64

Converts the argument to the [DateTime64](../data-types/datetime64.md) data type.
Converts an input value to a value of type [DateTime64](../data-types/datetime64.md).

**Syntax**

@ -3918,7 +3918,7 @@ toDateTime64(expr, scale, [timezone])

- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` - Time zone of the specified datetime64 object.
- `timezone` (optional) - Time zone of the specified datetime64 object.

**Returned value**

@ -3977,10 +3977,137 @@ SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul') AS value, toTypeN

## toDateTime64OrZero

Like [toDateTime64](#todatetime64), this function converts an input value to a value of type [DateTime64](../data-types/datetime64.md) but returns the minimum value of [DateTime64](../data-types/datetime64.md) if an invalid argument is received.

**Syntax**

``` sql
toDateTime64OrZero(expr, scale, [timezone])
```

**Arguments**

- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` (optional) - Time zone of the specified DateTime64 object.

**Returned value**

- A calendar date and time of day, with sub-second precision, otherwise the minimum value of `DateTime64`: `1970-01-01 01:00:00.000`. [DateTime64](../data-types/datetime64.md).

**Example**

Query:

```sql
SELECT toDateTime64OrZero('2008-10-12 00:00:00 00:30:30', 3) AS invalid_arg
```

Result:

```response
┌─────────────invalid_arg─┐
│ 1970-01-01 01:00:00.000 │
└─────────────────────────┘
```

**See also**

- [toDateTime64](#todatetime64).
- [toDateTime64OrNull](#todatetime64ornull).
- [toDateTime64OrDefault](#todatetime64ordefault).

## toDateTime64OrNull

Like [toDateTime64](#todatetime64), this function converts an input value to a value of type [DateTime64](../data-types/datetime64.md) but returns `NULL` if an invalid argument is received.

**Syntax**

``` sql
toDateTime64OrNull(expr, scale, [timezone])
```

**Arguments**

- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` (optional) - Time zone of the specified DateTime64 object.

**Returned value**

- A calendar date and time of day, with sub-second precision, otherwise `NULL`. [DateTime64](../data-types/datetime64.md)/[NULL](../data-types/nullable.md).

**Example**

Query:

```sql
SELECT
    toDateTime64OrNull('1976-10-18 00:00:00.30', 3) AS valid_arg,
    toDateTime64OrNull('1976-10-18 00:00:00 30', 3) AS invalid_arg
```

Result:

```response
┌───────────────valid_arg─┬─invalid_arg─┐
│ 1976-10-18 00:00:00.300 │        ᴺᵁᴸᴸ │
└─────────────────────────┴─────────────┘
```

**See also**

- [toDateTime64](#todatetime64).
- [toDateTime64OrZero](#todatetime64orzero).
- [toDateTime64OrDefault](#todatetime64ordefault).

## toDateTime64OrDefault

Like [toDateTime64](#todatetime64), this function converts an input value to a value of type [DateTime64](../data-types/datetime64.md),
but returns either the minimum value of [DateTime64](../data-types/datetime64.md)
or the provided `default` value if an invalid argument is received.

**Syntax**

``` sql
toDateTime64OrDefault(expr, scale, [timezone, default])
```

**Arguments**

- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
- `timezone` (optional) - Time zone of the specified DateTime64 object.
- `default` (optional) - Default value to return if an invalid argument is received. [DateTime64](../data-types/datetime64.md).

**Returned value**

- A calendar date and time of day, with sub-second precision, otherwise the minimum value of `DateTime64` or the `default` value if provided. [DateTime64](../data-types/datetime64.md).

**Example**

Query:

```sql
SELECT
    toDateTime64OrDefault('1976-10-18 00:00:00 30', 3) AS invalid_arg,
    toDateTime64OrDefault('1976-10-18 00:00:00 30', 3, 'UTC', toDateTime64('2001-01-01 00:00:00.00',3)) AS invalid_arg_with_default
```

Result:

```response
┌─────────────invalid_arg─┬─invalid_arg_with_default─┐
│ 1970-01-01 01:00:00.000 │  2000-12-31 23:00:00.000 │
└─────────────────────────┴──────────────────────────┘
```

**See also**

- [toDateTime64](#todatetime64).
- [toDateTime64OrZero](#todatetime64orzero).
- [toDateTime64OrNull](#todatetime64ornull).

## toDecimal32

Converts an input value to a value of type [`Decimal(9, S)`](../data-types/decimal.md) with scale of `S`. Throws an exception in case of an error.

@ -15,7 +15,14 @@ The `FROM` clause specifies the source to read data from:

Subquery is another `SELECT` query that may be specified in parentheses inside the `FROM` clause.

`FROM` clause can contain multiple data sources, separated by commas, which is equivalent of performing [CROSS JOIN](../../../sql-reference/statements/select/join.md) on them.
The `FROM` clause can contain multiple data sources, separated by commas, which is equivalent to performing a [CROSS JOIN](../../../sql-reference/statements/select/join.md) on them.

`FROM` can optionally appear before a `SELECT` clause. This is a ClickHouse-specific extension of standard SQL which makes `SELECT` statements easier to read. Example:

```sql
FROM table
SELECT *
```

## FINAL Modifier

@ -45,19 +52,19 @@ As an alternative to using `FINAL`, it is sometimes possible to use different qu

### Example Usage

**Using the `FINAL` keyword**
Using the `FINAL` keyword

```sql
SELECT x, y FROM mytable FINAL WHERE x > 1;
```

**Using `FINAL` as a query-level setting**
Using `FINAL` as a query-level setting

```sql
SELECT x, y FROM mytable WHERE x > 1 SETTINGS final = 1;
```

**Using `FINAL` as a session-level setting**
Using `FINAL` as a session-level setting

```sql
SET final = 1;

@ -506,14 +506,23 @@ bool RMRCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & nod
        return false;
    node->args.push_back(std::move(path));

    ASTPtr remove_nodes_limit;
    if (ParserUnsignedInteger{}.parse(pos, remove_nodes_limit, expected))
        node->args.push_back(remove_nodes_limit->as<ASTLiteral &>().value);
    else
        node->args.push_back(UInt64(100));

    return true;
}

void RMRCommand::execute(const ASTKeeperQuery * query, KeeperClient * client) const
{
    String path = client->getAbsolutePath(query->args[0].safeGet<String>());
    UInt64 remove_nodes_limit = query->args[1].safeGet<UInt64>();

    client->askConfirmation(
        "You are going to recursively delete path " + path, [client, path] { client->zookeeper->removeRecursive(path); });
        "You are going to recursively delete path " + path,
        [client, path, remove_nodes_limit] { client->zookeeper->removeRecursive(path, static_cast<UInt32>(remove_nodes_limit)); });
}

bool ReconfigCommand::parse(IParser::Pos & pos, std::shared_ptr<ASTKeeperQuery> & node, DB::Expected & expected) const

@ -184,7 +184,7 @@ class RMRCommand : public IKeeperClientCommand

    void execute(const ASTKeeperQuery * query, KeeperClient * client) const override;

    String getHelpMessage() const override { return "{} <path> -- Recursively deletes path. Confirmation required"; }
    String getHelpMessage() const override { return "{} <path> [limit] -- Recursively deletes path if the subtree size is smaller than the limit. Confirmation required (default limit = 100)"; }
};

class ReconfigCommand : public IKeeperClientCommand

@ -11,6 +11,7 @@
#include <Core/ServerUUID.h>
#include <Common/logger_useful.h>
#include <Common/CgroupsMemoryUsageObserver.h>
#include <Common/MemoryWorker.h>
#include <Common/ErrorHandlers.h>
#include <Common/assertProcessUserMatchesDataOwner.h>
#include <Common/makeSocketAddress.h>

@ -384,6 +385,9 @@ try
        LOG_INFO(log, "Background threads finished in {} ms", watch.elapsedMilliseconds());
    });

    MemoryWorker memory_worker(config().getUInt64("memory_worker_period_ms", 0));
    memory_worker.start();

    static ServerErrorHandler error_handler;
    Poco::ErrorHandler::set(&error_handler);

@ -425,8 +429,9 @@ try
            for (const auto & server : *servers)
                metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads(), server.refusedConnections()});
            return metrics;
        }
    );
        },
        /*update_jemalloc_epoch_=*/memory_worker.getSource() != MemoryWorker::MemoryUsageSource::Jemalloc,
        /*update_rss_=*/memory_worker.getSource() == MemoryWorker::MemoryUsageSource::None);

    std::vector<std::string> listen_hosts = DB::getMultipleValuesFromConfig(config(), "", "listen_host");

@ -655,7 +660,6 @@ try
    GWPAsan::initFinished();
#endif


    LOG_INFO(log, "Ready for connections.");

    waitForTerminationRequest();
@ -11,7 +11,6 @@
#include <Poco/Util/HelpFormatter.h>
#include <Poco/Environment.h>
#include <Poco/Config.h>
#include <Common/Jemalloc.h>
#include <Common/scope_guard_safe.h>
#include <Common/logger_useful.h>
#include <base/phdr_cache.h>

@ -25,6 +24,7 @@
#include <base/Numa.h>
#include <Common/PoolId.h>
#include <Common/MemoryTracker.h>
#include <Common/MemoryWorker.h>
#include <Common/ClickHouseRevision.h>
#include <Common/DNSResolver.h>
#include <Common/CgroupsMemoryUsageObserver.h>

@ -111,6 +111,8 @@
#include <filesystem>
#include <unordered_set>

#include <Common/Jemalloc.h>

#include "config.h"
#include <Common/config_version.h>

@ -449,9 +451,12 @@ void checkForUsersNotInMainConfig(
    }
}

namespace
{

/// Unused in other builds
#if defined(OS_LINUX)
static String readLine(const String & path)
String readLine(const String & path)
{
    ReadBufferFromFile in(path);
    String contents;

@ -459,7 +464,7 @@ static String readLine(const String & path)
    return contents;
}

static int readNumber(const String & path)
int readNumber(const String & path)
{
    ReadBufferFromFile in(path);
    int result;

@ -469,7 +474,7 @@ static int readNumber(const String & path)

#endif

static void sanityChecks(Server & server)
void sanityChecks(Server & server)
{
    std::string data_path = getCanonicalPath(server.config().getString("path", DBMS_DEFAULT_PATH));
    std::string logs_path = server.config().getString("logger.log", "");

@ -590,6 +595,8 @@ static void sanityChecks(Server & server)
    }
}

}

void loadStartupScripts(const Poco::Util::AbstractConfiguration & config, ContextMutablePtr context, Poco::Logger * log)
{
    try

@ -906,6 +913,8 @@ try
        LOG_INFO(log, "Background threads finished in {} ms", watch.elapsedMilliseconds());
    });

    MemoryWorker memory_worker(global_context->getServerSettings().memory_worker_period_ms);

    /// This object will periodically calculate some metrics.
    ServerAsynchronousMetrics async_metrics(
        global_context,

@ -924,8 +933,9 @@ try
            for (const auto & server : servers)
                metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads(), server.refusedConnections()});
            return metrics;
        }
    );
        },
        /*update_jemalloc_epoch_=*/memory_worker.getSource() != MemoryWorker::MemoryUsageSource::Jemalloc,
        /*update_rss_=*/memory_worker.getSource() == MemoryWorker::MemoryUsageSource::None);

    /// NOTE: global context should be destroyed *before* GlobalThreadPool::shutdown()
    /// Otherwise GlobalThreadPool::shutdown() will hang, since Context holds some threads.
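Aside: a minimal standalone restatement of the flag wiring above (everything here is hypothetical illustration, not patch code; the enum values are taken from `MemoryWorker::MemoryUsageSource` shown later in this diff). The idea: jemalloc's epoch only needs refreshing from the metrics thread when the memory worker does not already use jemalloc, and RSS is only polled by `AsynchronousMetrics` when no other source provides it.

```cpp
#include <cassert>

enum class MemoryUsageSource { None, Cgroups, Jemalloc };

struct MetricsFlags
{
    bool update_jemalloc_epoch;
    bool update_rss;
};

// Same decision rule as the two constructor arguments above.
MetricsFlags flagsFor(MemoryUsageSource source)
{
    return MetricsFlags{
        /*update_jemalloc_epoch=*/ source != MemoryUsageSource::Jemalloc,
        /*update_rss=*/ source == MemoryUsageSource::None};
}

int main()
{
    assert(!flagsFor(MemoryUsageSource::Jemalloc).update_jemalloc_epoch);
    assert(flagsFor(MemoryUsageSource::None).update_rss);
    assert(!flagsFor(MemoryUsageSource::Cgroups).update_rss);
}
```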
@ -1204,6 +1214,8 @@ try

    FailPointInjection::enableFromGlobalConfig(config());

    memory_worker.start();

    int default_oom_score = 0;

#if !defined(NDEBUG)

@ -1547,15 +1559,6 @@ try
    total_memory_tracker.setDescription("(total)");
    total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking);

    if (cgroups_memory_usage_observer)
    {
        double hard_limit_ratio = new_server_settings.cgroup_memory_watcher_hard_limit_ratio;
        double soft_limit_ratio = new_server_settings.cgroup_memory_watcher_soft_limit_ratio;
        cgroups_memory_usage_observer->setMemoryUsageLimits(
            static_cast<uint64_t>(max_server_memory_usage * hard_limit_ratio),
            static_cast<uint64_t>(max_server_memory_usage * soft_limit_ratio));
    }

    size_t merges_mutations_memory_usage_soft_limit = new_server_settings.merges_mutations_memory_usage_soft_limit;

    size_t default_merges_mutations_server_memory_usage = static_cast<size_t>(current_physical_server_memory * new_server_settings.merges_mutations_memory_usage_to_ram_ratio);

@ -1584,8 +1587,6 @@ try
    background_memory_tracker.setDescription("(background)");
    background_memory_tracker.setMetric(CurrentMetrics::MergesMutationsMemoryTracking);

    total_memory_tracker.setAllowUseJemallocMemory(new_server_settings.allow_use_jemalloc_memory);

    auto * global_overcommit_tracker = global_context->getGlobalOvercommitTracker();
    total_memory_tracker.setOvercommitTracker(global_overcommit_tracker);
@ -239,15 +239,36 @@ bool Authentication::areCredentialsValid(
            throw Authentication::Require<GSSAcceptorContext>(auth_data.getKerberosRealm());

        case AuthenticationType::SSL_CERTIFICATE:
        {
            for (SSLCertificateSubjects::Type type : {SSLCertificateSubjects::Type::CN, SSLCertificateSubjects::Type::SAN})
            {
                for (const auto & subject : auth_data.getSSLCertificateSubjects().at(type))
                {
                    if (ssl_certificate_credentials->getSSLCertificateSubjects().at(type).contains(subject))
                        return true;

                    // Wildcard support (only a single '*' is supported)
                    if (subject.contains('*'))
                    {
                        auto prefix = std::string_view(subject).substr(0, subject.find('*'));
                        auto suffix = std::string_view(subject).substr(subject.find('*') + 1);
                        auto slashes = std::count(subject.begin(), subject.end(), '/');

                        for (const auto & certificate_subject : ssl_certificate_credentials->getSSLCertificateSubjects().at(type))
                        {
                            bool matches_wildcard = certificate_subject.starts_with(prefix) && certificate_subject.ends_with(suffix);

                            // '*' must not represent a '/' in a URI, so check that the numbers of '/' are equal
                            bool matches_slashes = slashes == count(certificate_subject.begin(), certificate_subject.end(), '/');

                            if (matches_wildcard && matches_slashes)
                                return true;
                        }
                    }
                }
            }
            return false;
        }

        case AuthenticationType::SSH_KEY:
#if USE_SSH
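For illustration, here is a minimal standalone sketch of the same wildcard rule (the function name, test values, and the extra size guard against prefix/suffix overlap are assumptions for the sketch, not part of the patch):

```cpp
#include <algorithm>
#include <cassert>
#include <string_view>

/// Mirrors the check above: a single '*' in the allowed subject matches any
/// substring, but must not swallow extra '/' characters (URI path segments).
bool matchesWildcardSubject(std::string_view allowed, std::string_view presented)
{
    auto star = allowed.find('*');
    if (star == std::string_view::npos)
        return allowed == presented;

    std::string_view prefix = allowed.substr(0, star);
    std::string_view suffix = allowed.substr(star + 1);

    // Size guard (an addition in this sketch) keeps prefix and suffix from overlapping.
    bool matches_wildcard = presented.size() >= prefix.size() + suffix.size()
        && presented.starts_with(prefix) && presented.ends_with(suffix);
    bool matches_slashes = std::count(allowed.begin(), allowed.end(), '/')
        == std::count(presented.begin(), presented.end(), '/');
    return matches_wildcard && matches_slashes;
}

int main()
{
    assert(matchesWildcardSubject("URI:spiffe://foo.com/*/bar", "URI:spiffe://foo.com/baz/bar"));
    assert(!matchesWildcardSubject("URI:spiffe://foo.com/*/bar", "URI:spiffe://foo.com/a/b/bar"));
}
```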
@ -116,15 +116,17 @@ class GroupConcatImpl final
    SerializationPtr serialization;
    UInt64 limit;
    const String delimiter;
    const DataTypePtr type;

public:
    GroupConcatImpl(const DataTypePtr & data_type_, const Array & parameters_, UInt64 limit_, const String & delimiter_)
        : IAggregateFunctionDataHelper<GroupConcatData<has_limit>, GroupConcatImpl<has_limit>>(
            {data_type_}, parameters_, std::make_shared<DataTypeString>())
        , serialization(this->argument_types[0]->getDefaultSerialization())
        , limit(limit_)
        , delimiter(delimiter_)
        , type(data_type_)
    {
        serialization = isFixedString(type) ? std::make_shared<DataTypeString>()->getDefaultSerialization() : this->argument_types[0]->getDefaultSerialization();
    }

    String getName() const override { return name; }

@ -140,7 +142,14 @@ public:
        if (cur_data.data_size != 0)
            cur_data.insertChar(delimiter.c_str(), delimiter.size(), arena);

        cur_data.insert(columns[0], serialization, row_num, arena);
        if (isFixedString(type))
        {
            ColumnWithTypeAndName col = {columns[0]->getPtr(), type, "column"};
            const auto & col_str = castColumn(col, std::make_shared<DataTypeString>());
            cur_data.insert(col_str.get(), serialization, row_num, arena);
        }
        else
            cur_data.insert(columns[0], serialization, row_num, arena);
    }

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
@ -459,6 +459,8 @@ public:

    bool isParallelizeMergePrepareNeeded() const override { return is_parallelize_merge_prepare_needed; }

    constexpr static bool parallelizeMergeWithKey() { return true; }

    void parallelizeMergePrepare(AggregateDataPtrs & places, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled) const override
    {
        if constexpr (is_parallelize_merge_prepare_needed)

@ -145,6 +145,8 @@ public:

    virtual bool isParallelizeMergePrepareNeeded() const { return false; }

    constexpr static bool parallelizeMergeWithKey() { return false; }

    virtual void parallelizeMergePrepare(AggregateDataPtrs & /*places*/, ThreadPool & /*thread_pool*/, std::atomic<bool> & /*is_cancelled*/) const
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "parallelizeMergePrepare() with thread pool parameter isn't implemented for {} ", getName());

@ -169,7 +171,7 @@ public:

    /// Merges states (which the src places point to) with other states (which the dst places point to) of the current aggregation function,
    /// then destroys the states (which the src places point to).
    virtual void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * src_places, size_t size, size_t offset, Arena * arena) const = 0;
    virtual void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * src_places, size_t size, size_t offset, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled, Arena * arena) const = 0;

    /// Serializes state (to transmit it over the network, for example).
    virtual void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> version = std::nullopt) const = 0; /// NOLINT

@ -499,11 +501,15 @@ public:
            static_cast<const Derived *>(this)->merge(places[i] + place_offset, rhs[i], arena);
    }

    void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * rhs_places, size_t size, size_t offset, Arena * arena) const override
    void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * rhs_places, size_t size, size_t offset, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled, Arena * arena) const override
    {
        for (size_t i = 0; i < size; ++i)
        {
            static_cast<const Derived *>(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, arena);
            if constexpr (Derived::parallelizeMergeWithKey())
                static_cast<const Derived *>(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, thread_pool, is_cancelled, arena);
            else
                static_cast<const Derived *>(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, arena);

            static_cast<const Derived *>(this)->destroy(rhs_places[i] + offset);
        }
    }

@ -101,6 +101,13 @@ public:

    auto merge(const UniqExactSet & other, ThreadPool * thread_pool = nullptr, std::atomic<bool> * is_cancelled = nullptr)
    {
        /// If the size is large, we may convert the singleLevelHash to twoLevelHash and merge in parallel.
        if (other.size() > 40000)
        {
            if (isSingleLevel())
                convertToTwoLevel();
        }

        if (isSingleLevel() && other.isTwoLevel())
            convertToTwoLevel();
@ -913,11 +913,15 @@ void RestorerFromBackup::createTable(const QualifiedTableName & table_name)
    table_info.database = DatabaseCatalog::instance().getDatabase(table_name.database);
    DatabasePtr database = table_info.database;

    auto query_context = Context::createCopy(context);
    query_context->setSetting("database_replicated_allow_explicit_uuid", 3);
    query_context->setSetting("database_replicated_allow_replicated_engine_arguments", 3);

    /// Execute CREATE TABLE query (we call IDatabase::createTableRestoredFromBackup() to allow the database to do some
    /// database-specific things).
    database->createTableRestoredFromBackup(
        create_table_query,
        context,
        query_context,
        restore_coordination,
        std::chrono::duration_cast<std::chrono::milliseconds>(create_table_timeout).count());
}
@ -176,7 +176,7 @@ add_library (clickhouse_new_delete STATIC Common/new_delete.cpp)
target_link_libraries (clickhouse_new_delete PRIVATE clickhouse_common_io)
if (TARGET ch_contrib::jemalloc)
    target_link_libraries (clickhouse_new_delete PRIVATE ch_contrib::jemalloc)
    target_link_libraries (clickhouse_common_io PRIVATE ch_contrib::jemalloc)
    target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::jemalloc)
    target_link_libraries (clickhouse_storages_system PRIVATE ch_contrib::jemalloc)
endif()

@ -168,7 +168,7 @@ std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::g
        { return tryGetEntry(pool, timeouts, fail_message, settings, &table_to_check, /*async_callback=*/ {}); };

    return getManyImpl(settings, pool_mode, try_get_entry,
        /*skip_unavailable_endpoints=*/ std::nullopt,
        /*skip_unavailable_endpoints=*/ false, /// skip_unavailable_endpoints is used to get the min number of entries, and we need at least one
        /*priority_func=*/ {},
        settings.distributed_insert_skip_read_only_replicas);
}

@ -42,7 +42,7 @@ public:
        size_t max_error_cap = DBMS_CONNECTION_POOL_WITH_FAILOVER_MAX_ERROR_COUNT);

    using Entry = IConnectionPool::Entry;
    using PoolWithFailoverBase<IConnectionPool>::checkTryResultIsValid;
    using PoolWithFailoverBase<IConnectionPool>::getValidTryResult;

    /** Allocates connection to work. */
    Entry get(const ConnectionTimeouts & timeouts) override;

@ -98,7 +98,7 @@ public:

    std::vector<Base::ShuffledPool> getShuffledPools(const Settings & settings, GetPriorityFunc priority_func = {}, bool use_slowdown_count = false);

    size_t getMaxErrorCup() const { return Base::max_error_cap; }
    size_t getMaxErrorCap() const { return Base::max_error_cap; }

    void updateSharedError(std::vector<ShuffledPool> & shuffled_pools)
    {

@ -327,7 +327,7 @@ HedgedConnectionsFactory::State HedgedConnectionsFactory::processFinishedConnect
    ShuffledPool & shuffled_pool = shuffled_pools[index];
    LOG_INFO(log, "Connection failed at try №{}, reason: {}", (shuffled_pool.error_count + 1), fail_message);

    shuffled_pool.error_count = std::min(pool->getMaxErrorCup(), shuffled_pool.error_count + 1);
    shuffled_pool.error_count = std::min(pool->getMaxErrorCap(), shuffled_pool.error_count + 1);
    shuffled_pool.slowdown_count = 0;

    if (shuffled_pool.error_count >= max_tries)
@ -1,5 +1,3 @@
#include <Common/AsynchronousMetrics.h>

#include <IO/MMappedFileCache.h>
#include <IO/ReadHelpers.h>
#include <IO/UncompressedCache.h>

@ -8,8 +6,10 @@
#include <base/find_symbols.h>
#include <base/getPageSize.h>
#include <sys/resource.h>
#include <Common/AsynchronousMetrics.h>
#include <Common/CurrentMetrics.h>
#include <Common/Exception.h>
#include <Common/Jemalloc.h>
#include <Common/filesystemHelpers.h>
#include <Common/formatReadable.h>
#include <Common/logger_useful.h>

@ -69,10 +69,14 @@ static void openCgroupv2MetricFile(const std::string & filename, std::optional<R

AsynchronousMetrics::AsynchronousMetrics(
    unsigned update_period_seconds,
    const ProtocolServerMetricsFunc & protocol_server_metrics_func_)
    const ProtocolServerMetricsFunc & protocol_server_metrics_func_,
    bool update_jemalloc_epoch_,
    bool update_rss_)
    : update_period(update_period_seconds)
    , log(getLogger("AsynchronousMetrics"))
    , protocol_server_metrics_func(protocol_server_metrics_func_)
    , update_jemalloc_epoch(update_jemalloc_epoch_)
    , update_rss(update_rss_)
{
#if defined(OS_LINUX)
    openFileIfExists("/proc/cpuinfo", cpuinfo);

@ -411,9 +415,7 @@ Value saveJemallocMetricImpl(
    const std::string & jemalloc_full_name,
    const std::string & clickhouse_full_name)
{
    Value value{};
    size_t size = sizeof(value);
    mallctl(jemalloc_full_name.c_str(), &value, &size, nullptr, 0);
    auto value = getJemallocValue<Value>(jemalloc_full_name.c_str());
    values[clickhouse_full_name] = AsynchronousMetricValue(value, "An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html");
    return value;
}

@ -768,8 +770,11 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
    // 'epoch' is a special mallctl -- it updates the statistics. Without it, all
    // the following calls will return stale values. It increments and returns
    // the current epoch number, which might be useful to log as a sanity check.
    auto epoch = updateJemallocEpoch();
    new_values["jemalloc.epoch"] = { epoch, "An internal incremental update number of the statistics of jemalloc (Jason Evans' memory allocator), used in all other `jemalloc` metrics." };
    auto epoch = update_jemalloc_epoch ? updateJemallocEpoch() : getJemallocValue<uint64_t>("epoch");
    new_values["jemalloc.epoch"]
        = {epoch,
           "An internal incremental update number of the statistics of jemalloc (Jason Evans' memory allocator), used in all other "
           "`jemalloc` metrics."};

    // Collect the statistics themselves.
    saveJemallocMetric<size_t>(new_values, "allocated");

@ -782,10 +787,10 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
    saveJemallocMetric<size_t>(new_values, "background_thread.num_threads");
    saveJemallocMetric<uint64_t>(new_values, "background_thread.num_runs");
    saveJemallocMetric<uint64_t>(new_values, "background_thread.run_intervals");
    saveJemallocProf<size_t>(new_values, "active");
    saveJemallocProf<bool>(new_values, "active");
    saveAllArenasMetric<size_t>(new_values, "pactive");
    [[maybe_unused]] size_t je_malloc_pdirty = saveAllArenasMetric<size_t>(new_values, "pdirty");
    [[maybe_unused]] size_t je_malloc_pmuzzy = saveAllArenasMetric<size_t>(new_values, "pmuzzy");
    saveAllArenasMetric<size_t>(new_values, "pdirty");
    saveAllArenasMetric<size_t>(new_values, "pmuzzy");
    saveAllArenasMetric<size_t>(new_values, "dirty_purged");
    saveAllArenasMetric<size_t>(new_values, "muzzy_purged");
#endif
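Aside: the epoch mechanism described in the comment above can be shown with the plain jemalloc API, independent of ClickHouse. This is the canonical jemalloc usage pattern, not patch code:

```cpp
#include <cstdint>
#include <cstdio>
#include <jemalloc/jemalloc.h>

int main()
{
    // Writing any value to "epoch" refreshes jemalloc's statistics snapshot;
    // the old (incremented) epoch number is read back through the same call.
    uint64_t epoch = 1;
    size_t epoch_size = sizeof(epoch);
    mallctl("epoch", &epoch, &epoch_size, &epoch, epoch_size);

    // Only after the epoch bump do "stats.*" reads reflect current values.
    size_t allocated = 0;
    size_t allocated_size = sizeof(allocated);
    mallctl("stats.allocated", &allocated, &allocated_size, nullptr, 0);
    printf("epoch %llu, stats.allocated %zu\n", (unsigned long long)epoch, allocated);
}
```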
@ -814,41 +819,8 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
        " It is unspecified whether it includes the per-thread stacks and most of the allocated memory, that is allocated with the 'mmap' system call."
        " This metric exists only for completeness reasons. I recommend using the `MemoryResident` metric for monitoring."};

    /// We must update the value of total_memory_tracker periodically.
    /// Otherwise it might be calculated incorrectly - it can include a "drift" of memory amount.
    /// See https://github.com/ClickHouse/ClickHouse/issues/10293
    {
        Int64 amount = total_memory_tracker.get();
        Int64 peak = total_memory_tracker.getPeak();
        Int64 rss = data.resident;
        Int64 free_memory_in_allocator_arenas = 0;

#if USE_JEMALLOC
        /// According to jemalloc man, pdirty is:
        ///
        /// Number of pages within unused extents that are potentially
        /// dirty, and for which madvise() or similar has not been called.
        ///
        /// So they will be subtracted from RSS to make accounting more
        /// accurate, since those pages are not really RSS but a memory
        /// that can be used at anytime via jemalloc.
        free_memory_in_allocator_arenas = je_malloc_pdirty * getPageSize();
#endif

        Int64 difference = rss - amount;

        /// Log only if difference is high. This is for convenience. The threshold is arbitrary.
        if (difference >= 1048576 || difference <= -1048576)
            LOG_TRACE(log,
                "MemoryTracking: was {}, peak {}, free memory in arenas {}, will set to {} (RSS), difference: {}",
                ReadableSize(amount),
                ReadableSize(peak),
                ReadableSize(free_memory_in_allocator_arenas),
                ReadableSize(rss),
                ReadableSize(difference));

        MemoryTracker::setRSS(rss, free_memory_in_allocator_arenas);
    }
    if (update_rss)
        MemoryTracker::updateRSS(data.resident);
}

    {
@ -1,15 +1,14 @@
#pragma once

#include <Common/CgroupsMemoryUsageObserver.h>
#include <Common/MemoryStatisticsOS.h>
#include <Common/ThreadPool.h>
#include <Common/Stopwatch.h>
#include <IO/ReadBufferFromFile.h>

#include <condition_variable>
#include <map>
#include <mutex>
#include <string>
#include <thread>
#include <vector>
#include <optional>
#include <unordered_map>

@ -69,7 +68,9 @@ public:

    AsynchronousMetrics(
        unsigned update_period_seconds,
        const ProtocolServerMetricsFunc & protocol_server_metrics_func_);
        const ProtocolServerMetricsFunc & protocol_server_metrics_func_,
        bool update_jemalloc_epoch_,
        bool update_rss_);

    virtual ~AsynchronousMetrics();

@ -112,6 +113,9 @@ private:
    MemoryStatisticsOS memory_stat TSA_GUARDED_BY(data_mutex);
#endif

    [[maybe_unused]] const bool update_jemalloc_epoch;
    [[maybe_unused]] const bool update_rss;

#if defined(OS_LINUX)
    std::optional<ReadBufferFromFilePRead> meminfo TSA_GUARDED_BY(data_mutex);
    std::optional<ReadBufferFromFilePRead> loadavg TSA_GUARDED_BY(data_mutex);
@ -14,239 +14,21 @@
#include <fmt/ranges.h>

#include <cstdint>
#include <filesystem>
#include <memory>
#include <optional>

#include "config.h"
#if USE_JEMALLOC
# include <jemalloc/jemalloc.h>
#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)
#endif

using namespace DB;
namespace fs = std::filesystem;

namespace DB
{

namespace ErrorCodes
{
    extern const int FILE_DOESNT_EXIST;
    extern const int INCORRECT_DATA;
}

}

namespace
{

/// Format is
/// kernel 5
/// rss 15
/// [...]
using Metrics = std::map<std::string, uint64_t>;

Metrics readAllMetricsFromStatFile(ReadBufferFromFile & buf)
{
    Metrics metrics;
    while (!buf.eof())
    {
        std::string current_key;
        readStringUntilWhitespace(current_key, buf);

        assertChar(' ', buf);

        uint64_t value = 0;
        readIntText(value, buf);
        assertChar('\n', buf);

        auto [_, inserted] = metrics.emplace(std::move(current_key), value);
        chassert(inserted, "Duplicate keys in stat file");
    }
    return metrics;
}

uint64_t readMetricFromStatFile(ReadBufferFromFile & buf, const std::string & key)
{
    const auto all_metrics = readAllMetricsFromStatFile(buf);
    if (const auto it = all_metrics.find(key); it != all_metrics.end())
        return it->second;
    throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot find '{}' in '{}'", key, buf.getFileName());
}

struct CgroupsV1Reader : ICgroupsReader
{
    explicit CgroupsV1Reader(const fs::path & stat_file_dir) : buf(stat_file_dir / "memory.stat") { }

    uint64_t readMemoryUsage() override
    {
        std::lock_guard lock(mutex);
        buf.rewind();
        return readMetricFromStatFile(buf, "rss");
    }

    std::string dumpAllStats() override
    {
        std::lock_guard lock(mutex);
        buf.rewind();
        return fmt::format("{}", readAllMetricsFromStatFile(buf));
    }

private:
    std::mutex mutex;
    ReadBufferFromFile buf TSA_GUARDED_BY(mutex);
};

struct CgroupsV2Reader : ICgroupsReader
{
    explicit CgroupsV2Reader(const fs::path & stat_file_dir)
        : current_buf(stat_file_dir / "memory.current"), stat_buf(stat_file_dir / "memory.stat")
    {
    }

    uint64_t readMemoryUsage() override
    {
        std::lock_guard lock(mutex);
        current_buf.rewind();
        stat_buf.rewind();

        int64_t mem_usage = 0;
        /// memory.current contains a single number
        /// the reason why we subtract it is described here: https://github.com/ClickHouse/ClickHouse/issues/64652#issuecomment-2149630667
        readIntText(mem_usage, current_buf);
        mem_usage -= readMetricFromStatFile(stat_buf, "inactive_file");
        chassert(mem_usage >= 0, "Negative memory usage");
        return mem_usage;
    }

    std::string dumpAllStats() override
    {
        std::lock_guard lock(mutex);
        stat_buf.rewind();
        return fmt::format("{}", readAllMetricsFromStatFile(stat_buf));
    }

private:
    std::mutex mutex;
    ReadBufferFromFile current_buf TSA_GUARDED_BY(mutex);
    ReadBufferFromFile stat_buf TSA_GUARDED_BY(mutex);
};

/// Caveats:
/// - All of the logic in this file assumes that the current process is the only process in the
///   containing cgroup (or more precisely: the only process with significant memory consumption).
///   If this is not the case, then other processes' memory consumption may affect the internal
///   memory tracker ...
/// - Cgroups v1 and v2 allow nested cgroup hierarchies. As v1 has been deprecated for over half a
///   decade and will go away at some point, hierarchical detection is only implemented for v2.
/// - I did not test what happens if a host has v1 and v2 simultaneously enabled. I believe such
///   systems existed only for a short transition period.

std::optional<std::string> getCgroupsV1Path()
{
    auto path = default_cgroups_mount / "memory/memory.stat";
    if (!fs::exists(path))
        return {};
    return {default_cgroups_mount / "memory"};
}

std::pair<std::string, CgroupsMemoryUsageObserver::CgroupsVersion> getCgroupsPath()
{
    auto v2_path = getCgroupsV2PathContainingFile("memory.current");
    if (v2_path.has_value())
        return {*v2_path, CgroupsMemoryUsageObserver::CgroupsVersion::V2};

    auto v1_path = getCgroupsV1Path();
    if (v1_path.has_value())
        return {*v1_path, CgroupsMemoryUsageObserver::CgroupsVersion::V1};

    throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot find cgroups v1 or v2 current memory file");
}

}

namespace DB
{

CgroupsMemoryUsageObserver::CgroupsMemoryUsageObserver(std::chrono::seconds wait_time_)
    : log(getLogger("CgroupsMemoryUsageObserver")), wait_time(wait_time_)
{
    const auto [cgroup_path, version] = getCgroupsPath();

    cgroup_reader = createCgroupsReader(version, cgroup_path);

    LOG_INFO(
        log,
        "Will read the current memory usage from '{}' (cgroups version: {}), wait time is {} sec",
        cgroup_path,
        (version == CgroupsVersion::V1) ? "v1" : "v2",
        wait_time.count());
}
{}

CgroupsMemoryUsageObserver::~CgroupsMemoryUsageObserver()
{
    stopThread();
}

void CgroupsMemoryUsageObserver::setMemoryUsageLimits(uint64_t hard_limit_, uint64_t soft_limit_)
{
    std::lock_guard<std::mutex> limit_lock(limit_mutex);

    if (hard_limit_ == hard_limit && soft_limit_ == soft_limit)
        return;

    hard_limit = hard_limit_;
    soft_limit = soft_limit_;

    on_hard_limit = [this, hard_limit_](bool up)
    {
        if (up)
        {
            LOG_WARNING(log, "Exceeded hard memory limit ({})", ReadableSize(hard_limit_));

            /// Update current usage in memory tracker. Also reset free_memory_in_allocator_arenas to zero though we don't know if they are
            /// really zero. Trying to avoid OOM ...
            MemoryTracker::setRSS(hard_limit_, 0);
        }
        else
        {
            LOG_INFO(log, "Dropped below hard memory limit ({})", ReadableSize(hard_limit_));
        }
    };

    on_soft_limit = [this, soft_limit_](bool up)
    {
        if (up)
        {
            LOG_WARNING(log, "Exceeded soft memory limit ({})", ReadableSize(soft_limit_));

# if USE_JEMALLOC
            LOG_INFO(log, "Purging jemalloc arenas");
            mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", nullptr, nullptr, nullptr, 0);
# endif
            /// Reset current usage in memory tracker. Expect zero for free_memory_in_allocator_arenas as we just purged them.
            uint64_t memory_usage = cgroup_reader->readMemoryUsage();
            LOG_TRACE(
                log,
                "Read current memory usage {} bytes ({}) from cgroups, full available stats: {}",
                memory_usage,
                ReadableSize(memory_usage),
                cgroup_reader->dumpAllStats());
            MemoryTracker::setRSS(memory_usage, 0);

            LOG_INFO(log, "Purged jemalloc arenas. Current memory usage is {}", ReadableSize(memory_usage));
        }
        else
        {
            LOG_INFO(log, "Dropped below soft memory limit ({})", ReadableSize(soft_limit_));
        }
    };

    LOG_INFO(log, "Set new limits, soft limit: {}, hard limit: {}", ReadableSize(soft_limit_), ReadableSize(hard_limit_));
}

void CgroupsMemoryUsageObserver::setOnMemoryAmountAvailableChangedFn(OnMemoryAmountAvailableChangedFn on_memory_amount_available_changed_)
{
    std::lock_guard<std::mutex> memory_amount_available_changed_lock(memory_amount_available_changed_mutex);

@ -300,35 +82,6 @@ void CgroupsMemoryUsageObserver::runThread()
            std::lock_guard<std::mutex> memory_amount_available_changed_lock(memory_amount_available_changed_mutex);
            on_memory_amount_available_changed();
        }

        std::lock_guard<std::mutex> limit_lock(limit_mutex);
        if (soft_limit > 0 && hard_limit > 0)
        {
            uint64_t memory_usage = cgroup_reader->readMemoryUsage();
            LOG_TRACE(log, "Read current memory usage {} bytes ({}) from cgroups", memory_usage, ReadableSize(memory_usage));
            if (memory_usage > hard_limit)
            {
                if (last_memory_usage <= hard_limit)
                    on_hard_limit(true);
            }
            else
            {
                if (last_memory_usage > hard_limit)
                    on_hard_limit(false);
            }

            if (memory_usage > soft_limit)
            {
                if (last_memory_usage <= soft_limit)
                    on_soft_limit(true);
            }
            else
            {
                if (last_memory_usage > soft_limit)
                    on_soft_limit(false);
            }
            last_memory_usage = memory_usage;
        }
    }
    catch (...)
    {

@ -337,13 +90,6 @@ void CgroupsMemoryUsageObserver::runThread()
    }
}

std::unique_ptr<ICgroupsReader> createCgroupsReader(CgroupsMemoryUsageObserver::CgroupsVersion version, const fs::path & cgroup_path)
{
    if (version == CgroupsMemoryUsageObserver::CgroupsVersion::V2)
        return std::make_unique<CgroupsV2Reader>(cgroup_path);
    else
        return std::make_unique<CgroupsV1Reader>(cgroup_path);
}
}

#endif
@ -3,53 +3,27 @@
#include <Common/ThreadPool.h>

#include <chrono>
#include <memory>
#include <mutex>

namespace DB
{

struct ICgroupsReader
{
    virtual ~ICgroupsReader() = default;

    virtual uint64_t readMemoryUsage() = 0;

    virtual std::string dumpAllStats() = 0;
};

/// Does two things:
/// 1. Periodically reads the memory usage of the process from Linux cgroups.
///    You can specify soft or hard memory limits:
///    - When the soft memory limit is hit, drop jemalloc cache.
///    - When the hard memory limit is hit, update MemoryTracking metric to throw memory exceptions faster.
///    The goal of this is to avoid that the process hits the maximum allowed memory limit at which there is a good
///    chance that the Linux OOM killer terminates it. All of this is done because internal memory tracking in
///    ClickHouse can unfortunately under-estimate the actually used memory.
/// 2. Periodically reads the maximum memory available to the process (which can change due to cgroups settings).
///    You can specify a callback to react on changes. The callback typically reloads the configuration, i.e. Server
///    or Keeper configuration file. This reloads settings 'max_server_memory_usage' (Server) and 'max_memory_usage_soft_limit'
///    (Keeper) from which various other internal limits are calculated, including the soft and hard limits for (1.).
///    The goal of this is to provide elasticity when the container is scaled-up/scaled-down. The mechanism (polling
///    cgroups) is quite implicit, unfortunately there is currently no better way to communicate memory threshold changes
///    to the database.
/// Periodically reads the maximum memory available to the process (which can change due to cgroups settings).
/// You can specify a callback to react on changes. The callback typically reloads the configuration, i.e. Server
/// or Keeper configuration file. This reloads settings 'max_server_memory_usage' (Server) and 'max_memory_usage_soft_limit'
/// (Keeper) from which various other internal limits are calculated, including the soft and hard limits for (1.).
/// The goal of this is to provide elasticity when the container is scaled-up/scaled-down. The mechanism (polling
/// cgroups) is quite implicit, unfortunately there is currently no better way to communicate memory threshold changes
/// to the database.
#if defined(OS_LINUX)
class CgroupsMemoryUsageObserver
{
public:
    using OnMemoryLimitFn = std::function<void(bool)>;
    using OnMemoryAmountAvailableChangedFn = std::function<void()>;

    enum class CgroupsVersion : uint8_t
    {
        V1,
        V2
    };

    explicit CgroupsMemoryUsageObserver(std::chrono::seconds wait_time_);
    ~CgroupsMemoryUsageObserver();

    void setMemoryUsageLimits(uint64_t hard_limit_, uint64_t soft_limit_);
    void setOnMemoryAmountAvailableChangedFn(OnMemoryAmountAvailableChangedFn on_memory_amount_available_changed_);

    void startThread();

@ -60,32 +34,22 @@ private:
    const std::chrono::seconds wait_time;

    std::mutex limit_mutex;
    size_t hard_limit TSA_GUARDED_BY(limit_mutex) = 0;
    size_t soft_limit TSA_GUARDED_BY(limit_mutex) = 0;
    OnMemoryLimitFn on_hard_limit TSA_GUARDED_BY(limit_mutex);
    OnMemoryLimitFn on_soft_limit TSA_GUARDED_BY(limit_mutex);

    std::mutex memory_amount_available_changed_mutex;
    OnMemoryAmountAvailableChangedFn on_memory_amount_available_changed TSA_GUARDED_BY(memory_amount_available_changed_mutex);

    uint64_t last_memory_usage = 0; /// how much memory does the process use
    uint64_t last_available_memory_amount; /// how much memory can the process use

    void stopThread();

    void runThread();

    std::unique_ptr<ICgroupsReader> cgroup_reader;

    std::mutex thread_mutex;
    std::condition_variable cond;
    ThreadFromGlobalPool thread;
    bool quit = false;
};

std::unique_ptr<ICgroupsReader>
createCgroupsReader(CgroupsMemoryUsageObserver::CgroupsVersion version, const std::filesystem::path & cgroup_path);

#else
class CgroupsMemoryUsageObserver
{
@ -609,6 +609,7 @@
    M(728, UNEXPECTED_DATA_TYPE) \
    M(729, ILLEGAL_TIME_SERIES_TAGS) \
    M(730, REFRESH_FAILED) \
    M(731, QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE) \
    \
    M(900, DISTRIBUTED_CACHE_ERROR) \
    M(901, CANNOT_USE_DISTRIBUTED_CACHE) \

@ -63,6 +63,7 @@ static struct InitFiu
    REGULAR(keepermap_fail_drop_data) \
    REGULAR(lazy_pipe_fds_fail_close) \
    PAUSEABLE(infinite_sleep) \
    PAUSEABLE(stop_moving_part_before_swap_with_active) \


namespace FailPoints

@ -5,7 +5,6 @@
#include <Common/Exception.h>
#include <Common/Stopwatch.h>
#include <Common/logger_useful.h>
#include <jemalloc/jemalloc.h>

#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)

@ -26,7 +25,6 @@ namespace ErrorCodes

void purgeJemallocArenas()
{
    LOG_TRACE(getLogger("SystemJemalloc"), "Purging unused memory");
    Stopwatch watch;
    mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", nullptr, nullptr, nullptr, 0);
    ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurge);

@ -46,20 +44,6 @@ void checkJemallocProfilingEnabled()
        "set: MALLOC_CONF=background_thread:true,prof:true");
}

template <typename T>
void setJemallocValue(const char * name, T value)
{
    T old_value;
    size_t old_value_size = sizeof(T);
    if (mallctl(name, &old_value, &old_value_size, reinterpret_cast<void*>(&value), sizeof(T)))
    {
        LOG_WARNING(getLogger("Jemalloc"), "mallctl for {} failed", name);
        return;
    }

    LOG_INFO(getLogger("Jemalloc"), "Value for {} set to {} (from {})", name, value, old_value);
}

void setJemallocProfileActive(bool value)
{
    checkJemallocProfilingEnabled();
@ -5,6 +5,8 @@
#if USE_JEMALLOC

#include <string>
#include <Common/logger_useful.h>
#include <jemalloc/jemalloc.h>

namespace DB
{

@ -21,6 +23,59 @@ void setJemallocBackgroundThreads(bool enabled);

void setJemallocMaxBackgroundThreads(size_t max_threads);

template <typename T>
void setJemallocValue(const char * name, T value)
{
    T old_value;
    size_t old_value_size = sizeof(T);
    mallctl(name, &old_value, &old_value_size, reinterpret_cast<void*>(&value), sizeof(T));
    LOG_INFO(getLogger("Jemalloc"), "Value for {} set to {} (from {})", name, value, old_value);
}

template <typename T>
T getJemallocValue(const char * name)
{
    T value;
    size_t value_size = sizeof(T);
    mallctl(name, &value, &value_size, nullptr, 0);
    return value;
}

/// Each mallctl call consists of a string name lookup, which can be expensive.
/// This can be avoided by translating the name to a "Management Information Base" (MIB)
/// and using it in mallctlbymib calls.
template <typename T>
struct JemallocMibCache
{
    explicit JemallocMibCache(const char * name)
    {
        mallctlnametomib(name, mib, &mib_length);
    }

    void setValue(T value)
    {
        mallctlbymib(mib, mib_length, nullptr, nullptr, reinterpret_cast<void*>(&value), sizeof(T));
    }

    T getValue()
    {
        T value;
        size_t value_size = sizeof(T);
        mallctlbymib(mib, mib_length, &value, &value_size, nullptr, 0);
        return value;
    }

    void run()
    {
        mallctlbymib(mib, mib_length, nullptr, nullptr, nullptr, 0);
    }

private:
    static constexpr size_t max_mib_length = 4;
    size_t mib[max_mib_length];
    size_t mib_length = max_mib_length;
};

}

#endif
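A usage sketch for the MIB cache above (standalone illustration, not patch code; it assumes the ClickHouse include path for `Common/Jemalloc.h` and uses standard jemalloc mallctl names):

```cpp
#include <cstdio>
#include <jemalloc/jemalloc.h>
#include <Common/Jemalloc.h>

using namespace DB;

int main()
{
    // Resolve the names to MIBs once; subsequent reads/writes skip the string
    // lookup that a plain mallctl("stats.allocated", ...) would repeat every call.
    JemallocMibCache<size_t> allocated("stats.allocated");
    JemallocMibCache<uint64_t> epoch("epoch");

    epoch.setValue(1); // refresh jemalloc's stats snapshot
    printf("allocated: %zu bytes\n", allocated.getValue());
}
```

The design point of the cache: `mallctlnametomib` is paid once per metric name, which matters when a hot path (e.g. a periodic metrics worker) reads the same counters over and over.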
@ -20,13 +20,9 @@
#if USE_JEMALLOC
# include <jemalloc/jemalloc.h>

#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)

#endif

#include <atomic>
#include <cmath>
#include <random>
#include <cstdlib>
#include <string>

@ -115,8 +111,6 @@ void AllocationTrace::onFreeImpl(void * ptr, size_t size) const
namespace ProfileEvents
{
    extern const Event QueryMemoryLimitExceeded;
    extern const Event MemoryAllocatorPurge;
    extern const Event MemoryAllocatorPurgeTimeMicroseconds;
}

using namespace std::chrono_literals;

@ -126,15 +120,13 @@ static constexpr size_t log_peak_memory_usage_every = 1ULL << 30;
MemoryTracker total_memory_tracker(nullptr, VariableContext::Global);
MemoryTracker background_memory_tracker(&total_memory_tracker, VariableContext::User, false);

std::atomic<Int64> MemoryTracker::free_memory_in_allocator_arenas;

MemoryTracker::MemoryTracker(VariableContext level_) : parent(&total_memory_tracker), level(level_) {}
MemoryTracker::MemoryTracker(MemoryTracker * parent_, VariableContext level_) : parent(parent_), level(level_) {}

MemoryTracker::MemoryTracker(MemoryTracker * parent_, VariableContext level_, bool log_peak_memory_usage_in_destructor_)
    : parent(parent_)
    , log_peak_memory_usage_in_destructor(log_peak_memory_usage_in_destructor_)
    , level(level_)
{}
    : parent(parent_), log_peak_memory_usage_in_destructor(log_peak_memory_usage_in_destructor_), level(level_)
{
}

MemoryTracker::~MemoryTracker()
{

@ -204,10 +196,14 @@ void MemoryTracker::debugLogBigAllocationWithoutCheck(Int64 size [[maybe_unused]
        return;

    MemoryTrackerBlockerInThread blocker(VariableContext::Global);
    LOG_TEST(getLogger("MemoryTracker"), "Too big allocation ({} bytes) without checking memory limits, "
        "it may lead to OOM. Stack trace: {}", size, StackTrace().toString());
    LOG_TEST(
        getLogger("MemoryTracker"),
        "Too big allocation ({} bytes) without checking memory limits, "
        "it may lead to OOM. Stack trace: {}",
        size,
        StackTrace().toString());
#else
    return; /// Avoid trash logging in release builds
    /// Avoid trash logging in release builds
#endif
}

@ -228,6 +224,7 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed
    {
        /// For global memory tracker always update memory usage.
        amount.fetch_add(size, std::memory_order_relaxed);
        rss.fetch_add(size, std::memory_order_relaxed);

        auto metric_loaded = metric.load(std::memory_order_relaxed);
        if (metric_loaded != CurrentMetrics::end())

@ -249,6 +246,7 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed
      * So, we allow over-allocations.
      */
    Int64 will_be = size ? size + amount.fetch_add(size, std::memory_order_relaxed) : amount.load(std::memory_order_relaxed);
    Int64 will_be_rss = size ? size + rss.fetch_add(size, std::memory_order_relaxed) : rss.load(std::memory_order_relaxed);

    auto metric_loaded = metric.load(std::memory_order_relaxed);
    if (metric_loaded != CurrentMetrics::end() && size)

@ -275,6 +273,7 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed
        {
            /// Revert
            amount.fetch_sub(size, std::memory_order_relaxed);
            rss.fetch_sub(size, std::memory_order_relaxed);

            /// Prevent recursion. Exception::ctor -> std::string -> new[] -> MemoryTracker::alloc
            MemoryTrackerBlockerInThread untrack_lock(VariableContext::Global);

@ -297,33 +296,8 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed
        }
    }

    Int64 limit_to_check = current_hard_limit;

#if USE_JEMALLOC
    if (level == VariableContext::Global && allow_use_jemalloc_memory.load(std::memory_order_relaxed))
    {
        /// Jemalloc arenas may keep some extra memory.
        /// This memory was subtracted from RSS to decrease memory drift.
        /// In case memory is close to limit, try to purge the arenas.
        /// This is needed to avoid OOM, because some allocations are directly done with mmap.
        Int64 current_free_memory_in_allocator_arenas = free_memory_in_allocator_arenas.load(std::memory_order_relaxed);

        if (current_free_memory_in_allocator_arenas > 0 && current_hard_limit && current_free_memory_in_allocator_arenas + will_be > current_hard_limit)
        {
            if (free_memory_in_allocator_arenas.exchange(-current_free_memory_in_allocator_arenas) > 0)
            {
                Stopwatch watch;
                mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", nullptr, nullptr, nullptr, 0);
                ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurge);
                ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurgeTimeMicroseconds, watch.elapsedMicroseconds());
            }
        }

        limit_to_check += abs(current_free_memory_in_allocator_arenas);
    }
#endif

    if (unlikely(current_hard_limit && will_be > limit_to_check))
    if (unlikely(
            current_hard_limit && (will_be > current_hard_limit || (level == VariableContext::Global && will_be_rss > current_hard_limit))))
    {
        if (memoryTrackerCanThrow(level, false) && throw_if_memory_exceeded)
        {

@ -335,6 +309,7 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed
        {
            /// Revert
            amount.fetch_sub(size, std::memory_order_relaxed);
            rss.fetch_sub(size, std::memory_order_relaxed);

            /// Prevent recursion. Exception::ctor -> std::string -> new[] -> MemoryTracker::alloc
            MemoryTrackerBlockerInThread untrack_lock(VariableContext::Global);

@ -343,12 +318,13 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed
            throw DB::Exception(
                DB::ErrorCodes::MEMORY_LIMIT_EXCEEDED,
                "Memory limit{}{} exceeded: "
                "would use {} (attempt to allocate chunk of {} bytes), maximum: {}."
                "would use {} (attempt to allocate chunk of {} bytes), current RSS {}, maximum: {}."
                "{}{}",
                description ? " " : "",
                description ? description : "",
                formatReadableSizeWithBinarySuffix(will_be),
                size,
                formatReadableSizeWithBinarySuffix(rss.load(std::memory_order_relaxed)),
                formatReadableSizeWithBinarySuffix(current_hard_limit),
                overcommit_result == OvercommitResult::NONE ? "" : " OvercommitTracker decision: ",
                toDescription(overcommit_result));

@ -442,6 +418,7 @@ AllocationTrace MemoryTracker::free(Int64 size, double _sample_probability)
    {
        /// For global memory tracker always update memory usage.
        amount.fetch_sub(size, std::memory_order_relaxed);
        rss.fetch_sub(size, std::memory_order_relaxed);
        auto metric_loaded = metric.load(std::memory_order_relaxed);
        if (metric_loaded != CurrentMetrics::end())
            CurrentMetrics::sub(metric_loaded, size);

@ -455,7 +432,12 @@ AllocationTrace MemoryTracker::free(Int64 size, double _sample_probability)
    }

    Int64 accounted_size = size;
    if (level == VariableContext::Thread || level == VariableContext::Global)
    if (level == VariableContext::Global)
    {
        amount.fetch_sub(accounted_size, std::memory_order_relaxed);
        rss.fetch_sub(accounted_size, std::memory_order_relaxed);
    }
    else if (level == VariableContext::Thread)
    {
        /// Could become negative if memory allocated in this thread is freed in another one
        amount.fetch_sub(accounted_size, std::memory_order_relaxed);

@ -529,21 +511,29 @@ void MemoryTracker::reset()
}


void MemoryTracker::setRSS(Int64 rss_, Int64 free_memory_in_allocator_arenas_)
void MemoryTracker::updateRSS(Int64 rss_)
{
    Int64 new_amount = rss_;
    total_memory_tracker.rss.store(rss_, std::memory_order_relaxed);
}

void MemoryTracker::updateAllocated(Int64 allocated_)
{
    Int64 new_amount = allocated_;
    LOG_INFO(
        getLogger("MemoryTracker"),
        "Correcting the value of global memory tracker from {} to {}",
        ReadableSize(total_memory_tracker.amount.load(std::memory_order_relaxed)),
        ReadableSize(allocated_));
    total_memory_tracker.amount.store(new_amount, std::memory_order_relaxed);
    free_memory_in_allocator_arenas.store(free_memory_in_allocator_arenas_, std::memory_order_relaxed);

    auto metric_loaded = total_memory_tracker.metric.load(std::memory_order_relaxed);
    if (metric_loaded != CurrentMetrics::end())
        CurrentMetrics::set(metric_loaded, new_amount);

    bool log_memory_usage = true;
    total_memory_tracker.updatePeak(rss_, log_memory_usage);
    total_memory_tracker.updatePeak(new_amount, log_memory_usage);
}


void MemoryTracker::setSoftLimit(Int64 value)
{
    soft_limit.store(value, std::memory_order_relaxed);
@ -2,7 +2,6 @@

#include <atomic>
#include <chrono>
#include <optional>
#include <base/types.h>
#include <Common/CurrentMetrics.h>
#include <Common/VariableContext.h>

@ -57,9 +56,8 @@ private:
    std::atomic<Int64> soft_limit {0};
    std::atomic<Int64> hard_limit {0};
    std::atomic<Int64> profiler_limit {0};
    std::atomic_bool allow_use_jemalloc_memory {true};

    static std::atomic<Int64> free_memory_in_allocator_arenas;
    std::atomic<Int64> rss{0};

    Int64 profiler_step = 0;

@ -122,6 +120,11 @@ public:
        return amount.load(std::memory_order_relaxed);
    }

    Int64 getRSS() const
    {
        return rss.load(std::memory_order_relaxed);
    }

    // Merges and mutations may pass memory ownership to other threads thus in the end of execution
    // MemoryTracker for background task may have a non-zero counter.
    // This method is intended to fix the counter inside of background_memory_tracker.

@ -154,14 +157,6 @@ public:
    {
        return soft_limit.load(std::memory_order_relaxed);
    }
    void setAllowUseJemallocMemory(bool value)
    {
        allow_use_jemalloc_memory.store(value, std::memory_order_relaxed);
    }
    bool getAllowUseJemallocMmemory() const
    {
        return allow_use_jemalloc_memory.load(std::memory_order_relaxed);
    }

    /** Set limit if it was not set.
      * Otherwise, set limit to new value, if new value is greater than previous limit.

@ -249,10 +244,9 @@ public:
    /// Reset the accumulated data.
    void reset();

    /// Reset current counter to an RSS value.
    /// Jemalloc may have pre-allocated arenas, they are accounted in RSS.
    /// We can free these arenas in case of exception to avoid OOM.
    static void setRSS(Int64 rss_, Int64 free_memory_in_allocator_arenas_);
    /// update values based on external information (e.g. jemalloc's stat)
    static void updateRSS(Int64 rss_);
    static void updateAllocated(Int64 allocated_);

    /// Prints info about peak memory consumption into log.
    void logPeakMemoryUsage();
333
src/Common/MemoryWorker.cpp
Normal file
@ -0,0 +1,333 @@
#include <Common/MemoryWorker.h>

#include <IO/ReadBufferFromFile.h>
#include <IO/ReadBufferFromFileDescriptor.h>
#include <IO/ReadHelpers.h>
#include <base/cgroupsv2.h>
#include <Common/Jemalloc.h>
#include <Common/MemoryTracker.h>
#include <Common/ProfileEvents.h>
#include <Common/formatReadable.h>
#include <Common/logger_useful.h>

#include <fmt/ranges.h>

#include <filesystem>
#include <optional>

namespace fs = std::filesystem;

namespace ProfileEvents
{
    extern const Event MemoryAllocatorPurge;
    extern const Event MemoryAllocatorPurgeTimeMicroseconds;
    extern const Event MemoryWorkerRun;
    extern const Event MemoryWorkerRunElapsedMicroseconds;
}

namespace DB
{

namespace ErrorCodes
{
    extern const int FILE_DOESNT_EXIST;
    extern const int LOGICAL_ERROR;
}

#if defined(OS_LINUX)
namespace
{

using Metrics = std::map<std::string, uint64_t>;

/// Format is
/// kernel 5
/// rss 15
/// [...]
Metrics readAllMetricsFromStatFile(ReadBufferFromFile & buf)
{
    Metrics metrics;
    while (!buf.eof())
    {
        std::string current_key;
        readStringUntilWhitespace(current_key, buf);

        assertChar(' ', buf);

        uint64_t value = 0;
        readIntText(value, buf);
        assertChar('\n', buf);

        auto [_, inserted] = metrics.emplace(std::move(current_key), value);
        chassert(inserted, "Duplicate keys in stat file");
    }
    return metrics;
}

uint64_t readMetricFromStatFile(ReadBufferFromFile & buf, std::string_view key)
{
    while (!buf.eof())
    {
        std::string current_key;
        readStringUntilWhitespace(current_key, buf);
        if (current_key != key)
        {
            std::string dummy;
            readStringUntilNewlineInto(dummy, buf);
            buf.ignore();
            continue;
        }

        assertChar(' ', buf);
        uint64_t value = 0;
        readIntText(value, buf);
        return value;
    }
    LOG_ERROR(getLogger("CgroupsReader"), "Cannot find '{}' in '{}'", key, buf.getFileName());
    return 0;
}

struct CgroupsV1Reader : ICgroupsReader
|
||||
{
|
||||
explicit CgroupsV1Reader(const fs::path & stat_file_dir) : buf(stat_file_dir / "memory.stat") { }
|
||||
|
||||
uint64_t readMemoryUsage() override
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
buf.rewind();
|
||||
return readMetricFromStatFile(buf, "rss");
|
||||
}
|
||||
|
||||
std::string dumpAllStats() override
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
buf.rewind();
|
||||
return fmt::format("{}", readAllMetricsFromStatFile(buf));
|
||||
}
|
||||
|
||||
private:
|
||||
std::mutex mutex;
|
||||
ReadBufferFromFile buf TSA_GUARDED_BY(mutex);
|
||||
};
|
||||
|
||||
struct CgroupsV2Reader : ICgroupsReader
|
||||
{
|
||||
explicit CgroupsV2Reader(const fs::path & stat_file_dir) : stat_buf(stat_file_dir / "memory.stat") { }
|
||||
|
||||
uint64_t readMemoryUsage() override
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
stat_buf.rewind();
|
||||
return readMetricFromStatFile(stat_buf, "anon");
|
||||
}
|
||||
|
||||
std::string dumpAllStats() override
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
stat_buf.rewind();
|
||||
return fmt::format("{}", readAllMetricsFromStatFile(stat_buf));
|
||||
}
|
||||
|
||||
private:
|
||||
std::mutex mutex;
|
||||
ReadBufferFromFile stat_buf TSA_GUARDED_BY(mutex);
|
||||
};
|
||||
|
||||
/// Caveats:
/// - All of the logic in this file assumes that the current process is the only process in the
///   containing cgroup (or more precisely: the only process with significant memory consumption).
///   If this is not the case, then other processes' memory consumption may affect the internal
///   memory tracker ...
/// - Cgroups v1 and v2 allow nested cgroup hierarchies. As v1 has been deprecated for over half a
///   decade and will go away at some point, hierarchical detection is only implemented for v2.
/// - I did not test what happens if a host has v1 and v2 simultaneously enabled. I believe such
///   systems existed only for a short transition period.

std::optional<std::string> getCgroupsV1Path()
{
    auto path = default_cgroups_mount / "memory/memory.stat";
    if (!fs::exists(path))
        return {};
    return {default_cgroups_mount / "memory"};
}

std::pair<std::string, ICgroupsReader::CgroupsVersion> getCgroupsPath()
{
    auto v2_path = getCgroupsV2PathContainingFile("memory.current");
    if (v2_path.has_value())
        return {*v2_path, ICgroupsReader::CgroupsVersion::V2};

    auto v1_path = getCgroupsV1Path();
    if (v1_path.has_value())
        return {*v1_path, ICgroupsReader::CgroupsVersion::V1};

    throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot find cgroups v1 or v2 current memory file");
}

}

std::shared_ptr<ICgroupsReader> ICgroupsReader::createCgroupsReader(ICgroupsReader::CgroupsVersion version, const std::filesystem::path & cgroup_path)
{
    if (version == CgroupsVersion::V2)
        return std::make_shared<CgroupsV2Reader>(cgroup_path);
    else
    {
        chassert(version == CgroupsVersion::V1);
        return std::make_shared<CgroupsV1Reader>(cgroup_path);
    }
}
#endif

namespace
{

std::string_view sourceToString(MemoryWorker::MemoryUsageSource source)
{
    switch (source)
    {
        case MemoryWorker::MemoryUsageSource::Cgroups: return "Cgroups";
        case MemoryWorker::MemoryUsageSource::Jemalloc: return "Jemalloc";
        case MemoryWorker::MemoryUsageSource::None: return "None";
    }
}

}

/// We try to pick the best possible supported source for reading memory usage.
/// Supported sources, in order of priority:
/// - reading from cgroups' pseudo-files (fastest and most accurate)
/// - reading jemalloc's resident stat (doesn't take into account allocations that didn't use jemalloc)
/// Also, different tick rates are used because not all options are equally fast.
MemoryWorker::MemoryWorker(uint64_t period_ms_)
    : log(getLogger("MemoryWorker"))
    , period_ms(period_ms_)
{
#if defined(OS_LINUX)
    try
    {
        static constexpr uint64_t cgroups_memory_usage_tick_ms{50};

        const auto [cgroup_path, version] = getCgroupsPath();
        LOG_INFO(
            getLogger("CgroupsReader"),
            "Will create cgroup reader from '{}' (cgroups version: {})",
            cgroup_path,
            (version == ICgroupsReader::CgroupsVersion::V1) ? "v1" : "v2");

        cgroups_reader = ICgroupsReader::createCgroupsReader(version, cgroup_path);
        source = MemoryUsageSource::Cgroups;
        if (period_ms == 0)
            period_ms = cgroups_memory_usage_tick_ms;

        return;
    }
    catch (...)
    {
        tryLogCurrentException(log, "Cannot use cgroups reader");
    }
#endif

#if USE_JEMALLOC
    static constexpr uint64_t jemalloc_memory_usage_tick_ms{100};

    source = MemoryUsageSource::Jemalloc;
    if (period_ms == 0)
        period_ms = jemalloc_memory_usage_tick_ms;
#endif
}

MemoryWorker::MemoryUsageSource MemoryWorker::getSource()
{
    return source;
}

void MemoryWorker::start()
{
    if (source == MemoryUsageSource::None)
        return;

    LOG_INFO(
        getLogger("MemoryWorker"),
        "Starting background memory thread with period of {}ms, using {} as source",
        period_ms,
        sourceToString(source));
    background_thread = ThreadFromGlobalPool([this] { backgroundThread(); });
}

MemoryWorker::~MemoryWorker()
{
    {
        std::unique_lock lock(mutex);
        shutdown = true;
    }
    cv.notify_all();

    if (background_thread.joinable())
        background_thread.join();
}

uint64_t MemoryWorker::getMemoryUsage()
{
    switch (source)
    {
        case MemoryUsageSource::Cgroups:
            return cgroups_reader != nullptr ? cgroups_reader->readMemoryUsage() : 0;
        case MemoryUsageSource::Jemalloc:
#if USE_JEMALLOC
            return resident_mib.getValue();
#else
            return 0;
#endif
        case MemoryUsageSource::None:
            throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Trying to fetch memory usage while no memory source can be used");
    }
}

void MemoryWorker::backgroundThread()
{
    std::chrono::milliseconds chrono_period_ms{period_ms};
    [[maybe_unused]] bool first_run = true;
    std::unique_lock lock(mutex);
    while (true)
    {
        cv.wait_for(lock, chrono_period_ms, [this] { return shutdown; });
        if (shutdown)
            return;

        Stopwatch total_watch;

#if USE_JEMALLOC
        if (source == MemoryUsageSource::Jemalloc)
            epoch_mib.setValue(0);
#endif

        Int64 resident = getMemoryUsage();
        MemoryTracker::updateRSS(resident);

#if USE_JEMALLOC
        if (resident > total_memory_tracker.getHardLimit())
        {
            Stopwatch purge_watch;
            purge_mib.run();
            ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurge);
            ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurgeTimeMicroseconds, purge_watch.elapsedMicroseconds());
        }
#endif

#if USE_JEMALLOC
        if (unlikely(first_run || total_memory_tracker.get() < 0))
        {
            if (source != MemoryUsageSource::Jemalloc)
                epoch_mib.setValue(0);

            MemoryTracker::updateAllocated(allocated_mib.getValue());
        }
#endif

        ProfileEvents::increment(ProfileEvents::MemoryWorkerRun);
        ProfileEvents::increment(ProfileEvents::MemoryWorkerRunElapsedMicroseconds, total_watch.elapsedMicroseconds());
        first_run = false;
    }
}

}
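The epoch_mib / resident_mib / allocated_mib / purge_mib members used above wrap jemalloc's mallctl interface. For reference, a minimal standalone sketch of that pattern (plain jemalloc API, not ClickHouse's JemallocMibCache): jemalloc stats are only refreshed when the "epoch" control is bumped, and pre-resolving a name to a MIB avoids the string lookup on every tick.

    #include <jemalloc/jemalloc.h>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        size_t mib[8];
        size_t miblen = 8;
        if (mallctlnametomib("stats.resident", mib, &miblen) != 0)       /// resolve the name once
            return 1;

        uint64_t epoch = 1;
        size_t epoch_len = sizeof(epoch);
        mallctl("epoch", &epoch, &epoch_len, &epoch, sizeof(epoch));     /// refresh the stats snapshot

        size_t resident = 0;
        size_t len = sizeof(resident);
        mallctlbymib(mib, miblen, &resident, &len, nullptr, 0);          /// cheap repeated read by MIB
        printf("resident: %zu bytes\n", resident);
    }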
84
src/Common/MemoryWorker.h
Normal file
@ -0,0 +1,84 @@
#pragma once

#include <Common/CgroupsMemoryUsageObserver.h>
#include <Common/ThreadPool.h>
#include <Common/Jemalloc.h>

namespace DB
{

struct ICgroupsReader
{
    enum class CgroupsVersion : uint8_t
    {
        V1,
        V2
    };

#if defined(OS_LINUX)
    static std::shared_ptr<ICgroupsReader>
    createCgroupsReader(ICgroupsReader::CgroupsVersion version, const std::filesystem::path & cgroup_path);
#endif

    virtual ~ICgroupsReader() = default;

    virtual uint64_t readMemoryUsage() = 0;

    virtual std::string dumpAllStats() = 0;
};


/// Corrects MemoryTracker based on external information (e.g. cgroups or stats.resident from jemalloc).
/// The worker spawns a background thread which periodically reads the current resident memory from the source
/// and sends the value to the global MemoryTracker.
/// It can also do additional things, like purging jemalloc dirty pages if the current memory usage is higher than the global hard limit.
class MemoryWorker
{
public:
    explicit MemoryWorker(uint64_t period_ms_);

    enum class MemoryUsageSource : uint8_t
    {
        None,
        Cgroups,
        Jemalloc
    };

    MemoryUsageSource getSource();

    void start();

    ~MemoryWorker();
private:
    uint64_t getMemoryUsage();

    void backgroundThread();

    ThreadFromGlobalPool background_thread;

    std::mutex mutex;
    std::condition_variable cv;
    bool shutdown = false;

    LoggerPtr log;

    uint64_t period_ms;

    MemoryUsageSource source{MemoryUsageSource::None};

    std::shared_ptr<ICgroupsReader> cgroups_reader;

#if USE_JEMALLOC
    JemallocMibCache<uint64_t> epoch_mib{"epoch"};
    JemallocMibCache<size_t> resident_mib{"stats.resident"};
    JemallocMibCache<size_t> allocated_mib{"stats.allocated"};

#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)
    JemallocMibCache<size_t> purge_mib{"arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge"};
#undef STRINGIFY
#undef STRINGIFY_HELPER
#endif
};

}
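Taken together, the header and the implementation give the following lifecycle. A minimal usage sketch (hypothetical wiring, not part of this commit):

    /// period_ms_ = 0 lets the constructor pick the source-specific default tick
    /// (50 ms for cgroups, 100 ms for jemalloc).
    DB::MemoryWorker memory_worker(/* period_ms_ = */ 0);
    if (memory_worker.getSource() != DB::MemoryWorker::MemoryUsageSource::None)
        memory_worker.start();   /// spawns backgroundThread(), which feeds MemoryTracker::updateRSS()
    /// On shutdown, the destructor signals the condition variable and joins the thread.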
@ -122,12 +122,18 @@ public:

        return result.entry.isNull() || !result.is_usable || (skip_read_only_replicas && result.is_readonly);
    }

    void checkTryResultIsValid(const TryResult & result, bool skip_read_only_replicas) const
    TryResult getValidTryResult(const std::vector<TryResult> & results, bool skip_read_only_replicas) const
    {
        if (results.empty())
            throw DB::Exception(DB::ErrorCodes::ALL_CONNECTION_TRIES_FAILED, "Cannot get any valid connection because all connection tries failed");

        auto result = results.front();
        if (isTryResultInvalid(result, skip_read_only_replicas))
            throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR,
                "Got an invalid connection result: entry.isNull {}, is_usable {}, is_up_to_date {}, delay {}, is_readonly {}, skip_read_only_replicas {}",
                result.entry.isNull(), result.is_usable, result.is_up_to_date, result.delay, result.is_readonly, skip_read_only_replicas);

        return result;
    }

    size_t getPoolSize() const { return nested_pools.size(); }
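For context, a hedged sketch of how a caller would use the new helper instead of the removed checkTryResultIsValid(); the pool variable and the getMany() call are assumptions for illustration:

    /// `pool` is assumed to be a PoolWithFailoverBase-derived pool whose getMany()
    /// returns std::vector<TryResult>.
    auto results = pool.getMany(/* ... */);
    /// Throws ALL_CONNECTION_TRIES_FAILED on an empty result set and LOGICAL_ERROR
    /// on an unusable entry, so the caller no longer validates the result manually.
    auto result = pool.getValidTryResult(results, /* skip_read_only_replicas = */ true);
    auto & connection = *result.entry;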
@ -827,6 +827,9 @@ The server successfully detected this situation and will download merged part fr

    M(GWPAsanAllocateSuccess, "Number of successful allocations done by GWPAsan") \
    M(GWPAsanAllocateFailed, "Number of failed allocations done by GWPAsan (i.e. filled pool)") \
    M(GWPAsanFree, "Number of free operations done by GWPAsan") \
    \
    M(MemoryWorkerRun, "Number of runs done by MemoryWorker in background") \
    M(MemoryWorkerRunElapsedMicroseconds, "Total time spent by MemoryWorker for background work") \


#ifdef APPLY_FOR_EXTERNAL_EVENTS
@ -67,10 +67,18 @@ std::string SigsegvErrorString(const siginfo_t & info, [[maybe_unused]] const uc
        = info.si_addr == nullptr ? "NULL pointer"s : (shouldShowAddress(info.si_addr) ? fmt::format("{}", info.si_addr) : ""s);

    const std::string_view access =
#if defined(__x86_64__) && !defined(OS_FREEBSD) && !defined(OS_DARWIN) && !defined(__arm__) && !defined(__powerpc__)
        (context.uc_mcontext.gregs[REG_ERR] & 0x02) ? "write" : "read";
#if defined(__arm__)
        "<not available on ARM>";
#elif defined(__powerpc__)
        "<not available on PowerPC>";
#elif defined(OS_DARWIN)
        "<not available on Darwin>";
#elif defined(OS_FREEBSD)
        "<not available on FreeBSD>";
#elif !defined(__x86_64__)
        "<not available>";
#else
        "";
        (context.uc_mcontext.gregs[REG_ERR] & 0x02) ? "write" : "read";
#endif

    std::string_view message;
@ -171,6 +171,7 @@ bool isUserError(Error zk_return_code)

void CreateRequest::addRootPath(const String & root_path) { Coordination::addRootPath(path, root_path); }
void RemoveRequest::addRootPath(const String & root_path) { Coordination::addRootPath(path, root_path); }
void RemoveRecursiveRequest::addRootPath(const String & root_path) { Coordination::addRootPath(path, root_path); }
void ExistsRequest::addRootPath(const String & root_path) { Coordination::addRootPath(path, root_path); }
void GetRequest::addRootPath(const String & root_path) { Coordination::addRootPath(path, root_path); }
void SetRequest::addRootPath(const String & root_path) { Coordination::addRootPath(path, root_path); }
@ -248,6 +248,23 @@ struct RemoveResponse : virtual Response
{
};

struct RemoveRecursiveRequest : virtual Request
{
    String path;

    /// strict limit for number of deleted nodes
    uint32_t remove_nodes_limit = 1;

    void addRootPath(const String & root_path) override;
    String getPath() const override { return path; }

    size_t bytesSize() const override { return path.size() + sizeof(remove_nodes_limit); }
};

struct RemoveRecursiveResponse : virtual Response
{
};

struct ExistsRequest : virtual Request
{
    String path;
@ -430,6 +447,7 @@ struct ErrorResponse : virtual Response

using CreateCallback = std::function<void(const CreateResponse &)>;
using RemoveCallback = std::function<void(const RemoveResponse &)>;
using RemoveRecursiveCallback = std::function<void(const RemoveRecursiveResponse &)>;
using ExistsCallback = std::function<void(const ExistsResponse &)>;
using GetCallback = std::function<void(const GetResponse &)>;
using SetCallback = std::function<void(const SetResponse &)>;
@ -587,6 +605,11 @@ public:
        int32_t version,
        RemoveCallback callback) = 0;

    virtual void removeRecursive(
        const String & path,
        uint32_t remove_nodes_limit,
        RemoveRecursiveCallback callback) = 0;

    virtual void exists(
        const String & path,
        ExistsCallback callback,
@ -90,6 +90,36 @@ struct TestKeeperRemoveRequest final : RemoveRequest, TestKeeperRequest
    }
};

struct TestKeeperRemoveRecursiveRequest final : RemoveRecursiveRequest, TestKeeperRequest
{
    TestKeeperRemoveRecursiveRequest() = default;
    explicit TestKeeperRemoveRecursiveRequest(const RemoveRecursiveRequest & base) : RemoveRecursiveRequest(base) {}
    ResponsePtr createResponse() const override;
    std::pair<ResponsePtr, Undo> process(TestKeeper::Container & container, int64_t zxid) const override;

    void processWatches(TestKeeper::Watches & node_watches, TestKeeper::Watches & list_watches) const override
    {
        std::vector<std::pair<String, size_t>> deleted;

        auto add_deleted_watches = [&](TestKeeper::Watches & w)
        {
            for (const auto & [watch_path, _] : w)
                if (watch_path.starts_with(path))
                    deleted.emplace_back(watch_path, std::count(watch_path.begin(), watch_path.end(), '/'));
        };

        add_deleted_watches(node_watches);
        add_deleted_watches(list_watches);
        std::sort(deleted.begin(), deleted.end(), [](const auto & lhs, const auto & rhs)
        {
            return lhs.second < rhs.second;
        });

        for (const auto & [watch_path, _] : deleted)
            processWatchesImpl(watch_path, node_watches, list_watches);
    }
};

struct TestKeeperExistsRequest final : ExistsRequest, TestKeeperRequest
{
    ResponsePtr createResponse() const override;
@ -175,6 +205,10 @@ struct TestKeeperMultiRequest final : MultiRequest, TestKeeperRequest
        {
            requests.push_back(std::make_shared<TestKeeperRemoveRequest>(*concrete_request_remove));
        }
        else if (const auto * concrete_request_remove_recursive = dynamic_cast<const RemoveRecursiveRequest *>(generic_request.get()))
        {
            requests.push_back(std::make_shared<TestKeeperRemoveRecursiveRequest>(*concrete_request_remove_recursive));
        }
        else if (const auto * concrete_request_set = dynamic_cast<const SetRequest *>(generic_request.get()))
        {
            requests.push_back(std::make_shared<TestKeeperSetRequest>(*concrete_request_set));
@ -313,6 +347,62 @@ std::pair<ResponsePtr, Undo> TestKeeperRemoveRequest::process(TestKeeper::Contai
    return { std::make_shared<RemoveResponse>(response), undo };
}

std::pair<ResponsePtr, Undo> TestKeeperRemoveRecursiveRequest::process(TestKeeper::Container & container, int64_t zxid) const
{
    RemoveRecursiveResponse response;
    response.zxid = zxid;
    Undo undo;

    auto root_it = container.find(path);
    if (root_it == container.end())
    {
        response.error = Error::ZNONODE;
        return { std::make_shared<RemoveRecursiveResponse>(response), undo };
    }

    std::vector<std::pair<std::string, Coordination::TestKeeper::Node>> children;

    for (auto it = std::next(root_it); it != container.end(); ++it)
    {
        const auto & [child_path, child_node] = *it;

        if (child_path.starts_with(path))
            children.emplace_back(child_path, child_node);
        else
            break;
    }

    if (children.size() > remove_nodes_limit)
    {
        response.error = Error::ZNOTEMPTY;
        return { std::make_shared<RemoveRecursiveResponse>(response), undo };
    }

    auto & parent = container.at(parentPath(path));
    --parent.stat.numChildren;
    ++parent.stat.cversion;

    for (const auto & [child_path, child_node] : children)
    {
        auto child_it = container.find(child_path);
        chassert(child_it != container.end());
        container.erase(child_it);
    }

    response.error = Error::ZOK;
    undo = [&container, dead = std::move(children), root_path = path]()
    {
        for (auto && [child_path, child_node] : dead)
            container.emplace(child_path, child_node);

        auto & undo_parent = container.at(parentPath(root_path));
        ++undo_parent.stat.numChildren;
        --undo_parent.stat.cversion;
    };

    return { std::make_shared<RemoveRecursiveResponse>(response), undo };
}

std::pair<ResponsePtr, Undo> TestKeeperExistsRequest::process(TestKeeper::Container & container, int64_t zxid) const
{
    ExistsResponse response;
@ -530,6 +620,7 @@ std::pair<ResponsePtr, Undo> TestKeeperMultiRequest::process(TestKeeper::Contain

ResponsePtr TestKeeperCreateRequest::createResponse() const { return std::make_shared<CreateResponse>(); }
ResponsePtr TestKeeperRemoveRequest::createResponse() const { return std::make_shared<RemoveResponse>(); }
ResponsePtr TestKeeperRemoveRecursiveRequest::createResponse() const { return std::make_shared<RemoveRecursiveResponse>(); }
ResponsePtr TestKeeperExistsRequest::createResponse() const { return std::make_shared<ExistsResponse>(); }
ResponsePtr TestKeeperGetRequest::createResponse() const { return std::make_shared<GetResponse>(); }
ResponsePtr TestKeeperSetRequest::createResponse() const { return std::make_shared<SetResponse>(); }
@ -771,6 +862,21 @@ void TestKeeper::remove(
    pushRequest(std::move(request_info));
}

void TestKeeper::removeRecursive(
    const String & path,
    uint32_t remove_nodes_limit,
    RemoveRecursiveCallback callback)
{
    TestKeeperRemoveRecursiveRequest request;
    request.path = path;
    request.remove_nodes_limit = remove_nodes_limit;

    RequestInfo request_info;
    request_info.request = std::make_shared<TestKeeperRemoveRecursiveRequest>(std::move(request));
    request_info.callback = [callback](const Response & response) { callback(dynamic_cast<const RemoveRecursiveResponse &>(response)); };
    pushRequest(std::move(request_info));
}

void TestKeeper::exists(
    const String & path,
    ExistsCallback callback,
@ -58,6 +58,11 @@ public:
        int32_t version,
        RemoveCallback callback) override;

    void removeRecursive(
        const String & path,
        uint32_t remove_nodes_limit,
        RemoveRecursiveCallback callback) override;

    void exists(
        const String & path,
        ExistsCallback callback,
@ -31,6 +31,7 @@ using AsyncResponses = std::vector<std::pair<std::string, std::future<R>>>;

Coordination::RequestPtr makeCreateRequest(const std::string & path, const std::string & data, int create_mode, bool ignore_if_exists = false);
Coordination::RequestPtr makeRemoveRequest(const std::string & path, int version);
Coordination::RequestPtr makeRemoveRecursiveRequest(const std::string & path, uint32_t remove_nodes_limit);
Coordination::RequestPtr makeSetRequest(const std::string & path, const std::string & data, int version);
Coordination::RequestPtr makeCheckRequest(const std::string & path, int version);
|
@ -979,18 +979,47 @@ bool ZooKeeper::tryRemoveChildrenRecursive(const std::string & path, bool probab
|
||||
return removed_as_expected;
|
||||
}
|
||||
|
||||
void ZooKeeper::removeRecursive(const std::string & path)
|
||||
void ZooKeeper::removeRecursive(const std::string & path, uint32_t remove_nodes_limit)
|
||||
{
|
||||
removeChildrenRecursive(path);
|
||||
remove(path);
|
||||
if (!isFeatureEnabled(DB::KeeperFeatureFlag::REMOVE_RECURSIVE))
|
||||
{
|
||||
removeChildrenRecursive(path);
|
||||
remove(path);
|
||||
return;
|
||||
}
|
||||
|
||||
check(tryRemoveRecursive(path, remove_nodes_limit), path);
|
||||
}
|
||||
|
||||
void ZooKeeper::tryRemoveRecursive(const std::string & path)
|
||||
Coordination::Error ZooKeeper::tryRemoveRecursive(const std::string & path, uint32_t remove_nodes_limit)
|
||||
{
|
||||
tryRemoveChildrenRecursive(path);
|
||||
tryRemove(path);
|
||||
}
|
||||
if (!isFeatureEnabled(DB::KeeperFeatureFlag::REMOVE_RECURSIVE))
|
||||
{
|
||||
tryRemoveChildrenRecursive(path);
|
||||
return tryRemove(path);
|
||||
}
|
||||
|
||||
auto promise = std::make_shared<std::promise<Coordination::RemoveRecursiveResponse>>();
|
||||
auto future = promise->get_future();
|
||||
|
||||
auto callback = [promise](const Coordination::RemoveRecursiveResponse & response) mutable
|
||||
{
|
||||
promise->set_value(response);
|
||||
};
|
||||
|
||||
impl->removeRecursive(path, remove_nodes_limit, std::move(callback));
|
||||
|
||||
if (future.wait_for(std::chrono::milliseconds(args.operation_timeout_ms)) != std::future_status::ready)
|
||||
{
|
||||
impl->finalize(fmt::format("Operation timeout on {} {}", Coordination::OpNum::RemoveRecursive, path));
|
||||
return Coordination::Error::ZOPERATIONTIMEOUT;
|
||||
}
|
||||
else
|
||||
{
|
||||
auto response = future.get();
|
||||
return response.error;
|
||||
}
|
||||
}
|
||||
|
||||
namespace
|
||||
{
|
||||
@ -1619,6 +1648,14 @@ Coordination::RequestPtr makeRemoveRequest(const std::string & path, int version
    return request;
}

Coordination::RequestPtr makeRemoveRecursiveRequest(const std::string & path, uint32_t remove_nodes_limit)
{
    auto request = std::make_shared<Coordination::RemoveRecursiveRequest>();
    request->path = path;
    request->remove_nodes_limit = remove_nodes_limit;
    return request;
}

Coordination::RequestPtr makeSetRequest(const std::string & path, const std::string & data, int version)
{
    auto request = std::make_shared<Coordination::SetRequest>();
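Since the factory helper returns a generic Coordination::RequestPtr, the new op composes with multi-requests like the other write ops. A hedged sketch (the paths and the surrounding zookeeper handle are hypothetical):

    Coordination::Requests ops;
    ops.emplace_back(zkutil::makeCreateRequest("/table/cleanup_flag", "", zkutil::CreateMode::Persistent));
    ops.emplace_back(zkutil::makeRemoveRecursiveRequest("/table/old_subtree", /* remove_nodes_limit = */ 100));
    zookeeper->multi(ops);   /// the whole batch commits or fails atomically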
@ -479,15 +479,16 @@ public:

    Int64 getClientID();

    /// Remove the node with the subtree. If someone concurrently adds or removes a node
    /// in the subtree, the result is undefined.
    void removeRecursive(const std::string & path);
    /// Remove the node with the subtree.
    /// If Keeper supports the RemoveRecursive operation, it will be performed atomically.
    /// Otherwise, if someone concurrently adds or removes a node in the subtree, the result is undefined.
    void removeRecursive(const std::string & path, uint32_t remove_nodes_limit = 100);

    /// Remove the node with the subtree. If someone concurrently removes a node in the subtree,
    /// this will not cause errors.
    /// Same as removeRecursive, but if Keeper does not support RemoveRecursive and
    /// someone concurrently removes a node in the subtree, this will not cause errors.
    /// For instance, you can call this method twice concurrently for the same node and the end
    /// result would be the same as for a single call.
    void tryRemoveRecursive(const std::string & path);
    Coordination::Error tryRemoveRecursive(const std::string & path, uint32_t remove_nodes_limit = 100);

    /// Similar to removeRecursive(...) and tryRemoveRecursive(...), but does not remove path itself.
    /// Nodes defined as RemoveException will not be deleted.
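A minimal usage sketch of the two new signatures (the paths and the fallback routine are hypothetical; the error semantics follow the TestKeeper implementation above, where exceeding the limit yields ZNOTEMPTY):

    zookeeper->removeRecursive("/table/replicas/r1");   /// throws on failure; atomic when the server supports it

    auto code = zookeeper->tryRemoveRecursive("/tmp/scratch", /* remove_nodes_limit = */ 1000);
    if (code == Coordination::Error::ZNOTEMPTY)
        cleanupInBatches();   /// hypothetical fallback: the subtree was larger than the atomic limit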
@ -1,5 +1,5 @@
#include "Common/ZooKeeper/IKeeper.h"
#include "Common/ZooKeeper/ZooKeeperConstants.h"
#include <Common/ZooKeeper/IKeeper.h>
#include <Common/ZooKeeper/ZooKeeperConstants.h>
#include <Common/ZooKeeper/ZooKeeperCommon.h>
#include <Common/ZooKeeper/ZooKeeperIO.h>
#include <Common/Stopwatch.h>
@ -232,6 +232,27 @@ void ZooKeeperRemoveRequest::readImpl(ReadBuffer & in)
    Coordination::read(version, in);
}

void ZooKeeperRemoveRecursiveRequest::writeImpl(WriteBuffer & out) const
{
    Coordination::write(path, out);
    Coordination::write(remove_nodes_limit, out);
}

void ZooKeeperRemoveRecursiveRequest::readImpl(ReadBuffer & in)
{
    Coordination::read(path, in);
    Coordination::read(remove_nodes_limit, in);
}

std::string ZooKeeperRemoveRecursiveRequest::toStringImpl(bool /*short_format*/) const
{
    return fmt::format(
        "path = {}\n"
        "remove_nodes_limit = {}",
        path,
        remove_nodes_limit);
}

void ZooKeeperExistsRequest::writeImpl(WriteBuffer & out) const
{
    Coordination::write(path, out);
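The wire format of the new request is therefore just the path followed by the uint32 limit. A hedged round-trip sketch using ClickHouse's string-backed buffers (assuming writeImpl/readImpl remain publicly accessible, as in the struct declarations later in this commit):

    #include <IO/WriteBufferFromString.h>
    #include <IO/ReadBufferFromString.h>

    Coordination::ZooKeeperRemoveRecursiveRequest request;
    request.path = "/some/node";
    request.remove_nodes_limit = 100;

    DB::WriteBufferFromOwnString out;
    request.writeImpl(out);                  /// serializes path, then remove_nodes_limit

    Coordination::ZooKeeperRemoveRecursiveRequest parsed;
    DB::ReadBufferFromString in(out.str());
    parsed.readImpl(in);                     /// parsed.path == "/some/node", parsed.remove_nodes_limit == 100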
@ -510,6 +531,11 @@ ZooKeeperMultiRequest::ZooKeeperMultiRequest(std::span<const Coordination::Reque
            checkOperationType(Write);
            requests.push_back(std::make_shared<ZooKeeperRemoveRequest>(*concrete_request_remove));
        }
        else if (const auto * concrete_request_remove_recursive = dynamic_cast<const RemoveRecursiveRequest *>(generic_request.get()))
        {
            checkOperationType(Write);
            requests.push_back(std::make_shared<ZooKeeperRemoveRecursiveRequest>(*concrete_request_remove_recursive));
        }
        else if (const auto * concrete_request_set = dynamic_cast<const SetRequest *>(generic_request.get()))
        {
            checkOperationType(Write);
@ -707,6 +733,7 @@ ZooKeeperResponsePtr ZooKeeperHeartbeatRequest::makeResponse() const { return se
ZooKeeperResponsePtr ZooKeeperSyncRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperSyncResponse>()); }
ZooKeeperResponsePtr ZooKeeperAuthRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperAuthResponse>()); }
ZooKeeperResponsePtr ZooKeeperRemoveRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperRemoveResponse>()); }
ZooKeeperResponsePtr ZooKeeperRemoveRecursiveRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperRemoveRecursiveResponse>()); }
ZooKeeperResponsePtr ZooKeeperExistsRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperExistsResponse>()); }
ZooKeeperResponsePtr ZooKeeperGetRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperGetResponse>()); }
ZooKeeperResponsePtr ZooKeeperSetRequest::makeResponse() const { return setTime(std::make_shared<ZooKeeperSetResponse>()); }
@ -1024,6 +1051,7 @@ ZooKeeperRequestFactory::ZooKeeperRequestFactory()
    registerZooKeeperRequest<OpNum::SetACL, ZooKeeperSetACLRequest>(*this);
    registerZooKeeperRequest<OpNum::FilteredList, ZooKeeperFilteredListRequest>(*this);
    registerZooKeeperRequest<OpNum::CheckNotExists, ZooKeeperCheckRequest>(*this);
    registerZooKeeperRequest<OpNum::RemoveRecursive, ZooKeeperRemoveRecursiveRequest>(*this);
}

PathMatchResult matchPath(std::string_view path, std::string_view match_to)
@ -285,6 +285,31 @@ struct ZooKeeperRemoveResponse final : RemoveResponse, ZooKeeperResponse
    size_t bytesSize() const override { return RemoveResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
};

struct ZooKeeperRemoveRecursiveRequest final : RemoveRecursiveRequest, ZooKeeperRequest
{
    ZooKeeperRemoveRecursiveRequest() = default;
    explicit ZooKeeperRemoveRecursiveRequest(const RemoveRecursiveRequest & base) : RemoveRecursiveRequest(base) {}

    OpNum getOpNum() const override { return OpNum::RemoveRecursive; }
    void writeImpl(WriteBuffer & out) const override;
    void readImpl(ReadBuffer & in) override;
    std::string toStringImpl(bool short_format) const override;

    ZooKeeperResponsePtr makeResponse() const override;
    bool isReadRequest() const override { return false; }

    size_t bytesSize() const override { return RemoveRecursiveRequest::bytesSize() + sizeof(xid); }
};

struct ZooKeeperRemoveRecursiveResponse : RemoveRecursiveResponse, ZooKeeperResponse
{
    void readImpl(ReadBuffer &) override {}
    void writeImpl(WriteBuffer &) const override {}
    OpNum getOpNum() const override { return OpNum::RemoveRecursive; }

    size_t bytesSize() const override { return RemoveRecursiveResponse::bytesSize() + sizeof(xid) + sizeof(zxid); }
};

struct ZooKeeperExistsRequest final : ExistsRequest, ZooKeeperRequest
{
    ZooKeeperExistsRequest() = default;
@ -29,6 +29,7 @@ static const std::unordered_set<int32_t> VALID_OPERATIONS =
    static_cast<int32_t>(OpNum::GetACL),
    static_cast<int32_t>(OpNum::FilteredList),
    static_cast<int32_t>(OpNum::CheckNotExists),
    static_cast<int32_t>(OpNum::RemoveRecursive),
};

OpNum getOpNum(int32_t raw_op_num)
@ -40,6 +40,7 @@ enum class OpNum : int32_t
    FilteredList = 500,
    CheckNotExists = 501,
    CreateIfNotExists = 502,
    RemoveRecursive = 503,

    SessionID = 997, /// Special internal request
};
@ -1347,6 +1347,25 @@ void ZooKeeper::remove(
    ProfileEvents::increment(ProfileEvents::ZooKeeperRemove);
}

void ZooKeeper::removeRecursive(
    const String & path,
    uint32_t remove_nodes_limit,
    RemoveRecursiveCallback callback)
{
    if (!isFeatureEnabled(KeeperFeatureFlag::REMOVE_RECURSIVE))
        throw Exception::fromMessage(Error::ZBADARGUMENTS, "RemoveRecursive request type cannot be used because it's not supported by the server");

    ZooKeeperRemoveRecursiveRequest request;
    request.path = path;
    request.remove_nodes_limit = remove_nodes_limit;

    RequestInfo request_info;
    request_info.request = std::make_shared<ZooKeeperRemoveRecursiveRequest>(std::move(request));
    request_info.callback = [callback](const Response & response) { callback(dynamic_cast<const RemoveRecursiveResponse &>(response)); };

    pushRequest(std::move(request_info));
    ProfileEvents::increment(ProfileEvents::ZooKeeperRemove);
}

void ZooKeeper::exists(
    const String & path,
@ -146,6 +146,11 @@ public:
        int32_t version,
        RemoveCallback callback) override;

    void removeRecursive(
        const String & path,
        uint32_t remove_nodes_limit,
        RemoveRecursiveCallback callback) override;

    void exists(
        const String & path,
        ExistsCallback callback,
@ -57,11 +57,13 @@ namespace ErrorCodes
namespace DB
{

static bool supportsAtomicRenameImpl()
static std::optional<std::string> supportsAtomicRenameImpl()
{
    VersionNumber renameat2_minimal_version(3, 15, 0);
    VersionNumber linux_version(Poco::Environment::osVersion());
    return linux_version >= renameat2_minimal_version;
    if (linux_version >= renameat2_minimal_version)
        return std::nullopt;
    return fmt::format("Linux kernel 3.15+ is required, got {}", linux_version.toString());
}

static bool renameat2(const std::string & old_path, const std::string & new_path, int flags)
@ -97,10 +99,14 @@ static bool renameat2(const std::string & old_path, const std::string & new_path
        ErrnoException::throwFromPath(ErrorCodes::SYSTEM_ERROR, new_path, "Cannot rename {} to {}", old_path, new_path);
}

bool supportsAtomicRename()
bool supportsAtomicRename(std::string * out_message)
{
    static bool supports = supportsAtomicRenameImpl();
    return supports;
    static auto error = supportsAtomicRenameImpl();
    if (!error.has_value())
        return true;
    if (out_message)
        *out_message = error.value();
    return false;
}

}
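For reference, the Linux fast path that this wrapper guards boils down to the renameat2 syscall with RENAME_NOREPLACE, which atomically fails with EEXIST instead of overwriting. A standalone sketch (raw syscall; header availability can vary across libc versions):

    #include <fcntl.h>         /// AT_FDCWD
    #include <linux/fs.h>      /// RENAME_NOREPLACE
    #include <sys/syscall.h>
    #include <unistd.h>

    bool renameNoReplaceRaw(const char * old_path, const char * new_path)
    {
        /// Atomic: either new_path did not exist and the rename happened, or errno == EEXIST.
        return 0 == syscall(SYS_renameat2, AT_FDCWD, old_path, AT_FDCWD, new_path, (unsigned) RENAME_NOREPLACE);
    }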
@ -152,16 +158,22 @@ static bool renameat2(const std::string & old_path, const std::string & new_path
}


static bool supportsAtomicRenameImpl()
static std::optional<std::string> supportsAtomicRenameImpl()
{
    auto fun = dlsym(RTLD_DEFAULT, "renamex_np");
    return fun != nullptr;
    if (fun != nullptr)
        return std::nullopt;
    return "macOS 10.12 or later is required";
}

bool supportsAtomicRename()
bool supportsAtomicRename(std::string * out_message)
{
    static bool supports = supportsAtomicRenameImpl();
    return supports;
    static auto error = supportsAtomicRenameImpl();
    if (!error.has_value())
        return true;
    if (out_message)
        *out_message = error.value();
    return false;
}

}
@ -179,8 +191,10 @@ static bool renameat2(const std::string &, const std::string &, int)
    return false;
}

bool supportsAtomicRename()
bool supportsAtomicRename(std::string * out_message)
{
    if (out_message)
        *out_message = "only Linux and macOS are supported";
    return false;
}
|
@ -6,7 +6,7 @@ namespace DB
|
||||
{
|
||||
|
||||
/// Returns true, if the following functions supported by the system
|
||||
bool supportsAtomicRename();
|
||||
bool supportsAtomicRename(std::string * out_message = nullptr);
|
||||
|
||||
/// Atomically rename old_path to new_path. If new_path exists, do not overwrite it and throw exception
|
||||
void renameNoReplace(const std::string & old_path, const std::string & new_path);
|
||||
|
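A hedged caller-side sketch of the new out-parameter; the error code used here is an assumption for illustration:

    std::string reason;
    if (!DB::supportsAtomicRename(&reason))
        throw DB::Exception(DB::ErrorCodes::NOT_IMPLEMENTED,   /// assumed error code
            "Atomic rename is not supported: {}", reason);
    DB::renameNoReplace(old_path, new_path);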
@ -6,7 +6,7 @@
#include <filesystem>

#include <IO/WriteBufferFromFile.h>
#include <Common/CgroupsMemoryUsageObserver.h>
#include <Common/MemoryWorker.h>
#include <Common/filesystemHelpers.h>

using namespace DB;
|
||||
"\"workingset_restore_anon\": 0, \"workingset_restore_file\": 0, \"zswap\": 0, \"zswapped\": 0, \"zswpin\": 0, \"zswpout\": 0}"};
|
||||
|
||||
|
||||
class CgroupsMemoryUsageObserverFixture : public ::testing::TestWithParam<CgroupsMemoryUsageObserver::CgroupsVersion>
|
||||
class CgroupsMemoryUsageObserverFixture : public ::testing::TestWithParam<ICgroupsReader::CgroupsVersion>
|
||||
{
|
||||
void SetUp() override
|
||||
{
|
||||
@ -138,7 +138,7 @@ class CgroupsMemoryUsageObserverFixture : public ::testing::TestWithParam<Cgroup
        stat_file.write(SAMPLE_FILE[version].data(), SAMPLE_FILE[version].size());
        stat_file.sync();

        if (GetParam() == CgroupsMemoryUsageObserver::CgroupsVersion::V2)
        if (GetParam() == ICgroupsReader::CgroupsVersion::V2)
        {
            auto current_file = WriteBufferFromFile(tmp_dir + "/memory.current");
            current_file.write("29645422592", 11);
@ -154,18 +154,18 @@ protected:
TEST_P(CgroupsMemoryUsageObserverFixture, ReadMemoryUsageTest)
{
    const auto version = GetParam();
    auto reader = createCgroupsReader(version, tmp_dir);
    auto reader = ICgroupsReader::createCgroupsReader(version, tmp_dir);
    ASSERT_EQ(
        reader->readMemoryUsage(),
        version == CgroupsMemoryUsageObserver::CgroupsVersion::V1 ? /* rss from memory.stat */ 2232029184
                                                                  : /* value from memory.current - inactive_file */ 20952338432);
        version == ICgroupsReader::CgroupsVersion::V1 ? /* rss from memory.stat */ 2232029184
                                                      : /* anon from memory.stat */ 10429399040);
}


TEST_P(CgroupsMemoryUsageObserverFixture, DumpAllStatsTest)
{
    const auto version = GetParam();
    auto reader = createCgroupsReader(version, tmp_dir);
    auto reader = ICgroupsReader::createCgroupsReader(version, tmp_dir);
    ASSERT_EQ(reader->dumpAllStats(), EXPECTED[static_cast<uint8_t>(version)]);
}
@ -173,6 +173,6 @@ TEST_P(CgroupsMemoryUsageObserverFixture, DumpAllStatsTest)
INSTANTIATE_TEST_SUITE_P(
    CgroupsMemoryUsageObserverTests,
    CgroupsMemoryUsageObserverFixture,
    ::testing::Values(CgroupsMemoryUsageObserver::CgroupsVersion::V1, CgroupsMemoryUsageObserver::CgroupsVersion::V2));
    ::testing::Values(ICgroupsReader::CgroupsVersion::V1, ICgroupsReader::CgroupsVersion::V2));

#endif
@ -39,7 +39,7 @@ using Checksum = CityHash_v1_0_2::uint128;


/// Validate checksum of data, and if it mismatches, find out possible reason and throw exception.
static void validateChecksum(char * data, size_t size, const Checksum expected_checksum)
static void validateChecksum(char * data, size_t size, const Checksum expected_checksum, bool external_data)
{
    auto calculated_checksum = CityHash_v1_0_2::CityHash128(data, size);
    if (expected_checksum == calculated_checksum)
@ -64,6 +64,8 @@ static void validateChecksum(char * data, size_t size, const Checksum expected_c
        "this can be caused by disk bit rot. This exception protects ClickHouse "
        "from data corruption due to hardware failures.";

    int error_code = external_data ? ErrorCodes::CANNOT_DECOMPRESS : ErrorCodes::CHECKSUM_DOESNT_MATCH;

    auto flip_bit = [](char * buf, size_t pos)
    {
        buf[pos / 8] ^= 1 << pos % 8;
@ -87,7 +89,7 @@ static void validateChecksum(char * data, size_t size, const Checksum expected_c
    {
        message << ". The mismatch is caused by single bit flip in data block at byte " << (bit_pos / 8) << ", bit " << (bit_pos % 8) << ". "
            << message_hardware_failure;
        throw Exception::createDeprecated(message.str(), ErrorCodes::CHECKSUM_DOESNT_MATCH);
        throw Exception::createDeprecated(message.str(), error_code);
    }

    flip_bit(tmp_data, bit_pos); /// Restore
@ -102,10 +104,10 @@ static void validateChecksum(char * data, size_t size, const Checksum expected_c
    {
        message << ". The mismatch is caused by single bit flip in checksum. "
            << message_hardware_failure;
        throw Exception::createDeprecated(message.str(), ErrorCodes::CHECKSUM_DOESNT_MATCH);
        throw Exception::createDeprecated(message.str(), error_code);
    }

    throw Exception::createDeprecated(message.str(), ErrorCodes::CHECKSUM_DOESNT_MATCH);
    throw Exception::createDeprecated(message.str(), error_code);
}

static void readHeaderAndGetCodecAndSize(
@ -151,7 +153,7 @@
            "Most likely corrupted data.", size_compressed_without_checksum);

    if (size_compressed_without_checksum < header_size)
        throw Exception(ErrorCodes::CORRUPTED_DATA, "Can't decompress data: "
        throw Exception(external_data ? ErrorCodes::CANNOT_DECOMPRESS : ErrorCodes::CORRUPTED_DATA, "Can't decompress data: "
            "the compressed data size ({}, this should include header size) is less than the header size ({})",
            size_compressed_without_checksum, static_cast<size_t>(header_size));
}
@ -202,7 +204,7 @@ size_t CompressedReadBufferBase::readCompressedData(size_t & size_decompressed,
        readBinaryLittleEndian(checksum.low64, checksum_in);
        readBinaryLittleEndian(checksum.high64, checksum_in);

        validateChecksum(compressed_buffer, size_compressed_without_checksum, checksum);
        validateChecksum(compressed_buffer, size_compressed_without_checksum, checksum, external_data);
    }

    ProfileEvents::increment(ProfileEvents::ReadCompressedBytes, size_compressed_without_checksum + sizeof(Checksum));
@ -247,7 +249,7 @@ size_t CompressedReadBufferBase::readCompressedDataBlockForAsynchronous(size_t &
        readBinaryLittleEndian(checksum.low64, checksum_in);
        readBinaryLittleEndian(checksum.high64, checksum_in);

        validateChecksum(compressed_buffer, size_compressed_without_checksum, checksum);
        validateChecksum(compressed_buffer, size_compressed_without_checksum, checksum, external_data);
    }

    ProfileEvents::increment(ProfileEvents::ReadCompressedBytes, size_compressed_without_checksum + sizeof(Checksum));
@ -307,7 +309,7 @@ void CompressedReadBufferBase::decompress(BufferBase::Buffer & to, size_t size_d

    UInt8 header_size = ICompressionCodec::getHeaderSize();
    if (size_compressed_without_checksum < header_size)
        throw Exception(ErrorCodes::CORRUPTED_DATA,
        throw Exception(external_data ? ErrorCodes::CANNOT_DECOMPRESS : ErrorCodes::CORRUPTED_DATA,
            "Can't decompress data: the compressed data size ({}, this should include header size) is less than the header size ({})",
            size_compressed_without_checksum, static_cast<size_t>(header_size));
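The single-bit-flip probe used by validateChecksum() can be restated standalone: flip each bit of the payload, re-hash, and check whether the stored checksum now matches. A sketch under the same CityHash dependency:

    #include <city.h>
    #include <cstddef>
    #include <optional>

    std::optional<size_t> findSingleBitFlip(char * data, size_t size, CityHash_v1_0_2::uint128 expected)
    {
        for (size_t bit = 0; bit < size * 8; ++bit)
        {
            data[bit / 8] ^= 1 << (bit % 8);                                    /// flip one bit
            bool matches = CityHash_v1_0_2::CityHash128(data, size) == expected;
            data[bit / 8] ^= 1 << (bit % 8);                                    /// restore it
            if (matches)
                return bit;   /// the corruption was a single flipped bit at this position
        }
        return std::nullopt;  /// more than one bit differs, or the checksum itself is corrupted
    }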
@ -55,10 +55,29 @@ void CompressedWriteBuffer::nextImpl()

        out.write(compressed_buffer.data(), compressed_size);
    }

    /// Increase buffer size for next data if adaptive buffer size is used and nextImpl was called because of end of buffer.
    if (!available() && use_adaptive_buffer_size && memory.size() < adaptive_buffer_max_size)
    {
        memory.resize(std::min(memory.size() * 2, adaptive_buffer_max_size));
        BufferBase::set(memory.data(), memory.size(), 0);
    }
}

CompressedWriteBuffer::CompressedWriteBuffer(WriteBuffer & out_, CompressionCodecPtr codec_, size_t buf_size)
    : BufferWithOwnMemory<WriteBuffer>(buf_size), out(out_), codec(std::move(codec_))
void CompressedWriteBuffer::finalizeImpl()
{
    /// Don't try to resize buffer in nextImpl.
    use_adaptive_buffer_size = false;
    next();
}

CompressedWriteBuffer::CompressedWriteBuffer(
    WriteBuffer & out_, CompressionCodecPtr codec_, size_t buf_size, bool use_adaptive_buffer_size_, size_t adaptive_buffer_initial_size)
    : BufferWithOwnMemory<WriteBuffer>(use_adaptive_buffer_size_ ? adaptive_buffer_initial_size : buf_size)
    , out(out_)
    , codec(std::move(codec_))
    , use_adaptive_buffer_size(use_adaptive_buffer_size_)
    , adaptive_buffer_max_size(buf_size)
{
}
@ -19,7 +19,9 @@ public:
    explicit CompressedWriteBuffer(
        WriteBuffer & out_,
        CompressionCodecPtr codec_ = CompressionCodecFactory::instance().getDefaultCodec(),
        size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE);
        size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE,
        bool use_adaptive_buffer_size_ = false,
        size_t adaptive_buffer_initial_size = DBMS_DEFAULT_INITIAL_ADAPTIVE_BUFFER_SIZE);

    ~CompressedWriteBuffer() override;
@ -45,10 +47,17 @@ public:

private:
    void nextImpl() override;
    void finalizeImpl() override;

    WriteBuffer & out;
    CompressionCodecPtr codec;

    /// If true, the size of internal buffer will be exponentially increased up to
    /// adaptive_buffer_max_size after each nextImpl call. It can be used to avoid
    /// large buffer allocation when actual size of written data is small.
    bool use_adaptive_buffer_size;
    size_t adaptive_buffer_max_size;

    PODArray<char> compressed_buffer;
};
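The growth schedule implied by the constructor and nextImpl() above: start at adaptive_buffer_initial_size and double on each full flush, capped at buf_size. Restated as a one-liner with illustrative numbers:

    /// Same expression as in nextImpl(); e.g. with a 16 KiB initial size and a
    /// 1 MiB cap, the sequence is 16K, 32K, 64K, 128K, 256K, 512K, 1M, 1M, ...
    size_t nextBufferSize(size_t current, size_t max_size)
    {
        return std::min(current * 2, max_size);
    }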
@ -114,8 +114,13 @@ void updateKeeperInformation(KeeperDispatcher & keeper_dispatcher, AsynchronousM
}

KeeperAsynchronousMetrics::KeeperAsynchronousMetrics(
    ContextPtr context_, unsigned update_period_seconds, const ProtocolServerMetricsFunc & protocol_server_metrics_func_)
    : AsynchronousMetrics(update_period_seconds, protocol_server_metrics_func_), context(std::move(context_))
    ContextPtr context_,
    unsigned update_period_seconds,
    const ProtocolServerMetricsFunc & protocol_server_metrics_func_,
    bool update_jemalloc_epoch_,
    bool update_rss_)
    : AsynchronousMetrics(update_period_seconds, protocol_server_metrics_func_, update_jemalloc_epoch_, update_rss_)
    , context(std::move(context_))
{
}

@ -13,9 +13,13 @@ class KeeperAsynchronousMetrics : public AsynchronousMetrics
{
public:
    KeeperAsynchronousMetrics(
        ContextPtr context_, unsigned update_period_seconds, const ProtocolServerMetricsFunc & protocol_server_metrics_func_);
    ~KeeperAsynchronousMetrics() override;
        ContextPtr context_,
        unsigned update_period_seconds,
        const ProtocolServerMetricsFunc & protocol_server_metrics_func_,
        bool update_jemalloc_epoch_,
        bool update_rss_);

    ~KeeperAsynchronousMetrics() override;
private:
    ContextPtr context;

@ -11,6 +11,7 @@ enum class KeeperApiVersion : uint8_t
    WITH_FILTERED_LIST,
    WITH_MULTI_READ,
    WITH_CHECK_NOT_EXISTS,
    WITH_REMOVE_RECURSIVE,
};

const String keeper_system_path = "/keeper";
@ -91,6 +91,12 @@ bool checkIfRequestIncreaseMem(const Coordination::ZooKeeperRequestPtr & request
                memory_delta -= remove_req.bytesSize();
                break;
            }
            case Coordination::OpNum::RemoveRecursive:
            {
                Coordination::ZooKeeperRemoveRecursiveRequest & remove_req = dynamic_cast<Coordination::ZooKeeperRemoveRecursiveRequest &>(*sub_zk_request);
                memory_delta -= remove_req.bytesSize();
                break;
            }
            default:
                break;
        }
@ -148,7 +154,14 @@ void KeeperDispatcher::requestThread()
            Int64 mem_soft_limit = keeper_context->getKeeperMemorySoftLimit();
            if (configuration_and_settings->standalone_keeper && isExceedingMemorySoftLimit() && checkIfRequestIncreaseMem(request.request))
            {
                LOG_WARNING(log, "Processing requests refused because of max_memory_usage_soft_limit {}, the total used memory is {}, request type is {}", ReadableSize(mem_soft_limit), ReadableSize(total_memory_tracker.get()), request.request->getOpNum());
                LOG_WARNING(
                    log,
                    "Processing requests refused because of max_memory_usage_soft_limit {}, the total allocated memory is {}, RSS is {}, request type "
                    "is {}",
                    ReadableSize(mem_soft_limit),
                    ReadableSize(total_memory_tracker.get()),
                    ReadableSize(total_memory_tracker.getRSS()),
                    request.request->getOpNum());
                addErrorResponses({request}, Coordination::Error::ZCONNECTIONLOSS);
                continue;
            }
@ -12,6 +12,7 @@ enum class KeeperFeatureFlag : size_t
    MULTI_READ,
    CHECK_NOT_EXISTS,
    CREATE_IF_NOT_EXISTS,
    REMOVE_RECURSIVE,
};

class KeeperFeatureFlags
@ -602,7 +602,7 @@ bool KeeperServer::isLeaderAlive() const
bool KeeperServer::isExceedingMemorySoftLimit() const
{
    Int64 mem_soft_limit = keeper_context->getKeeperMemorySoftLimit();
    return mem_soft_limit > 0 && total_memory_tracker.get() >= mem_soft_limit;
    return mem_soft_limit > 0 && std::max(total_memory_tracker.get(), total_memory_tracker.getRSS()) >= mem_soft_limit;
}

/// TODO test whether taking failed peers into account
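The new predicate in isolation: either tracked allocations or RSS breaching the soft limit now counts as exceeding it, since jemalloc can retain freed memory in arenas that still shows up in RSS. Illustrative values only:

    Int64 mem_soft_limit = 8LL << 30;   /// 8 GiB soft limit
    Int64 allocated = 6LL << 30;        /// total_memory_tracker.get()
    Int64 rss = 9LL << 30;              /// total_memory_tracker.getRSS()
    bool exceeding = mem_soft_limit > 0 && std::max(allocated, rss) >= mem_soft_limit;   /// true: RSS alone trips it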
@ -832,6 +832,15 @@ std::shared_ptr<typename Container::Node> KeeperStorage<Container>::UncommittedS
    return tryGetNodeFromStorage(path);
}

template<typename Container>
const typename Container::Node * KeeperStorage<Container>::UncommittedState::getActualNodeView(StringRef path, const Node & storage_node) const
{
    if (auto node_it = nodes.find(path.toView()); node_it != nodes.end())
        return node_it->second.node.get();

    return &storage_node;
}

template<typename Container>
Coordination::ACLs KeeperStorage<Container>::UncommittedState::getACLs(StringRef path) const
{
@ -1124,7 +1133,7 @@ struct KeeperStorageRequestProcessor
    }

    virtual KeeperStorageBase::ResponsesForSessions
    processWatches(KeeperStorageBase::Watches & /*watches*/, KeeperStorageBase::Watches & /*list_watches*/) const
    processWatches(const Storage & /*storage*/, int64_t /*zxid*/, KeeperStorageBase::Watches & /*watches*/, KeeperStorageBase::Watches & /*list_watches*/) const
    {
        return {};
    }
@ -1241,7 +1250,7 @@ struct KeeperStorageCreateRequestProcessor final : public KeeperStorageRequestPr
    using KeeperStorageRequestProcessor<Storage>::KeeperStorageRequestProcessor;

    KeeperStorageBase::ResponsesForSessions
    processWatches(KeeperStorageBase::Watches & watches, KeeperStorageBase::Watches & list_watches) const override
    processWatches(const Storage & /*storage*/, int64_t /*zxid*/, KeeperStorageBase::Watches & watches, KeeperStorageBase::Watches & list_watches) const override
    {
        return processWatchesImpl(this->zk_request->getPath(), watches, list_watches, Coordination::Event::CREATED);
    }
@ -1462,16 +1471,41 @@ struct KeeperStorageGetRequestProcessor final : public KeeperStorageRequestProce
    }
};

namespace
{

template <typename Storage>
void addUpdateParentPzxidDelta(Storage & storage, std::vector<typename Storage::Delta> & deltas, int64_t zxid, StringRef path)
{
    auto parent_path = parentNodePath(path);
    if (!storage.uncommitted_state.getNode(parent_path))
        return;

    deltas.emplace_back(
        std::string{parent_path},
        zxid,
        typename Storage::UpdateNodeDelta
        {
            [zxid](Storage::Node & parent)
            {
                parent.pzxid = std::max(parent.pzxid, zxid);
            }
        }
    );
}

}

template<typename Storage>
struct KeeperStorageRemoveRequestProcessor final : public KeeperStorageRequestProcessor<Storage>
{
    using KeeperStorageRequestProcessor<Storage>::KeeperStorageRequestProcessor;

    bool checkAuth(Storage & storage, int64_t session_id, bool is_local) const override
    {
        return storage.checkACL(parentNodePath(this->zk_request->getPath()), Coordination::ACL::Delete, session_id, is_local);
    }

    using KeeperStorageRequestProcessor<Storage>::KeeperStorageRequestProcessor;

    std::vector<typename Storage::Delta>
    preprocess(Storage & storage, int64_t zxid, int64_t /*session_id*/, int64_t /*time*/, uint64_t & digest, const KeeperContext & keeper_context) const override
    {
@ -1488,31 +1522,12 @@ struct KeeperStorageRemoveRequestProcessor final : public KeeperStorageRequestPr
            return {typename Storage::Delta{zxid, Coordination::Error::ZBADARGUMENTS}};
        }

        const auto update_parent_pzxid = [&]()
        {
            auto parent_path = parentNodePath(request.path);
            if (!storage.uncommitted_state.getNode(parent_path))
                return;

            new_deltas.emplace_back(
                std::string{parent_path},
                zxid,
                typename Storage::UpdateNodeDelta
                {
                    [zxid](Storage::Node & parent)
                    {
                        parent.pzxid = std::max(parent.pzxid, zxid);
                    }
                }
            );
        };

        auto node = storage.uncommitted_state.getNode(request.path);

        if (!node)
        {
            if (request.restored_from_zookeeper_log)
                update_parent_pzxid();
                addUpdateParentPzxidDelta(storage, new_deltas, zxid, request.path);
            return {typename Storage::Delta{zxid, Coordination::Error::ZNONODE}};
        }
        else if (request.version != -1 && request.version != node->version)
@ -1521,7 +1536,7 @@ struct KeeperStorageRemoveRequestProcessor final : public KeeperStorageRequestPr
            return {typename Storage::Delta{zxid, Coordination::Error::ZNOTEMPTY}};

        if (request.restored_from_zookeeper_log)
            update_parent_pzxid();
            addUpdateParentPzxidDelta(storage, new_deltas, zxid, request.path);

        new_deltas.emplace_back(
            std::string{parentNodePath(request.path)},
@ -1552,12 +1567,318 @@ struct KeeperStorageRemoveRequestProcessor final : public KeeperStorageRequestPr
    }

    KeeperStorageBase::ResponsesForSessions
    processWatches(KeeperStorageBase::Watches & watches, KeeperStorageBase::Watches & list_watches) const override
    processWatches(const Storage & /*storage*/, int64_t /*zxid*/, KeeperStorageBase::Watches & watches, KeeperStorageBase::Watches & list_watches) const override
    {
        return processWatchesImpl(this->zk_request->getPath(), watches, list_watches, Coordination::Event::DELETED);
    }
};

template<typename Storage>
struct KeeperStorageRemoveRecursiveRequestProcessor final : public KeeperStorageRequestProcessor<Storage>
{
    using KeeperStorageRequestProcessor<Storage>::KeeperStorageRequestProcessor;

    bool checkAuth(Storage & storage, int64_t session_id, bool is_local) const override
    {
        return storage.checkACL(parentNodePath(this->zk_request->getPath()), Coordination::ACL::Delete, session_id, is_local);
    }

    std::vector<typename Storage::Delta>
    preprocess(Storage & storage, int64_t zxid, int64_t session_id, int64_t /*time*/, uint64_t & digest, const KeeperContext & keeper_context) const override
    {
        ProfileEvents::increment(ProfileEvents::KeeperRemoveRequest);
        Coordination::ZooKeeperRemoveRecursiveRequest & request = dynamic_cast<Coordination::ZooKeeperRemoveRecursiveRequest &>(*this->zk_request);

        std::vector<typename Storage::Delta> new_deltas;

        if (Coordination::matchPath(request.path, keeper_system_path) != Coordination::PathMatchResult::NOT_MATCH)
        {
            auto error_msg = fmt::format("Trying to delete an internal Keeper path ({}) which is not allowed", request.path);

            handleSystemNodeModification(keeper_context, error_msg);
            return {typename Storage::Delta{zxid, Coordination::Error::ZBADARGUMENTS}};
        }

        auto node = storage.uncommitted_state.getNode(request.path);

        if (!node)
        {
            if (request.restored_from_zookeeper_log)
                addUpdateParentPzxidDelta(storage, new_deltas, zxid, request.path);

            return {typename Storage::Delta{zxid, Coordination::Error::ZNONODE}};
        }

        ToDeleteTreeCollector collector(storage, zxid, session_id, request.remove_nodes_limit);
        auto collect_status = collector.collect(request.path, *node);

        if (collect_status == ToDeleteTreeCollector::CollectStatus::NoAuth)
            return {typename Storage::Delta{zxid, Coordination::Error::ZNOAUTH}};

        if (collect_status == ToDeleteTreeCollector::CollectStatus::LimitExceeded)
            return {typename Storage::Delta{zxid, Coordination::Error::ZNOTEMPTY}};

        if (request.restored_from_zookeeper_log)
            addUpdateParentPzxidDelta(storage, new_deltas, zxid, request.path);

        auto delete_deltas = collector.extractDeltas();

        for (const auto & delta : delete_deltas)
        {
            const auto * remove_delta = std::get_if<typename Storage::RemoveNodeDelta>(&delta.operation);
            if (remove_delta && remove_delta->ephemeral_owner)
                storage.unregisterEphemeralPath(remove_delta->ephemeral_owner, delta.path);
        }

        new_deltas.insert(new_deltas.end(), std::make_move_iterator(delete_deltas.begin()), std::make_move_iterator(delete_deltas.end()));

        digest = storage.calculateNodesDigest(digest, new_deltas);

        return new_deltas;
    }

    Coordination::ZooKeeperResponsePtr process(Storage & storage, int64_t zxid) const override
    {
        Coordination::ZooKeeperResponsePtr response_ptr = this->zk_request->makeResponse();
        Coordination::ZooKeeperRemoveRecursiveResponse & response = dynamic_cast<Coordination::ZooKeeperRemoveRecursiveResponse &>(*response_ptr);

        response.error = storage.commit(zxid);
        return response_ptr;
    }

    KeeperStorageBase::ResponsesForSessions
    processWatches(const Storage & storage, int64_t zxid, KeeperStorageBase::Watches & watches, KeeperStorageBase::Watches & list_watches) const override
    {
        /// We need to iterate over the deltas of this zxid and update watches for the whole deleted tree.
        const auto & deltas = storage.uncommitted_state.deltas;

        KeeperStorageBase::ResponsesForSessions responses;
        for (auto it = deltas.rbegin(); it != deltas.rend() && it->zxid == zxid; ++it)
        {
            const auto * remove_delta = std::get_if<typename Storage::RemoveNodeDelta>(&it->operation);
            if (remove_delta)
            {
                auto new_responses = processWatchesImpl(it->path, watches, list_watches, Coordination::Event::DELETED);
                responses.insert(responses.end(), std::make_move_iterator(new_responses.begin()), std::make_move_iterator(new_responses.end()));
            }
        }

        return responses;
    }

private:
    using SNode = typename Storage::Node;

    class ToDeleteTreeCollector
    {
        Storage & storage;
        int64_t zxid;
        int64_t session_id;
        uint32_t limit;

        uint32_t max_level = 0;
        uint32_t nodes_observed = 1; /// root node
|
||||
std::unordered_map<uint32_t, std::vector<typename Storage::Delta>> by_level_deltas;
|
||||
|
||||
struct Step
|
||||
{
|
||||
String path;
|
||||
std::variant<SNode, const SNode *> node;
|
||||
uint32_t level;
|
||||
};
|
||||
|
||||
enum class CollectStatus
|
||||
{
|
||||
Ok,
|
||||
NoAuth,
|
||||
LimitExceeded,
|
||||
};
|
||||
|
||||
friend struct KeeperStorageRemoveRecursiveRequestProcessor;
|
||||
|
||||
public:
|
||||
ToDeleteTreeCollector(Storage & storage_, int64_t zxid_, int64_t session_id_, uint32_t limit_)
|
||||
: storage(storage_)
|
||||
, zxid(zxid_)
|
||||
, session_id(session_id_)
|
||||
, limit(limit_)
|
||||
{
|
||||
}
|
||||
|
||||
CollectStatus collect(StringRef root_path, const SNode & root_node)
|
||||
{
|
||||
std::deque<Step> steps;
|
||||
|
||||
if (checkLimits(&root_node))
|
||||
return CollectStatus::LimitExceeded;
|
||||
|
||||
steps.push_back(Step{root_path.toString(), &root_node, 0});
|
||||
|
||||
while (!steps.empty())
|
||||
{
|
||||
Step step = std::move(steps.front());
|
||||
steps.pop_front();
|
||||
|
||||
StringRef path = step.path;
|
||||
uint32_t level = step.level;
|
||||
const SNode * node_ptr = nullptr;
|
||||
|
||||
if (auto * rdb = std::get_if<SNode>(&step.node))
|
||||
node_ptr = rdb;
|
||||
else
|
||||
node_ptr = std::get<const SNode *>(step.node);
|
||||
|
||||
chassert(!path.empty());
|
||||
chassert(node_ptr != nullptr);
|
||||
|
||||
const auto & node = *node_ptr;
|
||||
auto actual_node_ptr = storage.uncommitted_state.getActualNodeView(path, node);
|
||||
chassert(actual_node_ptr != nullptr); /// explicitly check that node is not deleted
|
||||
|
||||
if (actual_node_ptr->numChildren() > 0 && !storage.checkACL(path, Coordination::ACL::Delete, session_id, /*is_local=*/false))
|
||||
return CollectStatus::NoAuth;
|
||||
|
||||
if (auto status = visitRocksDBNode(steps, path, level); status != CollectStatus::Ok)
|
||||
return status;
|
||||
|
||||
if (auto status = visitMemNode(steps, path, level); status != CollectStatus::Ok)
|
||||
return status;
|
||||
|
||||
if (auto status = visitRootAndUncommitted(steps, path, node, level); status != CollectStatus::Ok)
|
||||
return status;
|
||||
}
|
||||
|
||||
return CollectStatus::Ok;
|
||||
}
|
||||
|
||||
std::vector<typename Storage::Delta> extractDeltas()
|
||||
{
|
||||
std::vector<typename Storage::Delta> deltas;
|
||||
|
||||
for (ssize_t level = max_level; level >= 0; --level)
|
||||
{
|
||||
auto & level_deltas = by_level_deltas[static_cast<uint32_t>(level)];
|
||||
deltas.insert(deltas.end(), std::make_move_iterator(level_deltas.begin()), std::make_move_iterator(level_deltas.end()));
|
||||
}
|
||||
|
||||
return std::move(deltas);
|
||||
}
|
||||
|
||||
private:
|
||||
CollectStatus visitRocksDBNode(std::deque<Step> & steps, StringRef root_path, uint32_t level)
|
||||
{
|
||||
if constexpr (Storage::use_rocksdb)
|
||||
{
|
||||
std::filesystem::path root_fs_path(root_path.toString());
|
||||
auto children = storage.container.getChildren(root_path.toString());
|
||||
|
||||
for (auto && [child_name, child_node] : children)
|
||||
{
|
||||
auto child_path = (root_fs_path / child_name).generic_string();
|
||||
const auto actual_child_node_ptr = storage.uncommitted_state.getActualNodeView(child_path, child_node);
|
||||
|
||||
if (actual_child_node_ptr == nullptr) /// node was deleted in previous step of multi transaction
|
||||
continue;
|
||||
|
||||
if (checkLimits(actual_child_node_ptr))
|
||||
return CollectStatus::LimitExceeded;
|
||||
|
||||
steps.push_back(Step{std::move(child_path), std::move(child_node), level + 1});
|
||||
}
|
||||
}
|
||||
|
||||
return CollectStatus::Ok;
|
||||
}
|
||||
|
||||
CollectStatus visitMemNode(std::deque<Step> & steps, StringRef root_path, uint32_t level)
|
||||
{
|
||||
if constexpr (!Storage::use_rocksdb)
|
||||
{
|
||||
auto node_it = storage.container.find(root_path);
|
||||
if (node_it == storage.container.end())
|
||||
return CollectStatus::Ok;
|
||||
|
||||
std::filesystem::path root_fs_path(root_path.toString());
|
||||
const auto & children = node_it->value.getChildren();
|
||||
|
||||
for (const auto & child_name : children)
|
||||
{
|
||||
auto child_path = (root_fs_path / child_name.toView()).generic_string();
|
||||
|
||||
auto child_it = storage.container.find(child_path);
|
||||
chassert(child_it != storage.container.end());
|
||||
const auto & child_node = child_it->value;
|
||||
|
||||
const auto actual_child_node_ptr = storage.uncommitted_state.getActualNodeView(child_path, child_node);
|
||||
|
||||
if (actual_child_node_ptr == nullptr) /// node was deleted in previous step of multi transaction
|
||||
continue;
|
||||
|
||||
if (checkLimits(actual_child_node_ptr))
|
||||
return CollectStatus::LimitExceeded;
|
||||
|
||||
steps.push_back(Step{std::move(child_path), &child_node, level + 1});
|
||||
}
|
||||
}
|
||||
|
||||
return CollectStatus::Ok;
|
||||
}
|
||||
|
||||
CollectStatus visitRootAndUncommitted(std::deque<Step> & steps, StringRef root_path, const SNode & root_node, uint32_t level)
|
||||
{
|
||||
const auto & nodes = storage.uncommitted_state.nodes;
|
||||
|
||||
/// nodes are sorted by paths with level locality
|
||||
auto it = nodes.upper_bound(root_path.toString() + "/");
|
||||
|
||||
for (; it != nodes.end() && parentNodePath(it->first) == root_path; ++it)
|
||||
{
|
||||
const auto actual_child_node_ptr = it->second.node.get();
|
||||
|
||||
if (actual_child_node_ptr == nullptr) /// node was deleted in previous step of multi transaction
|
||||
continue;
|
||||
|
||||
if (checkLimits(actual_child_node_ptr))
|
||||
return CollectStatus::LimitExceeded;
|
||||
|
||||
const String & child_path = it->first;
|
||||
const SNode & child_node = *it->second.node;
|
||||
|
||||
steps.push_back(Step{child_path, &child_node, level + 1});
|
||||
}
|
||||
|
||||
addDelta(root_path, root_node, level);
|
||||
|
||||
return CollectStatus::Ok;
|
||||
}
|
||||
|
||||
void addDelta(StringRef root_path, const SNode & root_node, uint32_t level)
|
||||
{
|
||||
max_level = std::max(max_level, level);
|
||||
|
||||
by_level_deltas[level].emplace_back(
|
||||
parentNodePath(root_path).toString(),
|
||||
zxid,
|
||||
typename Storage::UpdateNodeDelta{
|
||||
[](SNode & parent)
|
||||
{
|
||||
++parent.cversion;
|
||||
parent.decreaseNumChildren();
|
||||
}
|
||||
});
|
||||
|
||||
by_level_deltas[level].emplace_back(root_path.toString(), zxid, typename Storage::RemoveNodeDelta{root_node.version, root_node.ephemeralOwner()});
|
||||
}
|
||||
|
||||
bool checkLimits(const SNode * node)
|
||||
{
|
||||
chassert(node != nullptr);
|
||||
nodes_observed += node->numChildren();
|
||||
return nodes_observed > limit;
|
||||
}
|
||||
};
|
||||
};
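
A note on the collector's ordering (editorial aside, not part of the diff): collect() walks the subtree breadth-first, charging every observed node against remove_nodes_limit, and addDelta() buckets each node's deltas by its depth. extractDeltas() then flattens the buckets from max_level down to 0, so a child's RemoveNodeDelta is always applied before its parent's and every intermediate state of the delta list stays consistent. A minimal standalone sketch of that flattening, with a stand-in Delta type instead of the Keeper classes:

#include <algorithm>
#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

struct Delta { std::string path; };  /// stand-in for Storage::Delta

std::unordered_map<uint32_t, std::vector<Delta>> by_level;  /// filled during the BFS walk
uint32_t max_level = 0;

void addDelta(uint32_t level, Delta delta)
{
    max_level = std::max(max_level, level);
    by_level[level].push_back(std::move(delta));
}

/// Flatten deepest-first: children come out before their parents.
std::vector<Delta> extractDeltas()
{
    std::vector<Delta> result;
    for (int64_t level = max_level; level >= 0; --level)
    {
        auto & bucket = by_level[static_cast<uint32_t>(level)];
        result.insert(result.end(),
                      std::make_move_iterator(bucket.begin()),
                      std::make_move_iterator(bucket.end()));
    }
    return result;
}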

template<typename Storage>
struct KeeperStorageExistsRequestProcessor final : public KeeperStorageRequestProcessor<Storage>
{
@@ -1709,7 +2030,7 @@ struct KeeperStorageSetRequestProcessor final : public KeeperStorageRequestProce
    }

    KeeperStorageBase::ResponsesForSessions
    processWatches(typename Storage::Watches & watches, typename Storage::Watches & list_watches) const override
    processWatches(const Storage & /*storage*/, int64_t /*zxid*/, typename Storage::Watches & watches, typename Storage::Watches & list_watches) const override
    {
        return processWatchesImpl(this->zk_request->getPath(), watches, list_watches, Coordination::Event::CHANGED);
    }
@@ -2131,6 +2452,10 @@ struct KeeperStorageMultiRequestProcessor final : public KeeperStorageRequestPro
                check_operation_type(OperationType::Write);
                concrete_requests.push_back(std::make_shared<KeeperStorageRemoveRequestProcessor<Storage>>(sub_zk_request));
                break;
            case Coordination::OpNum::RemoveRecursive:
                check_operation_type(OperationType::Write);
                concrete_requests.push_back(std::make_shared<KeeperStorageRemoveRecursiveRequestProcessor<Storage>>(sub_zk_request));
                break;
            case Coordination::OpNum::Set:
                check_operation_type(OperationType::Write);
                concrete_requests.push_back(std::make_shared<KeeperStorageSetRequestProcessor<Storage>>(sub_zk_request));
@@ -2250,12 +2575,12 @@ struct KeeperStorageMultiRequestProcessor final : public KeeperStorageRequestPro
    }

    KeeperStorageBase::ResponsesForSessions
    processWatches(typename Storage::Watches & watches, typename Storage::Watches & list_watches) const override
    processWatches(const Storage & storage, int64_t zxid, typename Storage::Watches & watches, typename Storage::Watches & list_watches) const override
    {
        typename Storage::ResponsesForSessions result;
        for (const auto & generic_request : concrete_requests)
        {
            auto responses = generic_request->processWatches(watches, list_watches);
            auto responses = generic_request->processWatches(storage, zxid, watches, list_watches);
            result.insert(result.end(), responses.begin(), responses.end());
        }
        return result;
@@ -2400,6 +2725,7 @@ KeeperStorageRequestProcessorsFactory<Storage>::KeeperStorageRequestProcessorsFa
    registerKeeperRequestProcessor<Coordination::OpNum::SetACL, KeeperStorageSetACLRequestProcessor<Storage>>(*this);
    registerKeeperRequestProcessor<Coordination::OpNum::GetACL, KeeperStorageGetACLRequestProcessor<Storage>>(*this);
    registerKeeperRequestProcessor<Coordination::OpNum::CheckNotExists, KeeperStorageCheckRequestProcessor<Storage>>(*this);
    registerKeeperRequestProcessor<Coordination::OpNum::RemoveRecursive, KeeperStorageRemoveRecursiveRequestProcessor<Storage>>(*this);
}

@@ -2718,7 +3044,7 @@ KeeperStorage<Container>::ResponsesForSessions KeeperStorage<Container>::process
    /// If this request was processed successfully, we need to check watches
    if (response->error == Coordination::Error::ZOK)
    {
        auto watch_responses = request_processor->processWatches(watches, list_watches);
        auto watch_responses = request_processor->processWatches(*this, zxid, watches, list_watches);
        results.insert(results.end(), watch_responses.begin(), watch_responses.end());
    }

@@ -566,6 +566,7 @@ public:
    void rollback(int64_t rollback_zxid);

    std::shared_ptr<Node> getNode(StringRef path) const;
    const Node * getActualNodeView(StringRef path, const Node & storage_node) const;
    Coordination::ACLs getACLs(StringRef path) const;

    void applyDelta(const Delta & delta);
@@ -609,7 +610,18 @@ public:
        using is_transparent = void; // required to make find() work with different type than key_type
    };

    mutable std::unordered_map<std::string, UncommittedNode, Hash, Equal> nodes;
    struct PathCmp
    {
        using is_transparent = std::true_type;

        auto operator()(const std::string_view a,
                        const std::string_view b) const
        {
            return a.size() < b.size() || (a.size() == b.size() && a < b);
        }
    };

    mutable std::map<std::string, UncommittedNode, PathCmp> nodes;
    std::unordered_map<std::string, std::list<const Delta *>, Hash, Equal> deltas_for_path;

    std::list<Delta> deltas;
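
The PathCmp comparator above is what the collector's "level locality" comment refers to: keys are ordered by length first and lexicographically second, so a parent path (always shorter than its descendants) sorts before them, and visitRootAndUncommitted() can start at upper_bound(path + "/") and scan forward while parentNodePath(it->first) still equals the parent. A small self-contained illustration of the comparator on a plain std::map with toy values:

#include <cassert>
#include <map>
#include <string>
#include <string_view>

/// Shorter paths sort first; ties are broken lexicographically.
struct PathCmp
{
    using is_transparent = std::true_type;

    bool operator()(std::string_view a, std::string_view b) const
    {
        return a.size() < b.size() || (a.size() == b.size() && a < b);
    }
};

int main()
{
    std::map<std::string, int, PathCmp> nodes{{"/a", 0}, {"/b", 0}, {"/a/b", 0}, {"/a/c", 0}};

    /// Direct children of "/a" appear right after the "/a/" sentinel key.
    auto it = nodes.upper_bound(std::string_view("/a/"));
    assert(it != nodes.end() && it->first == "/a/b");
    ++it;
    assert(it != nodes.end() && it->first == "/a/c");
}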

@@ -3113,6 +3113,8 @@ TYPED_TEST(CoordinationTest, TestFeatureFlags)
    ASSERT_TRUE(feature_flags.isEnabled(KeeperFeatureFlag::FILTERED_LIST));
    ASSERT_TRUE(feature_flags.isEnabled(KeeperFeatureFlag::MULTI_READ));
    ASSERT_FALSE(feature_flags.isEnabled(KeeperFeatureFlag::CHECK_NOT_EXISTS));
    ASSERT_FALSE(feature_flags.isEnabled(KeeperFeatureFlag::CREATE_IF_NOT_EXISTS));
    ASSERT_FALSE(feature_flags.isEnabled(KeeperFeatureFlag::REMOVE_RECURSIVE));
}

TYPED_TEST(CoordinationTest, TestSystemNodeModify)
@@ -3374,6 +3376,474 @@ TYPED_TEST(CoordinationTest, TestReapplyingDeltas)
    ASSERT_TRUE(children1_set == children2_set);
}

TYPED_TEST(CoordinationTest, TestRemoveRecursiveRequest)
{
    using namespace DB;
    using namespace Coordination;

    using Storage = typename TestFixture::Storage;

    ChangelogDirTest rocks("./rocksdb");
    this->setRocksDBDirectory("./rocksdb");

    Storage storage{500, "", this->keeper_context};

    int32_t zxid = 0;

    const auto create = [&](const String & path, int create_mode)
    {
        int new_zxid = ++zxid;

        const auto create_request = std::make_shared<ZooKeeperCreateRequest>();
        create_request->path = path;
        create_request->is_ephemeral = create_mode == zkutil::CreateMode::Ephemeral || create_mode == zkutil::CreateMode::EphemeralSequential;
        create_request->is_sequential = create_mode == zkutil::CreateMode::PersistentSequential || create_mode == zkutil::CreateMode::EphemeralSequential;

        storage.preprocessRequest(create_request, 1, 0, new_zxid);
        auto responses = storage.processRequest(create_request, 1, new_zxid);

        EXPECT_EQ(responses.size(), 1);
        EXPECT_EQ(responses[0].response->error, Coordination::Error::ZOK) << "Failed to create " << path;
    };

    const auto remove = [&](const String & path, int32_t version = -1)
    {
        int new_zxid = ++zxid;

        auto remove_request = std::make_shared<ZooKeeperRemoveRequest>();
        remove_request->path = path;
        remove_request->version = version;

        storage.preprocessRequest(remove_request, 1, 0, new_zxid);
        return storage.processRequest(remove_request, 1, new_zxid);
    };

    const auto remove_recursive = [&](const String & path, uint32_t remove_nodes_limit = 1)
    {
        int new_zxid = ++zxid;

        auto remove_request = std::make_shared<ZooKeeperRemoveRecursiveRequest>();
        remove_request->path = path;
        remove_request->remove_nodes_limit = remove_nodes_limit;

        storage.preprocessRequest(remove_request, 1, 0, new_zxid);
        return storage.processRequest(remove_request, 1, new_zxid);
    };

    const auto exists = [&](const String & path)
    {
        int new_zxid = ++zxid;

        const auto exists_request = std::make_shared<ZooKeeperExistsRequest>();
        exists_request->path = path;

        storage.preprocessRequest(exists_request, 1, 0, new_zxid);
        auto responses = storage.processRequest(exists_request, 1, new_zxid);

        EXPECT_EQ(responses.size(), 1);
        return responses[0].response->error == Coordination::Error::ZOK;
    };

    {
        SCOPED_TRACE("Single Remove Single Node");
        create("/T1", zkutil::CreateMode::Persistent);

        auto responses = remove("/T1");
        ASSERT_EQ(responses.size(), 1);
        ASSERT_EQ(responses[0].response->error, Coordination::Error::ZOK);
        ASSERT_FALSE(exists("/T1"));
    }

    {
        SCOPED_TRACE("Single Remove Tree");
        create("/T2", zkutil::CreateMode::Persistent);
        create("/T2/A", zkutil::CreateMode::Persistent);

        auto responses = remove("/T2");
        ASSERT_EQ(responses.size(), 1);
        ASSERT_EQ(responses[0].response->error, Coordination::Error::ZNOTEMPTY);
        ASSERT_TRUE(exists("/T2"));
    }

    {
        SCOPED_TRACE("Recursive Remove Single Node");
        create("/T3", zkutil::CreateMode::Persistent);

        auto responses = remove_recursive("/T3", 100);
        ASSERT_EQ(responses.size(), 1);
        ASSERT_EQ(responses[0].response->error, Coordination::Error::ZOK);
        ASSERT_FALSE(exists("/T3"));
    }

    {
        SCOPED_TRACE("Recursive Remove Tree Small Limit");
        create("/T5", zkutil::CreateMode::Persistent);
        create("/T5/A", zkutil::CreateMode::Persistent);
        create("/T5/B", zkutil::CreateMode::Persistent);
        create("/T5/A/C", zkutil::CreateMode::Persistent);

        auto responses = remove_recursive("/T5", 2);
        ASSERT_EQ(responses.size(), 1);
        ASSERT_EQ(responses[0].response->error, Coordination::Error::ZNOTEMPTY);
        ASSERT_TRUE(exists("/T5"));
        ASSERT_TRUE(exists("/T5/A"));
        ASSERT_TRUE(exists("/T5/B"));
        ASSERT_TRUE(exists("/T5/A/C"));
    }

    {
        SCOPED_TRACE("Recursive Remove Tree Big Limit");
        create("/T6", zkutil::CreateMode::Persistent);
        create("/T6/A", zkutil::CreateMode::Persistent);
        create("/T6/B", zkutil::CreateMode::Persistent);
        create("/T6/A/C", zkutil::CreateMode::Persistent);

        auto responses = remove_recursive("/T6", 4);
        ASSERT_EQ(responses.size(), 1);
        ASSERT_EQ(responses[0].response->error, Coordination::Error::ZOK);
        ASSERT_FALSE(exists("/T6"));
        ASSERT_FALSE(exists("/T6/A"));
        ASSERT_FALSE(exists("/T6/B"));
        ASSERT_FALSE(exists("/T6/A/C"));
    }
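
    /// Editorial note: the two limit cases above pin down the semantics of
    /// remove_nodes_limit. Each subtree contains four nodes (the root, two
    /// children, and one grandchild), so a limit of 2 is rejected wholesale
    /// with ZNOTEMPTY while a limit of 4 deletes everything; the request is
    /// all-or-nothing rather than a partial prune.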

    {
        SCOPED_TRACE("Recursive Remove Ephemeral");
        create("/T7", zkutil::CreateMode::Ephemeral);
        ASSERT_EQ(storage.ephemerals.size(), 1);

        auto responses = remove_recursive("/T7", 100);
        ASSERT_EQ(responses.size(), 1);
        ASSERT_EQ(responses[0].response->error, Coordination::Error::ZOK);
        ASSERT_EQ(storage.ephemerals.size(), 0);
        ASSERT_FALSE(exists("/T7"));
    }

    {
        SCOPED_TRACE("Recursive Remove Tree With Ephemeral");
        create("/T8", zkutil::CreateMode::Persistent);
        create("/T8/A", zkutil::CreateMode::Persistent);
        create("/T8/B", zkutil::CreateMode::Ephemeral);
        create("/T8/A/C", zkutil::CreateMode::Ephemeral);
        ASSERT_EQ(storage.ephemerals.size(), 1);

        auto responses = remove_recursive("/T8", 4);
        ASSERT_EQ(responses.size(), 1);
        ASSERT_EQ(responses[0].response->error, Coordination::Error::ZOK);
        ASSERT_EQ(storage.ephemerals.size(), 0);
        ASSERT_FALSE(exists("/T8"));
        ASSERT_FALSE(exists("/T8/A"));
        ASSERT_FALSE(exists("/T8/B"));
        ASSERT_FALSE(exists("/T8/A/C"));
    }
}

TYPED_TEST(CoordinationTest, TestRemoveRecursiveInMultiRequest)
{
    using namespace DB;
    using namespace Coordination;

    using Storage = typename TestFixture::Storage;

    ChangelogDirTest rocks("./rocksdb");
    this->setRocksDBDirectory("./rocksdb");

    Storage storage{500, "", this->keeper_context};
    int zxid = 0;

    auto prepare_create_tree = []()
    {
        return Coordination::Requests{
            zkutil::makeCreateRequest("/A", "A", zkutil::CreateMode::Persistent),
            zkutil::makeCreateRequest("/A/B", "B", zkutil::CreateMode::Persistent),
            zkutil::makeCreateRequest("/A/C", "C", zkutil::CreateMode::Ephemeral),
            zkutil::makeCreateRequest("/A/B/D", "D", zkutil::CreateMode::Ephemeral),
        };
    };

    const auto exists = [&](const String & path)
    {
        int new_zxid = ++zxid;

        const auto exists_request = std::make_shared<ZooKeeperExistsRequest>();
        exists_request->path = path;

        storage.preprocessRequest(exists_request, 1, 0, new_zxid);
        auto responses = storage.processRequest(exists_request, 1, new_zxid);

        EXPECT_EQ(responses.size(), 1);
        return responses[0].response->error == Coordination::Error::ZOK;
    };

    const auto is_multi_ok = [&](Coordination::ZooKeeperResponsePtr response)
    {
        const auto & multi_response = dynamic_cast<Coordination::ZooKeeperMultiResponse &>(*response);

        for (const auto & op_response : multi_response.responses)
            if (op_response->error != Coordination::Error::ZOK)
                return false;

        return true;
    };

    {
        SCOPED_TRACE("Remove In Multi Tx");
        int new_zxid = ++zxid;
        auto ops = prepare_create_tree();

        ops.push_back(zkutil::makeRemoveRequest("/A", -1));
        const auto request = std::make_shared<ZooKeeperMultiRequest>(ops, ACLs{});

        storage.preprocessRequest(request, 1, 0, new_zxid);
        auto responses = storage.processRequest(request, 1, new_zxid);
        ops.pop_back();

        ASSERT_EQ(responses.size(), 1);
        ASSERT_FALSE(is_multi_ok(responses[0].response));
    }

    {
        SCOPED_TRACE("Recursive Remove In Multi Tx");
        int new_zxid = ++zxid;
        auto ops = prepare_create_tree();

        ops.push_back(zkutil::makeRemoveRecursiveRequest("/A", 4));
        const auto request = std::make_shared<ZooKeeperMultiRequest>(ops, ACLs{});

        storage.preprocessRequest(request, 1, 0, new_zxid);
        auto responses = storage.processRequest(request, 1, new_zxid);
        ops.pop_back();

        ASSERT_EQ(responses.size(), 1);
        ASSERT_TRUE(is_multi_ok(responses[0].response));
        ASSERT_FALSE(exists("/A"));
        ASSERT_FALSE(exists("/A/C"));
        ASSERT_FALSE(exists("/A/B"));
        ASSERT_FALSE(exists("/A/B/D"));
    }

    {
        SCOPED_TRACE("Recursive Remove With Regular In Multi Tx");
        int new_zxid = ++zxid;
        auto ops = prepare_create_tree();

        ops.push_back(zkutil::makeRemoveRequest("/A/C", -1));
        ops.push_back(zkutil::makeRemoveRecursiveRequest("/A", 3));
        const auto request = std::make_shared<ZooKeeperMultiRequest>(ops, ACLs{});

        storage.preprocessRequest(request, 1, 0, new_zxid);
        auto responses = storage.processRequest(request, 1, new_zxid);
        ops.pop_back();
        ops.pop_back();

        ASSERT_EQ(responses.size(), 1);
        ASSERT_TRUE(is_multi_ok(responses[0].response));
        ASSERT_FALSE(exists("/A"));
        ASSERT_FALSE(exists("/A/C"));
        ASSERT_FALSE(exists("/A/B"));
        ASSERT_FALSE(exists("/A/B/D"));
    }

    {
        SCOPED_TRACE("Recursive Remove From Committed and Uncommitted states");
        int create_zxid = ++zxid;
        auto ops = prepare_create_tree();

        /// First create nodes
        const auto create_request = std::make_shared<ZooKeeperMultiRequest>(ops, ACLs{});
        storage.preprocessRequest(create_request, 1, 0, create_zxid);
        auto create_responses = storage.processRequest(create_request, 1, create_zxid);
        ASSERT_EQ(create_responses.size(), 1);
        ASSERT_TRUE(is_multi_ok(create_responses[0].response));
        ASSERT_TRUE(exists("/A"));
        ASSERT_TRUE(exists("/A/C"));
        ASSERT_TRUE(exists("/A/B"));
        ASSERT_TRUE(exists("/A/B/D"));

        /// Remove node A/C as a single remove request.
        /// Remove all other as remove recursive request.
        /// In this case we should list storage to understand the tree topology
        /// but ignore already deleted nodes in uncommitted state.

        int remove_zxid = ++zxid;
        ops = {
            zkutil::makeRemoveRequest("/A/C", -1),
            zkutil::makeRemoveRecursiveRequest("/A", 3),
        };
        const auto remove_request = std::make_shared<ZooKeeperMultiRequest>(ops, ACLs{});

        storage.preprocessRequest(remove_request, 1, 0, remove_zxid);
        auto remove_responses = storage.processRequest(remove_request, 1, remove_zxid);

        ASSERT_EQ(remove_responses.size(), 1);
        ASSERT_TRUE(is_multi_ok(remove_responses[0].response));
        ASSERT_FALSE(exists("/A"));
        ASSERT_FALSE(exists("/A/C"));
        ASSERT_FALSE(exists("/A/B"));
        ASSERT_FALSE(exists("/A/B/D"));
    }
}

TYPED_TEST(CoordinationTest, TestRemoveRecursiveWatches)
{
    using namespace DB;
    using namespace Coordination;

    using Storage = typename TestFixture::Storage;

    ChangelogDirTest rocks("./rocksdb");
    this->setRocksDBDirectory("./rocksdb");

    Storage storage{500, "", this->keeper_context};
    int zxid = 0;

    const auto create = [&](const String & path, int create_mode)
    {
        int new_zxid = ++zxid;

        const auto create_request = std::make_shared<ZooKeeperCreateRequest>();
        create_request->path = path;
        create_request->is_ephemeral = create_mode == zkutil::CreateMode::Ephemeral || create_mode == zkutil::CreateMode::EphemeralSequential;
        create_request->is_sequential = create_mode == zkutil::CreateMode::PersistentSequential || create_mode == zkutil::CreateMode::EphemeralSequential;

        storage.preprocessRequest(create_request, 1, 0, new_zxid);
        auto responses = storage.processRequest(create_request, 1, new_zxid);

        EXPECT_EQ(responses.size(), 1);
        EXPECT_EQ(responses[0].response->error, Coordination::Error::ZOK) << "Failed to create " << path;
    };

    const auto add_watch = [&](const String & path)
    {
        int new_zxid = ++zxid;

        const auto exists_request = std::make_shared<ZooKeeperExistsRequest>();
        exists_request->path = path;
        exists_request->has_watch = true;

        storage.preprocessRequest(exists_request, 1, 0, new_zxid);
        auto responses = storage.processRequest(exists_request, 1, new_zxid);

        EXPECT_EQ(responses.size(), 1);
        EXPECT_EQ(responses[0].response->error, Coordination::Error::ZOK);
    };

    const auto add_list_watch = [&](const String & path)
    {
        int new_zxid = ++zxid;

        const auto list_request = std::make_shared<ZooKeeperListRequest>();
        list_request->path = path;
        list_request->has_watch = true;

        storage.preprocessRequest(list_request, 1, 0, new_zxid);
        auto responses = storage.processRequest(list_request, 1, new_zxid);

        EXPECT_EQ(responses.size(), 1);
        EXPECT_EQ(responses[0].response->error, Coordination::Error::ZOK);
    };

    create("/A", zkutil::CreateMode::Persistent);
    create("/A/B", zkutil::CreateMode::Persistent);
    create("/A/C", zkutil::CreateMode::Ephemeral);
    create("/A/B/D", zkutil::CreateMode::Ephemeral);

    add_watch("/A");
    add_watch("/A/B");
    add_watch("/A/C");
    add_watch("/A/B/D");
    add_list_watch("/A");
    add_list_watch("/A/B");
    ASSERT_EQ(storage.watches.size(), 4);
    ASSERT_EQ(storage.list_watches.size(), 2);

    int new_zxid = ++zxid;

    auto remove_request = std::make_shared<ZooKeeperRemoveRecursiveRequest>();
    remove_request->path = "/A";
    remove_request->remove_nodes_limit = 4;

    storage.preprocessRequest(remove_request, 1, 0, new_zxid);
    auto responses = storage.processRequest(remove_request, 1, new_zxid);

    ASSERT_EQ(responses.size(), 7);

    for (size_t i = 0; i < 7; ++i)
    {
        ASSERT_EQ(responses[i].response->error, Coordination::Error::ZOK);

        if (const auto * watch_response = dynamic_cast<Coordination::ZooKeeperWatchResponse *>(responses[i].response.get()))
            ASSERT_EQ(watch_response->type, Coordination::Event::DELETED);
    }

    ASSERT_EQ(storage.watches.size(), 0);
    ASSERT_EQ(storage.list_watches.size(), 0);
}
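
/// Editorial note: the expected count of 7 responses above is the request's
/// own response plus one DELETED event per registered watch: four node
/// watches (/A, /A/B, /A/C, /A/B/D) and two list watches (/A, /A/B), all
/// fired by a single recursive remove.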

TYPED_TEST(CoordinationTest, TestRemoveRecursiveAcls)
{
    using namespace DB;
    using namespace Coordination;

    using Storage = typename TestFixture::Storage;

    ChangelogDirTest rocks("./rocksdb");
    this->setRocksDBDirectory("./rocksdb");

    Storage storage{500, "", this->keeper_context};
    int zxid = 0;

    {
        int new_zxid = ++zxid;
        String user_auth_data = "test_user:test_password";

        const auto auth_request = std::make_shared<ZooKeeperAuthRequest>();
        auth_request->scheme = "digest";
        auth_request->data = user_auth_data;

        storage.preprocessRequest(auth_request, 1, 0, new_zxid);
        auto responses = storage.processRequest(auth_request, 1, new_zxid);

        EXPECT_EQ(responses.size(), 1);
        EXPECT_EQ(responses[0].response->error, Coordination::Error::ZOK) << "Failed to add auth to session";
    }

    const auto create = [&](const String & path)
    {
        int new_zxid = ++zxid;

        const auto create_request = std::make_shared<ZooKeeperCreateRequest>();
        create_request->path = path;
        create_request->acls = {{.permissions = ACL::Create, .scheme = "auth", .id = ""}};

        storage.preprocessRequest(create_request, 1, 0, new_zxid);
        auto responses = storage.processRequest(create_request, 1, new_zxid);

        EXPECT_EQ(responses.size(), 1);
        EXPECT_EQ(responses[0].response->error, Coordination::Error::ZOK) << "Failed to create " << path;
    };

    /// Add nodes with only Create ACL
    create("/A");
    create("/A/B");
    create("/A/C");
    create("/A/B/D");

    {
        int new_zxid = ++zxid;

        auto remove_request = std::make_shared<ZooKeeperRemoveRecursiveRequest>();
        remove_request->path = "/A";
        remove_request->remove_nodes_limit = 4;

        storage.preprocessRequest(remove_request, 1, 0, new_zxid);
        auto responses = storage.processRequest(remove_request, 1, new_zxid);

        EXPECT_EQ(responses.size(), 1);
        EXPECT_EQ(responses[0].response->error, Coordination::Error::ZNOAUTH);
    }
}

/// INSTANTIATE_TEST_SUITE_P(CoordinationTestSuite,
///     CoordinationTest,
///     ::testing::ValuesIn(std::initializer_list<CompressionParam>{CompressionParam{true, ".zstd"}, CompressionParam{false, ""}}));

@@ -20,6 +20,9 @@ static constexpr auto DBMS_DEFAULT_POLL_INTERVAL = 10;
/// The size of the I/O buffer by default.
static constexpr auto DBMS_DEFAULT_BUFFER_SIZE = 1048576ULL;

/// The initial size of adaptive I/O buffer by default.
static constexpr auto DBMS_DEFAULT_INITIAL_ADAPTIVE_BUFFER_SIZE = 16384ULL;

static constexpr auto PADDING_FOR_SIMD = 64;

/** Which blocks by default read the data (by number of rows).
@@ -40,7 +43,7 @@ static constexpr auto SHOW_CHARS_ON_SYNTAX_ERROR = ptrdiff_t(160);
/// each period reduces the error counter by 2 times
/// too short a period can cause errors to disappear immediately after creation.
static constexpr auto DBMS_CONNECTION_POOL_WITH_FAILOVER_DEFAULT_DECREASE_ERROR_PERIOD = 60;
/// replica error max cap, this is to prevent replica from accumulating too many errors and taking to long to recover.
/// replica error max cap, this is to prevent replica from accumulating too many errors and taking too long to recover.
static constexpr auto DBMS_CONNECTION_POOL_WITH_FAILOVER_MAX_ERROR_COUNT = 1000;

/// The boundary on which the blocks for asynchronous file operations should be aligned.

@@ -148,6 +148,7 @@ namespace DB
    M(Bool, storage_metadata_write_full_object_key, false, "Write disk metadata files with VERSION_FULL_OBJECT_KEY format", 0) \
    M(UInt64, max_materialized_views_count_for_table, 0, "A limit on the number of materialized views attached to a table.", 0) \
    M(UInt32, max_database_replicated_create_table_thread_pool_size, 1, "The number of threads to create tables during replica recovery in DatabaseReplicated. Zero means number of threads equal number of cores.", 0) \
    M(Bool, database_replicated_allow_detach_permanently, true, "Allow detaching tables permanently in Replicated databases", 0) \
    M(Bool, format_alter_operations_with_parentheses, false, "If enabled, each operation in alter queries will be surrounded with parentheses in formatted queries to make them less ambiguous.", 0) \
    M(String, default_replica_path, "/clickhouse/tables/{uuid}/{shard}", "The path to the table in ZooKeeper", 0) \
    M(String, default_replica_name, "{replica}", "The replica name in ZooKeeper", 0) \
@@ -169,6 +170,7 @@ namespace DB
    M(Bool, prepare_system_log_tables_on_startup, false, "If true, ClickHouse creates all configured `system.*_log` tables before the startup. It can be helpful if some startup scripts depend on these tables.", 0) \
    M(Double, gwp_asan_force_sample_probability, 0.0003, "Probability that an allocation from specific places will be sampled by GWP Asan (i.e. PODArray allocations)", 0) \
    M(UInt64, config_reload_interval_ms, 2000, "How often clickhouse will reload config and check for new changes", 0) \
    M(UInt64, memory_worker_period_ms, 0, "Tick period of background memory worker which corrects memory tracker memory usages and cleans up unused pages during higher memory usage. If set to 0, default value will be used depending on the memory usage source", 0) \
    M(Bool, disable_insertion_and_mutation, false, "Disable all insert/alter/delete queries. This setting will be enabled if someone needs read-only nodes to prevent insertion and mutation affect reading performance.", 0)

/// If you add a setting which can be updated at runtime, please update 'changeable_settings' map in StorageSystemServerSettings.cpp

@@ -710,7 +710,8 @@ class IColumn;
    M(UInt64, max_distributed_depth, 5, "Maximum distributed query depth", 0) \
    M(Bool, database_replicated_always_detach_permanently, false, "Execute DETACH TABLE as DETACH TABLE PERMANENTLY if database engine is Replicated", 0) \
    M(Bool, database_replicated_allow_only_replicated_engine, false, "Allow to create only Replicated tables in database with engine Replicated", 0) \
    M(Bool, database_replicated_allow_replicated_engine_arguments, true, "Allow to create only Replicated tables in database with engine Replicated with explicit arguments", 0) \
    M(UInt64, database_replicated_allow_replicated_engine_arguments, 0, "0 - Don't allow to explicitly specify ZooKeeper path and replica name for *MergeTree tables in Replicated databases. 1 - Allow. 2 - Allow, but ignore the specified path and use default one instead.", 0) \
    M(UInt64, database_replicated_allow_explicit_uuid, 0, "0 - Don't allow to explicitly specify UUIDs for tables in Replicated databases. 1 - Allow. 2 - Allow, but ignore the specified UUID and generate a random one instead.", 0) \
    M(Bool, database_replicated_allow_heavy_create, false, "Allow long-running DDL queries (CREATE AS SELECT and POPULATE) in Replicated database engine. Note that it can block DDL queue for a long time.", 0) \
    M(Bool, cloud_mode, false, "Only available in ClickHouse Cloud", 0) \
    M(UInt64, cloud_mode_engine, 1, "Only available in ClickHouse Cloud", 0) \
@@ -922,6 +923,9 @@ class IColumn;
    M(Bool, implicit_transaction, false, "If enabled and not already inside a transaction, wraps the query inside a full transaction (begin + commit or rollback)", 0) \
    M(UInt64, grace_hash_join_initial_buckets, 1, "Initial number of grace hash join buckets", 0) \
    M(UInt64, grace_hash_join_max_buckets, 1024, "Limit on the number of grace hash join buckets", 0) \
    M(Int32, join_to_sort_minimum_perkey_rows, 40, "The lower limit of per-key average rows in the right table to determine whether to rerange the right table by key in left or inner join. This setting ensures that the optimization is not applied for sparse table keys", 0) \
    M(Int32, join_to_sort_maximum_table_rows, 10000, "The maximum number of rows in the right table to determine whether to rerange the right table by key in left or inner join.", 0) \
    M(Bool, allow_experimental_join_right_table_sorting, false, "If it is set to true, and the conditions of `join_to_sort_minimum_perkey_rows` and `join_to_sort_maximum_table_rows` are met, rerange the right table by key to improve the performance in left or inner hash join.", 0) \
    M(Timezone, session_timezone, "", "This setting can be removed in the future due to potential caveats. It is experimental and is not suitable for production usage. The default timezone for current session or query. The server default timezone if empty.", 0) \
    M(Bool, use_hive_partitioning, false, "Allows to use hive partitioning for File, URL, S3, AzureBlobStorage and HDFS engines.", 0)\
    \
@@ -944,6 +948,7 @@ class IColumn;
    M(Bool, parallel_replicas_prefer_local_join, true, "If true, and JOIN can be executed with parallel replicas algorithm, and all storages of right JOIN part are *MergeTree, local JOIN will be used instead of GLOBAL JOIN.", 0) \
    M(UInt64, parallel_replicas_mark_segment_size, 128, "Parts virtually divided into segments to be distributed between replicas for parallel reading. This setting controls the size of these segments. Not recommended to change until you're absolutely sure in what you're doing", 0) \
    M(Bool, allow_archive_path_syntax, true, "File/S3 engines/table function will parse paths with '::' as '<archive> :: <file>' if archive has correct extension", 0) \
    M(Bool, parallel_replicas_local_plan, false, "Build local plan for local replica", 0) \
    \
    M(Bool, allow_experimental_inverted_index, false, "If it is set to true, allow to use experimental inverted index.", 0) \
    M(Bool, allow_experimental_full_text_index, false, "If it is set to true, allow to use experimental full-text index.", 0) \
@@ -1139,6 +1144,7 @@ class IColumn;
    M(Bool, input_format_try_infer_variants, false, "Try to infer the Variant type in text formats when there is more than one possible type for column/array elements", 0) \
    M(Bool, type_json_skip_duplicated_paths, false, "When enabled, during parsing JSON object into JSON type duplicated paths will be ignored and only the first one will be inserted instead of an exception", 0) \
    M(UInt64, input_format_json_max_depth, 1000, "Maximum depth of a field in JSON. This is not a strict limit, it does not have to be applied precisely.", 0) \
    M(Bool, input_format_json_empty_as_default, false, "Treat empty fields in JSON input as default values.", 0) \
    M(Bool, input_format_try_infer_integers, true, "Try to infer integers instead of floats while schema inference in text formats", 0) \
    M(Bool, input_format_try_infer_dates, true, "Try to infer dates from string fields while schema inference in text formats", 0) \
    M(Bool, input_format_try_infer_datetimes, true, "Try to infer datetimes from string fields while schema inference in text formats", 0) \

@@ -71,17 +71,24 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
    },
    {"24.9",
        {
            {"input_format_json_empty_as_default", false, false, "Added new setting to allow to treat empty fields in JSON input as default values."},
            {"input_format_try_infer_variants", false, false, "Try to infer Variant type in text formats when there is more than one possible type for column/array elements"},
            {"join_output_by_rowlist_perkey_rows_threshold", 0, 5, "The lower limit of per-key average rows in the right table to determine whether to output by row list in hash join."},
            {"create_if_not_exists", false, false, "New setting."},
            {"allow_materialized_view_with_bad_select", true, true, "Support (but not enable yet) stricter validation in CREATE MATERIALIZED VIEW"},
            {"output_format_always_quote_identifiers", false, false, "New setting."},
            {"output_format_identifier_quoting_style", "Backticks", "Backticks", "New setting."}
            {"output_format_identifier_quoting_style", "Backticks", "Backticks", "New setting."},
            {"database_replicated_allow_replicated_engine_arguments", 1, 0, "Don't allow explicit arguments by default"},
            {"database_replicated_allow_explicit_uuid", 0, 0, "Added a new setting to disallow explicitly specifying table UUID"},
            {"parallel_replicas_local_plan", false, false, "Use local plan for local replica in a query with parallel replicas"},
            {"join_to_sort_minimum_perkey_rows", 0, 40, "The lower limit of per-key average rows in the right table to determine whether to rerange the right table by key in left or inner join. This setting ensures that the optimization is not applied for sparse table keys"},
            {"join_to_sort_maximum_table_rows", 0, 10000, "The maximum number of rows in the right table to determine whether to rerange the right table by key in left or inner join"},
            {"allow_experimental_join_right_table_sorting", false, false, "If it is set to true, and the conditions of `join_to_sort_minimum_perkey_rows` and `join_to_sort_maximum_table_rows` are met, rerange the right table by key to improve the performance in left or inner hash join"}
        }
    },
    {"24.8",
        {
            {"rows_before_aggregation", true, true, "Provide exact value for rows_before_aggregation statistic, represents the number of rows read before aggregation"},
            {"rows_before_aggregation", false, false, "Provide exact value for rows_before_aggregation statistic, represents the number of rows read before aggregation"},
            {"restore_replace_external_table_functions_to_null", false, false, "New setting."},
            {"restore_replace_external_engines_to_null", false, false, "New setting."},
            {"input_format_json_max_depth", 1000000, 1000, "It was unlimited in previous versions, but that was unsafe."},
@@ -97,7 +104,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
            {"use_json_alias_for_old_object_type", true, false, "Use JSON type alias to create new JSON type"},
            {"type_json_skip_duplicated_paths", false, false, "Allow to skip duplicated paths during JSON parsing"},
            {"allow_experimental_vector_similarity_index", false, false, "Added new setting to allow experimental vector similarity indexes"},
            {"input_format_try_infer_datetimes_only_datetime64", true, false, "Allow to infer DateTime instead of DateTime64 in data formats"}
            {"input_format_try_infer_datetimes_only_datetime64", true, false, "Allow to infer DateTime instead of DateTime64 in data formats"},
        }
    },
    {"24.7",

@@ -420,6 +420,21 @@ bool ISerialization::isEphemeralSubcolumn(const DB::ISerialization::SubstreamPat
    return path[last_elem].type == Substream::VariantElementNullMap;
}

bool ISerialization::isDynamicSubcolumn(const DB::ISerialization::SubstreamPath & path, size_t prefix_len)
{
    if (prefix_len == 0 || prefix_len > path.size())
        return false;

    for (size_t i = 0; i != prefix_len; ++i)
    {
        if (path[i].type == SubstreamType::DynamicData || path[i].type == SubstreamType::DynamicStructure
            || path[i].type == SubstreamType::ObjectData || path[i].type == SubstreamType::ObjectStructure)
            return true;
    }

    return false;
}

ISerialization::SubstreamData ISerialization::createFromPath(const SubstreamPath & path, size_t prefix_len)
{
    assert(prefix_len <= path.size());

@@ -457,6 +457,9 @@ public:
    /// for writing/reading data. For example, it's a null-map subcolumn of Variant type (it's always constructed from discriminators).
    static bool isEphemeralSubcolumn(const SubstreamPath & path, size_t prefix_len);

    /// Returns true if stream with specified path corresponds to dynamic subcolumn.
    static bool isDynamicSubcolumn(const SubstreamPath & path, size_t prefix_len);

protected:
    template <typename State, typename StatePtr>
    State * checkAndGetState(const StatePtr & state) const;

@@ -11,6 +11,7 @@
#include <IO/WriteBufferFromString.h>

#include <Formats/FormatSettings.h>
#include <Formats/JSONUtils.h>

namespace DB
{
@@ -615,28 +616,49 @@ void SerializationArray::serializeTextJSONPretty(const IColumn & column, size_t
}


void SerializationArray::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
template <typename ReturnType>
ReturnType SerializationArray::deserializeTextJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
{
    deserializeTextImpl(column, istr,
        [&](IColumn & nested_column)
    auto deserialize_nested = [&settings, this](IColumn & nested_column, ReadBuffer & buf) -> ReturnType
    {
        if constexpr (std::is_same_v<ReturnType, void>)
        {
            if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(nested_column))
                SerializationNullable::deserializeNullAsDefaultOrNestedTextJSON(nested_column, istr, settings, nested);
                SerializationNullable::deserializeNullAsDefaultOrNestedTextJSON(nested_column, buf, settings, nested);
            else
                nested->deserializeTextJSON(nested_column, istr, settings);
        }, false);
                nested->deserializeTextJSON(nested_column, buf, settings);
        }
        else
        {
            if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(nested_column))
                return SerializationNullable::tryDeserializeNullAsDefaultOrNestedTextJSON(nested_column, buf, settings, nested);
            return nested->tryDeserializeTextJSON(nested_column, buf, settings);
        }
    };

    if (settings.json.empty_as_default)
        return deserializeTextImpl<ReturnType>(column, istr,
            [&deserialize_nested, &istr](IColumn & nested_column) -> ReturnType
            {
                return JSONUtils::deserializeEmpyStringAsDefaultOrNested<ReturnType>(nested_column, istr, deserialize_nested);
            }, false);
    else
        return deserializeTextImpl<ReturnType>(column, istr,
            [&deserialize_nested, &istr](IColumn & nested_column) -> ReturnType
            {
                return deserialize_nested(nested_column, istr);
            }, false);
}


void SerializationArray::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
{
    deserializeTextJSONImpl<void>(column, istr, settings);
}

bool SerializationArray::tryDeserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
{
    auto read_nested = [&](IColumn & nested_column)
    {
        if (settings.null_as_default && !isColumnNullableOrLowCardinalityNullable(nested_column))
            return SerializationNullable::tryDeserializeNullAsDefaultOrNestedTextJSON(nested_column, istr, settings, nested);
        return nested->tryDeserializeTextJSON(nested_column, istr, settings);
    };

    return deserializeTextImpl<bool>(column, istr, std::move(read_nested), false);
    return deserializeTextJSONImpl<bool>(column, istr, settings);
}
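
The new deserializeTextJSONImpl folds the throwing (void) and non-throwing (bool) paths into one template and, when input_format_json_empty_as_default is on, routes every array element through JSONUtils::deserializeEmpyStringAsDefaultOrNested. The idea of that helper, reduced to a standalone sketch with hypothetical buffer and column interfaces (not the real ReadBuffer or JSONUtils API):

template <typename Column, typename Buffer, typename DeserializeNested>
bool deserializeEmptyAsDefaultOrNested(Column & column, Buffer & buf, DeserializeNested && deserialize_nested)
{
    /// If the next token is an empty JSON string "", consume it and insert
    /// the column's default value instead of failing to parse the element.
    if (buf.startsWith("\"\""))   /// assumed two-character lookahead helper
    {
        buf.ignore(2);            /// skip both quote characters
        column.insertDefault();
        return true;
    }
    /// Otherwise fall through to the regular nested deserializer.
    return deserialize_nested(column, buf);
}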

@@ -82,6 +82,10 @@ public:
    SerializationPtr create(const SerializationPtr & prev) const override;
    ColumnPtr create(const ColumnPtr & prev) const override;
};

private:
    template <typename ReturnType>
    ReturnType deserializeTextJSONImpl(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const;
};

}
Some files were not shown because too many files have changed in this diff.